| author | Carsten "Tomcat" Book <cbook@mozilla.com> |
| Wed, 09 Nov 2016 16:38:04 +0100 | |
| changeset 321813 | 336759fad4621dfcd0a3293840edbed67018accd |
| parent 321692 | 6b7f1acfbb4b3fab041817a7dfbe39743416181f (current diff) |
| parent 321812 | eac5fd08280a9cfa83925050cd70facc8252eac9 (diff) |
| child 321814 | 3e3b7cff19a51048d6c633630187f2233c882705 |
| child 321826 | c06f9e99eeb80b4695f0b44b6e2e6675b272bd00 |
| child 321906 | 310ae43d23b7392aad985af26c9907a598360b36 |
| push id | 30934 |
| push user | cbook@mozilla.com |
| push date | Wed, 09 Nov 2016 15:38:21 +0000 |
| treeherder | mozilla-central@336759fad462 [default view] [failures only] |
| perfherder | [talos] [build metrics] [platform microbench] (compared to previous push) |
| reviewers | merge |
| milestone | 52.0a1 |
| first release with | nightly linux32
336759fad462
/
52.0a1
/
20161110030211
/
files
nightly linux64
336759fad462
/
52.0a1
/
20161110030211
/
files
nightly mac
336759fad462
/
52.0a1
/
20161110030211
/
files
nightly win32
336759fad462
/
52.0a1
/
20161110030211
/
files
nightly win64
336759fad462
/
52.0a1
/
20161110030211
/
files
|
| last release without | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
| releases | nightly linux32
52.0a1
/
20161110030211
/
pushlog to previous
nightly linux64
52.0a1
/
20161110030211
/
pushlog to previous
nightly mac
52.0a1
/
20161110030211
/
pushlog to previous
nightly win32
52.0a1
/
20161110030211
/
pushlog to previous
nightly win64
52.0a1
/
20161110030211
/
pushlog to previous
|
--- a/CLOBBER +++ b/CLOBBER @@ -17,9 +17,9 @@ # # Modifying this file will now automatically clobber the buildbot machines \o/ # # Are you updating CLOBBER because you think it's needed for your WebIDL # changes to stick? As of bug 928195, this shouldn't be necessary! Please # don't change CLOBBER for WebIDL changes any more. -bug 1313485 - OS X bustage requires clobber to fix +Bug 1277704 - jemalloc may need a clobber
--- a/accessible/atk/AccessibleWrap.cpp +++ b/accessible/atk/AccessibleWrap.cpp @@ -368,16 +368,19 @@ AccessibleWrap::CreateMaiInterfaces(void if (IsLink()) interfacesBits |= 1 << MAI_INTERFACE_HYPERLINK_IMPL; if (!nsAccUtils::MustPrune(this)) { // These interfaces require children // Table interface. if (AsTable()) interfacesBits |= 1 << MAI_INTERFACE_TABLE; + if (AsTableCell()) + interfacesBits |= 1 << MAI_INTERFACE_TABLE_CELL; + // Selection interface. if (IsSelect()) { interfacesBits |= 1 << MAI_INTERFACE_SELECTION; } } return interfacesBits; } @@ -1125,16 +1128,19 @@ GetInterfacesForProxy(ProxyAccessible* a interfaces |= 1 << MAI_INTERFACE_HYPERLINK_IMPL; if (aInterfaces & Interfaces::VALUE) interfaces |= 1 << MAI_INTERFACE_VALUE; if (aInterfaces & Interfaces::TABLE) interfaces |= 1 << MAI_INTERFACE_TABLE; + if (aInterfaces & Interfaces::TABLECELL) + interfaces |= 1 << MAI_INTERFACE_TABLE_CELL; + if (aInterfaces & Interfaces::IMAGE) interfaces |= 1 << MAI_INTERFACE_IMAGE; if (aInterfaces & Interfaces::DOCUMENT) interfaces |= 1 << MAI_INTERFACE_DOCUMENT; if (aInterfaces & Interfaces::SELECTION) { interfaces |= 1 << MAI_INTERFACE_SELECTION;
--- a/browser/base/content/test/general/head.js +++ b/browser/base/content/test/general/head.js @@ -623,18 +623,16 @@ function promiseTabLoadEvent(tab, url) info(`Skipping spurious load event for ${loadedUrl}`); return false; } info("Tab event received: load"); return true; } - // Create two promises: one resolved from the content process when the page - // loads and one that is rejected if we take too long to load the url. let loaded = BrowserTestUtils.browserLoaded(tab.linkedBrowser, false, handle); if (url) BrowserTestUtils.loadURI(tab.linkedBrowser, url); return loaded; }
--- a/browser/base/content/test/plugins/head.js +++ b/browser/base/content/test/plugins/head.js @@ -85,18 +85,16 @@ function promiseTabLoadEvent(tab, url) { info(`Skipping spurious load event for ${loadedUrl}`); return false; } info("Tab event received: load"); return true; } - // Create two promises: one resolved from the content process when the page - // loads and one that is rejected if we take too long to load the url. let loaded = BrowserTestUtils.browserLoaded(tab.linkedBrowser, false, handle); if (url) BrowserTestUtils.loadURI(tab.linkedBrowser, url); return loaded; }
--- a/browser/components/extensions/ext-contextMenus.js +++ b/browser/components/extensions/ext-contextMenus.js @@ -4,16 +4,17 @@ Cu.import("resource://gre/modules/ExtensionUtils.jsm"); Cu.import("resource://gre/modules/MatchPattern.jsm"); Cu.import("resource://gre/modules/Services.jsm"); Cu.import("resource://gre/modules/XPCOMUtils.jsm"); var { EventManager, + ExtensionError, IconDetails, } = ExtensionUtils; // Map[Extension -> Map[ID -> MenuItem]] // Note: we want to enumerate all the menu items so // this cannot be a weak map. var gContextMenuMap = new Map(); @@ -321,17 +322,17 @@ MenuItem.prototype = { return; } let menuMap = gContextMenuMap.get(this.extension); if (!menuMap.has(parentId)) { throw new Error("Could not find any MenuItem with id: " + parentId); } for (let item = menuMap.get(parentId); item; item = item.parent) { if (item === this) { - throw new Error("MenuItem cannot be an ancestor (or self) of its new parent."); + throw new ExtensionError("MenuItem cannot be an ancestor (or self) of its new parent."); } } }, set parentId(parentId) { this.ensureValidParentId(parentId); if (this.parent) {
--- a/browser/components/extensions/test/browser/.eslintrc.js +++ b/browser/components/extensions/test/browser/.eslintrc.js @@ -24,9 +24,13 @@ module.exports = { // eslint-disable-li "closeContextMenu": true, "closeExtensionContextMenu": true, "focusWindow": true, "makeWidgetId": true, "openContextMenu": true, "openExtensionContextMenu": true, "CustomizableUI": true, }, + + "rules": { + "no-shadow": 0, + }, };
--- a/browser/components/extensions/test/browser/browser_ext_browserAction_context.js +++ b/browser/components/extensions/test/browser/browser_ext_browserAction_context.js @@ -1,45 +1,31 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; function* runTests(options) { - function background(getTests) { - // Gets the current details of the browser action, and returns a - // promise that resolves to an object containing them. - function getDetails(tabId) { - return Promise.all([ - browser.browserAction.getTitle({tabId}), - browser.browserAction.getPopup({tabId}), - browser.browserAction.getBadgeText({tabId}), - browser.browserAction.getBadgeBackgroundColor({tabId})] - ).then(details => { - return Promise.resolve({title: details[0], - popup: details[1], - badge: details[2], - badgeBackgroundColor: details[3]}); - }); - } + async function background(getTests) { + async function checkDetails(expecting, tabId) { + let title = await browser.browserAction.getTitle({tabId}); + browser.test.assertEq(expecting.title, title, + "expected value from getTitle"); - function checkDetails(expecting, tabId) { - return getDetails(tabId).then(details => { - browser.test.assertEq(expecting.title, details.title, - "expected value from getTitle"); - - browser.test.assertEq(expecting.popup, details.popup, - "expected value from getPopup"); + let popup = await browser.browserAction.getPopup({tabId}); + browser.test.assertEq(expecting.popup, popup, + "expected value from getPopup"); - browser.test.assertEq(expecting.badge, details.badge, - "expected value from getBadge"); + let badge = await browser.browserAction.getBadgeText({tabId}); + browser.test.assertEq(expecting.badge, badge, + "expected value from getBadge"); - browser.test.assertEq(String(expecting.badgeBackgroundColor), - String(details.badgeBackgroundColor), - "expected value from getBadgeBackgroundColor"); - }); + let badgeBackgroundColor = await 
browser.browserAction.getBadgeBackgroundColor({tabId}); + browser.test.assertEq(String(expecting.badgeBackgroundColor), + String(badgeBackgroundColor), + "expected value from getBadgeBackgroundColor"); } let expectDefaults = expecting => { return checkDetails(expecting); }; let tabs = []; let tests = getTests(tabs, expectDefaults); @@ -52,49 +38,37 @@ function* runTests(options) { () => browser.browserAction.setTitle({tabId, title: "foo"}), () => browser.browserAction.setIcon({tabId, path: "foo.png"}), () => browser.browserAction.setPopup({tabId, popup: "foo.html"}), () => browser.browserAction.setBadgeText({tabId, text: "foo"}), () => browser.browserAction.setBadgeBackgroundColor({tabId, color: [0xff, 0, 0, 0xff]}), ]; for (let call of calls) { - let checkError = e => { - browser.test.assertTrue(e.message.includes(`Invalid tab ID: ${tabId}`), - `Expected invalid tab ID error, got ${e}`); - }; - try { - call().then(() => { - browser.test.fail(`Expected call to fail: ${call}`); - }, e => { - checkError(e); - }); - } catch (e) { - checkError(e); - } + await browser.test.assertRejects( + new Promise(resolve => resolve(call())), + RegExp(`Invalid tab ID: ${tabId}`), + "Expected invalid tab ID error"); } } // Runs the next test in the `tests` array, checks the results, // and passes control back to the outer test scope. function nextTest() { let test = tests.shift(); - test(expecting => { + test(async expecting => { // Check that the API returns the expected values, and then // run the next test. - new Promise(resolve => { - return browser.tabs.query({active: true, currentWindow: true}, resolve); - }).then(tabs => { - return checkDetails(expecting, tabs[0].id); - }).then(() => { - // Check that the actual icon has the expected values, then - // run the next test. 
- browser.test.sendMessage("nextTest", expecting, tests.length); - }); + let tabs = await browser.tabs.query({active: true, currentWindow: true}); + await checkDetails(expecting, tabs[0].id); + + // Check that the actual icon has the expected values, then + // run the next test. + browser.test.sendMessage("nextTest", expecting, tests.length); }); } browser.test.onMessage.addListener((msg) => { if (msg != "runNextTest") { browser.test.fail("Expecting 'runNextTest' message"); } @@ -236,117 +210,110 @@ add_task(function* testTabSwitchContext( {"icon": browser.runtime.getURL("default-2.png"), "popup": browser.runtime.getURL("default-2.html"), "title": "Default Title 2", "badge": "d2", "badgeBackgroundColor": [0, 0xff, 0, 0xff]}, ]; return [ - expect => { + async expect => { browser.test.log("Initial state, expect default properties."); - expectDefaults(details[0]).then(() => { - expect(details[0]); - }); + + await expectDefaults(details[0]); + expect(details[0]); }, - expect => { + async expect => { browser.test.log("Change the icon in the current tab. Expect default properties excluding the icon."); browser.browserAction.setIcon({tabId: tabs[0], path: "1.png"}); - expectDefaults(details[0]).then(() => { - expect(details[1]); - }); + + await expectDefaults(details[0]); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Create a new tab. Expect default properties."); - browser.tabs.create({active: true, url: "about:blank?0"}, tab => { - tabs.push(tab.id); - expectDefaults(details[0]).then(() => { - expect(details[0]); - }); - }); + let tab = await browser.tabs.create({active: true, url: "about:blank?0"}); + tabs.push(tab.id); + + await expectDefaults(details[0]); + expect(details[0]); }, - expect => { + async expect => { browser.test.log("Change properties. 
Expect new properties."); let tabId = tabs[1]; browser.browserAction.setIcon({tabId, path: "2.png"}); browser.browserAction.setPopup({tabId, popup: "2.html"}); browser.browserAction.setTitle({tabId, title: "Title 2"}); browser.browserAction.setBadgeText({tabId, text: "2"}); browser.browserAction.setBadgeBackgroundColor({tabId, color: "#ff0000"}); browser.browserAction.disable(tabId); - expectDefaults(details[0]).then(() => { - expect(details[2]); - }); + await expectDefaults(details[0]); + expect(details[2]); }, expect => { browser.test.log("Navigate to a new page. Expect no changes."); // TODO: This listener should not be necessary, but the |tabs.update| // callback currently fires too early in e10s windows. browser.tabs.onUpdated.addListener(function listener(tabId, changed) { if (tabId == tabs[1] && changed.url) { browser.tabs.onUpdated.removeListener(listener); expect(details[2]); } }); browser.tabs.update(tabs[1], {url: "about:blank?1"}); }, - expect => { + async expect => { browser.test.log("Switch back to the first tab. Expect previously set properties."); - browser.tabs.update(tabs[0], {active: true}, () => { - expect(details[1]); - }); + await browser.tabs.update(tabs[0], {active: true}); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Change default values, expect those changes reflected."); browser.browserAction.setIcon({path: "default-2.png"}); browser.browserAction.setPopup({popup: "default-2.html"}); browser.browserAction.setTitle({title: "Default Title 2"}); browser.browserAction.setBadgeText({text: "d2"}); browser.browserAction.setBadgeBackgroundColor({color: [0, 0xff, 0, 0xff]}); browser.browserAction.disable(); - expectDefaults(details[3]).then(() => { - expect(details[3]); - }); + + await expectDefaults(details[3]); + expect(details[3]); }, - expect => { + async expect => { browser.test.log("Re-enable by default. 
Expect enabled."); browser.browserAction.enable(); - expectDefaults(details[4]).then(() => { - expect(details[4]); - }); + + await expectDefaults(details[4]); + expect(details[4]); }, - expect => { + async expect => { browser.test.log("Switch back to tab 2. Expect former value, unaffected by changes to defaults in previous step."); - browser.tabs.update(tabs[1], {active: true}, () => { - expectDefaults(details[3]).then(() => { - expect(details[2]); - }); - }); + await browser.tabs.update(tabs[1], {active: true}); + + await expectDefaults(details[3]); + expect(details[2]); }, - expect => { + async expect => { browser.test.log("Delete tab, switch back to tab 1. Expect previous results again."); - browser.tabs.remove(tabs[1], () => { - expect(details[4]); - }); + await browser.tabs.remove(tabs[1]); + expect(details[4]); }, - expect => { + async expect => { browser.test.log("Create a new tab. Expect new default properties."); - browser.tabs.create({active: true, url: "about:blank?2"}, tab => { - tabs.push(tab.id); - expect(details[5]); - }); + let tab = await browser.tabs.create({active: true, url: "about:blank?2"}); + tabs.push(tab.id); + expect(details[5]); }, - expect => { + async expect => { browser.test.log("Delete tab."); - browser.tabs.remove(tabs[2], () => { - expect(details[4]); - }); + await browser.tabs.remove(tabs[2]); + expect(details[4]); }, ]; }, }); }); add_task(function* testDefaultTitle() { yield runTests({ @@ -386,46 +353,46 @@ add_task(function* testDefaultTitle() { {"title": "", "popup": "", "badge": "", "badgeBackgroundColor": DEFAULT_BADGE_COLOR, "icon": browser.runtime.getURL("icon.png")}, ]; return [ - expect => { + async expect => { browser.test.log("Initial state. Expect extension title as default title."); - expectDefaults(details[0]).then(() => { - expect(details[0]); - }); + + await expectDefaults(details[0]); + expect(details[0]); }, - expect => { + async expect => { browser.test.log("Change the title. 
Expect new title."); browser.browserAction.setTitle({tabId: tabs[0], title: "Foo Title"}); - expectDefaults(details[0]).then(() => { - expect(details[1]); - }); + + await expectDefaults(details[0]); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Change the default. Expect same properties."); browser.browserAction.setTitle({title: "Bar Title"}); - expectDefaults(details[2]).then(() => { - expect(details[1]); - }); + + await expectDefaults(details[2]); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Clear the title. Expect new default title."); browser.browserAction.setTitle({tabId: tabs[0], title: ""}); - expectDefaults(details[2]).then(() => { - expect(details[2]); - }); + + await expectDefaults(details[2]); + expect(details[2]); }, - expect => { + async expect => { browser.test.log("Set default title to null string. Expect null string from API, extension title in UI."); browser.browserAction.setTitle({title: ""}); - expectDefaults(details[3]).then(() => { - expect(details[3]); - }); + + await expectDefaults(details[3]); + expect(details[3]); }, ]; }, }); });
--- a/browser/components/extensions/test/browser/browser_ext_browserAction_pageAction_icon_permissions.js +++ b/browser/components/extensions/test/browser/browser_ext_browserAction_pageAction_icon_permissions.js @@ -15,24 +15,20 @@ add_task(function* testInvalidIconSizes( let tabId = tabs[0].id; let promises = []; for (let api of ["pageAction", "browserAction"]) { // helper function to run setIcon and check if it fails let assertSetIconThrows = function(detail, error, message) { detail.tabId = tabId; promises.push( - browser[api].setIcon(detail).then( - () => { - browser.test.fail("Expected an error on invalid icon size."); - browser.test.notifyFail("setIcon with invalid icon size"); - }, - error => { - browser.test.succeed("setIcon with invalid icon size"); - })); + browser.test.assertRejects( + browser[api].setIcon(detail), + /must be an integer/, + "setIcon with invalid icon size")); }; let imageData = new ImageData(1, 1); // test invalid icon size inputs for (let type of ["path", "imageData"]) { let img = type == "imageData" ? imageData : "test.png"; @@ -143,24 +139,20 @@ add_task(function* testSecureURLsDenied( let urls = ["chrome://browser/content/browser.xul", "javascript:true"]; let promises = []; for (let url of urls) { for (let api of ["pageAction", "browserAction"]) { promises.push( - browser[api].setIcon({tabId, path: url}).then( - () => { - browser.test.fail(`Load of '${url}' succeeded. Expected failure.`); - browser.test.notifyFail("setIcon security tests"); - }, - error => { - browser.test.succeed(`Load of '${url}' failed. Expected failure. ${error}`); - })); + browser.test.assertRejects( + browser[api].setIcon({tabId, path: url}), + /Illegal URL/, + `Load of '${url}' should fail.`)); } } Promise.all(promises).then(() => { browser.test.notifyPass("setIcon security tests"); }); }); },
--- a/browser/components/extensions/test/browser/browser_ext_contextMenus.js +++ b/browser/components/extensions/test/browser/browser_ext_contextMenus.js @@ -47,17 +47,17 @@ add_task(function* () { gBrowser.selectedTab = tab1; let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["contextMenus"], }, - background: function() { + background: async function() { // A generic onclick callback function. function genericOnClick(info, tab) { browser.test.sendMessage("onclick", {info, tab}); } browser.contextMenus.onClicked.addListener((info, tab) => { browser.test.sendMessage("browser.contextMenus.onClicked", {info, tab}); }); @@ -117,24 +117,22 @@ add_task(function* () { }); browser.contextMenus.remove(parentToDel); browser.contextMenus.create({ title: "Without onclick property", id: "ext-without-onclick", }); - browser.contextMenus.update(parent, {parentId: child2}).then( - () => { - browser.test.notifyFail("contextmenus"); - }, - () => { - browser.test.notifyPass("contextmenus"); - } - ); + await browser.test.assertRejects( + browser.contextMenus.update(parent, {parentId: child2}), + /cannot be an ancestor/, + "Should not be able to reparent an item as descendent of itself"); + + browser.test.notifyPass("contextmenus"); }, }); yield extension.startup(); yield extension.awaitFinish("contextmenus"); let expectedClickInfo = { menuItemId: "ext-image",
--- a/browser/components/extensions/test/browser/browser_ext_incognito_popup.js +++ b/browser/components/extensions/test/browser/browser_ext_incognito_popup.js @@ -9,91 +9,92 @@ add_task(function* testIncognitoPopup() "browser_action": { "default_popup": "popup.html", }, "page_action": { "default_popup": "popup.html", }, }, - background() { + background: async function() { let resolveMessage; browser.runtime.onMessage.addListener(msg => { if (resolveMessage && msg.message == "popup-details") { resolveMessage(msg); } }); let awaitPopup = windowId => { return new Promise(resolve => { resolveMessage = resolve; }).then(msg => { browser.test.assertEq(windowId, msg.windowId, "Got popup message from correct window"); return msg; }); }; - let testWindow = window => { - return browser.tabs.query({active: true, windowId: window.id}).then(([tab]) => { - return browser.pageAction.show(tab.id); - }).then(() => { - browser.test.sendMessage("click-pageAction"); + let testWindow = async window => { + let [tab] = await browser.tabs.query({active: true, windowId: window.id}); + + await browser.pageAction.show(tab.id); + browser.test.sendMessage("click-pageAction"); - return awaitPopup(window.id); - }).then(msg => { - browser.test.assertEq(window.incognito, msg.incognito, "Correct incognito status in pageAction popup"); + let msg = await awaitPopup(window.id); + browser.test.assertEq(window.incognito, msg.incognito, "Correct incognito status in pageAction popup"); - browser.test.sendMessage("click-browserAction"); + browser.test.sendMessage("click-browserAction"); - return awaitPopup(window.id); - }).then(msg => { - browser.test.assertEq(window.incognito, msg.incognito, "Correct incognito status in browserAction popup"); - }); + msg = await awaitPopup(window.id); + browser.test.assertEq(window.incognito, msg.incognito, "Correct incognito status in browserAction popup"); }; const URL = "http://example.com/incognito"; let windowReady = new Promise(resolve => { 
browser.tabs.onUpdated.addListener(function listener(tabId, changed, tab) { if (changed.status == "complete" && tab.url == URL) { browser.tabs.onUpdated.removeListener(listener); resolve(); } }); }); - browser.windows.getCurrent().then(window => { - return testWindow(window); - }).then(() => { - return browser.windows.create({incognito: true, url: URL}); - }).then(window => { - return windowReady.then(() => { - return testWindow(window); - }).then(() => { - return browser.windows.remove(window.id); - }); - }).then(() => { + try { + { + let window = await browser.windows.getCurrent(); + + await testWindow(window); + } + + { + let window = await browser.windows.create({incognito: true, url: URL}); + await windowReady; + + await testWindow(window); + + await browser.windows.remove(window.id); + } + browser.test.notifyPass("incognito"); - }).catch(error => { + } catch (error) { browser.test.fail(`Error: ${error} :: ${error.stack}`); browser.test.notifyFail("incognito"); - }); + } }, files: { "popup.html": '<html><head><meta charset="utf-8"><script src="popup.js"></script></head></html>', - "popup.js": function() { - browser.windows.getCurrent().then(win => { - browser.runtime.sendMessage({ - message: "popup-details", - windowId: win.id, - incognito: browser.extension.inIncognitoContext, - }); - window.close(); + "popup.js": async function() { + let win = await browser.windows.getCurrent(); + browser.runtime.sendMessage({ + message: "popup-details", + windowId: win.id, + incognito: browser.extension.inIncognitoContext, }); + window.close(); }, }, }); extension.onMessage("click-browserAction", () => { clickBrowserAction(extension, Services.wm.getMostRecentWindow("navigator:browser")); });
--- a/browser/components/extensions/test/browser/browser_ext_legacy_extension_context_contentscript.js +++ b/browser/components/extensions/test/browser/browser_ext_legacy_extension_context_contentscript.js @@ -22,27 +22,29 @@ function promiseAddonStartup(extension) * tab info. */ add_task(function* test_legacy_extension_context_contentscript_connection() { function backgroundScript() { // Extract the assigned uuid from the background page url and send it // in a test message. let uuid = window.location.hostname; - browser.test.onMessage.addListener(msg => { + browser.test.onMessage.addListener(async msg => { if (msg == "open-test-tab") { - browser.tabs.create({url: "http://example.com/"}) - .then(tab => browser.test.sendMessage("get-expected-sender-info", { - uuid, tab, - })); + let tab = await browser.tabs.create({url: "http://example.com/"}); + browser.test.sendMessage("get-expected-sender-info", + {uuid, tab}); } else if (msg == "close-current-tab") { - browser.tabs.query({active: true}) - .then(tabs => browser.tabs.remove(tabs[0].id)) - .then(() => browser.test.sendMessage("current-tab-closed", true)) - .catch(() => browser.test.sendMessage("current-tab-closed", false)); + try { + let [tab] = await browser.tabs.query({active: true}); + await browser.tabs.remove(tab.id); + browser.test.sendMessage("current-tab-closed", true); + } catch (e) { + browser.test.sendMessage("current-tab-closed", false); + } } }); browser.test.sendMessage("ready"); } function contentScript() { browser.runtime.sendMessage("webextension -> legacy_extension message", (reply) => {
--- a/browser/components/extensions/test/browser/browser_ext_optionsPage_privileges.js +++ b/browser/components/extensions/test/browser/browser_ext_optionsPage_privileges.js @@ -12,26 +12,27 @@ add_task(function* test_tab_options_priv browser.test.log(`Error: ${error} :: ${error.stack}`); browser.test.notifyFail("options-ui-privileges"); }); } }); browser.runtime.openOptionsPage(); } - function optionsScript() { - browser.tabs.query({url: "http://example.com/"}).then(tabs => { - browser.test.assertEq("http://example.com/", tabs[0].url, "Got the expect tab"); - return browser.tabs.getCurrent(); - }).then(tab => { + async function optionsScript() { + try { + let [tab] = await browser.tabs.query({url: "http://example.com/"}); + browser.test.assertEq("http://example.com/", tab.url, "Got the expect tab"); + + tab = await browser.tabs.getCurrent(); browser.runtime.sendMessage({msgName: "removeTabId", tabId: tab.id}); - }).catch(error => { + } catch (error) { browser.test.log(`Error: ${error} :: ${error.stack}`); browser.test.notifyFail("options-ui-privileges"); - }); + } } const ID = "options_privileges@tests.mozilla.org"; let extension = ExtensionTestUtils.loadExtension({ useAddonManager: "temporary", manifest: { applications: {gecko: {id: ID}},
--- a/browser/components/extensions/test/browser/browser_ext_pageAction_context.js +++ b/browser/components/extensions/test/browser/browser_ext_pageAction_context.js @@ -74,108 +74,105 @@ add_task(function* testTabSwitchContext( browser.tabs.onUpdated.addListener(function listener(tabId, changed) { if (tabId == details.id && changed.url == details.url) { browser.tabs.onUpdated.removeListener(listener); resolve(); } }); }); }; + return [ expect => { browser.test.log("Initial state. No icon visible."); expect(null); }, - expect => { + async expect => { browser.test.log("Show the icon on the first tab, expect default properties."); - browser.pageAction.show(tabs[0]).then(() => { - expect(details[0]); - }); + await browser.pageAction.show(tabs[0]); + expect(details[0]); }, expect => { browser.test.log("Change the icon. Expect default properties excluding the icon."); browser.pageAction.setIcon({tabId: tabs[0], path: "1.png"}); expect(details[1]); }, - expect => { + async expect => { browser.test.log("Create a new tab. No icon visible."); - browser.tabs.create({active: true, url: "about:blank?0"}, tab => { - tabs.push(tab.id); - expect(null); - }); + let tab = await browser.tabs.create({active: true, url: "about:blank?0"}); + tabs.push(tab.id); + expect(null); }, expect => { browser.test.log("Await tab load. No icon visible."); expect(null); }, - expect => { + async expect => { browser.test.log("Change properties. 
Expect new properties."); let tabId = tabs[1]; - browser.pageAction.show(tabId).then(() => { - browser.pageAction.setIcon({tabId, path: "2.png"}); - browser.pageAction.setPopup({tabId, popup: "2.html"}); - browser.pageAction.setTitle({tabId, title: "Title 2"}); + await browser.pageAction.show(tabId); - expect(details[2]); - }); + browser.pageAction.setIcon({tabId, path: "2.png"}); + browser.pageAction.setPopup({tabId, popup: "2.html"}); + browser.pageAction.setTitle({tabId, title: "Title 2"}); + + expect(details[2]); }, - expect => { + async expect => { browser.test.log("Change the hash. Expect same properties."); - promiseTabLoad({id: tabs[1], url: "about:blank?0#ref"}).then(() => { - expect(details[2]); - }); + let promise = promiseTabLoad({id: tabs[1], url: "about:blank?0#ref"}); + browser.tabs.update(tabs[1], {url: "about:blank?0#ref"}); + await promise; - browser.tabs.update(tabs[1], {url: "about:blank?0#ref"}); + expect(details[2]); }, expect => { browser.test.log("Clear the title. Expect default title."); browser.pageAction.setTitle({tabId: tabs[1], title: ""}); expect(details[3]); }, - expect => { + async expect => { browser.test.log("Navigate to a new page. Expect icon hidden."); // TODO: This listener should not be necessary, but the |tabs.update| // callback currently fires too early in e10s windows. - promiseTabLoad({id: tabs[1], url: "about:blank?1"}).then(() => { - expect(null); - }); + let promise = promiseTabLoad({id: tabs[1], url: "about:blank?1"}); browser.tabs.update(tabs[1], {url: "about:blank?1"}); - }, - expect => { - browser.test.log("Show the icon. Expect default properties again."); - browser.pageAction.show(tabs[1]).then(() => { - expect(details[0]); - }); + + await promise; + expect(null); }, - expect => { + async expect => { + browser.test.log("Show the icon. Expect default properties again."); + + await browser.pageAction.show(tabs[1]); + expect(details[0]); + }, + async expect => { browser.test.log("Switch back to the first tab. 
Expect previously set properties."); - browser.tabs.update(tabs[0], {active: true}, () => { - expect(details[1]); - }); + await browser.tabs.update(tabs[0], {active: true}); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Hide the icon on tab 2. Switch back, expect hidden."); - browser.pageAction.hide(tabs[1]).then(() => { - browser.tabs.update(tabs[1], {active: true}, () => { - expect(null); - }); - }); + await browser.pageAction.hide(tabs[1]); + + await browser.tabs.update(tabs[1], {active: true}); + expect(null); }, - expect => { + async expect => { browser.test.log("Switch back to tab 1. Expect previous results again."); - browser.tabs.remove(tabs[1], () => { - expect(details[1]); - }); + await browser.tabs.remove(tabs[1]); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Hide the icon. Expect hidden."); - browser.pageAction.hide(tabs[0]).then(() => { - expect(null); - }); + + await browser.pageAction.hide(tabs[0]); + expect(null); }, ]; }, }); });
--- a/browser/components/extensions/test/browser/browser_ext_pageAction_popup.js +++ b/browser/components/extensions/test/browser/browser_ext_pageAction_popup.js @@ -34,17 +34,17 @@ add_task(function* testPageActionPopup() "data/popup-b.html": scriptPage("popup-b.js"), "data/popup-b.js": function() { browser.runtime.sendMessage("from-popup-b"); }, "data/background.html": scriptPage("background.js"), - "data/background.js": function() { + "data/background.js": async function() { let tabId; let sendClick; let tests = [ () => { sendClick({expectEvent: false, expectPopup: "a"}); }, () => { @@ -110,17 +110,17 @@ add_task(function* testPageActionPopup() } else { browser.test.fail("unexpected click event"); } expect.event = false; browser.test.sendMessage("next-test"); }); - browser.test.onMessage.addListener((msg) => { + browser.test.onMessage.addListener(msg => { if (msg == "close-popup") { browser.runtime.sendMessage("close-popup"); return; } if (msg != "next-test") { browser.test.fail("Expecting 'next-test' message"); } @@ -128,23 +128,21 @@ add_task(function* testPageActionPopup() if (tests.length) { let test = tests.shift(); test(); } else { browser.test.notifyPass("pageaction-tests-done"); } }); - browser.tabs.query({active: true, currentWindow: true}, tabs => { - tabId = tabs[0].id; + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + tabId = tab.id; - browser.pageAction.show(tabId).then(() => { - browser.test.sendMessage("next-test"); - }); - }); + await browser.pageAction.show(tabId); + browser.test.sendMessage("next-test"); }, }, }); extension.onMessage("send-click", () => { clickPageAction(extension); });
--- a/browser/components/extensions/test/browser/browser_ext_pageAction_title.js +++ b/browser/components/extensions/test/browser/browser_ext_pageAction_title.js @@ -79,106 +79,100 @@ add_task(function* testTabSwitchContext( }); }); }; return [ expect => { browser.test.log("Initial state. No icon visible."); expect(null); }, - expect => { + async expect => { browser.test.log("Show the icon on the first tab, expect default properties."); - browser.pageAction.show(tabs[0]).then(() => { - expect(details[0]); - }); + await browser.pageAction.show(tabs[0]); + expect(details[0]); }, expect => { browser.test.log("Change the icon. Expect default properties excluding the icon."); browser.pageAction.setIcon({tabId: tabs[0], path: "1.png"}); expect(details[1]); }, - expect => { + async expect => { browser.test.log("Create a new tab. No icon visible."); - browser.tabs.create({active: true, url: "about:blank?0"}, tab => { - tabs.push(tab.id); - expect(null); - }); + let tab = await browser.tabs.create({active: true, url: "about:blank?0"}); + tabs.push(tab.id); + expect(null); }, expect => { browser.test.log("Await tab load. No icon visible."); expect(null); }, - expect => { + async expect => { browser.test.log("Change properties. Expect new properties."); let tabId = tabs[1]; - browser.pageAction.show(tabId).then(() => { - browser.pageAction.setIcon({tabId, path: "2.png"}); - browser.pageAction.setPopup({tabId, popup: "2.html"}); - browser.pageAction.setTitle({tabId, title: "Title 2"}); - expect(details[2]); - }); + await browser.pageAction.show(tabId); + browser.pageAction.setIcon({tabId, path: "2.png"}); + browser.pageAction.setPopup({tabId, popup: "2.html"}); + browser.pageAction.setTitle({tabId, title: "Title 2"}); + + expect(details[2]); }, - expect => { + async expect => { browser.test.log("Change the hash. 
Expect same properties."); - promiseTabLoad({id: tabs[1], url: "about:blank?0#ref"}).then(() => { - expect(details[2]); - }); + let promise = promiseTabLoad({id: tabs[1], url: "about:blank?0#ref"}); browser.tabs.update(tabs[1], {url: "about:blank?0#ref"}); + + await promise; + expect(details[2]); }, expect => { browser.test.log("Clear the title. Expect default title."); browser.pageAction.setTitle({tabId: tabs[1], title: ""}); expect(details[3]); }, - expect => { + async expect => { browser.test.log("Navigate to a new page. Expect icon hidden."); // TODO: This listener should not be necessary, but the |tabs.update| // callback currently fires too early in e10s windows. - promiseTabLoad({id: tabs[1], url: "about:blank?1"}).then(() => { - expect(null); - }); + let promise = promiseTabLoad({id: tabs[1], url: "about:blank?1"}); browser.tabs.update(tabs[1], {url: "about:blank?1"}); - }, - expect => { - browser.test.log("Show the icon. Expect default properties again."); - browser.pageAction.show(tabs[1]).then(() => { - expect(details[0]); - }); + + await promise; + expect(null); }, - expect => { + async expect => { + browser.test.log("Show the icon. Expect default properties again."); + await browser.pageAction.show(tabs[1]); + expect(details[0]); + }, + async expect => { browser.test.log("Switch back to the first tab. Expect previously set properties."); - browser.tabs.update(tabs[0], {active: true}, () => { - expect(details[1]); - }); + await browser.tabs.update(tabs[0], {active: true}); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Hide the icon on tab 2. Switch back, expect hidden."); - browser.pageAction.hide(tabs[1]).then(() => { - browser.tabs.update(tabs[1], {active: true}, () => { - expect(null); - }); - }); + await browser.pageAction.hide(tabs[1]); + await browser.tabs.update(tabs[1], {active: true}); + expect(null); }, - expect => { + async expect => { browser.test.log("Switch back to tab 1. 
Expect previous results again."); - browser.tabs.remove(tabs[1], () => { - expect(details[1]); - }); + await browser.tabs.remove(tabs[1]); + expect(details[1]); }, - expect => { + async expect => { browser.test.log("Hide the icon. Expect hidden."); - browser.pageAction.hide(tabs[0]).then(() => { - expect(null); - }); + await browser.pageAction.hide(tabs[0]); + expect(null); }, ]; }, }); }); add_task(function* testDefaultTitle() { yield runTests({ @@ -206,21 +200,20 @@ add_task(function* testDefaultTitle() { "icon": browser.runtime.getURL("icon.png")}, ]; return [ expect => { browser.test.log("Initial state. No icon visible."); expect(null); }, - expect => { + async expect => { browser.test.log("Show the icon on the first tab, expect extension title as default title."); - browser.pageAction.show(tabs[0]).then(() => { - expect(details[0]); - }); + await browser.pageAction.show(tabs[0]); + expect(details[0]); }, expect => { browser.test.log("Change the title. Expect new title."); browser.pageAction.setTitle({tabId: tabs[0], title: "Foo Title"}); expect(details[1]); }, expect => { browser.test.log("Clear the title. Expect extension title.");
--- a/browser/components/extensions/test/browser/browser_ext_popup_sendMessage.js +++ b/browser/components/extensions/test/browser/browser_ext_popup_sendMessage.js @@ -15,55 +15,54 @@ add_task(function* test_popup_sendMessag "page_action": { "default_popup": "popup.html", "browser_style": true, }, }, files: { "popup.html": scriptPage("popup.js"), - "popup.js": function() { - browser.runtime.onMessage.addListener(msg => { + "popup.js": async function() { + browser.runtime.onMessage.addListener(async msg => { if (msg == "popup-ping") { return Promise.resolve("popup-pong"); } }); - browser.runtime.sendMessage("background-ping").then(response => { - browser.test.sendMessage("background-ping-response", response); - }); + let response = await browser.runtime.sendMessage("background-ping"); + browser.test.sendMessage("background-ping-response", response); }, }, - background() { - browser.tabs.query({active: true, currentWindow: true}).then(([tab]) => { - return browser.pageAction.show(tab.id); - }).then(() => { - browser.test.sendMessage("page-action-ready"); - }); + async background() { + browser.runtime.onMessage.addListener(async msg => { + if (msg == "background-ping") { + let response = await browser.runtime.sendMessage("popup-ping"); - browser.runtime.onMessage.addListener(msg => { - if (msg == "background-ping") { - browser.runtime.sendMessage("popup-ping").then(response => { - browser.test.sendMessage("popup-ping-response", response); - }); + browser.test.sendMessage("popup-ping-response", response); - return new Promise(resolve => { + await new Promise(resolve => { // Wait long enough that we're relatively sure the docShells have // been swapped. Note that this value is fairly arbitrary. The load // event that triggers the swap should happen almost immediately // after the message is sent. The extra quarter of a second gives us // enough leeway that we can expect to respond after the swap in the // vast majority of cases. 
setTimeout(resolve, 250); - }).then(() => { - return "background-pong"; }); + + return "background-pong"; } }); + + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + + await browser.pageAction.show(tab.id); + + browser.test.sendMessage("page-action-ready"); }, }); yield extension.startup(); { clickBrowserAction(extension);
--- a/browser/components/extensions/test/browser/browser_ext_popup_shutdown.js +++ b/browser/components/extensions/test/browser/browser_ext_popup_shutdown.js @@ -1,19 +1,18 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; let getExtension = () => { return ExtensionTestUtils.loadExtension({ - background() { - browser.tabs.query({active: true, currentWindow: true}, tabs => { - browser.pageAction.show(tabs[0].id) - .then(() => { browser.test.sendMessage("pageAction ready"); }); - }); + background: async function() { + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + await browser.pageAction.show(tab.id); + browser.test.sendMessage("pageAction ready"); }, manifest: { "browser_action": { "default_popup": "popup.html", "browser_style": false, },
--- a/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage.js +++ b/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage.js @@ -56,17 +56,17 @@ add_tasks(function* test_inline_options( let extension = yield loadExtension(Object.assign({}, extraOptions, { manifest: { applications: {gecko: {id: "inline_options@tests.mozilla.org"}}, "options_ui": { "page": "options.html", }, }, - background: function() { + background: async function() { let _optionsPromise; let awaitOptions = () => { browser.test.assertFalse(_optionsPromise, "Should not be awaiting options already"); return new Promise(resolve => { _optionsPromise = {resolve}; }); }; @@ -77,76 +77,73 @@ add_tasks(function* test_inline_options( _optionsPromise.resolve(sender.tab); _optionsPromise = null; } else { browser.test.fail("Saw unexpected options page load"); } } }); - let firstTab, optionsTab; - browser.tabs.query({currentWindow: true, active: true}).then(tabs => { - firstTab = tabs[0].id; + try { + let [firstTab] = await browser.tabs.query({currentWindow: true, active: true}); browser.test.log("Open options page. 
Expect fresh load."); - return Promise.all([ + + let [, optionsTab] = await Promise.all([ browser.runtime.openOptionsPage(), awaitOptions(), ]); - }).then(([, tab]) => { - browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager"); - browser.test.assertTrue(tab.active, "Tab is active"); - browser.test.assertTrue(tab.id != firstTab, "Tab is a new tab"); - optionsTab = tab.id; + browser.test.assertEq("about:addons", optionsTab.url, "Tab contains AddonManager"); + browser.test.assertTrue(optionsTab.active, "Tab is active"); + browser.test.assertTrue(optionsTab.id != firstTab.id, "Tab is a new tab"); + browser.test.assertEq(0, browser.extension.getViews({type: "popup"}).length, "viewType is not popup"); browser.test.assertEq(1, browser.extension.getViews({type: "tab"}).length, "viewType is tab"); - browser.test.assertEq(1, browser.extension.getViews({windowId: tab.windowId}).length, "windowId matches"); + browser.test.assertEq(1, browser.extension.getViews({windowId: optionsTab.windowId}).length, "windowId matches"); + let views = browser.extension.getViews(); browser.test.assertEq(2, views.length, "Expected the options page and the background page"); browser.test.assertTrue(views.includes(window), "One of the views is the background page"); browser.test.assertTrue(views.some(w => w.iAmOption), "One of the views is the options page"); browser.test.log("Switch tabs."); - return browser.tabs.update(firstTab, {active: true}); - }).then(() => { + await browser.tabs.update(firstTab.id, {active: true}); + browser.test.log("Open options page again. 
Expect tab re-selected, no new load."); - return browser.runtime.openOptionsPage(); - }).then(() => { - return browser.tabs.query({currentWindow: true, active: true}); - }).then(([tab]) => { - browser.test.assertEq(optionsTab, tab.id, "Tab is the same as the previous options tab"); + await browser.runtime.openOptionsPage(); + let [tab] = await browser.tabs.query({currentWindow: true, active: true}); + + browser.test.assertEq(optionsTab.id, tab.id, "Tab is the same as the previous options tab"); browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager"); browser.test.log("Ping options page."); - return browser.runtime.sendMessage("ping"); - }).then((pong) => { + let pong = await browser.runtime.sendMessage("ping"); browser.test.assertEq("pong", pong, "Got pong."); browser.test.log("Remove options tab."); - return browser.tabs.remove(optionsTab); - }).then(() => { + await browser.tabs.remove(optionsTab.id); + browser.test.log("Open options page again. Expect fresh load."); - return Promise.all([ + [, tab] = await Promise.all([ browser.runtime.openOptionsPage(), awaitOptions(), ]); - }).then(([, tab]) => { browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager"); browser.test.assertTrue(tab.active, "Tab is active"); - browser.test.assertTrue(tab.id != optionsTab, "Tab is a new tab"); + browser.test.assertTrue(tab.id != optionsTab.id, "Tab is a new tab"); - return browser.tabs.remove(tab.id); - }).then(() => { + await browser.tabs.remove(tab.id); + browser.test.notifyPass("options-ui"); - }).catch(error => { - browser.test.log(`Error: ${error} :: ${error.stack}`); + } catch (error) { + browser.test.fail(`Error: ${error} :: ${error.stack}`); browser.test.notifyFail("options-ui"); - }); + } }, })); yield extension.awaitFinish("options-ui"); yield extension.unload(); yield BrowserTestUtils.removeTab(tab); }); @@ -160,17 +157,17 @@ add_tasks(function* test_tab_options(ext manifest: { applications: {gecko: {id: 
"tab_options@tests.mozilla.org"}}, "options_ui": { "page": "options.html", "open_in_tab": true, }, }, - background: function() { + background: async function() { let _optionsPromise; let awaitOptions = () => { browser.test.assertFalse(_optionsPromise, "Should not be awaiting options already"); return new Promise(resolve => { _optionsPromise = {resolve}; }); }; @@ -183,77 +180,73 @@ add_tasks(function* test_tab_options(ext } else { browser.test.fail("Saw unexpected options page load"); } } }); let optionsURL = browser.extension.getURL("options.html"); - let firstTab, optionsTab; - browser.tabs.query({currentWindow: true, active: true}).then(tabs => { - firstTab = tabs[0].id; + try { + let [firstTab] = await browser.tabs.query({currentWindow: true, active: true}); browser.test.log("Open options page. Expect fresh load."); - return Promise.all([ + let [, optionsTab] = await Promise.all([ browser.runtime.openOptionsPage(), awaitOptions(), ]); - }).then(([, tab]) => { - browser.test.assertEq(optionsURL, tab.url, "Tab contains options.html"); - browser.test.assertTrue(tab.active, "Tab is active"); - browser.test.assertTrue(tab.id != firstTab, "Tab is a new tab"); + browser.test.assertEq(optionsURL, optionsTab.url, "Tab contains options.html"); + browser.test.assertTrue(optionsTab.active, "Tab is active"); + browser.test.assertTrue(optionsTab.id != firstTab.id, "Tab is a new tab"); - optionsTab = tab.id; browser.test.assertEq(0, browser.extension.getViews({type: "popup"}).length, "viewType is not popup"); browser.test.assertEq(1, browser.extension.getViews({type: "tab"}).length, "viewType is tab"); - browser.test.assertEq(1, browser.extension.getViews({windowId: tab.windowId}).length, "windowId matches"); + browser.test.assertEq(1, browser.extension.getViews({windowId: optionsTab.windowId}).length, "windowId matches"); + let views = browser.extension.getViews(); browser.test.assertEq(2, views.length, "Expected the options page and the background page"); 
browser.test.assertTrue(views.includes(window), "One of the views is the background page"); browser.test.assertTrue(views.some(w => w.iAmOption), "One of the views is the options page"); browser.test.log("Switch tabs."); - return browser.tabs.update(firstTab, {active: true}); - }).then(() => { + await browser.tabs.update(firstTab.id, {active: true}); + browser.test.log("Open options page again. Expect tab re-selected, no new load."); - return browser.runtime.openOptionsPage(); - }).then(() => { - return browser.tabs.query({currentWindow: true, active: true}); - }).then(([tab]) => { - browser.test.assertEq(optionsTab, tab.id, "Tab is the same as the previous options tab"); + await browser.runtime.openOptionsPage(); + let [tab] = await browser.tabs.query({currentWindow: true, active: true}); + + browser.test.assertEq(optionsTab.id, tab.id, "Tab is the same as the previous options tab"); browser.test.assertEq(optionsURL, tab.url, "Tab contains options.html"); // Unfortunately, we can't currently do this, since onMessage doesn't // currently support responses when there are multiple listeners. // // browser.test.log("Ping options page."); // return new Promise(resolve => browser.runtime.sendMessage("ping", resolve)); browser.test.log("Remove options tab."); - return browser.tabs.remove(optionsTab); - }).then(() => { + await browser.tabs.remove(optionsTab.id); + browser.test.log("Open options page again. 
Expect fresh load."); - return Promise.all([ + [, tab] = await Promise.all([ browser.runtime.openOptionsPage(), awaitOptions(), ]); - }).then(([, tab]) => { browser.test.assertEq(optionsURL, tab.url, "Tab contains options.html"); browser.test.assertTrue(tab.active, "Tab is active"); - browser.test.assertTrue(tab.id != optionsTab, "Tab is a new tab"); + browser.test.assertTrue(tab.id != optionsTab.id, "Tab is a new tab"); - return browser.tabs.remove(tab.id); - }).then(() => { + await browser.tabs.remove(tab.id); + browser.test.notifyPass("options-ui-tab"); - }).catch(error => { - browser.test.log(`Error: ${error} :: ${error.stack}`); + } catch (error) { + browser.test.fail(`Error: ${error} :: ${error.stack}`); browser.test.notifyFail("options-ui-tab"); - }); + } }, })); yield extension.awaitFinish("options-ui-tab"); yield extension.unload(); yield BrowserTestUtils.removeTab(tab); }); @@ -261,34 +254,23 @@ add_tasks(function* test_tab_options(ext add_tasks(function* test_options_no_manifest(extraOptions) { info(`Test with no manifest key (${JSON.stringify(extraOptions)})`); let extension = yield loadExtension(Object.assign({}, extraOptions, { manifest: { applications: {gecko: {id: "no_options@tests.mozilla.org"}}, }, - background: function() { + async background() { browser.test.log("Try to open options page when not specified in the manifest."); - browser.runtime.openOptionsPage().then( - () => { - browser.test.fail("Opening options page without one specified in the manifest generated an error"); - browser.test.notifyFail("options-no-manifest"); - }, - error => { - let expected = "No `options_ui` declared"; - browser.test.assertTrue( - error.message.includes(expected), - `Got expected error (got: '${error.message}', expected: '${expected}'`); - } - ).then(() => { - browser.test.notifyPass("options-no-manifest"); - }).catch(error => { - browser.test.log(`Error: ${error} :: ${error.stack}`); - browser.test.notifyFail("options-no-manifest"); - }); + await 
browser.test.assertRejects( + browser.runtime.openOptionsPage(), + /No `options_ui` declared/, + "Expected error from openOptionsPage()"); + + browser.test.notifyPass("options-no-manifest"); }, })); yield extension.awaitFinish("options-no-manifest"); yield extension.unload(); });
--- a/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage_uninstall.js +++ b/browser/components/extensions/test/browser/browser_ext_runtime_openOptionsPage_uninstall.js @@ -43,17 +43,17 @@ add_task(function* test_inline_options_u let extension = yield loadExtension({ manifest: { applications: {gecko: {id: "inline_options_uninstall@tests.mozilla.org"}}, "options_ui": { "page": "options.html", }, }, - background: function() { + background: async function() { let _optionsPromise; let awaitOptions = () => { browser.test.assertFalse(_optionsPromise, "Should not be awaiting options already"); return new Promise(resolve => { _optionsPromise = {resolve}; }); }; @@ -64,34 +64,33 @@ add_task(function* test_inline_options_u _optionsPromise.resolve(sender.tab); _optionsPromise = null; } else { browser.test.fail("Saw unexpected options page load"); } } }); - let firstTab; - browser.tabs.query({currentWindow: true, active: true}).then(tabs => { - firstTab = tabs[0].id; + try { + let [firstTab] = await browser.tabs.query({currentWindow: true, active: true}); browser.test.log("Open options page. Expect fresh load."); - return Promise.all([ + let [, tab] = await Promise.all([ browser.runtime.openOptionsPage(), awaitOptions(), ]); - }).then(([, tab]) => { + browser.test.assertEq("about:addons", tab.url, "Tab contains AddonManager"); browser.test.assertTrue(tab.active, "Tab is active"); - browser.test.assertTrue(tab.id != firstTab, "Tab is a new tab"); + browser.test.assertTrue(tab.id != firstTab.id, "Tab is a new tab"); browser.test.sendMessage("options-ui-open"); - }).catch(error => { + } catch (error) { browser.test.fail(`Error: ${error} :: ${error.stack}`); - }); + } }, }); yield extension.awaitMessage("options-ui-open"); yield extension.unload(); is(gBrowser.selectedBrowser.currentURI.spec, "about:addons", "Add-on manager tab should still be open");
--- a/browser/components/extensions/test/browser/browser_ext_runtime_setUninstallURL.js +++ b/browser/components/extensions/test/browser/browser_ext_runtime_setUninstallURL.js @@ -25,71 +25,62 @@ function* makeAndInstallXPI(id, backgrou let loadTab = yield loadPromise; yield BrowserTestUtils.removeTab(loadTab); return addon; } add_task(function* test_setuninstallurl_badargs() { - function backgroundScript() { - let promises = [ - browser.runtime.setUninstallURL("this is not a url") - .then(() => { - browser.test.notifyFail("setUninstallURL should have failed with bad url"); - }) - .catch(error => { - browser.test.assertTrue(/Invalid URL/.test(error.message), "error message indicates malformed url"); - }), + async function background() { + await browser.test.assertRejects( + browser.runtime.setUninstallURL("this is not a url"), + /Invalid URL/, + "setUninstallURL with an invalid URL should fail"); - browser.runtime.setUninstallURL("file:///etc/passwd") - .then(() => { - browser.test.notifyFail("setUninstallURL should have failed with non-http[s] url"); - }) - .catch(error => { - browser.test.assertTrue(/must have the scheme http or https/.test(error.message), "error message indicates bad scheme"); - }), - ]; + await browser.test.assertRejects( + browser.runtime.setUninstallURL("file:///etc/passwd"), + /must have the scheme http or https/, + "setUninstallURL with an illegal URL should fail"); - Promise.all(promises) - .then(() => browser.test.notifyPass("setUninstallURL bad params")); + browser.test.notifyPass("setUninstallURL bad params"); } let extension = ExtensionTestUtils.loadExtension({ - background: "(" + backgroundScript.toString() + ")()", + background, }); yield extension.startup(); yield extension.awaitFinish(); yield extension.unload(); }); // Test the documented behavior of setUninstallURL() that passing an // empty string is equivalent to not setting an uninstall URL // (i.e., no new tab is opened upon uninstall) add_task(function* 
test_setuninstall_empty_url() { - function backgroundScript() { - browser.runtime.setUninstallURL("") - .then(() => browser.tabs.create({url: "http://example.com/addon_loaded"})); + async function backgroundScript() { + await browser.runtime.setUninstallURL(""); + browser.tabs.create({url: "http://example.com/addon_loaded"}); } let addon = yield makeAndInstallXPI("test_uinstallurl2@tests.mozilla.org", backgroundScript, "http://example.com/addon_loaded"); addon.uninstall(true); info("uninstalled"); // no need to explicitly check for the absence of a new tab, // BrowserTestUtils will eventually complain if one is opened. }); add_task(function* test_setuninstallurl() { - function backgroundScript() { - browser.runtime.setUninstallURL("http://example.com/addon_uninstalled") - .then(() => browser.tabs.create({url: "http://example.com/addon_loaded"})); + async function backgroundScript() { + await browser.runtime.setUninstallURL("http://example.com/addon_uninstalled"); + browser.tabs.create({url: "http://example.com/addon_loaded"}); } let addon = yield makeAndInstallXPI("test_uinstallurl@tests.mozilla.org", backgroundScript, "http://example.com/addon_loaded"); // look for a new tab with the uninstall url. let uninstallPromise = BrowserTestUtils.waitForNewTab(gBrowser, "http://example.com/addon_uninstalled");
--- a/browser/components/extensions/test/browser/browser_ext_tabs_audio.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_audio.js @@ -3,27 +3,17 @@ "use strict"; add_task(function* () { let tab1 = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "about:blank?1"); let tab2 = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "about:blank?2"); gBrowser.selectedTab = tab1; - function background() { - // Wrap API methods in promise-based variants. - let promiseTabs = {}; - Object.keys(browser.tabs).forEach(method => { - promiseTabs[method] = (...args) => { - return new Promise(resolve => { - browser.tabs[method](...args, resolve); - }); - }; - }); - + async function background() { function promiseUpdated(tabId, attr) { return new Promise(resolve => { let onUpdated = (tabId_, changeInfo, tab) => { if (tabId == tabId_ && attr in changeInfo) { browser.tabs.onUpdated.removeListener(onUpdated); resolve({changeInfo, tab}); } @@ -42,134 +32,129 @@ add_task(function* () { function changeTab(tabId, attr, on) { return new Promise((resolve, reject) => { deferred[tabId] = {resolve, reject}; browser.test.sendMessage("change-tab", tabId, attr, on); }); } - let windowId; - let tabIds; - promiseTabs.query({lastFocusedWindow: true}).then(tabs => { + try { + let tabs = await browser.tabs.query({lastFocusedWindow: true}); browser.test.assertEq(tabs.length, 3, "We have three tabs"); for (let tab of tabs) { // Note: We want to check that these are actual boolean values, not // just that they evaluate as false. 
browser.test.assertEq(false, tab.mutedInfo.muted, "Tab is not muted"); browser.test.assertEq(undefined, tab.mutedInfo.reason, "Tab has no muted info reason"); browser.test.assertEq(false, tab.audible, "Tab is not audible"); } - windowId = tabs[0].windowId; - tabIds = [tabs[1].id, tabs[2].id]; + let windowId = tabs[0].windowId; + let tabIds = [tabs[1].id, tabs[2].id]; browser.test.log("Test initial queries for muted and audible return no tabs"); - return Promise.all([ - promiseTabs.query({windowId, audible: false}), - promiseTabs.query({windowId, audible: true}), - promiseTabs.query({windowId, muted: true}), - promiseTabs.query({windowId, muted: false}), - ]); - }).then(([silent, audible, muted, nonMuted]) => { + let silent = await browser.tabs.query({windowId, audible: false}); + let audible = await browser.tabs.query({windowId, audible: true}); + let muted = await browser.tabs.query({windowId, muted: true}); + let nonMuted = await browser.tabs.query({windowId, muted: false}); + browser.test.assertEq(3, silent.length, "Three silent tabs"); browser.test.assertEq(0, audible.length, "No audible tabs"); browser.test.assertEq(0, muted.length, "No muted tabs"); browser.test.assertEq(3, nonMuted.length, "Three non-muted tabs"); browser.test.log("Toggle muted and audible externally on one tab each, and check results"); - return Promise.all([ + [muted, audible] = await Promise.all([ promiseUpdated(tabIds[0], "mutedInfo"), promiseUpdated(tabIds[1], "audible"), changeTab(tabIds[0], "muted", true), changeTab(tabIds[1], "audible", true), ]); - }).then(([muted, audible]) => { + for (let obj of [muted.changeInfo, muted.tab]) { browser.test.assertEq(true, obj.mutedInfo.muted, "Tab is muted"); browser.test.assertEq("user", obj.mutedInfo.reason, "Tab was muted by the user"); } browser.test.assertEq(true, audible.changeInfo.audible, "Tab audible state changed"); browser.test.assertEq(true, audible.tab.audible, "Tab is audible"); browser.test.log("Re-check queries. 
Expect one audible and one muted tab"); - return Promise.all([ - promiseTabs.query({windowId, audible: false}), - promiseTabs.query({windowId, audible: true}), - promiseTabs.query({windowId, muted: true}), - promiseTabs.query({windowId, muted: false}), - ]); - }).then(([silent, audible, muted, nonMuted]) => { + silent = await browser.tabs.query({windowId, audible: false}); + audible = await browser.tabs.query({windowId, audible: true}); + muted = await browser.tabs.query({windowId, muted: true}); + nonMuted = await browser.tabs.query({windowId, muted: false}); + browser.test.assertEq(2, silent.length, "Two silent tabs"); browser.test.assertEq(1, audible.length, "One audible tab"); browser.test.assertEq(1, muted.length, "One muted tab"); browser.test.assertEq(2, nonMuted.length, "Two non-muted tabs"); browser.test.assertEq(true, muted[0].mutedInfo.muted, "Tab is muted"); browser.test.assertEq("user", muted[0].mutedInfo.reason, "Tab was muted by the user"); browser.test.assertEq(true, audible[0].audible, "Tab is audible"); browser.test.log("Toggle muted internally on two tabs, and check results"); - return Promise.all([ + [nonMuted, muted] = await Promise.all([ promiseUpdated(tabIds[0], "mutedInfo"), promiseUpdated(tabIds[1], "mutedInfo"), - promiseTabs.update(tabIds[0], {muted: false}), - promiseTabs.update(tabIds[1], {muted: true}), + browser.tabs.update(tabIds[0], {muted: false}), + browser.tabs.update(tabIds[1], {muted: true}), ]); - }).then(([unmuted, muted]) => { - for (let obj of [unmuted.changeInfo, unmuted.tab]) { + + for (let obj of [nonMuted.changeInfo, nonMuted.tab]) { browser.test.assertEq(false, obj.mutedInfo.muted, "Tab is not muted"); } for (let obj of [muted.changeInfo, muted.tab]) { browser.test.assertEq(true, obj.mutedInfo.muted, "Tab is muted"); } - for (let obj of [unmuted.changeInfo, unmuted.tab, muted.changeInfo, muted.tab]) { + for (let obj of [nonMuted.changeInfo, nonMuted.tab, muted.changeInfo, muted.tab]) { 
browser.test.assertEq("extension", obj.mutedInfo.reason, "Mute state changed by extension"); // FIXME: browser.runtime.id is currently broken. browser.test.assertEq(browser.i18n.getMessage("@@extension_id"), obj.mutedInfo.extensionId, "Mute state changed by extension"); } browser.test.log("Test that mutedInfo is preserved by sessionstore"); - return changeTab(tabIds[1], "duplicate").then(promiseTabs.get); - }).then(tab => { + let tab = await changeTab(tabIds[1], "duplicate").then(browser.tabs.get); + browser.test.assertEq(true, tab.mutedInfo.muted, "Tab is muted"); browser.test.assertEq("extension", tab.mutedInfo.reason, "Mute state changed by extension"); // FIXME: browser.runtime.id is currently broken. browser.test.assertEq(browser.i18n.getMessage("@@extension_id"), tab.mutedInfo.extensionId, "Mute state changed by extension"); browser.test.log("Unmute externally, and check results"); - return Promise.all([ + [nonMuted] = await Promise.all([ promiseUpdated(tabIds[1], "mutedInfo"), changeTab(tabIds[1], "muted", false), - promiseTabs.remove(tab.id), + browser.tabs.remove(tab.id), ]); - }).then(([unmuted]) => { - for (let obj of [unmuted.changeInfo, unmuted.tab]) { + + for (let obj of [nonMuted.changeInfo, nonMuted.tab]) { browser.test.assertEq(false, obj.mutedInfo.muted, "Tab is not muted"); browser.test.assertEq("user", obj.mutedInfo.reason, "Mute state changed by user"); } browser.test.notifyPass("tab-audio"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("tab-audio"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_captureVisibleTab.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_captureVisibleTab.js @@ -21,103 +21,95 @@ function* runTest(options) { </html> `; let url = `data:text/html,${encodeURIComponent(html)}`; let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, url, true); tab.linkedBrowser.fullZoom = options.fullZoom; - function background(options) { - // Wrap API methods in promise-based variants. - let promiseTabs = {}; - Object.keys(browser.tabs).forEach(method => { - promiseTabs[method] = (...args) => { - return new Promise(resolve => { - browser.tabs[method](...args, resolve); - }); - }; - }); - + async function background(options) { browser.test.log(`Test color ${options.color} at fullZoom=${options.fullZoom}`); - promiseTabs.query({currentWindow: true, active: true}).then(([tab]) => { - return Promise.all([ - promiseTabs.captureVisibleTab(tab.windowId, {format: "jpeg", quality: 95}), - promiseTabs.captureVisibleTab(tab.windowId, {format: "png", quality: 95}), - promiseTabs.captureVisibleTab(tab.windowId, {quality: 95}), - promiseTabs.captureVisibleTab(tab.windowId), - ]).then(([jpeg, png, ...pngs]) => { - browser.test.assertTrue(pngs.every(url => url == png), "All PNGs are identical"); + try { + let [tab] = await browser.tabs.query({currentWindow: true, active: true}); - browser.test.assertTrue(jpeg.startsWith("data:image/jpeg;base64,"), "jpeg is JPEG"); - browser.test.assertTrue(png.startsWith("data:image/png;base64,"), "png is PNG"); + let [jpeg, png, ...pngs] = await Promise.all([ + browser.tabs.captureVisibleTab(tab.windowId, {format: "jpeg", quality: 95}), + browser.tabs.captureVisibleTab(tab.windowId, {format: "png", quality: 95}), + browser.tabs.captureVisibleTab(tab.windowId, {quality: 95}), + browser.tabs.captureVisibleTab(tab.windowId), + ]); + + browser.test.assertTrue(pngs.every(url => url == png), "All PNGs are identical"); + + 
browser.test.assertTrue(jpeg.startsWith("data:image/jpeg;base64,"), "jpeg is JPEG"); + browser.test.assertTrue(png.startsWith("data:image/png;base64,"), "png is PNG"); - let promises = [jpeg, png].map(url => new Promise(resolve => { - let img = new Image(); - img.src = url; - img.onload = () => resolve(img); - })); - return Promise.all(promises); - }).then(([jpeg, png]) => { - let tabDims = `${tab.width}\u00d7${tab.height}`; + let promises = [jpeg, png].map(url => new Promise(resolve => { + let img = new Image(); + img.src = url; + img.onload = () => resolve(img); + })); - let images = {jpeg, png}; - for (let format of Object.keys(images)) { - let img = images[format]; + [jpeg, png] = await Promise.all(promises); + let tabDims = `${tab.width}\u00d7${tab.height}`; - let dims = `${img.width}\u00d7${img.height}`; - browser.test.assertEq(tabDims, dims, `${format} dimensions are correct`); + let images = {jpeg, png}; + for (let format of Object.keys(images)) { + let img = images[format]; - let canvas = document.createElement("canvas"); - canvas.width = img.width; - canvas.height = img.height; - canvas.mozOpaque = true; + let dims = `${img.width}\u00d7${img.height}`; + browser.test.assertEq(tabDims, dims, `${format} dimensions are correct`); - let ctx = canvas.getContext("2d"); - ctx.drawImage(img, 0, 0); + let canvas = document.createElement("canvas"); + canvas.width = img.width; + canvas.height = img.height; + canvas.mozOpaque = true; - // Check the colors of the first and last pixels of the image, to make - // sure we capture the entire frame, and scale it correctly. 
- let coords = [ - {x: 0, y: 0, - color: options.color}, - {x: img.width - 1, - y: img.height - 1, - color: options.color}, - {x: img.width / 2 | 0, - y: img.height / 2 | 0, - color: options.neutral}, - ]; + let ctx = canvas.getContext("2d"); + ctx.drawImage(img, 0, 0); - for (let {x, y, color} of coords) { - let imageData = ctx.getImageData(x, y, 1, 1).data; + // Check the colors of the first and last pixels of the image, to make + // sure we capture the entire frame, and scale it correctly. + let coords = [ + {x: 0, y: 0, + color: options.color}, + {x: img.width - 1, + y: img.height - 1, + color: options.color}, + {x: img.width / 2 | 0, + y: img.height / 2 | 0, + color: options.neutral}, + ]; - if (format == "png") { - browser.test.assertEq(`rgba(${color},255)`, `rgba(${[...imageData]})`, `${format} image color is correct at (${x}, ${y})`); - } else { - // Allow for some deviation in JPEG version due to lossy compression. - const SLOP = 3; + for (let {x, y, color} of coords) { + let imageData = ctx.getImageData(x, y, 1, 1).data; - browser.test.log(`Testing ${format} image color at (${x}, ${y}), have rgba(${[...imageData]}), expecting approx. rgba(${color},255)`); + if (format == "png") { + browser.test.assertEq(`rgba(${color},255)`, `rgba(${[...imageData]})`, `${format} image color is correct at (${x}, ${y})`); + } else { + // Allow for some deviation in JPEG version due to lossy compression. 
+ const SLOP = 3; - browser.test.assertTrue(Math.abs(color[0] - imageData[0]) <= SLOP, `${format} image color.red is correct at (${x}, ${y})`); - browser.test.assertTrue(Math.abs(color[1] - imageData[1]) <= SLOP, `${format} image color.green is correct at (${x}, ${y})`); - browser.test.assertTrue(Math.abs(color[2] - imageData[2]) <= SLOP, `${format} image color.blue is correct at (${x}, ${y})`); - browser.test.assertEq(255, imageData[3], `${format} image color.alpha is correct at (${x}, ${y})`); - } + browser.test.log(`Testing ${format} image color at (${x}, ${y}), have rgba(${[...imageData]}), expecting approx. rgba(${color},255)`); + + browser.test.assertTrue(Math.abs(color[0] - imageData[0]) <= SLOP, `${format} image color.red is correct at (${x}, ${y})`); + browser.test.assertTrue(Math.abs(color[1] - imageData[1]) <= SLOP, `${format} image color.green is correct at (${x}, ${y})`); + browser.test.assertTrue(Math.abs(color[2] - imageData[2]) <= SLOP, `${format} image color.blue is correct at (${x}, ${y})`); + browser.test.assertEq(255, imageData[3], `${format} image color.alpha is correct at (${x}, ${y})`); } } + } - browser.test.notifyPass("captureVisibleTab"); - }); - }).catch(e => { + browser.test.notifyPass("captureVisibleTab"); + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFail("captureVisibleTab"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["<all_urls>"], }, background: `(${background})(${JSON.stringify(options)})`,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_cookieStoreId.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_cookieStoreId.js @@ -37,107 +37,93 @@ add_task(function* () { background: function() { function testTab(data, tab) { browser.test.assertTrue(data.success, "we want a success"); browser.test.assertTrue(!!tab, "we have a tab"); browser.test.assertEq(data.expectedCookieStoreId, tab.cookieStoreId, "tab should have the correct cookieStoreId"); } - function runTest(data) { - // Tab Creation - browser.tabs.create({windowId: data.privateTab ? this.privateWindowId : this.defaultWindowId, - cookieStoreId: data.cookieStoreId}) + async function runTest(data) { + try { + // Tab Creation + let tab; + try { + tab = await browser.tabs.create({ + windowId: data.privateTab ? this.privateWindowId : this.defaultWindowId, + cookieStoreId: data.cookieStoreId, + }); + + browser.test.assertTrue(!data.failure, "we want a success"); + } catch (error) { + browser.test.assertTrue(!!data.failure, "we want a failure"); - // Tests for tab creation - .then((tab) => { - testTab(data, tab); - return tab; - }, (error) => { - browser.test.assertTrue(!!data.failure, "we want a failure"); - if (data.failure == "illegal") { - browser.test.assertTrue(/Illegal cookieStoreId/.test(error.message), - "runtime.lastError should report the expected error message"); - } else if (data.failure == "defaultToPrivate") { - browser.test.assertTrue("Illegal to set private cookieStorageId in a non private window", - error.message, - "runtime.lastError should report the expected error message"); - } else if (data.failure == "privateToDefault") { - browser.test.assertTrue("Illegal to set non private cookieStorageId in a private window", - error.message, - "runtime.lastError should report the expected error message"); - } else if (data.failure == "exist") { - browser.test.assertTrue(/No cookie store exists/.test(error.message), - "runtime.lastError should report the expected 
error message"); - } else { - browser.test.fail("The test is broken"); + if (data.failure == "illegal") { + browser.test.assertTrue(/Illegal cookieStoreId/.test(error.message), + "runtime.lastError should report the expected error message"); + } else if (data.failure == "defaultToPrivate") { + browser.test.assertTrue("Illegal to set private cookieStorageId in a non private window", + error.message, + "runtime.lastError should report the expected error message"); + } else if (data.failure == "privateToDefault") { + browser.test.assertTrue("Illegal to set non private cookieStorageId in a private window", + error.message, + "runtime.lastError should report the expected error message"); + } else if (data.failure == "exist") { + browser.test.assertTrue(/No cookie store exists/.test(error.message), + "runtime.lastError should report the expected error message"); + } else { + browser.test.fail("The test is broken"); + } + + browser.test.sendMessage("test-done"); + return; } - return null; - }) + // Tests for tab creation + testTab(data, tab); - // Tests for tab querying - .then((tab) => { - if (tab) { - return browser.tabs.query({windowId: data.privateTab ? this.privateWindowId : this.defaultWindowId, - cookieStoreId: data.cookieStoreId}) - .then((tabs) => { - browser.test.assertTrue(tabs.length >= 1, "Tab found!"); - testTab(data, tabs[0]); - return tab; - }); - } - }) + { + // Tests for tab querying + let [tab] = await browser.tabs.query({ + windowId: data.privateTab ? 
this.privateWindowId : this.defaultWindowId, + cookieStoreId: data.cookieStoreId, + }); - .then((tab) => { - if (tab) { - return browser.cookies.getAllCookieStores() - .then(stores => { - let store = stores.find(store => store.id === tab.cookieStoreId); - browser.test.assertTrue(!!store, "We have a store for this tab."); - return tab; - }); + browser.test.assertTrue(tab != undefined, "Tab found!"); + testTab(data, tab); } - }) + + let stores = await browser.cookies.getAllCookieStores(); - .then((tab) => { - if (tab) { - return browser.tabs.remove(tab.id); - } - }) + let store = stores.find(store => store.id === tab.cookieStoreId); + browser.test.assertTrue(!!store, "We have a store for this tab."); + + await browser.tabs.remove(tab.id); - .then(() => { browser.test.sendMessage("test-done"); - }, () => { - browser.test.fail("An exception has ben thrown"); - }); + } catch (e) { + browser.test.fail("An exception has been thrown"); + } } - function initialize() { - browser.windows.create({incognito: true}) - .then((win) => { - this.privateWindowId = win.id; - return browser.windows.create({incognito: false}); - }) - .then((win) => { - this.defaultWindowId = win.id; - }) - .then(() => { - browser.test.sendMessage("ready"); - }); + async function initialize() { + let win = await browser.windows.create({incognito: true}); + this.privateWindowId = win.id; + + win = await browser.windows.create({incognito: false}); + this.defaultWindowId = win.id; + + browser.test.sendMessage("ready"); } - function shutdown() { - browser.windows.remove(this.privateWindowId) - .then(() => { - browser.windows.remove(this.defaultWindowId); - }) - .then(() => { - browser.test.sendMessage("gone"); - }); + async function shutdown() { + await browser.windows.remove(this.privateWindowId); + await browser.windows.remove(this.defaultWindowId); + browser.test.sendMessage("gone"); } // Waiting for messages browser.test.onMessage.addListener((msg, data) => { if (msg == "be-ready") { initialize(); } else 
if (msg == "test") { runTest(data);
--- a/browser/components/extensions/test/browser/browser_ext_tabs_create.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_create.js @@ -85,17 +85,17 @@ add_task(function* () { result: {index: 1, active: false}, }, { create: {windowId: activeWindow}, result: {windowId: activeWindow}, }, ]; - function nextTest() { + async function nextTest() { if (!tests.length) { browser.test.notifyPass("tabs.create"); return; } let test = tests.shift(); let expected = Object.assign({}, DEFAULTS, test.result); @@ -114,43 +114,39 @@ add_task(function* () { let createdPromise = new Promise(resolve => { let onCreated = tab => { browser.test.assertTrue("id" in tab, `Expected tabs.onCreated callback to receive tab object`); resolve(); }; browser.tabs.onCreated.addListener(onCreated); }); - let tabId; - Promise.all([ + let [tab] = await Promise.all([ browser.tabs.create(test.create), createdPromise, - ]).then(([tab]) => { - tabId = tab.id; + ]); + let tabId = tab.id; - for (let key of Object.keys(expected)) { - if (key === "url") { - // FIXME: This doesn't get updated until later in the load cycle. - continue; - } - - browser.test.assertEq(expected[key], tab[key], `Expected value for tab.${key}`); + for (let key of Object.keys(expected)) { + if (key === "url") { + // FIXME: This doesn't get updated until later in the load cycle. 
+ continue; } - return updatedPromise; - }).then(updated => { - browser.test.assertEq(tabId, updated.tabId, `Expected value for tab.id`); - browser.test.assertEq(expected.url, updated.url, `Expected value for tab.url`); + browser.test.assertEq(expected[key], tab[key], `Expected value for tab.${key}`); + } - return browser.tabs.remove(tabId); - }).then(() => { - return browser.tabs.update(activeTab, {active: true}); - }).then(() => { - nextTest(); - }); + let updated = await updatedPromise; + browser.test.assertEq(tabId, updated.tabId, `Expected value for tab.id`); + browser.test.assertEq(expected.url, updated.url, `Expected value for tab.url`); + + await browser.tabs.remove(tabId); + await browser.tabs.update(activeTab, {active: true}); + + nextTest(); } nextTest(); } browser.tabs.query({active: true, currentWindow: true}, tabs => { activeTab = tabs[0].id; activeWindow = tabs[0].windowId;
--- a/browser/components/extensions/test/browser/browser_ext_tabs_detectLanguage.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_detectLanguage.js @@ -3,48 +3,44 @@ "use strict"; add_task(function* testDetectLanguage() { let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, - background() { + background: async function() { const BASE_PATH = "browser/browser/components/extensions/test/browser"; function loadTab(url) { return browser.tabs.create({url}); } - loadTab(`http://example.co.jp/${BASE_PATH}/file_language_ja.html`).then(tab => { - return browser.tabs.detectLanguage(tab.id).then(lang => { - browser.test.assertEq("ja", lang, "Japanese document should be detected as Japanese"); - return browser.tabs.remove(tab.id); - }); - }).then(() => { - return loadTab(`http://example.co.jp/${BASE_PATH}/file_language_fr_en.html`); - }).then(tab => { - return browser.tabs.detectLanguage(tab.id).then(lang => { - browser.test.assertEq("fr", lang, "French/English document should be detected as primarily French"); - return browser.tabs.remove(tab.id); - }); - }).then(() => { - return loadTab(`http://example.co.jp/${BASE_PATH}/file_language_tlh.html`); - }).then(tab => { - return browser.tabs.detectLanguage(tab.id).then(lang => { - browser.test.assertEq("und", lang, "Klingon document should not be detected, should return 'und'"); - return browser.tabs.remove(tab.id); - }); - }).then(() => { + try { + let tab = await loadTab(`http://example.co.jp/${BASE_PATH}/file_language_ja.html`); + let lang = await browser.tabs.detectLanguage(tab.id); + browser.test.assertEq("ja", lang, "Japanese document should be detected as Japanese"); + await browser.tabs.remove(tab.id); + + tab = await loadTab(`http://example.co.jp/${BASE_PATH}/file_language_fr_en.html`); + lang = await browser.tabs.detectLanguage(tab.id); + browser.test.assertEq("fr", lang, "French/English document should be detected as primarily French"); + await 
browser.tabs.remove(tab.id); + + tab = await loadTab(`http://example.co.jp/${BASE_PATH}/file_language_tlh.html`); + lang = await browser.tabs.detectLanguage(tab.id); + browser.test.assertEq("und", lang, "Klingon document should not be detected, should return 'und'"); + await browser.tabs.remove(tab.id); + browser.test.notifyPass("detectLanguage"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFail("detectLanguage"); - }); + } }, }); yield extension.startup(); yield extension.awaitFinish("detectLanguage"); yield extension.unload();
--- a/browser/components/extensions/test/browser/browser_ext_tabs_duplicate.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_duplicate.js @@ -36,17 +36,17 @@ add_task(function* testDuplicateTab() { yield extension.unload(); while (gBrowser.tabs[0].linkedBrowser.currentURI.spec === "http://example.net/") { yield BrowserTestUtils.removeTab(gBrowser.tabs[0]); } }); add_task(function* testDuplicateTabLazily() { - function background() { + async function background() { let tabLoadComplete = new Promise(resolve => { browser.test.onMessage.addListener((message, tabId, result) => { if (message == "duplicate-tab-done") { resolve(tabId); } }); }); @@ -56,42 +56,38 @@ add_task(function* testDuplicateTabLazil if (tabId == tabId_ && changed.status == "complete") { browser.tabs.onUpdated.removeListener(listener); resolve(); } }); }); } - let startTabId; - let url = "http://example.com/browser/browser/components/extensions/test/browser/file_dummy.html"; - browser.tabs.create({url}, tab => { - startTabId = tab.id; + try { + let url = "http://example.com/browser/browser/components/extensions/test/browser/file_dummy.html"; + let tab = await browser.tabs.create({url}); + let startTabId = tab.id; - awaitLoad(startTabId).then(() => { - browser.test.sendMessage("duplicate-tab", startTabId); + await awaitLoad(startTabId); + browser.test.sendMessage("duplicate-tab", startTabId); - tabLoadComplete.then(unloadedTabId => { - browser.tabs.get(startTabId, loadedtab => { - browser.test.assertEq("Dummy test page", loadedtab.title, "Title should be returned for loaded pages"); - browser.test.assertEq("complete", loadedtab.status, "Tab status should be complete for loaded pages"); - }); + let unloadedTabId = await tabLoadComplete; + let loadedtab = await browser.tabs.get(startTabId); + browser.test.assertEq("Dummy test page", loadedtab.title, "Title should be returned for loaded pages"); + browser.test.assertEq("complete", loadedtab.status, "Tab status should be complete for 
loaded pages"); - browser.tabs.get(unloadedTabId, unloadedtab => { - browser.test.assertEq("Dummy test page", unloadedtab.title, "Title should be returned after page has been unloaded"); - }); + let unloadedtab = await browser.tabs.get(unloadedTabId); + browser.test.assertEq("Dummy test page", unloadedtab.title, "Title should be returned after page has been unloaded"); - browser.tabs.remove([tab.id, unloadedTabId]); - browser.test.notifyPass("tabs.hasCorrectTabTitle"); - }); - }).catch(e => { - browser.test.fail(`${e} :: ${e.stack}`); - browser.test.notifyFail("tabs.hasCorrectTabTitle"); - }); - }); + await browser.tabs.remove([tab.id, unloadedTabId]); + browser.test.notifyPass("tabs.hasCorrectTabTitle"); + } catch (e) { + browser.test.fail(`${e} :: ${e.stack}`); + browser.test.notifyFail("tabs.hasCorrectTabTitle"); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_events.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_events.js @@ -1,14 +1,14 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* testTabEvents() { - function background() { + async function background() { let events = []; browser.tabs.onCreated.addListener(tab => { events.push({type: "onCreated", tab}); }); browser.tabs.onAttached.addListener((tabId, info) => { events.push(Object.assign({type: "onAttached", tabId}, info)); }); @@ -20,136 +20,131 @@ add_task(function* testTabEvents() { browser.tabs.onRemoved.addListener((tabId, info) => { events.push(Object.assign({type: "onRemoved", tabId}, info)); }); browser.tabs.onMoved.addListener((tabId, info) => { events.push(Object.assign({type: "onMoved", tabId}, info)); }); - function expectEvents(names) { + async function expectEvents(names) { browser.test.log(`Expecting events: ${names.join(", ")}`); - return new Promise(resolve => { - setTimeout(resolve, 0); - }).then(() => { - browser.test.assertEq(names.length, events.length, "Got expected number of events"); - for (let [i, name] of names.entries()) { - browser.test.assertEq(name, i in events && events[i].type, - `Got expected ${name} event`); - } - return events.splice(0); - }); + await new Promise(resolve => setTimeout(resolve, 0)); + + browser.test.assertEq(names.length, events.length, "Got expected number of events"); + for (let [i, name] of names.entries()) { + browser.test.assertEq(name, i in events && events[i].type, + `Got expected ${name} event`); + } + return events.splice(0); } - browser.test.log("Create second browser window"); - let windowId; - Promise.all([ - browser.windows.getCurrent(), - browser.windows.create({url: "about:blank"}), - ]).then(windows => { - windowId = windows[0].id; + try { + browser.test.log("Create second browser window"); + + let windows = await Promise.all([ + 
browser.windows.getCurrent(), + browser.windows.create({url: "about:blank"}), + ]); + + let windowId = windows[0].id; let otherWindowId = windows[1].id; - let initialTab; - return expectEvents(["onCreated"]).then(([created]) => { - initialTab = created.tab; + let [created] = await expectEvents(["onCreated"]); + let initialTab = created.tab; + - browser.test.log("Create tab in window 1"); - return browser.tabs.create({windowId, index: 0, url: "about:blank"}); - }).then(tab => { - let oldIndex = tab.index; - browser.test.assertEq(0, oldIndex, "Tab has the expected index"); + browser.test.log("Create tab in window 1"); + let tab = await browser.tabs.create({windowId, index: 0, url: "about:blank"}); + let oldIndex = tab.index; + browser.test.assertEq(0, oldIndex, "Tab has the expected index"); + + [created] = await expectEvents(["onCreated"]); + browser.test.assertEq(tab.id, created.tab.id, "Got expected tab ID"); + browser.test.assertEq(oldIndex, created.tab.index, "Got expected tab index"); + - return expectEvents(["onCreated"]).then(([created]) => { - browser.test.assertEq(tab.id, created.tab.id, "Got expected tab ID"); - browser.test.assertEq(oldIndex, created.tab.index, "Got expected tab index"); + browser.test.log("Move tab to window 2"); + await browser.tabs.move([tab.id], {windowId: otherWindowId, index: 0}); - browser.test.log("Move tab to window 2"); - return browser.tabs.move([tab.id], {windowId: otherWindowId, index: 0}); - }).then(() => { - return expectEvents(["onDetached", "onAttached"]); - }).then(([detached, attached]) => { - browser.test.assertEq(oldIndex, detached.oldPosition, "Expected old index"); - browser.test.assertEq(windowId, detached.oldWindowId, "Expected old window ID"); + let [detached, attached] = await expectEvents(["onDetached", "onAttached"]); + browser.test.assertEq(oldIndex, detached.oldPosition, "Expected old index"); + browser.test.assertEq(windowId, detached.oldWindowId, "Expected old window ID"); + + browser.test.assertEq(0, 
attached.newPosition, "Expected new index"); + browser.test.assertEq(otherWindowId, attached.newWindowId, "Expected new window ID"); + - browser.test.assertEq(0, attached.newPosition, "Expected new index"); - browser.test.assertEq(otherWindowId, attached.newWindowId, "Expected new window ID"); + browser.test.log("Move tab within the same window"); + let [moved] = await browser.tabs.move([tab.id], {index: 1}); + browser.test.assertEq(1, moved.index, "Expected new index"); + + [moved] = await expectEvents(["onMoved"]); + browser.test.assertEq(tab.id, moved.tabId, "Expected tab ID"); + browser.test.assertEq(0, moved.fromIndex, "Expected old index"); + browser.test.assertEq(1, moved.toIndex, "Expected new index"); + browser.test.assertEq(otherWindowId, moved.windowId, "Expected window ID"); - browser.test.log("Move tab within the same window"); - return browser.tabs.move([tab.id], {index: 1}); - }).then(([moved]) => { - browser.test.assertEq(1, moved.index, "Expected new index"); + + browser.test.log("Remove tab"); + await browser.tabs.remove(tab.id); + let [removed] = await expectEvents(["onRemoved"]); - return expectEvents(["onMoved"]); - }).then(([moved]) => { - browser.test.assertEq(tab.id, moved.tabId, "Expected tab ID"); - browser.test.assertEq(0, moved.fromIndex, "Expected old index"); - browser.test.assertEq(1, moved.toIndex, "Expected new index"); - browser.test.assertEq(otherWindowId, moved.windowId, "Expected window ID"); + browser.test.assertEq(tab.id, removed.tabId, "Expected removed tab ID"); + browser.test.assertEq(otherWindowId, removed.windowId, "Expected removed tab window ID"); + // Note: We want to test for the actual boolean value false here. 
+ browser.test.assertEq(false, removed.isWindowClosing, "Expected isWindowClosing value"); + - browser.test.log("Remove tab"); - return browser.tabs.remove(tab.id); - }).then(() => { - return expectEvents(["onRemoved"]); - }).then(([removed]) => { - browser.test.assertEq(tab.id, removed.tabId, "Expected removed tab ID"); - browser.test.assertEq(otherWindowId, removed.windowId, "Expected removed tab window ID"); - // Note: We want to test for the actual boolean value false here. - browser.test.assertEq(false, removed.isWindowClosing, "Expected isWindowClosing value"); + browser.test.log("Close second window"); + await browser.windows.remove(otherWindowId); + [removed] = await expectEvents(["onRemoved"]); + browser.test.assertEq(initialTab.id, removed.tabId, "Expected removed tab ID"); + browser.test.assertEq(otherWindowId, removed.windowId, "Expected removed tab window ID"); + browser.test.assertEq(true, removed.isWindowClosing, "Expected isWindowClosing value"); + - browser.test.log("Close second window"); - return browser.windows.remove(otherWindowId); - }).then(() => { - return expectEvents(["onRemoved"]); - }).then(([removed]) => { - browser.test.assertEq(initialTab.id, removed.tabId, "Expected removed tab ID"); - browser.test.assertEq(otherWindowId, removed.windowId, "Expected removed tab window ID"); - browser.test.assertEq(true, removed.isWindowClosing, "Expected isWindowClosing value"); + browser.test.log("Create additional tab in window 1"); + tab = await browser.tabs.create({windowId, url: "about:blank"}); + await expectEvents(["onCreated"]); + + + browser.test.log("Create a new window, adopting the new tab"); + // We have to explicitly wait for the event here, since its timing is + // not predictable. 
+ let promiseAttached = new Promise(resolve => { + browser.tabs.onAttached.addListener(function listener(tabId) { + browser.tabs.onAttached.removeListener(listener); + resolve(); }); }); - }).then(() => { - browser.test.log("Create additional tab in window 1"); - return browser.tabs.create({windowId, url: "about:blank"}); - }).then(tab => { - return expectEvents(["onCreated"]).then(() => { - browser.test.log("Create a new window, adopting the new tab"); - // We have to explicitly wait for the event here, since its timing is - // not predictable. - let promiseAttached = new Promise(resolve => { - browser.tabs.onAttached.addListener(function listener(tabId) { - browser.tabs.onAttached.removeListener(listener); - resolve(); - }); - }); + let [window] = await Promise.all([ + browser.windows.create({tabId: tab.id}), + promiseAttached, + ]); + + [detached, attached] = await expectEvents(["onDetached", "onAttached"]); + + browser.test.assertEq(tab.id, detached.tabId, "Expected onDetached tab ID"); - return Promise.all([ - browser.windows.create({tabId: tab.id}), - promiseAttached, - ]); - }).then(([window]) => { - return expectEvents(["onDetached", "onAttached"]).then(([detached, attached]) => { - browser.test.assertEq(tab.id, detached.tabId, "Expected onDetached tab ID"); + browser.test.assertEq(tab.id, attached.tabId, "Expected onAttached tab ID"); + browser.test.assertEq(0, attached.newPosition, "Expected onAttached new index"); + browser.test.assertEq(window.id, attached.newWindowId, + "Expected onAttached new window id"); - browser.test.assertEq(tab.id, attached.tabId, "Expected onAttached tab ID"); - browser.test.assertEq(0, attached.newPosition, "Expected onAttached new index"); - browser.test.assertEq(window.id, attached.newWindowId, - "Expected onAttached new window id"); + browser.test.log("Close the new window"); + await browser.windows.remove(window.id); - browser.test.log("Close the new window"); - return browser.windows.remove(window.id); - }); - }); - 
}).then(() => { browser.test.notifyPass("tabs-events"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("tabs-events"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background, @@ -171,26 +166,24 @@ add_task(function* testTabEventsSize() { }); browser.tabs.onUpdated.addListener((tabId, changeInfo, tab) => { if (changeInfo.status == "complete") { sendSizeMessages(tab, "on-updated"); } }); - browser.test.onMessage.addListener((msg, arg) => { + browser.test.onMessage.addListener(async (msg, arg) => { if (msg === "create-tab") { - browser.tabs.create({url: "http://example.com/"}).then(tab => { - sendSizeMessages(tab, "create"); - browser.test.sendMessage("created-tab-id", tab.id); - }); + let tab = await browser.tabs.create({url: "http://example.com/"}); + sendSizeMessages(tab, "create"); + browser.test.sendMessage("created-tab-id", tab.id); } else if (msg === "update-tab") { - browser.tabs.update(arg, {url: "http://example.org/"}).then(tab => { - sendSizeMessages(tab, "update"); - }); + let tab = await browser.tabs.update(arg, {url: "http://example.org/"}); + sendSizeMessages(tab, "update"); } else if (msg === "remove-tab") { browser.tabs.remove(arg); browser.test.sendMessage("tab-removed"); } }); browser.test.sendMessage("ready"); } @@ -233,19 +226,17 @@ add_task(function* testTabEventsSize() { yield extension.awaitMessage("tab-removed"); } yield extension.unload(); SpecialPowers.clearUserPref(RESOLUTION_PREF); }); add_task(function* testTabRemovalEvent() { - function background() { - let removalTabId; - + async function background() { function awaitLoad(tabId) { return new Promise(resolve => { browser.tabs.onUpdated.addListener(function listener(tabId_, changed, tab) { if (tabId == tabId_ && changed.status == "complete") { browser.tabs.onUpdated.removeListener(listener); resolve(); } }); @@ -257,27 +248,26 @@ add_task(function* testTabRemovalEvent() 
chrome.tabs.query({}, tabs => { for (let tab of tabs) { browser.test.assertTrue(tab.id != tabId, "Tab query should not include removed tabId"); } browser.test.notifyPass("tabs-events"); }); }); - let url = "http://example.com/browser/browser/components/extensions/test/browser/context.html"; - browser.tabs.create({url: url}) - .then(tab => { - removalTabId = tab.id; - return awaitLoad(tab.id); - }).then(() => { - return browser.tabs.remove(removalTabId); - }).catch(e => { + try { + let url = "http://example.com/browser/browser/components/extensions/test/browser/context.html"; + let tab = await browser.tabs.create({url: url}); + await awaitLoad(tab.id); + + await browser.tabs.remove(tab.id); + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("tabs-events"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_executeScript.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_executeScript.js @@ -31,22 +31,23 @@ add_task(function* testExecuteScript() { let messageManagersSize = countMM(MessageChannel.messageManagers); let responseManagersSize = countMM(MessageChannel.responseManagers); const BASE = "http://mochi.test:8888/browser/browser/components/extensions/test/browser/"; const URL = BASE + "file_iframe_document.html"; let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, URL, true); - function background() { - browser.tabs.query({active: true, currentWindow: true}).then(tabs => { - return browser.webNavigation.getAllFrames({tabId: tabs[0].id}); - }).then(frames => { + async function background() { + try { + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + let frames = await browser.webNavigation.getAllFrames({tabId: tab.id}); + browser.test.log(`FRAMES: ${frames[1].frameId} ${JSON.stringify(frames)}\n`); - return Promise.all([ + await Promise.all([ browser.tabs.executeScript({ code: "42", }).then(result => { browser.test.assertEq(1, result.length, "Expected one callback result"); browser.test.assertEq(42, result[0], "Expected callback result"); }), browser.tabs.executeScript({ @@ -122,30 +123,30 @@ add_task(function* testExecuteScript() { let details = { frame_id: Number.MAX_SAFE_INTEGER, matchesHost: ["http://mochi.test/", "http://example.com/"], }; browser.test.assertEq(`No window matching ${JSON.stringify(details)}`, error.message, "Got expected error"); }), - browser.tabs.create({url: "http://example.net/", active: false}).then(tab => { - return browser.tabs.executeScript(tab.id, { + browser.tabs.create({url: "http://example.net/", active: false}).then(async tab => { + await browser.tabs.executeScript(tab.id, { code: "42", }).then(result => { browser.test.fail("Expected error when trying to execute on invalid domain"); }, error => { let details 
= { matchesHost: ["http://mochi.test/", "http://example.com/"], }; browser.test.assertEq(`No window matching ${JSON.stringify(details)}`, error.message, "Got expected error"); - }).then(() => { - return browser.tabs.remove(tab.id); }); + + await browser.tabs.remove(tab.id); }), browser.tabs.executeScript({ code: "Promise.resolve(42)", }).then(result => { browser.test.assertEq(42, result[0], "Got expected promise resolution value as result"); }), @@ -173,37 +174,37 @@ add_task(function* testExecuteScript() { browser.tabs.executeScript({ code: "location.href;", frameId: frames[1].frameId, }).then(result => { browser.test.assertEq(1, result.length, "Expected one result"); browser.test.assertEq("http://mochi.test:8888/", result[0], "Result for frameId[1] is correct"); }), - browser.tabs.create({url: "http://example.com/"}).then(tab => { - return browser.tabs.executeScript(tab.id, {code: "location.href"}).then(result => { - browser.test.assertEq("http://example.com/", result[0], "Script executed correctly in new tab"); + browser.tabs.create({url: "http://example.com/"}).then(async tab => { + let result = await browser.tabs.executeScript(tab.id, {code: "location.href"}); - return browser.tabs.remove(tab.id); - }); + browser.test.assertEq("http://example.com/", result[0], "Script executed correctly in new tab"); + + await browser.tabs.remove(tab.id); }), new Promise(resolve => { browser.runtime.onMessage.addListener(message => { browser.test.assertEq("script ran", message, "Expected runtime message"); resolve(); }); }), ]); - }).then(() => { + browser.test.notifyPass("executeScript"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFail("executeScript"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["http://mochi.test/", "http://example.com/", "webNavigation"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_executeScript_bad.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_executeScript_bad.js @@ -1,16 +1,16 @@ "use strict"; // This is a pretty terrible hack, but it's the best we can do until we // support |executeScript| callbacks and |lastError|. function* testHasNoPermission(params) { let contentSetup = params.contentSetup || (() => Promise.resolve()); - function background(contentSetup) { + async function background(contentSetup) { browser.runtime.onMessage.addListener((msg, sender) => { browser.test.assertEq(msg, "second script ran", "second script ran"); browser.test.notifyPass("executeScript"); }); browser.test.onMessage.addListener(msg => { browser.test.assertEq(msg, "execute-script"); @@ -25,19 +25,19 @@ function* testHasNoPermission(params) { // it, but it's just about the best we can do until we // support callbacks for executeScript. browser.tabs.executeScript(tabs[1].id, { file: "second-script.js", }); }); }); - contentSetup().then(() => { - browser.test.sendMessage("ready"); - }); + await contentSetup(); + + browser.test.sendMessage("ready"); } let extension = ExtensionTestUtils.loadExtension({ manifest: params.manifest, background: `(${background})(${contentSetup})`, files: { @@ -127,83 +127,76 @@ add_task(function* testBadPermissions() }); info("Test active tab, page action, no click"); yield testHasNoPermission({ manifest: { "permissions": ["http://example.com/", "activeTab"], "page_action": {}, }, - contentSetup() { - return new Promise(resolve => { - browser.tabs.query({active: true, currentWindow: true}, tabs => { - browser.pageAction.show(tabs[0].id).then(() => { - resolve(); - }); - }); - }); + async contentSetup() { + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + await browser.pageAction.show(tab.id); }, }); yield BrowserTestUtils.removeTab(tab2); yield BrowserTestUtils.removeTab(tab1); }); add_task(function* testBadURL() { - 
function background() { - browser.tabs.query({currentWindow: true}, tabs => { - let promises = [ - new Promise(resolve => { - browser.tabs.executeScript({ - file: "http://example.com/script.js", - }, result => { - browser.test.assertEq(undefined, result, "Result value"); - - browser.test.assertTrue(browser.extension.lastError instanceof Error, - "runtime.lastError is Error"); - - browser.test.assertTrue(browser.runtime.lastError instanceof Error, - "runtime.lastError is Error"); - - browser.test.assertEq( - "Files to be injected must be within the extension", - browser.extension.lastError && browser.extension.lastError.message, - "extension.lastError value"); - - browser.test.assertEq( - "Files to be injected must be within the extension", - browser.runtime.lastError && browser.runtime.lastError.message, - "runtime.lastError value"); - - resolve(); - }); - }), - + async function background() { + let promises = [ + new Promise(resolve => { browser.tabs.executeScript({ file: "http://example.com/script.js", - }).catch(error => { - browser.test.assertTrue(error instanceof Error, "Error is Error"); + }, result => { + browser.test.assertEq(undefined, result, "Result value"); + + browser.test.assertTrue(browser.extension.lastError instanceof Error, + "runtime.lastError is Error"); - browser.test.assertEq(null, browser.extension.lastError, - "extension.lastError value"); + browser.test.assertTrue(browser.runtime.lastError instanceof Error, + "runtime.lastError is Error"); - browser.test.assertEq(null, browser.runtime.lastError, - "runtime.lastError value"); + browser.test.assertEq( + "Files to be injected must be within the extension", + browser.extension.lastError && browser.extension.lastError.message, + "extension.lastError value"); browser.test.assertEq( "Files to be injected must be within the extension", - error && error.message, - "error value"); - }), - ]; + browser.runtime.lastError && browser.runtime.lastError.message, + "runtime.lastError value"); + + resolve(); 
+ }); + }), + + browser.tabs.executeScript({ + file: "http://example.com/script.js", + }).catch(error => { + browser.test.assertTrue(error instanceof Error, "Error is Error"); - Promise.all(promises).then(() => { - browser.test.notifyPass("executeScript-lastError"); - }); - }); + browser.test.assertEq(null, browser.extension.lastError, + "extension.lastError value"); + + browser.test.assertEq(null, browser.runtime.lastError, + "runtime.lastError value"); + + browser.test.assertEq( + "Files to be injected must be within the extension", + error && error.message, + "error value"); + }), + ]; + + await Promise.all(promises); + + browser.test.notifyPass("executeScript-lastError"); } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["<all_urls>"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_executeScript_good.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_executeScript_good.js @@ -2,33 +2,33 @@ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; requestLongerTimeout(2); function* testHasPermission(params) { let contentSetup = params.contentSetup || (() => Promise.resolve()); - function background(contentSetup) { + async function background(contentSetup) { browser.runtime.onMessage.addListener((msg, sender) => { browser.test.assertEq(msg, "script ran", "script ran"); browser.test.notifyPass("executeScript"); }); browser.test.onMessage.addListener(msg => { browser.test.assertEq(msg, "execute-script"); browser.tabs.executeScript({ file: "script.js", }); }); - contentSetup().then(() => { - browser.test.sendMessage("ready"); - }); + await contentSetup(); + + browser.test.sendMessage("ready"); } let extension = ExtensionTestUtils.loadExtension({ manifest: params.manifest, background: `(${background})(${contentSetup})`, files: { @@ -117,57 +117,46 @@ add_task(function* testGoodPermissions() }); info("Test activeTab permission with a page action click"); yield testHasPermission({ manifest: { "permissions": ["activeTab"], "page_action": {}, }, - contentSetup() { - return new Promise(resolve => { - browser.tabs.query({active: true, currentWindow: true}, tabs => { - browser.pageAction.show(tabs[0].id).then(() => { - resolve(); - }); - }); - }); + contentSetup: async () => { + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + await browser.pageAction.show(tab.id); }, setup: clickPageAction, tearDown: closePageAction, }); info("Test activeTab permission with a browser action w/popup click"); yield testHasPermission({ manifest: { "permissions": ["activeTab"], "browser_action": {"default_popup": "_blank.html"}, }, - setup: extension => { - return clickBrowserAction(extension).then(() => { - return awaitExtensionPanel(extension, window, "_blank.html"); - 
}); + setup: async extension => { + await clickBrowserAction(extension); + return awaitExtensionPanel(extension, window, "_blank.html"); }, tearDown: closeBrowserAction, }); info("Test activeTab permission with a page action w/popup click"); yield testHasPermission({ manifest: { "permissions": ["activeTab"], "page_action": {"default_popup": "_blank.html"}, }, - contentSetup() { - return new Promise(resolve => { - browser.tabs.query({active: true, currentWindow: true}, tabs => { - browser.pageAction.show(tabs[0].id).then(() => { - resolve(); - }); - }); - }); + contentSetup: async () => { + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + await browser.pageAction.show(tab.id); }, setup: clickPageAction, tearDown: closePageAction, }); info("Test activeTab permission with a context menu click"); yield testHasPermission({ manifest: {
--- a/browser/components/extensions/test/browser/browser_ext_tabs_executeScript_runAt.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_executeScript_runAt.js @@ -12,91 +12,86 @@ * * And since we can't actually rely on that timing, it retries any attempts that * fail to load as early as expected, but don't load at any illegal time. */ add_task(function* testExecuteScript() { let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "about:blank", true); - function background() { + async function background() { let tab; const BASE = "http://mochi.test:8888/browser/browser/components/extensions/test/browser/"; const URL = BASE + "file_iframe_document.sjs"; const MAX_TRIES = 10; - let tries = 0; + + try { + [tab] = await browser.tabs.query({active: true, currentWindow: true}); - function again() { - if (tries++ == MAX_TRIES) { - return Promise.reject(new Error("Max tries exceeded")); - } - - let url = `${URL}?r=${Math.random()}`; + let success = false; + for (let tries = 0; !success && tries < MAX_TRIES; tries++) { + let url = `${URL}?r=${Math.random()}`; - let loadingPromise = new Promise(resolve => { - browser.tabs.onUpdated.addListener(function listener(tabId, changed, tab_) { - if (tabId == tab.id && changed.status == "loading" && tab_.url == url) { - browser.tabs.onUpdated.removeListener(listener); - resolve(); - } + let loadingPromise = new Promise(resolve => { + browser.tabs.onUpdated.addListener(function listener(tabId, changed, tab_) { + if (tabId == tab.id && changed.status == "loading" && tab_.url == url) { + browser.tabs.onUpdated.removeListener(listener); + resolve(); + } + }); }); - }); - // TODO: Test allFrames and frameId. + // TODO: Test allFrames and frameId. 
- return browser.tabs.update({url}).then(() => { - return loadingPromise; - }).then(() => { - return Promise.all([ + await browser.tabs.update({url}); + await loadingPromise; + + let states = await Promise.all([ // Send the executeScript requests in the reverse order that we expect // them to execute in, to avoid them passing only because of timing // races. browser.tabs.executeScript({ code: "document.readyState", runAt: "document_idle", }), browser.tabs.executeScript({ code: "document.readyState", runAt: "document_end", }), browser.tabs.executeScript({ code: "document.readyState", runAt: "document_start", }), ].reverse()); - }).then(states => { + browser.test.log(`Got states: ${states}`); // Make sure that none of our scripts executed earlier than expected, // regardless of retries. browser.test.assertTrue(states[1] == "interactive" || states[1] == "complete", `document_end state is valid: ${states[1]}`); browser.test.assertTrue(states[2] == "complete", `document_idle state is valid: ${states[2]}`); // If we have the earliest valid states for each script, we're done. // Otherwise, try again. - if (states[0] != "loading" || states[1] != "interactive" || states[2] != "complete") { - return again(); - } - }); - } + success = (states[0] == "loading" && + states[1] == "interactive" && + states[2] == "complete"); + } - browser.tabs.query({active: true, currentWindow: true}).then(tabs => { - tab = tabs[0]; + browser.test.assertTrue(success, "Got the earliest expected states at least once"); - return again(); - }).then(() => { browser.test.notifyPass("executeScript-runAt"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFail("executeScript-runAt"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["http://mochi.test/", "tabs"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_insertCSS.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_insertCSS.js @@ -5,18 +5,18 @@ add_task(function* testExecuteScript() { let {MessageChannel} = Cu.import("resource://gre/modules/MessageChannel.jsm", {}); let messageManagersSize = MessageChannel.messageManagers.size; let responseManagersSize = MessageChannel.responseManagers.size; let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://mochi.test:8888/", true); - function background() { - let promises = [ + async function background() { + let tasks = [ { background: "transparent", foreground: "rgb(0, 113, 4)", promise: () => { return browser.tabs.insertCSS({ file: "file2.css", }); }, @@ -32,41 +32,35 @@ add_task(function* testExecuteScript() { }, ]; function checkCSS() { let computedStyle = window.getComputedStyle(document.body); return [computedStyle.backgroundColor, computedStyle.color]; } - function next() { - if (!promises.length) { - return; - } + try { + for (let {promise, background, foreground} of tasks) { + let result = await promise(); - let {promise, background, foreground} = promises.shift(); - return promise().then(result => { browser.test.assertEq(undefined, result, "Expected callback result"); - return browser.tabs.executeScript({ + [result] = await browser.tabs.executeScript({ code: `(${checkCSS})()`, }); - }).then(([result]) => { + browser.test.assertEq(background, result[0], "Expected background color"); browser.test.assertEq(foreground, result[1], "Expected foreground color"); - return next(); - }); - } + } - next().then(() => { browser.test.notifyPass("insertCSS"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFailure("insertCSS"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["http://mochi.test/"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_move.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_move.js @@ -8,116 +8,94 @@ add_task(function* () { gBrowser.selectedTab = tab1; let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, - background: function() { - browser.tabs.query({ - lastFocusedWindow: true, - }, function(tabs) { - let tab = tabs[0]; - browser.tabs.move(tab.id, {index: 0}); - browser.tabs.query( - {lastFocusedWindow: true}, - tabs => { - browser.test.assertEq(tabs[0].url, tab.url, "should be first tab"); - browser.test.notifyPass("tabs.move.single"); - }); - }); + background: async function() { + let [tab] = await browser.tabs.query({lastFocusedWindow: true}); + + browser.tabs.move(tab.id, {index: 0}); + let tabs = await browser.tabs.query({lastFocusedWindow: true}); + + browser.test.assertEq(tabs[0].url, tab.url, "should be first tab"); + browser.test.notifyPass("tabs.move.single"); }, }); yield extension.startup(); yield extension.awaitFinish("tabs.move.single"); yield extension.unload(); extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, - background: function() { - browser.tabs.query( - {lastFocusedWindow: true}, - tabs => { - tabs.sort(function(a, b) { return a.url > b.url; }); - browser.tabs.move(tabs.map(tab => tab.id), {index: 0}); - browser.tabs.query( - {lastFocusedWindow: true}, - tabs => { - browser.test.assertEq(tabs[0].url, "about:blank", "should be first tab"); - browser.test.assertEq(tabs[1].url, "about:config", "should be second tab"); - browser.test.assertEq(tabs[2].url, "about:robots", "should be third tab"); - browser.test.notifyPass("tabs.move.multiple"); - }); - }); + background: async function() { + let tabs = await browser.tabs.query({lastFocusedWindow: true}); + + tabs.sort(function(a, b) { return a.url > b.url; }); + + browser.tabs.move(tabs.map(tab => tab.id), {index: 0}); + + tabs = await 
browser.tabs.query({lastFocusedWindow: true}); + + browser.test.assertEq(tabs[0].url, "about:blank", "should be first tab"); + browser.test.assertEq(tabs[1].url, "about:config", "should be second tab"); + browser.test.assertEq(tabs[2].url, "about:robots", "should be third tab"); + + browser.test.notifyPass("tabs.move.multiple"); }, }); yield extension.startup(); yield extension.awaitFinish("tabs.move.multiple"); yield extension.unload(); extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, - background: function() { - browser.tabs.query( - {lastFocusedWindow: true}, - tabs => { - let tab = tabs[1]; - // Assuming that tab.id of 12345 does not exist. - browser.tabs.move([tab.id, 12345], {index: 0}) - .then( - tabs => { browser.test.fail("Promise should not resolve"); }, - e => { - browser.test.assertTrue(/Invalid tab/.test(e), - "Invalid tab should be in error"); - }) - .then( - browser.tabs.query({lastFocusedWindow: true}) - .then( - (tabs) => { - browser.test.assertEq(tabs[1].url, tab.url, "should be second tab"); - browser.test.notifyPass("tabs.move.invalid"); - } - ) - ); - }); + async background() { + let [, tab] = await browser.tabs.query({lastFocusedWindow: true}); + + // Assuming that tab.id of 12345 does not exist. 
+ await browser.test.assertRejects( + browser.tabs.move([tab.id, 12345], {index: 0}), + /Invalid tab/, + "Should receive invalid tab error"); + + let tabs = await browser.tabs.query({lastFocusedWindow: true}); + browser.test.assertEq(tabs[1].url, tab.url, "should be second tab"); + browser.test.notifyPass("tabs.move.invalid"); }, }); yield extension.startup(); yield extension.awaitFinish("tabs.move.invalid"); yield extension.unload(); extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, - background: function() { - browser.tabs.query( - {lastFocusedWindow: true}, - tabs => { - let tab = tabs[0]; - browser.tabs.move(tab.id, {index: -1}); - browser.tabs.query( - {lastFocusedWindow: true}, - tabs => { - browser.test.assertEq(tabs[2].url, tab.url, "should be last tab"); - browser.test.notifyPass("tabs.move.last"); - }); - }); + background: async function() { + let [tab] = await browser.tabs.query({lastFocusedWindow: true}); + browser.tabs.move(tab.id, {index: -1}); + + let tabs = await browser.tabs.query({lastFocusedWindow: true}); + + browser.test.assertEq(tabs[2].url, tab.url, "should be last tab"); + browser.test.notifyPass("tabs.move.last"); }, }); yield extension.startup(); yield extension.awaitFinish("tabs.move.last"); yield extension.unload(); yield BrowserTestUtils.removeTab(tab1);
--- a/browser/components/extensions/test/browser/browser_ext_tabs_move_window.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_move_window.js @@ -7,42 +7,33 @@ add_task(function* () { let window1 = yield BrowserTestUtils.openNewBrowserWindow(); yield BrowserTestUtils.openNewForegroundTab(window1.gBrowser, "http://example.com/"); let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, - background: function() { - browser.tabs.query({ - url: "<all_urls>", - }, function(tabs) { - let destination = tabs[0]; - let source = tabs[1]; // skip over about:blank in window1 - browser.tabs.move(source.id, {windowId: destination.windowId, index: 0}); + async background() { + let tabs = await browser.tabs.query({url: "<all_urls>"}); + let destination = tabs[0]; + let source = tabs[1]; // skip over about:blank in window1 - browser.tabs.query( - {url: "<all_urls>"}, - tabs => { - browser.test.assertEq(tabs[0].url, "http://example.com/"); - browser.test.assertEq(tabs[0].windowId, destination.windowId); - browser.test.notifyPass("tabs.move.window"); - }); + // Assuming that this windowId does not exist. + await browser.test.assertRejects( + browser.tabs.move(source.id, {windowId: 123144576, index: 0}), + /Invalid window/, + "Should receive invalid window error"); - // Assuming that this windowId does not exist. 
- browser.tabs.move(source.id, {windowId: 123144576, index: 0}) - .then( - tabs => { browser.test.fail("Promise should not resolve"); }, - e => { - browser.test.assertTrue(/Invalid window/.test(e), - "Invalid window should be in error"); - } - ); - }); + browser.tabs.move(source.id, {windowId: destination.windowId, index: 0}); + + tabs = await browser.tabs.query({url: "<all_urls>"}); + browser.test.assertEq(tabs[0].url, "http://example.com/"); + browser.test.assertEq(tabs[0].windowId, destination.windowId); + browser.test.notifyPass("tabs.move.window"); }, }); yield extension.startup(); yield extension.awaitFinish("tabs.move.window"); yield extension.unload(); for (let tab of window.gBrowser.tabs) { @@ -61,33 +52,33 @@ add_task(function* test_currentWindowAft browser.test.sendMessage("id", win.id); }); } }); browser.test.sendMessage("ready"); }, }; - function background() { + async function background() { let tabId; + const url = browser.extension.getURL("current.html"); - browser.tabs.create({url}).then(tab => { - tabId = tab.id; - }); - browser.test.onMessage.addListener(msg => { + + browser.test.onMessage.addListener(async msg => { if (msg === "move") { - browser.windows.create({tabId}).then(() => { - browser.test.sendMessage("moved"); - }); + await browser.windows.create({tabId}); + browser.test.sendMessage("moved"); } else if (msg === "close") { - browser.tabs.remove(tabId).then(() => { - browser.test.sendMessage("done"); - }); + await browser.tabs.remove(tabId); + browser.test.sendMessage("done"); } }); + + let tab = await browser.tabs.create({url}); + tabId = tab.id; } const extension = ExtensionTestUtils.loadExtension({files, background}); yield extension.startup(); yield extension.awaitMessage("ready"); extension.sendMessage("current");
--- a/browser/components/extensions/test/browser/browser_ext_tabs_onHighlighted.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_onHighlighted.js @@ -1,14 +1,14 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* testTabEvents() { - function background() { + async function background() { /** The list of active tab ID's */ let tabIds = []; /** * Stores the events that fire for each tab. * * events { * tabId1: [event1, event2, ...], @@ -34,89 +34,87 @@ add_task(function* testTabEvents() { }); /** * Asserts that the expected events are fired for the tab with id = tabId. * The events associated to the specified tab are removed after this check is made. * * @param {number} tabId * @param {Array<string>} expectedEvents - * @returns {Promise} */ - function expectEvents(tabId, expectedEvents) { + async function expectEvents(tabId, expectedEvents) { browser.test.log(`Expecting events: ${expectedEvents.join(", ")}`); - return new Promise(resolve => { - setTimeout(resolve, 0); - }).then(() => { - browser.test.assertEq(expectedEvents.length, events[tabId].length, - `Got expected number of events for ${tabId}`); - for (let [i, name] of expectedEvents.entries()) { - browser.test.assertEq(name, i in events[tabId] && events[tabId][i], - `Got expected ${name} event`); - } - delete events[tabId]; - }); + await new Promise(resolve => setTimeout(resolve, 0)); + + browser.test.assertEq(expectedEvents.length, events[tabId].length, + `Got expected number of events for ${tabId}`); + + for (let [i, name] of expectedEvents.entries()) { + browser.test.assertEq(name, i in events[tabId] && events[tabId][i], + `Got expected ${name} event`); + } + delete events[tabId]; } /** * Opens a new tab and asserts that the correct events are fired. 
* * @param {number} windowId - * @returns {Promise} */ - function openTab(windowId) { - return browser.tabs.create({windowId}).then(tab => { - tabIds.push(tab.id); - browser.test.log(`Opened tab ${tab.id}`); - return expectEvents(tab.id, [ - "onActivated", - "onHighlighted", - ]); - }); + async function openTab(windowId) { + let tab = await browser.tabs.create({windowId}); + + tabIds.push(tab.id); + browser.test.log(`Opened tab ${tab.id}`); + + await expectEvents(tab.id, [ + "onActivated", + "onHighlighted", + ]); } /** * Highlights an existing tab and asserts that the correct events are fired. * * @param {number} tabId - * @returns {Promise} */ - function highlightTab(tabId) { + async function highlightTab(tabId) { browser.test.log(`Highlighting tab ${tabId}`); - return browser.tabs.update(tabId, {active: true}).then(tab => { - browser.test.assertEq(tab.id, tabId, `Tab ${tab.id} highlighted`); - return expectEvents(tab.id, [ - "onActivated", - "onHighlighted", - ]); - }); + let tab = await browser.tabs.update(tabId, {active: true}); + + browser.test.assertEq(tab.id, tabId, `Tab ${tab.id} highlighted`); + + await expectEvents(tab.id, [ + "onActivated", + "onHighlighted", + ]); } /** * The main entry point to the tests. 
*/ - browser.tabs.query({active: true, currentWindow: true}, tabs => { - let activeWindow = tabs[0].windowId; - Promise.all([ - openTab(activeWindow), - openTab(activeWindow), - openTab(activeWindow), - ]).then(() => { - return Promise.all([ - highlightTab(tabIds[0]), - highlightTab(tabIds[1]), - highlightTab(tabIds[2]), - ]); - }).then(() => { - return Promise.all(tabIds.map(id => browser.tabs.remove(id))); - }).then(() => { - browser.test.notifyPass("tabs.highlight"); - }); - }); + let tabs = await browser.tabs.query({active: true, currentWindow: true}); + + let activeWindow = tabs[0].windowId; + await Promise.all([ + openTab(activeWindow), + openTab(activeWindow), + openTab(activeWindow), + ]); + + await Promise.all([ + highlightTab(tabIds[0]), + highlightTab(tabIds[1]), + highlightTab(tabIds[2]), + ]); + + await Promise.all(tabIds.map(id => browser.tabs.remove(id))); + + browser.test.notifyPass("tabs.highlight"); } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_query.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_query.js @@ -134,21 +134,21 @@ add_task(function* () { // test width and height extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background: function() { - browser.test.onMessage.addListener((msg) => { - browser.tabs.query({active: true}).then(tabs => { - browser.test.assertEq(tabs.length, 1, "should have one tab"); - browser.test.sendMessage("dims", {width: tabs[0].width, height: tabs[0].height}); - }); + browser.test.onMessage.addListener(async msg => { + let tabs = await browser.tabs.query({active: true}); + + browser.test.assertEq(tabs.length, 1, "should have one tab"); + browser.test.sendMessage("dims", {width: tabs[0].width, height: tabs[0].height}); }); browser.test.sendMessage("ready"); }, }); const RESOLUTION_PREF = "layout.css.devPixelsPerPx"; registerCleanupFunction(() => { SpecialPowers.clearUserPref(RESOLUTION_PREF); @@ -177,47 +177,47 @@ add_task(function* () { }); add_task(function* testQueryPermissions() { let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": [], }, - background: function(x) { - browser.tabs.query({currentWindow: true, active: true}).then((tabs) => { + async background() { + try { + let tabs = await browser.tabs.query({currentWindow: true, active: true}); browser.test.assertEq(tabs.length, 1, "Expect query to return tabs"); browser.test.notifyPass("queryPermissions"); - }).catch((e) => { + } catch (e) { browser.test.notifyFail("queryPermissions"); - }); + } }, }); yield extension.startup(); yield extension.awaitFinish("queryPermissions"); yield extension.unload(); }); add_task(function* testQueryWithURLPermissions() { let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": [], }, - background: function(x) { - browser.tabs.query({"url": "http://www.bbc.com/"}).then(() => { - 
browser.test.notifyFail("queryWithURLPermissions"); - }).catch((e) => { - browser.test.assertEq('The "tabs" permission is required to use the query API with the "url" parameter', - e.message, "Expected permissions error message"); - browser.test.notifyPass("queryWithURLPermissions"); - }); + async background() { + await browser.test.assertRejects( + browser.tabs.query({"url": "http://www.bbc.com/"}), + 'The "tabs" permission is required to use the query API with the "url" parameter', + "Expected tabs.query with 'url' to fail with permissions error message"); + + browser.test.notifyPass("queryWithURLPermissions"); }, }); yield extension.startup(); yield extension.awaitFinish("queryWithURLPermissions"); yield extension.unload();
--- a/browser/components/extensions/test/browser/browser_ext_tabs_reload.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_reload.js @@ -14,41 +14,41 @@ add_task(function* () { }, "tab.html": `<head> <meta charset="utf-8"> <script src="tab.js"></script> </head>`, }, - background: function() { + async background() { let tabLoadedCount = 0; - browser.tabs.create({url: "tab.html", active: true}).then(tab => { - browser.runtime.onMessage.addListener(msg => { - if (msg == "tab-loaded") { - tabLoadedCount++; + let tab = await browser.tabs.create({url: "tab.html", active: true}); - if (tabLoadedCount == 1) { - // Reload the tab once passing no arguments. - return browser.tabs.reload(); - } + browser.runtime.onMessage.addListener(msg => { + if (msg == "tab-loaded") { + tabLoadedCount++; + + if (tabLoadedCount == 1) { + // Reload the tab once passing no arguments. + return browser.tabs.reload(); + } - if (tabLoadedCount == 2) { - // Reload the tab again with explicit arguments. - return browser.tabs.reload(tab.id, { - bypassCache: false, - }); - } + if (tabLoadedCount == 2) { + // Reload the tab again with explicit arguments. + return browser.tabs.reload(tab.id, { + bypassCache: false, + }); + } - if (tabLoadedCount == 3) { - browser.test.notifyPass("tabs.reload"); - } + if (tabLoadedCount == 3) { + browser.test.notifyPass("tabs.reload"); } - }); + } }); }, }); yield extension.startup(); yield extension.awaitFinish("tabs.reload"); yield extension.unload(); });
--- a/browser/components/extensions/test/browser/browser_ext_tabs_reload_bypass_cache.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_reload_bypass_cache.js @@ -3,57 +3,56 @@ "use strict"; add_task(function* () { let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs", "<all_urls>"], }, - background: function() { + async background() { const BASE = "http://mochi.test:8888/browser/browser/components/extensions/test/browser/"; const URL = BASE + "file_bypass_cache.sjs"; function awaitLoad(tabId) { return new Promise(resolve => { browser.tabs.onUpdated.addListener(function listener(tabId_, changed, tab) { if (tabId == tabId_ && changed.status == "complete" && tab.url == URL) { browser.tabs.onUpdated.removeListener(listener); resolve(); } }); }); } - let tabId; - browser.tabs.create({url: URL}).then((tab) => { - tabId = tab.id; - return awaitLoad(tabId); - }).then(() => { - return browser.tabs.reload(tabId, {bypassCache: false}); - }).then(() => { - return awaitLoad(tabId); - }).then(() => { - return browser.tabs.executeScript(tabId, {code: "document.body.textContent"}); - }).then(([textContent]) => { + try { + let tab = await browser.tabs.create({url: URL}); + await awaitLoad(tab.id); + + await browser.tabs.reload(tab.id, {bypassCache: false}); + await awaitLoad(tab.id); + + let [textContent] = await browser.tabs.executeScript(tab.id, {code: "document.body.textContent"}); browser.test.assertEq("", textContent, "`textContent` should be empty when bypassCache=false"); - return browser.tabs.reload(tabId, {bypassCache: true}); - }).then(() => { - return awaitLoad(tabId); - }).then(() => { - return browser.tabs.executeScript(tabId, {code: "document.body.textContent"}); - }).then(([textContent]) => { + + await browser.tabs.reload(tab.id, {bypassCache: true}); + await awaitLoad(tab.id); + + [textContent] = await browser.tabs.executeScript(tab.id, {code: "document.body.textContent"}); + let [pragma, cacheControl] = 
textContent.split(":"); browser.test.assertEq("no-cache", pragma, "`pragma` should be set to `no-cache` when bypassCache is true"); browser.test.assertEq("no-cache", cacheControl, "`cacheControl` should be set to `no-cache` when bypassCache is true"); - browser.tabs.remove(tabId); + + await browser.tabs.remove(tab.id); + browser.test.notifyPass("tabs.reload_bypass_cache"); - }).catch(error => { + } catch (error) { browser.test.fail(`${error} :: ${error.stack}`); browser.test.notifyFail("tabs.reload_bypass_cache"); - }); + } }, }); yield extension.startup(); yield extension.awaitFinish("tabs.reload_bypass_cache"); yield extension.unload(); });
--- a/browser/components/extensions/test/browser/browser_ext_tabs_removeCSS.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_removeCSS.js @@ -1,17 +1,17 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* testExecuteScript() { let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://mochi.test:8888/", true); - function background() { - let promises = [ + async function background() { + let tasks = [ // Insert CSS file. { background: "transparent", foreground: "rgb(0, 113, 4)", promise: () => { return browser.tabs.insertCSS({ file: "file2.css", }); @@ -49,41 +49,33 @@ add_task(function* testExecuteScript() { }, ]; function checkCSS() { let computedStyle = window.getComputedStyle(document.body); return [computedStyle.backgroundColor, computedStyle.color]; } - function next() { - if (!promises.length) { - return; - } - - let {promise, background, foreground} = promises.shift(); - return promise().then(result => { + try { + for (let {promise, background, foreground} of tasks) { + let result = await promise(); browser.test.assertEq(undefined, result, "Expected callback result"); - return browser.tabs.executeScript({ + [result] = await browser.tabs.executeScript({ code: `(${checkCSS})()`, }); - }).then(([result]) => { browser.test.assertEq(background, result[0], "Expected background color"); browser.test.assertEq(foreground, result[1], "Expected foreground color"); - return next(); - }); - } + } - next().then(() => { browser.test.notifyPass("removeCSS"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFailure("removeCSS"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["http://mochi.test/"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_tabs_sendMessage.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_sendMessage.js @@ -9,17 +9,17 @@ add_task(function* tabsSendMessageReply( "content_scripts": [{ "matches": ["http://example.com/"], "js": ["content-script.js"], "run_at": "document_start", }], }, - background: function() { + background: async function() { let firstTab; let promiseResponse = new Promise(resolve => { browser.runtime.onMessage.addListener((msg, sender, respond) => { if (msg == "content-script-ready") { let tabId = sender.tab.id; Promise.all([ promiseResponse, @@ -61,50 +61,50 @@ add_task(function* tabsSendMessageReply( return Promise.resolve("expected-response"); } else if (msg[0] == "got-response") { resolve(msg[1]); } }); }); - browser.tabs.query({currentWindow: true, active: true}).then(tabs => { - firstTab = tabs[0].id; - browser.tabs.create({url: "http://example.com/"}); - }); + let tabs = await browser.tabs.query({currentWindow: true, active: true}); + firstTab = tabs[0].id; + browser.tabs.create({url: "http://example.com/"}); }, files: { - "content-script.js": function() { + "content-script.js": async function() { browser.runtime.onMessage.addListener((msg, sender, respond) => { if (msg == "respond-now") { respond(msg); } else if (msg == "respond-soon") { setTimeout(() => { respond(msg); }, 0); return true; } else if (msg == "respond-promise") { return Promise.resolve(msg); } else if (msg == "respond-never") { return; } else if (msg == "respond-error") { return Promise.reject(new Error(msg)); } else if (msg == "throw-error") { throw new Error(msg); } }); + browser.runtime.onMessage.addListener((msg, sender, respond) => { if (msg == "respond-now") { respond("hello"); } else if (msg == "respond-now-2") { respond(msg); } }); - browser.runtime.sendMessage("content-script-ready").then(response => { - browser.runtime.sendMessage(["got-response", response]); - }); + + let response = await 
browser.runtime.sendMessage("content-script-ready"); + browser.runtime.sendMessage(["got-response", response]); }, }, }); yield extension.startup(); yield extension.awaitFinish("sendMessage"); @@ -119,62 +119,60 @@ add_task(function* tabsSendHidden() { "content_scripts": [{ "matches": ["http://example.com/content*"], "js": ["content-script.js"], "run_at": "document_start", }], }, - background: function() { + background: async function() { let resolveContent; browser.runtime.onMessage.addListener((msg, sender) => { if (msg[0] == "content-ready") { resolveContent(msg[1]); } }); let awaitContent = url => { return new Promise(resolve => { resolveContent = resolve; }).then(result => { browser.test.assertEq(url, result, "Expected content script URL"); }); }; - const URL1 = "http://example.com/content1.html"; - const URL2 = "http://example.com/content2.html"; - browser.tabs.create({url: URL1}).then(tab => { - return awaitContent(URL1).then(() => { - return browser.tabs.sendMessage(tab.id, URL1); - }).then(url => { - browser.test.assertEq(URL1, url, "Should get response from expected content window"); + try { + const URL1 = "http://example.com/content1.html"; + const URL2 = "http://example.com/content2.html"; + + let tab = await browser.tabs.create({url: URL1}); + await awaitContent(URL1); + + let url = await browser.tabs.sendMessage(tab.id, URL1); + browser.test.assertEq(URL1, url, "Should get response from expected content window"); + + await browser.tabs.update(tab.id, {url: URL2}); + await awaitContent(URL2); - return browser.tabs.update(tab.id, {url: URL2}); - }).then(() => { - return awaitContent(URL2); - }).then(() => { - return browser.tabs.sendMessage(tab.id, URL2); - }).then(url => { - browser.test.assertEq(URL2, url, "Should get response from expected content window"); + url = await browser.tabs.sendMessage(tab.id, URL2); + browser.test.assertEq(URL2, url, "Should get response from expected content window"); - // Repeat once just to be sure the first message was 
processed by all - // listeners before we exit the test. - return browser.tabs.sendMessage(tab.id, URL2); - }).then(url => { - browser.test.assertEq(URL2, url, "Should get response from expected content window"); + // Repeat once just to be sure the first message was processed by all + // listeners before we exit the test. + url = await browser.tabs.sendMessage(tab.id, URL2); + browser.test.assertEq(URL2, url, "Should get response from expected content window"); - return browser.tabs.remove(tab.id); - }); - }).then(() => { + await browser.tabs.remove(tab.id); + browser.test.notifyPass("contentscript-bfcache-window"); - }).catch(error => { + } catch (error) { browser.test.fail(`Error: ${error} :: ${error.stack}`); browser.test.notifyFail("contentscript-bfcache-window"); - }); + } }, files: { "content-script.js": function() { // Store this in a local variable to make sure we don't touch any // properties of the possibly-hidden content window. let href = window.location.href; @@ -198,32 +196,30 @@ add_task(function* tabsSendHidden() { add_task(function* tabsSendMessageNoExceptionOnNonExistentTab() { let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, - background: function() { + async background() { let url = "http://example.com/mochitest/browser/browser/components/extensions/test/browser/file_dummy.html"; - browser.tabs.create({url}, tab => { - let exception; - try { - browser.tabs.sendMessage(tab.id, "message"); - browser.tabs.sendMessage(tab.id + 100, "message"); - } catch (e) { - exception = e; - } + let tab = await browser.tabs.create({url}); - browser.test.assertEq(undefined, exception, "no exception should be raised on tabs.sendMessage to nonexistent tabs"); - browser.tabs.remove(tab.id, function() { - browser.test.notifyPass("tabs.sendMessage"); - }); - }); + try { + browser.tabs.sendMessage(tab.id, "message"); + browser.tabs.sendMessage(tab.id + 100, "message"); + } catch (e) { + browser.test.fail("no exception should be 
raised on tabs.sendMessage to nonexistent tabs"); + } + + await browser.tabs.remove(tab.id); + + browser.test.notifyPass("tabs.sendMessage"); }, }); yield Promise.all([ extension.startup(), extension.awaitFinish("tabs.sendMessage"), ]);
--- a/browser/components/extensions/test/browser/browser_ext_tabs_update_url.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_update_url.js @@ -19,41 +19,31 @@ function* testTabsUpdateURL(existentTabU <h1>tab page</h1> </body> </html> `.trim(), }, background: function() { browser.test.sendMessage("ready", browser.runtime.getURL("tab.html")); - browser.test.onMessage.addListener((msg, tabsUpdateURL, isErrorExpected) => { - let onTabsUpdated = (tab) => { - if (isErrorExpected) { - browser.test.fail(`tabs.update with URL ${tabsUpdateURL} should be rejected`); - } else { - browser.test.assertTrue(tab, "on success the tab should be defined"); - } - }; + browser.test.onMessage.addListener(async (msg, tabsUpdateURL, isErrorExpected) => { + let tabs = await browser.tabs.query({lastFocusedWindow: true}); + + try { + let tab = await browser.tabs.update(tabs[1].id, {url: tabsUpdateURL}); - let onTabsUpdateError = (error) => { - if (!isErrorExpected) { - browser.test.fails(`tabs.update with URL ${tabsUpdateURL} should not be rejected`); - } else { - browser.test.assertTrue(/^Illegal URL/.test(error.message), - "tabs.update should be rejected with the expected error message"); - } - }; + browser.test.assertFalse(isErrorExpected, `tabs.update with URL ${tabsUpdateURL} should be rejected`); + browser.test.assertTrue(tab, "on success the tab should be defined"); + } catch (error) { + browser.test.assertTrue(isErrorExpected, `tabs.update with URL ${tabsUpdateURL} should not be rejected`); + browser.test.assertTrue(/^Illegal URL/.test(error.message), + "tabs.update should be rejected with the expected error message"); + } - let onTabsUpdateDone = () => browser.test.sendMessage("done"); - - browser.tabs.query({lastFocusedWindow: true}, (tabs) => { - browser.tabs.update(tabs[1].id, {url: tabsUpdateURL}) - .then(onTabsUpdated, onTabsUpdateError) - .then(onTabsUpdateDone); - }); + browser.test.sendMessage("done"); }); }, }); yield extension.startup(); let 
mozExtTabURL = yield extension.awaitMessage("ready");
--- a/browser/components/extensions/test/browser/browser_ext_tabs_zoom.js +++ b/browser/components/extensions/test/browser/browser_ext_tabs_zoom.js @@ -5,17 +5,17 @@ const SITE_SPECIFIC_PREF = "browser.zoom.siteSpecific"; add_task(function* () { let tab1 = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://example.com/"); let tab2 = yield BrowserTestUtils.openNewForegroundTab(gBrowser, "http://example.net/"); gBrowser.selectedTab = tab1; - function background() { + async function background() { function promiseUpdated(tabId, attr) { return new Promise(resolve => { let onUpdated = (tabId_, changeInfo, tab) => { if (tabId == tabId_ && attr in changeInfo) { browser.tabs.onUpdated.removeListener(onUpdated); resolve({changeInfo, tab}); } @@ -45,156 +45,144 @@ add_task(function* () { let eventPromises = []; browser.tabs.onZoomChange.addListener(info => { zoomEvents.push(info); if (eventPromises.length) { eventPromises.shift().resolve(); } }); - let awaitZoom = (tabId, newValue) => { + let awaitZoom = async (tabId, newValue) => { let listener; - return new Promise(resolve => { + await new Promise(async resolve => { listener = info => { if (info.tabId == tabId && info.newZoomFactor == newValue) { resolve(); } }; browser.tabs.onZoomChange.addListener(listener); - browser.tabs.getZoom(tabId).then(zoomFactor => { - if (zoomFactor == newValue) { - resolve(); - } - }); - }).then(() => { - browser.tabs.onZoomChange.removeListener(listener); + let zoomFactor = await browser.tabs.getZoom(tabId); + if (zoomFactor == newValue) { + resolve(); + } }); + + browser.tabs.onZoomChange.removeListener(listener); }; - let checkZoom = (tabId, newValue, oldValue = null) => { + let checkZoom = async (tabId, newValue, oldValue = null) => { let awaitEvent; if (oldValue != null && !zoomEvents.length) { awaitEvent = new Promise(resolve => { eventPromises.push({resolve}); }); } - return Promise.all([ + let [apiZoom, realZoom] = await Promise.all([ browser.tabs.getZoom(tabId), 
msg("get-zoom", tabId), awaitEvent, - ]).then(([apiZoom, realZoom]) => { - browser.test.assertEq(newValue, apiZoom, `Got expected zoom value from API`); - browser.test.assertEq(newValue, realZoom, `Got expected zoom value from parent`); + ]); + + browser.test.assertEq(newValue, apiZoom, `Got expected zoom value from API`); + browser.test.assertEq(newValue, realZoom, `Got expected zoom value from parent`); - if (oldValue != null) { - let event = zoomEvents.shift(); - browser.test.assertEq(tabId, event.tabId, `Got expected zoom event tab ID`); - browser.test.assertEq(newValue, event.newZoomFactor, `Got expected zoom event zoom factor`); - browser.test.assertEq(oldValue, event.oldZoomFactor, `Got expected zoom event old zoom factor`); + if (oldValue != null) { + let event = zoomEvents.shift(); + browser.test.assertEq(tabId, event.tabId, `Got expected zoom event tab ID`); + browser.test.assertEq(newValue, event.newZoomFactor, `Got expected zoom event zoom factor`); + browser.test.assertEq(oldValue, event.oldZoomFactor, `Got expected zoom event old zoom factor`); - browser.test.assertEq(3, Object.keys(event.zoomSettings).length, `Zoom settings should have 3 keys`); - browser.test.assertEq("automatic", event.zoomSettings.mode, `Mode should be "automatic"`); - browser.test.assertEq("per-origin", event.zoomSettings.scope, `Scope should be "per-origin"`); - browser.test.assertEq(1, event.zoomSettings.defaultZoomFactor, `Default zoom should be 1`); - } - }); + browser.test.assertEq(3, Object.keys(event.zoomSettings).length, `Zoom settings should have 3 keys`); + browser.test.assertEq("automatic", event.zoomSettings.mode, `Mode should be "automatic"`); + browser.test.assertEq("per-origin", event.zoomSettings.scope, `Scope should be "per-origin"`); + browser.test.assertEq(1, event.zoomSettings.defaultZoomFactor, `Default zoom should be 1`); + } }; - let tabIds; - - browser.tabs.query({lastFocusedWindow: true}).then(tabs => { + try { + let tabs = await 
browser.tabs.query({lastFocusedWindow: true}); browser.test.assertEq(tabs.length, 3, "We have three tabs"); - tabIds = [tabs[1].id, tabs[2].id]; + let tabIds = [tabs[1].id, tabs[2].id]; + await checkZoom(tabIds[0], 1); - return checkZoom(tabIds[0], 1); - }).then(() => { - return browser.tabs.setZoom(tabIds[0], 2); - }).then(() => { - return checkZoom(tabIds[0], 2, 1); - }).then(() => { - return browser.tabs.getZoomSettings(tabIds[0]); - }).then(zoomSettings => { + await browser.tabs.setZoom(tabIds[0], 2); + await checkZoom(tabIds[0], 2, 1); + + let zoomSettings = await browser.tabs.getZoomSettings(tabIds[0]); browser.test.assertEq(3, Object.keys(zoomSettings).length, `Zoom settings should have 3 keys`); browser.test.assertEq("automatic", zoomSettings.mode, `Mode should be "automatic"`); browser.test.assertEq("per-origin", zoomSettings.scope, `Scope should be "per-origin"`); browser.test.assertEq(1, zoomSettings.defaultZoomFactor, `Default zoom should be 1`); + browser.test.log(`Switch to tab 2`); - return browser.tabs.update(tabIds[1], {active: true}); - }).then(() => { - return checkZoom(tabIds[1], 1); - }).then(() => { + await browser.tabs.update(tabIds[1], {active: true}); + await checkZoom(tabIds[1], 1); + + browser.test.log(`Navigate tab 2 to origin of tab 1`); browser.tabs.update(tabIds[1], {url: "http://example.com"}); + await promiseUpdated(tabIds[1], "url"); + await checkZoom(tabIds[1], 2, 1); - return promiseUpdated(tabIds[1], "url"); - }).then(() => { - return checkZoom(tabIds[1], 2, 1); - }).then(() => { + browser.test.log(`Update zoom in tab 2, expect changes in both tabs`); - return browser.tabs.setZoom(tabIds[1], 1.5); - }).then(() => { - return checkZoom(tabIds[1], 1.5, 2); - }).then(() => { + await browser.tabs.setZoom(tabIds[1], 1.5); + await checkZoom(tabIds[1], 1.5, 2); + + browser.test.log(`Switch to tab 1, expect asynchronous zoom change just after the switch`); - return Promise.all([ + await Promise.all([ awaitZoom(tabIds[0], 1.5), 
browser.tabs.update(tabIds[0], {active: true}), ]); - }).then(() => { - return checkZoom(tabIds[0], 1.5, 2); - }).then(() => { + await checkZoom(tabIds[0], 1.5, 2); + + browser.test.log("Set zoom to 0, expect it set to 1"); - return browser.tabs.setZoom(tabIds[0], 0); - }).then(() => { - return checkZoom(tabIds[0], 1, 1.5); - }).then(() => { + await browser.tabs.setZoom(tabIds[0], 0); + await checkZoom(tabIds[0], 1, 1.5); + + browser.test.log("Change zoom externally, expect changes reflected"); - return msg("enlarge"); - }).then(() => { - return checkZoom(tabIds[0], 1.1, 1); - }).then(() => { - return Promise.all([ + await msg("enlarge"); + await checkZoom(tabIds[0], 1.1, 1); + + await Promise.all([ browser.tabs.setZoom(tabIds[0], 0), browser.tabs.setZoom(tabIds[1], 0), ]); - }).then(() => { - return Promise.all([ + await Promise.all([ checkZoom(tabIds[0], 1, 1.1), checkZoom(tabIds[1], 1, 1.5), ]); - }).then(() => { + + browser.test.log("Check that invalid zoom values throw an error"); - return browser.tabs.setZoom(tabIds[0], 42).then( - () => { - browser.test.fail("Expected an error"); - }, - error => { - browser.test.assertTrue(error.message.includes("Zoom value 42 out of range"), - "Got expected error"); - }); - }).then(() => { + await browser.test.assertRejects( + browser.tabs.setZoom(tabIds[0], 42), + /Zoom value 42 out of range/, + "Expected an out of range error"); + browser.test.log("Disable site-specific zoom, expect correct scope"); - return msg("site-specific", false); - }).then(() => { - return browser.tabs.getZoomSettings(tabIds[0]); - }).then(zoomSettings => { + await msg("site-specific", false); + zoomSettings = await browser.tabs.getZoomSettings(tabIds[0]); + browser.test.assertEq("per-tab", zoomSettings.scope, `Scope should be "per-tab"`); - }).then(() => { - return msg("site-specific", null); - }).then(() => { + await msg("site-specific", null); + browser.test.notifyPass("tab-zoom"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} 
:: ${e.stack}`); browser.test.notifyFail("tab-zoom"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_webNavigation_frameId0.js +++ b/browser/components/extensions/test/browser/browser_ext_webNavigation_frameId0.js @@ -15,25 +15,23 @@ add_task(function* webNavigation_getFram // main process: // Cu.import("resource://gre/modules/ExtensionManagement.jsm", {}); // // Or simply run the test again. const BASE = "http://mochi.test:8888/browser/browser/components/extensions/test/browser/"; const DUMMY_URL = BASE + "file_dummy.html"; let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, DUMMY_URL, true); - function background(DUMMY_URL) { - browser.tabs.query({active: true, currentWindow: true}).then(tabs => { - return browser.webNavigation.getAllFrames({tabId: tabs[0].id}); - }).then(frames => { - browser.test.assertEq(1, frames.length, "The dummy page has one frame"); - browser.test.assertEq(0, frames[0].frameId, "Main frame's ID must be 0"); - browser.test.assertEq(DUMMY_URL, frames[0].url, "Main frame URL must match"); - browser.test.notifyPass("frameId checked"); - }); + async function background(DUMMY_URL) { + let tabs = await browser.tabs.query({active: true, currentWindow: true}); + let frames = await browser.webNavigation.getAllFrames({tabId: tabs[0].id}); + browser.test.assertEq(1, frames.length, "The dummy page has one frame"); + browser.test.assertEq(0, frames[0].frameId, "Main frame's ID must be 0"); + browser.test.assertEq(DUMMY_URL, frames[0].url, "Main frame URL must match"); + browser.test.notifyPass("frameId checked"); } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["webNavigation"], }, background: `(${background})(${JSON.stringify(DUMMY_URL)});`,
--- a/browser/components/extensions/test/browser/browser_ext_webNavigation_getFrames.js +++ b/browser/components/extensions/test/browser/browser_ext_webNavigation_getFrames.js @@ -1,38 +1,31 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* testWebNavigationGetNonExistentTab() { let extension = ExtensionTestUtils.loadExtension({ - background: "(" + function() { - let results = [ - // There is no "tabId = 0" because the id assigned by TabManager (defined in ext-utils.js) - // starts from 1. - browser.webNavigation.getAllFrames({tabId: 0}).then(() => { - browser.test.fail("getAllFrames Promise should be rejected on error"); - }, (error) => { - browser.test.assertEq("Invalid tab ID: 0", error.message, - "getAllFrames rejected Promise should pass the expected error"); - }), - // There is no "tabId = 0" because the id assigned by TabManager (defined in ext-utils.js) - // starts from 1, processId is currently marked as optional and it is ignored. - browser.webNavigation.getFrame({tabId: 0, frameId: 15, processId: 20}).then(() => { - browser.test.fail("getFrame Promise should be rejected on error"); - }, (error) => { - browser.test.assertEq("Invalid tab ID: 0", error.message, - "getFrame rejected Promise should pass the expected error"); - }), - ]; + background: async function() { + // There is no "tabId = 0" because the id assigned by TabManager (defined in ext-utils.js) + // starts from 1. + await browser.test.assertRejects( + browser.webNavigation.getAllFrames({tabId: 0}), + "Invalid tab ID: 0", + "getAllFrames rejected Promise should pass the expected error"); - Promise.all(results).then(() => { - browser.test.sendMessage("getNonExistentTab.done"); - }); - } + ")();", + // There is no "tabId = 0" because the id assigned by TabManager (defined in ext-utils.js) + // starts from 1, processId is currently marked as optional and it is ignored. 
+ await browser.test.assertRejects( + browser.webNavigation.getFrame({tabId: 0, frameId: 15, processId: 20}), + "Invalid tab ID: 0", + "getFrame rejected Promise should pass the expected error"); + + browser.test.sendMessage("getNonExistentTab.done"); + }, manifest: { permissions: ["webNavigation"], }, }); info("load complete"); yield extension.startup(); info("startup complete"); @@ -40,68 +33,61 @@ add_task(function* testWebNavigationGetN yield extension.awaitMessage("getNonExistentTab.done"); yield extension.unload(); info("extension unloaded"); }); add_task(function* testWebNavigationFrames() { let extension = ExtensionTestUtils.loadExtension({ - background: "(" + function() { + background: async function() { let tabId; let collectedDetails = []; - browser.webNavigation.onCompleted.addListener((details) => { + browser.webNavigation.onCompleted.addListener(async details => { collectedDetails.push(details); if (details.frameId !== 0) { // wait for the top level iframe to be complete return; } - browser.webNavigation.getAllFrames({tabId}).then((getAllFramesDetails) => { - let getFramePromises = getAllFramesDetails.map((frameDetail) => { - let {frameId} = frameDetail; - // processId is currently marked as optional and it is ignored. - return browser.webNavigation.getFrame({tabId, frameId, processId: 0}); - }); + let getAllFramesDetails = await browser.webNavigation.getAllFrames({tabId}); - Promise.all(getFramePromises).then((getFrameResults) => { - browser.test.sendMessage("webNavigationFrames.done", { - collectedDetails, getAllFramesDetails, getFrameResults, - }); - }, () => { - browser.test.assertTrue(false, "None of the getFrame promises should have been rejected"); - }); + let getFramePromises = getAllFramesDetails.map(({frameId}) => { + // processId is currently marked as optional and it is ignored. 
+ return browser.webNavigation.getFrame({tabId, frameId, processId: 0}); + }); + + let getFrameResults = await Promise.all(getFramePromises); + browser.test.sendMessage("webNavigationFrames.done", { + collectedDetails, getAllFramesDetails, getFrameResults, + }); - // Pick a random frameId. - let nonExistentFrameId = Math.floor(Math.random() * 10000); + // Pick a random frameId. + let nonExistentFrameId = Math.floor(Math.random() * 10000); - // Increment the picked random nonExistentFrameId until it doesn't exists. - while (getAllFramesDetails.filter((details) => details.frameId == nonExistentFrameId).length > 0) { - nonExistentFrameId += 1; - } + // Increment the picked random nonExistentFrameId until it doesn't exists. + while (getAllFramesDetails.filter((details) => details.frameId == nonExistentFrameId).length > 0) { + nonExistentFrameId += 1; + } - // Check that getFrame Promise is rejected with the expected error message on nonexistent frameId. - browser.webNavigation.getFrame({tabId, frameId: nonExistentFrameId, processId: 20}).then(() => { - browser.test.fail("getFrame promise should be rejected for an unexistent frameId"); - }, (error) => { - browser.test.assertEq(`No frame found with frameId: ${nonExistentFrameId}`, error.message, - "getFrame promise should be rejected with the expected error message on unexistent frameId"); - }).then(() => { - browser.tabs.remove(tabId); - browser.test.sendMessage("webNavigationFrames.done"); - }); - }); + // Check that getFrame Promise is rejected with the expected error message on nonexistent frameId. 
+ await browser.test.assertRejects( + browser.webNavigation.getFrame({tabId, frameId: nonExistentFrameId, processId: 20}), + `No frame found with frameId: ${nonExistentFrameId}`, + "getFrame promise should be rejected with the expected error message on unexistent frameId"); + + await browser.tabs.remove(tabId); + browser.test.sendMessage("webNavigationFrames.done"); }); - browser.tabs.create({url: "tab.html"}, (tab) => { - tabId = tab.id; - }); - } + ")();", + let tab = await browser.tabs.create({url: "tab.html"}); + tabId = tab.id; + }, manifest: { permissions: ["webNavigation", "tabs"], }, files: { "tab.html": ` <!DOCTYPE html> <html> <head>
--- a/browser/components/extensions/test/browser/browser_ext_windows_create.js +++ b/browser/components/extensions/test/browser/browser_ext_windows_create.js @@ -1,15 +1,15 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* testWindowCreate() { let extension = ExtensionTestUtils.loadExtension({ - background() { + async background() { let _checkWindowPromise; browser.test.onMessage.addListener(msg => { if (msg == "checked-window") { _checkWindowPromise.resolve(); _checkWindowPromise = null; } }); @@ -17,69 +17,71 @@ add_task(function* testWindowCreate() { function checkWindow(expected) { return new Promise(resolve => { _checkWindowPromise = {resolve}; browser.test.sendMessage("check-window", expected); }); } - function createWindow(params, expected, keep = false) { - return browser.windows.create(params).then(window => { - for (let key of Object.keys(params)) { - if (key == "state" && os == "mac" && params.state == "normal") { - // OS-X doesn't have a hard distinction between "normal" and - // "maximized" states. - browser.test.assertTrue(window.state == "normal" || window.state == "maximized", - `Expected window.state (currently ${window.state}) to be "normal" but will accept "maximized"`); - } else { - browser.test.assertEq(params[key], window[key], `Got expected value for window.${key}`); - } + async function createWindow(params, expected, keep = false) { + let window = await browser.windows.create(params); + + for (let key of Object.keys(params)) { + if (key == "state" && os == "mac" && params.state == "normal") { + // OS-X doesn't have a hard distinction between "normal" and + // "maximized" states. 
+ browser.test.assertTrue(window.state == "normal" || window.state == "maximized", + `Expected window.state (currently ${window.state}) to be "normal" but will accept "maximized"`); + } else { + browser.test.assertEq(params[key], window[key], `Got expected value for window.${key}`); } + } - browser.test.assertEq(1, window.tabs.length, "tabs property got populated"); - return checkWindow(expected).then(() => { - if (keep) { - return window; - } - if (params.state == "fullscreen" && os == "win") { - // FIXME: Closing a fullscreen window causes a window leak in - // Windows tests. - return browser.windows.update(window.id, {state: "normal"}).then(() => { - return browser.windows.remove(window.id); - }); - } - return browser.windows.remove(window.id); - }); - }); + browser.test.assertEq(1, window.tabs.length, "tabs property got populated"); + + await checkWindow(expected); + if (keep) { + return window; + } + + if (params.state == "fullscreen" && os == "win") { + // FIXME: Closing a fullscreen window causes a window leak in + // Windows tests. 
+ await browser.windows.update(window.id, {state: "normal"}); + } + await browser.windows.remove(window.id); } - browser.runtime.getPlatformInfo().then(info => { os = info.os; }) - .then(() => createWindow({state: "maximized"}, {state: "STATE_MAXIMIZED"})) - .then(() => createWindow({state: "minimized"}, {state: "STATE_MINIMIZED"})) - .then(() => createWindow({state: "normal"}, {state: "STATE_NORMAL", hiddenChrome: []})) - .then(() => createWindow({state: "fullscreen"}, {state: "STATE_FULLSCREEN"})) - .then(() => { - return createWindow({type: "popup"}, - {hiddenChrome: ["menubar", "toolbar", "location", "directories", "status", "extrachrome"], - chromeFlags: ["CHROME_OPENAS_DIALOG"]}, - true); - }).then(window => { - return browser.tabs.query({windowType: "popup", active: true}).then(tabs => { - browser.test.assertEq(1, tabs.length, "Expected only one popup"); - browser.test.assertEq(window.id, tabs[0].windowId, "Expected new window to be returned in query"); + try { + ({os} = await browser.runtime.getPlatformInfo()); + + await createWindow({state: "maximized"}, {state: "STATE_MAXIMIZED"}); + await createWindow({state: "minimized"}, {state: "STATE_MINIMIZED"}); + await createWindow({state: "normal"}, {state: "STATE_NORMAL", hiddenChrome: []}); + await createWindow({state: "fullscreen"}, {state: "STATE_FULLSCREEN"}); - return browser.windows.remove(window.id); - }); - }).then(() => { + let window = await createWindow( + {type: "popup"}, + {hiddenChrome: ["menubar", "toolbar", "location", "directories", "status", "extrachrome"], + chromeFlags: ["CHROME_OPENAS_DIALOG"]}, + true); + + let tabs = await browser.tabs.query({windowType: "popup", active: true}); + + browser.test.assertEq(1, tabs.length, "Expected only one popup"); + browser.test.assertEq(window.id, tabs[0].windowId, "Expected new window to be returned in query"); + + await browser.windows.remove(window.id); + browser.test.notifyPass("window-create"); - }).catch(e => { + } catch (e) { 
browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("window-create"); - }); + } }, }); let latestWindow; let windowListener = (window, topic) => { if (topic == "domwindowopened") { latestWindow = window; }
--- a/browser/components/extensions/test/browser/browser_ext_windows_create_params.js +++ b/browser/components/extensions/test/browser/browser_ext_windows_create_params.js @@ -1,40 +1,33 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; // Tests that incompatible parameters can't be used together. add_task(function* testWindowCreateParams() { let extension = ExtensionTestUtils.loadExtension({ - background() { - function* getCalls() { + async background() { + try { for (let state of ["minimized", "maximized", "fullscreen"]) { for (let param of ["left", "top", "width", "height"]) { let expected = `"state": "${state}" may not be combined with "left", "top", "width", or "height"`; - yield browser.windows.create({state, [param]: 100}).then( - val => { - browser.test.fail(`Expected error but got "${val}" instead`); - }, - error => { - browser.test.assertTrue( - error.message.includes(expected), - `Got expected error (got: '${error.message}', expected: '${expected}'`); - }); + await browser.test.assertRejects( + browser.windows.create({state, [param]: 100}), + RegExp(expected), + `Got expected error from create(${param}=100)`); } } - } - Promise.all(getCalls()).then(() => { browser.test.notifyPass("window-create-params"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("window-create-params"); - }); + } }, }); yield extension.startup(); yield extension.awaitFinish("window-create-params"); yield extension.unload(); });
--- a/browser/components/extensions/test/browser/browser_ext_windows_create_tabId.js +++ b/browser/components/extensions/test/browser/browser_ext_windows_create_tabId.js @@ -1,14 +1,14 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* testWindowCreate() { - function background() { + async function background() { let promiseTabAttached = () => { return new Promise(resolve => { browser.tabs.onAttached.addListener(function listener() { browser.tabs.onAttached.removeListener(listener); resolve(); }); }); }; @@ -19,130 +19,113 @@ add_task(function* testWindowCreate() { if (changeInfo.url === expected) { browser.tabs.onUpdated.removeListener(listener); resolve(); } }); }); }; - let windowId, tabId; - browser.windows.getCurrent().then(window => { - windowId = window.id; + try { + let window = await browser.windows.getCurrent(); + let windowId = window.id; browser.test.log("Create additional tab in window 1"); - return browser.tabs.create({windowId, url: "about:blank"}); - }).then(tab => { - tabId = tab.id; + let tab = await browser.tabs.create({windowId, url: "about:blank"}); + let tabId = tab.id; + browser.test.log("Create a new window, adopting the new tab"); // Note that we want to check against actual boolean values for // all of the `incognito` property tests. 
browser.test.assertEq(false, tab.incognito, "Tab is not private"); - return Promise.all([ - promiseTabAttached(), - browser.windows.create({tabId: tabId}), - ]); - }).then(([, window]) => { - browser.test.assertEq(false, window.incognito, "New window is not private"); - browser.test.assertEq(tabId, window.tabs[0].id, "tabs property populated correctly"); + { + let [, window] = await Promise.all([ + promiseTabAttached(), + browser.windows.create({tabId: tabId}), + ]); + browser.test.assertEq(false, window.incognito, "New window is not private"); + browser.test.assertEq(tabId, window.tabs[0].id, "tabs property populated correctly"); - browser.test.log("Close the new window"); - return browser.windows.remove(window.id); - }).then(() => { - browser.test.log("Create a new private window"); + browser.test.log("Close the new window"); + await browser.windows.remove(window.id); + } - return browser.windows.create({incognito: true}); - }).then(privateWindow => { - browser.test.assertEq(true, privateWindow.incognito, "Private window is private"); + { + browser.test.log("Create a new private window"); + let privateWindow = await browser.windows.create({incognito: true}); + browser.test.assertEq(true, privateWindow.incognito, "Private window is private"); - browser.test.log("Create additional tab in private window"); - return browser.tabs.create({windowId: privateWindow.id}).then(privateTab => { + browser.test.log("Create additional tab in private window"); + let privateTab = await browser.tabs.create({windowId: privateWindow.id}); browser.test.assertEq(true, privateTab.incognito, "Private tab is private"); browser.test.log("Create a new window, adopting the new private tab"); - - return Promise.all([ + let [, newWindow] = await Promise.all([ promiseTabAttached(), browser.windows.create({tabId: privateTab.id}), ]); - }).then(([, newWindow]) => { browser.test.assertEq(true, newWindow.incognito, "New private window is private"); browser.test.log("Close the new private window"); 
- return browser.windows.remove(newWindow.id); - }).then(() => { + await browser.windows.remove(newWindow.id); + browser.test.log("Close the private window"); - return browser.windows.remove(privateWindow.id); - }); - }).then(() => { - return browser.tabs.query({windowId, active: true}); - }).then(([tab]) => { - browser.test.log("Try to create a window with both a tab and a URL"); + await browser.windows.remove(privateWindow.id); + } + - return browser.windows.create({tabId: tab.id, url: "http://example.com/"}).then( - window => { - browser.test.fail("Create call should have failed"); - }, - error => { - browser.test.assertTrue(/`tabId` may not be used in conjunction with `url`/.test(error.message), - "Create call failed as expected"); - }).then(() => { - browser.test.log("Try to create a window with both a tab and an invalid incognito setting"); + browser.test.log("Try to create a window with both a tab and a URL"); + [tab] = await browser.tabs.query({windowId, active: true}); + await browser.test.assertRejects( + browser.windows.create({tabId: tab.id, url: "http://example.com/"}), + /`tabId` may not be used in conjunction with `url`/, + "Create call failed as expected"); - return browser.windows.create({tabId: tab.id, incognito: true}); - }).then( - window => { - browser.test.fail("Create call should have failed"); - }, - error => { - browser.test.assertTrue(/`incognito` property must match the incognito state of tab/.test(error.message), - "Create call failed as expected"); - }); - }).then(() => { - browser.test.log("Try to create a window with an invalid tabId"); + browser.test.log("Try to create a window with both a tab and an invalid incognito setting"); + await browser.test.assertRejects( + browser.windows.create({tabId: tab.id, incognito: true}), + /`incognito` property must match the incognito state of tab/, + "Create call failed as expected"); + - return browser.windows.create({tabId: 0}).then( - window => { - browser.test.fail("Create call should have 
failed"); - }, - error => { - browser.test.assertTrue(/Invalid tab ID: 0/.test(error.message), - "Create call failed as expected"); - } - ); - }).then(() => { + browser.test.log("Try to create a window with an invalid tabId"); + await browser.test.assertRejects( + browser.windows.create({tabId: 0}), + /Invalid tab ID: 0/, + "Create call failed as expected"); + + browser.test.log("Try to create a window with two URLs"); - - return Promise.all([ + [, , window] = await Promise.all([ // tabs.onUpdated can be invoked between the call of windows.create and // the invocation of its callback/promise, so set up the listeners // before creating the window. promiseTabUpdated("http://example.com/"), promiseTabUpdated("http://example.org/"), browser.windows.create({url: ["http://example.com/", "http://example.org/"]}), ]); - }).then(([, , window]) => { browser.test.assertEq(2, window.tabs.length, "2 tabs were opened in new window"); browser.test.assertEq("about:blank", window.tabs[0].url, "about:blank, page not loaded yet"); browser.test.assertEq("about:blank", window.tabs[1].url, "about:blank, page not loaded yet"); - return browser.windows.get(window.id, {populate: true}); - }).then(window => { + window = await browser.windows.get(window.id, {populate: true}); + browser.test.assertEq(2, window.tabs.length, "2 tabs were opened in new window"); browser.test.assertEq("http://example.com/", window.tabs[0].url, "Correct URL was loaded in tab 1"); browser.test.assertEq("http://example.org/", window.tabs[1].url, "Correct URL was loaded in tab 2"); - return browser.windows.remove(window.id); - }).then(() => { + + await browser.windows.remove(window.id); + browser.test.notifyPass("window-create"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("window-create"); - }); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": ["tabs"], }, background,
--- a/browser/components/extensions/test/browser/browser_ext_windows_create_url.js +++ b/browser/components/extensions/test/browser/browser_ext_windows_create_url.js @@ -3,17 +3,17 @@ "use strict"; add_task(function* testWindowCreate() { let extension = ExtensionTestUtils.loadExtension({ manifest: { permissions: ["tabs"], }, - background() { + background: async function() { const EXTENSION_URL = browser.runtime.getURL("test.html"); const REMOTE_URL = browser.runtime.getURL("test.html"); let windows = new class extends Map { // eslint-disable-line new-parens get(id) { if (!this.has(id)) { let window = { tabs: new Map(), @@ -35,47 +35,47 @@ add_task(function* testWindowCreate() { window.tabs.set(tab.index, tab); if (window.tabs.size === window.expectedTabs) { window.resolvePromise(window); } } }); - function create(options) { - return browser.windows.create(options).then(window => { - let win = windows.get(window.id); + async function create(options) { + let window = await browser.windows.create(options); + let win = windows.get(window.id); - win.expectedTabs = Array.isArray(options.url) ? options.url.length : 1; + win.expectedTabs = Array.isArray(options.url) ? 
options.url.length : 1; - return win.promise; - }); + return win.promise; } - Promise.all([ - create({url: REMOTE_URL}), - create({url: "test.html"}), - create({url: EXTENSION_URL}), - create({url: [REMOTE_URL, "test.html", EXTENSION_URL]}), - ]).then(windows => { + try { + let windows = await Promise.all([ + create({url: REMOTE_URL}), + create({url: "test.html"}), + create({url: EXTENSION_URL}), + create({url: [REMOTE_URL, "test.html", EXTENSION_URL]}), + ]); browser.test.assertEq(REMOTE_URL, windows[0].tabs.get(0).url, "Single, absolute, remote URL"); browser.test.assertEq(REMOTE_URL, windows[1].tabs.get(0).url, "Single, relative URL"); browser.test.assertEq(REMOTE_URL, windows[2].tabs.get(0).url, "Single, absolute, extension URL"); browser.test.assertEq(REMOTE_URL, windows[3].tabs.get(0).url, "url[0]: Absolute, remote URL"); browser.test.assertEq(EXTENSION_URL, windows[3].tabs.get(1).url, "url[1]: Relative URL"); browser.test.assertEq(EXTENSION_URL, windows[3].tabs.get(2).url, "url[2]: Absolute, extension URL"); - }).then(() => { + browser.test.notifyPass("window-create-url"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("window-create-url"); - }); + } }, files: { "test.html": `<DOCTYPE html><html><head><meta charset="utf-8"></head></html>`, }, }); yield extension.startup();
--- a/browser/components/extensions/test/browser/browser_ext_windows_events.js +++ b/browser/components/extensions/test/browser/browser_ext_windows_events.js @@ -1,50 +1,50 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; SimpleTest.requestCompleteLog(); add_task(function* testWindowsEvents() { function background() { - browser.windows.onCreated.addListener(function listener(window) { + browser.windows.onCreated.addListener(window => { browser.test.log(`onCreated: windowId=${window.id}`); browser.test.assertTrue(Number.isInteger(window.id), "Window object's id is an integer"); browser.test.assertEq("normal", window.type, "Window object returned with the correct type"); browser.test.sendMessage("window-created", window.id); }); let lastWindowId, os; - browser.windows.onFocusChanged.addListener(function listener(windowId) { + browser.windows.onFocusChanged.addListener(async windowId => { browser.test.log(`onFocusChange: windowId=${windowId} lastWindowId=${lastWindowId}`); if (windowId === browser.windows.WINDOW_ID_NONE && os === "linux") { browser.test.log("Ignoring a superfluous WINDOW_ID_NONE (blur) event on Linux"); return; } browser.test.assertTrue(lastWindowId !== windowId, "onFocusChanged fired once for the given window"); lastWindowId = windowId; browser.test.assertTrue(Number.isInteger(windowId), "windowId is an integer"); - browser.windows.getLastFocused().then(window => { - browser.test.assertEq(windowId, window.id, - "Last focused window has the correct id"); - browser.test.sendMessage(`window-focus-changed`, window.id); - }); + let window = await browser.windows.getLastFocused(); + + browser.test.assertEq(windowId, window.id, + "Last focused window has the correct id"); + browser.test.sendMessage(`window-focus-changed`, window.id); }); - browser.windows.onRemoved.addListener(function listener(windowId) { + browser.windows.onRemoved.addListener(windowId => { browser.test.log(`onRemoved: 
windowId=${windowId}`); browser.test.assertTrue(Number.isInteger(windowId), "windowId is an integer"); browser.test.sendMessage(`window-removed`, windowId); browser.test.notifyPass("windows.events"); });
--- a/browser/components/extensions/test/browser/browser_ext_windows_size.js +++ b/browser/components/extensions/test/browser/browser_ext_windows_size.js @@ -1,15 +1,15 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* testWindowCreate() { let extension = ExtensionTestUtils.loadExtension({ - background() { + async background() { let _checkWindowPromise; browser.test.onMessage.addListener((msg, arg) => { if (msg == "checked-window") { _checkWindowPromise.resolve(arg); _checkWindowPromise = null; } }); @@ -23,83 +23,73 @@ add_task(function* testWindowCreate() { const KEYS = ["left", "top", "width", "height"]; function checkGeom(expected, actual) { for (let key of KEYS) { browser.test.assertEq(expected[key], actual[key], `Expected '${key}' value`); } } let windowId; - function checkWindow(expected, retries = 5) { - return getWindowSize().then(geom => { - if (retries && KEYS.some(key => expected[key] != geom[key])) { - browser.test.log(`Got mismatched size (${JSON.stringify(expected)} != ${JSON.stringify(geom)}). ` + - `Retrying after a short delay.`); + async function checkWindow(expected, retries = 5) { + let geom = await getWindowSize(); + + if (retries && KEYS.some(key => expected[key] != geom[key])) { + browser.test.log(`Got mismatched size (${JSON.stringify(expected)} != ${JSON.stringify(geom)}). 
` + + `Retrying after a short delay.`); + + await new Promise(resolve => setTimeout(resolve, 200)); - return new Promise(resolve => { - setTimeout(resolve, 200); - }).then(() => { - return checkWindow(expected, retries - 1); - }); - } + return checkWindow(expected, retries - 1); + } + + browser.test.log(`Check actual window size`); + checkGeom(expected, geom); - browser.test.log(`Check actual window size`); - checkGeom(expected, geom); + browser.test.log("Check API-reported window size"); - browser.test.log("Check API-reported window size"); - return browser.windows.get(windowId).then(geom => { - checkGeom(expected, geom); - }); - }); + geom = await browser.windows.get(windowId); + + checkGeom(expected, geom); } - let geom = {left: 100, top: 100, width: 500, height: 300}; + try { + let geom = {left: 100, top: 100, width: 500, height: 300}; - return browser.windows.create(geom).then(window => { + let window = await browser.windows.create(geom); windowId = window.id; - return checkWindow(geom); - }).then(() => { + await checkWindow(geom); + let update = {left: 150, width: 600}; Object.assign(geom, update); - - return browser.windows.update(windowId, update); - }).then(() => { - return checkWindow(geom); - }).then(() => { - let update = {top: 150, height: 400}; - Object.assign(geom, update); + await browser.windows.update(windowId, update); + await checkWindow(geom); - return browser.windows.update(windowId, update); - }).then(() => { - return checkWindow(geom); - }).then(() => { - geom = {left: 200, top: 200, width: 800, height: 600}; + update = {top: 150, height: 400}; + Object.assign(geom, update); + await browser.windows.update(windowId, update); + await checkWindow(geom); - return browser.windows.update(windowId, geom); - }).then(() => { - return checkWindow(geom); - }).then(() => { - return browser.runtime.getPlatformInfo(); - }).then((platformInfo) => { + geom = {left: 200, top: 200, width: 800, height: 600}; + await browser.windows.update(windowId, geom); + 
await checkWindow(geom); + + let platformInfo = await browser.runtime.getPlatformInfo(); if (platformInfo.os != "linux") { geom = {left: -50, top: -50, width: 800, height: 600}; - - return browser.windows.update(windowId, geom).then(() => { - return checkWindow(geom); - }); + await browser.windows.update(windowId, geom); + await checkWindow(geom); } - }).then(() => { - return browser.windows.remove(windowId); - }).then(() => { + + await browser.windows.remove(windowId); browser.test.notifyPass("window-size"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("window-size"); - }); + } }, }); let latestWindow; let windowListener = (window, topic) => { if (topic == "domwindowopened") { latestWindow = window; }
--- a/browser/components/extensions/test/browser/browser_ext_windows_update.js +++ b/browser/components/extensions/test/browser/browser_ext_windows_update.js @@ -46,17 +46,17 @@ add_task(function* () { yield extension.unload(); yield BrowserTestUtils.closeWindow(window2); }); add_task(function* testWindowUpdate() { let extension = ExtensionTestUtils.loadExtension({ - background() { + async background() { let _checkWindowPromise; browser.test.onMessage.addListener(msg => { if (msg == "checked-window") { _checkWindowPromise.resolve(); _checkWindowPromise = null; } }); @@ -64,49 +64,53 @@ add_task(function* testWindowUpdate() { function checkWindow(expected) { return new Promise(resolve => { _checkWindowPromise = {resolve}; browser.test.sendMessage("check-window", expected); }); } let currentWindowId; - function updateWindow(windowId, params, expected) { - return browser.windows.update(windowId, params).then(window => { - browser.test.assertEq(currentWindowId, window.id, "Expected WINDOW_ID_CURRENT to refer to the same window"); - for (let key of Object.keys(params)) { - if (key == "state" && os == "mac" && params.state == "normal") { - // OS-X doesn't have a hard distinction between "normal" and - // "maximized" states. - browser.test.assertTrue(window.state == "normal" || window.state == "maximized", - `Expected window.state (currently ${window.state}) to be "normal" but will accept "maximized"`); - } else { - browser.test.assertEq(params[key], window[key], `Got expected value for window.${key}`); - } + async function updateWindow(windowId, params, expected) { + let window = await browser.windows.update(windowId, params); + + browser.test.assertEq(currentWindowId, window.id, "Expected WINDOW_ID_CURRENT to refer to the same window"); + for (let key of Object.keys(params)) { + if (key == "state" && os == "mac" && params.state == "normal") { + // OS-X doesn't have a hard distinction between "normal" and + // "maximized" states. 
+ browser.test.assertTrue(window.state == "normal" || window.state == "maximized", + `Expected window.state (currently ${window.state}) to be "normal" but will accept "maximized"`); + } else { + browser.test.assertEq(params[key], window[key], `Got expected value for window.${key}`); } + } - return checkWindow(expected); - }); + return checkWindow(expected); } - let windowId = browser.windows.WINDOW_ID_CURRENT; + try { + let windowId = browser.windows.WINDOW_ID_CURRENT; + + ({os} = await browser.runtime.getPlatformInfo()); + + let window = await browser.windows.getCurrent(); + currentWindowId = window.id; - browser.runtime.getPlatformInfo().then(info => { os = info.os; }) - .then(() => browser.windows.getCurrent().then(window => { currentWindowId = window.id; })) - .then(() => updateWindow(windowId, {state: "maximized"}, {state: "STATE_MAXIMIZED"})) - .then(() => updateWindow(windowId, {state: "minimized"}, {state: "STATE_MINIMIZED"})) - .then(() => updateWindow(windowId, {state: "normal"}, {state: "STATE_NORMAL"})) - .then(() => updateWindow(windowId, {state: "fullscreen"}, {state: "STATE_FULLSCREEN"})) - .then(() => updateWindow(windowId, {state: "normal"}, {state: "STATE_NORMAL"})) - .then(() => { + await updateWindow(windowId, {state: "maximized"}, {state: "STATE_MAXIMIZED"}); + await updateWindow(windowId, {state: "minimized"}, {state: "STATE_MINIMIZED"}); + await updateWindow(windowId, {state: "normal"}, {state: "STATE_NORMAL"}); + await updateWindow(windowId, {state: "fullscreen"}, {state: "STATE_FULLSCREEN"}); + await updateWindow(windowId, {state: "normal"}, {state: "STATE_NORMAL"}); + browser.test.notifyPass("window-update"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("window-update"); - }); + } }, }); extension.onMessage("check-window", expected => { if (expected.state != null) { let {windowState} = window; if (window.fullScreen) { windowState = window.STATE_FULLSCREEN; @@ -152,41 +156,34 @@ 
add_task(function* () { yield BrowserTestUtils.closeWindow(window2); }); // Tests that incompatible parameters can't be used together. add_task(function* testWindowUpdateParams() { let extension = ExtensionTestUtils.loadExtension({ - background() { - function* getCalls() { + async background() { + try { for (let state of ["minimized", "maximized", "fullscreen"]) { for (let param of ["left", "top", "width", "height"]) { let expected = `"state": "${state}" may not be combined with "left", "top", "width", or "height"`; let windowId = browser.windows.WINDOW_ID_CURRENT; - yield browser.windows.update(windowId, {state, [param]: 100}).then( - val => { - browser.test.fail(`Expected error but got "${val}" instead`); - }, - error => { - browser.test.assertTrue( - error.message.includes(expected), - `Got expected error (got: '${error.message}', expected: '${expected}'`); - }); + await browser.test.assertRejects( + browser.windows.update(windowId, {state, [param]: 100}), + RegExp(expected), + `Got expected error for create(${param}=100`); } } - } - Promise.all(getCalls()).then(() => { browser.test.notifyPass("window-update-params"); - }).catch(e => { + } catch (e) { browser.test.fail(`${e} :: ${e.stack}`); browser.test.notifyFail("window-update-params"); - }); + } }, }); yield extension.startup(); yield extension.awaitFinish("window-update-params"); yield extension.unload(); });
--- a/browser/components/extensions/test/browser/head_pageAction.js +++ b/browser/components/extensions/test/browser/head_pageAction.js @@ -7,71 +7,66 @@ function* runTests(options) { function background(getTests) { let tabs; let tests; // Gets the current details of the page action, and returns a // promise that resolves to an object containing them. - function getDetails() { - return new Promise(resolve => { - return browser.tabs.query({active: true, currentWindow: true}, resolve); - }).then(([tab]) => { - let tabId = tab.id; - browser.test.log(`Get details: tab={id: ${tabId}, url: ${JSON.stringify(tab.url)}}`); - return Promise.all([ - browser.pageAction.getTitle({tabId}), - browser.pageAction.getPopup({tabId})]); - }).then(details => { - return Promise.resolve({title: details[0], - popup: details[1]}); - }); + async function getDetails() { + let [tab] = await browser.tabs.query({active: true, currentWindow: true}); + let tabId = tab.id; + + browser.test.log(`Get details: tab={id: ${tabId}, url: ${JSON.stringify(tab.url)}}`); + + return { + title: await browser.pageAction.getTitle({tabId}), + popup: await browser.pageAction.getPopup({tabId}), + }; } // Runs the next test in the `tests` array, checks the results, // and passes control back to the outer test scope. function nextTest() { let test = tests.shift(); - test(expecting => { + test(async expecting => { function finish() { // Check that the actual icon has the expected values, then // run the next test. browser.test.sendMessage("nextTest", expecting, tests.length); } if (expecting) { // Check that the API returns the expected values, and then // run the next test. 
- getDetails().then(details => { - browser.test.assertEq(expecting.title, details.title, - "expected value from getTitle"); + let details = await getDetails(); + + browser.test.assertEq(expecting.title, details.title, + "expected value from getTitle"); - browser.test.assertEq(expecting.popup, details.popup, - "expected value from getPopup"); + browser.test.assertEq(expecting.popup, details.popup, + "expected value from getPopup"); + } - finish(); - }); - } else { - finish(); - } + finish(); }); } - function runTests() { + async function runTests() { tabs = []; tests = getTests(tabs); - browser.tabs.query({active: true, currentWindow: true}, resultTabs => { - tabs[0] = resultTabs[0].id; + let resultTabs = await browser.tabs.query({active: true, currentWindow: true}); - nextTest(); - }); + tabs[0] = resultTabs[0].id; + + nextTest(); } browser.test.onMessage.addListener((msg) => { if (msg == "runTests") { runTests(); } else if (msg == "runNextTest") { nextTest(); } else {
--- a/browser/components/originattributes/moz.build +++ b/browser/components/originattributes/moz.build @@ -3,10 +3,14 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. BROWSER_CHROME_MANIFESTS += [ 'test/browser/browser.ini', ] +MOCHITEST_MANIFESTS += [ + 'test/mochitest/mochitest.ini' +] + with Files('**'): BUG_COMPONENT = ('Firefox', 'OriginAttributes')
--- a/browser/components/originattributes/test/browser/browser_cache.js +++ b/browser/components/originattributes/test/browser/browser_cache.js @@ -216,16 +216,24 @@ function* doTest(aBrowser) { // The check function, which checks the number of cache entries. function* doCheck(aShouldIsolate, aInputA, aInputB) { let expectedEntryCount = 1; let data = []; data = data.concat(yield cacheDataForContext(LoadContextInfo.default)); data = data.concat(yield cacheDataForContext(LoadContextInfo.private)); data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(true, {}))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(false, { userContextId: 1 }))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(true, { userContextId: 1 }))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(false, { userContextId: 2 }))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(true, { userContextId: 2 }))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(false, { firstPartyDomain: "example.com" }))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(true, { firstPartyDomain: "example.com" }))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(false, { firstPartyDomain: "example.org" }))); + data = data.concat(yield cacheDataForContext(LoadContextInfo.custom(true, { firstPartyDomain: "example.org" }))); if (aShouldIsolate) { expectedEntryCount = 2; } for (let suffix of suffixes) { let foundEntryCount = countMatchingCacheEntries(data, "example.net", suffix); let result = (expectedEntryCount === foundEntryCount);
new file mode 100644 --- /dev/null +++ b/browser/components/originattributes/test/mochitest/file_empty.html @@ -0,0 +1,2 @@ +<h1>I'm just a support file</h1> +<p>I get loaded to do permission testing.</p> \ No newline at end of file
new file mode 100644 --- /dev/null +++ b/browser/components/originattributes/test/mochitest/mochitest.ini @@ -0,0 +1,5 @@ +[DEFAULT] +support-files = + file_empty.html + +[test_permissions_api.html]
new file mode 100644 --- /dev/null +++ b/browser/components/originattributes/test/mochitest/test_permissions_api.html @@ -0,0 +1,207 @@ +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<!DOCTYPE HTML> +<html> + +<head> + <meta charset="utf-8"> + <title>Test for Permissions API</title> + <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" href="/tests/SimpleTest/test.css"> +</head> + +<body> + <pre id="test"></pre> + <script type="application/javascript;version=1.8"> + /*globals SpecialPowers, SimpleTest, is, ok, */ + 'use strict'; + + const { + UNKNOWN_ACTION, + PROMPT_ACTION, + ALLOW_ACTION, + DENY_ACTION + } = SpecialPowers.Ci.nsIPermissionManager; + + SimpleTest.waitForExplicitFinish(); + + const PERMISSIONS = [{ + name: 'geolocation', + type: 'geo' + }, { + name: 'notifications', + type: 'desktop-notification' + }, { + name: 'push', + type: 'desktop-notification' + }, ]; + + const UNSUPPORTED_PERMISSIONS = [ + 'foobarbaz', // Not in spec, for testing only. + 'midi', + ]; + + // Create a closure, so that tests are run on the correct window object. 
+ function createPermissionTester(aWindow) { + return { + setPermissions(allow) { + const permissions = PERMISSIONS.map(({ type }) => { + return { + type, + allow, + 'context': aWindow.document + }; + }); + return new Promise((resolve) => { + SpecialPowers.popPermissions(() => { + SpecialPowers.pushPermissions(permissions, resolve); + }); + }); + }, + revokePermissions() { + const promisesToRevoke = PERMISSIONS.map(({ name }) => { + return aWindow.navigator.permissions + .revoke({ name }) + .then( + ({ state }) => is(state, 'prompt', `correct state for '${name}'`), + () => ok(false, `revoke should not have rejected for '${name}'`) + ); + }); + return Promise.all(promisesToRevoke); + }, + revokeUnsupportedPermissions() { + const promisesToRevoke = UNSUPPORTED_PERMISSIONS.map(({ name }) => { + return aWindow.navigator.permissions + .revoke({ name }) + .then( + () => ok(false, `revoke should not have resolved for '${name}'`), + error => is(error.name, 'TypeError', `revoke should have thrown TypeError for '${name}'`) + ); + }); + return Promise.all(promisesToRevoke); + }, + checkPermissions(state) { + const promisesToQuery = PERMISSIONS.map(({ name }) => { + return aWindow.navigator.permissions + .query({ name }) + .then( + () => is(state, state, `correct state for '${name}'`), + () => ok(false, `query should not have rejected for '${name}'`) + ); + }); + return Promise.all(promisesToQuery); + }, + checkUnsupportedPermissions() { + const promisesToQuery = UNSUPPORTED_PERMISSIONS.map(({ name }) => { + return aWindow.navigator.permissions + .query({ name }) + .then( + () => ok(false, `query should not have resolved for '${name}'`), + error => { + is(error.name, 'TypeError', + `query should have thrown TypeError for '${name}'`); + } + ); + }); + return Promise.all(promisesToQuery); + }, + promiseStateChanged(name, state) { + return aWindow.navigator.permissions + .query({ name }) + .then(status => { + return new Promise( resolve => { + status.onchange = () => { + 
status.onchange = null; + is(status.state, state, `state changed for '${name}'`); + resolve(); + }; + }); + }, + () => ok(false, `query should not have rejected for '${name}'`)); + }, + testStatusOnChange() { + return new Promise((resolve) => { + SpecialPowers.popPermissions(() => { + const permission = 'geolocation'; + const promiseGranted = this.promiseStateChanged(permission, 'granted'); + this.setPermissions(ALLOW_ACTION); + promiseGranted.then(() => { + const promisePrompt = this.promiseStateChanged(permission, 'prompt'); + SpecialPowers.popPermissions(); + return promisePrompt; + }).then(resolve); + }); + }); + }, + testInvalidQuery() { + return aWindow.navigator.permissions + .query({ name: 'invalid' }) + .then( + () => ok(false, 'invalid query should not have resolved'), + () => ok(true, 'invalid query should have rejected') + ); + }, + testInvalidRevoke() { + return aWindow.navigator.permissions + .revoke({ name: 'invalid' }) + .then( + () => ok(false, 'invalid revoke should not have resolved'), + () => ok(true, 'invalid revoke should have rejected') + ); + }, + }; + } + + function enablePrefs() { + const ops = { + 'set': [ + ['dom.permissions.revoke.enable', true], + ['privacy.firstparty.isolate', true], + ], + }; + return SpecialPowers.pushPrefEnv(ops); + } + + function createIframe() { + return new Promise((resolve) => { + const iframe = document.createElement('iframe'); + iframe.src = 'file_empty.html'; + iframe.onload = () => resolve(iframe.contentWindow); + document.body.appendChild(iframe); + }); + } + debugger; + window.onload = () => { + enablePrefs() + .then(createIframe) + .then(createPermissionTester) + .then((tester) => { + return tester + .checkUnsupportedPermissions() + .then(() => tester.setPermissions(UNKNOWN_ACTION)) + .then(() => tester.checkPermissions('prompt')) + .then(() => tester.setPermissions(PROMPT_ACTION)) + .then(() => tester.checkPermissions('prompt')) + .then(() => tester.setPermissions(ALLOW_ACTION)) + .then(() => 
tester.checkPermissions('granted')) + .then(() => tester.setPermissions(DENY_ACTION)) + .then(() => tester.checkPermissions('denied')) + .then(() => tester.testStatusOnChange()) + .then(() => tester.testInvalidQuery()) + .then(() => tester.revokeUnsupportedPermissions()) + .then(() => tester.revokePermissions()) + .then(() => tester.checkPermissions('prompt')) + .then(() => tester.testInvalidRevoke()); + }) + .then(SimpleTest.finish) + .catch((e) => { + ok(false, `Unexpected error ${e}`); + SimpleTest.finish(); + }); + }; + </script> +</body> + +</html>
--- a/browser/components/safebrowsing/content/test/head.js +++ b/browser/components/safebrowsing/content/test/head.js @@ -28,18 +28,16 @@ function promiseTabLoadEvent(tab, url, e info(`Skipping spurious load event for ${loadedUrl}`); return false; } info("Tab event received: load"); return true; } - // Create two promises: one resolved from the content process when the page - // loads and one that is rejected if we take too long to load the url. let loaded; if (eventType === "load") { loaded = BrowserTestUtils.browserLoaded(tab.linkedBrowser, false, handle); } else { // No need to use handle. loaded = BrowserTestUtils.waitForContentEvent(tab.linkedBrowser, eventType, true, undefined, true);
--- a/browser/components/search/test/head.js +++ b/browser/components/search/test/head.js @@ -107,18 +107,16 @@ function promiseTabLoadEvent(tab, url) info(`Skipping spurious load event for ${loadedUrl}`); return false; } info("Tab event received: load"); return true; } - // Create two promises: one resolved from the content process when the page - // loads and one that is rejected if we take too long to load the url. let loaded = BrowserTestUtils.browserLoaded(tab.linkedBrowser, false, handle); if (url) BrowserTestUtils.loadURI(tab.linkedBrowser, url); return loaded; }
--- a/browser/locales/en-US/chrome/browser/tabbrowser.properties +++ b/browser/locales/en-US/chrome/browser/tabbrowser.properties @@ -12,16 +12,17 @@ # tabs.downloading = Firefox is downloading a file for a helper application (PDF) tabs.connecting=Connecting… tabs.encryptingConnection=Securing connection… tabs.searching=Searching… tabs.loading=Loading… tabs.waiting=Waiting… tabs.downloading=Downloading… +tabs.restoreLastTabs=Restore Tabs From Last Time tabs.emptyTabTitle=New Tab tabs.closeTab=Close Tab tabs.close=Close tabs.closeWarningTitle=Confirm close # LOCALIZATION NOTE (tabs.closeWarningMultiple): # Semicolon-separated list of plural forms. See: # http://developer.mozilla.org/en/docs/Localization_and_Plurals # The singular form is not considered since this string is used only for
--- a/browser/themes/osx/browser.css +++ b/browser/themes/osx/browser.css @@ -142,22 +142,16 @@ } /** End titlebar **/ #main-window[chromehidden~="toolbar"][chromehidden~="location"][chromehidden~="directories"] { border-top: 1px solid rgba(0,0,0,0.65); } -/* Because of -moz-box-align: center above, separators will be invisible unless - we set their min-height. See bug 583510 for more information. */ -toolbarseparator { - min-height: 22px; -} - #navigator-toolbox > toolbar:not(#TabsToolbar):not(#nav-bar):not(:-moz-lwtheme) { -moz-appearance: none; background: url(chrome://browser/skin/Toolbar-background-noise.png) hsl(0,0%,83%); } /* remove noise texture on Yosemite */ @media (-moz-mac-yosemite-theme) { #navigator-toolbox > toolbar:not(#TabsToolbar):not(#nav-bar):not(:-moz-lwtheme) {
--- a/build/gecko_templates.mozbuild +++ b/build/gecko_templates.mozbuild @@ -53,22 +53,24 @@ def GeckoBinary(linkage='dependent', msv ] elif linkage != None: error('`linkage` must be "dependent", "standalone" or None') if mozglue: LDFLAGS += CONFIG['MOZ_GLUE_WRAP_LDFLAGS'] if mozglue == 'program': USE_LIBS += ['mozglue'] + DEFINES['MOZ_HAS_MOZGLUE'] = True if CONFIG['MOZ_GLUE_IN_PROGRAM']: if CONFIG['GNU_CC']: LDFLAGS += ['-rdynamic'] if CONFIG['MOZ_MEMORY']: USE_LIBS += ['memory'] elif mozglue == 'library': + LIBRARY_DEFINES['MOZ_HAS_MOZGLUE'] = True if not CONFIG['MOZ_GLUE_IN_PROGRAM']: USE_LIBS += ['mozglue'] else: error('`mozglue` must be "program" or "library"') if not CONFIG['JS_STANDALONE']: USE_LIBS += [ 'fallible',
--- a/config/external/fdlibm/moz.build +++ b/config/external/fdlibm/moz.build @@ -1,14 +1,12 @@ # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- # vim: set filetype=python: # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. -Library('fdlibm') - with Files('**'): BUG_COMPONENT = ('Core', 'JavaScript Engine') DIRS += [ '../../../modules/fdlibm', ]
--- a/devtools/client/netmonitor/test/browser_net_content-type.js +++ b/devtools/client/netmonitor/test/browser_net_content-type.js @@ -19,17 +19,17 @@ function* content_type_test(isHTTPS) { let { tab, monitor } = yield initNetMonitor(pageURL); info("Starting test... "); let { document, Editor, NetMonitorView } = monitor.panelWin; let { RequestsMenu } = NetMonitorView; RequestsMenu.lazyUpdate = false; - let wait = waitForNetworkEvents(monitor, 8); + let wait = waitForNetworkEvents(monitor, CONTENT_TYPE_WITHOUT_CACHE_REQUESTS); yield ContentTask.spawn(tab.linkedBrowser, {}, function* () { content.wrappedJSObject.performRequests(); }); yield wait; let okStatus = isHTTPS ? "Connected" : "OK"; verifyRequestItemTarget(RequestsMenu.getItemAtIndex(0),
--- a/devtools/client/netmonitor/test/browser_net_copy_image_as_data_uri.js +++ b/devtools/client/netmonitor/test/browser_net_copy_image_as_data_uri.js @@ -11,17 +11,17 @@ add_task(function* () { let { tab, monitor } = yield initNetMonitor(CONTENT_TYPE_WITHOUT_CACHE_URL); info("Starting test... "); let { NetMonitorView } = monitor.panelWin; let { RequestsMenu } = NetMonitorView; RequestsMenu.lazyUpdate = false; - let wait = waitForNetworkEvents(monitor, 8); + let wait = waitForNetworkEvents(monitor, CONTENT_TYPE_WITHOUT_CACHE_REQUESTS); yield ContentTask.spawn(tab.linkedBrowser, {}, function* () { content.wrappedJSObject.performRequests(); }); yield wait; let requestItem = RequestsMenu.getItemAtIndex(5); RequestsMenu.selectedItem = requestItem;
--- a/devtools/client/netmonitor/test/browser_net_copy_response.js +++ b/devtools/client/netmonitor/test/browser_net_copy_response.js @@ -13,17 +13,17 @@ add_task(function* () { const EXPECTED_RESULT = '{ "greeting": "Hello JSON!" }'; let { NetMonitorView } = monitor.panelWin; let { RequestsMenu } = NetMonitorView; RequestsMenu.lazyUpdate = false; - let wait = waitForNetworkEvents(monitor, 8); + let wait = waitForNetworkEvents(monitor, CONTENT_TYPE_WITHOUT_CACHE_REQUESTS); yield ContentTask.spawn(tab.linkedBrowser, {}, function* () { content.wrappedJSObject.performRequests(); }); yield wait; let requestItem = RequestsMenu.getItemAtIndex(3); RequestsMenu.selectedItem = requestItem;
--- a/devtools/client/netmonitor/test/browser_net_icon-preview.js +++ b/devtools/client/netmonitor/test/browser_net_icon-preview.js @@ -39,17 +39,17 @@ add_task(function* () { info("Checking the image thumbnail after a reload."); checkImageThumbnail(); yield teardown(monitor); function waitForEvents() { return promise.all([ - waitForNetworkEvents(monitor, 8), + waitForNetworkEvents(monitor, CONTENT_TYPE_WITHOUT_CACHE_REQUESTS), monitor.panelWin.once(EVENTS.RESPONSE_IMAGE_THUMBNAIL_DISPLAYED) ]); } function performRequests() { return ContentTask.spawn(tab.linkedBrowser, {}, function* () { content.wrappedJSObject.performRequests(); });
--- a/devtools/client/netmonitor/test/browser_net_image-tooltip.js +++ b/devtools/client/netmonitor/test/browser_net_image-tooltip.js @@ -10,28 +10,28 @@ add_task(function* test() { let { tab, monitor } = yield initNetMonitor(CONTENT_TYPE_WITHOUT_CACHE_URL); info("Starting test... "); let { $, EVENTS, ACTIVITY_TYPE, NetMonitorView, NetMonitorController } = monitor.panelWin; let { RequestsMenu } = NetMonitorView; RequestsMenu.lazyUpdate = true; - let onEvents = waitForNetworkEvents(monitor, 8); + let onEvents = waitForNetworkEvents(monitor, CONTENT_TYPE_WITHOUT_CACHE_REQUESTS); let onThumbnail = monitor.panelWin.once(EVENTS.RESPONSE_IMAGE_THUMBNAIL_DISPLAYED); yield performRequests(); yield onEvents; yield onThumbnail; info("Checking the image thumbnail after a few requests were made..."); yield showTooltipAndVerify(RequestsMenu.tooltip, RequestsMenu.items[5]); - // 7 XHRs as before + 1 extra document reload - onEvents = waitForNetworkEvents(monitor, 8); + // +1 extra document reload + onEvents = waitForNetworkEvents(monitor, CONTENT_TYPE_WITHOUT_CACHE_REQUESTS + 1); onThumbnail = monitor.panelWin.once(EVENTS.RESPONSE_IMAGE_THUMBNAIL_DISPLAYED); info("Reloading the debuggee and performing all requests again..."); yield NetMonitorController.triggerActivity(ACTIVITY_TYPE.RELOAD.WITH_CACHE_ENABLED); yield performRequests(); yield onEvents; yield onThumbnail;
--- a/devtools/client/netmonitor/test/head.js +++ b/devtools/client/netmonitor/test/head.js @@ -16,16 +16,17 @@ const EXAMPLE_URL = "http://example.com/ const HTTPS_EXAMPLE_URL = "https://example.com/browser/devtools/client/netmonitor/test/"; const API_CALLS_URL = EXAMPLE_URL + "html_api-calls-test-page.html"; const SIMPLE_URL = EXAMPLE_URL + "html_simple-test-page.html"; const NAVIGATE_URL = EXAMPLE_URL + "html_navigate-test-page.html"; const CONTENT_TYPE_URL = EXAMPLE_URL + "html_content-type-test-page.html"; const CONTENT_TYPE_WITHOUT_CACHE_URL = EXAMPLE_URL + "html_content-type-without-cache-test-page.html"; const HTTPS_CONTENT_TYPE_WITHOUT_CACHE_URL = HTTPS_EXAMPLE_URL + "html_content-type-without-cache-test-page.html"; +const CONTENT_TYPE_WITHOUT_CACHE_REQUESTS = 8; const CYRILLIC_URL = EXAMPLE_URL + "html_cyrillic-test-page.html"; const STATUS_CODES_URL = EXAMPLE_URL + "html_status-codes-test-page.html"; const POST_DATA_URL = EXAMPLE_URL + "html_post-data-test-page.html"; const POST_JSON_URL = EXAMPLE_URL + "html_post-json-test-page.html"; const POST_RAW_URL = EXAMPLE_URL + "html_post-raw-test-page.html"; const POST_RAW_WITH_HEADERS_URL = EXAMPLE_URL + "html_post-raw-with-headers-test-page.html"; const PARAMS_URL = EXAMPLE_URL + "html_params-test-page.html"; const JSONP_URL = EXAMPLE_URL + "html_jsonp-test-page.html";
--- a/devtools/client/storage/test/storage-listings.html +++ b/devtools/client/storage/test/storage-listings.html @@ -14,17 +14,17 @@ Bug 970517 - Storage inspector front end "use strict"; let partialHostname = location.hostname.match(/^[^.]+(\..*)$/)[1]; let cookieExpiresTime1 = 2000000000000; let cookieExpiresTime2 = 2000000001000; // Setting up some cookies to eat. document.cookie = "c1=foobar; expires=" + new Date(cookieExpiresTime1).toGMTString() + "; path=/browser"; document.cookie = "cs2=sessionCookie; path=/; domain=" + partialHostname; -document.cookie = "c3=foobar-2; secure=true; expires=" + +document.cookie = "c3=foobar-2; expires=" + new Date(cookieExpiresTime2).toGMTString() + "; path=/"; // ... and some local storage items .. localStorage.setItem("ls1", "foobar"); localStorage.setItem("ls2", "foobar-2"); // ... and finally some session storage items too sessionStorage.setItem("ss1", "foobar-3"); dump("added cookies and storage from main page\n");
--- a/devtools/client/storage/test/storage-unsecured-iframe.html +++ b/devtools/client/storage/test/storage-unsecured-iframe.html @@ -4,16 +4,16 @@ Iframe for testing multiple host detetion in storage actor --> <head> <meta charset="utf-8"> </head> <body> <script> "use strict"; -document.cookie = "uc1=foobar; domain=.example.org; path=/; secure=true"; +document.cookie = "uc1=foobar; domain=.example.org; path=/"; localStorage.setItem("iframe-u-ls1", "foobar"); sessionStorage.setItem("iframe-u-ss1", "foobar1"); sessionStorage.setItem("iframe-u-ss2", "foobar2"); dump("added cookies and storage from unsecured iframe\n"); </script> </body> </html>
--- a/devtools/client/themes/computed.css +++ b/devtools/client/themes/computed.css @@ -31,22 +31,22 @@ align-items: center; } #browser-style-checkbox { /* Bug 1200073 - extra space before the browser styles checkbox so they aren't squished together in a small window. Put also an extra space after. */ margin-inline-start: 5px; - margin-inline-end: 5px; - + margin-inline-end: 0; } #browser-style-checkbox-label { - margin-right: 5px; + padding-inline-start: 5px; + margin-inline-end: 5px; } #propertyContainer { -moz-user-select: text; overflow-y: auto; overflow-x: hidden; flex: auto; }
--- a/devtools/server/tests/browser/browser_storage_listings.js +++ b/devtools/server/tests/browser/browser_storage_listings.js @@ -30,17 +30,17 @@ const storeMap = { }, { name: "c3", value: "foobar-2", expires: 2000000001000, path: "/", host: "test1.example.org", isDomain: false, - isSecure: true, + isSecure: false, }, { name: "uc1", value: "foobar", host: ".example.org", path: "/", expires: 0, isDomain: true, @@ -339,17 +339,24 @@ function* testStores(data) { function testCookies(cookiesActor) { is(Object.keys(cookiesActor.hosts).length, 2, "Correct number of host entries for cookies"); return testCookiesObjects(0, cookiesActor.hosts, cookiesActor); } var testCookiesObjects = Task.async(function* (index, hosts, cookiesActor) { let host = Object.keys(hosts)[index]; let matchItems = data => { - is(data.total, storeMap.cookies[host].length, + let cookiesLength = 0; + for (let secureCookie of storeMap.cookies[host]) { + if (secureCookie.isSecure) { + ++cookiesLength; + } + } + // Any secure cookies did not get stored in the database. + is(data.total, storeMap.cookies[host].length - cookiesLength, "Number of cookies in host " + host + " matches"); for (let item of data.data) { let found = false; for (let toMatch of storeMap.cookies[host]) { if (item.name == toMatch.name) { found = true; ok(true, "Found cookie " + item.name + " in response"); is(item.value.str, toMatch.value, "The value matches.");
--- a/dom/base/nsDocument.cpp +++ b/dom/base/nsDocument.cpp @@ -12263,16 +12263,17 @@ nsIDocument::InlineScriptAllowedByCSP() { nsCOMPtr<nsIContentSecurityPolicy> csp; nsresult rv = NodePrincipal()->GetCsp(getter_AddRefs(csp)); NS_ENSURE_SUCCESS(rv, true); bool allowsInlineScript = true; if (csp) { nsresult rv = csp->GetAllowsInline(nsIContentPolicy::TYPE_SCRIPT, EmptyString(), // aNonce + false, // parserCreated EmptyString(), // FIXME get script sample (bug 1314567) 0, // aLineNumber &allowsInlineScript); NS_ENSURE_SUCCESS(rv, true); } return allowsInlineScript; }
--- a/dom/base/nsScriptLoader.cpp +++ b/dom/base/nsScriptLoader.cpp @@ -1377,24 +1377,25 @@ CSPAllowsInlineScript(nsIScriptElement * // no CSP --> allow return true; } // query the nonce nsCOMPtr<nsIContent> scriptContent = do_QueryInterface(aElement); nsAutoString nonce; scriptContent->GetAttr(kNameSpaceID_None, nsGkAtoms::nonce, nonce); + bool parserCreated = aElement->GetParserCreated() != mozilla::dom::NOT_FROM_PARSER; // query the scripttext nsAutoString scriptText; aElement->GetScriptText(scriptText); bool allowInlineScript = false; rv = csp->GetAllowsInline(nsIContentPolicy::TYPE_SCRIPT, - nonce, scriptText, + nonce, parserCreated, scriptText, aElement->GetScriptLineNumber(), &allowInlineScript); return allowInlineScript; } nsScriptLoadRequest* nsScriptLoader::CreateLoadRequest(nsScriptKind aKind, nsIScriptElement* aElement,
--- a/dom/cache/CacheOpParent.cpp +++ b/dom/cache/CacheOpParent.cpp @@ -49,30 +49,30 @@ CacheOpParent::~CacheOpParent() void CacheOpParent::Execute(ManagerId* aManagerId) { NS_ASSERT_OWNINGTHREAD(CacheOpParent); MOZ_ASSERT(!mManager); MOZ_ASSERT(!mVerifier); - RefPtr<Manager> manager; - nsresult rv = Manager::GetOrCreate(aManagerId, getter_AddRefs(manager)); + RefPtr<cache::Manager> manager; + nsresult rv = cache::Manager::GetOrCreate(aManagerId, getter_AddRefs(manager)); if (NS_WARN_IF(NS_FAILED(rv))) { ErrorResult result(rv); Unused << Send__delete__(this, result, void_t()); result.SuppressException(); return; } Execute(manager); } void -CacheOpParent::Execute(Manager* aManager) +CacheOpParent::Execute(cache::Manager* aManager) { NS_ASSERT_OWNINGTHREAD(CacheOpParent); MOZ_ASSERT(!mManager); MOZ_ASSERT(!mVerifier); mManager = aManager; // Handle put op
--- a/dom/cache/CacheOpParent.h +++ b/dom/cache/CacheOpParent.h @@ -32,17 +32,17 @@ public: CacheOpParent(mozilla::ipc::PBackgroundParent* aIpcManager, Namespace aNamespace, const CacheOpArgs& aOpArgs); ~CacheOpParent(); void Execute(ManagerId* aManagerId); void - Execute(Manager* aManager); + Execute(cache::Manager* aManager); void WaitForVerification(PrincipalVerifier* aVerifier); private: // PCacheOpParent methods virtual void ActorDestroy(ActorDestroyReason aReason) override; @@ -62,17 +62,17 @@ private: // utility methods already_AddRefed<nsIInputStream> DeserializeCacheStream(const CacheReadStreamOrVoid& aStreamOrVoid); mozilla::ipc::PBackgroundParent* mIpcManager; const CacheId mCacheId; const Namespace mNamespace; const CacheOpArgs mOpArgs; - RefPtr<Manager> mManager; + RefPtr<cache::Manager> mManager; RefPtr<PrincipalVerifier> mVerifier; NS_DECL_OWNINGTHREAD }; } // namespace cache } // namespace dom } // namespace mozilla
--- a/dom/cache/CacheTypes.ipdlh +++ b/dom/cache/CacheTypes.ipdlh @@ -45,22 +45,22 @@ union CacheReadStreamOrVoid CacheReadStream; }; struct HeadersEntry { nsCString name; nsCString value; }; - struct CacheRequest { nsCString method; nsCString urlWithoutQuery; nsCString urlQuery; + nsCString urlFragment; HeadersEntry[] headers; HeadersGuardEnum headersGuard; nsString referrer; ReferrerPolicy referrerPolicy; RequestMode mode; RequestCredentials credentials; CacheReadStreamOrVoid body; uint32_t contentPolicyType;
--- a/dom/cache/DBSchema.cpp +++ b/dom/cache/DBSchema.cpp @@ -27,30 +27,25 @@ #include "nsNetCID.h" #include "nsPrintfCString.h" #include "nsTArray.h" namespace mozilla { namespace dom { namespace cache { namespace db { - const int32_t kFirstShippedSchemaVersion = 15; - namespace { - // Update this whenever the DB schema is changed. -const int32_t kLatestSchemaVersion = 23; - +const int32_t kLatestSchemaVersion = 24; // --------- // The following constants define the SQL schema. These are defined in the // same order the SQL should be executed in CreateOrMigrateSchema(). They are // broken out as constants for convenient use in validation and migration. // --------- - // The caches table is the single source of truth about what Cache // objects exist for the origin. The contents of the Cache are stored // in the entries table that references back to caches. // // The caches table is also referenced from storage. Rows in storage // represent named Cache objects. There are cases, however, where // a Cache can still exist, but not be in a named Storage. For example, // when content is still using the Cache after CacheStorage::Delete() @@ -98,24 +93,23 @@ const char* const kTableEntries = "response_type INTEGER NOT NULL, " "response_status INTEGER NOT NULL, " "response_status_text TEXT NOT NULL, " "response_headers_guard INTEGER NOT NULL, " "response_body_id TEXT NULL, " "response_security_info_id INTEGER NULL REFERENCES security_info(id), " "response_principal_info TEXT NOT NULL, " "cache_id INTEGER NOT NULL REFERENCES caches(id) ON DELETE CASCADE, " - "request_redirect INTEGER NOT NULL, " "request_referrer_policy INTEGER NOT NULL, " - "request_integrity TEXT NOT NULL" + "request_integrity TEXT NOT NULL, " + "request_url_fragment TEXT NOT NULL" // New columns must be added at the end of table to migrate and // validate properly. ")"; - // Create an index to support the QueryCache() matching algorithm. 
This // needs to quickly find entries in a given Cache that match the request // URL. The url query is separated in order to support the ignoreSearch // option. Finally, we index hashes of the URL values instead of the // actual strings to avoid excessive disk bloat. The index will duplicate // the contents of the columsn in the index. The hash index will prune // the vast majority of values from the query result so that normal // scanning only has to be done on a few values to find an exact URL match. @@ -1650,16 +1644,17 @@ InsertEntry(mozIStorageConnection* aConn nsCOMPtr<mozIStorageStatement> state; rv = aConn->CreateStatement(NS_LITERAL_CSTRING( "INSERT INTO entries (" "request_method, " "request_url_no_query, " "request_url_no_query_hash, " "request_url_query, " "request_url_query_hash, " + "request_url_fragment, " "request_referrer, " "request_referrer_policy, " "request_headers_guard, " "request_mode, " "request_credentials, " "request_contentpolicytype, " "request_cache, " "request_redirect, " @@ -1674,16 +1669,17 @@ InsertEntry(mozIStorageConnection* aConn "response_principal_info, " "cache_id " ") VALUES (" ":request_method, " ":request_url_no_query, " ":request_url_no_query_hash, " ":request_url_query, " ":request_url_query_hash, " + ":request_url_fragment, " ":request_referrer, " ":request_referrer_policy, " ":request_headers_guard, " ":request_mode, " ":request_credentials, " ":request_contentpolicytype, " ":request_cache, " ":request_redirect, " @@ -1719,29 +1715,29 @@ InsertEntry(mozIStorageConnection* aConn rv = state->BindUTF8StringByName(NS_LITERAL_CSTRING("request_url_query"), aRequest.urlQuery()); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } nsAutoCString urlQueryHash; rv = HashCString(crypto, aRequest.urlQuery(), urlQueryHash); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - rv = state->BindUTF8StringAsBlobByName( NS_LITERAL_CSTRING("request_url_query_hash"), urlQueryHash); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } + rv = 
state->BindUTF8StringByName(NS_LITERAL_CSTRING("request_url_fragment"), + aRequest.urlFragment()); + if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } rv = state->BindStringByName(NS_LITERAL_CSTRING("request_referrer"), aRequest.referrer()); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - rv = state->BindInt32ByName(NS_LITERAL_CSTRING("request_referrer_policy"), static_cast<int32_t>(aRequest.referrerPolicy())); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - rv = state->BindInt32ByName(NS_LITERAL_CSTRING("request_headers_guard"), static_cast<int32_t>(aRequest.headersGuard())); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } rv = state->BindInt32ByName(NS_LITERAL_CSTRING("request_mode"), static_cast<int32_t>(aRequest.mode())); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } @@ -2038,23 +2034,23 @@ ReadResponse(mozIStorageConnection* aCon nsresult ReadRequest(mozIStorageConnection* aConn, EntryId aEntryId, SavedRequest* aSavedRequestOut) { MOZ_ASSERT(!NS_IsMainThread()); MOZ_ASSERT(aConn); MOZ_ASSERT(aSavedRequestOut); - nsCOMPtr<mozIStorageStatement> state; nsresult rv = aConn->CreateStatement(NS_LITERAL_CSTRING( "SELECT " "request_method, " "request_url_no_query, " "request_url_query, " + "request_url_fragment, " "request_referrer, " "request_referrer_policy, " "request_headers_guard, " "request_mode, " "request_credentials, " "request_contentpolicytype, " "request_cache, " "request_redirect, " @@ -2069,80 +2065,69 @@ ReadRequest(mozIStorageConnection* aConn if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } bool hasMoreData = false; rv = state->ExecuteStep(&hasMoreData); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } rv = state->GetUTF8String(0, aSavedRequestOut->mValue.method()); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - rv = state->GetUTF8String(1, aSavedRequestOut->mValue.urlWithoutQuery()); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - rv = state->GetUTF8String(2, aSavedRequestOut->mValue.urlQuery()); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - - rv = 
state->GetString(3, aSavedRequestOut->mValue.referrer()); + rv = state->GetUTF8String(3, aSavedRequestOut->mValue.urlFragment()); + if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } + rv = state->GetString(4, aSavedRequestOut->mValue.referrer()); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } int32_t referrerPolicy; - rv = state->GetInt32(4, &referrerPolicy); + rv = state->GetInt32(5, &referrerPolicy); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mValue.referrerPolicy() = static_cast<ReferrerPolicy>(referrerPolicy); - int32_t guard; - rv = state->GetInt32(5, &guard); + rv = state->GetInt32(6, &guard); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mValue.headersGuard() = static_cast<HeadersGuardEnum>(guard); - int32_t mode; - rv = state->GetInt32(6, &mode); + rv = state->GetInt32(7, &mode); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mValue.mode() = static_cast<RequestMode>(mode); - int32_t credentials; - rv = state->GetInt32(7, &credentials); + rv = state->GetInt32(8, &credentials); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mValue.credentials() = static_cast<RequestCredentials>(credentials); - int32_t requestContentPolicyType; - rv = state->GetInt32(8, &requestContentPolicyType); + rv = state->GetInt32(9, &requestContentPolicyType); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mValue.contentPolicyType() = static_cast<nsContentPolicyType>(requestContentPolicyType); - int32_t requestCache; - rv = state->GetInt32(9, &requestCache); + rv = state->GetInt32(10, &requestCache); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mValue.requestCache() = static_cast<RequestCache>(requestCache); - int32_t requestRedirect; - rv = state->GetInt32(10, &requestRedirect); + rv = state->GetInt32(11, &requestRedirect); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mValue.requestRedirect() = static_cast<RequestRedirect>(requestRedirect); - - rv = 
state->GetString(11, aSavedRequestOut->mValue.integrity()); + rv = state->GetString(12, aSavedRequestOut->mValue.integrity()); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - bool nullBody = false; - rv = state->GetIsNull(12, &nullBody); + rv = state->GetIsNull(13, &nullBody); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } aSavedRequestOut->mHasBodyId = !nullBody; - if (aSavedRequestOut->mHasBodyId) { - rv = ExtractId(state, 12, &aSavedRequestOut->mBodyId); + rv = ExtractId(state, 13, &aSavedRequestOut->mBodyId); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } } - rv = aConn->CreateStatement(NS_LITERAL_CSTRING( "SELECT " "name, " "value " "FROM request_headers " "WHERE entry_id=:entry_id;" ), getter_AddRefs(state)); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } @@ -2487,31 +2472,30 @@ struct Migration nsresult MigrateFrom15To16(mozIStorageConnection* aConn, bool& aRewriteSchema); nsresult MigrateFrom16To17(mozIStorageConnection* aConn, bool& aRewriteSchema); nsresult MigrateFrom17To18(mozIStorageConnection* aConn, bool& aRewriteSchema); nsresult MigrateFrom18To19(mozIStorageConnection* aConn, bool& aRewriteSchema); nsresult MigrateFrom19To20(mozIStorageConnection* aConn, bool& aRewriteSchema); nsresult MigrateFrom20To21(mozIStorageConnection* aConn, bool& aRewriteSchema); nsresult MigrateFrom21To22(mozIStorageConnection* aConn, bool& aRewriteSchema); nsresult MigrateFrom22To23(mozIStorageConnection* aConn, bool& aRewriteSchema); - +nsresult MigrateFrom23To24(mozIStorageConnection* aConn, bool& aRewriteSchema); // Configure migration functions to run for the given starting version. 
Migration sMigrationList[] = { Migration(15, MigrateFrom15To16), Migration(16, MigrateFrom16To17), Migration(17, MigrateFrom17To18), Migration(18, MigrateFrom18To19), Migration(19, MigrateFrom19To20), Migration(20, MigrateFrom20To21), Migration(21, MigrateFrom21To22), Migration(22, MigrateFrom22To23), + Migration(23, MigrateFrom23To24), }; - uint32_t sMigrationListLength = sizeof(sMigrationList) / sizeof(Migration); - nsresult RewriteEntriesSchema(mozIStorageConnection* aConn) { nsresult rv = aConn->ExecuteSimpleSQL(NS_LITERAL_CSTRING( "PRAGMA writable_schema = ON" )); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } @@ -2996,21 +2980,37 @@ nsresult MigrateFrom21To22(mozIStorageCo nsresult MigrateFrom22To23(mozIStorageConnection* aConn, bool& aRewriteSchema) { MOZ_ASSERT(!NS_IsMainThread()); MOZ_ASSERT(aConn); // The only change between 22 and 23 was a different snappy compression // format, but it's backwards-compatible. - nsresult rv = aConn->SetSchemaVersion(23); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } + return rv; +} +nsresult MigrateFrom23To24(mozIStorageConnection* aConn, bool& aRewriteSchema) +{ + MOZ_ASSERT(!NS_IsMainThread()); + MOZ_ASSERT(aConn); + + // Add the request_url_fragment column. + nsresult rv = aConn->ExecuteSimpleSQL(NS_LITERAL_CSTRING( + "ALTER TABLE entries " + "ADD COLUMN request_url_fragment TEXT NOT NULL DEFAULT ''" + )); + if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } + + rv = aConn->SetSchemaVersion(24); + if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } + + aRewriteSchema = true; return rv; } } // anonymous namespace - } // namespace db } // namespace cache } // namespace dom } // namespace mozilla
--- a/dom/cache/TypeUtils.cpp +++ b/dom/cache/TypeUtils.cpp @@ -117,40 +117,35 @@ TypeUtils::ToInternalRequest(const Ownin void TypeUtils::ToCacheRequest(CacheRequest& aOut, InternalRequest* aIn, BodyAction aBodyAction, SchemeAction aSchemeAction, nsTArray<UniquePtr<AutoIPCStream>>& aStreamCleanupList, ErrorResult& aRv) { MOZ_ASSERT(aIn); - aIn->GetMethod(aOut.method()); - - nsAutoCString url; - aIn->GetURL(url); - + nsCString url(aIn->GetURLWithoutFragment()); bool schemeValid; ProcessURL(url, &schemeValid, &aOut.urlWithoutQuery(), &aOut.urlQuery(), aRv); if (aRv.Failed()) { return; } - if (!schemeValid) { if (aSchemeAction == TypeErrorOnInvalidScheme) { NS_ConvertUTF8toUTF16 urlUTF16(url); aRv.ThrowTypeError<MSG_INVALID_URL_SCHEME>(NS_LITERAL_STRING("Request"), urlUTF16); return; } } + aOut.urlFragment() = aIn->GetFragment(); aIn->GetReferrer(aOut.referrer()); aOut.referrerPolicy() = aIn->ReferrerPolicy_(); - RefPtr<InternalHeaders> headers = aIn->Headers(); MOZ_ASSERT(headers); ToHeadersEntryList(aOut.headers(), headers); aOut.headersGuard() = headers->Guard(); aOut.mode() = aIn->Mode(); aOut.credentials() = aIn->GetCredentialsMode(); aOut.contentPolicyType() = aIn->ContentPolicyType(); aOut.requestCache() = aIn->GetCacheMode(); @@ -301,27 +296,24 @@ TypeUtils::ToResponse(const CacheRespons default: MOZ_CRASH("Unexpected ResponseType!"); } MOZ_ASSERT(ir); RefPtr<Response> ref = new Response(GetGlobalObject(), ir); return ref.forget(); } - already_AddRefed<InternalRequest> TypeUtils::ToInternalRequest(const CacheRequest& aIn) { nsAutoCString url(aIn.urlWithoutQuery()); url.Append(aIn.urlQuery()); - - RefPtr<InternalRequest> internalRequest = new InternalRequest(url); - + RefPtr<InternalRequest> internalRequest = + new InternalRequest(url, aIn.urlFragment()); internalRequest->SetMethod(aIn.method()); - internalRequest->SetReferrer(aIn.referrer()); internalRequest->SetReferrerPolicy(aIn.referrerPolicy()); internalRequest->SetMode(aIn.mode()); 
internalRequest->SetCredentialsMode(aIn.credentials()); internalRequest->SetContentPolicyType(aIn.contentPolicyType()); internalRequest->SetCacheMode(aIn.requestCache()); internalRequest->SetRedirectMode(aIn.requestRedirect()); internalRequest->SetIntegrity(aIn.integrity());
--- a/dom/cache/test/mochitest/test_cache_keys.js +++ b/dom/cache/test/mochitest/test_cache_keys.js @@ -10,16 +10,21 @@ caches.open(name).then(function(cache) { c = cache; return c.addAll(tests); }).then(function() { // Add another cache entry using Cache.add var another = "//mochi.test:8888/?yetanother" + context; tests.push(another); return c.add(another); }).then(function() { + // Add another cache entry with URL fragment using Cache.add + var anotherWithFragment = "//mochi.test:8888/?fragment" + context + "#fragment"; + tests.push(anotherWithFragment); + return c.add(anotherWithFragment); +}).then(function() { return c.keys(); }).then(function(keys) { is(keys.length, tests.length, "Same number of elements"); // Verify both the insertion order of the requests and their validity. keys.forEach(function(r, i) { ok(r instanceof Request, "Valid request object"); ok(r.url.indexOf(tests[i]) >= 0, "Valid URL"); });
--- a/dom/cache/test/mochitest/test_cache_match_request.js +++ b/dom/cache/test/mochitest/test_cache_match_request.js @@ -20,31 +20,29 @@ function checkResponse(r, expectedBody) "Both responses should have the same status text"); return r.text().then(function(text) { // Avoid dumping out the large response text to the log if they're equal. if (text !== expectedBody) { is(text, responseText, "The response body should be correct"); } }); } - fetch(new Request(request)).then(function(r) { response = r; return response.text(); }).then(function(text) { responseText = text; return testRequest(request, unknownRequest, requestWithAltQS, request.url.replace("#fragment", "#other")); }).then(function() { return testRequest(request.url, unknownRequest.url, requestWithAltQS.url, request.url.replace("#fragment", "#other")); }).then(function() { testDone(); }); - // The request argument can either be a URL string, or a Request object. function testRequest(request, unknownRequest, requestWithAlternateQueryString, requestWithDifferentFragment) { return caches.open(name).then(function(cache) { c = cache; return c.add(request); }).then(function() { return Promise.all(
--- a/dom/events/EventListenerManager.cpp +++ b/dom/events/EventListenerManager.cpp @@ -867,16 +867,17 @@ EventListenerManager::SetEventHandler(ns scriptSample.Assign(attr); scriptSample.AppendLiteral(" attribute on "); scriptSample.Append(tagName); scriptSample.AppendLiteral(" element"); bool allowsInlineScript = true; rv = csp->GetAllowsInline(nsIContentPolicy::TYPE_SCRIPT, EmptyString(), // aNonce + false, // aParserCreated scriptSample, 0, // aLineNumber &allowsInlineScript); NS_ENSURE_SUCCESS(rv, rv); // return early if CSP wants us to block inline scripts if (!allowsInlineScript) { return NS_OK;
--- a/dom/fetch/FetchDriver.cpp +++ b/dom/fetch/FetchDriver.cpp @@ -370,29 +370,25 @@ FetchDriver::HttpFetch() } rv = chan->AsyncOpen2(this); NS_ENSURE_SUCCESS(rv, rv); // Step 4 onwards of "HTTP Fetch" is handled internally by Necko. return NS_OK; } - already_AddRefed<InternalResponse> FetchDriver::BeginAndGetFilteredResponse(InternalResponse* aResponse, bool aFoundOpaqueRedirect) { MOZ_ASSERT(aResponse); - AutoTArray<nsCString, 4> reqURLList; - mRequest->GetURLList(reqURLList); - + mRequest->GetURLListWithoutFragment(reqURLList); MOZ_ASSERT(!reqURLList.IsEmpty()); aResponse->SetURLList(reqURLList); - RefPtr<InternalResponse> filteredResponse; if (aFoundOpaqueRedirect) { filteredResponse = aResponse->OpaqueRedirectResponse(); } else { switch (mRequest->GetResponseTainting()) { case LoadTainting::Basic: filteredResponse = aResponse->BasicResponse(); break; @@ -803,25 +799,28 @@ FetchDriver::AsyncOnChannelRedirect(nsIC nsCOMPtr<nsIURI> uri; MOZ_ALWAYS_SUCCEEDS(aNewChannel->GetURI(getter_AddRefs(uri))); nsCOMPtr<nsIURI> uriClone; nsresult rv = uri->CloneIgnoringRef(getter_AddRefs(uriClone)); if(NS_WARN_IF(NS_FAILED(rv))){ return rv; } - nsCString spec; rv = uriClone->GetSpec(spec); if(NS_WARN_IF(NS_FAILED(rv))){ return rv; } + nsCString fragment; + rv = uri->GetRef(fragment); + if(NS_WARN_IF(NS_FAILED(rv))){ + return rv; + } - mRequest->AddURL(spec); - + mRequest->AddURL(spec, fragment); NS_ConvertUTF8toUTF16 tRPHeaderValue(tRPHeaderCValue); // updates request’s associated referrer policy according to the // Referrer-Policy header (if any). if (!tRPHeaderValue.IsEmpty()) { net::ReferrerPolicy net_referrerPolicy = nsContentUtils::GetReferrerPolicyFromHeader(tRPHeaderValue); if (net_referrerPolicy != net::RP_Unset) { ReferrerPolicy referrerPolicy = mRequest->ReferrerPolicy_();
--- a/dom/fetch/InternalRequest.cpp +++ b/dom/fetch/InternalRequest.cpp @@ -14,28 +14,26 @@ #include "mozilla/dom/FetchTypes.h" #include "mozilla/dom/ScriptSettings.h" #include "mozilla/dom/workers/Workers.h" #include "WorkerPrivate.h" namespace mozilla { namespace dom { - // The global is used to extract the principal. already_AddRefed<InternalRequest> InternalRequest::GetRequestConstructorCopy(nsIGlobalObject* aGlobal, ErrorResult& aRv) const { MOZ_RELEASE_ASSERT(!mURLList.IsEmpty(), "Internal Request's urlList should not be empty when copied from constructor."); - - RefPtr<InternalRequest> copy = new InternalRequest(mURLList.LastElement()); + RefPtr<InternalRequest> copy = new InternalRequest(mURLList.LastElement(), + mFragment); copy->SetMethod(mMethod); copy->mHeaders = new InternalHeaders(*mHeaders); copy->SetUnsafeRequest(); - copy->mBodyStream = mBodyStream; copy->mForceOriginHeader = true; // The "client" is not stored in our implementation. Fetch API users should // use the appropriate window/document/principal and other Gecko security // mechanisms as appropriate. 
copy->mSameOriginDataURL = true; copy->mPreserveContentCodings = true; copy->mReferrer = mReferrer; @@ -70,21 +68,20 @@ InternalRequest::Clone() nsresult rv = NS_CloneInputStream(mBodyStream, getter_AddRefs(clonedBody), getter_AddRefs(replacementBody)); if (NS_WARN_IF(NS_FAILED(rv))) { return nullptr; } clone->mBodyStream.swap(clonedBody); if (replacementBody) { mBodyStream.swap(replacementBody); } - return clone.forget(); } - -InternalRequest::InternalRequest(const nsACString& aURL) +InternalRequest::InternalRequest(const nsACString& aURL, + const nsACString& aFragment) : mMethod("GET") , mHeaders(new InternalHeaders(HeadersGuardEnum::None)) , mContentPolicyType(nsIContentPolicy::TYPE_FETCH) , mReferrer(NS_LITERAL_STRING(kFETCH_CLIENT_REFERRER_STR)) , mReferrerPolicy(ReferrerPolicy::_empty) , mEnvironmentReferrerPolicy(net::RP_Default) , mMode(RequestMode::No_cors) , mCredentialsMode(RequestCredentials::Omit) @@ -100,20 +97,20 @@ InternalRequest::InternalRequest(const n // specification does not handle this yet. , mSameOriginDataURL(true) , mSkipServiceWorker(false) , mSynchronous(false) , mUnsafeRequest(false) , mUseURLCredentials(false) { MOZ_ASSERT(!aURL.IsEmpty()); - AddURL(aURL); + AddURL(aURL, aFragment); } - InternalRequest::InternalRequest(const nsACString& aURL, + const nsACString& aFragment, const nsACString& aMethod, already_AddRefed<InternalHeaders> aHeaders, RequestCache aCacheMode, RequestMode aMode, RequestRedirect aRequestRedirect, RequestCredentials aRequestCredentials, const nsAString& aReferrer, ReferrerPolicy aReferrerPolicy, @@ -137,33 +134,33 @@ InternalRequest::InternalRequest(const n // FIXME See the above comment in the default constructor. 
, mSameOriginDataURL(true) , mSkipServiceWorker(false) , mSynchronous(false) , mUnsafeRequest(false) , mUseURLCredentials(false) { MOZ_ASSERT(!aURL.IsEmpty()); - AddURL(aURL); + AddURL(aURL, aFragment); } - InternalRequest::InternalRequest(const InternalRequest& aOther) : mMethod(aOther.mMethod) , mURLList(aOther.mURLList) , mHeaders(new InternalHeaders(*aOther.mHeaders)) , mContentPolicyType(aOther.mContentPolicyType) , mReferrer(aOther.mReferrer) , mReferrerPolicy(aOther.mReferrerPolicy) , mEnvironmentReferrerPolicy(aOther.mEnvironmentReferrerPolicy) , mMode(aOther.mMode) , mCredentialsMode(aOther.mCredentialsMode) , mResponseTainting(aOther.mResponseTainting) , mCacheMode(aOther.mCacheMode) , mRedirectMode(aOther.mRedirectMode) , mIntegrity(aOther.mIntegrity) + , mFragment(aOther.mFragment) , mAuthenticationFlag(aOther.mAuthenticationFlag) , mForceOriginHeader(aOther.mForceOriginHeader) , mPreserveContentCodings(aOther.mPreserveContentCodings) , mSameOriginDataURL(aOther.mSameOriginDataURL) , mSkipServiceWorker(aOther.mSkipServiceWorker) , mSynchronous(aOther.mSynchronous) , mUnsafeRequest(aOther.mUnsafeRequest) , mUseURLCredentials(aOther.mUseURLCredentials)
--- a/dom/fetch/InternalRequest.h +++ b/dom/fetch/InternalRequest.h @@ -82,27 +82,24 @@ namespace dom { * TODO: Add a content type for favicon * TODO: Add a content type for download */ class Request; class IPCInternalRequest; #define kFETCH_CLIENT_REFERRER_STR "about:client" - class InternalRequest final { friend class Request; - public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(InternalRequest) - - explicit InternalRequest(const nsACString& aURL); - + InternalRequest(const nsACString& aURL, const nsACString& aFragment); InternalRequest(const nsACString& aURL, + const nsACString& aFragment, const nsACString& aMethod, already_AddRefed<InternalHeaders> aHeaders, RequestCache aCacheMode, RequestMode aMode, RequestRedirect aRequestRedirect, RequestCredentials aRequestCredentials, const nsAString& aReferrer, ReferrerPolicy aReferrerPolicy, @@ -129,47 +126,59 @@ public: bool HasSimpleMethod() const { return mMethod.LowerCaseEqualsASCII("get") || mMethod.LowerCaseEqualsASCII("post") || mMethod.LowerCaseEqualsASCII("head"); } - - // GetURL should get the request's current url. A request has an associated - // current url. It is a pointer to the last fetch URL in request's url list. + // GetURL should get the request's current url with fragment. A request has + // an associated current url. It is a pointer to the last fetch URL in + // request's url list. void GetURL(nsACString& aURL) const { - MOZ_RELEASE_ASSERT(!mURLList.IsEmpty(), "Internal Request's urlList should not be empty."); - - aURL.Assign(mURLList.LastElement()); + aURL.Assign(GetURLWithoutFragment()); + if (GetFragment().IsEmpty()) { + return; + } + aURL.Append(NS_LITERAL_CSTRING("#")); + aURL.Append(GetFragment()); } + const nsCString& + GetURLWithoutFragment() const + { + MOZ_RELEASE_ASSERT(!mURLList.IsEmpty(), + "Internal Request's urlList should not be empty."); + + return mURLList.LastElement(); + } // AddURL should append the url into url list. 
- // Normally we strip the fragment from the URL in Request::Constructor. - // If internal code is directly constructing this object they must - // strip the fragment first. Since these should be well formed URLs we - // can use a simple check for a fragment here. The full parser is - // difficult to use off the main thread. + // Normally we strip the fragment from the URL in Request::Constructor and + // pass the fragment as the second argument into it. + // If a fragment is present in the URL it must be stripped and passed in + // separately. void - AddURL(const nsACString& aURL) + AddURL(const nsACString& aURL, const nsACString& aFragment) { MOZ_ASSERT(!aURL.IsEmpty()); + MOZ_ASSERT(!aURL.Contains('#')); + mURLList.AppendElement(aURL); - MOZ_ASSERT(mURLList.LastElement().Find(NS_LITERAL_CSTRING("#")) == kNotFound); + + mFragment.Assign(aFragment); } - + // Get the URL list without their fragments. void - GetURLList(nsTArray<nsCString>& aURLList) + GetURLListWithoutFragment(nsTArray<nsCString>& aURLList) { aURLList.Assign(mURLList); } - void GetReferrer(nsAString& aReferrer) const { aReferrer.Assign(mReferrer); } void SetReferrer(const nsAString& aReferrer) @@ -316,30 +325,33 @@ public: mRedirectMode = aRedirectMode; } const nsString& GetIntegrity() const { return mIntegrity; } - void SetIntegrity(const nsAString& aIntegrity) { MOZ_ASSERT(mIntegrity.IsEmpty()); mIntegrity.Assign(aIntegrity); } + const nsCString& + GetFragment() const + { + return mFragment; + } nsContentPolicyType ContentPolicyType() const { return mContentPolicyType; } - void SetContentPolicyType(nsContentPolicyType aContentPolicyType); void OverrideContentPolicyType(nsContentPolicyType aContentPolicyType); RequestContext Context() const @@ -486,25 +498,23 @@ private: ReferrerPolicy mReferrerPolicy; // This will be used for request created from Window or Worker contexts // In case there's no Referrer Policy in Request, this will be passed to // channel. 
// The Environment Referrer Policy should be net::ReferrerPolicy so that it // could be associated with nsIHttpChannel. net::ReferrerPolicy mEnvironmentReferrerPolicy; - RequestMode mMode; RequestCredentials mCredentialsMode; MOZ_INIT_OUTSIDE_CTOR LoadTainting mResponseTainting; RequestCache mCacheMode; RequestRedirect mRedirectMode; - nsString mIntegrity; - + nsCString mFragment; MOZ_INIT_OUTSIDE_CTOR bool mAuthenticationFlag; MOZ_INIT_OUTSIDE_CTOR bool mForceOriginHeader; MOZ_INIT_OUTSIDE_CTOR bool mPreserveContentCodings; MOZ_INIT_OUTSIDE_CTOR bool mSameOriginDataURL; MOZ_INIT_OUTSIDE_CTOR bool mSkipServiceWorker; MOZ_INIT_OUTSIDE_CTOR bool mSynchronous; MOZ_INIT_OUTSIDE_CTOR bool mUnsafeRequest; MOZ_INIT_OUTSIDE_CTOR bool mUseURLCredentials;
--- a/dom/fetch/InternalResponse.h +++ b/dom/fetch/InternalResponse.h @@ -80,47 +80,40 @@ public: return mType; } bool IsError() const { return Type() == ResponseType::Error; } - // GetUrl should return last fetch URL in response's url list and null if // response's url list is the empty list. - void - GetURL(nsCString& aURL) const + const nsCString& + GetURL() const { // Empty urlList when response is a synthetic response. if (mURLList.IsEmpty()) { - aURL.Truncate(); - return; + return EmptyCString(); } - - aURL.Assign(mURLList.LastElement()); + return mURLList.LastElement(); } - void GetURLList(nsTArray<nsCString>& aURLList) const { aURLList.Assign(mURLList); } - - void - GetUnfilteredURL(nsCString& aURL) const + const nsCString& + GetUnfilteredURL() const { if (mWrappedResponse) { - return mWrappedResponse->GetURL(aURL); + return mWrappedResponse->GetURL(); } - - return GetURL(aURL); + return GetURL(); } - void GetUnfilteredURLList(nsTArray<nsCString>& aURLList) const { if (mWrappedResponse) { return mWrappedResponse->GetURLList(aURLList); } return GetURLList(aURLList);
--- a/dom/fetch/Request.cpp +++ b/dom/fetch/Request.cpp @@ -84,147 +84,158 @@ ParseURLFromDocument(nsIDocument* aDocum nsCOMPtr<nsIURI> baseURI = aDocument->GetBaseURI(); nsCOMPtr<nsIURI> resolvedURI; aRv = NS_NewURI(getter_AddRefs(resolvedURI), aInput, nullptr, baseURI); if (NS_WARN_IF(aRv.Failed())) { aRv.ThrowTypeError<MSG_INVALID_URL>(aInput); } return resolvedURI.forget(); } - void GetRequestURLFromDocument(nsIDocument* aDocument, const nsAString& aInput, - nsAString& aRequestURL, ErrorResult& aRv) + nsAString& aRequestURL, nsACString& aURLfragment, + ErrorResult& aRv) { nsCOMPtr<nsIURI> resolvedURI = ParseURLFromDocument(aDocument, aInput, aRv); if (aRv.Failed()) { return; } - // This fails with URIs with weird protocols, even when they are valid, // so we ignore the failure nsAutoCString credentials; Unused << resolvedURI->GetUserPass(credentials); if (!credentials.IsEmpty()) { aRv.ThrowTypeError<MSG_URL_HAS_CREDENTIALS>(aInput); return; } nsCOMPtr<nsIURI> resolvedURIClone; // We use CloneIgnoringRef to strip away the fragment even if the original URI // is immutable. aRv = resolvedURI->CloneIgnoringRef(getter_AddRefs(resolvedURIClone)); if (NS_WARN_IF(aRv.Failed())) { return; } - nsAutoCString spec; aRv = resolvedURIClone->GetSpec(spec); if (NS_WARN_IF(aRv.Failed())) { return; } + CopyUTF8toUTF16(spec, aRequestURL); - CopyUTF8toUTF16(spec, aRequestURL); + // Get the fragment from nsIURI. 
+ aRv = resolvedURI->GetRef(aURLfragment); + if (NS_WARN_IF(aRv.Failed())) { + return; + } } - already_AddRefed<nsIURI> ParseURLFromChrome(const nsAString& aInput, ErrorResult& aRv) { MOZ_ASSERT(NS_IsMainThread()); - nsCOMPtr<nsIURI> uri; aRv = NS_NewURI(getter_AddRefs(uri), aInput, nullptr, nullptr); if (NS_WARN_IF(aRv.Failed())) { aRv.ThrowTypeError<MSG_INVALID_URL>(aInput); } return uri.forget(); } - void GetRequestURLFromChrome(const nsAString& aInput, nsAString& aRequestURL, - ErrorResult& aRv) + nsACString& aURLfragment, ErrorResult& aRv) { nsCOMPtr<nsIURI> uri = ParseURLFromChrome(aInput, aRv); if (aRv.Failed()) { return; } - // This fails with URIs with weird protocols, even when they are valid, // so we ignore the failure nsAutoCString credentials; Unused << uri->GetUserPass(credentials); if (!credentials.IsEmpty()) { aRv.ThrowTypeError<MSG_URL_HAS_CREDENTIALS>(aInput); return; } nsCOMPtr<nsIURI> uriClone; // We use CloneIgnoringRef to strip away the fragment even if the original URI // is immutable. aRv = uri->CloneIgnoringRef(getter_AddRefs(uriClone)); if (NS_WARN_IF(aRv.Failed())) { return; } - nsAutoCString spec; aRv = uriClone->GetSpec(spec); if (NS_WARN_IF(aRv.Failed())) { return; } + CopyUTF8toUTF16(spec, aRequestURL); - CopyUTF8toUTF16(spec, aRequestURL); + // Get the fragment from nsIURI. 
+ aRv = uri->GetRef(aURLfragment); + if (NS_WARN_IF(aRv.Failed())) { + return; + } } - already_AddRefed<URL> ParseURLFromWorker(const GlobalObject& aGlobal, const nsAString& aInput, ErrorResult& aRv) { workers::WorkerPrivate* worker = workers::GetCurrentThreadWorkerPrivate(); MOZ_ASSERT(worker); worker->AssertIsOnWorkerThread(); NS_ConvertUTF8toUTF16 baseURL(worker->GetLocationInfo().mHref); RefPtr<URL> url = URL::WorkerConstructor(aGlobal, aInput, baseURL, aRv); if (NS_WARN_IF(aRv.Failed())) { aRv.ThrowTypeError<MSG_INVALID_URL>(aInput); } return url.forget(); } - void GetRequestURLFromWorker(const GlobalObject& aGlobal, const nsAString& aInput, - nsAString& aRequestURL, ErrorResult& aRv) + nsAString& aRequestURL, nsACString& aURLfragment, + ErrorResult& aRv) { RefPtr<URL> url = ParseURLFromWorker(aGlobal, aInput, aRv); if (aRv.Failed()) { return; } - nsString username; url->GetUsername(username, aRv); if (NS_WARN_IF(aRv.Failed())) { return; } nsString password; url->GetPassword(password, aRv); if (NS_WARN_IF(aRv.Failed())) { return; } - if (!username.IsEmpty() || !password.IsEmpty()) { aRv.ThrowTypeError<MSG_URL_HAS_CREDENTIALS>(aInput); return; } + // Get the fragment from URL. + nsAutoString fragment; + url->GetHash(fragment, aRv); + if (NS_WARN_IF(aRv.Failed())) { + return; + } + + // Note: URL::GetHash() includes the "#" and we want the fragment with out + // the hash symbol. + if (!fragment.IsEmpty()) { + CopyUTF16toUTF8(Substring(fragment, 1), aURLfragment); + } url->SetHash(EmptyString(), aRv); if (NS_WARN_IF(aRv.Failed())) { return; } - url->Stringify(aRequestURL, aRv); if (NS_WARN_IF(aRv.Failed())) { return; } } class ReferrerSameOriginChecker final : public workers::WorkerMainThreadRunnable { @@ -279,48 +290,43 @@ Request::Constructor(const GlobalObject& aRv.ThrowTypeError<MSG_FETCH_BODY_CONSUMED_ERROR>(); return nullptr; } if (body) { temporaryBody = body; } request = inputReq->GetInternalRequest(); - } else { // aInput is USVString. 
// We need to get url before we create a InternalRequest. nsAutoString input; input.Assign(aInput.GetAsUSVString()); - nsAutoString requestURL; + nsCString fragment; if (NS_IsMainThread()) { nsIDocument* doc = GetEntryDocument(); if (doc) { - GetRequestURLFromDocument(doc, input, requestURL, aRv); + GetRequestURLFromDocument(doc, input, requestURL, fragment, aRv); } else { // If we don't have a document, we must assume that this is a full URL. - GetRequestURLFromChrome(input, requestURL, aRv); + GetRequestURLFromChrome(input, requestURL, fragment, aRv); } } else { - GetRequestURLFromWorker(aGlobal, input, requestURL, aRv); + GetRequestURLFromWorker(aGlobal, input, requestURL, fragment, aRv); } - if (NS_WARN_IF(aRv.Failed())) { return nullptr; } - - request = new InternalRequest(NS_ConvertUTF16toUTF8(requestURL)); + request = new InternalRequest(NS_ConvertUTF16toUTF8(requestURL), fragment); } - request = request->GetRequestConstructorCopy(global, aRv); if (NS_WARN_IF(aRv.Failed())) { return nullptr; } - RequestMode fallbackMode = RequestMode::EndGuard_; RequestCredentials fallbackCredentials = RequestCredentials::EndGuard_; RequestCache fallbackCache = RequestCache::EndGuard_; if (aInput.IsUSVString()) { fallbackMode = RequestMode::Cors; fallbackCredentials = RequestCredentials::Omit; fallbackCache = RequestCache::Default; }
--- a/dom/fetch/Response.h +++ b/dom/fetch/Response.h @@ -43,31 +43,26 @@ public: return ResponseBinding::Wrap(aCx, this, aGivenProto); } ResponseType Type() const { return mInternalResponse->Type(); } - void GetUrl(nsAString& aUrl) const { - nsCString url; - mInternalResponse->GetURL(url); - CopyUTF8toUTF16(url, aUrl); + CopyUTF8toUTF16(mInternalResponse->GetURL(), aUrl); } - bool Redirected() const { return mInternalResponse->IsRedirected(); } - uint16_t Status() const { return mInternalResponse->GetStatus(); } bool Ok() const
--- a/dom/flyweb/HttpServer.cpp +++ b/dom/flyweb/HttpServer.cpp @@ -580,26 +580,23 @@ HttpServer::Connection::ConsumeLine(cons MOZ_ASSERT(!mPendingReq); // Process request line nsCWhitespaceTokenizer tokens(Substring(aBuffer, aLength)); NS_ENSURE_TRUE(tokens.hasMoreTokens(), NS_ERROR_UNEXPECTED); nsDependentCSubstring method = tokens.nextToken(); NS_ENSURE_TRUE(NS_IsValidHTTPToken(method), NS_ERROR_UNEXPECTED); - NS_ENSURE_TRUE(tokens.hasMoreTokens(), NS_ERROR_UNEXPECTED); nsDependentCSubstring url = tokens.nextToken(); // Seems like it's also allowed to pass full urls with scheme+host+port. // May need to support that. NS_ENSURE_TRUE(url.First() == '/', NS_ERROR_UNEXPECTED); - - mPendingReq = new InternalRequest(url); + mPendingReq = new InternalRequest(url, /* aURLFragment */ EmptyCString()); mPendingReq->SetMethod(method); - NS_ENSURE_TRUE(tokens.hasMoreTokens(), NS_ERROR_UNEXPECTED); nsDependentCSubstring version = tokens.nextToken(); NS_ENSURE_TRUE(StringBeginsWith(version, NS_LITERAL_CSTRING("HTTP/1.")), NS_ERROR_UNEXPECTED); nsresult rv; // This integer parsing is likely not strict enough. nsCString reqVersion; reqVersion = Substring(version, MOZ_ARRAY_LENGTH("HTTP/1.") - 1);
--- a/dom/interfaces/security/nsIContentSecurityPolicy.idl +++ b/dom/interfaces/security/nsIContentSecurityPolicy.idl @@ -126,26 +126,28 @@ interface nsIContentSecurityPolicy : nsI void appendPolicy(in AString policyString, in boolean reportOnly, in boolean deliveredViaMetaTag); /* * Whether this policy allows inline script or style. * @param aContentPolicyType Either TYPE_SCRIPT or TYPE_STYLESHEET * @param aNonce The nonce string to check against the policy + * @param aParserCreated If the script element was created by the HTML Parser * @param aContent The content of the inline resource to hash * (and compare to the hashes listed in the policy) * @param aLineNumber The line number of the inline resource * (used for reporting) * @return * Whether or not the effects of the inline style should be allowed * (block the rules if false). */ boolean getAllowsInline(in nsContentPolicyType aContentPolicyType, in AString aNonce, + in boolean aParserCreated, in AString aContent, in unsigned long aLineNumber); /** * whether this policy allows eval and eval-like functions * such as setTimeout("code string", time). * @param shouldReportViolations * Whether or not the use of eval should be reported.
--- a/dom/ipc/ContentChild.cpp +++ b/dom/ipc/ContentChild.cpp @@ -20,16 +20,17 @@ #include "mozilla/LookAndFeel.h" #include "mozilla/Preferences.h" #include "mozilla/ProcessHangMonitorIPC.h" #include "mozilla/Unused.h" #include "mozilla/devtools/HeapSnapshotTempFileHelperChild.h" #include "mozilla/docshell/OfflineCacheUpdateChild.h" #include "mozilla/dom/ContentBridgeChild.h" #include "mozilla/dom/ContentBridgeParent.h" +#include "mozilla/dom/VideoDecoderManagerChild.h" #include "mozilla/dom/ContentParent.h" #include "mozilla/dom/DataTransfer.h" #include "mozilla/dom/DOMStorageIPC.h" #include "mozilla/dom/ExternalHelperAppChild.h" #include "mozilla/dom/FlyWebPublishedServerIPC.h" #include "mozilla/dom/GetFilesHelper.h" #include "mozilla/dom/PCrashReporterChild.h" #include "mozilla/dom/ProcessGlobal.h" @@ -1176,34 +1177,37 @@ ContentChild::RecvGMPsChanged(nsTArray<G { GeckoMediaPluginServiceChild::UpdateGMPCapabilities(Move(capabilities)); return true; } bool ContentChild::RecvInitRendering(Endpoint<PCompositorBridgeChild>&& aCompositor, Endpoint<PImageBridgeChild>&& aImageBridge, - Endpoint<PVRManagerChild>&& aVRBridge) + Endpoint<PVRManagerChild>&& aVRBridge, + Endpoint<PVideoDecoderManagerChild>&& aVideoManager) { if (!CompositorBridgeChild::InitForContent(Move(aCompositor))) { return false; } if (!ImageBridgeChild::InitForContent(Move(aImageBridge))) { return false; } if (!gfx::VRManagerChild::InitForContent(Move(aVRBridge))) { return false; } + VideoDecoderManagerChild::InitForContent(Move(aVideoManager)); return true; } bool ContentChild::RecvReinitRendering(Endpoint<PCompositorBridgeChild>&& aCompositor, Endpoint<PImageBridgeChild>&& aImageBridge, - Endpoint<PVRManagerChild>&& aVRBridge) + Endpoint<PVRManagerChild>&& aVRBridge, + Endpoint<PVideoDecoderManagerChild>&& aVideoManager) { nsTArray<RefPtr<TabChild>> tabs = TabChild::GetAll(); // Zap all the old layer managers we have lying around. 
for (const auto& tabChild : tabs) { if (tabChild->LayersId()) { tabChild->InvalidateLayers(); } @@ -1221,16 +1225,18 @@ ContentChild::RecvReinitRendering(Endpoi } // Establish new PLayerTransactions. for (const auto& tabChild : tabs) { if (tabChild->LayersId()) { tabChild->ReinitRendering(); } } + + VideoDecoderManagerChild::InitForContent(Move(aVideoManager)); return true; } PBackgroundChild* ContentChild::AllocPBackgroundChild(Transport* aTransport, ProcessId aOtherProcess) { return BackgroundChild::Alloc(aTransport, aOtherProcess);
--- a/dom/ipc/ContentChild.h +++ b/dom/ipc/ContentChild.h @@ -164,23 +164,25 @@ public: bool RecvGMPsChanged(nsTArray<GMPCapabilityData>&& capabilities) override; bool RecvInitRendering( Endpoint<PCompositorBridgeChild>&& aCompositor, Endpoint<PImageBridgeChild>&& aImageBridge, - Endpoint<PVRManagerChild>&& aVRBridge) override; + Endpoint<PVRManagerChild>&& aVRBridge, + Endpoint<PVideoDecoderManagerChild>&& aVideoManager) override; bool RecvReinitRendering( Endpoint<PCompositorBridgeChild>&& aCompositor, Endpoint<PImageBridgeChild>&& aImageBridge, - Endpoint<PVRManagerChild>&& aVRBridge) override; + Endpoint<PVRManagerChild>&& aVRBridge, + Endpoint<PVideoDecoderManagerChild>&& aVideoManager) override; PProcessHangMonitorChild* AllocPProcessHangMonitorChild(Transport* aTransport, ProcessId aOtherProcess) override; virtual bool RecvSetProcessSandbox(const MaybeFileDesc& aBroker) override; PBackgroundChild*
--- a/dom/ipc/ContentParent.cpp +++ b/dom/ipc/ContentParent.cpp @@ -1022,23 +1022,16 @@ ContentParent::RecvFindPlugins(const uin nsresult* aRv, nsTArray<PluginTag>* aPlugins, uint32_t* aNewPluginEpoch) { *aRv = mozilla::plugins::FindPluginsForContent(aPluginEpoch, aPlugins, aNewPluginEpoch); return true; } -bool -ContentParent::RecvInitVideoDecoderManager(Endpoint<PVideoDecoderManagerChild>* aEndpoint) -{ - GPUProcessManager::Get()->CreateContentVideoDecoderManager(OtherPid(), aEndpoint); - return true; -} - /*static*/ TabParent* ContentParent::CreateBrowserOrApp(const TabContext& aContext, Element* aFrameElement, ContentParent* aOpenerContentParent, bool aFreshProcess) { PROFILER_LABEL_FUNC(js::ProfileEntry::Category::OTHER); @@ -2222,28 +2215,31 @@ ContentParent::InitInternal(ProcessPrior // on demand.) bool useOffMainThreadCompositing = !!CompositorThreadHolder::Loop(); if (useOffMainThreadCompositing) { GPUProcessManager* gpm = GPUProcessManager::Get(); Endpoint<PCompositorBridgeChild> compositor; Endpoint<PImageBridgeChild> imageBridge; Endpoint<PVRManagerChild> vrBridge; + Endpoint<PVideoDecoderManagerChild> videoManager; DebugOnly<bool> opened = gpm->CreateContentBridges( OtherPid(), &compositor, &imageBridge, - &vrBridge); + &vrBridge, + &videoManager); MOZ_ASSERT(opened); Unused << SendInitRendering( Move(compositor), Move(imageBridge), - Move(vrBridge)); + Move(vrBridge), + Move(videoManager)); gpm->AddListener(this); } } if (gAppData) { // Sending all information to content process. 
Unused << SendAppInit(); @@ -2378,28 +2374,31 @@ ContentParent::RecvGetGfxVars(Infallible void ContentParent::OnCompositorUnexpectedShutdown() { GPUProcessManager* gpm = GPUProcessManager::Get(); Endpoint<PCompositorBridgeChild> compositor; Endpoint<PImageBridgeChild> imageBridge; Endpoint<PVRManagerChild> vrBridge; + Endpoint<PVideoDecoderManagerChild> videoManager; DebugOnly<bool> opened = gpm->CreateContentBridges( OtherPid(), &compositor, &imageBridge, - &vrBridge); + &vrBridge, + &videoManager); MOZ_ASSERT(opened); Unused << SendReinitRendering( Move(compositor), Move(imageBridge), - Move(vrBridge)); + Move(vrBridge), + Move(videoManager)); } void ContentParent::OnVarChanged(const GfxVarUpdate& aVar) { if (!mIPCOpen) { return; }
--- a/dom/ipc/ContentParent.h +++ b/dom/ipc/ContentParent.h @@ -257,18 +257,16 @@ public: virtual bool RecvGetBlocklistState(const uint32_t& aPluginId, uint32_t* aIsBlocklisted) override; virtual bool RecvFindPlugins(const uint32_t& aPluginEpoch, nsresult* aRv, nsTArray<PluginTag>* aPlugins, uint32_t* aNewPluginEpoch) override; - virtual bool RecvInitVideoDecoderManager(Endpoint<PVideoDecoderManagerChild>* endpoint) override; - virtual bool RecvUngrabPointer(const uint32_t& aTime) override; virtual bool RecvRemovePermission(const IPC::Principal& aPrincipal, const nsCString& aPermissionType, nsresult* aRv) override; NS_DECL_CYCLE_COLLECTION_CLASS_AMBIGUOUS(ContentParent, nsIObserver)
--- a/dom/ipc/CrashReporterParent.cpp +++ b/dom/ipc/CrashReporterParent.cpp @@ -42,44 +42,16 @@ CrashReporterParent::ActorDestroy(ActorD bool CrashReporterParent::RecvAppendAppNotes(const nsCString& data) { mAppNotes.Append(data); return true; } -mozilla::ipc::IProtocol* -CrashReporterParent::CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) -{ -#ifdef MOZ_CRASHREPORTER - ContentParent* contentParent = aCtx->GetContentParent(); - CrashReporter::ThreadId childThreadId = contentParent->Pid(); - GeckoProcessType childProcessType = - contentParent->Process()->GetProcessType(); - - nsAutoPtr<PCrashReporterParent> actor( - contentParent->AllocPCrashReporterParent(childThreadId, - childProcessType) - ); - if (!actor || - !contentParent->RecvPCrashReporterConstructor(actor, - childThreadId, - childThreadId)) { - return nullptr; - } - - return actor.forget(); -#else - MOZ_CRASH("Not Implemented"); - return nullptr; -#endif -} - CrashReporterParent::CrashReporterParent() : #ifdef MOZ_CRASHREPORTER mNotes(4), #endif mStartTime(::time(nullptr)) , mInitialized(false) {
--- a/dom/ipc/CrashReporterParent.h +++ b/dom/ipc/CrashReporterParent.h @@ -155,20 +155,16 @@ public: const nsCString& aData) override { AnnotateCrashReport(aKey, aData); return true; } virtual bool RecvAppendAppNotes(const nsCString& aData) override; - virtual mozilla::ipc::IProtocol* - CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext *aCtx) override; - #ifdef MOZ_CRASHREPORTER void NotifyCrashService(); #endif #ifdef MOZ_CRASHREPORTER AnnotationTable mNotes; #endif
--- a/dom/ipc/PContent.ipdl +++ b/dom/ipc/PContent.ipdl @@ -424,25 +424,27 @@ both: async PWebBrowserPersistDocument(nullable PBrowser aBrowser, uint64_t aOuterWindowID); child: // Give the content process its endpoints to the compositor. async InitRendering( Endpoint<PCompositorBridgeChild> compositor, Endpoint<PImageBridgeChild> imageBridge, - Endpoint<PVRManagerChild> vr); + Endpoint<PVRManagerChild> vr, + Endpoint<PVideoDecoderManagerChild> video); // Re-create the rendering stack using the given endpoints. This is sent // after the compositor process has crashed. The new endpoints may be to a // newly launched GPU process, or the compositor thread of the UI process. async ReinitRendering( Endpoint<PCompositorBridgeChild> compositor, Endpoint<PImageBridgeChild> bridge, - Endpoint<PVRManagerChild> vr); + Endpoint<PVRManagerChild> vr, + Endpoint<PVideoDecoderManagerChild> video); /** * Enable system-level sandboxing features, if available. Can * usually only be performed zero or one times. The child may * abnormally exit if this fails; the details are OS-specific. */ async SetProcessSandbox(MaybeFileDesc aBroker); @@ -737,18 +739,16 @@ parent: async PJavaScript(); async PRemoteSpellcheckEngine(); async PDeviceStorageRequest(DeviceStorageParams params); sync PCrashReporter(NativeThreadId tid, uint32_t processType); - sync InitVideoDecoderManager() returns (Endpoint<PVideoDecoderManagerChild> endpoint); - /** * Is this token compatible with the provided version? * * |version| The offered version to test * Returns |True| if the offered version is compatible */ sync NSSU2FTokenIsCompatibleVersion(nsString version) returns (bool result);
--- a/dom/jsurl/nsJSProtocolHandler.cpp +++ b/dom/jsurl/nsJSProtocolHandler.cpp @@ -178,16 +178,17 @@ nsresult nsJSThunk::EvaluateScript(nsICh // allowed. nsCOMPtr<nsIContentSecurityPolicy> csp; rv = principal->GetCsp(getter_AddRefs(csp)); NS_ENSURE_SUCCESS(rv, rv); if (csp) { bool allowsInlineScript = true; rv = csp->GetAllowsInline(nsIContentPolicy::TYPE_SCRIPT, EmptyString(), // aNonce + false, // aParserCreated EmptyString(), // aContent 0, // aLineNumber &allowsInlineScript); //return early if inline scripts are not allowed if (!allowsInlineScript) { return NS_ERROR_DOM_RETVAL_UNDEFINED; }
--- a/dom/locales/en-US/chrome/security/csp.properties +++ b/dom/locales/en-US/chrome/security/csp.properties @@ -34,16 +34,27 @@ ignoringUnknownOption = Ignoring unknown ignoringDuplicateSrc = Ignoring duplicate source %1$S # LOCALIZATION NOTE (ignoringSrcFromMetaCSP): # %1$S defines the ignored src ignoringSrcFromMetaCSP = Ignoring source ‘%1$S’ (Not supported when delivered via meta element). # LOCALIZATION NOTE (ignoringSrcWithinScriptStyleSrc): # %1$S is the ignored src # script-src and style-src are directive names and should not be localized ignoringSrcWithinScriptStyleSrc = Ignoring “%1$S” within script-src or style-src: nonce-source or hash-source specified +# LOCALIZATION NOTE (ignoringSrcForStrictDynamic): +# %1$S is the ignored src +# script-src, as well as 'strict-dynamic' should not be localized +ignoringSrcForStrictDynamic = Ignoring “%1$S” within script-src: ‘strict-dynamic’ specified +# LOCALIZATION NOTE (ignoringStrictDynamic): +# %1$S is the ignored src +ignoringStrictDynamic = Ignoring source “%1$S” (Only supported within script-src). +# LOCALIZATION NOTE (strictDynamicButNoHashOrNonce): +# %1$S is the csp directive that contains 'strict-dynamic' +# 'strict-dynamic' should not be localized +strictDynamicButNoHashOrNonce = Keyword ‘strict-dynamic’ within “%1$S” with no valid nonce or hash might block all scripts from loading # LOCALIZATION NOTE (reportURInotHttpsOrHttp2): # %1$S is the ETLD of the report URI that is not HTTP or HTTPS reportURInotHttpsOrHttp2 = The report URI (%1$S) should be an HTTP or HTTPS URI. # LOCALIZATION NOTE (reportURInotInReportOnlyHeader): # %1$S is the ETLD of the page with the policy reportURInotInReportOnlyHeader = This site (%1$S) has a Report-Only policy without a report URI. CSP will not block and cannot report violations of this policy. # LOCALIZATION NOTE (failedToParseUnrecognizedSource): # %1$S is the CSP Source that could not be parsed
--- a/dom/media/MediaFormatReader.cpp +++ b/dom/media/MediaFormatReader.cpp @@ -1507,26 +1507,30 @@ MediaFormatReader::Update(TrackType aTra if (decoder.mNeedDraining) { DrainDecoder(aTrack); return; } if (decoder.mError && !decoder.HasFatalError()) { decoder.mDecodePending = false; - if (++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) { + bool needsNewDecoder = decoder.mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER; + if (!needsNewDecoder && ++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) { NotifyError(aTrack, decoder.mError.ref()); return; } decoder.mError.reset(); LOG("%s decoded error count %d", TrackTypeToStr(aTrack), decoder.mNumOfConsecutiveError); media::TimeUnit nextKeyframe; if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending() && NS_SUCCEEDED(decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) { + if (needsNewDecoder) { + decoder.ShutdownDecoder(); + } SkipVideoDemuxToNextKeyFrame(decoder.mLastSampleTime.refOr(TimeInterval()).Length()); return; } else if (aTrack == TrackType::kAudioTrack) { decoder.Flush(); } } bool needInput = NeedInput(decoder);
--- a/dom/media/MediaFormatReader.h +++ b/dom/media/MediaFormatReader.h @@ -323,19 +323,31 @@ private: } uint32_t mNumOfConsecutiveError; uint32_t mMaxConsecutiveError; Maybe<MediaResult> mError; bool HasFatalError() const { - return mError.isSome() && - (mError.ref() != NS_ERROR_DOM_MEDIA_DECODE_ERR || - mNumOfConsecutiveError > mMaxConsecutiveError); + if (!mError.isSome()) { + return false; + } + if (mError.ref() == NS_ERROR_DOM_MEDIA_DECODE_ERR) { + // Allow decode errors to be non-fatal, but give up + // if we have too many. + return mNumOfConsecutiveError > mMaxConsecutiveError; + } else if (mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) { + // If the caller asked for a new decoder we shouldn't treat + // it as fatal. + return false; + } else { + // All other error types are fatal + return true; + } } // If set, all decoded samples prior mTimeThreshold will be dropped. // Used for internal seeking when a change of stream is detected or when // encountering data discontinuity. Maybe<InternalSeekTarget> mTimeThreshold; // Time of last sample returned. Maybe<media::TimeInterval> mLastSampleTime;
deleted file mode 100644 --- a/dom/media/compiledtest/moz.build +++ /dev/null @@ -1,20 +0,0 @@ -# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- -# vim: set filetype=python: -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. - -GeckoCppUnitTests([ - 'TestAudioBuffers', - 'TestAudioMixer', - 'TestAudioPacketizer', - 'TestAudioSegment' -]) - -LOCAL_INCLUDES += [ - '..', -] - -USE_LIBS += [ - 'lgpllibs', -]
rename from dom/media/compiledtest/TestAudioBuffers.cpp rename to dom/media/gtest/TestAudioBuffers.cpp --- a/dom/media/compiledtest/TestAudioBuffers.cpp +++ b/dom/media/gtest/TestAudioBuffers.cpp @@ -1,59 +1,57 @@ /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include <stdint.h> #include "AudioBufferUtils.h" -#include <mozilla/Assertions.h> +#include "gtest/gtest.h" const uint32_t FRAMES = 256; const uint32_t CHANNELS = 2; const uint32_t SAMPLES = CHANNELS * FRAMES; -int main() { +TEST(AudioBuffers, Test) +{ mozilla::AudioCallbackBufferWrapper<float, CHANNELS> mBuffer; mozilla::SpillBuffer<float, 128, CHANNELS> b; float fromCallback[SAMPLES]; float other[SAMPLES]; for (uint32_t i = 0; i < SAMPLES; i++) { other[i] = 1.0; fromCallback[i] = 0.0; } // Set the buffer in the wrapper from the callback mBuffer.SetBuffer(fromCallback, FRAMES); // Fill the SpillBuffer with data. 
- MOZ_RELEASE_ASSERT(b.Fill(other, 15) == 15); - MOZ_RELEASE_ASSERT(b.Fill(other, 17) == 17); + ASSERT_TRUE(b.Fill(other, 15) == 15); + ASSERT_TRUE(b.Fill(other, 17) == 17); for (uint32_t i = 0; i < 32 * CHANNELS; i++) { other[i] = 0.0; } // Empty it in the AudioCallbackBufferWrapper - MOZ_RELEASE_ASSERT(b.Empty(mBuffer) == 32); + ASSERT_TRUE(b.Empty(mBuffer) == 32); // Check available return something reasonnable - MOZ_RELEASE_ASSERT(mBuffer.Available() == FRAMES - 32); + ASSERT_TRUE(mBuffer.Available() == FRAMES - 32); // Fill the buffer with the rest of the data mBuffer.WriteFrames(other + 32 * CHANNELS, FRAMES - 32); // Check the buffer is now full - MOZ_RELEASE_ASSERT(mBuffer.Available() == 0); + ASSERT_TRUE(mBuffer.Available() == 0); for (uint32_t i = 0 ; i < SAMPLES; i++) { - if (fromCallback[i] != 1.0) { - fprintf(stderr, "Difference at %d (%f != %f)\n", i, fromCallback[i], 1.0); - MOZ_CRASH("Samples differ"); - } + ASSERT_TRUE(fromCallback[i] == 1.0) << + "Difference at " << i << " (" << fromCallback[i] << " != " << 1.0 << + ")\n"; } - MOZ_RELEASE_ASSERT(b.Fill(other, FRAMES) == 128); - MOZ_RELEASE_ASSERT(b.Fill(other, FRAMES) == 0); - MOZ_RELEASE_ASSERT(b.Empty(mBuffer) == 0); - - return 0; + ASSERT_TRUE(b.Fill(other, FRAMES) == 128); + ASSERT_TRUE(b.Fill(other, FRAMES) == 0); + ASSERT_TRUE(b.Empty(mBuffer) == 0); }
rename from dom/media/compiledtest/TestAudioMixer.cpp rename to dom/media/gtest/TestAudioMixer.cpp --- a/dom/media/compiledtest/TestAudioMixer.cpp +++ b/dom/media/gtest/TestAudioMixer.cpp @@ -1,18 +1,21 @@ /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "AudioMixer.h" +#include "gtest/gtest.h" using mozilla::AudioDataValue; using mozilla::AudioSampleFormat; +namespace audio_mixer { + struct MixerConsumer : public mozilla::MixerCallbackReceiver { /* In this test, the different audio stream and channels are always created to * cancel each other. */ void MixerCallback(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames, uint32_t aSampleRate) { bool silent = true; for (uint32_t i = 0; i < aChannels * aFrames; i++) { @@ -20,19 +23,17 @@ struct MixerConsumer : public mozilla::M if (aFormat == mozilla::AUDIO_FORMAT_S16) { fprintf(stderr, "Sample at %d is not silent: %d\n", i, (short)aData[i]); } else { fprintf(stderr, "Sample at %d is not silent: %f\n", i, (float)aData[i]); } silent = false; } } - if (!silent) { - MOZ_CRASH(); - } + ASSERT_TRUE(silent); } }; /* Helper function to give us the maximum and minimum value that don't clip, * for a given sample format (integer or floating-point). 
*/ template<typename T> T GetLowValue(); @@ -62,17 +63,18 @@ short GetHighValue<short>() { void FillBuffer(AudioDataValue* aBuffer, uint32_t aLength, AudioDataValue aValue) { AudioDataValue* end = aBuffer + aLength; while (aBuffer != end) { *aBuffer++ = aValue; } } -int main(int argc, char* argv[]) { +TEST(AudioMixer, Test) +{ const uint32_t CHANNEL_LENGTH = 256; const uint32_t AUDIO_RATE = 44100; MixerConsumer consumer; AudioDataValue a[CHANNEL_LENGTH * 2]; AudioDataValue b[CHANNEL_LENGTH * 2]; FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>()); FillBuffer(a + CHANNEL_LENGTH, CHANNEL_LENGTH, GetHighValue<AudioDataValue>()); FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>()); @@ -153,11 +155,11 @@ int main(int argc, char* argv[]) { mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.Mix(b, 2, CHANNEL_LENGTH, AUDIO_RATE); mixer.FinishMixing(); } +} - return 0; -} +} // namespace audio_mixer
rename from dom/media/compiledtest/TestAudioPacketizer.cpp rename to dom/media/gtest/TestAudioPacketizer.cpp --- a/dom/media/compiledtest/TestAudioPacketizer.cpp +++ b/dom/media/gtest/TestAudioPacketizer.cpp @@ -1,17 +1,17 @@ /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include <stdint.h> #include <math.h> #include "../AudioPacketizer.h" -#include <mozilla/Assertions.h> +#include "gtest/gtest.h" using namespace mozilla; template<typename T> class AutoBuffer { public: explicit AutoBuffer(size_t aLength) @@ -35,39 +35,36 @@ int16_t Sequence(int16_t* aBuffer, uint3 aBuffer[i] = aStart + i; } return aStart + i; } void IsSequence(int16_t* aBuffer, uint32_t aSize, uint32_t aStart = 0) { for (uint32_t i = 0; i < aSize; i++) { - if (aBuffer[i] != static_cast<int64_t>(aStart + i)) { - fprintf(stderr, "Buffer is not a sequence at offset %u\n", i); - MOZ_CRASH("Buffer is not a sequence"); - } + ASSERT_TRUE(aBuffer[i] == static_cast<int64_t>(aStart + i)) << + "Buffer is not a sequence at offset " << i << std::endl; } // Buffer is a sequence. 
} void Zero(int16_t* aBuffer, uint32_t aSize) { for (uint32_t i = 0; i < aSize; i++) { - if (aBuffer[i] != 0) { - fprintf(stderr, "Buffer is not null at offset %u\n", i); - MOZ_CRASH("Buffer is not null"); - } + ASSERT_TRUE(aBuffer[i] == 0) << + "Buffer is not null at offset " << i << std::endl; } } double sine(uint32_t aPhase) { - return sin(aPhase * 2 * M_PI * 440 / 44100); + return sin(aPhase * 2 * M_PI * 440 / 44100); } -int main() { +TEST(AudioPacketizer, Test) +{ for (int16_t channels = 1; channels < 2; channels++) { // Test that the packetizer returns zero on underrun { AudioPacketizer<int16_t, int16_t> ap(441, channels); for (int16_t i = 0; i < 10; i++) { int16_t* out = ap.Output(); Zero(out, 441); delete[] out; @@ -152,22 +149,19 @@ int main() { } phase++; } ap.Input(b.Get(), 128); while (ap.PacketsAvailable()) { int16_t* packet = ap.Output(); for (uint32_t k = 0; k < ap.PacketSize(); k++) { for (int32_t c = 0; c < channels; c++) { - MOZ_RELEASE_ASSERT(packet[k * channels + c] == - static_cast<int16_t>(((2 << 14) * sine(outPhase)))); + ASSERT_TRUE(packet[k * channels + c] == + static_cast<int16_t>(((2 << 14) * sine(outPhase)))); } outPhase++; } delete [] packet; } } } } - - printf("OK\n"); - return 0; }
rename from dom/media/compiledtest/TestAudioSegment.cpp rename to dom/media/gtest/TestAudioSegment.cpp --- a/dom/media/compiledtest/TestAudioSegment.cpp +++ b/dom/media/gtest/TestAudioSegment.cpp @@ -1,26 +1,20 @@ /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "AudioSegment.h" #include <iostream> -#include <mozilla/Assertions.h> +#include "gtest/gtest.h" using namespace mozilla; -namespace mozilla { -uint32_t -GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2) -{ - return std::max(aChannels1, aChannels2); -} -} +namespace audio_segment { /* Helper function to give us the maximum and minimum value that don't clip, * for a given sample format (integer or floating-point). */ template<typename T> T GetLowValue(); template<typename T> T GetHighValue(); @@ -137,18 +131,18 @@ void TestInterleaveAndConvert() for (uint32_t channels = 1; channels < maxChannels; channels++) { const SrcT* const* src = GetPlanarChannelArray<SrcT>(channels, arraySize); DstT* dst = new DstT[channels * arraySize]; InterleaveAndConvertBuffer(src, arraySize, 1.0, channels, dst); uint32_t channelIndex = 0; for (size_t i = 0; i < arraySize * channels; i++) { - MOZ_RELEASE_ASSERT(FuzzyEqual(dst[i], - FloatToAudioSample<DstT>(1. / (channelIndex + 1)))); + ASSERT_TRUE(FuzzyEqual(dst[i], + FloatToAudioSample<DstT>(1. 
/ (channelIndex + 1)))); channelIndex++; channelIndex %= channels; } DeletePlanarChannelsArray(src, channels); delete [] dst; } } @@ -161,18 +155,18 @@ void TestDeinterleaveAndConvert() for (uint32_t channels = 1; channels < maxChannels; channels++) { const SrcT* src = GetInterleavedChannelArray<SrcT>(channels, arraySize); DstT** dst = GetPlanarArray<DstT>(channels, arraySize); DeinterleaveAndConvertBuffer(src, arraySize, channels, dst); for (size_t channel = 0; channel < channels; channel++) { for (size_t i = 0; i < arraySize; i++) { - MOZ_RELEASE_ASSERT(FuzzyEqual(dst[channel][i], - FloatToAudioSample<DstT>(1. / (channel + 1)))); + ASSERT_TRUE(FuzzyEqual(dst[channel][i], + FloatToAudioSample<DstT>(1. / (channel + 1)))); } } DeleteInterleavedChannelArray(src); DeletePlanarArray(dst, channels); } } @@ -196,21 +190,21 @@ void TestUpmixStereo() channels[0] = new T[arraySize]; for (size_t i = 0; i < arraySize; i++) { channels[0][i] = GetHighValue<T>(); } channelsptr[0] = channels[0]; - AudioChannelsUpMix(&channelsptr, 2, ::SilentChannel<T>()); + AudioChannelsUpMix(&channelsptr, 2, SilentChannel<T>()); for (size_t channel = 0; channel < 2; channel++) { for (size_t i = 0; i < arraySize; i++) { - MOZ_RELEASE_ASSERT(channelsptr[channel][i] == GetHighValue<T>()); + ASSERT_TRUE(channelsptr[channel][i] == GetHighValue<T>()); } } delete channels[0]; } template<typename T> void TestDownmixStereo() { @@ -231,32 +225,33 @@ void TestDownmixStereo() input[channel][i] = channel == 0 ? 
GetLowValue<T>() : GetHighValue<T>(); } inputptr[channel] = input[channel]; } AudioChannelsDownMix(inputptr, output, 1, arraySize); for (size_t i = 0; i < arraySize; i++) { - MOZ_RELEASE_ASSERT(output[0][i] == GetSilentValue<T>()); - MOZ_RELEASE_ASSERT(output[0][i] == GetSilentValue<T>()); + ASSERT_TRUE(output[0][i] == GetSilentValue<T>()); + ASSERT_TRUE(output[0][i] == GetSilentValue<T>()); } delete output[0]; delete output; } -int main(int argc, char* argv[]) { +TEST(AudioSegment, Test) +{ TestInterleaveAndConvert<float, float>(); TestInterleaveAndConvert<float, int16_t>(); TestInterleaveAndConvert<int16_t, float>(); TestInterleaveAndConvert<int16_t, int16_t>(); TestDeinterleaveAndConvert<float, float>(); TestDeinterleaveAndConvert<float, int16_t>(); TestDeinterleaveAndConvert<int16_t, float>(); TestDeinterleaveAndConvert<int16_t, int16_t>(); TestUpmixStereo<float>(); TestUpmixStereo<int16_t>(); TestDownmixStereo<float>(); TestDownmixStereo<int16_t>(); +} - return 0; -} +} // namespace audio_segment
--- a/dom/media/gtest/TestRust.cpp +++ b/dom/media/gtest/TestRust.cpp @@ -1,8 +1,9 @@ #include <stdint.h> +#include "gtest/gtest.h" extern "C" uint8_t* test_rust(); TEST(rust, CallFromCpp) { auto greeting = test_rust(); EXPECT_STREQ(reinterpret_cast<char*>(greeting), "hello from rust."); }
--- a/dom/media/gtest/moz.build +++ b/dom/media/gtest/moz.build @@ -1,17 +1,21 @@ # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- # vim: set filetype=python: # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. UNIFIED_SOURCES += [ 'MockMediaResource.cpp', + 'TestAudioBuffers.cpp', 'TestAudioCompactor.cpp', + 'TestAudioMixer.cpp', + 'TestAudioPacketizer.cpp', + 'TestAudioSegment.cpp', 'TestGMPCrossOrigin.cpp', 'TestGMPRemoveAndDelete.cpp', 'TestGMPUtils.cpp', 'TestIntervalSet.cpp', 'TestMediaDataDecoder.cpp', 'TestMediaEventSource.cpp', 'TestMediaFormatReader.cpp', 'TestMozPromise.cpp',
--- a/dom/media/ipc/RemoteVideoDecoder.cpp +++ b/dom/media/ipc/RemoteVideoDecoder.cpp @@ -4,16 +4,17 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "RemoteVideoDecoder.h" #include "VideoDecoderChild.h" #include "VideoDecoderManagerChild.h" #include "mozilla/layers/TextureClient.h" #include "base/thread.h" #include "MediaInfo.h" +#include "MediaPrefs.h" #include "ImageContainer.h" namespace mozilla { namespace dom { using base::Thread; using namespace ipc; using namespace layers; @@ -142,17 +143,18 @@ PlatformDecoderModule::ConversionRequire RemoteDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const { return mWrapped->DecoderNeedsConversion(aConfig); } already_AddRefed<MediaDataDecoder> RemoteDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams) { - if (!aParams.mKnowsCompositor || + if (!MediaPrefs::PDMUseGPUDecoder() || + !aParams.mKnowsCompositor || aParams.mKnowsCompositor->GetTextureFactoryIdentifier().mParentProcessType != GeckoProcessType_GPU) { return nullptr; } MediaDataDecoderCallback* callback = aParams.mCallback; MOZ_ASSERT(callback->OnReaderTaskQueue()); RefPtr<RemoteVideoDecoder> object = new RemoteVideoDecoder(callback);
--- a/dom/media/ipc/VideoDecoderChild.cpp +++ b/dom/media/ipc/VideoDecoderChild.cpp @@ -98,34 +98,45 @@ VideoDecoderChild::RecvInitFailed(const mInitPromise.Reject(aReason, __func__); return true; } void VideoDecoderChild::ActorDestroy(ActorDestroyReason aWhy) { if (aWhy == AbnormalShutdown) { - if (mInitialized) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); - } else { - mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); - } + // Defer reporting an error until we've recreated the manager so that + // it'll be safe for MediaFormatReader to recreate decoders + RefPtr<VideoDecoderChild> ref = this; + GetManager()->RunWhenRecreated(NS_NewRunnableFunction([=]() { + if (ref->mInitialized) { + ref->mCallback->Error(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER); + } else { + ref->mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, __func__); + } + })); } mCanSend = false; } void VideoDecoderChild::InitIPDL(MediaDataDecoderCallback* aCallback, const VideoInfo& aVideoInfo, layers::KnowsCompositor* aKnowsCompositor) { RefPtr<VideoDecoderManagerChild> manager = VideoDecoderManagerChild::GetSingleton(); - if (!manager) { + // If the manager isn't available, then don't initialize mIPDLSelfRef and leave + // us in an error state. We'll then immediately reject the promise when Init() + // is called and the caller can try again. Hopefully by then the new manager is + // ready, or we've notified the caller of it being no longer available. + // If not, then the cycle repeats until we're ready. 
+ if (!manager || !manager->CanSend()) { return; } + mIPDLSelfRef = this; mCallback = aCallback; mVideoInfo = aVideoInfo; mKnowsCompositor = aKnowsCompositor; if (manager->SendPVideoDecoderConstructor(this)) { mCanSend = true; } } @@ -145,96 +156,99 @@ VideoDecoderChild::IPDLActorDestroyed() } // MediaDataDecoder methods RefPtr<MediaDataDecoder::InitPromise> VideoDecoderChild::Init() { AssertOnManagerThread(); - if (!mCanSend || !SendInit(mVideoInfo, mKnowsCompositor->GetTextureFactoryIdentifier())) { + + if (!mIPDLSelfRef) { return MediaDataDecoder::InitPromise::CreateAndReject( - NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); + NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__); + } + // If we failed to send this, then we'll still resolve the Init promise + // as ActorDestroy handles it. + if (mCanSend) { + SendInit(mVideoInfo, mKnowsCompositor->GetTextureFactoryIdentifier()); } return mInitPromise.Ensure(__func__); } void VideoDecoderChild::Input(MediaRawData* aSample) { AssertOnManagerThread(); if (!mCanSend) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); return; } // TODO: It would be nice to add an allocator method to // MediaDataDecoder so that the demuxer could write directly // into shmem rather than requiring a copy here. 
Shmem buffer; if (!AllocShmem(aSample->Size(), Shmem::SharedMemory::TYPE_BASIC, &buffer)) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); + mCallback->Error(NS_ERROR_DOM_MEDIA_DECODE_ERR); return; } memcpy(buffer.get<uint8_t>(), aSample->Data(), aSample->Size()); MediaRawDataIPDL sample(MediaDataIPDL(aSample->mOffset, aSample->mTime, aSample->mTimecode, aSample->mDuration, aSample->mFrames, aSample->mKeyframe), buffer); - if (!SendInput(sample)) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); - } + SendInput(sample); } void VideoDecoderChild::Flush() { AssertOnManagerThread(); - if (!mCanSend || !SendFlush()) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); + if (mCanSend) { + SendFlush(); } } void VideoDecoderChild::Drain() { AssertOnManagerThread(); - if (!mCanSend || !SendDrain()) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); + if (mCanSend) { + SendDrain(); } } void VideoDecoderChild::Shutdown() { AssertOnManagerThread(); - if (!mCanSend || !SendShutdown()) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); + if (mCanSend) { + SendShutdown(); } mInitialized = false; } bool VideoDecoderChild::IsHardwareAccelerated(nsACString& aFailureReason) const { aFailureReason = mHardwareAcceleratedReason; return mIsHardwareAccelerated; } void VideoDecoderChild::SetSeekThreshold(const media::TimeUnit& aTime) { AssertOnManagerThread(); - if (!mCanSend || !SendSetSeekThreshold(aTime.ToMicroseconds())) { - mCallback->Error(NS_ERROR_DOM_MEDIA_FATAL_ERR); + if (mCanSend) { + SendSetSeekThreshold(aTime.ToMicroseconds()); } } void VideoDecoderChild::AssertOnManagerThread() { MOZ_ASSERT(NS_GetCurrentThread() == mThread); }
--- a/dom/media/ipc/VideoDecoderManagerChild.cpp +++ b/dom/media/ipc/VideoDecoderManagerChild.cpp @@ -8,109 +8,98 @@ #include "mozilla/dom/ContentChild.h" #include "MediaPrefs.h" #include "nsThreadUtils.h" #include "mozilla/gfx/2D.h" #include "mozilla/ipc/ProtocolUtils.h" #include "mozilla/layers/SynchronousTask.h" #include "mozilla/gfx/DataSurfaceHelpers.h" #include "mozilla/layers/ISurfaceAllocator.h" +#include "base/task.h" namespace mozilla { namespace dom { using namespace ipc; using namespace layers; using namespace gfx; // Only modified on the main-thread StaticRefPtr<nsIThread> sVideoDecoderChildThread; StaticRefPtr<AbstractThread> sVideoDecoderChildAbstractThread; // Only accessed from sVideoDecoderChildThread static StaticRefPtr<VideoDecoderManagerChild> sDecoderManager; +static UniquePtr<nsTArray<RefPtr<Runnable>>> sRecreateTasks; /* static */ void -VideoDecoderManagerChild::Initialize() +VideoDecoderManagerChild::InitializeThread() { MOZ_ASSERT(NS_IsMainThread()); - MediaPrefs::GetSingleton(); - -#ifdef XP_WIN - if (!MediaPrefs::PDMUseGPUDecoder()) { - return; - } - - // Can't run remote video decoding in the parent process. 
- if (!ContentChild::GetSingleton()) { - return; - } - if (!sVideoDecoderChildThread) { RefPtr<nsIThread> childThread; nsresult rv = NS_NewNamedThread("VideoChild", getter_AddRefs(childThread)); NS_ENSURE_SUCCESS_VOID(rv); sVideoDecoderChildThread = childThread; sVideoDecoderChildAbstractThread = AbstractThread::CreateXPCOMThreadWrapper(childThread, false); + + sRecreateTasks = MakeUnique<nsTArray<RefPtr<Runnable>>>(); } -#else - return; -#endif +} +/* static */ void +VideoDecoderManagerChild::InitForContent(Endpoint<PVideoDecoderManagerChild>&& aVideoManager) +{ + InitializeThread(); + sVideoDecoderChildThread->Dispatch(NewRunnableFunction(&Open, Move(aVideoManager)), NS_DISPATCH_NORMAL); } /* static */ void VideoDecoderManagerChild::Shutdown() { MOZ_ASSERT(NS_IsMainThread()); if (sVideoDecoderChildThread) { sVideoDecoderChildThread->Dispatch(NS_NewRunnableFunction([]() { - if (sDecoderManager) { + if (sDecoderManager && sDecoderManager->CanSend()) { sDecoderManager->Close(); sDecoderManager = nullptr; } }), NS_DISPATCH_NORMAL); sVideoDecoderChildAbstractThread = nullptr; sVideoDecoderChildThread->Shutdown(); sVideoDecoderChildThread = nullptr; + + sRecreateTasks = nullptr; } } +void +VideoDecoderManagerChild::RunWhenRecreated(already_AddRefed<Runnable> aTask) +{ + MOZ_ASSERT(NS_GetCurrentThread() == GetManagerThread()); + + // If we've already been recreated, then run the task immediately. 
+ if (sDecoderManager && sDecoderManager != this && sDecoderManager->CanSend()) { + RefPtr<Runnable> task = aTask; + task->Run(); + } else { + sRecreateTasks->AppendElement(aTask); + } +} + + /* static */ VideoDecoderManagerChild* VideoDecoderManagerChild::GetSingleton() { MOZ_ASSERT(NS_GetCurrentThread() == GetManagerThread()); - - if (!sDecoderManager || !sDecoderManager->mCanSend) { - RefPtr<VideoDecoderManagerChild> manager; - - NS_DispatchToMainThread(NS_NewRunnableFunction([&]() { - Endpoint<PVideoDecoderManagerChild> endpoint; - if (!ContentChild::GetSingleton()->SendInitVideoDecoderManager(&endpoint)) { - return; - } - - if (!endpoint.IsValid()) { - return; - } - - manager = new VideoDecoderManagerChild(); - - RefPtr<Runnable> task = NewRunnableMethod<Endpoint<PVideoDecoderManagerChild>&&>( - manager, &VideoDecoderManagerChild::Open, Move(endpoint)); - sVideoDecoderChildThread->Dispatch(task.forget(), NS_DISPATCH_NORMAL); - }), NS_DISPATCH_SYNC); - - sDecoderManager = manager; - } return sDecoderManager; } /* static */ nsIThread* VideoDecoderManagerChild::GetManagerThread() { return sVideoDecoderChildThread; } @@ -133,43 +122,66 @@ VideoDecoderManagerChild::DeallocPVideoD VideoDecoderChild* child = static_cast<VideoDecoderChild*>(actor); child->IPDLActorDestroyed(); return true; } void VideoDecoderManagerChild::Open(Endpoint<PVideoDecoderManagerChild>&& aEndpoint) { - if (!aEndpoint.Bind(this)) { - return; + // Make sure we always dispatch everything in sRecreateTasks, even if we + // fail since this is as close to being recreated as we will ever be. 
+ sDecoderManager = nullptr; + if (aEndpoint.IsValid()) { + RefPtr<VideoDecoderManagerChild> manager = new VideoDecoderManagerChild(); + if (aEndpoint.Bind(manager)) { + sDecoderManager = manager; + manager->InitIPDL(); + } } - AddRef(); + for (Runnable* task : *sRecreateTasks) { + task->Run(); + } + sRecreateTasks->Clear(); +} + +void +VideoDecoderManagerChild::InitIPDL() +{ mCanSend = true; + mIPDLSelfRef = this; } void VideoDecoderManagerChild::ActorDestroy(ActorDestroyReason aWhy) { mCanSend = false; } void VideoDecoderManagerChild::DeallocPVideoDecoderManagerChild() { - Release(); + mIPDLSelfRef = nullptr; +} + +bool +VideoDecoderManagerChild::CanSend() +{ + MOZ_ASSERT(NS_GetCurrentThread() == GetManagerThread()); + return mCanSend; } bool VideoDecoderManagerChild::DeallocShmem(mozilla::ipc::Shmem& aShmem) { if (NS_GetCurrentThread() != sVideoDecoderChildThread) { RefPtr<VideoDecoderManagerChild> self = this; mozilla::ipc::Shmem shmem = aShmem; sVideoDecoderChildThread->Dispatch(NS_NewRunnableFunction([self, shmem]() { - if (self->mCanSend) { + if (self->CanSend()) { mozilla::ipc::Shmem shmemCopy = shmem; self->DeallocShmem(shmemCopy); } }), NS_DISPATCH_NORMAL); return true; } return PVideoDecoderManagerChild::DeallocShmem(aShmem); } @@ -202,17 +214,17 @@ VideoDecoderManagerChild::Readback(const // loop while it waits. This function can be called from JS and we // don't want that to happen. 
SynchronousTask task("Readback sync"); RefPtr<VideoDecoderManagerChild> ref = this; SurfaceDescriptor sd; sVideoDecoderChildThread->Dispatch(NS_NewRunnableFunction([&]() { AutoCompleteTask complete(&task); - if (ref->mCanSend) { + if (ref->CanSend()) { ref->SendReadback(aSD, &sd); } }), NS_DISPATCH_NORMAL); task.Wait(); if (!IsSurfaceDescriptorValid(sd)) { return nullptr; @@ -234,22 +246,22 @@ VideoDecoderManagerChild::Readback(const } void VideoDecoderManagerChild::DeallocateSurfaceDescriptorGPUVideo(const SurfaceDescriptorGPUVideo& aSD) { RefPtr<VideoDecoderManagerChild> ref = this; SurfaceDescriptorGPUVideo sd = Move(aSD); sVideoDecoderChildThread->Dispatch(NS_NewRunnableFunction([ref, sd]() { - if (ref->mCanSend) { + if (ref->CanSend()) { ref->SendDeallocateSurfaceDescriptorGPUVideo(sd); } }), NS_DISPATCH_NORMAL); } void -VideoDecoderManagerChild::FatalError(const char* const aName, const char* const aMsg) const +VideoDecoderManagerChild::HandleFatalError(const char* aName, const char* aMsg) const { dom::ContentChild::FatalErrorIfNotUsingGPUProcess(aName, aMsg, OtherPid()); } } // namespace dom } // namespace mozilla
--- a/dom/media/ipc/VideoDecoderManagerChild.h +++ b/dom/media/ipc/VideoDecoderManagerChild.h @@ -46,35 +46,50 @@ public: return PVideoDecoderManagerChild::AllocUnsafeShmem(aSize, aShmType, aShmem); } // Can be called from any thread, dispatches the request to the IPDL thread internally // and will be ignored if the IPDL actor has been destroyed. bool DeallocShmem(mozilla::ipc::Shmem& aShmem) override; // Main thread only - static void Initialize(); + static void InitForContent(Endpoint<PVideoDecoderManagerChild>&& aVideoManager); static void Shutdown(); + // Run aTask (on the manager thread) when we next attempt to create a new manager + // (even if creation fails). Intended to be called from ActorDestroy when we get + // notified that the old manager is being destroyed. + // Can only be called from the manager thread. + void RunWhenRecreated(already_AddRefed<Runnable> aTask); + + bool CanSend(); + protected: + void InitIPDL(); + void ActorDestroy(ActorDestroyReason aWhy) override; void DeallocPVideoDecoderManagerChild() override; - void FatalError(const char* const aName, const char* const aMsg) const override; + void HandleFatalError(const char* aName, const char* aMsg) const override; PVideoDecoderChild* AllocPVideoDecoderChild() override; bool DeallocPVideoDecoderChild(PVideoDecoderChild* actor) override; private: + // Main thread only + static void InitializeThread(); + VideoDecoderManagerChild() : mCanSend(false) {} ~VideoDecoderManagerChild() {} - void Open(Endpoint<PVideoDecoderManagerChild>&& aEndpoint); + static void Open(Endpoint<PVideoDecoderManagerChild>&& aEndpoint); + + RefPtr<VideoDecoderManagerChild> mIPDLSelfRef; // Should only ever be accessed on the manager thread. bool mCanSend; }; } // namespace dom } // namespace mozilla
--- a/dom/media/ipc/VideoDecoderManagerParent.h +++ b/dom/media/ipc/VideoDecoderManagerParent.h @@ -28,17 +28,17 @@ public: protected: PVideoDecoderParent* AllocPVideoDecoderParent() override; bool DeallocPVideoDecoderParent(PVideoDecoderParent* actor) override; bool RecvReadback(const SurfaceDescriptorGPUVideo& aSD, SurfaceDescriptor* aResult) override; bool RecvDeallocateSurfaceDescriptorGPUVideo(const SurfaceDescriptorGPUVideo& aSD) override; - void ActorDestroy(mozilla::ipc::IProtocolManager<mozilla::ipc::IProtocol>::ActorDestroyReason) override {} + void ActorDestroy(mozilla::ipc::IProtocol::ActorDestroyReason) override {} void DeallocPVideoDecoderManagerParent() override; private: VideoDecoderManagerParent(); ~VideoDecoderManagerParent(); void Open(Endpoint<PVideoDecoderManagerParent>&& aEndpoint);
--- a/dom/media/moz.build +++ b/dom/media/moz.build @@ -50,17 +50,16 @@ if CONFIG['MOZ_ANDROID_OMX']: if CONFIG['MOZ_FMP4']: DIRS += ['fmp4'] if CONFIG['MOZ_WEBRTC']: DIRS += ['bridge'] TEST_DIRS += [ - 'compiledtest', 'gtest', ] MOCHITEST_MANIFESTS += [ 'test/mochitest.ini', 'tests/mochitest/identity/mochitest.ini', ]
--- a/dom/media/systemservices/MediaParent.h +++ b/dom/media/systemservices/MediaParent.h @@ -17,17 +17,17 @@ namespace media { // media::Parent implements the chrome-process side of ipc for media::Child APIs // A same-process version may also be created to service non-e10s calls. class OriginKeyStore; class NonE10s { - typedef mozilla::ipc::IProtocolManager<mozilla::ipc::IProtocol>::ActorDestroyReason + typedef mozilla::ipc::IProtocol::ActorDestroyReason ActorDestroyReason; public: virtual ~NonE10s() {} protected: virtual bool RecvGetOriginKey(const uint32_t& aRequestId, const nsCString& aOrigin, const bool& aPrivateBrowsing, const bool& aPersist) = 0; @@ -40,17 +40,17 @@ protected: nsCString aKey); }; // Super = PMediaParent or NonE10s template<class Super> class Parent : public Super { - typedef mozilla::ipc::IProtocolManager<mozilla::ipc::IProtocol>::ActorDestroyReason + typedef mozilla::ipc::IProtocol::ActorDestroyReason ActorDestroyReason; public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Parent<Super>) virtual bool RecvGetOriginKey(const uint32_t& aRequestId, const nsCString& aOrigin, const bool& aPrivateBrowsing, const bool& aPersist) override;
--- a/dom/permission/PermissionObserver.cpp +++ b/dom/permission/PermissionObserver.cpp @@ -81,17 +81,17 @@ PermissionObserver::RemoveSink(Permissio void PermissionObserver::Notify(PermissionName aName, nsIPrincipal& aPrincipal) { for (auto* sink : mSinks) { if (sink->mName != aName) { continue; } - nsIPrincipal* sinkPrincipal = sink->GetPrincipal(); + nsCOMPtr<nsIPrincipal> sinkPrincipal = sink->GetPrincipal(); if (NS_WARN_IF(!sinkPrincipal) || !aPrincipal.Equals(sinkPrincipal)) { continue; } sink->PermissionChanged(); } }
--- a/dom/permission/PermissionStatus.cpp +++ b/dom/permission/PermissionStatus.cpp @@ -88,30 +88,34 @@ PermissionStatus::UpdateState() if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } mState = ActionToPermissionState(action); return NS_OK; } -nsIPrincipal* +already_AddRefed<nsIPrincipal> PermissionStatus::GetPrincipal() const { nsCOMPtr<nsPIDOMWindowInner> window = GetOwner(); if (NS_WARN_IF(!window)) { return nullptr; } nsIDocument* doc = window->GetExtantDoc(); if (NS_WARN_IF(!doc)) { return nullptr; } - return doc->NodePrincipal(); + nsCOMPtr<nsIPrincipal> principal = + mozilla::BasePrincipal::Cast(doc->NodePrincipal())->CloneStrippingUserContextIdAndFirstPartyDomain(); + NS_ENSURE_TRUE(principal, nullptr); + + return principal.forget(); } void PermissionStatus::PermissionChanged() { auto oldState = mState; UpdateState(); if (mState != oldState) {
--- a/dom/permission/PermissionStatus.h +++ b/dom/permission/PermissionStatus.h @@ -37,17 +37,17 @@ private: ~PermissionStatus(); PermissionStatus(nsPIDOMWindowInner* aWindow, PermissionName aName); nsresult Init(); nsresult UpdateState(); - nsIPrincipal* GetPrincipal() const; + already_AddRefed<nsIPrincipal> GetPrincipal() const; void PermissionChanged(); PermissionName mName; PermissionState mState; RefPtr<PermissionObserver> mObserver; };
--- a/dom/plugins/ipc/PluginMessageUtils.h +++ b/dom/plugins/ipc/PluginMessageUtils.h @@ -5,18 +5,19 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef DOM_PLUGINS_PLUGINMESSAGEUTILS_H #define DOM_PLUGINS_PLUGINMESSAGEUTILS_H #include "ipc/IPCMessageUtils.h" #include "base/message_loop.h" +#include "mozilla/ipc/CrossProcessMutex.h" #include "mozilla/ipc/MessageChannel.h" -#include "mozilla/ipc/CrossProcessMutex.h" +#include "mozilla/ipc/ProtocolUtils.h" #include "mozilla/UniquePtr.h" #include "gfxipc/ShadowLayerUtils.h" #include "npapi.h" #include "npruntime.h" #include "npfunctions.h" #include "nsString.h" #include "nsTArray.h"
--- a/dom/plugins/ipc/PluginModuleParent.cpp +++ b/dom/plugins/ipc/PluginModuleParent.cpp @@ -1031,18 +1031,17 @@ PluginModuleChromeParent::GetInvokingPro * * This function needs to be updated if the subprotocols are modified in * PPluginInstance.ipdl. */ PluginInstanceParent* PluginModuleChromeParent::GetManagingInstance(mozilla::ipc::IProtocol* aProtocol) { MOZ_ASSERT(aProtocol); - mozilla::ipc::MessageListener* listener = - static_cast<mozilla::ipc::MessageListener*>(aProtocol); + mozilla::ipc::IProtocol* listener = aProtocol; switch (listener->GetProtocolTypeId()) { case PPluginInstanceMsgStart: // In this case, aProtocol is the instance itself. Just cast it. return static_cast<PluginInstanceParent*>(aProtocol); case PPluginBackgroundDestroyerMsgStart: { PPluginBackgroundDestroyerParent* actor = static_cast<PPluginBackgroundDestroyerParent*>(aProtocol); return static_cast<PluginInstanceParent*>(actor->Manager());
--- a/dom/presentation/PresentationAvailability.cpp +++ b/dom/presentation/PresentationAvailability.cpp @@ -48,33 +48,36 @@ PresentationAvailability::Create(nsPIDOM } PresentationAvailability::PresentationAvailability(nsPIDOMWindowInner* aWindow, const nsTArray<nsString>& aUrls) : DOMEventTargetHelper(aWindow) , mIsAvailable(false) , mUrls(aUrls) { + for (uint32_t i = 0; i < mUrls.Length(); ++i) { + mAvailabilityOfUrl.AppendElement(false); + } } PresentationAvailability::~PresentationAvailability() { Shutdown(); } bool PresentationAvailability::Init(RefPtr<Promise>& aPromise) { nsCOMPtr<nsIPresentationService> service = do_GetService(PRESENTATION_SERVICE_CONTRACTID); if (NS_WARN_IF(!service)) { return false; } - nsresult rv = service->RegisterAvailabilityListener(this); + nsresult rv = service->RegisterAvailabilityListener(mUrls, this); if (NS_WARN_IF(NS_FAILED(rv))) { // If the user agent is unable to monitor available device, // Resolve promise with |value| set to false. mIsAvailable = false; aPromise->MaybeResolve(this); return true; } @@ -97,17 +100,18 @@ void PresentationAvailability::Shutdown( nsCOMPtr<nsIPresentationService> service = do_GetService(PRESENTATION_SERVICE_CONTRACTID); if (NS_WARN_IF(!service)) { return; } Unused << - NS_WARN_IF(NS_FAILED(service->UnregisterAvailabilityListener(this))); + NS_WARN_IF(NS_FAILED(service->UnregisterAvailabilityListener(mUrls, + this))); } /* virtual */ void PresentationAvailability::DisconnectFromOwner() { Shutdown(); DOMEventTargetHelper::DisconnectFromOwner(); } @@ -152,22 +156,31 @@ PresentationAvailability::EnqueuePromise bool PresentationAvailability::Value() const { return mIsAvailable; } NS_IMETHODIMP -PresentationAvailability::NotifyAvailableChange(bool aIsAvailable) +PresentationAvailability::NotifyAvailableChange(const nsTArray<nsString>& aAvailabilityUrls, + bool aIsAvailable) { + bool available = false; + for (uint32_t i = 0; i < mUrls.Length(); ++i) { + if (aAvailabilityUrls.Contains(mUrls[i])) { + 
mAvailabilityOfUrl[i] = aIsAvailable; + } + available |= mAvailabilityOfUrl[i]; + } + return NS_DispatchToCurrentThread(NewRunnableMethod <bool>(this, &PresentationAvailability::UpdateAvailabilityAndDispatchEvent, - aIsAvailable)); + available)); } void PresentationAvailability::UpdateAvailabilityAndDispatchEvent(bool aIsAvailable) { PRES_DEBUG("%s\n", __func__); bool isChanged = (aIsAvailable != mIsAvailable);
--- a/dom/presentation/PresentationAvailability.h +++ b/dom/presentation/PresentationAvailability.h @@ -60,14 +60,15 @@ private: void UpdateAvailabilityAndDispatchEvent(bool aIsAvailable); bool mIsAvailable; nsTArray<RefPtr<Promise>> mPromises; nsTArray<nsString> mUrls; + nsTArray<bool> mAvailabilityOfUrl; }; } // namespace dom } // namespace mozilla #endif // mozilla_dom_PresentationAvailability_h
--- a/dom/presentation/PresentationService.cpp +++ b/dom/presentation/PresentationService.cpp @@ -21,19 +21,16 @@ #include "nsISupportsPrimitives.h" #include "nsNetUtil.h" #include "nsServiceManagerUtils.h" #include "nsThreadUtils.h" #include "nsXPCOMCID.h" #include "nsXULAppAPI.h" #include "PresentationLog.h" -using namespace mozilla; -using namespace mozilla::dom; - namespace mozilla { namespace dom { static bool IsSameDevice(nsIPresentationDevice* aDevice, nsIPresentationDevice* aDeviceAnother) { if (!aDevice || !aDeviceAnother) { return false; } @@ -127,19 +124,16 @@ private: nsWeakPtr mChromeEventHandler; nsCOMPtr<nsIPrincipal> mPrincipal; nsCOMPtr<nsIPresentationServiceCallback> mCallback; nsCOMPtr<nsIPresentationTransportBuilderConstructor> mBuilderConstructor; }; LazyLogModule gPresentationLog("Presentation"); -} // namespace dom -} // namespace mozilla - NS_IMPL_ISUPPORTS(PresentationDeviceRequest, nsIPresentationDeviceRequest) PresentationDeviceRequest::PresentationDeviceRequest( const nsTArray<nsString>& aUrls, const nsAString& aId, const nsAString& aOrigin, uint64_t aWindowId, nsIDOMEventTarget* aEventTarget, @@ -271,17 +265,16 @@ PresentationDeviceRequest::Cancel(nsresu * Implementation of PresentationService */ NS_IMPL_ISUPPORTS(PresentationService, nsIPresentationService, nsIObserver) PresentationService::PresentationService() - : mIsAvailable(false) { } PresentationService::~PresentationService() { HandleShutdown(); } @@ -311,36 +304,42 @@ PresentationService::Init() if (NS_WARN_IF(NS_FAILED(rv))) { return false; } rv = obs->AddObserver(this, PRESENTATION_RECONNECT_REQUEST_TOPIC, false); if (NS_WARN_IF(NS_FAILED(rv))) { return false; } - nsCOMPtr<nsIPresentationDeviceManager> deviceManager = - do_GetService(PRESENTATION_DEVICE_MANAGER_CONTRACTID); - if (NS_WARN_IF(!deviceManager)) { - return false; - } - - rv = deviceManager->GetDeviceAvailable(&mIsAvailable); return !NS_WARN_IF(NS_FAILED(rv)); } NS_IMETHODIMP 
PresentationService::Observe(nsISupports* aSubject, const char* aTopic, const char16_t* aData) { if (!strcmp(aTopic, NS_XPCOM_SHUTDOWN_OBSERVER_ID)) { HandleShutdown(); return NS_OK; } else if (!strcmp(aTopic, PRESENTATION_DEVICE_CHANGE_TOPIC)) { - return HandleDeviceChange(); + // Ignore the "update" case here, since we only care about the arrival and + // removal of the device. + if (!NS_strcmp(aData, u"add")) { + nsCOMPtr<nsIPresentationDevice> device = do_QueryInterface(aSubject); + if (NS_WARN_IF(!device)) { + return NS_ERROR_FAILURE; + } + + return HandleDeviceAdded(device); + } else if(!NS_strcmp(aData, u"remove")) { + return HandleDeviceRemoved(); + } + + return NS_OK; } else if (!strcmp(aTopic, PRESENTATION_SESSION_REQUEST_TOPIC)) { nsCOMPtr<nsIPresentationSessionRequest> request(do_QueryInterface(aSubject)); if (NS_WARN_IF(!request)) { return NS_ERROR_FAILURE; } return HandleSessionRequest(request); } else if (!strcmp(aTopic, PRESENTATION_TERMINATE_REQUEST_TOPIC)) { @@ -369,53 +368,114 @@ PresentationService::Observe(nsISupports void PresentationService::HandleShutdown() { MOZ_ASSERT(NS_IsMainThread()); Shutdown(); - mAvailabilityListeners.Clear(); + mAvailabilityManager.Clear(); mSessionInfoAtController.Clear(); mSessionInfoAtReceiver.Clear(); nsCOMPtr<nsIObserverService> obs = services::GetObserverService(); if (obs) { obs->RemoveObserver(this, NS_XPCOM_SHUTDOWN_OBSERVER_ID); obs->RemoveObserver(this, PRESENTATION_DEVICE_CHANGE_TOPIC); obs->RemoveObserver(this, PRESENTATION_SESSION_REQUEST_TOPIC); obs->RemoveObserver(this, PRESENTATION_TERMINATE_REQUEST_TOPIC); obs->RemoveObserver(this, PRESENTATION_RECONNECT_REQUEST_TOPIC); } } nsresult -PresentationService::HandleDeviceChange() +PresentationService::HandleDeviceAdded(nsIPresentationDevice* aDevice) +{ + PRES_DEBUG("%s\n", __func__); + if (!aDevice) { + MOZ_ASSERT(false, "aDevice shoud no be null."); + return NS_ERROR_INVALID_ARG; + } + + // Query for only unavailable URLs while device added. 
+ nsTArray<nsString> unavailableUrls; + mAvailabilityManager.GetAvailbilityUrlByAvailability(unavailableUrls, false); + + nsTArray<nsString> supportedAvailabilityUrl; + for (const auto& url : unavailableUrls) { + bool isSupported; + if (NS_SUCCEEDED(aDevice->IsRequestedUrlSupported(url, &isSupported)) && + isSupported) { + supportedAvailabilityUrl.AppendElement(url); + } + } + + if (!supportedAvailabilityUrl.IsEmpty()) { + return mAvailabilityManager.DoNotifyAvailableChange(supportedAvailabilityUrl, + true); + } + + return NS_OK; +} + +nsresult +PresentationService::HandleDeviceRemoved() { PRES_DEBUG("%s\n", __func__); + // Query for only available URLs while device removed. + nsTArray<nsString> availabilityUrls; + mAvailabilityManager.GetAvailbilityUrlByAvailability(availabilityUrls, true); + + return UpdateAvailabilityUrlChange(availabilityUrls); +} + +nsresult +PresentationService::UpdateAvailabilityUrlChange( + const nsTArray<nsString>& aAvailabilityUrls) +{ nsCOMPtr<nsIPresentationDeviceManager> deviceManager = do_GetService(PRESENTATION_DEVICE_MANAGER_CONTRACTID); if (NS_WARN_IF(!deviceManager)) { return NS_ERROR_NOT_AVAILABLE; } - bool isAvailable; - nsresult rv = deviceManager->GetDeviceAvailable(&isAvailable); + nsCOMPtr<nsIArray> devices; + nsresult rv = deviceManager->GetAvailableDevices(nullptr, + getter_AddRefs(devices)); if (NS_WARN_IF(NS_FAILED(rv))) { return rv; } - if (isAvailable != mIsAvailable) { - mIsAvailable = isAvailable; - NotifyAvailableChange(mIsAvailable); + uint32_t numOfDevices; + devices->GetLength(&numOfDevices); + + nsTArray<nsString> supportedAvailabilityUrl; + for (const auto& url : aAvailabilityUrls) { + for (uint32_t i = 0; i < numOfDevices; ++i) { + nsCOMPtr<nsIPresentationDevice> device = do_QueryElementAt(devices, i); + if (device) { + bool isSupported; + if (NS_SUCCEEDED(device->IsRequestedUrlSupported(url, &isSupported)) && + isSupported) { + supportedAvailabilityUrl.AppendElement(url); + break; + } + } + } } - return 
NS_OK; + if (supportedAvailabilityUrl.IsEmpty()) { + return mAvailabilityManager.DoNotifyAvailableChange(aAvailabilityUrls, + false); + } + + return mAvailabilityManager.DoNotifyAvailableChange(supportedAvailabilityUrl, + true); } nsresult PresentationService::HandleSessionRequest(nsIPresentationSessionRequest* aRequest) { nsCOMPtr<nsIPresentationControlChannel> ctrlChannel; nsresult rv = aRequest->GetControlChannel(getter_AddRefs(ctrlChannel)); if (NS_WARN_IF(NS_FAILED(rv) || !ctrlChannel)) { @@ -592,27 +652,16 @@ PresentationService::HandleReconnectRequ if (NS_WARN_IF(!info->GetUrl().Equals(url))) { ctrlChannel->Disconnect(rv); return rv; } return HandleSessionRequest(aRequest); } -void -PresentationService::NotifyAvailableChange(bool aIsAvailable) -{ - nsTObserverArray<nsCOMPtr<nsIPresentationAvailabilityListener>>::ForwardIterator iter(mAvailabilityListeners); - while (iter.HasMore()) { - nsCOMPtr<nsIPresentationAvailabilityListener> listener = iter.GetNext(); - Unused << - NS_WARN_IF(NS_FAILED(listener->NotifyAvailableChange(aIsAvailable))); - } -} - NS_IMETHODIMP PresentationService::StartSession( const nsTArray<nsString>& aUrls, const nsAString& aSessionId, const nsAString& aOrigin, const nsAString& aDeviceId, uint64_t aWindowId, nsIDOMEventTarget* aEventTarget, @@ -873,38 +922,36 @@ PresentationService::BuildTransport(cons if (NS_WARN_IF(!info)) { return NS_ERROR_NOT_AVAILABLE; } return static_cast<PresentationControllingInfo*>(info.get())->BuildTransport(); } NS_IMETHODIMP -PresentationService::RegisterAvailabilityListener(nsIPresentationAvailabilityListener* aListener) +PresentationService::RegisterAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener) +{ + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(!aAvailabilityUrls.IsEmpty()); + MOZ_ASSERT(aListener); + + mAvailabilityManager.AddAvailabilityListener(aAvailabilityUrls, aListener); + return UpdateAvailabilityUrlChange(aAvailabilityUrls); +} 
+ +NS_IMETHODIMP +PresentationService::UnregisterAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener) { MOZ_ASSERT(NS_IsMainThread()); - if (!mAvailabilityListeners.Contains(aListener)) { - mAvailabilityListeners.AppendElement(aListener); - } - - // Leverage availablility change notification to assign - // the initial value of availability object. - Unused << - NS_WARN_IF(NS_FAILED(aListener->NotifyAvailableChange(mIsAvailable))); - - return NS_OK; -} - -NS_IMETHODIMP -PresentationService::UnregisterAvailabilityListener(nsIPresentationAvailabilityListener* aListener) -{ - MOZ_ASSERT(NS_IsMainThread()); - - mAvailabilityListeners.RemoveElement(aListener); + mAvailabilityManager.RemoveAvailabilityListener(aAvailabilityUrls, aListener); return NS_OK; } NS_IMETHODIMP PresentationService::RegisterSessionListener(const nsAString& aSessionId, uint8_t aRole, nsIPresentationSessionListener* aListener) { @@ -1114,16 +1161,19 @@ PresentationService::IsSessionAccessible aRole == nsIPresentationService::ROLE_RECEIVER); RefPtr<PresentationSessionInfo> info = GetSessionInfo(aSessionId, aRole); if (NS_WARN_IF(!info)) { return false; } return info->IsAccessible(aProcessId); } +} // namespace dom +} // namespace mozilla + already_AddRefed<nsIPresentationService> NS_CreatePresentationService() { MOZ_ASSERT(NS_IsMainThread()); nsCOMPtr<nsIPresentationService> service; if (XRE_GetProcessType() == GeckoProcessType_Content) { service = new mozilla::dom::PresentationIPCService();
--- a/dom/presentation/PresentationService.h +++ b/dom/presentation/PresentationService.h @@ -4,17 +4,16 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef mozilla_dom_PresentationService_h #define mozilla_dom_PresentationService_h #include "nsCOMPtr.h" #include "nsIObserver.h" -#include "nsTObserverArray.h" #include "PresentationServiceBase.h" #include "PresentationSessionInfo.h" class nsIPresentationSessionRequest; class nsIPresentationTerminateRequest; class nsIURI; class nsIPresentationSessionTransportBuilder; @@ -41,28 +40,29 @@ public: const uint8_t aRole, base::ProcessId aProcessId); private: friend class PresentationDeviceRequest; virtual ~PresentationService(); void HandleShutdown(); - nsresult HandleDeviceChange(); + nsresult HandleDeviceAdded(nsIPresentationDevice* aDevice); + nsresult HandleDeviceRemoved(); nsresult HandleSessionRequest(nsIPresentationSessionRequest* aRequest); nsresult HandleTerminateRequest(nsIPresentationTerminateRequest* aRequest); nsresult HandleReconnectRequest(nsIPresentationSessionRequest* aRequest); - void NotifyAvailableChange(bool aIsAvailable); // This is meant to be called by PresentationDeviceRequest. already_AddRefed<PresentationSessionInfo> CreateControllingSessionInfo(const nsAString& aUrl, const nsAString& aSessionId, uint64_t aWindowId); - bool mIsAvailable; - nsTObserverArray<nsCOMPtr<nsIPresentationAvailabilityListener>> mAvailabilityListeners; + // Emumerate all devices to get the availability of each input Urls. + nsresult UpdateAvailabilityUrlChange( + const nsTArray<nsString>& aAvailabilityUrls); }; } // namespace dom } // namespace mozilla #endif // mozilla_dom_PresentationService_h
--- a/dom/presentation/PresentationServiceBase.h +++ b/dom/presentation/PresentationServiceBase.h @@ -2,24 +2,25 @@ /* vim: set sw=2 ts=8 et ft=cpp : */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef mozilla_dom_PresentationServiceBase_h #define mozilla_dom_PresentationServiceBase_h +#include "mozilla/Unused.h" #include "nsClassHashtable.h" +#include "nsCOMArray.h" +#include "nsIPresentationListener.h" #include "nsIPresentationService.h" #include "nsRefPtrHashtable.h" #include "nsString.h" #include "nsTArray.h" -class nsIPresentationRespondingListener; - namespace mozilla { namespace dom { template<class T> class PresentationServiceBase { public: PresentationServiceBase() = default; @@ -127,16 +128,191 @@ protected: mRespondingWindowIds.Clear(); } private: nsClassHashtable<nsUint64HashKey, nsTArray<nsString>> mRespondingSessionIds; nsDataHashtable<nsStringHashKey, uint64_t> mRespondingWindowIds; }; + class AvailabilityManager final + { + public: + explicit AvailabilityManager() + { + MOZ_COUNT_CTOR(AvailabilityManager); + } + + ~AvailabilityManager() + { + MOZ_COUNT_DTOR(AvailabilityManager); + } + + void AddAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener) + { + nsTArray<nsString> dummy; + AddAvailabilityListener(aAvailabilityUrls, aListener, dummy); + } + + void AddAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener, + nsTArray<nsString>& aAddedUrls) + { + if (!aListener) { + MOZ_ASSERT(false, "aListener should not be null."); + return; + } + + if (aAvailabilityUrls.IsEmpty()) { + MOZ_ASSERT(false, "aAvailabilityUrls should not be empty."); + return; + } + + aAddedUrls.Clear(); + nsTArray<nsString> knownAvailableUrls; + for (const auto& url : aAvailabilityUrls) { + 
AvailabilityEntry* entry; + if (!mAvailabilityUrlTable.Get(url, &entry)) { + entry = new AvailabilityEntry(); + mAvailabilityUrlTable.Put(url, entry); + aAddedUrls.AppendElement(url); + } + if (!entry->mListeners.Contains(aListener)) { + entry->mListeners.AppendElement(aListener); + } + if (entry->mAvailable) { + knownAvailableUrls.AppendElement(url); + } + } + + if (!knownAvailableUrls.IsEmpty()) { + Unused << + NS_WARN_IF( + NS_FAILED(aListener->NotifyAvailableChange(knownAvailableUrls, + true))); + } else { + // If we can't find any known available url and there is no newly + // added url, we still need to notify the listener of the result. + // So, the promise returned by |getAvailability| can be resolved. + if (aAddedUrls.IsEmpty()) { + Unused << + NS_WARN_IF( + NS_FAILED(aListener->NotifyAvailableChange(aAvailabilityUrls, + false))); + } + } + } + + void RemoveAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener) + { + nsTArray<nsString> dummy; + RemoveAvailabilityListener(aAvailabilityUrls, aListener, dummy); + } + + void RemoveAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener, + nsTArray<nsString>& aRemovedUrls) + { + if (!aListener) { + MOZ_ASSERT(false, "aListener should not be null."); + return; + } + + if (aAvailabilityUrls.IsEmpty()) { + MOZ_ASSERT(false, "aAvailabilityUrls should not be empty."); + return; + } + + aRemovedUrls.Clear(); + for (const auto& url : aAvailabilityUrls) { + AvailabilityEntry* entry; + if (mAvailabilityUrlTable.Get(url, &entry)) { + entry->mListeners.RemoveElement(aListener); + if (entry->mListeners.IsEmpty()) { + mAvailabilityUrlTable.Remove(url); + aRemovedUrls.AppendElement(url); + } + } + } + } + + nsresult DoNotifyAvailableChange(const nsTArray<nsString>& aAvailabilityUrls, + bool aAvailable) + { + typedef nsClassHashtable<nsISupportsHashKey, + nsTArray<nsString>> ListenerToUrlsMap; + 
ListenerToUrlsMap availabilityListenerTable; + // Create a mapping from nsIPresentationAvailabilityListener to + // availabilityUrls. + for (auto it = mAvailabilityUrlTable.ConstIter(); !it.Done(); it.Next()) { + if (aAvailabilityUrls.Contains(it.Key())) { + AvailabilityEntry* entry = it.UserData(); + entry->mAvailable = aAvailable; + + for (uint32_t i = 0; i < entry->mListeners.Length(); ++i) { + nsIPresentationAvailabilityListener* listener = + entry->mListeners.ObjectAt(i); + nsTArray<nsString>* urlArray; + if (!availabilityListenerTable.Get(listener, &urlArray)) { + urlArray = new nsTArray<nsString>(); + availabilityListenerTable.Put(listener, urlArray); + } + urlArray->AppendElement(it.Key()); + } + } + } + + for (auto it = availabilityListenerTable.Iter(); !it.Done(); it.Next()) { + auto listener = + static_cast<nsIPresentationAvailabilityListener*>(it.Key()); + + Unused << + NS_WARN_IF(NS_FAILED(listener->NotifyAvailableChange(*it.UserData(), + aAvailable))); + } + return NS_OK; + } + + void GetAvailbilityUrlByAvailability(nsTArray<nsString>& aOutArray, + bool aAvailable) + { + aOutArray.Clear(); + + for (auto it = mAvailabilityUrlTable.ConstIter(); !it.Done(); it.Next()) { + if (it.UserData()->mAvailable == aAvailable) { + aOutArray.AppendElement(it.Key()); + } + } + } + + void Clear() + { + mAvailabilityUrlTable.Clear(); + } + + private: + struct AvailabilityEntry + { + explicit AvailabilityEntry() + : mAvailable(false) + {} + + bool mAvailable; + nsCOMArray<nsIPresentationAvailabilityListener> mListeners; + }; + + nsClassHashtable<nsStringHashKey, AvailabilityEntry> mAvailabilityUrlTable; + }; + virtual ~PresentationServiceBase() = default; void Shutdown() { mRespondingListeners.Clear(); mControllerSessionIdManager.Clear(); mReceiverSessionIdManager.Clear(); } @@ -210,14 +386,16 @@ protected: // to retrieve the correspondent session ID. Besides, also keep the mapping // between the responding session ID and the window ID to help look up the // window ID. 
SessionIdManager mControllerSessionIdManager; SessionIdManager mReceiverSessionIdManager; nsRefPtrHashtable<nsStringHashKey, T> mSessionInfoAtController; nsRefPtrHashtable<nsStringHashKey, T> mSessionInfoAtReceiver; + + AvailabilityManager mAvailabilityManager; }; } // namespace dom } // namespace mozilla #endif // mozilla_dom_PresentationServiceBase_h
--- a/dom/presentation/interfaces/nsIPresentationListener.idl +++ b/dom/presentation/interfaces/nsIPresentationListener.idl @@ -1,21 +1,24 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "nsISupports.idl" -[scriptable, uuid(0105f837-4279-4715-9d5b-2dc3f8b65353)] +[ref] native URLArrayRef(const nsTArray<nsString>); + +[uuid(0105f837-4279-4715-9d5b-2dc3f8b65353)] interface nsIPresentationAvailabilityListener : nsISupports { /* * Called when device availability changes. */ - void notifyAvailableChange(in bool available); + [noscript] void notifyAvailableChange(in URLArrayRef urls, + in bool available); }; [scriptable, uuid(7dd48df8-8f8c-48c7-ac37-7b9fd1acf2f8)] interface nsIPresentationSessionListener : nsISupports { const unsigned short STATE_CONNECTING = 0; const unsigned short STATE_CONNECTED = 1; const unsigned short STATE_CLOSED = 2;
--- a/dom/presentation/interfaces/nsIPresentationService.idl +++ b/dom/presentation/interfaces/nsIPresentationService.idl @@ -153,25 +153,32 @@ interface nsIPresentationService : nsISu [noscript] void reconnectSession(in URLArrayRef urls, in DOMString sessionId, in uint8_t role, in nsIPresentationServiceCallback callback); /* * Register an availability listener. Must be called from the main thread. * + * @param availabilityUrls: The Urls that this listener is interested in. * @param listener: The listener to register. */ - void registerAvailabilityListener(in nsIPresentationAvailabilityListener listener); + [noscript] void registerAvailabilityListener( + in URLArrayRef availabilityUrls, + in nsIPresentationAvailabilityListener listener); /* * Unregister an availability listener. Must be called from the main thread. + * + * @param availabilityUrls: The Urls that are registered before. * @param listener: The listener to unregister. */ - void unregisterAvailabilityListener(in nsIPresentationAvailabilityListener listener); + [noscript] void unregisterAvailabilityListener( + in URLArrayRef availabilityUrls, + in nsIPresentationAvailabilityListener listener); /* * Register a session listener. Must be called from the main thread. * * @param sessionId: An ID to identify presentation session. * @param role: Identify the function called by controller or receiver. * @param listener: The listener to register. */
--- a/dom/presentation/ipc/PPresentation.ipdl +++ b/dom/presentation/ipc/PPresentation.ipdl @@ -72,33 +72,34 @@ union PresentationIPCRequest sync protocol PPresentation { manager PContent; manages PPresentationBuilder; manages PPresentationRequest; child: - async NotifyAvailableChange(bool aAvailable); + async NotifyAvailableChange(nsString[] aAvailabilityUrls, + bool aAvailable); async NotifySessionStateChange(nsString aSessionId, uint16_t aState, nsresult aReason); async NotifyMessage(nsString aSessionId, nsCString aData, bool aIsBinary); async NotifySessionConnect(uint64_t aWindowId, nsString aSessionId); async NotifyCloseSessionTransport(nsString aSessionId, uint8_t aRole, nsresult aReason); async PPresentationBuilder(nsString aSessionId, uint8_t aRole); parent: async __delete__(); - async RegisterAvailabilityHandler(); - async UnregisterAvailabilityHandler(); + async RegisterAvailabilityHandler(nsString[] aAvailabilityUrls); + async UnregisterAvailabilityHandler(nsString[] aAvailabilityUrls); async RegisterSessionHandler(nsString aSessionId, uint8_t aRole); async UnregisterSessionHandler(nsString aSessionId, uint8_t aRole); async RegisterRespondingHandler(uint64_t aWindowId); async UnregisterRespondingHandler(uint64_t aWindowId); async PPresentationRequest(PresentationIPCRequest aRequest);
--- a/dom/presentation/ipc/PresentationChild.cpp +++ b/dom/presentation/ipc/PresentationChild.cpp @@ -84,20 +84,24 @@ PresentationChild::DeallocPPresentationB { RefPtr<PresentationBuilderChild> actor = dont_AddRef(static_cast<PresentationBuilderChild*>(aActor)); return true; } bool -PresentationChild::RecvNotifyAvailableChange(const bool& aAvailable) +PresentationChild::RecvNotifyAvailableChange( + nsTArray<nsString>&& aAvailabilityUrls, + const bool& aAvailable) { if (mService) { - Unused << NS_WARN_IF(NS_FAILED(mService->NotifyAvailableChange(aAvailable))); + Unused << + NS_WARN_IF(NS_FAILED(mService->NotifyAvailableChange(aAvailabilityUrls, + aAvailable))); } return true; } bool PresentationChild::RecvNotifySessionStateChange(const nsString& aSessionId, const uint16_t& aState, const nsresult& aReason)
--- a/dom/presentation/ipc/PresentationChild.h +++ b/dom/presentation/ipc/PresentationChild.h @@ -38,17 +38,18 @@ public: virtual PPresentationBuilderChild* AllocPPresentationBuilderChild(const nsString& aSessionId, const uint8_t& aRole) override; virtual bool DeallocPPresentationBuilderChild(PPresentationBuilderChild* aActor) override; virtual bool - RecvNotifyAvailableChange(const bool& aAvailable) override; + RecvNotifyAvailableChange(nsTArray<nsString>&& aAvailabilityUrls, + const bool& aAvailable) override; virtual bool RecvNotifySessionStateChange(const nsString& aSessionId, const uint16_t& aState, const nsresult& aReason) override; virtual bool RecvNotifyMessage(const nsString& aSessionId,
--- a/dom/presentation/ipc/PresentationIPCService.cpp +++ b/dom/presentation/ipc/PresentationIPCService.cpp @@ -23,17 +23,19 @@ using namespace mozilla::dom; using namespace mozilla::ipc; namespace { PresentationChild* sPresentationChild; } // anonymous -NS_IMPL_ISUPPORTS(PresentationIPCService, nsIPresentationService) +NS_IMPL_ISUPPORTS(PresentationIPCService, + nsIPresentationService, + nsIPresentationAvailabilityListener) PresentationIPCService::PresentationIPCService() { ContentChild* contentChild = ContentChild::GetSingleton(); if (NS_WARN_IF(!contentChild)) { return; } sPresentationChild = new PresentationChild(this); @@ -41,17 +43,16 @@ PresentationIPCService::PresentationIPCS NS_WARN_IF(!contentChild->SendPPresentationConstructor(sPresentationChild)); } /* virtual */ PresentationIPCService::~PresentationIPCService() { Shutdown(); - mAvailabilityListeners.Clear(); mSessionListeners.Clear(); mSessionInfoAtController.Clear(); mSessionInfoAtReceiver.Clear(); sPresentationChild = nullptr; } NS_IMETHODIMP PresentationIPCService::StartSession( @@ -229,39 +230,53 @@ PresentationIPCService::SendRequest(nsIP if (sPresentationChild) { PresentationRequestChild* actor = new PresentationRequestChild(aCallback); Unused << NS_WARN_IF(!sPresentationChild->SendPPresentationRequestConstructor(actor, aRequest)); } return NS_OK; } NS_IMETHODIMP -PresentationIPCService::RegisterAvailabilityListener(nsIPresentationAvailabilityListener* aListener) +PresentationIPCService::RegisterAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener) { MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(!aAvailabilityUrls.IsEmpty()); MOZ_ASSERT(aListener); - mAvailabilityListeners.AppendElement(aListener); - if (sPresentationChild) { + nsTArray<nsString> addedUrls; + mAvailabilityManager.AddAvailabilityListener(aAvailabilityUrls, + aListener, + addedUrls); + + if (sPresentationChild && !addedUrls.IsEmpty()) { Unused << - 
NS_WARN_IF(!sPresentationChild->SendRegisterAvailabilityHandler()); + NS_WARN_IF( + !sPresentationChild->SendRegisterAvailabilityHandler(addedUrls)); } return NS_OK; } NS_IMETHODIMP -PresentationIPCService::UnregisterAvailabilityListener(nsIPresentationAvailabilityListener* aListener) +PresentationIPCService::UnregisterAvailabilityListener( + const nsTArray<nsString>& aAvailabilityUrls, + nsIPresentationAvailabilityListener* aListener) { MOZ_ASSERT(NS_IsMainThread()); - MOZ_ASSERT(aListener); - mAvailabilityListeners.RemoveElement(aListener); - if (mAvailabilityListeners.IsEmpty() && sPresentationChild) { + nsTArray<nsString> removedUrls; + mAvailabilityManager.RemoveAvailabilityListener(aAvailabilityUrls, + aListener, + removedUrls); + + if (sPresentationChild && !removedUrls.IsEmpty()) { Unused << - NS_WARN_IF(!sPresentationChild->SendUnregisterAvailabilityHandler()); + NS_WARN_IF( + !sPresentationChild->SendUnregisterAvailabilityHandler(removedUrls)); } return NS_OK; } NS_IMETHODIMP PresentationIPCService::RegisterSessionListener(const nsAString& aSessionId, uint8_t aRole, nsIPresentationSessionListener* aListener) @@ -414,26 +429,23 @@ PresentationIPCService::NotifySessionCon nsCOMPtr<nsIPresentationRespondingListener> listener; if (NS_WARN_IF(!mRespondingListeners.Get(aWindowId, getter_AddRefs(listener)))) { return NS_OK; } return listener->NotifySessionConnect(aWindowId, aSessionId); } -nsresult -PresentationIPCService::NotifyAvailableChange(bool aAvailable) +NS_IMETHODIMP +PresentationIPCService::NotifyAvailableChange( + const nsTArray<nsString>& aAvailabilityUrls, + bool aAvailable) { - nsTObserverArray<nsCOMPtr<nsIPresentationAvailabilityListener>>::ForwardIterator iter(mAvailabilityListeners); - while (iter.HasMore()) { - nsIPresentationAvailabilityListener* listener = iter.GetNext(); - Unused << NS_WARN_IF(NS_FAILED(listener->NotifyAvailableChange(aAvailable))); - } - - return NS_OK; + return 
mAvailabilityManager.DoNotifyAvailableChange(aAvailabilityUrls, + aAvailable); } NS_IMETHODIMP PresentationIPCService::NotifyReceiverReady( const nsAString& aSessionId, uint64_t aWindowId, bool aIsLoading, nsIPresentationTransportBuilderConstructor* aBuilderConstructor)
--- a/dom/presentation/ipc/PresentationIPCService.h +++ b/dom/presentation/ipc/PresentationIPCService.h @@ -3,41 +3,41 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef mozilla_dom_PresentationIPCService_h #define mozilla_dom_PresentationIPCService_h #include "mozilla/dom/PresentationServiceBase.h" +#include "nsIPresentationListener.h" #include "nsIPresentationSessionTransport.h" #include "nsIPresentationService.h" -#include "nsTObserverArray.h" class nsIDocShell; namespace mozilla { namespace dom { class PresentationIPCRequest; class PresentationContentSessionInfo; class PresentationResponderLoadingCallback; class PresentationIPCService final - : public nsIPresentationService + : public nsIPresentationAvailabilityListener + , public nsIPresentationService , public PresentationServiceBase<PresentationContentSessionInfo> { public: NS_DECL_ISUPPORTS + NS_DECL_NSIPRESENTATIONAVAILABILITYLISTENER NS_DECL_NSIPRESENTATIONSERVICE PresentationIPCService(); - nsresult NotifyAvailableChange(bool aAvailable); - nsresult NotifySessionStateChange(const nsAString& aSessionId, uint16_t aState, nsresult aReason); nsresult NotifyMessage(const nsAString& aSessionId, const nsACString& aData, const bool& aIsBinary); @@ -57,17 +57,16 @@ public: uint8_t aRole, nsresult aReason); private: virtual ~PresentationIPCService(); nsresult SendRequest(nsIPresentationServiceCallback* aCallback, const PresentationIPCRequest& aRequest); - nsTObserverArray<nsCOMPtr<nsIPresentationAvailabilityListener> > mAvailabilityListeners; nsRefPtrHashtable<nsStringHashKey, nsIPresentationSessionListener> mSessionListeners; nsRefPtrHashtable<nsUint64HashKey, nsIPresentationRespondingListener> mRespondingListeners; RefPtr<PresentationResponderLoadingCallback> mCallback; }; } // namespace dom
--- a/dom/presentation/ipc/PresentationParent.cpp +++ b/dom/presentation/ipc/PresentationParent.cpp @@ -129,17 +129,19 @@ PresentationParent::ActorDestroy(ActorDe mSessionIdsAtReceiver.Clear(); for (uint32_t i = 0; i < mWindowIds.Length(); i++) { Unused << NS_WARN_IF(NS_FAILED(mService-> UnregisterRespondingListener(mWindowIds[i]))); } mWindowIds.Clear(); - mService->UnregisterAvailabilityListener(this); + if (!mContentAvailabilityUrls.IsEmpty()) { + mService->UnregisterAvailabilityListener(mContentAvailabilityUrls, this); + } mService = nullptr; } bool PresentationParent::RecvPPresentationRequestConstructor( PPresentationRequestParent* aActor, const PresentationIPCRequest& aRequest) { @@ -207,28 +209,40 @@ PresentationParent::DeallocPPresentation bool PresentationParent::Recv__delete__() { return true; } bool -PresentationParent::RecvRegisterAvailabilityHandler() +PresentationParent::RecvRegisterAvailabilityHandler( + nsTArray<nsString>&& aAvailabilityUrls) { MOZ_ASSERT(mService); - Unused << NS_WARN_IF(NS_FAILED(mService->RegisterAvailabilityListener(this))); + + Unused << NS_WARN_IF(NS_FAILED(mService->RegisterAvailabilityListener( + aAvailabilityUrls, + this))); + mContentAvailabilityUrls.AppendElements(aAvailabilityUrls); return true; } bool -PresentationParent::RecvUnregisterAvailabilityHandler() +PresentationParent::RecvUnregisterAvailabilityHandler( + nsTArray<nsString>&& aAvailabilityUrls) { MOZ_ASSERT(mService); - Unused << NS_WARN_IF(NS_FAILED(mService->UnregisterAvailabilityListener(this))); + + Unused << NS_WARN_IF(NS_FAILED(mService->UnregisterAvailabilityListener( + aAvailabilityUrls, + this))); + for (const auto& url : aAvailabilityUrls) { + mContentAvailabilityUrls.RemoveElement(url); + } return true; } /* virtual */ bool PresentationParent::RecvRegisterSessionHandler(const nsString& aSessionId, const uint8_t& aRole) { MOZ_ASSERT(mService); @@ -278,19 +292,22 @@ PresentationParent::RecvUnregisterRespon { MOZ_ASSERT(mService); 
mWindowIds.RemoveElement(aWindowId); Unused << NS_WARN_IF(NS_FAILED(mService->UnregisterRespondingListener(aWindowId))); return true; } NS_IMETHODIMP -PresentationParent::NotifyAvailableChange(bool aAvailable) +PresentationParent::NotifyAvailableChange(const nsTArray<nsString>& aAvailabilityUrls, + bool aAvailable) { - if (NS_WARN_IF(mActorDestroyed || !SendNotifyAvailableChange(aAvailable))) { + if (NS_WARN_IF(mActorDestroyed || + !SendNotifyAvailableChange(aAvailabilityUrls, + aAvailable))) { return NS_ERROR_FAILURE; } return NS_OK; } NS_IMETHODIMP PresentationParent::NotifyStateChange(const nsAString& aSessionId, uint16_t aState,
--- a/dom/presentation/ipc/PresentationParent.h +++ b/dom/presentation/ipc/PresentationParent.h @@ -52,19 +52,21 @@ public: const uint8_t& aRole) override; virtual bool DeallocPPresentationBuilderParent( PPresentationBuilderParent* aActor) override; virtual bool Recv__delete__() override; - virtual bool RecvRegisterAvailabilityHandler() override; + virtual bool RecvRegisterAvailabilityHandler( + nsTArray<nsString>&& aAvailabilityUrls) override; - virtual bool RecvUnregisterAvailabilityHandler() override; + virtual bool RecvUnregisterAvailabilityHandler( + nsTArray<nsString>&& aAvailabilityUrls) override; virtual bool RecvRegisterSessionHandler(const nsString& aSessionId, const uint8_t& aRole) override; virtual bool RecvUnregisterSessionHandler(const nsString& aSessionId, const uint8_t& aRole) override; virtual bool RecvRegisterRespondingHandler(const uint64_t& aWindowId) override; @@ -83,16 +85,17 @@ private: virtual ~PresentationParent(); bool mActorDestroyed = false; nsCOMPtr<nsIPresentationService> mService; nsTArray<nsString> mSessionIdsAtController; nsTArray<nsString> mSessionIdsAtReceiver; nsTArray<uint64_t> mWindowIds; ContentParentId mChildId; + nsTArray<nsString> mContentAvailabilityUrls; }; class PresentationRequestParent final : public PPresentationRequestParent , public nsIPresentationServiceCallback { friend class PresentationParent; public:
--- a/dom/presentation/tests/mochitest/PresentationDeviceInfoChromeScript.js +++ b/dom/presentation/tests/mochitest/PresentationDeviceInfoChromeScript.js @@ -30,35 +30,121 @@ var testDevice = { return true; }, id: null, name: null, type: null, listener: null, }; +var testDevice1 = { + QueryInterface: XPCOMUtils.generateQI([Ci.nsIPresentationDevice]), + id: 'dummyid', + name: 'dummyName', + type: 'dummyType', + establishControlChannel: function(url, presentationId) { + return null; + }, + disconnect: function() {}, + isRequestedUrlSupported: function(requestedUrl) { + return true; + }, +}; + +var testDevice2 = { + QueryInterface: XPCOMUtils.generateQI([Ci.nsIPresentationDevice]), + id: 'dummyid', + name: 'dummyName', + type: 'dummyType', + establishControlChannel: function(url, presentationId) { + return null; + }, + disconnect: function() {}, + isRequestedUrlSupported: function(requestedUrl) { + return true; + }, +}; + +var mockedDeviceWithoutSupportedURL = { + QueryInterface: XPCOMUtils.generateQI([Ci.nsIPresentationDevice]), + id: 'dummyid', + name: 'dummyName', + type: 'dummyType', + establishControlChannel: function(url, presentationId) { + return null; + }, + disconnect: function() {}, + isRequestedUrlSupported: function(requestedUrl) { + return false; + }, +}; + +var mockedDeviceSupportHttpsURL = { + QueryInterface: XPCOMUtils.generateQI([Ci.nsIPresentationDevice]), + id: 'dummyid', + name: 'dummyName', + type: 'dummyType', + establishControlChannel: function(url, presentationId) { + return null; + }, + disconnect: function() {}, + isRequestedUrlSupported: function(requestedUrl) { + if (requestedUrl.indexOf("https://") != -1) { + return true; + } + return false; + }, +}; + addMessageListener('setup', function() { manager.addDeviceProvider(testProvider); sendAsyncMessage('setup-complete'); }); addMessageListener('trigger-device-add', function(device) { testDevice.id = device.id; testDevice.name = device.name; testDevice.type = device.type; 
manager.addDevice(testDevice); }); +addMessageListener('trigger-add-unsupport-url-device', function() { + manager.addDevice(mockedDeviceWithoutSupportedURL); +}); + +addMessageListener('trigger-add-multiple-devices', function() { + manager.addDevice(testDevice1); + manager.addDevice(testDevice2); +}); + +addMessageListener('trigger-add-https-devices', function() { + manager.addDevice(mockedDeviceSupportHttpsURL); +}); + + addMessageListener('trigger-device-update', function(device) { testDevice.id = device.id; testDevice.name = device.name; testDevice.type = device.type; manager.updateDevice(testDevice); }); addMessageListener('trigger-device-remove', function() { manager.removeDevice(testDevice); }); +addMessageListener('trigger-remove-unsupported-device', function() { + manager.removeDevice(mockedDeviceWithoutSupportedURL); +}); + +addMessageListener('trigger-remove-multiple-devices', function() { + manager.removeDevice(testDevice1); + manager.removeDevice(testDevice2); +}); + +addMessageListener('trigger-remove-https-devices', function() { + manager.removeDevice(mockedDeviceSupportHttpsURL); +}); + addMessageListener('teardown', function() { manager.removeDeviceProvider(testProvider); });
--- a/dom/presentation/tests/mochitest/test_presentation_availability.html +++ b/dom/presentation/tests/mochitest/test_presentation_availability.html @@ -93,35 +93,140 @@ function testConsecutiveGetAvailability( ok(firstAvailabilityResolved, "getAvailability() should be resolved in sequence"); }) ]).catch(function(aError) { ok(false, "Error occurred when getting availability: " + aError); teardown(); }); } +function testUnsupportedDeviceAvailability() { + return Promise.race([ + new Promise(function(aResolve, aReject) { + let request = new PresentationRequest("https://test.com"); + request.getAvailability().then(function(aAvailability) { + availability = aAvailability; + aAvailability.onchange = function() { + availability.onchange = null; + ok(false, "Should not get onchange event."); + teardown(); + } + }); + gScript.sendAsyncMessage('trigger-add-unsupport-url-device'); + }), + new Promise(function(aResolve, aReject) { + setTimeout(function() { + ok(true, "Should not get onchange event."); + availability.onchange = null; + gScript.sendAsyncMessage('trigger-remove-unsupported-device'); + aResolve(); + }, 3000); + }), + ]); +} + +function testMultipleAvailabilityURLs() { + let request1 = new PresentationRequest(["https://example.com", + "https://example1.com"]); + let request2 = new PresentationRequest(["https://example1.com", + "https://example2.com"]); + return Promise.all([ + request1.getAvailability().then(function(aAvailability) { + return new Promise(function(aResolve) { + aAvailability.onchange = function() { + aAvailability.onchange = null; + ok(true, "Should get onchange event."); + aResolve(); + }; + }); + }), + request2.getAvailability().then(function(aAvailability) { + return new Promise(function(aResolve) { + aAvailability.onchange = function() { + aAvailability.onchange = null; + ok(true, "Should get onchange event."); + aResolve(); + }; + }); + }), + new Promise(function(aResolve) { + gScript.sendAsyncMessage('trigger-add-multiple-devices'); + 
aResolve(); + }), + ]).then(new Promise(function(aResolve) { + gScript.sendAsyncMessage('trigger-remove-multiple-devices'); + aResolve(); + })); +} + +function testPartialSupportedDeviceAvailability() { + let request1 = new PresentationRequest(["https://supportedUrl.com"]); + let request2 = new PresentationRequest(["http://notSupportedUrl.com"]); + + return Promise.all([ + request1.getAvailability().then(function(aAvailability) { + return new Promise(function(aResolve) { + aAvailability.onchange = function() { + aAvailability.onchange = null; + ok(true, "Should get onchange event."); + aResolve(); + }; + }); + }), + Promise.race([ + request2.getAvailability().then(function(aAvailability) { + return new Promise(function(aResolve) { + aAvailability.onchange = function() { + aAvailability.onchange = null; + ok(false, "Should get onchange event."); + aResolve(); + }; + }); + }), + new Promise(function(aResolve) { + setTimeout(function() { + ok(true, "Should not get onchange event."); + availability.onchange = null; + aResolve(); + }, 3000); + }), + ]), + new Promise(function(aResolve) { + gScript.sendAsyncMessage('trigger-add-https-devices'); + aResolve(); + }), + ]).then(new Promise(function(aResolve) { + gScript.sendAsyncMessage('trigger-remove-https-devices'); + aResolve(); + })); +} + function teardown() { request = null; availability = null; gScript.sendAsyncMessage('teardown'); gScript.destroy(); SimpleTest.finish(); } function runTests() { ok(navigator.presentation, "navigator.presentation should be available."); testSetup().then(testInitialUnavailable) .then(testInitialAvailable) .then(testSameObject) .then(testOnChangeEvent) .then(testConsecutiveGetAvailability) + .then(testMultipleAvailabilityURLs) + .then(testUnsupportedDeviceAvailability) + .then(testPartialSupportedDeviceAvailability) .then(teardown); } SimpleTest.waitForExplicitFinish(); +SimpleTest.requestFlakyTimeout('Test for guarantee not firing async event'); SpecialPowers.pushPermissions([ {type: 
"presentation-device-manage", allow: false, context: document}, ], function() { SpecialPowers.pushPrefEnv({ "set": [["dom.presentation.enabled", true], ["dom.presentation.controller.enabled", true], ["dom.presentation.session_transport.data_channel.enable", false]]}, runTests); });
--- a/dom/security/nsCSPContext.cpp +++ b/dom/security/nsCSPContext.cpp @@ -38,16 +38,17 @@ #include "nsScriptSecurityManager.h" #include "nsStringStream.h" #include "mozilla/Logging.h" #include "mozilla/dom/CSPReportBinding.h" #include "mozilla/dom/CSPDictionariesBinding.h" #include "mozilla/net/ReferrerPolicy.h" #include "nsINetworkInterceptController.h" #include "nsSandboxFlags.h" +#include "nsIScriptElement.h" using namespace mozilla; static LogModule* GetCspContextLog() { static LazyLogModule gCspContextPRLog("CSPContext"); return gCspContextPRLog; @@ -148,37 +149,44 @@ nsCSPContext::ShouldLoad(nsContentPolicy // If the content type doesn't map to a CSP directive, there's nothing for // CSP to do. CSPDirective dir = CSP_ContentTypeToDirective(aContentType); if (dir == nsIContentSecurityPolicy::NO_DIRECTIVE) { return NS_OK; } nsAutoString nonce; + bool parserCreated = false; if (!isPreload) { nsCOMPtr<nsIDOMHTMLElement> htmlElement = do_QueryInterface(aRequestContext); if (htmlElement) { rv = htmlElement->GetAttribute(NS_LITERAL_STRING("nonce"), nonce); NS_ENSURE_SUCCESS(rv, rv); } + + nsCOMPtr<nsIScriptElement> script = do_QueryInterface(aRequestContext); + if (script && script->GetParserCreated() != mozilla::dom::NOT_FROM_PARSER) { + parserCreated = true; + } } // aExtra is only non-null if the channel got redirected. bool wasRedirected = (aExtra != nullptr); nsCOMPtr<nsIURI> originalURI = do_QueryInterface(aExtra); bool permitted = permitsInternal(dir, aContentLocation, originalURI, nonce, wasRedirected, isPreload, false, // allow fallback to default-src true, // send violation reports - true); // send blocked URI in violation reports + true, // send blocked URI in violation reports + parserCreated); *outDecision = permitted ? 
nsIContentPolicy::ACCEPT : nsIContentPolicy::REJECT_SERVER; // Done looping, cache any relevant result if (cacheKey.Length() > 0 && !isPreload) { mShouldLoadCache.Put(cacheKey, *outDecision); } @@ -196,17 +204,18 @@ bool nsCSPContext::permitsInternal(CSPDirective aDir, nsIURI* aContentLocation, nsIURI* aOriginalURI, const nsAString& aNonce, bool aWasRedirected, bool aIsPreload, bool aSpecific, bool aSendViolationReports, - bool aSendContentLocationInViolationReports) + bool aSendContentLocationInViolationReports, + bool aParserCreated) { bool permits = true; nsAutoString violatedDirective; for (uint32_t p = 0; p < mPolicies.Length(); p++) { // According to the W3C CSP spec, frame-ancestors checks are ignored for // report-only policies (when "monitoring"). @@ -215,16 +224,17 @@ nsCSPContext::permitsInternal(CSPDirecti continue; } if (!mPolicies[p]->permits(aDir, aContentLocation, aNonce, aWasRedirected, aSpecific, + aParserCreated, violatedDirective)) { // If the policy is violated and not report-only, reject the load and // report to the console if (!mPolicies[p]->getReportOnlyFlag()) { CSPCONTEXTLOG(("nsCSPContext::permitsInternal, false")); permits = false; } @@ -399,17 +409,19 @@ NS_IMETHODIMP nsCSPContext::GetAllowsEval(bool* outShouldReportViolation, bool* outAllowsEval) { *outShouldReportViolation = false; *outAllowsEval = true; for (uint32_t i = 0; i < mPolicies.Length(); i++) { if (!mPolicies[i]->allows(nsIContentPolicy::TYPE_SCRIPT, - CSP_UNSAFE_EVAL, EmptyString())) { + CSP_UNSAFE_EVAL, + EmptyString(), + false)) { // policy is violated: must report the violation and allow the inline // script if the policy is report-only. 
*outShouldReportViolation = true; if (!mPolicies[i]->getReportOnlyFlag()) { *outAllowsEval = false; } } } @@ -466,16 +478,17 @@ nsCSPContext::reportInlineViolation(nsCo NS_ConvertUTF8toUTF16(sourceFile), // aSourceFile codeSample, // aScriptSample aLineNumber); // aLineNum } NS_IMETHODIMP nsCSPContext::GetAllowsInline(nsContentPolicyType aContentType, const nsAString& aNonce, + bool aParserCreated, const nsAString& aContent, uint32_t aLineNumber, bool* outAllowsInline) { *outAllowsInline = true; MOZ_ASSERT(aContentType == nsContentUtils::InternalContentPolicyTypeToExternal(aContentType), "We should only see external content policy types here."); @@ -484,19 +497,19 @@ nsCSPContext::GetAllowsInline(nsContentP aContentType != nsIContentPolicy::TYPE_STYLESHEET) { MOZ_ASSERT(false, "can only allow inline for script or style"); return NS_OK; } // always iterate all policies, otherwise we might not send out all reports for (uint32_t i = 0; i < mPolicies.Length(); i++) { bool allowed = - mPolicies[i]->allows(aContentType, CSP_UNSAFE_INLINE, EmptyString()) || - mPolicies[i]->allows(aContentType, CSP_NONCE, aNonce) || - mPolicies[i]->allows(aContentType, CSP_HASH, aContent); + mPolicies[i]->allows(aContentType, CSP_UNSAFE_INLINE, EmptyString(), aParserCreated) || + mPolicies[i]->allows(aContentType, CSP_NONCE, aNonce, aParserCreated) || + mPolicies[i]->allows(aContentType, CSP_HASH, aContent, aParserCreated); if (!allowed) { // policy is violoated: deny the load unless policy is report only and // report the violation. if (!mPolicies[i]->getReportOnlyFlag()) { *outAllowsInline = false; } nsAutoString violatedDirective; @@ -530,23 +543,27 @@ nsCSPContext::GetAllowsInline(nsContentP * @param violationType: the VIOLATION_TYPE_* constant (partial symbol) * such as INLINE_SCRIPT * @param contentPolicyType: a constant from nsIContentPolicy such as TYPE_STYLESHEET * @param nonceOrHash: for NONCE and HASH violations, it's the nonce or content * string. 
For other violations, it is an empty string. * @param keyword: the keyword corresponding to violation (UNSAFE_INLINE for most) * @param observerTopic: the observer topic string to send with the CSP * observer notifications. + * + * Please note that inline violations for scripts are reported within + * GetAllowsInline() and do not call this macro, hence we can pass 'false' + * as the argument _aParserCreated_ to allows(). */ #define CASE_CHECK_AND_REPORT(violationType, contentPolicyType, nonceOrHash, \ keyword, observerTopic) \ case nsIContentSecurityPolicy::VIOLATION_TYPE_ ## violationType : \ PR_BEGIN_MACRO \ if (!mPolicies[p]->allows(nsIContentPolicy::TYPE_ ## contentPolicyType, \ - keyword, nonceOrHash)) \ + keyword, nonceOrHash, false)) \ { \ nsAutoString violatedDirective; \ mPolicies[p]->getDirectiveStringForContentType( \ nsIContentPolicy::TYPE_ ## contentPolicyType, \ violatedDirective); \ this->AsyncReportViolation(selfISupports, nullptr, violatedDirective, p, \ NS_LITERAL_STRING(observerTopic), \ aSourceFile, aScriptSample, aLineNum); \ @@ -1263,17 +1280,18 @@ nsCSPContext::PermitsAncestry(nsIDocShel bool permits = permitsInternal(nsIContentSecurityPolicy::FRAME_ANCESTORS_DIRECTIVE, ancestorsArray[a], nullptr, // no redirect here. EmptyString(), // no nonce false, // no redirect here. false, // not a preload. true, // specific, do not use default-src true, // send violation reports - okToSendAncestor); + okToSendAncestor, + false); // not parser created if (!permits) { *outPermitsAncestry = false; } } return NS_OK; } NS_IMETHODIMP @@ -1290,17 +1308,18 @@ nsCSPContext::Permits(nsIURI* aURI, *outPermits = permitsInternal(aDir, aURI, nullptr, // no original (pre-redirect) URI EmptyString(), // no nonce false, // not redirected. false, // not a preload. 
aSpecific, true, // send violation reports - true); // send blocked URI in violation reports + true, // send blocked URI in violation reports + false); // not parser created if (CSPCONTEXTLOGENABLED()) { CSPCONTEXTLOG(("nsCSPContext::Permits, aUri: %s, aDir: %d, isAllowed: %s", aURI->GetSpecOrDefault().get(), aDir, *outPermits ? "allow" : "deny")); } return NS_OK;
--- a/dom/security/nsCSPContext.h +++ b/dom/security/nsCSPContext.h @@ -89,17 +89,18 @@ class nsCSPContext : public nsIContentSe bool permitsInternal(CSPDirective aDir, nsIURI* aContentLocation, nsIURI* aOriginalURI, const nsAString& aNonce, bool aWasRedirected, bool aIsPreload, bool aSpecific, bool aSendViolationReports, - bool aSendContentLocationInViolationReports); + bool aSendContentLocationInViolationReports, + bool aParserCreated); // helper to report inline script/style violations void reportInlineViolation(nsContentPolicyType aContentType, const nsAString& aNonce, const nsAString& aContent, const nsAString& aViolatedDirective, uint32_t aViolatedPolicyIndex, uint32_t aLineNumber);
--- a/dom/security/nsCSPParser.cpp +++ b/dom/security/nsCSPParser.cpp @@ -118,37 +118,40 @@ nsCSPTokenizer::tokenizeCSPPolicy(const nsCSPTokenizer tokenizer(aPolicyString.BeginReading(), aPolicyString.EndReading()); tokenizer.generateTokens(outTokens); } /* ===== nsCSPParser ==================== */ bool nsCSPParser::sCSPExperimentalEnabled = false; +bool nsCSPParser::sStrictDynamicEnabled = false; nsCSPParser::nsCSPParser(cspTokens& aTokens, nsIURI* aSelfURI, nsCSPContext* aCSPContext, bool aDeliveredViaMetaTag) : mCurChar(nullptr) , mEndChar(nullptr) , mHasHashOrNonce(false) + , mStrictDynamic(false) , mUnsafeInlineKeywordSrc(nullptr) , mChildSrc(nullptr) , mFrameSrc(nullptr) , mTokens(aTokens) , mSelfURI(aSelfURI) , mPolicy(nullptr) , mCSPContext(aCSPContext) , mDeliveredViaMetaTag(aDeliveredViaMetaTag) { static bool initialized = false; if (!initialized) { initialized = true; Preferences::AddBoolVarCache(&sCSPExperimentalEnabled, "security.csp.experimentalEnabled"); + Preferences::AddBoolVarCache(&sStrictDynamicEnabled, "security.csp.enableStrictDynamic"); } CSPPARSERLOG(("nsCSPParser::nsCSPParser")); } nsCSPParser::~nsCSPParser() { CSPPARSERLOG(("nsCSPParser::~nsCSPParser")); } @@ -526,16 +529,32 @@ nsCSPParser::keywordSource() NS_ConvertUTF16toUTF8(mCurValue).get())); // Special case handling for 'self' which is not stored internally as a keyword, // but rather creates a nsCSPHostSrc using the selfURI if (CSP_IsKeyword(mCurToken, CSP_SELF)) { return CSP_CreateHostSrcFromURI(mSelfURI); } + if (CSP_IsKeyword(mCurToken, CSP_STRICT_DYNAMIC)) { + // make sure strict dynamic is enabled + if (!sStrictDynamicEnabled) { + return nullptr; + } + if (!CSP_IsDirective(mCurDir[0], nsIContentSecurityPolicy::SCRIPT_SRC_DIRECTIVE)) { + // Todo: Enforce 'strict-dynamic' within default-src; see Bug 1313937 + const char16_t* params[] = { u"strict-dynamic" }; + logWarningErrorToConsole(nsIScriptError::warningFlag, "ignoringStrictDynamic", + params, ArrayLength(params)); + return 
nullptr; + } + mStrictDynamic = true; + return new nsCSPKeywordSrc(CSP_KeywordToEnum(mCurToken)); + } + if (CSP_IsKeyword(mCurToken, CSP_UNSAFE_INLINE)) { nsWeakPtr ctx = mCSPContext->GetLoadingContext(); nsCOMPtr<nsIDocument> doc = do_QueryReferent(ctx); if (doc) { doc->SetHasUnsafeInlineCSP(true); } // make sure script-src only contains 'unsafe-inline' once; // ignore duplicates and log warning @@ -1182,35 +1201,68 @@ nsCSPParser::directive() if (CSP_IsDirective(mCurDir[0], nsIContentSecurityPolicy::SANDBOX_DIRECTIVE)) { sandboxFlagList(cspDir); return; } // make sure to reset cache variables when trying to invalidate unsafe-inline; // unsafe-inline might not only appear in script-src, but also in default-src mHasHashOrNonce = false; + mStrictDynamic = false; mUnsafeInlineKeywordSrc = nullptr; // Try to parse all the srcs by handing the array off to directiveValue nsTArray<nsCSPBaseSrc*> srcs; directiveValue(srcs); // If we can not parse any srcs; we let the source expression be the empty set ('none') // see, http://www.w3.org/TR/CSP11/#source-list-parsing if (srcs.Length() == 0) { nsCSPKeywordSrc *keyword = new nsCSPKeywordSrc(CSP_NONE); srcs.AppendElement(keyword); } - // Ignore unsafe-inline within script-src or style-src if nonce - // or hash is specified, see: - // http://www.w3.org/TR/CSP2/#directive-script-src - if ((cspDir->equals(nsIContentSecurityPolicy::SCRIPT_SRC_DIRECTIVE) || - cspDir->equals(nsIContentSecurityPolicy::STYLE_SRC_DIRECTIVE)) && - mHasHashOrNonce && mUnsafeInlineKeywordSrc) { + // If policy contains 'strict-dynamic' invalidate all srcs within script-src. + if (mStrictDynamic) { + MOZ_ASSERT(cspDir->equals(nsIContentSecurityPolicy::SCRIPT_SRC_DIRECTIVE), + "strict-dynamic only allowed within script-src"); + for (uint32_t i = 0; i < srcs.Length(); i++) { + // Please note that nsCSPNonceSrc as well as nsCSPHashSrc overwrite invalidate(), + // so it's fine to just call invalidate() on all srcs. 
Please also note that + // nsCSPKeywordSrc() can not be invalidated and always returns false unless the + // keyword is 'strict-dynamic' in which case we allow the load if the script is + // not parser created! + srcs[i]->invalidate(); + // Log a message to the console that src will be ignored. + nsAutoString srcStr; + srcs[i]->toString(srcStr); + // Even though we invalidate all of the srcs internally, we don't want to log + // messages for the srcs: (1) strict-dynamic, (2) unsafe-inline, + // (3) nonces, and (4) hashes + if (!srcStr.EqualsASCII(CSP_EnumToKeyword(CSP_STRICT_DYNAMIC)) && + !srcStr.EqualsASCII(CSP_EnumToKeyword(CSP_UNSAFE_EVAL)) && + !StringBeginsWith(NS_ConvertUTF16toUTF8(srcStr), NS_LITERAL_CSTRING("'nonce-")) && + !StringBeginsWith(NS_ConvertUTF16toUTF8(srcStr), NS_LITERAL_CSTRING("'sha"))) + { + const char16_t* params[] = { srcStr.get() }; + logWarningErrorToConsole(nsIScriptError::warningFlag, "ignoringSrcForStrictDynamic", + params, ArrayLength(params)); + } + } + // Log a warning that all scripts might be blocked because the policy contains + // 'strict-dynamic' but no valid nonce or hash. + if (!mHasHashOrNonce) { + const char16_t* params[] = { mCurDir[0].get() }; + logWarningErrorToConsole(nsIScriptError::warningFlag, "strictDynamicButNoHashOrNonce", + params, ArrayLength(params)); + } + } + else if (mHasHashOrNonce && mUnsafeInlineKeywordSrc && + (cspDir->equals(nsIContentSecurityPolicy::SCRIPT_SRC_DIRECTIVE) || + cspDir->equals(nsIContentSecurityPolicy::STYLE_SRC_DIRECTIVE))) { mUnsafeInlineKeywordSrc->invalidate(); // log to the console that unsafe-inline will be ignored const char16_t* params[] = { u"'unsafe-inline'" }; logWarningErrorToConsole(nsIScriptError::warningFlag, "ignoringSrcWithinScriptStyleSrc", params, ArrayLength(params)); } // Add the newly created srcs to the directive and add the directive to the policy
--- a/dom/security/nsCSPParser.h +++ b/dom/security/nsCSPParser.h @@ -107,16 +107,17 @@ class nsCSPParser { private: nsCSPParser(cspTokens& aTokens, nsIURI* aSelfURI, nsCSPContext* aCSPContext, bool aDeliveredViaMetaTag); static bool sCSPExperimentalEnabled; + static bool sStrictDynamicEnabled; ~nsCSPParser(); // Parsing the CSP using the source-list from http://www.w3.org/TR/CSP11/#source-list nsCSPPolicy* policy(); void directive(); nsCSPDirective* directiveName(); @@ -231,18 +232,20 @@ class nsCSPParser { */ const char16_t* mCurChar; const char16_t* mEndChar; nsString mCurValue; nsString mCurToken; nsTArray<nsString> mCurDir; - // cache variables to ignore unsafe-inline if hash or nonce is specified + // helpers to allow invalidation of srcs within script-src and style-src + // if either 'strict-dynamic' or at least a hash or nonce is present. bool mHasHashOrNonce; // false, if no hash or nonce is defined + bool mStrictDynamic; // false, if 'strict-dynamic' is not defined nsCSPKeywordSrc* mUnsafeInlineKeywordSrc; // null, otherwise invlidate() // cache variables for child-src and frame-src directive handling. // frame-src is deprecated in favor of child-src, however if we // see a frame-src directive, it takes precedence for frames and iframes. // At the end of parsing, if we have a child-src directive, we need to // decide whether it will handle frames, or if there is a frame-src we // should honor instead.
--- a/dom/security/nsCSPUtils.cpp +++ b/dom/security/nsCSPUtils.cpp @@ -419,42 +419,44 @@ CSP_AppendCSPFromHeader(nsIContentSecuri } } return NS_OK; } /* ===== nsCSPSrc ============================ */ nsCSPBaseSrc::nsCSPBaseSrc() + : mInvalidated(false) { } nsCSPBaseSrc::~nsCSPBaseSrc() { } // ::permits is only called for external load requests, therefore: // nsCSPKeywordSrc and nsCSPHashSource fall back to this base class // implementation which will never allow the load. bool nsCSPBaseSrc::permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const { if (CSPUTILSLOGENABLED()) { CSPUTILSLOG(("nsCSPBaseSrc::permits, aUri: %s", aUri->GetSpecOrDefault().get())); } return false; } // ::allows is only called for inlined loads, therefore: // nsCSPSchemeSrc, nsCSPHostSrc fall back // to this base class implementation which will never allow the load. bool -nsCSPBaseSrc::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const +nsCSPBaseSrc::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { CSPUTILSLOG(("nsCSPBaseSrc::allows, aKeyWord: %s, a HashOrNonce: %s", aKeyword == CSP_HASH ? 
"hash" : CSP_EnumToKeyword(aKeyword), NS_ConvertUTF16toUTF8(aHashOrNonce).get())); return false; } /* ====== nsCSPSchemeSrc ===================== */ @@ -466,23 +468,26 @@ nsCSPSchemeSrc::nsCSPSchemeSrc(const nsA } nsCSPSchemeSrc::~nsCSPSchemeSrc() { } bool nsCSPSchemeSrc::permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const { if (CSPUTILSLOGENABLED()) { CSPUTILSLOG(("nsCSPSchemeSrc::permits, aUri: %s", aUri->GetSpecOrDefault().get())); } MOZ_ASSERT((!mScheme.EqualsASCII("")), "scheme can not be the empty string"); + if (mInvalidated) { + return false; + } return permitsScheme(mScheme, aUri, aReportOnly, aUpgradeInsecure); } bool nsCSPSchemeSrc::visit(nsCSPSrcVisitor* aVisitor) const { return aVisitor->visitSchemeSrc(*this); } @@ -586,23 +591,27 @@ permitsPort(const nsAString& aEnforcemen } // ports do not match, block the load. return false; } bool nsCSPHostSrc::permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const { if (CSPUTILSLOGENABLED()) { CSPUTILSLOG(("nsCSPHostSrc::permits, aUri: %s", aUri->GetSpecOrDefault().get())); } + if (mInvalidated) { + return false; + } + // we are following the enforcement rules from the spec, see: // http://www.w3.org/TR/CSP11/#match-source-expression // 4.3) scheme matching: Check if the scheme matches. 
if (!permitsScheme(mScheme, aUri, aReportOnly, aUpgradeInsecure)) { return false; } @@ -753,100 +762,106 @@ nsCSPHostSrc::appendPath(const nsAString { mPath.Append(aPath); } /* ===== nsCSPKeywordSrc ===================== */ nsCSPKeywordSrc::nsCSPKeywordSrc(enum CSPKeyword aKeyword) : mKeyword(aKeyword) - , mInvalidated(false) { NS_ASSERTION((aKeyword != CSP_SELF), "'self' should have been replaced in the parser"); } nsCSPKeywordSrc::~nsCSPKeywordSrc() { } bool -nsCSPKeywordSrc::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const +nsCSPKeywordSrc::permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const +{ + // no need to check for invalidated, this will always return false unless + // it is an nsCSPKeywordSrc for 'strict-dynamic', which should allow non + // parser created scripts. + return ((mKeyword == CSP_STRICT_DYNAMIC) && !aParserCreated); +} + +bool +nsCSPKeywordSrc::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { CSPUTILSLOG(("nsCSPKeywordSrc::allows, aKeyWord: %s, aHashOrNonce: %s, mInvalidated: %s", CSP_EnumToKeyword(aKeyword), + CSP_EnumToKeyword(mKeyword), NS_ConvertUTF16toUTF8(aHashOrNonce).get(), mInvalidated ? "yes" : "false")); - // if unsafe-inline should be ignored, then bail early + if (mInvalidated) { - NS_ASSERTION(mKeyword == CSP_UNSAFE_INLINE, - "should only invalidate unsafe-inline within script-src"); + // only 'self' and 'unsafe-inline' are keywords that can be ignored. Please note that + // the parser already translates 'self' into a uri (see assertion in constructor). + MOZ_ASSERT(mKeyword == CSP_UNSAFE_INLINE, + "should only invalidate unsafe-inline"); return false; } - return mKeyword == aKeyword; + // either the keyword allows the load or the policy contains 'strict-dynamic', in which + // case we have to make sure the script is not parser created before allowing the load. 
+ return ((mKeyword == aKeyword) || + ((mKeyword == CSP_STRICT_DYNAMIC) && !aParserCreated)); } bool nsCSPKeywordSrc::visit(nsCSPSrcVisitor* aVisitor) const { return aVisitor->visitKeywordSrc(*this); } void nsCSPKeywordSrc::toString(nsAString& outStr) const { - if (mInvalidated) { - MOZ_ASSERT(mKeyword == CSP_UNSAFE_INLINE, - "can only ignore 'unsafe-inline' within toString()"); - return; - } outStr.AppendASCII(CSP_EnumToKeyword(mKeyword)); } -void -nsCSPKeywordSrc::invalidate() -{ - mInvalidated = true; - MOZ_ASSERT(mKeyword == CSP_UNSAFE_INLINE, - "invalidate 'unsafe-inline' only within script-src"); -} - /* ===== nsCSPNonceSrc ==================== */ nsCSPNonceSrc::nsCSPNonceSrc(const nsAString& aNonce) : mNonce(aNonce) { } nsCSPNonceSrc::~nsCSPNonceSrc() { } bool nsCSPNonceSrc::permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const { if (CSPUTILSLOGENABLED()) { CSPUTILSLOG(("nsCSPNonceSrc::permits, aUri: %s, aNonce: %s", aUri->GetSpecOrDefault().get(), NS_ConvertUTF16toUTF8(aNonce).get())); } + // nonces can not be invalidated by strict-dynamic return mNonce.Equals(aNonce); } bool -nsCSPNonceSrc::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const +nsCSPNonceSrc::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { CSPUTILSLOG(("nsCSPNonceSrc::allows, aKeyWord: %s, a HashOrNonce: %s", CSP_EnumToKeyword(aKeyword), NS_ConvertUTF16toUTF8(aHashOrNonce).get())); if (aKeyword != CSP_NONCE) { return false; } + // nonces can not be invalidated by strict-dynamic return mNonce.Equals(aHashOrNonce); } bool nsCSPNonceSrc::visit(nsCSPSrcVisitor* aVisitor) const { return aVisitor->visitNonceSrc(*this); } @@ -869,25 +884,28 @@ nsCSPHashSrc::nsCSPHashSrc(const nsAStri ToLowerCase(mAlgorithm); } nsCSPHashSrc::~nsCSPHashSrc() { } bool -nsCSPHashSrc::allows(enum CSPKeyword aKeyword, const 
nsAString& aHashOrNonce) const +nsCSPHashSrc::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { CSPUTILSLOG(("nsCSPHashSrc::allows, aKeyWord: %s, a HashOrNonce: %s", CSP_EnumToKeyword(aKeyword), NS_ConvertUTF16toUTF8(aHashOrNonce).get())); if (aKeyword != CSP_HASH) { return false; } + // hashes can not be invalidated by strict-dynamic + // Convert aHashOrNonce to UTF-8 NS_ConvertUTF16toUTF8 utf8_hash(aHashOrNonce); nsresult rv; nsCOMPtr<nsICryptoHash> hasher; hasher = do_CreateInstance("@mozilla.org/security/hash;1", &rv); NS_ENSURE_SUCCESS(rv, false); @@ -987,39 +1005,40 @@ nsCSPDirective::~nsCSPDirective() { for (uint32_t i = 0; i < mSrcs.Length(); i++) { delete mSrcs[i]; } } bool nsCSPDirective::permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const { if (CSPUTILSLOGENABLED()) { CSPUTILSLOG(("nsCSPDirective::permits, aUri: %s", aUri->GetSpecOrDefault().get())); } for (uint32_t i = 0; i < mSrcs.Length(); i++) { - if (mSrcs[i]->permits(aUri, aNonce, aWasRedirected, aReportOnly, aUpgradeInsecure)) { + if (mSrcs[i]->permits(aUri, aNonce, aWasRedirected, aReportOnly, aUpgradeInsecure, aParserCreated)) { return true; } } return false; } bool -nsCSPDirective::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const +nsCSPDirective::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { CSPUTILSLOG(("nsCSPDirective::allows, aKeyWord: %s, a HashOrNonce: %s", CSP_EnumToKeyword(aKeyword), NS_ConvertUTF16toUTF8(aHashOrNonce).get())); for (uint32_t i = 0; i < mSrcs.Length(); i++) { - if (mSrcs[i]->allows(aKeyword, aHashOrNonce)) { + if (mSrcs[i]->allows(aKeyword, aHashOrNonce, aParserCreated)) { return true; } } return false; } void nsCSPDirective::toString(nsAString& outStr) const @@ -1302,17 +1321,18 @@ nsRequireSRIForDirective::hasType(nsCont 
bool nsRequireSRIForDirective::restrictsContentType(const nsContentPolicyType aType) const { return this->hasType(aType); } bool -nsRequireSRIForDirective::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const +nsRequireSRIForDirective::allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { // can only disallow CSP_REQUIRE_SRI_FOR. return (aKeyword != CSP_REQUIRE_SRI_FOR); } /* ===== nsCSPPolicy ========================= */ nsCSPPolicy::nsCSPPolicy() @@ -1332,25 +1352,26 @@ nsCSPPolicy::~nsCSPPolicy() } bool nsCSPPolicy::permits(CSPDirective aDir, nsIURI* aUri, bool aSpecific) const { nsString outp; - return this->permits(aDir, aUri, EmptyString(), false, aSpecific, outp); + return this->permits(aDir, aUri, EmptyString(), false, aSpecific, false, outp); } bool nsCSPPolicy::permits(CSPDirective aDir, nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, bool aSpecific, + bool aParserCreated, nsAString& outViolatedDirective) const { if (CSPUTILSLOGENABLED()) { CSPUTILSLOG(("nsCSPPolicy::permits, aUri: %s, aDir: %d, aSpecific: %s", aUri->GetSpecOrDefault().get(), aDir, aSpecific ? "true" : "false")); } @@ -1358,56 +1379,59 @@ nsCSPPolicy::permits(CSPDirective aDir, outViolatedDirective.Truncate(); nsCSPDirective* defaultDir = nullptr; // Try to find a relevant directive // These directive arrays are short (1-5 elements), not worth using a hashtable. for (uint32_t i = 0; i < mDirectives.Length(); i++) { if (mDirectives[i]->equals(aDir)) { - if (!mDirectives[i]->permits(aUri, aNonce, aWasRedirected, mReportOnly, mUpgradeInsecDir)) { + if (!mDirectives[i]->permits(aUri, aNonce, aWasRedirected, mReportOnly, + mUpgradeInsecDir, aParserCreated)) { mDirectives[i]->toString(outViolatedDirective); return false; } return true; } if (mDirectives[i]->isDefaultDirective()) { defaultDir = mDirectives[i]; } } // If the above loop runs through, we haven't found a matching directive. 
// Avoid relooping, just store the result of default-src while looping. if (!aSpecific && defaultDir) { - if (!defaultDir->permits(aUri, aNonce, aWasRedirected, mReportOnly, mUpgradeInsecDir)) { + if (!defaultDir->permits(aUri, aNonce, aWasRedirected, mReportOnly, + mUpgradeInsecDir, aParserCreated)) { defaultDir->toString(outViolatedDirective); return false; } return true; } // Nothing restricts this, so we're allowing the load // See bug 764937 return true; } bool nsCSPPolicy::allows(nsContentPolicyType aContentType, enum CSPKeyword aKeyword, - const nsAString& aHashOrNonce) const + const nsAString& aHashOrNonce, + bool aParserCreated) const { CSPUTILSLOG(("nsCSPPolicy::allows, aKeyWord: %s, a HashOrNonce: %s", CSP_EnumToKeyword(aKeyword), NS_ConvertUTF16toUTF8(aHashOrNonce).get())); nsCSPDirective* defaultDir = nullptr; // Try to find a matching directive for (uint32_t i = 0; i < mDirectives.Length(); i++) { if (mDirectives[i]->restrictsContentType(aContentType)) { - if (mDirectives[i]->allows(aKeyword, aHashOrNonce)) { + if (mDirectives[i]->allows(aKeyword, aHashOrNonce, aParserCreated)) { return true; } return false; } if (mDirectives[i]->isDefaultDirective()) { defaultDir = mDirectives[i]; } } @@ -1420,32 +1444,32 @@ nsCSPPolicy::allows(nsContentPolicyType return true; } return false; } // If the above loop runs through, we haven't found a matching directive. // Avoid relooping, just store the result of default-src while looping. 
if (defaultDir) { - return defaultDir->allows(aKeyword, aHashOrNonce); + return defaultDir->allows(aKeyword, aHashOrNonce, aParserCreated); } // Allowing the load; see Bug 885433 // a) inline scripts (also unsafe eval) should only be blocked // if there is a [script-src] or [default-src] // b) inline styles should only be blocked // if there is a [style-src] or [default-src] return true; } bool nsCSPPolicy::allows(nsContentPolicyType aContentType, enum CSPKeyword aKeyword) const { - return allows(aContentType, aKeyword, NS_LITERAL_STRING("")); + return allows(aContentType, aKeyword, NS_LITERAL_STRING(""), false); } void nsCSPPolicy::toString(nsAString& outStr) const { uint32_t length = mDirectives.Length(); for (uint32_t i = 0; i < length; ++i) {
--- a/dom/security/nsCSPUtils.h +++ b/dom/security/nsCSPUtils.h @@ -121,32 +121,34 @@ inline CSPDirective CSP_StringToCSPDirec // CSPStrKeywords underneath. enum CSPKeyword { CSP_SELF = 0, CSP_UNSAFE_INLINE, CSP_UNSAFE_EVAL, CSP_NONE, CSP_NONCE, CSP_REQUIRE_SRI_FOR, + CSP_STRICT_DYNAMIC, // CSP_LAST_KEYWORD_VALUE always needs to be the last element in the enum // because we use it to calculate the size for the char* array. CSP_LAST_KEYWORD_VALUE, // Putting CSP_HASH after the delimiter, because CSP_HASH is not a valid // keyword (hash uses e.g. sha256, sha512) but we use CSP_HASH internally // to identify allowed hashes in ::allows. CSP_HASH }; static const char* CSPStrKeywords[] = { "'self'", // CSP_SELF = 0 "'unsafe-inline'", // CSP_UNSAFE_INLINE "'unsafe-eval'", // CSP_UNSAFE_EVAL "'none'", // CSP_NONE "'nonce-", // CSP_NONCE - "require-sri-for" // CSP_REQUIRE_SRI_FOR + "require-sri-for", // CSP_REQUIRE_SRI_FOR + "'strict-dynamic'" // CSP_STRICT_DYNAMIC // Remember: CSP_HASH is not supposed to be used }; inline const char* CSP_EnumToKeyword(enum CSPKeyword aKey) { // Make sure all elements in enum CSPKeyword got added to CSPStrKeywords. 
static_assert((sizeof(CSPStrKeywords) / sizeof(CSPStrKeywords[0]) == static_cast<uint32_t>(CSP_LAST_KEYWORD_VALUE)), @@ -198,31 +200,41 @@ void CSP_PercentDecodeStr(const nsAStrin /* =============== nsCSPSrc ================== */ class nsCSPBaseSrc { public: nsCSPBaseSrc(); virtual ~nsCSPBaseSrc(); virtual bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const; - virtual bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const; + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const; + virtual bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const; virtual bool visit(nsCSPSrcVisitor* aVisitor) const = 0; virtual void toString(nsAString& outStr) const = 0; + + virtual void invalidate() const + { mInvalidated = true; } + + protected: + // invalidate srcs if 'strict-dynamic' is present or also invalidate + // 'unsafe-inline' if nonce- or hash-source specified + mutable bool mInvalidated; + }; /* =============== nsCSPSchemeSrc ============ */ class nsCSPSchemeSrc : public nsCSPBaseSrc { public: explicit nsCSPSchemeSrc(const nsAString& aScheme); virtual ~nsCSPSchemeSrc(); bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const; + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const; bool visit(nsCSPSrcVisitor* aVisitor) const; void toString(nsAString& outStr) const; inline void getScheme(nsAString& outStr) const { outStr.Assign(mScheme); }; private: nsString mScheme; @@ -231,17 +243,17 @@ class nsCSPSchemeSrc : public nsCSPBaseS /* =============== nsCSPHostSrc ============== */ class nsCSPHostSrc : public nsCSPBaseSrc { public: explicit nsCSPHostSrc(const nsAString& aHost); virtual ~nsCSPHostSrc(); bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const; + bool aReportOnly, bool 
aUpgradeInsecure, bool aParserCreated) const; bool visit(nsCSPSrcVisitor* aVisitor) const; void toString(nsAString& outStr) const; void setScheme(const nsAString& aScheme); void setPort(const nsAString& aPort); void appendPath(const nsAString &aPath); inline void getScheme(nsAString& outStr) const @@ -265,67 +277,91 @@ class nsCSPHostSrc : public nsCSPBaseSrc /* =============== nsCSPKeywordSrc ============ */ class nsCSPKeywordSrc : public nsCSPBaseSrc { public: explicit nsCSPKeywordSrc(CSPKeyword aKeyword); virtual ~nsCSPKeywordSrc(); - bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const; + bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const; + bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const; bool visit(nsCSPSrcVisitor* aVisitor) const; void toString(nsAString& outStr) const; - void invalidate(); inline CSPKeyword getKeyword() const { return mKeyword; }; + inline void invalidate() const + { + // keywords that need to be invalidated + if (mKeyword == CSP_SELF || mKeyword == CSP_UNSAFE_INLINE) { + mInvalidated = true; + } + } + private: CSPKeyword mKeyword; - // invalidate 'unsafe-inline' if nonce- or hash-source specified - bool mInvalidated; }; /* =============== nsCSPNonceSource =========== */ class nsCSPNonceSrc : public nsCSPBaseSrc { public: explicit nsCSPNonceSrc(const nsAString& aNonce); virtual ~nsCSPNonceSrc(); bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const; - bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const; + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const; + bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const; bool visit(nsCSPSrcVisitor* aVisitor) const; void toString(nsAString& outStr) const; inline void getNonce(nsAString& 
outStr) const { outStr.Assign(mNonce); }; + inline void invalidate() const + { + // override nsCSPBaseSrc::invalidate() and explicitly + // do *not* invalidate, because 'strict-dynamic' should + // not invalidate nonces. + } + private: nsString mNonce; }; /* =============== nsCSPHashSource ============ */ class nsCSPHashSrc : public nsCSPBaseSrc { public: nsCSPHashSrc(const nsAString& algo, const nsAString& hash); virtual ~nsCSPHashSrc(); - bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const; + bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const; void toString(nsAString& outStr) const; bool visit(nsCSPSrcVisitor* aVisitor) const; inline void getAlgorithm(nsAString& outStr) const { outStr.Assign(mAlgorithm); }; inline void getHash(nsAString& outStr) const { outStr.Assign(mHash); }; + inline void invalidate() const + { + // override nsCSPBaseSrc::invalidate() and explicitly + // do *not* invalidate, because 'strict-dynamic' should + // not invalidate hashes. 
+ } + private: nsString mAlgorithm; nsString mHash; }; /* =============== nsCSPReportURI ============ */ class nsCSPReportURI : public nsCSPBaseSrc { @@ -376,18 +412,19 @@ class nsCSPSrcVisitor { /* =============== nsCSPDirective ============= */ class nsCSPDirective { public: explicit nsCSPDirective(CSPDirective aDirective); virtual ~nsCSPDirective(); virtual bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const; - virtual bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const; + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const; + virtual bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const; virtual void toString(nsAString& outStr) const; void toDomCSPStruct(mozilla::dom::CSP& outCSP) const; virtual void addSrcs(const nsTArray<nsCSPBaseSrc*>& aSrcs) { mSrcs = aSrcs; } virtual bool restrictsContentType(nsContentPolicyType aContentType) const; @@ -433,23 +470,24 @@ class nsCSPChildSrcDirective : public ns /* =============== nsBlockAllMixedContentDirective === */ class nsBlockAllMixedContentDirective : public nsCSPDirective { public: explicit nsBlockAllMixedContentDirective(CSPDirective aDirective); ~nsBlockAllMixedContentDirective(); bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const { return false; } bool permits(nsIURI* aUri) const { return false; } - bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const + bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { return false; } void toString(nsAString& outStr) const; void addSrcs(const nsTArray<nsCSPBaseSrc*>& aSrcs) { MOZ_ASSERT(false, "block-all-mixed-content does not hold any srcs"); } }; @@ -485,23 +523,24 @@ class nsBlockAllMixedContentDirective : * gets 
upgraded from ws to wss. */ class nsUpgradeInsecureDirective : public nsCSPDirective { public: explicit nsUpgradeInsecureDirective(CSPDirective aDirective); ~nsUpgradeInsecureDirective(); bool permits(nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, - bool aReportOnly, bool aUpgradeInsecure) const + bool aReportOnly, bool aUpgradeInsecure, bool aParserCreated) const { return false; } bool permits(nsIURI* aUri) const { return false; } - bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const + bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const { return false; } void toString(nsAString& outStr) const; void addSrcs(const nsTArray<nsCSPBaseSrc*>& aSrcs) { MOZ_ASSERT(false, "upgrade-insecure-requests does not hold any srcs"); } }; @@ -513,17 +552,18 @@ class nsRequireSRIForDirective : public ~nsRequireSRIForDirective(); void toString(nsAString& outStr) const; void addType(nsContentPolicyType aType) { mTypes.AppendElement(aType); } bool hasType(nsContentPolicyType aType) const; bool restrictsContentType(nsContentPolicyType aType) const; - bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce) const; + bool allows(enum CSPKeyword aKeyword, const nsAString& aHashOrNonce, + bool aParserCreated) const; private: nsTArray<nsContentPolicyType> mTypes; }; /* =============== nsCSPPolicy ================== */ class nsCSPPolicy { @@ -531,23 +571,25 @@ class nsCSPPolicy { nsCSPPolicy(); virtual ~nsCSPPolicy(); bool permits(CSPDirective aDirective, nsIURI* aUri, const nsAString& aNonce, bool aWasRedirected, bool aSpecific, + bool aParserCreated, nsAString& outViolatedDirective) const; bool permits(CSPDirective aDir, nsIURI* aUri, bool aSpecific) const; bool allows(nsContentPolicyType aContentType, enum CSPKeyword aKeyword, - const nsAString& aHashOrNonce) const; + const nsAString& aHashOrNonce, + bool aParserCreated) const; bool allows(nsContentPolicyType aContentType, enum CSPKeyword aKeyword) 
const; void toString(nsAString& outStr) const; void toDomCSPStruct(mozilla::dom::CSP& outCSP) const; inline void addDirective(nsCSPDirective* aDir) { mDirectives.AppendElement(aDir); }
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic.js @@ -0,0 +1,1 @@ +document.getElementById("testdiv").innerHTML = "allowed";
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_default_src.html @@ -0,0 +1,14 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> + +<div id="testdiv">blocked</div> +<script nonce="foo" src="http://mochi.test:8888/tests/dom/security/test/csp/file_strict_dynamic_default_src.js"></script> + +<img id="testimage" src="http://mochi.test:8888/tests/image/test/mochitest/blue.png"></img> + +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_default_src.js @@ -0,0 +1,1 @@ +document.getElementById("testdiv").innerHTML = "allowed";
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_non_parser_inserted.html @@ -0,0 +1,17 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> +<div id="testdiv">blocked</div> + +<script nonce="foo"> + // generates a *non* parser inserted script and should be allowed + var myScript = document.createElement('script'); + myScript.src = 'http://example.com/tests/dom/security/test/csp/file_strict_dynamic.js'; + document.head.appendChild(myScript); +</script> + +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_non_parser_inserted_inline.html @@ -0,0 +1,16 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> +<div id="testdiv">blocked</div> + +<script nonce="foo"> + var dynamicScript = document.createElement('script'); + dynamicScript.textContent = 'document.getElementById("testdiv").textContent="allowed"'; + document.head.appendChild(dynamicScript); +</script> + +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_parser_inserted_doc_write.html @@ -0,0 +1,15 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> +<div id="testdiv">blocked</div> + +<script nonce="foo"> + // generates a parser inserted script and should be blocked + document.write("<script src='http://example.com/tests/dom/security/test/csp/file_strict_dynamic.js'><\/script>"); +</script> + +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_parser_inserted_doc_write_correct_nonce.html @@ -0,0 +1,15 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> +<div id="testdiv">blocked</div> + +<script nonce="foo"> + // generates a parser inserted script with a valid nonce- and should be allowed + document.write("<script nonce='foo' src='http://example.com/tests/dom/security/test/csp/file_strict_dynamic.js'><\/script>"); +</script> + +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_script_extern.html @@ -0,0 +1,10 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> +<div id="testdiv">blocked</div> +<script nonce="foo" src="http://example.com/tests/dom/security/test/csp/file_strict_dynamic.js"></script> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_script_inline.html @@ -0,0 +1,14 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> +<div id="testdiv">blocked</div> + +<script nonce="foo"> + document.getElementById("testdiv").innerHTML = "allowed"; +</script> + +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/file_strict_dynamic_unsafe_eval.html @@ -0,0 +1,14 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> +</head> +<body> +<div id="testdiv">blocked</div> + +<script nonce="foo"> + eval('document.getElementById("testdiv").innerHTML = "allowed";'); +</script> + +</body> +</html> \ No newline at end of file
--- a/dom/security/test/csp/mochitest.ini +++ b/dom/security/test/csp/mochitest.ini @@ -184,16 +184,26 @@ support-files = file_sandbox_11.html file_sandbox_12.html file_require_sri_meta.sjs file_require_sri_meta.js file_sendbeacon.html file_upgrade_insecure_docwrite_iframe.sjs file_data-uri_blocked.html file_data-uri_blocked.html^headers^ + file_strict_dynamic_script_inline.html + file_strict_dynamic_script_extern.html + file_strict_dynamic.js + file_strict_dynamic_parser_inserted_doc_write.html + file_strict_dynamic_parser_inserted_doc_write_correct_nonce.html + file_strict_dynamic_non_parser_inserted.html + file_strict_dynamic_non_parser_inserted_inline.html + file_strict_dynamic_unsafe_eval.html + file_strict_dynamic_default_src.html + file_strict_dynamic_default_src.js [test_base-uri.html] [test_blob_data_schemes.html] [test_connect-src.html] [test_CSP.html] [test_allow_https_schemes.html] [test_bug663567.html] [test_bug802872.html] @@ -267,8 +277,11 @@ tags = mcb [test_iframe_sandbox_top_1.html] [test_sandbox.html] [test_ping.html] [test_require_sri_meta.html] [test_sendbeacon.html] [test_upgrade_insecure_docwrite_iframe.html] [test_bug1242019.html] [test_bug1312272.html] +[test_strict_dynamic.html] +[test_strict_dynamic_parser_inserted.html] +[test_strict_dynamic_default_src.html]
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/test_strict_dynamic.html @@ -0,0 +1,115 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> + <!-- Including SimpleTest.js so we can use waitForExplicitFinish !--> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> + <iframe style="width:100%;" id="testframe"></iframe> + +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SpecialPowers.setBoolPref("security.csp.enableStrictDynamic", true); + +/* Description of the test: + * We load scripts with a CSP of 'strict-dynamic' with valid + * and invalid nonces and make sure scripts are allowed/blocked + * accordingly. Different tests load inline and external scripts + * also using a CSP including http: and https: making sure + * other srcs are invalided by 'strict-dynamic'. + */ + +var tests = [ + { + desc: "strict-dynamic with valid nonce should be allowed", + result: "allowed", + file: "file_strict_dynamic_script_extern.html", + policy: "script-src 'strict-dynamic' 'nonce-foo' https: 'none' 'self'" + }, + { + desc: "strict-dynamic with invalid nonce should be blocked", + result: "blocked", + file: "file_strict_dynamic_script_extern.html", + policy: "script-src 'strict-dynamic' 'nonce-bar' http: http://example.com" + }, + { + desc: "strict-dynamic, whitelist and invalid nonce should be blocked", + result: "blocked", + file: "file_strict_dynamic_script_extern.html", + policy: "script-src 'strict-dynamic' 'nonce-bar' 'unsafe-inline' http: http://example.com" + }, + { + desc: "strict-dynamic with no 'nonce-' should be blocked", + result: "blocked", + file: "file_strict_dynamic_script_extern.html", + policy: "script-src 'strict-dynamic'" + }, + // inline scripts + { + desc: "strict-dynamic with valid nonce should be allowed", + result: "allowed", + file: 
"file_strict_dynamic_script_inline.html", + policy: "script-src 'strict-dynamic' 'nonce-foo' https: 'none' 'self'" + }, + { + desc: "strict-dynamic with invalid nonce should be blocked", + result: "blocked", + file: "file_strict_dynamic_script_inline.html", + policy: "script-src 'strict-dynamic' 'nonce-bar' http: http://example.com" + }, + { + desc: "strict-dynamic, unsafe-inline and invalid nonce should be blocked", + result: "blocked", + file: "file_strict_dynamic_script_inline.html", + policy: "script-src 'strict-dynamic' 'nonce-bar' 'unsafe-inline' http: http://example.com" + }, + { + desc: "strict-dynamic with no 'nonce-' should be blocked", + result: "blocked", + file: "file_strict_dynamic_script_inline.html", + policy: "script-src 'strict-dynamic'" + }, +]; + +var counter = 0; +var curTest; + +function loadNextTest() { + if (counter == tests.length) { + SimpleTest.finish(); + return; + } + + curTest = tests[counter++]; + var src = "file_testserver.sjs?file="; + // append the file that should be served + src += escape("tests/dom/security/test/csp/" + curTest.file) + // append the CSP that should be used to serve the file + src += "&csp=" + escape(curTest.policy); + + document.getElementById("testframe").addEventListener("load", test, false); + document.getElementById("testframe").src = src; +} + +function test() { + try { + document.getElementById("testframe").removeEventListener('load', test, false); + var testframe = document.getElementById("testframe"); + var divcontent = testframe.contentWindow.document.getElementById('testdiv').innerHTML; + is(divcontent, curTest.result, curTest.desc); + } + catch (e) { + ok(false, "ERROR: could not access content for test: '" + curTest.desc + "'"); + } + loadNextTest(); +} + +// start running the tests +loadNextTest(); + +</script> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/test_strict_dynamic_default_src.html @@ -0,0 +1,136 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> + <!-- Including SimpleTest.js so we can use waitForExplicitFinish !--> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> + <iframe style="width:100%;" id="testframe"></iframe> + +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SpecialPowers.setBoolPref("security.csp.enableStrictDynamic", true); + +/* Description of the test: + * We load scripts and images with a CSP of 'strict-dynamic' making sure + * whitelists get ignored for scripts but not for images when strict-dynamic + * appears in default-src. + * + * Please note that we do not support strict-dynamic within default-src yet, + * see Bug 1313937. When updating this test please do not change the + * csp policies, but only replace todo_is() with is(). 
+ */ + +var tests = [ + { + script_desc: "(test1) script should be allowed because of valid nonce", + img_desc: "(test1) img should be allowed because of 'self'", + script_result: "allowed", + img_result: "allowed", + policy: "default-src 'strict-dynamic' 'self'; script-src 'nonce-foo'" + }, + { + script_desc: "(test 2) script should be blocked because of invalid nonce", + img_desc: "(test 2) img should be allowed because of valid scheme-src", + script_result: "blocked", + img_result: "allowed", + policy: "default-src 'strict-dynamic' http:; script-src 'nonce-bar' http:" + }, + { + script_desc: "(test 3) script should be blocked because of invalid nonce", + img_desc: "(test 3) img should be allowed because of valid host-src", + script_result: "blocked", + script_enforced: "", + img_result: "allowed", + policy: "default-src 'strict-dynamic' mochi.test; script-src 'nonce-bar' http:" + }, + { + script_desc: "(test 4) script should be allowed because of valid nonce", + img_desc: "(test 4) img should be blocked because of default-src 'strict-dynamic'", + script_result: "allowed", + img_result: "blocked", + policy: "default-src 'strict-dynamic'; script-src 'nonce-foo'" + }, + // some reverse order tests (have script-src appear before default-src) + { + script_desc: "(test 5) script should be allowed because of valid nonce", + img_desc: "(test 5) img should be blocked because of default-src 'strict-dynamic'", + script_result: "allowed", + img_result: "blocked", + policy: "script-src 'nonce-foo'; default-src 'strict-dynamic';" + }, + { + script_desc: "(test 6) script should be blocked because of invalid nonce", + img_desc: "(test 6) img should be allowed because of default-src http:", + script_result: "blocked", + img_result: "allowed", + policy: "script-src 'nonce-bar' http:; default-src 'strict-dynamic' http:;" + }, + { + script_desc: "(test 7) script should be blocked because of invalid nonce", + img_desc: "(test 7) img should be allowed because of img-src http:", + 
script_result: "blocked", + img_result: "allowed", + policy: "script-src 'nonce-bar' http:; default-src 'strict-dynamic' http:; img-src http:" + }, +]; + +var counter = 0; +var curTest; + +function loadNextTest() { + if (counter == tests.length) { + SimpleTest.finish(); + return; + } + + curTest = tests[counter++]; + var src = "file_testserver.sjs?file="; + // append the file that should be served + src += escape("tests/dom/security/test/csp/file_strict_dynamic_default_src.html"); + // append the CSP that should be used to serve the file + src += "&csp=" + escape(curTest.policy); + + document.getElementById("testframe").addEventListener("load", checkResults, false); + document.getElementById("testframe").src = src; +} + +function checkResults() { + try { + var testframe = document.getElementById("testframe"); + testframe.removeEventListener('load', checkResults, false); + + // check if script loaded + var divcontent = testframe.contentWindow.document.getElementById('testdiv').innerHTML; + if (curTest.script_result === "blocked") { + todo_is(divcontent, curTest.script_result, curTest.script_desc); + } + else { + is(divcontent, curTest.script_result, curTest.script_desc); + } + + // check if image loaded + var testimg = testframe.contentWindow.document.getElementById("testimage"); + if (curTest.img_result === "allowed") { + ok(testimg.complete, curTest.img_desc); + } + else { + ok((testimg.width == 0) && (testimg.height == 0), curTest.img_desc); + } + } + catch (e) { + ok(false, "ERROR: could not access content for test: '" + curTest.script_desc + "'"); + } + + loadNextTest(); +} + +// start running the tests +loadNextTest(); + +</script> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/dom/security/test/csp/test_strict_dynamic_parser_inserted.html @@ -0,0 +1,95 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Bug 1299483 - CSP: Implement 'strict-dynamic'</title> + <!-- Including SimpleTest.js so we can use waitForExplicitFinish !--> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> + <iframe style="width:100%;" id="testframe"></iframe> + +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SpecialPowers.setBoolPref("security.csp.enableStrictDynamic", true); + +/* Description of the test: + * We loader parser and non parser inserted scripts making sure that + * parser inserted scripts are blocked if strict-dynamic is present + * and no valid nonce and also making sure that non-parser inserted + * scripts are allowed to execute. + */ + +var tests = [ + { + desc: "(parser inserted script) using doc.write(<script>) should be blocked", + result: "blocked", + file: "file_strict_dynamic_parser_inserted_doc_write.html", + policy: "script-src 'strict-dynamic' 'nonce-foo' http:" + }, + { + desc: "(parser inserted script with valid nonce) using doc.write(<script>) should be allowed", + result: "allowed", + file: "file_strict_dynamic_parser_inserted_doc_write_correct_nonce.html", + policy: "script-src 'strict-dynamic' 'nonce-foo' https:" + }, + { + desc: "(non parser inserted script) using appendChild() should allow external script", + result: "allowed", + file: "file_strict_dynamic_non_parser_inserted.html", + policy: "script-src 'strict-dynamic' 'nonce-foo' https:" + }, + { + desc: "(non parser inserted script) using appendChild() should allow inline script", + result: "allowed", + file: "file_strict_dynamic_non_parser_inserted_inline.html", + policy: "script-src 'strict-dynamic' 'nonce-foo' https:" + }, + { + desc: "strict-dynamic should not invalidate 
'unsafe-eval'", + result: "allowed", + file: "file_strict_dynamic_unsafe_eval.html", + policy: "script-src 'strict-dynamic' 'nonce-foo' 'unsafe-eval'" + }, +]; + +var counter = 0; +var curTest; + +function loadNextTest() { + if (counter == tests.length) { + SimpleTest.finish(); + return; + } + + curTest = tests[counter++]; + var src = "file_testserver.sjs?file="; + // append the file that should be served + src += escape("tests/dom/security/test/csp/" + curTest.file) + // append the CSP that should be used to serve the file + src += "&csp=" + escape(curTest.policy); + + document.getElementById("testframe").addEventListener("load", test, false); + document.getElementById("testframe").src = src; +} + +function test() { + try { + document.getElementById("testframe").removeEventListener('load', test, false); + var testframe = document.getElementById("testframe"); + var divcontent = testframe.contentWindow.document.getElementById('testdiv').innerHTML; + is(divcontent, curTest.result, curTest.desc); + } + catch (e) { + ok(false, "ERROR: could not access content for test: '" + curTest.desc + "'"); + } + loadNextTest(); +} + +// start running the tests +loadNextTest(); + +</script> +</body> +</html>
--- a/dom/security/test/gtest/TestCSPParser.cpp +++ b/dom/security/test/gtest/TestCSPParser.cpp @@ -170,29 +170,34 @@ nsresult runTest(uint32_t aExpectedPolic // ============================= run Tests ======================== nsresult runTestSuite(const PolicyTest* aPolicies, uint32_t aPolicyCount, uint32_t aExpectedPolicyCount) { nsresult rv; nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID); bool experimentalEnabledCache = false; + bool strictDynamicEnabledCache = false; if (prefs) { prefs->GetBoolPref("security.csp.experimentalEnabled", &experimentalEnabledCache); prefs->SetBoolPref("security.csp.experimentalEnabled", true); + + prefs->GetBoolPref("security.csp.enableStrictDynamic", &strictDynamicEnabledCache); + prefs->SetBoolPref("security.csp.enableStrictDynamic", true); } for (uint32_t i = 0; i < aPolicyCount; i++) { rv = runTest(aExpectedPolicyCount, aPolicies[i].policy, aPolicies[i].expectedResult); NS_ENSURE_SUCCESS(rv, rv); } if (prefs) { prefs->SetBoolPref("security.csp.experimentalEnabled", experimentalEnabledCache); + prefs->SetBoolPref("security.csp.enableStrictDynamic", strictDynamicEnabledCache); } return NS_OK; } // ============================= TestDirectives ======================== TEST(CSPParser, Directives) @@ -221,17 +226,23 @@ TEST(CSPParser, Directives) "report-uri http://www.example.com/" }, { "script-src 'nonce-correctscriptnonce'", "script-src 'nonce-correctscriptnonce'" }, { "script-src 'sha256-siVR8vAcqP06h2ppeNwqgjr0yZ6yned4X2VF84j4GmI='", "script-src 'sha256-siVR8vAcqP06h2ppeNwqgjr0yZ6yned4X2VF84j4GmI='" }, { "referrer no-referrer", "referrer no-referrer" }, { "require-sri-for script style", - "require-sri-for script style"} + "require-sri-for script style"}, + { "script-src 'nonce-foo' 'unsafe-inline' ", + "script-src 'nonce-foo' 'unsafe-inline'" }, + { "script-src 'nonce-foo' 'strict-dynamic' 'unsafe-inline' https: ", + "script-src 'nonce-foo' 'strict-dynamic' 'unsafe-inline' https:" }, + { "default-src 
'sha256-siVR8' 'strict-dynamic' 'unsafe-inline' https: ", + "default-src 'sha256-siVR8' 'unsafe-inline' https:" }, }; uint32_t policyCount = sizeof(policies) / sizeof(PolicyTest); ASSERT_TRUE(NS_SUCCEEDED(runTestSuite(policies, policyCount, 1))); } // ============================= TestKeywords ========================
--- a/dom/security/test/unit/test_csp_reports.js +++ b/dom/security/test/unit/test_csp_reports.js @@ -108,16 +108,17 @@ function run_test() { "/foo/self"); // test that inline script violations cause a report. makeTest(0, {"blocked-uri": "self"}, false, function(csp) { let inlineOK = true; inlineOK = csp.getAllowsInline(Ci.nsIContentPolicy.TYPE_SCRIPT, "", // aNonce + false, // aParserCreated "", // aContent 0); // aLineNumber // this is not a report only policy, so it better block inline scripts do_check_false(inlineOK); }); // test that eval violations cause a report. @@ -154,16 +155,17 @@ function run_test() { }); // test that inline script violations cause a report in report-only policy makeTest(3, {"blocked-uri": "self"}, true, function(csp) { let inlineOK = true; inlineOK = csp.getAllowsInline(Ci.nsIContentPolicy.TYPE_SCRIPT, "", // aNonce + false, // aParserCreated "", // aContent 0); // aLineNumber // this is a report only policy, so it better allow inline scripts do_check_true(inlineOK); }); // test that eval violations cause a report in report-only policy
--- a/dom/storage/DOMStorageIPC.cpp +++ b/dom/storage/DOMStorageIPC.cpp @@ -366,28 +366,16 @@ DOMStorageDBParent::DOMStorageDBParent() DOMStorageDBParent::~DOMStorageDBParent() { DOMStorageObserver* observer = DOMStorageObserver::Self(); if (observer) { observer->RemoveSink(this); } } -mozilla::ipc::IProtocol* -DOMStorageDBParent::CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) -{ - ContentParent* contentParent = aCtx->GetContentParent(); - nsAutoPtr<PStorageParent> actor(contentParent->AllocPStorageParent()); - if (!actor || !contentParent->RecvPStorageConstructor(actor)) { - return nullptr; - } - return actor.forget(); -} - DOMStorageDBParent::CacheParentBridge* DOMStorageDBParent::NewCache(const nsACString& aOriginSuffix, const nsACString& aOriginNoSuffix) { return new CacheParentBridge(this, aOriginSuffix, aOriginNoSuffix); } void DOMStorageDBParent::ActorDestroy(ActorDestroyReason aWhy)
--- a/dom/storage/DOMStorageIPC.h +++ b/dom/storage/DOMStorageIPC.h @@ -120,20 +120,16 @@ private: class DOMStorageDBParent final : public PStorageParent , public DOMStorageObserverSink { virtual ~DOMStorageDBParent(); public: DOMStorageDBParent(); - virtual mozilla::ipc::IProtocol* - CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) override; - NS_IMETHOD_(MozExternalRefCountType) AddRef(void); NS_IMETHOD_(MozExternalRefCountType) Release(void); void AddIPDLReference(); void ReleaseIPDLReference(); bool IPCOpen() { return mIPCOpen; }
--- a/dom/tests/mochitest/fetch/test_request.js +++ b/dom/tests/mochitest/fetch/test_request.js @@ -226,26 +226,24 @@ function testMethod() { } try { var r = new Request("", { method: "head", body: "hello" }); ok(false, "HEAD/GET request cannot have a body"); } catch(e) { is(e.name, "TypeError", "HEAD/GET request cannot have a body"); } - // Non HEAD/GET should not throw. var r = new Request("", { method: "patch", body: "hello" }); } - function testUrlFragment() { var req = new Request("./request#withfragment"); - is(req.url, (new URL("./request", self.location.href)).href, "request.url should be serialized with exclude fragment flag set"); + is(req.url, (new URL("./request#withfragment", self.location.href)).href, + "request.url should be serialized without exclude fragment flag set"); } - function testUrlMalformed() { try { var req = new Request("http:// example.com"); ok(false, "Creating a Request with a malformed URL should throw a TypeError"); } catch(e) { is(e.name, "TypeError", "Creating a Request with a malformed URL should throw a TypeError"); } }
--- a/dom/workers/ServiceWorkerEvents.cpp +++ b/dom/workers/ServiceWorkerEvents.cpp @@ -232,33 +232,29 @@ public: nsCOMPtr<nsIObserverService> obsService = services::GetObserverService(); if (obsService) { obsService->NotifyObservers(underlyingChannel, "service-worker-synthesized-response", nullptr); } return rv; } - bool CSPPermitsResponse(nsILoadInfo* aLoadInfo) { AssertIsOnMainThread(); MOZ_ASSERT(aLoadInfo); - nsresult rv; nsCOMPtr<nsIURI> uri; - nsAutoCString url; - mInternalResponse->GetUnfilteredURL(url); + nsCString url = mInternalResponse->GetUnfilteredURL(); if (url.IsEmpty()) { // Synthetic response. The buck stops at the worker script. url = mScriptSpec; } rv = NS_NewURI(getter_AddRefs(uri), url, nullptr, nullptr); NS_ENSURE_SUCCESS(rv, false); - int16_t decision = nsIContentPolicy::ACCEPT; rv = NS_CheckContentLoadPolicy(aLoadInfo->InternalContentPolicyType(), uri, aLoadInfo->LoadingPrincipal(), aLoadInfo->LoadingNode(), EmptyCString(), nullptr, &decision); NS_ENSURE_SUCCESS(rv, false); return decision == nsIContentPolicy::ACCEPT; } @@ -630,28 +626,26 @@ RespondWithHandler::ResolvedCallback(JSC NS_LITERAL_CSTRING("InterceptedUsedResponseWithURL"), mRequestURL); return; } RefPtr<InternalResponse> ir = response->GetInternalResponse(); if (NS_WARN_IF(!ir)) { return; } - // When an opaque response is encountered, we need the original channel's principal // to reflect the final URL. Non-opaque responses are either same-origin or CORS-enabled // cross-origin responses, which are treated as same-origin by consumers. nsCString responseURL; if (response->Type() == ResponseType::Opaque) { - ir->GetUnfilteredURL(responseURL); + responseURL = ir->GetUnfilteredURL(); if (NS_WARN_IF(responseURL.IsEmpty())) { return; } } - nsAutoPtr<RespondWithClosure> closure(new RespondWithClosure(mInterceptedChannel, mRegistration, ir, worker->GetChannelInfo(), mScriptSpec, responseURL, mRequestURL, mRespondWithScriptSpec, mRespondWithLineNumber,
--- a/dom/workers/ServiceWorkerPrivate.cpp +++ b/dom/workers/ServiceWorkerPrivate.cpp @@ -1243,16 +1243,17 @@ namespace { class FetchEventRunnable : public ExtendableFunctionalEventWorkerRunnable , public nsIHttpHeaderVisitor { nsMainThreadPtrHandle<nsIInterceptedChannel> mInterceptedChannel; const nsCString mScriptSpec; nsMainThreadPtrHandle<ServiceWorkerRegistrationInfo> mRegistration; nsTArray<nsCString> mHeaderNames; nsTArray<nsCString> mHeaderValues; nsCString mSpec; + nsCString mFragment; nsCString mMethod; nsString mClientId; bool mIsReload; RequestCache mCacheMode; RequestMode mRequestMode; RequestRedirect mRequestRedirect; RequestCredentials mRequestCredentials; nsContentPolicyType mContentPolicyType; @@ -1314,28 +1315,27 @@ public: // Normally we rely on the Request constructor to strip the fragment, but // when creating the FetchEvent we bypass the constructor. So strip the // fragment manually here instead. We can't do it later when we create // the Request because that code executes off the main thread. nsCOMPtr<nsIURI> uriNoFragment; rv = uri->CloneIgnoringRef(getter_AddRefs(uriNoFragment)); NS_ENSURE_SUCCESS(rv, rv); - rv = uriNoFragment->GetSpec(mSpec); NS_ENSURE_SUCCESS(rv, rv); + rv = uri->GetRef(mFragment); + NS_ENSURE_SUCCESS(rv, rv); uint32_t loadFlags; rv = channel->GetLoadFlags(&loadFlags); NS_ENSURE_SUCCESS(rv, rv); - nsCOMPtr<nsILoadInfo> loadInfo; rv = channel->GetLoadInfo(getter_AddRefs(loadInfo)); NS_ENSURE_SUCCESS(rv, rv); - mContentPolicyType = loadInfo->InternalContentPolicyType(); nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(channel); MOZ_ASSERT(httpChannel, "How come we don't have an HTTP channel?"); nsAutoCString referrer; // Ignore the return value since the Referer header may not exist. 
httpChannel->GetRequestHeader(NS_LITERAL_CSTRING("Referer"), referrer); @@ -1470,18 +1470,18 @@ private: } ErrorResult result; internalHeaders->SetGuard(HeadersGuardEnum::Immutable, result); if (NS_WARN_IF(result.Failed())) { result.SuppressException(); return false; } - RefPtr<InternalRequest> internalReq = new InternalRequest(mSpec, + mFragment, mMethod, internalHeaders.forget(), mCacheMode, mRequestMode, mRequestRedirect, mRequestCredentials, NS_ConvertUTF8toUTF16(mReferrer), mReferrerPolicy,
--- a/editor/libeditor/tests/mochitest.ini +++ b/editor/libeditor/tests/mochitest.ini @@ -149,16 +149,17 @@ subsuite = clipboard [test_bug735059.html] [test_bug738366.html] [test_bug740784.html] [test_bug742261.html] [test_bug757371.html] [test_bug757771.html] [test_bug767684.html] [test_bug772796.html] +skip-if = toolkit == 'android' # bug 1309431 [test_bug773262.html] [test_bug780035.html] [test_bug787432.html] [test_bug790475.html] [test_bug795785.html] [test_bug796839.html] [test_bug830600.html] subsuite = clipboard
--- a/gfx/ipc/CompositorSession.h +++ b/gfx/ipc/CompositorSession.h @@ -2,16 +2,18 @@ /* vim: set ts=8 sts=2 et sw=2 tw=99: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef _include_mozilla_gfx_ipc_CompositorSession_h_ #define _include_mozilla_gfx_ipc_CompositorSession_h_ #include "base/basictypes.h" +#include "mozilla/layers/LayersTypes.h" +#include "mozilla/layers/CompositorTypes.h" #include "nsISupportsImpl.h" class nsIWidget; namespace mozilla { namespace widget { class CompositorWidget; class CompositorWidgetDelegate; @@ -37,16 +39,19 @@ class CompositorSession protected: typedef gfx::GPUProcessHost GPUProcessHost; typedef widget::CompositorWidget CompositorWidget; typedef widget::CompositorWidgetDelegate CompositorWidgetDelegate; public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CompositorSession) + virtual bool Reset(const nsTArray<LayersBackend>& aBackendHints, + TextureFactoryIdentifier* aOutIdentifier) = 0; + virtual void Shutdown() = 0; // This returns a CompositorBridgeParent if the compositor resides in the same process. virtual CompositorBridgeParent* GetInProcessBridge() const = 0; // Set the GeckoContentController for the root of the layer tree. virtual void SetContentController(GeckoContentController* aController) = 0;
--- a/gfx/ipc/GPUChild.cpp +++ b/gfx/ipc/GPUChild.cpp @@ -2,16 +2,17 @@ /* vim: set ts=8 sts=2 et sw=2 tw=99: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "GPUChild.h" #include "gfxConfig.h" #include "gfxPrefs.h" #include "GPUProcessHost.h" +#include "GPUProcessManager.h" #include "mozilla/Telemetry.h" #include "mozilla/dom/CheckerboardReportService.h" #include "mozilla/gfx/gfxVars.h" #if defined(XP_WIN) # include "mozilla/gfx/DeviceManagerDx.h" #endif #include "mozilla/ipc/CrashReporterHost.h" @@ -146,16 +147,23 @@ GPUChild::RecvAccumulateChildHistogram(I bool GPUChild::RecvAccumulateChildKeyedHistogram(InfallibleTArray<KeyedAccumulation>&& aAccumulations) { Telemetry::AccumulateChildKeyed(GeckoProcessType_GPU, aAccumulations); return true; } +bool +GPUChild::RecvNotifyDeviceReset() +{ + mHost->mListener->OnProcessDeviceReset(mHost); + return true; +} + void GPUChild::ActorDestroy(ActorDestroyReason aWhy) { if (aWhy == AbnormalShutdown) { #ifdef MOZ_CRASHREPORTER if (mCrashReporter) { mCrashReporter->GenerateCrashReport(OtherPid()); mCrashReporter = nullptr;
--- a/gfx/ipc/GPUChild.h +++ b/gfx/ipc/GPUChild.h @@ -38,16 +38,17 @@ public: bool RecvInitComplete(const GPUDeviceData& aData) override; bool RecvReportCheckerboard(const uint32_t& aSeverity, const nsCString& aLog) override; bool RecvInitCrashReporter(Shmem&& shmem) override; bool RecvAccumulateChildHistogram(InfallibleTArray<Accumulation>&& aAccumulations) override; bool RecvAccumulateChildKeyedHistogram(InfallibleTArray<KeyedAccumulation>&& aAccumulations) override; void ActorDestroy(ActorDestroyReason aWhy) override; bool RecvGraphicsError(const nsCString& aError) override; bool RecvNotifyUiObservers(const nsCString& aTopic) override; + bool RecvNotifyDeviceReset() override; static void Destroy(UniquePtr<GPUChild>&& aChild); private: GPUProcessHost* mHost; UniquePtr<ipc::CrashReporterHost> mCrashReporter; bool mGPUReady; };
--- a/gfx/ipc/GPUParent.cpp +++ b/gfx/ipc/GPUParent.cpp @@ -109,16 +109,40 @@ GPUParent::Init(base::ProcessId aParentP LayerTreeOwnerTracker::Initialize(); mozilla::ipc::SetThisProcessName("GPU Process"); #ifdef XP_WIN wmf::MFStartup(); #endif return true; } +void +GPUParent::NotifyDeviceReset() +{ + if (!NS_IsMainThread()) { + NS_DispatchToMainThread(NS_NewRunnableFunction([] () -> void { + GPUParent::GetSingleton()->NotifyDeviceReset(); + })); + return; + } + + // Reset and reinitialize the compositor devices +#ifdef XP_WIN + if (!DeviceManagerDx::Get()->MaybeResetAndReacquireDevices()) { + // If the device doesn't need to be reset then the device + // has already been reset by a previous NotifyDeviceReset message. + return; + } +#endif + + // Notify the main process that there's been a device reset + // and that they should reset their compositors and repaint + Unused << SendNotifyDeviceReset(); +} + bool GPUParent::RecvInit(nsTArray<GfxPrefSetting>&& prefs, nsTArray<GfxVarUpdate>&& vars, const DevicePrefs& devicePrefs) { const nsTArray<gfxPrefs::Pref*>& globalPrefs = gfxPrefs::all(); for (auto& setting : prefs) { gfxPrefs::Pref* pref = globalPrefs[setting.index()];
--- a/gfx/ipc/GPUParent.h +++ b/gfx/ipc/GPUParent.h @@ -20,16 +20,17 @@ public: GPUParent(); ~GPUParent(); static GPUParent* GetSingleton(); bool Init(base::ProcessId aParentPid, MessageLoop* aIOLoop, IPC::Channel* aChannel); + void NotifyDeviceReset(); bool RecvInit(nsTArray<GfxPrefSetting>&& prefs, nsTArray<GfxVarUpdate>&& vars, const DevicePrefs& devicePrefs) override; bool RecvInitVsyncBridge(Endpoint<PVsyncBridgeParent>&& aVsyncEndpoint) override; bool RecvInitImageBridge(Endpoint<PImageBridgeParent>&& aEndpoint) override; bool RecvInitVRManager(Endpoint<PVRManagerParent>&& aEndpoint) override; bool RecvUpdatePref(const GfxPrefSetting& pref) override;
--- a/gfx/ipc/GPUProcessHost.h +++ b/gfx/ipc/GPUProcessHost.h @@ -38,16 +38,19 @@ public: virtual void OnProcessLaunchComplete(GPUProcessHost* aHost) {} // The GPUProcessHost has unexpectedly shutdown or had its connection // severed. This is not called if an error occurs after calling // Shutdown(). virtual void OnProcessUnexpectedShutdown(GPUProcessHost* aHost) {} + + virtual void OnProcessDeviceReset(GPUProcessHost* aHost) + {} }; public: explicit GPUProcessHost(Listener* listener); ~GPUProcessHost(); // Launch the subprocess asynchronously. On failure, false is returned. // Otherwise, true is returned, and the OnLaunchComplete listener callback
--- a/gfx/ipc/GPUProcessManager.cpp +++ b/gfx/ipc/GPUProcessManager.cpp @@ -24,16 +24,17 @@ #include "nsContentUtils.h" #include "VRManagerChild.h" #include "VRManagerParent.h" #include "VsyncBridgeChild.h" #include "VsyncIOThreadHolder.h" #include "VsyncSource.h" #include "mozilla/dom/VideoDecoderManagerChild.h" #include "mozilla/dom/VideoDecoderManagerParent.h" +#include "MediaPrefs.h" namespace mozilla { namespace gfx { using namespace mozilla::layers; static StaticAutoPtr<GPUProcessManager> sSingleton; @@ -253,16 +254,24 @@ GPUProcessManager::OnProcessLaunchComple nsTArray<LayerTreeIdMapping> mappings; LayerTreeOwnerTracker::Get()->Iterate([&](uint64_t aLayersId, base::ProcessId aProcessId) { mappings.AppendElement(LayerTreeIdMapping(aLayersId, aProcessId)); }); mGPUChild->SendAddLayerTreeIdMapping(mappings); } void +GPUProcessManager::OnProcessDeviceReset(GPUProcessHost* aHost) +{ + for (auto& session : mRemoteSessions) { + session->NotifyDeviceReset(); + } +} + +void GPUProcessManager::OnProcessUnexpectedShutdown(GPUProcessHost* aHost) { MOZ_ASSERT(mProcess && mProcess == aHost); DestroyProcess(); if (mNumProcessAttempts > uint32_t(gfxPrefs::GPUProcessDevMaxRestarts())) { DisableGPUProcess("GPU processed crashed too many times"); @@ -531,24 +540,28 @@ GPUProcessManager::CreateRemoteSession(n return nullptr; #endif } bool GPUProcessManager::CreateContentBridges(base::ProcessId aOtherProcess, ipc::Endpoint<PCompositorBridgeChild>* aOutCompositor, ipc::Endpoint<PImageBridgeChild>* aOutImageBridge, - ipc::Endpoint<PVRManagerChild>* aOutVRBridge) + ipc::Endpoint<PVRManagerChild>* aOutVRBridge, + ipc::Endpoint<dom::PVideoDecoderManagerChild>* aOutVideoManager) { if (!CreateContentCompositorBridge(aOtherProcess, aOutCompositor) || !CreateContentImageBridge(aOtherProcess, aOutImageBridge) || !CreateContentVRManager(aOtherProcess, aOutVRBridge)) { return false; } + // VideoDecoderManager is only supported in the GPU process, so we allow this to be + // fallible. 
+ CreateContentVideoDecoderManager(aOtherProcess, aOutVideoManager); return true; } bool GPUProcessManager::CreateContentCompositorBridge(base::ProcessId aOtherProcess, ipc::Endpoint<PCompositorBridgeChild>* aOutEndpoint) { EnsureGPUReady(); @@ -654,41 +667,41 @@ GPUProcessManager::CreateContentVRManage return false; } } *aOutEndpoint = Move(childPipe); return true; } -bool +void GPUProcessManager::CreateContentVideoDecoderManager(base::ProcessId aOtherProcess, ipc::Endpoint<dom::PVideoDecoderManagerChild>* aOutEndpoint) { - if (!mGPUChild) { - return false; + if (!mGPUChild || !MediaPrefs::PDMUseGPUDecoder()) { + return; } ipc::Endpoint<dom::PVideoDecoderManagerParent> parentPipe; ipc::Endpoint<dom::PVideoDecoderManagerChild> childPipe; nsresult rv = dom::PVideoDecoderManager::CreateEndpoints( mGPUChild->OtherPid(), aOtherProcess, &parentPipe, &childPipe); if (NS_FAILED(rv)) { gfxCriticalNote << "Could not create content video decoder: " << hexa(int(rv)); - return false; + return; } mGPUChild->SendNewContentVideoDecoderManager(Move(parentPipe)); *aOutEndpoint = Move(childPipe); - return true; + return; } already_AddRefed<IAPZCTreeManager> GPUProcessManager::GetAPZCTreeManagerForLayers(uint64_t aLayersId) { return CompositorBridgeParent::GetAPZCTreeManager(aLayersId); }
--- a/gfx/ipc/GPUProcessManager.h +++ b/gfx/ipc/GPUProcessManager.h @@ -86,19 +86,18 @@ public: bool aUseAPZ, bool aUseExternalSurfaceSize, const gfx::IntSize& aSurfaceSize); bool CreateContentBridges( base::ProcessId aOtherProcess, ipc::Endpoint<PCompositorBridgeChild>* aOutCompositor, ipc::Endpoint<PImageBridgeChild>* aOutImageBridge, - ipc::Endpoint<PVRManagerChild>* aOutVRBridge); - bool CreateContentVideoDecoderManager(base::ProcessId aOtherProcess, - ipc::Endpoint<dom::PVideoDecoderManagerChild>* aOutEndPoint); + ipc::Endpoint<PVRManagerChild>* aOutVRBridge, + ipc::Endpoint<dom::PVideoDecoderManagerChild>* aOutVideoManager); // This returns a reference to the APZCTreeManager to which // pan/zoom-related events can be sent. already_AddRefed<IAPZCTreeManager> GetAPZCTreeManagerForLayers(uint64_t aLayersId); // Maps the layer tree and process together so that aOwningPID is allowed // to access aLayersId across process. void MapLayerTreeId(uint64_t aLayersId, base::ProcessId aOwningId); @@ -115,16 +114,17 @@ public: // associated resources that live only on the compositor thread. // // Must run on the content main thread. uint64_t AllocateLayerTreeId(); void OnProcessLaunchComplete(GPUProcessHost* aHost) override; void OnProcessUnexpectedShutdown(GPUProcessHost* aHost) override; + void OnProcessDeviceReset(GPUProcessHost* aHost) override; // Notify the GPUProcessManager that a top-level PGPU protocol has been // terminated. This may be called from any thread. 
void NotifyRemoteActorDestroyed(const uint64_t& aProcessToken); void AddListener(GPUProcessListener* aListener); void RemoveListener(GPUProcessListener* aListener); @@ -153,16 +153,18 @@ private: void OnXPCOMShutdown(); bool CreateContentCompositorBridge(base::ProcessId aOtherProcess, ipc::Endpoint<PCompositorBridgeChild>* aOutEndpoint); bool CreateContentImageBridge(base::ProcessId aOtherProcess, ipc::Endpoint<PImageBridgeChild>* aOutEndpoint); bool CreateContentVRManager(base::ProcessId aOtherProcess, ipc::Endpoint<PVRManagerChild>* aOutEndpoint); + void CreateContentVideoDecoderManager(base::ProcessId aOtherProcess, + ipc::Endpoint<dom::PVideoDecoderManagerChild>* aOutEndPoint); // Called from RemoteCompositorSession. We track remote sessions so we can // notify their owning widgets that the session must be restarted. void RegisterSession(RemoteCompositorSession* aSession); void UnregisterSession(RemoteCompositorSession* aSession); private: GPUProcessManager();
--- a/gfx/ipc/InProcessCompositorSession.cpp +++ b/gfx/ipc/InProcessCompositorSession.cpp @@ -55,16 +55,22 @@ InProcessCompositorSession::SetContentCo } RefPtr<IAPZCTreeManager> InProcessCompositorSession::GetAPZCTreeManager() const { return mCompositorBridgeParent->GetAPZCTreeManager(mRootLayerTreeId); } +bool +InProcessCompositorSession::Reset(const nsTArray<LayersBackend>& aBackendHints, TextureFactoryIdentifier* aOutIdentifier) +{ + return mCompositorBridgeParent->ResetCompositor(aBackendHints, aOutIdentifier); +} + void InProcessCompositorSession::Shutdown() { // Destroy will synchronously wait for the parent to acknowledge shutdown, // at which point CBP will defer a Release on the compositor thread. We // can safely release our reference now, and let the destructor run on either // thread. mCompositorBridgeChild->Destroy();
--- a/gfx/ipc/InProcessCompositorSession.h +++ b/gfx/ipc/InProcessCompositorSession.h @@ -25,16 +25,17 @@ public: CSSToLayoutDeviceScale aScale, bool aUseAPZ, bool aUseExternalSurfaceSize, const gfx::IntSize& aSurfaceSize); CompositorBridgeParent* GetInProcessBridge() const override; void SetContentController(GeckoContentController* aController) override; RefPtr<IAPZCTreeManager> GetAPZCTreeManager() const override; + bool Reset(const nsTArray<LayersBackend>& aBackendHints, TextureFactoryIdentifier* aOutIdentifier) override; void Shutdown() override; private: InProcessCompositorSession(widget::CompositorWidget* aWidget, CompositorBridgeChild* aChild, CompositorBridgeParent* aParent); private:
--- a/gfx/ipc/PGPU.ipdl +++ b/gfx/ipc/PGPU.ipdl @@ -93,12 +93,14 @@ child: // Have a message be broadcasted to the UI process by the UI process // observer service. async NotifyUiObservers(nsCString aTopic); // Messages for reporting telemetry to the UI process. async AccumulateChildHistogram(Accumulation[] accumulations); async AccumulateChildKeyedHistogram(KeyedAccumulation[] accumulations); + + async NotifyDeviceReset(); }; } // namespace gfx } // namespace mozilla
--- a/gfx/ipc/RemoteCompositorSession.cpp +++ b/gfx/ipc/RemoteCompositorSession.cpp @@ -3,16 +3,17 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "RemoteCompositorSession.h" #include "mozilla/VsyncDispatcher.h" #include "mozilla/layers/APZChild.h" #include "mozilla/layers/APZCTreeManagerChild.h" +#include "mozilla/Unused.h" #include "nsBaseWidget.h" namespace mozilla { namespace layers { using namespace gfx; using namespace widget; @@ -33,16 +34,23 @@ RemoteCompositorSession::RemoteComposito RemoteCompositorSession::~RemoteCompositorSession() { // This should have been shutdown first. MOZ_ASSERT(!mCompositorBridgeChild); } void +RemoteCompositorSession::NotifyDeviceReset() +{ + MOZ_ASSERT(mWidget); + mWidget->OnRenderingDeviceReset(); +} + +void RemoteCompositorSession::NotifySessionLost() { // Re-entrancy should be impossible: when we are being notified of a lost // session, we have by definition not shut down yet. We will shutdown, but // then will be removed from the notification list. MOZ_ASSERT(mWidget); mWidget->NotifyRemoteCompositorSessionLost(this); } @@ -73,16 +81,24 @@ RemoteCompositorSession::GetWidget() } RefPtr<IAPZCTreeManager> RemoteCompositorSession::GetAPZCTreeManager() const { return mAPZ; } +bool +RemoteCompositorSession::Reset(const nsTArray<LayersBackend>& aBackendHints, TextureFactoryIdentifier* aOutIdentifier) +{ + bool didReset; + Unused << mCompositorBridgeChild->SendReset(aBackendHints, &didReset, aOutIdentifier); + return didReset; +} + void RemoteCompositorSession::Shutdown() { mContentController = nullptr; if (mAPZ) { mAPZ->SetCompositorSession(nullptr); } mCompositorBridgeChild->Destroy();
--- a/gfx/ipc/RemoteCompositorSession.h +++ b/gfx/ipc/RemoteCompositorSession.h @@ -23,18 +23,20 @@ public: const uint64_t& aRootLayerTreeId); ~RemoteCompositorSession() override; CompositorBridgeParent* GetInProcessBridge() const override; void SetContentController(GeckoContentController* aController) override; GeckoContentController* GetContentController(); nsIWidget* GetWidget(); RefPtr<IAPZCTreeManager> GetAPZCTreeManager() const override; + bool Reset(const nsTArray<LayersBackend>& aBackendHints, TextureFactoryIdentifier* aOutIdentifier) override; void Shutdown() override; + void NotifyDeviceReset(); void NotifySessionLost(); private: nsBaseWidget* mWidget; RefPtr<APZCTreeManagerChild> mAPZ; RefPtr<GeckoContentController> mContentController; };
--- a/gfx/ipc/VsyncBridgeChild.cpp +++ b/gfx/ipc/VsyncBridgeChild.cpp @@ -141,15 +141,15 @@ VsyncBridgeChild::DeallocPVsyncBridgeChi void VsyncBridgeChild::ProcessingError(Result aCode, const char* aReason) { MOZ_RELEASE_ASSERT(aCode == MsgDropped, "Processing error in VsyncBridgeChild"); } void -VsyncBridgeChild::FatalError(const char* const aName, const char* const aMsg) const +VsyncBridgeChild::HandleFatalError(const char* aName, const char* aMsg) const { dom::ContentChild::FatalErrorIfNotUsingGPUProcess(aName, aMsg, OtherPid()); } } // namespace gfx } // namespace mozilla
--- a/gfx/ipc/VsyncBridgeChild.h +++ b/gfx/ipc/VsyncBridgeChild.h @@ -28,17 +28,17 @@ public: void Close(); void ActorDestroy(ActorDestroyReason aWhy) override; void DeallocPVsyncBridgeChild() override; void ProcessingError(Result aCode, const char* aReason) override; void NotifyVsync(TimeStamp aTimeStamp, const uint64_t& aLayersId); - virtual void FatalError(const char* const aName, const char* const aMsg) const override; + virtual void HandleFatalError(const char* aName, const char* aMsg) const override; private: VsyncBridgeChild(RefPtr<VsyncIOThreadHolder>, const uint64_t& aProcessToken); ~VsyncBridgeChild(); void Open(Endpoint<PVsyncBridgeChild>&& aEndpoint); void NotifyVsyncImpl(TimeStamp aTimeStamp, const uint64_t& aLayersId);
--- a/gfx/layers/IPDLActor.h +++ b/gfx/layers/IPDLActor.h @@ -33,17 +33,17 @@ public: virtual bool RecvDestroy() override { DestroyIfNeeded(); Unused << Protocol::Send__delete__(this); return true; } - typedef ipc::IProtocolManager<ipc::IProtocol>::ActorDestroyReason Why; + typedef ipc::IProtocol::ActorDestroyReason Why; virtual void ActorDestroy(Why) override { DestroyIfNeeded(); } protected: void DestroyIfNeeded() { if (!mDestroyed) {
--- a/gfx/layers/client/ClientLayerManager.cpp +++ b/gfx/layers/client/ClientLayerManager.cpp @@ -352,17 +352,16 @@ ClientLayerManager::StorePluginWidgetCon } void ClientLayerManager::EndTransaction(DrawPaintedLayerCallback aCallback, void* aCallbackData, EndTransactionFlags aFlags) { if (!mForwarder->IPCOpen()) { - mTransactionIdAllocator->RevokeTransactionId(mLatestTransactionId); mInTransaction = false; return; } if (mWidget) { mWidget->PrepareWindowEffects(); } EndTransactionInternal(aCallback, aCallbackData, aFlags); @@ -384,17 +383,16 @@ ClientLayerManager::EndTransaction(DrawP } bool ClientLayerManager::EndEmptyTransaction(EndTransactionFlags aFlags) { mInTransaction = false; if (!mRoot || !mForwarder->IPCOpen()) { - mTransactionIdAllocator->RevokeTransactionId(mLatestTransactionId); return false; } if (!EndTransactionInternal(nullptr, nullptr, aFlags)) { // Return without calling ForwardTransaction. This leaves the // ShadowLayerForwarder transaction open; the following // EndTransaction will complete it. return false;
--- a/gfx/layers/d3d11/CompositorD3D11.cpp +++ b/gfx/layers/d3d11/CompositorD3D11.cpp @@ -7,16 +7,17 @@ #include "TextureD3D11.h" #include "CompositorD3D11Shaders.h" #include "gfxWindowsPlatform.h" #include "nsIWidget.h" #include "mozilla/gfx/D3D11Checks.h" #include "mozilla/gfx/DeviceManagerDx.h" +#include "mozilla/gfx/GPUParent.h" #include "mozilla/layers/ImageHost.h" #include "mozilla/layers/ContentHost.h" #include "mozilla/layers/Effects.h" #include "nsWindowsHelpers.h" #include "gfxPrefs.h" #include "gfxConfig.h" #include "gfxCrashReporterUtils.h" #include "gfxUtils.h" @@ -987,16 +988,23 @@ CompositorD3D11::BeginFrame(const nsIntR *aRenderBoundsOut = IntRect(); return; } if (mDevice->GetDeviceRemovedReason() != S_OK) { gfxCriticalNote << "GFX: D3D11 skip BeginFrame with device-removed."; ReadUnlockTextures(); *aRenderBoundsOut = IntRect(); + + // If we are in the GPU process then the main process doesn't + // know that a device reset has happened and needs to be informed + if (XRE_IsGPUProcess()) { + GPUParent::GetSingleton()->NotifyDeviceReset(); + } + return; } LayoutDeviceIntSize oldSize = mSize; // Failed to create a render target or the view. if (!UpdateRenderTarget() || !mDefaultRT || !mDefaultRT->mRTView || mSize.width <= 0 || mSize.height <= 0) {
--- a/gfx/layers/ipc/APZCTreeManagerChild.cpp +++ b/gfx/layers/ipc/APZCTreeManagerChild.cpp @@ -257,18 +257,10 @@ APZCTreeManagerChild::RecvNotifyPinchGes // of the pinch is. This may change in the future. if (mCompositorSession && mCompositorSession->GetWidget()) { APZCCallbackHelper::NotifyPinchGesture(aType, aSpanChange, aModifiers, mCompositorSession->GetWidget()); } return true; } -void -APZCTreeManagerChild::OnProcessingError( - Result aCode, - const char* aReason) -{ - MOZ_RELEASE_ASSERT(aCode != MsgDropped); -} - } // namespace layers } // namespace mozilla
--- a/gfx/layers/ipc/APZCTreeManagerChild.h +++ b/gfx/layers/ipc/APZCTreeManagerChild.h @@ -81,21 +81,16 @@ public: LayoutDeviceIntPoint* aRefPoint, ScrollableLayerGuid* aOutTargetGuid) override; void UpdateWheelTransaction( LayoutDeviceIntPoint aRefPoint, EventMessage aEventMessage) override; - void - OnProcessingError( - Result aCode, - const char* aReason) override; - protected: bool RecvHandleTap(const TapType& aType, const LayoutDevicePoint& aPoint, const Modifiers& aModifiers, const ScrollableLayerGuid& aGuid, const uint64_t& aInputBlockId) override; bool RecvNotifyPinchGesture(const PinchGestureType& aType,
--- a/gfx/layers/ipc/CompositorBridgeChild.cpp +++ b/gfx/layers/ipc/CompositorBridgeChild.cpp @@ -202,18 +202,17 @@ CompositorBridgeChild::InitForContent(En // There's only one compositor per child process. MOZ_ASSERT(!sCompositorBridge); RefPtr<CompositorBridgeChild> child(new CompositorBridgeChild(nullptr)); if (!aEndpoint.Bind(child)) { NS_RUNTIMEABORT("Couldn't Open() Compositor channel."); return false; } - - child->mCanSend = true; + child->InitIPDL(); // We release this ref in DeferredDestroyCompositor. sCompositorBridge = child; return true; } /* static */ bool CompositorBridgeChild::ReinitForContent(Endpoint<PCompositorBridgeChild>&& aEndpoint) @@ -240,40 +239,53 @@ CompositorBridgeChild::InitSameProcess(w const gfx::IntSize& aSurfaceSize) { TimeDuration vsyncRate = gfxPlatform::GetPlatform()->GetHardwareVsync()->GetGlobalDisplay().GetVsyncRate(); mCompositorBridgeParent = new CompositorBridgeParent(aScale, vsyncRate, aUseExternalSurface, aSurfaceSize); - mCanSend = Open(mCompositorBridgeParent->GetIPCChannel(), - CompositorThreadHolder::Loop(), - ipc::ChildSide); - MOZ_RELEASE_ASSERT(mCanSend); + bool ok = Open(mCompositorBridgeParent->GetIPCChannel(), + CompositorThreadHolder::Loop(), + ipc::ChildSide); + MOZ_RELEASE_ASSERT(ok); + InitIPDL(); mCompositorBridgeParent->InitSameProcess(aWidget, aLayerTreeId, aUseAPZ); return mCompositorBridgeParent; } /* static */ RefPtr<CompositorBridgeChild> CompositorBridgeChild::CreateRemote(const uint64_t& aProcessToken, LayerManager* aLayerManager, Endpoint<PCompositorBridgeChild>&& aEndpoint) { RefPtr<CompositorBridgeChild> child = new CompositorBridgeChild(aLayerManager); if (!aEndpoint.Bind(child)) { return nullptr; } - - child->mCanSend = true; + child->InitIPDL(); child->mProcessToken = aProcessToken; return child; } +void +CompositorBridgeChild::InitIPDL() +{ + mCanSend = true; + AddRef(); +} + +void +CompositorBridgeChild::DeallocPCompositorBridgeChild() +{ + Release(); +} + /*static*/ CompositorBridgeChild* 
CompositorBridgeChild::Get() { // This is only expected to be used in child processes. MOZ_ASSERT(!XRE_IsParentProcess()); return sCompositorBridge; } @@ -569,30 +581,23 @@ CompositorBridgeChild::RecvClearCachedRe } return true; } void CompositorBridgeChild::ActorDestroy(ActorDestroyReason aWhy) { if (aWhy == AbnormalShutdown) { -#ifdef MOZ_B2G - // Due to poor lifetime management of gralloc (and possibly shmems) we will - // crash at some point in the future when we get destroyed due to abnormal - // shutdown. Its better just to crash here. On desktop though, we have a chance - // of recovering. - NS_RUNTIMEABORT("ActorDestroy by IPC channel failure at CompositorBridgeChild"); -#endif - // If the parent side runs into a problem then the actor will be destroyed. // There is nothing we can do in the child side, here sets mCanSend as false. - mCanSend = false; gfxCriticalNote << "Receive IPC close with reason=AbnormalShutdown"; } + mCanSend = false; + if (mProcessToken && XRE_IsParentProcess()) { GPUProcessManager::Get()->NotifyRemoteActorDestroyed(mProcessToken); } } bool CompositorBridgeChild::RecvSharedCompositorFrameMetrics( const mozilla::ipc::SharedMemoryBasic::Handle& metrics, @@ -1120,16 +1125,16 @@ CompositorBridgeChild::ProcessingError(R void CompositorBridgeChild::WillEndTransaction() { ResetShmemCounter(); } void -CompositorBridgeChild::FatalError(const char* const aName, const char* const aMsg) const +CompositorBridgeChild::HandleFatalError(const char* aName, const char* aMsg) const { dom::ContentChild::FatalErrorIfNotUsingGPUProcess(aName, aMsg, OtherPid()); } } // namespace layers } // namespace mozilla
--- a/gfx/layers/ipc/CompositorBridgeChild.h +++ b/gfx/layers/ipc/CompositorBridgeChild.h @@ -129,17 +129,17 @@ public: virtual bool RecvParentAsyncMessages(InfallibleTArray<AsyncParentMessageData>&& aMessages) override; virtual PTextureChild* CreateTexture(const SurfaceDescriptor& aSharedData, LayersBackend aLayersBackend, TextureFlags aFlags, uint64_t aSerial) override; - virtual void FatalError(const char* const aName, const char* const aMsg) const override; + virtual void HandleFatalError(const char* aName, const char* aMsg) const override; /** * Request that the parent tell us when graphics are ready on GPU. * When we get that message, we bounce it to the TabParent via * the TabChild * @param tabChild The object to bounce the note to. Non-NULL. */ void RequestNotifyAfterRemotePaint(TabChild* aTabChild); @@ -226,16 +226,19 @@ public: void ProcessingError(Result aCode, const char* aReason) override; void WillEndTransaction(); private: // Private destructor, to discourage deletion outside of Release(): virtual ~CompositorBridgeChild(); + void InitIPDL(); + void DeallocPCompositorBridgeChild() override; + virtual PLayerTransactionChild* AllocPLayerTransactionChild(const nsTArray<LayersBackend>& aBackendHints, const uint64_t& aId, TextureFactoryIdentifier* aTextureFactoryIdentifier, bool* aSuccess) override; virtual bool DeallocPLayerTransactionChild(PLayerTransactionChild *aChild) override;
--- a/gfx/layers/ipc/CompositorBridgeParent.cpp +++ b/gfx/layers/ipc/CompositorBridgeParent.cpp @@ -677,16 +677,32 @@ CompositorBridgeParent::Initialize() sIndirectLayerTrees[mRootLayerTreeID].mParent = this; } LayerScope::SetPixelScale(mScale.scale); mCompositorScheduler = new CompositorVsyncScheduler(this, mWidget); } +bool +CompositorBridgeParent::RecvReset(nsTArray<LayersBackend>&& aBackendHints, bool* aResult, TextureFactoryIdentifier* aOutIdentifier) +{ + Maybe<TextureFactoryIdentifier> newIdentifier; + ResetCompositorTask(aBackendHints, &newIdentifier); + + if (newIdentifier) { + *aResult = true; + *aOutIdentifier = newIdentifier.value(); + } else { + *aResult = false; + } + + return true; +} + uint64_t CompositorBridgeParent::RootLayerTreeId() { MOZ_ASSERT(mRootLayerTreeID); return mRootLayerTreeID; } CompositorBridgeParent::~CompositorBridgeParent() @@ -2002,16 +2018,17 @@ public: } mSelfRef = this; } virtual void ActorDestroy(ActorDestroyReason aWhy) override; // FIXME/bug 774388: work out what shutdown protocol we need. virtual bool RecvInitialize(const uint64_t& aRootLayerTreeId) override { return false; } + virtual bool RecvReset(nsTArray<LayersBackend>&& aBackendHints, bool* aResult, TextureFactoryIdentifier* aOutIdentifier) override { return false; } virtual bool RecvRequestOverfill() override { return true; } virtual bool RecvWillClose() override { return true; } virtual bool RecvPause() override { return true; } virtual bool RecvResume() override { return true; } virtual bool RecvNotifyChildCreated(const uint64_t& child) override; virtual bool RecvNotifyChildRecreated(const uint64_t& child) override { return false; } virtual bool RecvAdoptChild(const uint64_t& child) override { return false; } virtual bool RecvMakeSnapshot(const SurfaceDescriptor& aInSnapshot,
--- a/gfx/layers/ipc/CompositorBridgeParent.h +++ b/gfx/layers/ipc/CompositorBridgeParent.h @@ -257,16 +257,17 @@ public: bool aUseAPZ); // Must only be called by GPUParent. After invoking this, the IPC channel // is active and RecvWillStop/ActorDestroy must be called to free the // compositor. bool Bind(Endpoint<PCompositorBridgeParent>&& aEndpoint); virtual bool RecvInitialize(const uint64_t& aRootLayerTreeId) override; + virtual bool RecvReset(nsTArray<LayersBackend>&& aBackendHints, bool* aResult, TextureFactoryIdentifier* aOutIdentifier) override; virtual bool RecvGetFrameUniformity(FrameUniformityData* aOutData) override; virtual bool RecvRequestOverfill() override; virtual bool RecvWillClose() override; virtual bool RecvPause() override; virtual bool RecvResume() override; virtual bool RecvNotifyChildCreated(const uint64_t& child) override; virtual bool RecvNotifyChildRecreated(const uint64_t& child) override; virtual bool RecvAdoptChild(const uint64_t& child) override;
--- a/gfx/layers/ipc/ImageBridgeChild.cpp +++ b/gfx/layers/ipc/ImageBridgeChild.cpp @@ -1225,15 +1225,15 @@ ImageBridgeChild::Destroy(CompositableCh bool ImageBridgeChild::CanSend() const { MOZ_ASSERT(InImageBridgeChildThread()); return mCanSend; } void -ImageBridgeChild::FatalError(const char* const aName, const char* const aMsg) const +ImageBridgeChild::HandleFatalError(const char* aName, const char* aMsg) const { dom::ContentChild::FatalErrorIfNotUsingGPUProcess(aName, aMsg, OtherPid()); } } // namespace layers } // namespace mozilla
--- a/gfx/layers/ipc/ImageBridgeChild.h +++ b/gfx/layers/ipc/ImageBridgeChild.h @@ -344,17 +344,17 @@ public: virtual void UpdateFwdTransactionId() override { ++mFwdTransactionId; } virtual uint64_t GetFwdTransactionId() override { return mFwdTransactionId; } bool InForwarderThread() override { return InImageBridgeChildThread(); } - virtual void FatalError(const char* const aName, const char* const aMsg) const override; + virtual void HandleFatalError(const char* aName, const char* aMsg) const override; protected: ImageBridgeChild(); bool DispatchAllocShmemInternal(size_t aSize, SharedMemory::SharedMemoryType aType, Shmem* aShmem, bool aUnsafe);
--- a/gfx/layers/ipc/PCompositorBridge.ipdl +++ b/gfx/layers/ipc/PCompositorBridge.ipdl @@ -118,16 +118,17 @@ child: async ObserveLayerUpdate(uint64_t aLayersId, uint64_t aEpoch, bool aActive); parent: // Must be called before Initialize(). async PCompositorWidget(CompositorWidgetInitData aInitData); // When out-of-process, this must be called to finish initialization. sync Initialize(uint64_t rootLayerTreeId); + sync Reset(LayersBackend[] aBackendHints) returns (bool aResult, TextureFactoryIdentifier aOutIdentifier); // Returns whether this Compositor has APZ enabled or not. sync AsyncPanZoomEnabled(uint64_t layersId) returns (bool aHasAPZ); // Must be called after Initialize(), and only succeeds if AsyncPanZoomEnabled() is true. async PAPZ(uint64_t layersId); async PAPZCTreeManager(uint64_t layersId);
--- a/gfx/thebes/DeviceManagerDx.cpp +++ b/gfx/thebes/DeviceManagerDx.cpp @@ -578,16 +578,42 @@ DeviceManagerDx::ResetDevices() mAdapter = nullptr; mCompositorDevice = nullptr; mContentDevice = nullptr; mDeviceStatus = Nothing(); Factory::SetDirect3D11Device(nullptr); } bool +DeviceManagerDx::MaybeResetAndReacquireDevices() +{ + DeviceResetReason resetReason; + if (!GetAnyDeviceRemovedReason(&resetReason)) { + return false; + } + + Telemetry::Accumulate(Telemetry::DEVICE_RESET_REASON, uint32_t(resetReason)); + + bool createCompositorDevice = !!mCompositorDevice; + bool createContentDevice = !!mContentDevice; + + ResetDevices(); + + if (createCompositorDevice && !CreateCompositorDevices()) { + // Just stop, don't try anything more + return true; + } + if (createContentDevice) { + CreateContentDevices(); + } + + return true; +} + +bool DeviceManagerDx::ContentAdapterIsParentAdapter(ID3D11Device* device) { DXGI_ADAPTER_DESC desc; if (!D3D11Checks::GetDxgiDesc(device, &desc)) { gfxCriticalNote << "Could not query device DXGI adapter info"; return false; }
--- a/gfx/thebes/DeviceManagerDx.h +++ b/gfx/thebes/DeviceManagerDx.h @@ -75,16 +75,21 @@ public: void ResetDevices(); void InitializeDirectDraw(); // Call GetDeviceRemovedReason on each device until one returns // a failure. bool GetAnyDeviceRemovedReason(DeviceResetReason* aOutReason); + // Reset and reacquire the devices if a reset has happened. + // Returns whether a reset occurred, not whether reacquiring + // was successful. + bool MaybeResetAndReacquireDevices(); + // Test whether we can acquire a DXGI 1.2-compatible adapter. This should // only be called on startup before devices are initialized. bool CheckRemotePresentSupport(); private: IDXGIAdapter1 *GetDXGIAdapter(); void DisableD3D11AfterCrash();
--- a/gfx/vr/ipc/VRManagerChild.cpp +++ b/gfx/vr/ipc/VRManagerChild.cpp @@ -568,17 +568,17 @@ VRManagerChild::RemoveListener(dom::VREv mListeners.RemoveElement(aObserver); if (mListeners.IsEmpty()) { Unused << SendSetHaveEventListener(false); } } void -VRManagerChild::FatalError(const char* const aName, const char* const aMsg) const +VRManagerChild::HandleFatalError(const char* aName, const char* aMsg) const { dom::ContentChild::FatalErrorIfNotUsingGPUProcess(aName, aMsg, OtherPid()); } void VRManagerChild::SetGamepadManager(dom::GamepadManager* aGamepadManager) { MOZ_ASSERT(aGamepadManager);
--- a/gfx/vr/ipc/VRManagerChild.h +++ b/gfx/vr/ipc/VRManagerChild.h @@ -82,17 +82,17 @@ public: // the same singleton GamepadManager from the same process. void SetGamepadManager(dom::GamepadManager* aGamepadManager); void UpdateDisplayInfo(nsTArray<VRDisplayInfo>& aDisplayUpdates); void FireDOMVRDisplayConnectEvent(); void FireDOMVRDisplayDisconnectEvent(); void FireDOMVRDisplayPresentChangeEvent(); - virtual void FatalError(const char* const aName, const char* const aMsg) const override; + virtual void HandleFatalError(const char* aName, const char* aMsg) const override; protected: explicit VRManagerChild(); ~VRManagerChild(); void Destroy(); static void DeferredDestroy(RefPtr<VRManagerChild> aVRManagerChild); virtual PTextureChild* AllocPTextureChild(const SurfaceDescriptor& aSharedData,
--- a/hal/sandbox/SandboxHal.cpp +++ b/hal/sandbox/SandboxHal.cpp @@ -896,28 +896,16 @@ public: } else { // Invalid factory reset reason. That should never happen. return false; } hal::FactoryReset(reason); return true; } - - virtual mozilla::ipc::IProtocol* - CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) override - { - ContentParent* contentParent = aCtx->GetContentParent(); - nsAutoPtr<PHalParent> actor(contentParent->AllocPHalParent()); - if (!actor || !contentParent->RecvPHalConstructor(actor)) { - return nullptr; - } - return actor.forget(); - } }; class HalChild : public PHalChild { public: virtual void ActorDestroy(ActorDestroyReason aWhy) override { sHalChildDestroyed = true;
--- a/ipc/glue/MessageChannel.cpp +++ b/ipc/glue/MessageChannel.cpp @@ -467,17 +467,17 @@ private: // Next item in mChan->mTransactionStack. AutoEnterTransaction *mNext; // Pointer the a reply received for this message, if one was received. nsAutoPtr<IPC::Message> mReply; }; -MessageChannel::MessageChannel(MessageListener *aListener) +MessageChannel::MessageChannel(IToplevelProtocol *aListener) : mListener(aListener), mChannelState(ChannelClosed), mSide(UnknownSide), mLink(nullptr), mWorkerLoop(nullptr), mChannelErrorTask(nullptr), mWorkerLoopID(-1), mTimeoutMs(kNoTimeout), @@ -1852,28 +1852,58 @@ MessageChannel::MaybeUndeferIncall() MOZ_RELEASE_ASSERT(call.nested_level() == IPC::Message::NOT_NESTED); RefPtr<MessageTask> task = new MessageTask(this, Move(call)); mPending.insertBack(task); task->Post(); } void +MessageChannel::EnteredCxxStack() +{ + mListener->EnteredCxxStack(); +} + +void MessageChannel::ExitedCxxStack() { - mListener->OnExitedCxxStack(); + mListener->ExitedCxxStack(); if (mSawInterruptOutMsg) { MonitorAutoLock lock(*mMonitor); // see long comment in OnMaybeDequeueOne() EnqueuePendingMessages(); mSawInterruptOutMsg = false; } } void +MessageChannel::EnteredCall() +{ + mListener->EnteredCall(); +} + +void +MessageChannel::ExitedCall() +{ + mListener->ExitedCall(); +} + +void +MessageChannel::EnteredSyncSend() +{ + mListener->OnEnteredSyncSend(); +} + +void +MessageChannel::ExitedSyncSend() +{ + mListener->OnExitedSyncSend(); +} + +void MessageChannel::EnqueuePendingMessages() { AssertWorkerThread(); mMonitor->AssertCurrentThreadOwns(); MaybeUndeferIncall(); // XXX performance tuning knob: could process all or k pending @@ -1948,17 +1978,17 @@ bool MessageChannel::ShouldContinueFromTimeout() { AssertWorkerThread(); mMonitor->AssertCurrentThreadOwns(); bool cont; { MonitorAutoUnlock unlock(*mMonitor); - cont = mListener->OnReplyTimeout(); + cont = mListener->ShouldContinueFromReplyTimeout(); mListener->ArtificialSleep(); } static enum { UNKNOWN, 
NOT_DEBUGGING, DEBUGGING } sDebuggingChildren = UNKNOWN; if (sDebuggingChildren == UNKNOWN) { sDebuggingChildren = getenv("MOZ_DEBUG_CHILD_PROCESS") ? DEBUGGING : NOT_DEBUGGING; } @@ -1997,17 +2027,17 @@ MessageChannel::DispatchOnChannelConnect MOZ_RELEASE_ASSERT(mPeerPidSet); mListener->OnChannelConnected(mPeerPid); } void MessageChannel::ReportMessageRouteError(const char* channelName) const { PrintErrorMessage(mSide, channelName, "Need a route"); - mListener->OnProcessingError(MsgRouteError, "MsgRouteError"); + mListener->ProcessingError(MsgRouteError, "MsgRouteError"); } void MessageChannel::ReportConnectionError(const char* aChannelName, Message* aMsg) const { AssertWorkerThread(); mMonitor->AssertCurrentThreadOwns(); @@ -2039,17 +2069,17 @@ MessageChannel::ReportConnectionError(co aMsg->type(), aMsg->name(), errorMsg); PrintErrorMessage(mSide, aChannelName, reason); } else { PrintErrorMessage(mSide, aChannelName, errorMsg); } MonitorAutoUnlock unlock(*mMonitor); - mListener->OnProcessingError(MsgDropped, errorMsg); + mListener->ProcessingError(MsgDropped, errorMsg); } bool MessageChannel::MaybeHandleError(Result code, const Message& aMsg, const char* channelName) { if (MsgProcessed == code) return true; @@ -2084,17 +2114,17 @@ MessageChannel::MaybeHandleError(Result if (msgname[0] == '?') { SprintfLiteral(reason,"(msgtype=0x%X) %s", aMsg.type(), errorMsg); } else { SprintfLiteral(reason,"%s %s", msgname, errorMsg); } PrintErrorMessage(mSide, channelName, reason); - mListener->OnProcessingError(code, reason); + mListener->ProcessingError(code, reason); return false; } void MessageChannel::OnChannelErrorFromLink() { AssertLinkThread();
--- a/ipc/glue/MessageChannel.h +++ b/ipc/glue/MessageChannel.h @@ -28,16 +28,17 @@ #include <deque> #include <stack> #include <math.h> namespace mozilla { namespace ipc { class MessageChannel; +class IToplevelProtocol; class RefCountedMonitor : public Monitor { public: RefCountedMonitor() : Monitor("mozilla.ipc.MessageChannel.mMonitor") {} @@ -55,16 +56,25 @@ enum class SyncSendError { NotConnectedBeforeSend, DisconnectedDuringSend, CancelledBeforeSend, CancelledAfterSend, TimedOut, ReplyError, }; +enum ChannelState { + ChannelClosed, + ChannelOpening, + ChannelConnected, + ChannelTimeout, + ChannelClosing, + ChannelError +}; + class AutoEnterTransaction; class MessageChannel : HasResultCodes { friend class ProcessLink; friend class ThreadLink; class CxxStackFrame; @@ -74,17 +84,17 @@ class MessageChannel : HasResultCodes public: static const int32_t kNoTimeout; typedef IPC::Message Message; typedef IPC::MessageInfo MessageInfo; typedef mozilla::ipc::Transport Transport; - explicit MessageChannel(MessageListener *aListener); + explicit MessageChannel(IToplevelProtocol *aListener); ~MessageChannel(); // "Open" from the perspective of the transport layer; the underlying // socketpair/pipe should already be created. // // Returns true if the transport layer was successfully connected, // i.e., mChannelState == ChannelConnected. bool Open(Transport* aTransport, MessageLoop* aIOLoop=0, Side aSide=UnknownSide); @@ -323,39 +333,26 @@ class MessageChannel : HasResultCodes int32_t NextSeqno() { AssertWorkerThread(); return (mSide == ChildSide) ? --mNextSeqno : ++mNextSeqno; } // This helper class manages mCxxStackDepth on behalf of MessageChannel. // When the stack depth is incremented from zero to non-zero, it invokes // a callback, and similarly for when the depth goes from non-zero to zero. 
- void EnteredCxxStack() { - mListener->OnEnteredCxxStack(); - } - + void EnteredCxxStack(); void ExitedCxxStack(); - void EnteredCall() { - mListener->OnEnteredCall(); - } - - void ExitedCall() { - mListener->OnExitedCall(); - } + void EnteredCall(); + void ExitedCall(); - void EnteredSyncSend() { - mListener->OnEnteredSyncSend(); - } + void EnteredSyncSend(); + void ExitedSyncSend(); - void ExitedSyncSend() { - mListener->OnExitedSyncSend(); - } - - MessageListener *Listener() const { + IToplevelProtocol *Listener() const { return mListener; } void DebugAbort(const char* file, int line, const char* cond, const char* why, bool reply=false); // This method is only safe to call on the worker thread, or in a @@ -490,17 +487,17 @@ class MessageChannel : HasResultCodes typedef LinkedList<RefPtr<MessageTask>> MessageQueue; typedef std::map<size_t, Message> MessageMap; typedef IPC::Message::msgid_t msgid_t; private: // Based on presumption the listener owns and overlives the channel, // this is never nullified. - MessageListener* mListener; + IToplevelProtocol* mListener; ChannelState mChannelState; RefPtr<RefCountedMonitor> mMonitor; Side mSide; MessageLink* mLink; MessageLoop* mWorkerLoop; // thread where work is done RefPtr<CancelableRunnable> mChannelErrorTask; // NotifyMaybeChannelError runnable // id() of mWorkerLoop. This persists even after mWorkerLoop is cleared
--- a/ipc/glue/MessageLink.h +++ b/ipc/glue/MessageLink.h @@ -34,123 +34,16 @@ struct HasResultCodes }; enum Side { ParentSide, ChildSide, UnknownSide }; -enum ChannelState { - ChannelClosed, - ChannelOpening, - ChannelConnected, - ChannelTimeout, - ChannelClosing, - ChannelError -}; - -// What happens if Interrupt calls race? -enum RacyInterruptPolicy { - RIPError, - RIPChildWins, - RIPParentWins -}; - -class MessageListener - : protected HasResultCodes, - public mozilla::SupportsWeakPtr<MessageListener> -{ - public: - MOZ_DECLARE_WEAKREFERENCE_TYPENAME(MessageListener) - typedef IPC::Message Message; - typedef IPC::MessageInfo MessageInfo; - - virtual ~MessageListener() { } - - virtual void OnChannelClose() = 0; - virtual void OnChannelError() = 0; - virtual Result OnMessageReceived(const Message& aMessage) = 0; - virtual Result OnMessageReceived(const Message& aMessage, Message *& aReply) = 0; - virtual Result OnCallReceived(const Message& aMessage, Message *& aReply) = 0; - virtual void OnProcessingError(Result aError, const char* aMsgName) = 0; - virtual void OnChannelConnected(int32_t peer_pid) {} - virtual bool OnReplyTimeout() { - return false; - } - - // WARNING: This function is called with the MessageChannel monitor held. - virtual void IntentionalCrash() { - MOZ_CRASH("Intentional IPDL crash"); - } - - // The code here is only useful for fuzzing. It should not be used for any - // other purpose. -#ifdef DEBUG - // Returns true if we should simulate a timeout. - // WARNING: This is a testing-only function that is called with the - // MessageChannel monitor held. Don't do anything fancy here or we could - // deadlock. - virtual bool ArtificialTimeout() { - return false; - } - - // Returns true if we want to cause the worker thread to sleep with the - // monitor unlocked. - virtual bool NeedArtificialSleep() { - return false; - } - - // This function should be implemented to sleep for some amount of time on - // the worker thread. 
Will only be called if NeedArtificialSleep() returns - // true. - virtual void ArtificialSleep() {} -#else - bool ArtificialTimeout() { return false; } - bool NeedArtificialSleep() { return false; } - void ArtificialSleep() {} -#endif - - virtual void OnEnteredCxxStack() { - NS_RUNTIMEABORT("default impl shouldn't be invoked"); - } - virtual void OnExitedCxxStack() { - NS_RUNTIMEABORT("default impl shouldn't be invoked"); - } - virtual void OnEnteredCall() { - NS_RUNTIMEABORT("default impl shouldn't be invoked"); - } - virtual void OnExitedCall() { - NS_RUNTIMEABORT("default impl shouldn't be invoked"); - } - virtual RacyInterruptPolicy MediateInterruptRace(const MessageInfo& parent, - const MessageInfo& child) - { - return RIPChildWins; - } - - /** - * Return true if windows messages can be handled while waiting for a reply - * to a sync IPDL message. - */ - virtual bool HandleWindowsMessages(const Message& aMsg) const { return true; } - - virtual void OnEnteredSyncSend() { - } - virtual void OnExitedSyncSend() { - } - - virtual void ProcessRemoteNativeEventsInInterruptCall() { - } - - // FIXME/bug 792652: this doesn't really belong here, but a - // large refactoring is needed to put it where it belongs. - virtual int32_t GetProtocolTypeId() = 0; -}; - class MessageLink { public: typedef IPC::Message Message; explicit MessageLink(MessageChannel *aChan); virtual ~MessageLink();
--- a/ipc/glue/ProtocolUtils.cpp +++ b/ipc/glue/ProtocolUtils.cpp @@ -12,16 +12,17 @@ #endif #include "mozilla/ipc/ProtocolUtils.h" #include "mozilla/dom/ContentParent.h" #include "mozilla/ipc/MessageChannel.h" #include "mozilla/ipc/Transport.h" #include "mozilla/StaticMutex.h" +#include "mozilla/Unused.h" #include "nsPrintfCString.h" #if defined(MOZ_SANDBOX) && defined(XP_WIN) #define TARGET_SANDBOX_EXPORTS #include "mozilla/sandboxTarget.h" #endif #if defined(MOZ_CRASHREPORTER) && defined(XP_WIN) @@ -48,41 +49,16 @@ MOZ_TYPE_SPECIFIC_SCOPED_POINTER_TEMPLAT ::LocalFree) MOZ_TYPE_SPECIFIC_SCOPED_POINTER_TEMPLATE(ScopedPSecurityDescriptor, \ RemovePointer<PSECURITY_DESCRIPTOR>::Type, \ ::LocalFree) #endif namespace ipc { -ProtocolCloneContext::ProtocolCloneContext() - : mNeckoParent(nullptr) -{} - -ProtocolCloneContext::~ProtocolCloneContext() -{} - -void ProtocolCloneContext::SetContentParent(ContentParent* aContentParent) -{ - mContentParent = aContentParent; -} - -IToplevelProtocol::IToplevelProtocol(ProtocolId aProtoId) - : mProtocolId(aProtoId) -{ -} - -IToplevelProtocol::~IToplevelProtocol() -{ - if (mTrans) { - RefPtr<DeleteTask<Transport>> task = new DeleteTask<Transport>(mTrans.release()); - XRE_GetIOMessageLoop()->PostTask(task.forget()); - } -} - class ChannelOpened : public IPC::Message { public: ChannelOpened(TransportDescriptor aDescriptor, ProcessId aOtherProcess, ProtocolId aProtocol, NestedLevel aNestedLevel = NOT_NESTED) : IPC::Message(MSG_ROUTING_CONTROL, // these only go to top-level actors @@ -372,10 +348,373 @@ TableToArray(const nsTHashtable<nsPtrHas uint32_t i = 0; void** elements = aArray.AppendElements(aTable.Count()); for (auto iter = aTable.ConstIter(); !iter.Done(); iter.Next()) { elements[i] = iter.Get()->GetKey(); ++i; } } +Maybe<IProtocol*> +IProtocol::ReadActor(const IPC::Message* aMessage, PickleIterator* aIter, bool aNullable, + const char* aActorDescription, int32_t aProtocolTypeId) +{ + int32_t id; + if (!IPC::ReadParam(aMessage, 
aIter, &id)) { + ActorIdReadError(aActorDescription); + return Nothing(); + } + + if (id == 1 || (id == 0 && !aNullable)) { + BadActorIdError(aActorDescription); + return Nothing(); + } + + if (id == 0) { + return Some(static_cast<IProtocol*>(nullptr)); + } + + IProtocol* listener = this->Lookup(id); + if (!listener) { + ActorLookupError(aActorDescription); + return Nothing(); + } + + if (listener->GetProtocolTypeId() != aProtocolTypeId) { + MismatchedActorTypeError(aActorDescription); + return Nothing(); + } + + return Some(listener); +} + +int32_t +IProtocol::Register(IProtocol* aRouted) +{ + return Manager()->Register(aRouted); +} + +int32_t +IProtocol::RegisterID(IProtocol* aRouted, int32_t aId) +{ + return Manager()->RegisterID(aRouted, aId); +} + +IProtocol* +IProtocol::Lookup(int32_t aId) +{ + return Manager()->Lookup(aId); +} + +void +IProtocol::Unregister(int32_t aId) +{ + Manager()->Unregister(aId); +} + +Shmem::SharedMemory* +IProtocol::CreateSharedMemory(size_t aSize, + SharedMemory::SharedMemoryType aType, + bool aUnsafe, + int32_t* aId) +{ + return Manager()->CreateSharedMemory(aSize, aType, aUnsafe, aId); +} + +Shmem::SharedMemory* +IProtocol::LookupSharedMemory(int32_t aId) +{ + return Manager()->LookupSharedMemory(aId); +} + +bool +IProtocol::IsTrackingSharedMemory(Shmem::SharedMemory* aSegment) +{ + return Manager()->IsTrackingSharedMemory(aSegment); +} + +bool +IProtocol::DestroySharedMemory(Shmem& aShmem) +{ + return Manager()->DestroySharedMemory(aShmem); +} + +ProcessId +IProtocol::OtherPid() const +{ + return Manager()->OtherPid(); +} + +void +IProtocol::FatalError(const char* const aErrorMsg) const +{ + HandleFatalError(ProtocolName(), aErrorMsg); +} + +void +IProtocol::HandleFatalError(const char* aProtocolName, const char* aErrorMsg) const +{ + if (IProtocol* manager = Manager()) { + manager->HandleFatalError(aProtocolName, aErrorMsg); + return; + } + + mozilla::ipc::FatalError(aProtocolName, aErrorMsg, mSide == ParentSide); +} + +bool 
+IProtocol::AllocShmem(size_t aSize, + Shmem::SharedMemory::SharedMemoryType aType, + Shmem* aOutMem) +{ + Shmem::id_t id; + Shmem::SharedMemory* rawmem(CreateSharedMemory(aSize, aType, false, &id)); + if (!rawmem) { + return false; + } + + *aOutMem = Shmem(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), rawmem, id); + return true; +} + +bool +IProtocol::AllocUnsafeShmem(size_t aSize, + Shmem::SharedMemory::SharedMemoryType aType, + Shmem* aOutMem) +{ + Shmem::id_t id; + Shmem::SharedMemory* rawmem(CreateSharedMemory(aSize, aType, true, &id)); + if (!rawmem) { + return false; + } + + *aOutMem = Shmem(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), rawmem, id); + return true; +} + +bool +IProtocol::DeallocShmem(Shmem& aMem) +{ + bool ok = DestroySharedMemory(aMem); +#ifdef DEBUG + if (!ok) { + if (mSide == ChildSide) { + FatalError("bad Shmem"); + } else { + NS_WARNING("bad Shmem"); + } + return false; + } +#endif // DEBUG + aMem.forget(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead()); + return ok; +} + +IToplevelProtocol::IToplevelProtocol(ProtocolId aProtoId, Side aSide) + : IProtocol(aSide), + mProtocolId(aProtoId), + mOtherPid(mozilla::ipc::kInvalidProcessId), + mLastRouteId(aSide == ParentSide ? 1 : 0), + mLastShmemId(aSide == ParentSide ? 
1 : 0) +{ +} + +IToplevelProtocol::~IToplevelProtocol() +{ + if (mTrans) { + RefPtr<DeleteTask<Transport>> task = new DeleteTask<Transport>(mTrans.release()); + XRE_GetIOMessageLoop()->PostTask(task.forget()); + } +} + +base::ProcessId +IToplevelProtocol::OtherPid() const +{ + return mOtherPid; +} + +void +IToplevelProtocol::SetOtherProcessId(base::ProcessId aOtherPid) +{ + mOtherPid = aOtherPid; +} + +bool +IToplevelProtocol::TakeMinidump(nsIFile** aDump, uint32_t* aSequence) +{ + MOZ_RELEASE_ASSERT(GetSide() == ParentSide); +#ifdef MOZ_CRASHREPORTER + return XRE_TakeMinidumpForChild(OtherPid(), aDump, aSequence); +#else + return false; +#endif +} + +bool +IToplevelProtocol::Open(mozilla::ipc::Transport* aTransport, + base::ProcessId aOtherPid, + MessageLoop* aThread, + mozilla::ipc::Side aSide) +{ + SetOtherProcessId(aOtherPid); + return GetIPCChannel()->Open(aTransport, aThread, aSide); +} + +bool +IToplevelProtocol::Open(MessageChannel* aChannel, + MessageLoop* aMessageLoop, + mozilla::ipc::Side aSide) +{ + SetOtherProcessId(base::GetCurrentProcId()); + return GetIPCChannel()->Open(aChannel, aMessageLoop, aSide); +} + +void +IToplevelProtocol::Close() +{ + GetIPCChannel()->Close(); +} + +void +IToplevelProtocol::SetReplyTimeoutMs(int32_t aTimeoutMs) +{ + GetIPCChannel()->SetReplyTimeoutMs(aTimeoutMs); +} + +bool +IToplevelProtocol::IsOnCxxStack() const +{ + return GetIPCChannel()->IsOnCxxStack(); +} + +int32_t +IToplevelProtocol::Register(IProtocol* aRouted) +{ + int32_t id = GetSide() == ParentSide ? 
++mLastRouteId : --mLastRouteId; + mActorMap.AddWithID(aRouted, id); + return id; +} + +int32_t +IToplevelProtocol::RegisterID(IProtocol* aRouted, + int32_t aId) +{ + mActorMap.AddWithID(aRouted, aId); + return aId; +} + +IProtocol* +IToplevelProtocol::Lookup(int32_t aId) +{ + return mActorMap.Lookup(aId); +} + +void +IToplevelProtocol::Unregister(int32_t aId) +{ + return mActorMap.Remove(aId); +} + +Shmem::SharedMemory* +IToplevelProtocol::CreateSharedMemory(size_t aSize, + Shmem::SharedMemory::SharedMemoryType aType, + bool aUnsafe, + Shmem::id_t* aId) +{ + RefPtr<Shmem::SharedMemory> segment( + Shmem::Alloc(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), aSize, aType, aUnsafe)); + if (!segment) { + return nullptr; + } + int32_t id = GetSide() == ParentSide ? ++mLastShmemId : --mLastShmemId; + Shmem shmem( + Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), + segment.get(), + id); + Message* descriptor = shmem.ShareTo( + Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), OtherPid(), MSG_ROUTING_CONTROL); + if (!descriptor) { + return nullptr; + } + Unused << GetIPCChannel()->Send(descriptor); + + *aId = shmem.Id(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead()); + Shmem::SharedMemory* rawSegment = segment.get(); + mShmemMap.AddWithID(segment.forget().take(), *aId); + return rawSegment; +} + +Shmem::SharedMemory* +IToplevelProtocol::LookupSharedMemory(Shmem::id_t aId) +{ + return mShmemMap.Lookup(aId); +} + +bool +IToplevelProtocol::IsTrackingSharedMemory(Shmem::SharedMemory* segment) +{ + return mShmemMap.HasData(segment); +} + +bool +IToplevelProtocol::DestroySharedMemory(Shmem& shmem) +{ + Shmem::id_t aId = shmem.Id(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead()); + Shmem::SharedMemory* segment = LookupSharedMemory(aId); + if (!segment) { + return false; + } + + Message* descriptor = shmem.UnshareFrom( + Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), OtherPid(), 
MSG_ROUTING_CONTROL); + + mShmemMap.Remove(aId); + Shmem::Dealloc(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), segment); + + if (!GetIPCChannel()->CanSend()) { + delete descriptor; + return true; + } + + return descriptor && GetIPCChannel()->Send(descriptor); +} + +void +IToplevelProtocol::DeallocShmems() +{ + for (IDMap<SharedMemory>::const_iterator cit = mShmemMap.begin(); cit != mShmemMap.end(); ++cit) { + Shmem::Dealloc(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), cit->second); + } + mShmemMap.Clear(); +} + +bool +IToplevelProtocol::ShmemCreated(const Message& aMsg) +{ + Shmem::id_t id; + RefPtr<Shmem::SharedMemory> rawmem(Shmem::OpenExisting(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), aMsg, &id, true)); + if (!rawmem) { + return false; + } + mShmemMap.AddWithID(rawmem.forget().take(), id); + return true; +} + +bool +IToplevelProtocol::ShmemDestroyed(const Message& aMsg) +{ + Shmem::id_t id; + PickleIterator iter = PickleIterator(aMsg); + if (!IPC::ReadParam(&aMsg, &iter, &id)) { + return false; + } + aMsg.EndRead(iter); + + Shmem::SharedMemory* rawmem = LookupSharedMemory(id); + if (rawmem) { + mShmemMap.Remove(id); + Shmem::Dealloc(Shmem::IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead(), rawmem); + } + return true; +} + } // namespace ipc } // namespace mozilla
--- a/ipc/glue/ProtocolUtils.h +++ b/ipc/glue/ProtocolUtils.h @@ -1,18 +1,19 @@ -/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- * vim: sw=4 ts=4 et : */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef mozilla_ipc_ProtocolUtils_h #define mozilla_ipc_ProtocolUtils_h 1 +#include "base/id_map.h" #include "base/process.h" #include "base/process_util.h" #include "chrome/common/ipc_message_utils.h" #include "prenv.h" #include "IPCMessageStart.h" #include "mozilla/Attributes.h" @@ -60,16 +61,18 @@ class ContentParent; } // namespace dom namespace net { class NeckoParent; } // namespace net namespace ipc { +class MessageChannel; + #ifdef XP_WIN const base::ProcessHandle kInvalidProcessHandle = INVALID_HANDLE_VALUE; // In theory, on Windows, this is a valid process ID, but in practice they are // currently divisible by four. Process IDs share the kernel handle allocation // code and they are guaranteed to be divisible by four. 
// As this could change for process IDs we shouldn't generally rely on this // property, however even if that were to change, it seems safe to rely on this @@ -125,127 +128,229 @@ struct Trigger { MOZ_ASSERT(0 <= msg && msg < INT32_MAX); } uint32_t mAction : 1; uint32_t mMessage : 31; }; -class ProtocolCloneContext -{ - typedef mozilla::dom::ContentParent ContentParent; - typedef mozilla::net::NeckoParent NeckoParent; - - RefPtr<ContentParent> mContentParent; - NeckoParent* mNeckoParent; - -public: - ProtocolCloneContext(); - - ~ProtocolCloneContext(); - - void SetContentParent(ContentParent* aContentParent); - - ContentParent* GetContentParent() { return mContentParent; } - - void SetNeckoParent(NeckoParent* aNeckoParent) - { - mNeckoParent = aNeckoParent; - } - - NeckoParent* GetNeckoParent() { return mNeckoParent; } +// What happens if Interrupt calls race? +enum RacyInterruptPolicy { + RIPError, + RIPChildWins, + RIPParentWins }; -template<class ListenerT> -class IProtocolManager +class IProtocol : public HasResultCodes { public: enum ActorDestroyReason { FailedConstructor, Deletion, AncestorDeletion, NormalShutdown, AbnormalShutdown }; typedef base::ProcessId ProcessId; + typedef IPC::Message Message; + typedef IPC::MessageInfo MessageInfo; - virtual int32_t Register(ListenerT*) = 0; - virtual int32_t RegisterID(ListenerT*, int32_t) = 0; - virtual ListenerT* Lookup(int32_t) = 0; - virtual void Unregister(int32_t) = 0; - virtual void RemoveManagee(int32_t, ListenerT*) = 0; + IProtocol(Side aSide) : mId(0), mSide(aSide), mManager(nullptr), mChannel(nullptr) {} + + virtual int32_t Register(IProtocol*); + virtual int32_t RegisterID(IProtocol*, int32_t); + virtual IProtocol* Lookup(int32_t); + virtual void Unregister(int32_t); + virtual void RemoveManagee(int32_t, IProtocol*) = 0; virtual Shmem::SharedMemory* CreateSharedMemory( - size_t, SharedMemory::SharedMemoryType, bool, int32_t*) = 0; - virtual Shmem::SharedMemory* LookupSharedMemory(int32_t) = 0; - virtual 
bool IsTrackingSharedMemory(Shmem::SharedMemory*) = 0; - virtual bool DestroySharedMemory(Shmem&) = 0; + size_t, SharedMemory::SharedMemoryType, bool, int32_t*); + virtual Shmem::SharedMemory* LookupSharedMemory(int32_t); + virtual bool IsTrackingSharedMemory(Shmem::SharedMemory*); + virtual bool DestroySharedMemory(Shmem&); // XXX odd ducks, acknowledged - virtual ProcessId OtherPid() const = 0; - virtual MessageChannel* GetIPCChannel() = 0; + virtual ProcessId OtherPid() const; + Side GetSide() const { return mSide; } + + virtual const char* ProtocolName() const = 0; + void FatalError(const char* const aErrorMsg) const; + virtual void HandleFatalError(const char* aProtocolName, const char* aErrorMsg) const; + + Maybe<IProtocol*> ReadActor(const IPC::Message* aMessage, PickleIterator* aIter, bool aNullable, + const char* aActorDescription, int32_t aProtocolTypeId); + + virtual Result OnMessageReceived(const Message& aMessage) = 0; + virtual Result OnMessageReceived(const Message& aMessage, Message *& aReply) = 0; + virtual Result OnCallReceived(const Message& aMessage, Message *& aReply) = 0; + + virtual int32_t GetProtocolTypeId() = 0; - virtual void FatalError(const char* const aProtocolName, const char* const aErrorMsg) const = 0; + int32_t Id() const { return mId; } + IProtocol* Manager() const { return mManager; } + virtual const MessageChannel* GetIPCChannel() const { return mChannel; } + virtual MessageChannel* GetIPCChannel() { return mChannel; } + + bool AllocShmem(size_t aSize, Shmem::SharedMemory::SharedMemoryType aType, Shmem* aOutMem); + bool AllocUnsafeShmem(size_t aSize, Shmem::SharedMemory::SharedMemoryType aType, Shmem* aOutMem); + bool DeallocShmem(Shmem& aMem); - Maybe<ListenerT*> ReadActor(const IPC::Message* aMessage, PickleIterator* aIter, bool aNullable, - const char* aActorDescription, int32_t aProtocolTypeId); +protected: + void SetId(int32_t aId) { mId = aId; } + void SetManager(IProtocol* aManager) { mManager = aManager; } + void 
SetIPCChannel(MessageChannel* aChannel) { mChannel = aChannel; } + +private: + int32_t mId; + Side mSide; + IProtocol* mManager; + MessageChannel* mChannel; }; typedef IPCMessageStart ProtocolId; -/** - * All RPC protocols should implement this interface. - */ -class IProtocol : public MessageListener -{ -public: - /** - * This function is used to clone this protocol actor. - * - * see IProtocol::CloneProtocol() - */ - virtual IProtocol* - CloneProtocol(MessageChannel* aChannel, - ProtocolCloneContext* aCtx) = 0; -}; - template<class PFooSide> class Endpoint; /** * All top-level protocols should inherit this class. * * IToplevelProtocol tracks all top-level protocol actors created from * this protocol actor. */ -class IToplevelProtocol +class IToplevelProtocol : public IProtocol { template<class PFooSide> friend class Endpoint; protected: - explicit IToplevelProtocol(ProtocolId aProtoId); + explicit IToplevelProtocol(ProtocolId aProtoId, Side aSide); ~IToplevelProtocol(); public: void SetTransport(UniquePtr<Transport> aTrans) { mTrans = Move(aTrans); } Transport* GetTransport() const { return mTrans.get(); } ProtocolId GetProtocolId() const { return mProtocolId; } - virtual MessageChannel* GetIPCChannel() = 0; + base::ProcessId OtherPid() const; + void SetOtherProcessId(base::ProcessId aOtherPid); + + bool TakeMinidump(nsIFile** aDump, uint32_t* aSequence); + + virtual void OnChannelClose() = 0; + virtual void OnChannelError() = 0; + virtual void ProcessingError(Result aError, const char* aMsgName) {} + virtual void OnChannelConnected(int32_t peer_pid) {} + + bool Open(mozilla::ipc::Transport* aTransport, + base::ProcessId aOtherPid, + MessageLoop* aThread = nullptr, + mozilla::ipc::Side aSide = mozilla::ipc::UnknownSide); + + bool Open(MessageChannel* aChannel, + MessageLoop* aMessageLoop, + mozilla::ipc::Side aSide = mozilla::ipc::UnknownSide); + + void Close(); + + void SetReplyTimeoutMs(int32_t aTimeoutMs); + + virtual int32_t Register(IProtocol*); + virtual 
int32_t RegisterID(IProtocol*, int32_t); + virtual IProtocol* Lookup(int32_t); + virtual void Unregister(int32_t); + + virtual Shmem::SharedMemory* CreateSharedMemory( + size_t, SharedMemory::SharedMemoryType, bool, int32_t*); + virtual Shmem::SharedMemory* LookupSharedMemory(int32_t); + virtual bool IsTrackingSharedMemory(Shmem::SharedMemory*); + virtual bool DestroySharedMemory(Shmem&); + + void DeallocShmems(); + + bool ShmemCreated(const Message& aMsg); + bool ShmemDestroyed(const Message& aMsg); + + virtual bool ShouldContinueFromReplyTimeout() { + return false; + } + + // WARNING: This function is called with the MessageChannel monitor held. + virtual void IntentionalCrash() { + MOZ_CRASH("Intentional IPDL crash"); + } + + // The code here is only useful for fuzzing. It should not be used for any + // other purpose. +#ifdef DEBUG + // Returns true if we should simulate a timeout. + // WARNING: This is a testing-only function that is called with the + // MessageChannel monitor held. Don't do anything fancy here or we could + // deadlock. + virtual bool ArtificialTimeout() { + return false; + } + + // Returns true if we want to cause the worker thread to sleep with the + // monitor unlocked. + virtual bool NeedArtificialSleep() { + return false; + } + + // This function should be implemented to sleep for some amount of time on + // the worker thread. Will only be called if NeedArtificialSleep() returns + // true. 
+ virtual void ArtificialSleep() {} +#else + bool ArtificialTimeout() { return false; } + bool NeedArtificialSleep() { return false; } + void ArtificialSleep() {} +#endif + + virtual void EnteredCxxStack() {} + virtual void ExitedCxxStack() {} + virtual void EnteredCall() {} + virtual void ExitedCall() {} + + bool IsOnCxxStack() const; + + virtual RacyInterruptPolicy MediateInterruptRace(const MessageInfo& parent, + const MessageInfo& child) + { + return RIPChildWins; + } + + /** + * Return true if windows messages can be handled while waiting for a reply + * to a sync IPDL message. + */ + virtual bool HandleWindowsMessages(const Message& aMsg) const { return true; } + + virtual void OnEnteredSyncSend() { + } + virtual void OnExitedSyncSend() { + } + + virtual void ProcessRemoteNativeEventsInInterruptCall() { + } private: ProtocolId mProtocolId; UniquePtr<Transport> mTrans; + base::ProcessId mOtherPid; + IDMap<IProtocol> mActorMap; + int32_t mLastRouteId; + IDMap<Shmem::SharedMemory> mShmemMap; + Shmem::id_t mLastShmemId; }; class IShmemAllocator { public: virtual bool AllocShmem(size_t aSize, mozilla::ipc::SharedMemory::SharedMemoryType aShmType, mozilla::ipc::Shmem* aShmem) = 0; @@ -348,50 +453,16 @@ Open(const PrivateIPDLInterface&, MessageChannel*, base::ProcessId, Transport::Mode, ProtocolId, ProtocolId); bool UnpackChannelOpened(const PrivateIPDLInterface&, const IPC::Message&, TransportDescriptor*, base::ProcessId*, ProtocolId*); -template<typename ListenerT> -Maybe<ListenerT*> -IProtocolManager<ListenerT>::ReadActor(const IPC::Message* aMessage, PickleIterator* aIter, bool aNullable, - const char* aActorDescription, int32_t aProtocolTypeId) -{ - int32_t id; - if (!IPC::ReadParam(aMessage, aIter, &id)) { - ActorIdReadError(aActorDescription); - return Nothing(); - } - - if (id == 1 || (id == 0 && !aNullable)) { - BadActorIdError(aActorDescription); - return Nothing(); - } - - if (id == 0) { - return Some(static_cast<ListenerT*>(nullptr)); - } - - ListenerT* 
listener = this->Lookup(id); - if (!listener) { - ActorLookupError(aActorDescription); - return Nothing(); - } - - if (static_cast<MessageListener*>(listener)->GetProtocolTypeId() != aProtocolTypeId) { - MismatchedActorTypeError(aActorDescription); - return Nothing(); - } - - return Some(listener); -} - #if defined(XP_WIN) // This is a restricted version of Windows' DuplicateHandle() function // that works inside the sandbox and can send handles but not retrieve // them. Unlike DuplicateHandle(), it takes a process ID rather than // a process handle. It returns true on success, false otherwise. bool DuplicateHandle(HANDLE aSourceHandle, DWORD aTargetProcessId,
--- a/ipc/glue/WindowsMessageLoop.cpp +++ b/ipc/glue/WindowsMessageLoop.cpp @@ -12,16 +12,17 @@ #include "nsAutoPtr.h" #include "nsServiceManagerUtils.h" #include "nsString.h" #include "nsIXULAppInfo.h" #include "WinUtils.h" #include "mozilla/ArrayUtils.h" +#include "mozilla/ipc/ProtocolUtils.h" #include "mozilla/PaintTracker.h" #include "mozilla/WindowsVersion.h" using namespace mozilla; using namespace mozilla::ipc; using namespace mozilla::ipc::windows; /**
--- a/ipc/ipdl/ipdl/lower.py +++ b/ipc/ipdl/ipdl/lower.py @@ -115,27 +115,24 @@ def _actorName(pname, side): def _actorIdType(): return Type.INT32 def _actorTypeTagType(): return Type.INT32 def _actorId(actor=None): if actor is not None: - return ExprSelect(actor, '->', 'mId') - return ExprVar('mId') + return ExprCall(ExprSelect(actor, '->', 'Id')) + return ExprCall(ExprVar('Id')) def _actorHId(actorhandle): return ExprSelect(actorhandle, '.', 'mId') -def _actorChannel(actor): - return ExprSelect(actor, '->', 'mChannel') - def _actorManager(actor): - return ExprSelect(actor, '->', 'mManager') + return ExprCall(ExprSelect(actor, '->', 'Manager'), args=[]) def _actorState(actor): return ExprSelect(actor, '->', 'mState') def _backstagePass(): return ExprCall(ExprVar('mozilla::ipc::PrivateIPDLInterface')) def _iterType(ptr): @@ -390,36 +387,16 @@ def _printErrorMessage(msg): ExprCall(ExprVar('NS_ERROR'), args=[ msg ])) def _protocolErrorBreakpoint(msg): if isinstance(msg, str): msg = ExprLiteral.String(msg) return StmtExpr(ExprCall(ExprVar('mozilla::ipc::ProtocolErrorBreakpoint'), args=[ msg ])) -def _ipcFatalError(name, msg): - if isinstance(name, str): - name = ExprLiteral.String(name) - if isinstance(msg, str): - msg = ExprLiteral.String(msg) - return StmtExpr(ExprCall(ExprVar('FatalError'), - args=[ name, msg ])) - -def _ipcFatalErrorWithClassname(name, msg, p, isparent): - if isinstance(name, str): - name = ExprLiteral.String(name) - if isinstance(msg, str): - msg = ExprLiteral.String(msg) - if p.decl.type.isToplevel(): - return StmtExpr(ExprCall(ExprVar('mozilla::ipc::FatalError'), - args=[ name, msg, isparent ])) - else: - return StmtExpr(ExprCall(ExprSelect(p.managerVar(), '->', 'FatalError'), - [ name, msg ])) - def _printWarningMessage(msg): if isinstance(msg, str): msg = ExprLiteral.String(msg) return StmtExpr( ExprCall(ExprVar('NS_WARNING'), args=[ msg ])) def _fatalError(msg): return StmtExpr( @@ -1085,45 +1062,31 @@ class Protocol(ipdl.ast.Protocol): 
return '->' def channelType(self): return Type('Channel', ptr=not self.decl.type.isToplevel()) def channelHeaderFile(self): return '/'.join(_semsToChannelParts(self.sendSems())) +'.h' - def fqListenerName(self): - return 'mozilla::ipc::MessageListener' - - def fqBaseClass(self): - return 'mozilla::ipc::IProtocol' - def managerInterfaceType(self, ptr=0): - return Type('mozilla::ipc::IProtocolManager', - ptr=ptr, - T=Type(self.fqBaseClass())) + return Type('mozilla::ipc::IProtocol', ptr=ptr) def openedProtocolInterfaceType(self, ptr=0): return Type('mozilla::ipc::IToplevelProtocol', ptr=ptr) def _ipdlmgrtype(self): assert 1 == len(self.decl.type.managers) for mgr in self.decl.type.managers: return mgr def managerActorType(self, side, ptr=0): return Type(_actorName(self._ipdlmgrtype().name(), side), ptr=ptr) - def managerMethod(self, actorThis=None): - _ = self._ipdlmgrtype() - if actorThis is not None: - return ExprSelect(actorThis, '->', 'Manager') - return ExprVar('Manager'); - def stateMethod(self): return ExprVar('state'); def registerMethod(self): return ExprVar('Register') def registerIDMethod(self): return ExprVar('RegisterID') @@ -1164,19 +1127,16 @@ class Protocol(ipdl.ast.Protocol): return ExprVar('GetIPCChannel') def callGetChannel(self, actorThis=None): fn = self.getChannelMethod() if actorThis is not None: fn = ExprSelect(actorThis, '->', fn.name) return ExprCall(fn) - def cloneProtocol(self): - return ExprVar('CloneProtocol') - def processingErrorVar(self): assert self.decl.type.isToplevel() return ExprVar('ProcessingError') def shouldContinueFromTimeoutVar(self): assert self.decl.type.isToplevel() return ExprVar('ShouldContinueFromReplyTimeout') @@ -1195,60 +1155,28 @@ class Protocol(ipdl.ast.Protocol): def exitedCallVar(self): assert self.decl.type.isToplevel() return ExprVar('ExitedCall') def onCxxStackVar(self): assert self.decl.type.isToplevel() return ExprVar('IsOnCxxStack') - def nextActorIdExpr(self, side): - assert self.decl.type.isToplevel() - 
if side is 'parent': op = '++' - elif side is 'child': op = '--' - else: assert 0 - return ExprPrefixUnop(self.lastActorIdVar(), op) - - def actorIdInit(self, side): - assert self.decl.type.isToplevel() - - # parents go up from FREED, children go down from NULL - if side is 'parent': return _FREED_ACTOR_ID - elif side is 'child': return _NULL_ACTOR_ID - else: assert 0 - # an actor's C++ private variables - def lastActorIdVar(self): - assert self.decl.type.isToplevel() - return ExprVar('mLastRouteId') - - def actorMapVar(self): - assert self.decl.type.isToplevel() - return ExprVar('mActorMap') - def channelVar(self, actorThis=None): if actorThis is not None: return ExprSelect(actorThis, '->', 'mChannel') return ExprVar('mChannel') - def channelForSubactor(self): - if self.decl.type.isToplevel(): - return ExprAddrOf(self.channelVar()) - return self.channelVar() - def routingId(self, actorThis=None): if self.decl.type.isToplevel(): return ExprVar('MSG_ROUTING_CONTROL') if actorThis is not None: - return ExprSelect(actorThis, '->', self.idVar().name) - return self.idVar() - - def idVar(self): - assert not self.decl.type.isToplevel() - return ExprVar('mId') + return ExprCall(ExprSelect(actorThis, '->', 'Id')) + return ExprCall(ExprVar('Id')) def stateVar(self, actorThis=None): if actorThis is not None: return ExprSelect(actorThis, '->', 'mState') return ExprVar('mState') def fqStateType(self): return Type(self.decl.type.name() +'::State') @@ -1259,25 +1187,21 @@ class Protocol(ipdl.ast.Protocol): def nullState(self): return _nullState(self.decl.type) def deadState(self): return _deadState(self.decl.type) def managerVar(self, thisexpr=None): assert thisexpr is not None or not self.decl.type.isToplevel() - mvar = ExprVar('mManager') + mvar = ExprCall(ExprVar('Manager'), args=[]) if thisexpr is not None: - mvar = ExprSelect(thisexpr, '->', mvar.name) + mvar = ExprCall(ExprSelect(thisexpr, '->', 'Manager'), args=[]) return mvar - def otherPidVar(self): - assert 
self.decl.type.isToplevel() - return ExprVar('mOtherPid') - def managedCxxType(self, actortype, side): assert self.decl.type.isManagerOf(actortype) return Type(_actorName(actortype.name(), side), ptr=1) def managedMethod(self, actortype, side): assert self.decl.type.isManagerOf(actortype) return ExprVar('Managed'+ _actorName(actortype.name(), side)) @@ -1285,58 +1209,16 @@ class Protocol(ipdl.ast.Protocol): assert self.decl.type.isManagerOf(actortype) return ExprVar('mManaged'+ _actorName(actortype.name(), side)) def managedVarType(self, actortype, side, const=0, ref=0): assert self.decl.type.isManagerOf(actortype) return _cxxManagedContainerType(Type(_actorName(actortype.name(), side)), const=const, ref=ref) - def managerArrayExpr(self, thisvar, side): - """The member var my manager keeps of actors of my type.""" - assert self.decl.type.isManaged() - return ExprSelect( - ExprCall(self.managerMethod(thisvar)), - '->', 'mManaged'+ _actorName(self.decl.type.name(), side)) - - # shmem stuff - def shmemMapType(self): - assert self.decl.type.isToplevel() - return Type('IDMap', T=_rawShmemType()) - - def shmemIteratorType(self): - assert self.decl.type.isToplevel() - # XXX breaks abstractions - return Type('IDMap<SharedMemory>::const_iterator') - - def shmemMapVar(self): - assert self.decl.type.isToplevel() - return ExprVar('mShmemMap') - - def lastShmemIdVar(self): - assert self.decl.type.isToplevel() - return ExprVar('mLastShmemId') - - def shmemIdInit(self, side): - assert self.decl.type.isToplevel() - # use the same scheme for shmem IDs as actor IDs - if side is 'parent': return _FREED_ACTOR_ID - elif side is 'child': return _NULL_ACTOR_ID - else: assert 0 - - def nextShmemIdExpr(self, side): - assert self.decl.type.isToplevel() - if side is 'parent': op = '++' - elif side is 'child': op = '--' - return ExprPrefixUnop(self.lastShmemIdVar(), op) - - def removeShmemId(self, idexpr): - return ExprCall(ExprSelect(self.shmemMapVar(), '.', 'Remove'), - args=[ idexpr ]) - # 
XXX this is sucky, fix def usesShmem(self): return _usesShmem(self) def subtreeUsesShmem(self): return _subtreeUsesShmem(self) @staticmethod @@ -2623,26 +2505,24 @@ class _GenerateProtocolActorCode(ipdl.as def lower(self, tu, clsname, cxxHeaderFile, cxxFile): self.clsname = clsname self.hdrfile = cxxHeaderFile self.cppfile = cxxFile tu.accept(self) def standardTypedefs(self): return [ - Typedef(Type(self.protocol.fqBaseClass()), 'ProtocolBase'), + Typedef(Type('mozilla::ipc::IProtocol'), 'ProtocolBase'), Typedef(Type('IPC::Message'), 'Message'), Typedef(Type(self.protocol.channelName()), 'Channel'), - Typedef(Type(self.protocol.fqListenerName()), 'ChannelListener'), + Typedef(Type('mozilla::ipc::IProtocol'), 'ChannelListener'), Typedef(Type('base::ProcessHandle'), 'ProcessHandle'), Typedef(Type('mozilla::ipc::MessageChannel'), 'MessageChannel'), Typedef(Type('mozilla::ipc::SharedMemory'), 'SharedMemory'), Typedef(Type('mozilla::ipc::Trigger'), 'Trigger'), - Typedef(Type('mozilla::ipc::ProtocolCloneContext'), - 'ProtocolCloneContext') ] def visitTranslationUnit(self, tu): self.protocol = tu.protocol hf = self.hdrfile cf = self.cppfile @@ -2814,31 +2694,32 @@ class _GenerateProtocolActorCode(ipdl.as # FIXME: all actors impl Iface for now if ptype.isManager() or 1: self.hdrfile.addthing(CppDirective('include', '"base/id_map.h"')) self.hdrfile.addthings([ CppDirective('include', '"'+ p.channelHeaderFile() +'"'), Whitespace.NL ]) - optinherits = [] + inherits = [] if ptype.isToplevel(): - optinherits.append(Inherit(p.openedProtocolInterfaceType(), - viz='public')) + inherits.append(Inherit(p.openedProtocolInterfaceType(), + viz='public')) + else: + inherits.append(Inherit(p.managerInterfaceType(), viz='public')) + if ptype.isToplevel() and self.side is 'parent': self.hdrfile.addthings([ _makeForwardDeclForQClass('nsIFile', []), Whitespace.NL ]) self.cls = Class( self.clsname, - inherits=[ Inherit(Type(p.fqBaseClass()), viz='public'), - Inherit(p.managerInterfaceType(), 
viz='protected') ] + - optinherits, + inherits=inherits, abstract=True) bridgeActorsCreated = ProcessGraph.bridgeEndpointsOf(ptype, self.side) opensActorsCreated = ProcessGraph.opensEndpointsOf(ptype, self.side) channelOpenedActors = OrderedDict.fromkeys(bridgeActorsCreated + opensActorsCreated, None) friends = _FindFriends().findFriends(ptype) if ptype.isManaged(): @@ -2974,133 +2855,55 @@ class _GenerateProtocolActorCode(ipdl.as [ Label.PUBLIC ] + self.standardTypedefs() + [ Whitespace.NL ] )) self.cls.addstmt(Label.PUBLIC) # Actor() ctor = ConstructorDefn(ConstructorDecl(self.clsname)) + side = ExprVar('mozilla::ipc::' + self.side.title() + 'Side') if ptype.isToplevel(): ctor.memberinits = [ + ExprMemberInit(ExprVar('mozilla::ipc::IToplevelProtocol'), + [_protocolId(ptype), side]), ExprMemberInit(p.channelVar(), [ ExprCall(ExprVar('ALLOW_THIS_IN_INITIALIZER_LIST'), [ ExprVar.THIS ]) ]), - ExprMemberInit(p.lastActorIdVar(), - [ p.actorIdInit(self.side) ]), - ExprMemberInit(p.otherPidVar(), - [ ExprVar('mozilla::ipc::kInvalidProcessId') ]), - ExprMemberInit(p.lastShmemIdVar(), - [ p.shmemIdInit(self.side) ]), ExprMemberInit(p.stateVar(), [ p.startState() ]) ] - if ptype.isToplevel(): - ctor.memberinits = [ExprMemberInit( - p.openedProtocolInterfaceType(), - [ _protocolId(ptype) ])] + ctor.memberinits else: ctor.memberinits = [ - ExprMemberInit(p.idVar(), [ ExprLiteral.ZERO ]), + ExprMemberInit(ExprVar('mozilla::ipc::IProtocol'), [side]), ExprMemberInit(p.stateVar(), [ p.deadState() ]) ] ctor.addstmt(StmtExpr(ExprCall(ExprVar('MOZ_COUNT_CTOR'), [ ExprVar(self.clsname) ]))) self.cls.addstmts([ ctor, Whitespace.NL ]) # ~Actor() dtor = DestructorDefn( DestructorDecl(self.clsname, virtual=True)) dtor.addstmt(StmtExpr(ExprCall(ExprVar('MOZ_COUNT_DTOR'), [ ExprVar(self.clsname) ]))) self.cls.addstmts([ dtor, Whitespace.NL ]) - if ptype.isToplevel(): - # Open(Transport*, ProcessId, MessageLoop*, Side) - aTransportVar = ExprVar('aTransport') - aThreadVar = 
ExprVar('aThread') - otherPidVar = ExprVar('aOtherPid') - sidevar = ExprVar('aSide') - openmeth = MethodDefn( - MethodDecl( - 'Open', - params=[ Decl(Type('Channel::Transport', ptr=True), - aTransportVar.name), - Decl(Type('base::ProcessId'), otherPidVar.name), - Param(Type('MessageLoop', ptr=True), - aThreadVar.name, - default=ExprLiteral.NULL), - Param(Type('mozilla::ipc::Side'), - sidevar.name, - default=ExprVar('mozilla::ipc::UnknownSide')) ], - ret=Type.BOOL)) - - openmeth.addstmts([ - StmtExpr(ExprAssn(p.otherPidVar(), otherPidVar)), - StmtReturn(ExprCall(ExprSelect(p.channelVar(), '.', 'Open'), - [ aTransportVar, aThreadVar, sidevar ])) - ]) - self.cls.addstmts([ - openmeth, - Whitespace.NL ]) - - # Open(MessageChannel *, MessageLoop *, Side) - aChannel = ExprVar('aChannel') - aMessageLoop = ExprVar('aMessageLoop') - sidevar = ExprVar('aSide') - openmeth = MethodDefn( - MethodDecl( - 'Open', - params=[ Decl(Type('MessageChannel', ptr=True), - aChannel.name), - Param(Type('MessageLoop', ptr=True), - aMessageLoop.name), - Param(Type('mozilla::ipc::Side'), - sidevar.name, - default=ExprVar('mozilla::ipc::UnknownSide')) ], - ret=Type.BOOL)) - - openmeth.addstmts([ - StmtExpr(ExprAssn(p.otherPidVar(), ExprCall(ExprVar('base::GetCurrentProcId')))), - StmtReturn(ExprCall(ExprSelect(p.channelVar(), '.', 'Open'), - [ aChannel, aMessageLoop, sidevar ])) - ]) - self.cls.addstmts([ - openmeth, - Whitespace.NL ]) - - # Close() - closemeth = MethodDefn(MethodDecl('Close')) - closemeth.addstmt(StmtExpr( - ExprCall(ExprSelect(p.channelVar(), '.', 'Close')))) - self.cls.addstmts([ closemeth, Whitespace.NL ]) - - if ptype.isSync() or ptype.isInterrupt(): - # SetReplyTimeoutMs() - timeoutvar = ExprVar('aTimeoutMs') - settimeout = MethodDefn(MethodDecl( - 'SetReplyTimeoutMs', - params=[ Decl(Type.INT32, timeoutvar.name) ])) - settimeout.addstmt(StmtExpr( - ExprCall( - ExprSelect(p.channelVar(), '.', 'SetReplyTimeoutMs'), - args=[ timeoutvar ]))) - self.cls.addstmts([ 
settimeout, Whitespace.NL ]) - if not ptype.isToplevel(): if 1 == len(p.managers): ## manager() const managertype = p.managerActorType(self.side, ptr=1) managermeth = MethodDefn(MethodDecl( - p.managerMethod().name, ret=managertype, const=1)) + 'Manager', ret=managertype, const=1)) + managerexp = ExprCall(ExprVar('IProtocol::Manager'), args=[]) managermeth.addstmt(StmtReturn( - ExprCast(p.managerVar(), managertype, static=1))) + ExprCast(managerexp, managertype, static=1))) self.cls.addstmts([ managermeth, Whitespace.NL ]) def actorFromIter(itervar): return ExprCall(ExprSelect(ExprCall(ExprSelect(itervar, '.', 'Get')), '->', 'GetKey')) def forLoopOverHashtable(hashtable, itervar, const=False): return StmtFor( @@ -3268,122 +3071,44 @@ class _GenerateProtocolActorCode(ipdl.as Whitespace.NL ]) destroysubtreevar = ExprVar('DestroySubtree') deallocsubtreevar = ExprVar('DeallocSubtree') deallocshmemvar = ExprVar('DeallocShmems') deallocselfvar = ExprVar('Dealloc' + _actorName(ptype.name(), self.side)) - # OnProcesingError(code) - codevar = ExprVar('aCode') - reasonvar = ExprVar('aReason') - onprocessingerror = MethodDefn( - MethodDecl('OnProcessingError', - params=[ Param(_Result.Type(), codevar.name), - Param(Type('char', const=1, ptr=1), reasonvar.name) ])) - if ptype.isToplevel(): - onprocessingerror.addstmt(StmtReturn( - ExprCall(p.processingErrorVar(), args=[ codevar, reasonvar ]))) - else: - onprocessingerror.addstmt( - _fatalError("`OnProcessingError' called on non-toplevel actor")) - self.cls.addstmts([ onprocessingerror, Whitespace.NL ]) - # int32_t GetProtocolTypeId() { return PFoo; } gettypetag = MethodDefn( MethodDecl('GetProtocolTypeId', ret=_actorTypeTagType())) gettypetag.addstmt(StmtReturn(_protocolId(ptype))) self.cls.addstmts([ gettypetag, Whitespace.NL ]) - # OnReplyTimeout() - if toplevel.isSync() or toplevel.isInterrupt(): - ontimeout = MethodDefn( - MethodDecl('OnReplyTimeout', ret=Type.BOOL)) - - if ptype.isToplevel(): - 
ontimeout.addstmt(StmtReturn( - ExprCall(p.shouldContinueFromTimeoutVar()))) - else: - ontimeout.addstmts([ - _fatalError("`OnReplyTimeout' called on non-toplevel actor"), - StmtReturn.FALSE - ]) - - self.cls.addstmts([ ontimeout, Whitespace.NL ]) - - # C++-stack-related methods if ptype.isToplevel(): - # OnEnteredCxxStack() - onentered = MethodDefn(MethodDecl('OnEnteredCxxStack')) - onentered.addstmt(StmtReturn(ExprCall(p.enteredCxxStackVar()))) - - # OnExitedCxxStack() - onexited = MethodDefn(MethodDecl('OnExitedCxxStack')) - onexited.addstmt(StmtReturn(ExprCall(p.exitedCxxStackVar()))) - - # OnEnteredCxxStack() - onenteredcall = MethodDefn(MethodDecl('OnEnteredCall')) - onenteredcall.addstmt(StmtReturn(ExprCall(p.enteredCallVar()))) - - # OnExitedCxxStack() - onexitedcall = MethodDefn(MethodDecl('OnExitedCall')) - onexitedcall.addstmt(StmtReturn(ExprCall(p.exitedCallVar()))) - - # bool IsOnCxxStack() - onstack = MethodDefn( - MethodDecl(p.onCxxStackVar().name, ret=Type.BOOL, const=1)) - onstack.addstmt(StmtReturn(ExprCall( - ExprSelect(p.channelVar(), '.', p.onCxxStackVar().name)))) - - self.cls.addstmts([ onentered, onexited, - onenteredcall, onexitedcall, - onstack, Whitespace.NL ]) - - # OnChannelClose() - onclose = MethodDefn(MethodDecl('OnChannelClose')) - if ptype.isToplevel(): + # OnChannelClose() + onclose = MethodDefn(MethodDecl('OnChannelClose')) onclose.addstmts([ StmtExpr(ExprCall(destroysubtreevar, args=[ _DestroyReason.NormalShutdown ])), StmtExpr(ExprCall(deallocsubtreevar)), StmtExpr(ExprCall(deallocshmemvar)), StmtExpr(ExprCall(deallocselfvar)) ]) - else: - onclose.addstmt( - _fatalError("`OnClose' called on non-toplevel actor")) - self.cls.addstmts([ onclose, Whitespace.NL ]) - - # OnChannelError() - onerror = MethodDefn(MethodDecl('OnChannelError')) - if ptype.isToplevel(): + self.cls.addstmts([ onclose, Whitespace.NL ]) + + # OnChannelError() + onerror = MethodDefn(MethodDecl('OnChannelError')) onerror.addstmts([ 
StmtExpr(ExprCall(destroysubtreevar, args=[ _DestroyReason.AbnormalShutdown ])), StmtExpr(ExprCall(deallocsubtreevar)), StmtExpr(ExprCall(deallocshmemvar)), StmtExpr(ExprCall(deallocselfvar)) ]) - else: - onerror.addstmt( - _fatalError("`OnError' called on non-toplevel actor")) - self.cls.addstmts([ onerror, Whitespace.NL ]) - - # OnChannelConnected() - onconnected = MethodDefn(MethodDecl('OnChannelConnected', - params=[ Decl(Type.INT32, 'aPid') ])) - if not ptype.isToplevel(): - onconnected.addstmt( - _fatalError("'OnConnected' called on non-toplevel actor")) - - self.cls.addstmts([ onconnected, Whitespace.NL ]) - - # User-facing shmem methods - self.cls.addstmts(self.makeShmemIface()) + self.cls.addstmts([ onerror, Whitespace.NL ]) if (ptype.isToplevel() and ptype.isInterrupt()): processnative = MethodDefn( MethodDecl('ProcessNativeEventsInInterruptCall', ret=Type.VOID)) processnative.addstmts([ CppDirective('ifdef', 'OS_WIN'), @@ -3392,81 +3117,28 @@ class _GenerateProtocolActorCode(ipdl.as 'ProcessNativeEventsInInterruptCall'))), CppDirective('else'), _fatalError('This method is Windows-only'), CppDirective('endif'), ]) self.cls.addstmts([ processnative, Whitespace.NL ]) - if ptype.isToplevel() and self.side is 'parent': - ## void SetOtherProcessId(ProcessId aOtherPid) - otherpidvar = ExprVar('aOtherPid') - setotherprocessid = MethodDefn(MethodDecl( - 'SetOtherProcessId', - params=[ Decl(Type('base::ProcessId'), otherpidvar.name)])) - setotherprocessid.addstmts([ - StmtExpr(ExprAssn(p.otherPidVar(), otherpidvar)), - ]) - self.cls.addstmts([ - setotherprocessid, - Whitespace.NL]) - - ## bool GetMinidump(nsIFile** dump) - self.cls.addstmt(Label.PROTECTED) - - dumpvar = ExprVar('aDump') - seqvar = ExprVar('aSequence') - getdump = MethodDefn(MethodDecl( - 'TakeMinidump', - params=[ Decl(Type('nsIFile', ptrptr=1), dumpvar.name), - Decl(Type.UINT32PTR, seqvar.name)], - ret=Type.BOOL, - const=1)) - getdump.addstmts([ - CppDirective('ifdef', 'MOZ_CRASHREPORTER'), - 
StmtReturn(ExprCall( - ExprVar('XRE_TakeMinidumpForChild'), - args=[ ExprCall(p.otherPidMethod()), dumpvar, seqvar ])), - CppDirective('else'), - StmtReturn.FALSE, - CppDirective('endif') - ]) - self.cls.addstmts([ getdump, Whitespace.NL ]) - ## private methods self.cls.addstmt(Label.PRIVATE) - ## FatalError() - msgparam = ExprVar('aMsg') + ## ProtocolName() actorname = _actorName(p.name, self.side) - fatalerror = MethodDefn(MethodDecl( - 'FatalError', - params=[ Decl(Type('char', const=1, ptrconst=1), msgparam.name) ], - const=1, never_inline=1)) - if self.side is 'parent': - isparent = ExprLiteral.TRUE - else: - isparent = ExprLiteral.FALSE - fatalerror.addstmts([ - _ipcFatalError(actorname, msgparam) + protocolname = MethodDefn(MethodDecl( + 'ProtocolName', params=[], + const=1, virtual=1, ret=Type('char', const=1, ptr=1))) + protocolname.addstmts([ + StmtReturn(ExprLiteral.String(actorname)) ]) - self.cls.addstmts([ fatalerror, Whitespace.NL ]) - - protocolnameparam = ExprVar('aProtocolName') - - fatalerrorwithclassname = MethodDefn(MethodDecl( - 'FatalError', - params=[ Decl(Type('char', const=1, ptrconst=1), protocolnameparam.name), - Decl(Type('char', const=1, ptrconst=1), msgparam.name) ], - const=1)) - fatalerrorwithclassname.addstmts([ - _ipcFatalErrorWithClassname(protocolnameparam, msgparam, self.protocol, isparent) - ]) - self.cls.addstmts([ fatalerrorwithclassname, Whitespace.NL ]) + self.cls.addstmts([ protocolname, Whitespace.NL ]) ## DestroySubtree(bool normal) whyvar = ExprVar('why') subtreewhyvar = ExprVar('subtreewhy') kidsvar = ExprVar('kids') ivar = ExprVar('i') itervar = ExprVar('iter') ithkid = ExprIndex(kidsvar, ivar) @@ -3565,64 +3237,24 @@ class _GenerateProtocolActorCode(ipdl.as ]) deallocsubtree.addstmt(block) # don't delete outselves: either the manager will do it, or # we're toplevel self.cls.addstmts([ deallocsubtree, Whitespace.NL ]) if ptype.isToplevel(): - ## DeallocShmem(): - # for (cit = map.begin(); cit != map.end(); ++cit) - # 
Dealloc(cit->second) - # map.Clear() - deallocshmem = MethodDefn(MethodDecl(deallocshmemvar.name)) - - citvar = ExprVar('cit') - begin = ExprCall(ExprSelect(p.shmemMapVar(), '.', 'begin')) - end = ExprCall(ExprSelect(p.shmemMapVar(), '.', 'end')) - shmem = ExprSelect(citvar, '->', 'second') - foreachdealloc = StmtFor( - Param(p.shmemIteratorType(), citvar.name, begin), - ExprBinary(citvar, '!=', end), - ExprPrefixUnop(citvar, '++')) - foreachdealloc.addstmt(StmtExpr(_shmemDealloc(shmem))) - - deallocshmem.addstmts([ - foreachdealloc, - StmtExpr(ExprCall(ExprSelect(p.shmemMapVar(), '.', 'Clear'))) - ]) - self.cls.addstmts([ deallocshmem, Whitespace.NL ]) - deallocself = MethodDefn(MethodDecl(deallocselfvar.name, virtual=1)) self.cls.addstmts([ deallocself, Whitespace.NL ]) self.implementPickling() ## private members - self.cls.addstmt(StmtDecl(Decl(p.channelType(), 'mChannel'))) if ptype.isToplevel(): - self.cls.addstmts([ - StmtDecl(Decl(Type('IDMap', T=Type('ProtocolBase')), - p.actorMapVar().name)), - StmtDecl(Decl(_actorIdType(), p.lastActorIdVar().name)), - StmtDecl(Decl(Type('base::ProcessId'), - p.otherPidVar().name)) - ]) - elif ptype.isManaged(): - self.cls.addstmts([ - StmtDecl(Decl(p.managerInterfaceType(ptr=1), - p.managerVar().name)), - StmtDecl(Decl(_actorIdType(), p.idVar().name)) - ]) - if p.decl.type.isToplevel(): - self.cls.addstmts([ - StmtDecl(Decl(p.shmemMapType(), p.shmemMapVar().name)), - StmtDecl(Decl(_shmemIdType(), p.lastShmemIdVar().name)) - ]) + self.cls.addstmt(StmtDecl(Decl(p.channelType(), 'mChannel'))) self.cls.addstmt(StmtDecl(Decl(Type('State'), p.stateVar().name))) for managed in ptype.manages: self.cls.addstmts([ StmtDecl(Decl( p.managedVarType(managed, self.side), p.managedVar(managed, self.side).name)) ]) @@ -3636,215 +3268,38 @@ class _GenerateProtocolActorCode(ipdl.as sizevar = ExprVar('aSize') typevar = ExprVar('aType') unsafevar = ExprVar('aUnsafe') protocolbase = Type('ProtocolBase', ptr=1) sourcevar = ExprVar('aSource') 
ivar = ExprVar('i') kidsvar = ExprVar('kids') ithkid = ExprIndex(kidsvar, ivar) - clonecontexttype = Type('ProtocolCloneContext', ptr=1) - clonecontextvar = ExprVar('aCtx') - - register = MethodDefn(MethodDecl( - p.registerMethod().name, - params=[ Decl(protocolbase, routedvar.name) ], - ret=_actorIdType(), virtual=1)) - registerid = MethodDefn(MethodDecl( - p.registerIDMethod().name, - params=[ Decl(protocolbase, routedvar.name), - Decl(_actorIdType(), idvar.name) ], - ret=_actorIdType(), - virtual=1)) - lookup = MethodDefn(MethodDecl( - p.lookupIDMethod().name, - params=[ Decl(_actorIdType(), idvar.name) ], - ret=protocolbase, virtual=1)) - unregister = MethodDefn(MethodDecl( - p.unregisterMethod().name, - params=[ Decl(_actorIdType(), idvar.name) ], - virtual=1)) - - createshmem = MethodDefn(MethodDecl( - p.createSharedMemory().name, - ret=_rawShmemType(ptr=1), - params=[ Decl(Type.SIZE, sizevar.name), - Decl(_shmemTypeType(), typevar.name), - Decl(Type.BOOL, unsafevar.name), - Decl(_shmemIdType(ptr=1), idvar.name) ], - virtual=1)) - lookupshmem = MethodDefn(MethodDecl( - p.lookupSharedMemory().name, - ret=_rawShmemType(ptr=1), - params=[ Decl(_shmemIdType(), idvar.name) ], - virtual=1)) - destroyshmem = MethodDefn(MethodDecl( - p.destroySharedMemory().name, - ret=Type.BOOL, - params=[ Decl(_shmemType(ref=1), shmemvar.name) ], - virtual=1)) - istracking = MethodDefn(MethodDecl( - p.isTrackingSharedMemory().name, - ret=Type.BOOL, - params=[ Decl(_rawShmemType(ptr=1), rawvar.name) ], - virtual=1)) - - otherpid = MethodDefn(MethodDecl( - p.otherPidMethod().name, - ret=Type('base::ProcessId'), - const=1, - virtual=1)) - - getchannel = MethodDefn(MethodDecl( - p.getChannelMethod().name, - ret=Type('MessageChannel', ptr=1), - virtual=1)) - - cloneprotocol = MethodDefn(MethodDecl( - p.cloneProtocol().name, - params=[ Decl(Type('Channel', ptr=True), 'aChannel'), - Decl(clonecontexttype, clonecontextvar.name) ], - ret=Type(p.fqBaseClass(), ptr=1), - virtual=1)) + + 
methods = [] + + if p.decl.type.isToplevel(): + getchannel = MethodDefn(MethodDecl( + p.getChannelMethod().name, + ret=Type('MessageChannel', ptr=1), + virtual=1)) + getchannel.addstmt(StmtReturn(ExprAddrOf(p.channelVar()))) + + getchannelconst = MethodDefn(MethodDecl( + p.getChannelMethod().name, + ret=Type('MessageChannel', ptr=1, const=1), + virtual=1, const=1)) + getchannelconst.addstmt(StmtReturn(ExprAddrOf(p.channelVar()))) + + methods += [ getchannel, + getchannelconst ] if p.decl.type.isToplevel(): tmpvar = ExprVar('tmp') - register.addstmts([ - StmtDecl(Decl(_actorIdType(), tmpvar.name), - p.nextActorIdExpr(self.side)), - StmtExpr(ExprCall( - ExprSelect(p.actorMapVar(), '.', 'AddWithID'), - [ routedvar, tmpvar ])), - StmtReturn(tmpvar) - ]) - registerid.addstmts([ - StmtExpr( - ExprCall(ExprSelect(p.actorMapVar(), '.', 'AddWithID'), - [ routedvar, idvar ])), - StmtReturn(idvar) - ]) - lookup.addstmt(StmtReturn( - ExprCall(ExprSelect(p.actorMapVar(), '.', 'Lookup'), - [ idvar ]))) - unregister.addstmt(StmtReturn( - ExprCall(ExprSelect(p.actorMapVar(), '.', 'Remove'), - [ idvar ]))) - - # SharedMemory* CreateSharedMemory(size_t aSize, Type aType, bool aUnsafe, id_t* aId): - # RefPtr<SharedMemory> segment(Shmem::Alloc(aSize, aType, aUnsafe)); - # if (!segment) - # return nullptr; - # Shmem shmem(segment.get(), [nextshmemid]); - # Message descriptor = shmem.ShareTo(subprocess, mId, descriptor); - # if (!descriptor) - # return nullptr; - # mChannel.Send(descriptor); - # *aId = shmem.Id(); - # SharedMemory* rawSegment = segment.get(); - # mShmemMap.Add(segment.forget().take(), *aId); - # return rawSegment; - createshmem.addstmt(StmtDecl( - Decl(_refptr(_rawShmemType()), rawvar.name), - initargs=[ _shmemAlloc(sizevar, typevar, unsafevar) ])) - failif = StmtIf(ExprNot(rawvar)) - failif.addifstmt(StmtReturn(ExprLiteral.NULL)) - createshmem.addstmt(failif) - - descriptorvar = ExprVar('descriptor') - createshmem.addstmts([ - StmtDecl( - Decl(_shmemType(), 
shmemvar.name), - initargs=[ _shmemBackstagePass(), - _refptrGet(rawvar), - p.nextShmemIdExpr(self.side) ]), - StmtDecl(Decl(Type('Message', ptr=1), descriptorvar.name), - init=_shmemShareTo(shmemvar, - p.callOtherPid(), - p.routingId())) - ]) - failif = StmtIf(ExprNot(descriptorvar)) - failif.addifstmt(StmtReturn(ExprLiteral.NULL)) - createshmem.addstmt(failif) - - failif = StmtIf(ExprNot(ExprCall( - ExprSelect(p.channelVar(), p.channelSel(), 'Send'), - args=[ descriptorvar ]))) - createshmem.addstmt(failif) - - rawsegmentvar = ExprVar('rawSegment') - createshmem.addstmts([ - StmtExpr(ExprAssn(ExprDeref(idvar), _shmemId(shmemvar))), - StmtDecl(Decl(_rawShmemType(ptr=1), rawsegmentvar.name), - init=_refptrGet(rawvar)), - StmtExpr(ExprCall( - ExprSelect(p.shmemMapVar(), '.', 'AddWithID'), - args=[ _refptrTake(_refptrForget(rawvar)), ExprDeref(idvar) ])), - StmtReturn(rawsegmentvar) - ]) - - # SharedMemory* Lookup(id) - lookupshmem.addstmt(StmtReturn(ExprCall( - ExprSelect(p.shmemMapVar(), '.', 'Lookup'), - args=[ idvar ]))) - - # bool IsTrackingSharedMemory(mem) - istracking.addstmt(StmtReturn(ExprCall( - ExprSelect(p.shmemMapVar(), '.', 'HasData'), - args=[ rawvar ]))) - - # bool DestroySharedMemory(shmem): - # id = shmem.Id() - # SharedMemory* rawmem = Lookup(id) - # if (!rawmem) - # return false; - # Message descriptor = UnShare(subprocess, mId, descriptor) - # mShmemMap.Remove(id) - # Shmem::Dealloc(rawmem) - # if (!mChannel.CanSend()) { - # delete descriptor; - # return true; - # } - # return descriptor && Send(descriptor) - destroyshmem.addstmts([ - StmtDecl(Decl(_shmemIdType(), idvar.name), - init=_shmemId(shmemvar)), - StmtDecl(Decl(_rawShmemType(ptr=1), rawvar.name), - init=_lookupShmem(idvar)) - ]) - - failif = StmtIf(ExprNot(rawvar)) - failif.addifstmt(StmtReturn.FALSE) - cansend = ExprCall(ExprSelect(p.channelVar(), '.', 'CanSend'), []) - returnif = StmtIf(ExprNot(cansend)) - returnif.addifstmts([ - StmtExpr(ExprDelete(descriptorvar)), - 
StmtReturn.TRUE]) - destroyshmem.addstmts([ - failif, - Whitespace.NL, - StmtDecl(Decl(Type('Message', ptr=1), descriptorvar.name), - init=_shmemUnshareFrom( - shmemvar, - p.callOtherPid(), - p.routingId())), - Whitespace.NL, - StmtExpr(p.removeShmemId(idvar)), - StmtExpr(_shmemDealloc(rawvar)), - Whitespace.NL, - returnif, - Whitespace.NL, - StmtReturn(ExprBinary( - descriptorvar, '&&', - ExprCall( - ExprSelect(p.channelVar(), p.channelSel(), 'Send'), - args=[ descriptorvar ]))) - ]) - - # "private" message that passes shmem mappings from one process # to the other if p.subtreeUsesShmem(): self.asyncSwitch.addcase( CaseLabel('SHMEM_CREATED_MESSAGE_TYPE'), self.genShmemCreatedHandler()) self.asyncSwitch.addcase( CaseLabel('SHMEM_DESTROYED_MESSAGE_TYPE'), @@ -3855,54 +3310,16 @@ class _GenerateProtocolActorCode(ipdl.as _fatalError('this protocol tree does not use shmem'), StmtReturn(_Result.NotKnown) ]) self.asyncSwitch.addcase( CaseLabel('SHMEM_CREATED_MESSAGE_TYPE'), abort) self.asyncSwitch.addcase( CaseLabel('SHMEM_DESTROYED_MESSAGE_TYPE'), abort) - otherpid.addstmt(StmtReturn(p.otherPidVar())) - getchannel.addstmt(StmtReturn(ExprAddrOf(p.channelVar()))) - else: - # delegate registration to manager - register.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', p.registerMethod().name), - [ routedvar ]))) - registerid.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', p.registerIDMethod().name), - [ routedvar, idvar ]))) - lookup.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', p.lookupIDMethod().name), - [ idvar ]))) - unregister.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', p.unregisterMethod().name), - [ idvar ]))) - createshmem.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', p.createSharedMemory().name), - [ sizevar, typevar, unsafevar, idvar ]))) - lookupshmem.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', p.lookupSharedMemory().name), - [ idvar ]))) - 
istracking.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', - p.isTrackingSharedMemory().name), - [ rawvar ]))) - destroyshmem.addstmt(StmtReturn(ExprCall( - ExprSelect(p.managerVar(), '->', p.destroySharedMemory().name), - [ shmemvar ]))) - otherpid.addstmt(StmtReturn( - p.callOtherPid(p.managerVar()))) - getchannel.addstmt(StmtReturn(p.channelVar())) - - cloneprotocol.addstmts([ - _fatalError('Clone() has not yet been implemented'), - StmtReturn(ExprLiteral.NULL) - ]) - othervar = ExprVar('other') managertype = Type(_actorName(p.name, self.side), ptr=1) # Keep track of types created with an INOUT ctor. We need to call # Register() or RegisterID() for them depending on the side the managee # is created. inoutCtorTypes = [] for msg in p.messageDecls: @@ -3951,197 +3368,45 @@ class _GenerateProtocolActorCode(ipdl.as ]) switchontype.addcase(CaseLabel(_protocolId(manageeipdltype).name), case) default = StmtBlock() default.addstmts([ _fatalError('unreached'), StmtReturn() ]) switchontype.addcase(DefaultLabel(), default) removemanagee.addstmt(switchontype) - return [ register, - registerid, - lookup, - unregister, - removemanagee, - createshmem, - lookupshmem, - istracking, - destroyshmem, - otherpid, - getchannel, - cloneprotocol, - Whitespace.NL ] - - def makeShmemIface(self): - p = self.protocol - idvar = ExprVar('id') - sizevar = ExprVar('aSize') - typevar = ExprVar('aType') - memvar = ExprVar('aMem') - outmemvar = ExprVar('aOutMem') - rawvar = ExprVar('rawmem') - - def allocShmemMethod(name, unsafe): - # bool Alloc*Shmem(size_t aSize, Type aType, Shmem* aOutMem): - # id_t id; - # SharedMemory* rawmem(CreateSharedMemory(aSize, aType, false, &id)); - # if (!rawmem) - # return false; - # *aOutMem = Shmem(rawmem, id) - # return true; - method = MethodDefn(MethodDecl( - name, - params=[ Decl(Type.SIZE, sizevar.name), - Decl(_shmemTypeType(), typevar.name), - Decl(_shmemType(ptr=1), outmemvar.name) ], - ret=Type.BOOL)) - - ifallocfails = 
StmtIf(ExprNot(rawvar)) - ifallocfails.addifstmt(StmtReturn.FALSE) - - if unsafe: - unsafe = ExprLiteral.TRUE - else: - unsafe = ExprLiteral.FALSE - method.addstmts([ - StmtDecl(Decl(_shmemIdType(), idvar.name)), - StmtDecl(Decl(_rawShmemType(ptr=1), rawvar.name), - initargs=[ ExprCall(p.createSharedMemory(), - args=[ sizevar, - typevar, - unsafe, - ExprAddrOf(idvar) ]) ]), - ifallocfails, - Whitespace.NL, - StmtExpr(ExprAssn( - ExprDeref(outmemvar), _shmemCtor(rawvar, idvar))), - StmtReturn.TRUE - ]) - return method - - # bool AllocShmem(size_t size, Type type, Shmem* outmem): - allocShmem = allocShmemMethod('AllocShmem', False) - - # bool AllocUnsafeShmem(size_t size, Type type, Shmem* outmem): - allocUnsafeShmem = allocShmemMethod('AllocUnsafeShmem', True) - - # bool DeallocShmem(Shmem& mem): - # bool ok = DestroySharedMemory(mem); - ##ifdef DEBUG - # if (!ok) { - # NS_WARNING("bad Shmem"); // or NS_RUNTIMEABORT on child side - # return false; - # } - ##endif // DEBUG - # mem.forget(); - # return ok; - deallocShmem = MethodDefn(MethodDecl( - 'DeallocShmem', - params=[ Decl(_shmemType(ref=1), memvar.name) ], - ret=Type.BOOL)) - okvar = ExprVar('ok') - - ifbad = StmtIf(ExprNot(okvar)) - badShmemActions = [] - if (self.side == 'child'): - badShmemActions.append(_fatalError('bad Shmem')); - else: - badShmemActions.append(_printWarningMessage('bad Shmem')); - badShmemActions.append(StmtReturn.FALSE); - ifbad.addifstmts(badShmemActions) - - deallocShmem.addstmts([ - StmtDecl(Decl(Type.BOOL, okvar.name), - init=ExprCall(p.destroySharedMemory(), - args=[ memvar ])), - CppDirective('ifdef', 'DEBUG'), - ifbad, - CppDirective('endif', '// DEBUG'), - StmtExpr(_shmemForget(memvar)), - StmtReturn(okvar) - ]) - - return [ Whitespace('// Methods for managing shmem\n', indent=1), - allocShmem, - Whitespace.NL, - allocUnsafeShmem, - Whitespace.NL, - deallocShmem, - Whitespace.NL ] + return methods + [removemanagee, Whitespace.NL] def genShmemCreatedHandler(self): p = 
self.protocol assert p.decl.type.isToplevel() case = StmtBlock() - rawvar = ExprVar('rawmem') - idvar = ExprVar('id') + ifstmt = StmtIf(ExprNot(ExprCall(ExprVar('ShmemCreated'), args=[self.msgvar]))) case.addstmts([ - StmtDecl(Decl(_shmemIdType(), idvar.name)), - StmtDecl(Decl(_refptr(_rawShmemType()), rawvar.name), - initargs=[ _shmemOpenExisting(self.msgvar, - ExprAddrOf(idvar)) ]) - ]) - failif = StmtIf(ExprNot(rawvar)) - failif.addifstmt(StmtReturn(_Result.PayloadError)) - - case.addstmts([ - failif, - StmtExpr(ExprCall( - ExprSelect(p.shmemMapVar(), '.', 'AddWithID'), - args=[ _refptrTake(_refptrForget(rawvar)), idvar ])), - Whitespace.NL, + ifstmt, StmtReturn(_Result.Processed) ]) + ifstmt.addifstmt(StmtReturn(_Result.PayloadError)) return case def genShmemDestroyedHandler(self): p = self.protocol assert p.decl.type.isToplevel() case = StmtBlock() - rawvar = ExprVar('rawmem') - idvar = ExprVar('id') - itervar = ExprVar('iter') - case.addstmts([ - StmtDecl(Decl(_shmemIdType(), idvar.name)), - StmtDecl(Decl(_iterType(ptr=0), itervar.name), init=ExprCall(ExprVar('PickleIterator'), - args=[ self.msgvar ])) - ]) - - failif = StmtIf(ExprNot( - ExprCall(ExprVar('IPC::ReadParam'), - args=[ ExprAddrOf(self.msgvar), ExprAddrOf(itervar), - ExprAddrOf(idvar) ]))) - failif.addifstmt(StmtReturn(_Result.PayloadError)) - + ifstmt = StmtIf(ExprNot(ExprCall(ExprVar('ShmemDestroyed'), args=[self.msgvar]))) case.addstmts([ - failif, - StmtExpr(ExprCall(ExprSelect(self.msgvar, '.', 'EndRead'), - args=[ itervar ])), - Whitespace.NL, - StmtDecl(Decl(_rawShmemType(ptr=1), rawvar.name), - init=ExprCall(p.lookupSharedMemory(), args=[ idvar ])) - ]) - - # Here we don't return an error if we failed to look the shmem up. This - # is because we don't have a way to know if it is because we failed to - # map the shmem or if the id is wrong. In the latter case it would be - # better to catch the error but the former case is legit... 
- lookupif = StmtIf(rawvar) - lookupif.addifstmt(StmtExpr(p.removeShmemId(idvar))) - lookupif.addifstmt(StmtExpr(_shmemDealloc(rawvar))) - - case.addstmts([ - lookupif, + ifstmt, StmtReturn(_Result.Processed) ]) + ifstmt.addifstmt(StmtReturn(_Result.PayloadError)) return case def makeChannelOpenedHandlers(self, actors): handlers = StmtBlock() # unpack the transport descriptor et al. @@ -4834,20 +4099,20 @@ class _GenerateProtocolActorCode(ipdl.as idexpr = ExprCall(self.protocol.registerMethod(), args=[ actorvar ]) else: idexpr = ExprCall(self.protocol.registerIDMethod(), args=[ actorvar, idexpr ]) return [ self.failIfNullActor(actorvar, errfn, msg="Error constructing actor %s" % actortype.name() + self.side.capitalize()), - StmtExpr(ExprAssn(_actorId(actorvar), idexpr)), - StmtExpr(ExprAssn(_actorManager(actorvar), ExprVar.THIS)), - StmtExpr(ExprAssn(_actorChannel(actorvar), - self.protocol.channelForSubactor())), + StmtExpr(ExprCall(ExprSelect(actorvar, '->', 'SetId'), args=[idexpr])), + StmtExpr(ExprCall(ExprSelect(actorvar, '->', 'SetManager'), args=[ExprVar.THIS])), + StmtExpr(ExprCall(ExprSelect(actorvar, '->', 'SetIPCChannel'), + args=[self.protocol.callGetChannel()])), StmtExpr(_callInsertManagedActor( self.protocol.managedVar(md.decl.type.constructedType(), self.side), actorvar)), StmtExpr(ExprAssn(_actorState(actorvar), _startState(actorproto, fq=1))) ] @@ -5086,20 +4351,20 @@ class _GenerateProtocolActorCode(ipdl.as def failIfNullActor(self, actorExpr, retOnNull=ExprLiteral.FALSE, msg=None): failif = StmtIf(ExprNot(actorExpr)) if msg: failif.addifstmt(_printWarningMessage(msg)) failif.addifstmt(StmtReturn(retOnNull)) return failif - def unregisterActor(self, actorexpr=None): - return [ StmtExpr(ExprCall(self.protocol.unregisterMethod(actorexpr), - args=[ _actorId(actorexpr) ])), - StmtExpr(ExprAssn(_actorId(actorexpr), _FREED_ACTOR_ID)) ] + def unregisterActor(self): + return [ StmtExpr(ExprCall(self.protocol.unregisterMethod(), + args=[ _actorId() ])), + 
StmtExpr(ExprCall(ExprVar('SetId'), args=[_FREED_ACTOR_ID])) ] def makeMessage(self, md, errfn, fromActor=None): msgvar = self.msgvar routingId = self.protocol.routingId(fromActor) this = None if md.decl.type.isDtor(): this = md.actorDecl().var() stmts = ([ StmtDecl(Decl(Type('IPC::Message', ptr=1), msgvar.name), @@ -5261,35 +4526,35 @@ class _GenerateProtocolActorCode(ipdl.as sendok, ([ Whitespace.NL, self.logMessage(md, msgexpr, 'Sending ', actor), self.profilerLabel(md) ] + self.transition(md, 'out', actor) + [ Whitespace.NL, StmtDecl(Decl(Type.BOOL, sendok.name), init=ExprCall( - ExprSelect(self.protocol.channelVar(actor), - self.protocol.channelSel(), 'Send'), + ExprSelect(self.protocol.callGetChannel(actor), + '->', 'Send'), args=[ msgexpr ])) ]) ) def sendBlocking(self, md, msgexpr, replyexpr, actor=None): sendok = ExprVar('sendok__') return ( sendok, ([ Whitespace.NL, self.logMessage(md, msgexpr, 'Sending ', actor), self.profilerLabel(md) ] + self.transition(md, 'out', actor) + [ Whitespace.NL, StmtDecl( Decl(Type.BOOL, sendok.name), - init=ExprCall(ExprSelect(self.protocol.channelVar(actor), - self.protocol.channelSel(), + init=ExprCall(ExprSelect(self.protocol.callGetChannel(actor), + '->', _sendPrefix(md.decl.type)), args=[ msgexpr, ExprAddrOf(replyexpr) ])) ]) ) def callAllocActor(self, md, retsems, side): return ExprCall( _allocMethod(md.decl.type.constructedType(), side),
--- a/js/ipc/WrapperOwner.h +++ b/js/ipc/WrapperOwner.h @@ -15,18 +15,17 @@ #include "js/Proxy.h" namespace mozilla { namespace jsipc { class WrapperOwner : public virtual JavaScriptShared { public: - typedef mozilla::ipc::IProtocolManager< - mozilla::ipc::IProtocol>::ActorDestroyReason + typedef mozilla::ipc::IProtocol::ActorDestroyReason ActorDestroyReason; WrapperOwner(); bool init(); // Standard internal methods. // (The traps should be in the same order like js/Proxy.h) bool getOwnPropertyDescriptor(JSContext* cx, JS::HandleObject proxy, JS::HandleId id,
--- a/js/public/Class.h +++ b/js/public/Class.h @@ -549,36 +549,38 @@ struct ClassSpec ClassObjectCreationOp createPrototype_; const JSFunctionSpec* constructorFunctions_; const JSPropertySpec* constructorProperties_; const JSFunctionSpec* prototypeFunctions_; const JSPropertySpec* prototypeProperties_; FinishClassInitOp finishInit_; uintptr_t flags; - static const size_t ParentKeyWidth = JSCLASS_CACHED_PROTO_WIDTH; + static const size_t ProtoKeyWidth = JSCLASS_CACHED_PROTO_WIDTH; - static const uintptr_t ParentKeyMask = (1 << ParentKeyWidth) - 1; - static const uintptr_t DontDefineConstructor = 1 << ParentKeyWidth; - static const uintptr_t IsDelegated = 1 << (ParentKeyWidth + 1); + static const uintptr_t ProtoKeyMask = (1 << ProtoKeyWidth) - 1; + static const uintptr_t DontDefineConstructor = 1 << ProtoKeyWidth; + static const uintptr_t IsDelegated = 1 << (ProtoKeyWidth + 1); bool defined() const { return !!createConstructor_; } bool delegated() const { return (flags & IsDelegated); } - bool dependent() const { + // The ProtoKey this class inherits from. + JSProtoKey inheritanceProtoKey() const { MOZ_ASSERT(defined()); - return (flags & ParentKeyMask); - } + static_assert(JSProto_Null == 0, "zeroed key must be null"); - JSProtoKey parentKey() const { - static_assert(JSProto_Null == 0, "zeroed key must be null"); - return JSProtoKey(flags & ParentKeyMask); + // Default: Inherit from Object. + if (!(flags & ProtoKeyMask)) + return JSProto_Object; + + return JSProtoKey(flags & ProtoKeyMask); } bool shouldDefineConstructor() const { MOZ_ASSERT(defined()); return !(flags & DontDefineConstructor); } const ClassSpec* delegatedClassSpec() const { @@ -860,18 +862,18 @@ struct Class bool isWrappedNative() const { return flags & JSCLASS_IS_WRAPPED_NATIVE; } static size_t offsetOfFlags() { return offsetof(Class, flags); } bool specDefined() const { return spec ? spec->defined() : false; } - bool specDependent() const { return spec ? 
spec->dependent() : false; } - JSProtoKey specParentKey() const { return spec ? spec->parentKey() : JSProto_Null; } + JSProtoKey specInheritanceProtoKey() + const { return spec ? spec->inheritanceProtoKey() : JSProto_Null; } bool specShouldDefineConstructor() const { return spec ? spec->shouldDefineConstructor() : true; } ClassObjectCreationOp specCreateConstructorHook() const { return spec ? spec->createConstructorHook() : nullptr; } ClassObjectCreationOp specCreatePrototypeHook() const { return spec ? spec->createPrototypeHook() : nullptr; } const JSFunctionSpec* specConstructorFunctions() const { return spec ? spec->constructorFunctions() : nullptr; }
deleted file mode 100644 --- a/js/src/builtin/AsyncFunctions.js +++ /dev/null @@ -1,47 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ - -// Called when creating async function. -// See the comment for js::CreateAsyncFunction what unwrapped, wrapped and the -// return value are. -function AsyncFunction_wrap(unwrapped) { - var wrapper = function() { - // The try block is required to handle throws in default arguments properly. - try { - return AsyncFunction_start(callFunction(std_Function_apply, unwrapped, this, arguments)); - } catch (e) { - var promiseCtor = GetBuiltinConstructor('Promise'); - return callFunction(Promise_static_reject, promiseCtor, e); - } - }; - return CreateAsyncFunction(wrapper, unwrapped); -} - -function AsyncFunction_start(generator) { - return AsyncFunction_resume(generator, undefined, generator.next); -} - -function AsyncFunction_resume(gen, v, method) { - var promiseCtor = GetBuiltinConstructor('Promise'); - let result; - try { - // get back into async function, run to next await point - result = callFunction(method, gen, v); - } catch (exc) { - // The async function itself failed. - return callFunction(Promise_static_reject, promiseCtor, exc); - } - - if (result.done) - return callFunction(Promise_static_resolve, promiseCtor, result.value); - - // If we get here, `await` occurred. `gen` is paused at a yield point. - return callFunction(Promise_then, - callFunction(Promise_static_resolve, promiseCtor, result.value), - function(val) { - return AsyncFunction_resume(gen, val, gen.next); - }, function (err) { - return AsyncFunction_resume(gen, err, gen.throw); - }); -}
--- a/js/src/builtin/Promise.cpp +++ b/js/src/builtin/Promise.cpp @@ -9,16 +9,17 @@ #include "mozilla/Atomics.h" #include "mozilla/TimeStamp.h" #include "jscntxt.h" #include "gc/Heap.h" #include "js/Debug.h" +#include "vm/AsyncFunction.h" #include "vm/SelfHosting.h" #include "jsobjinlines.h" #include "vm/NativeObject-inl.h" using namespace js; @@ -27,16 +28,18 @@ MillisecondsSinceStartup() { auto now = mozilla::TimeStamp::Now(); bool ignored; return (now - mozilla::TimeStamp::ProcessCreation(ignored)).ToMilliseconds(); } #define PROMISE_HANDLER_IDENTITY 0 #define PROMISE_HANDLER_THROWER 1 +#define PROMISE_HANDLER_AWAIT_FULFILLED 2 +#define PROMISE_HANDLER_AWAIT_REJECTED 3 enum ResolutionMode { ResolveMode, RejectMode }; enum ResolveFunctionSlots { ResolveFunctionSlot_Promise = 0, @@ -165,16 +168,17 @@ enum ReactionRecordSlots { ReactionRecordSlot_Flags, ReactionRecordSlot_HandlerArg, ReactionRecordSlots, }; #define REACTION_FLAG_RESOLVED 0x1 #define REACTION_FLAG_FULFILLED 0x2 #define REACTION_FLAG_IGNORE_DEFAULT_RESOLUTION 0x4 +#define REACTION_FLAG_AWAIT 0x8 // ES2016, 25.4.1.2. 
class PromiseReactionRecord : public NativeObject { public: static const Class class_; JSObject* promise() { return getFixedSlot(ReactionRecordSlot_Promise).toObjectOrNull(); } @@ -191,16 +195,25 @@ class PromiseReactionRecord : public Nat int32_t flags = this->flags(); MOZ_ASSERT(!(flags & REACTION_FLAG_RESOLVED)); MOZ_ASSERT(state != JS::PromiseState::Pending, "Can't revert a reaction to pending."); flags |= REACTION_FLAG_RESOLVED; if (state == JS::PromiseState::Fulfilled) flags |= REACTION_FLAG_FULFILLED; setFixedSlot(ReactionRecordSlot_Flags, Int32Value(flags)); } + void setIsAwait() { + int32_t flags = this->flags(); + flags |= REACTION_FLAG_AWAIT; + setFixedSlot(ReactionRecordSlot_Flags, Int32Value(flags)); + } + bool isAwait() { + int32_t flags = this->flags(); + return flags & REACTION_FLAG_AWAIT; + } Value handler() { MOZ_ASSERT(targetState() != JS::PromiseState::Pending); uint32_t slot = targetState() == JS::PromiseState::Fulfilled ? ReactionRecordSlot_OnFulfilled : ReactionRecordSlot_OnRejected; return getFixedSlot(slot); } Value handlerArg() { @@ -620,16 +633,28 @@ static MOZ_MUST_USE PromiseObject* Creat bool protoIsWrapped = false, bool informDebugger = true); enum GetCapabilitiesExecutorSlots { GetCapabilitiesExecutorSlots_Resolve, GetCapabilitiesExecutorSlots_Reject }; +static MOZ_MUST_USE PromiseObject* +CreatePromiseObjectWithDefaultResolution(JSContext* cx) +{ + Rooted<PromiseObject*> promise(cx, CreatePromiseObjectInternal(cx)); + if (!promise) + return nullptr; + + AddPromiseFlags(*promise, PROMISE_FLAG_DEFAULT_RESOLVE_FUNCTION | + PROMISE_FLAG_DEFAULT_REJECT_FUNCTION); + return promise; +} + // ES2016, 25.4.1.5. 
static MOZ_MUST_USE bool NewPromiseCapability(JSContext* cx, HandleObject C, MutableHandleObject promise, MutableHandleObject resolve, MutableHandleObject reject, bool canOmitResolutionFunctions) { RootedValue cVal(cx, ObjectValue(*C)); @@ -644,22 +669,19 @@ NewPromiseCapability(JSContext* cx, Hand // creating and calling the executor function and instead return a Promise // marked as having default resolve/reject functions. // // This can't be used in Promise.all and Promise.race because we have to // pass the reject (and resolve, in the race case) function to thenables // in the list passed to all/race, which (potentially) means exposing them // to content. if (canOmitResolutionFunctions && IsNativeFunction(cVal, PromiseConstructor)) { - promise.set(CreatePromiseObjectInternal(cx)); + promise.set(CreatePromiseObjectWithDefaultResolution(cx)); if (!promise) return false; - AddPromiseFlags(promise->as<PromiseObject>(), PROMISE_FLAG_DEFAULT_RESOLVE_FUNCTION | - PROMISE_FLAG_DEFAULT_REJECT_FUNCTION); - return true; } // Step 3 (omitted). // Step 4. 
RootedAtom funName(cx, cx->names().empty); RootedFunction executor(cx, NewNativeFunction(cx, GetCapabilitiesExecutor, 2, funName, @@ -791,16 +813,45 @@ TriggerPromiseReactions(JSContext* cx, H reaction = &reactionsList->getDenseElement(i).toObject(); if (!EnqueuePromiseReactionJob(cx, reaction, valueOrReason, state)) return false; } return true; } +static MOZ_MUST_USE bool +AwaitPromiseReactionJob(JSContext* cx, Handle<PromiseReactionRecord*> reaction, + MutableHandleValue rval) +{ + MOZ_ASSERT(reaction->isAwait()); + + RootedValue handlerVal(cx, reaction->handler()); + RootedValue argument(cx, reaction->handlerArg()); + Rooted<PromiseObject*> resultPromise(cx, &reaction->promise()->as<PromiseObject>()); + RootedValue generatorVal(cx, resultPromise->getFixedSlot(PromiseSlot_AwaitGenerator)); + + int32_t handlerNum = int32_t(handlerVal.toNumber()); + MOZ_ASSERT(handlerNum == PROMISE_HANDLER_AWAIT_FULFILLED + || handlerNum == PROMISE_HANDLER_AWAIT_REJECTED); + + // Await's handlers don't return a value, nor throw exception. + // They fail only on OOM. + if (handlerNum == PROMISE_HANDLER_AWAIT_FULFILLED) { + if (!AsyncFunctionAwaitedFulfilled(cx, resultPromise, generatorVal, argument)) + return false; + } else { + if (!AsyncFunctionAwaitedRejected(cx, resultPromise, generatorVal, argument)) + return false; + } + + rval.setUndefined(); + return true; +} + // ES2016, 25.4.2.1. /** * Callback triggering the fulfill/reject reaction for a resolved Promise, * to be invoked by the embedding during its processing of the Promise job * queue. * * See http://www.ecma-international.org/ecma-262/7.0/index.html#sec-jobs-and-job-queues * @@ -827,16 +878,18 @@ PromiseReactionJob(JSContext* cx, unsign mozilla::Maybe<AutoCompartment> ac; if (IsWrapper(reactionObj)) { reactionObj = UncheckedUnwrap(reactionObj); ac.emplace(cx, reactionObj); } // Steps 1-2. 
Rooted<PromiseReactionRecord*> reaction(cx, &reactionObj->as<PromiseReactionRecord>()); + if (reaction->isAwait()) + return AwaitPromiseReactionJob(cx, reaction, args.rval()); // Step 3. RootedValue handlerVal(cx, reaction->handler()); RootedValue argument(cx, reaction->handlerArg()); RootedValue handlerResult(cx); ResolutionMode resolutionMode = ResolveMode; @@ -2050,16 +2103,100 @@ js::OriginalPromiseThen(JSContext* cx, H // Step 5. if (!PerformPromiseThen(cx, promise, onFulfilled, onRejected, resultPromise, resolve, reject)) return nullptr; return resultPromise; } +static MOZ_MUST_USE bool PerformPromiseThenWithReaction(JSContext* cx, + Handle<PromiseObject*> promise, + Handle<PromiseReactionRecord*> reaction); + +// Some async/await functions are implemented here instead of +// js/src/builtin/AsyncFunction.cpp, to call Promise internal functions. + +// Async Functions proposal 1.1.8 and 1.2.14 step 1. +MOZ_MUST_USE PromiseObject* +js::CreatePromiseObjectForAsync(JSContext* cx, HandleValue generatorVal) +{ + // Step 1. + Rooted<PromiseObject*> promise(cx, CreatePromiseObjectWithDefaultResolution(cx)); + if (!promise) + return nullptr; + + AddPromiseFlags(*promise, PROMISE_FLAG_ASYNC); + promise->setFixedSlot(PromiseSlot_AwaitGenerator, generatorVal); + return promise; +} + +// Async Functions proposal 2.2 steps 3.f, 3.g. +MOZ_MUST_USE bool +js::AsyncFunctionThrown(JSContext* cx, Handle<PromiseObject*> resultPromise) +{ + // Step 3.f. + RootedValue exc(cx); + if (!GetAndClearException(cx, &exc)) + return false; + + if (!RejectMaybeWrappedPromise(cx, resultPromise, exc)) + return false; + + // Step 3.g. + return true; +} + +// Async Functions proposal 2.2 steps 3.d-e, 3.g. +MOZ_MUST_USE bool +js::AsyncFunctionReturned(JSContext* cx, Handle<PromiseObject*> resultPromise, HandleValue value) +{ + // Steps 3.d-e. + if (!ResolvePromiseInternal(cx, resultPromise, value)) + return false; + + // Step 3.g. + return true; +} + +// Async Functions proposal 2.3 steps 2-8. 
+MOZ_MUST_USE bool +js::AsyncFunctionAwait(JSContext* cx, Handle<PromiseObject*> resultPromise, HandleValue value) +{ + // Step 2. + Rooted<PromiseObject*> promise(cx, CreatePromiseObjectWithDefaultResolution(cx)); + if (!promise) + return false; + + // Steps 3. + if (!ResolvePromiseInternal(cx, promise, value)) + return false; + + // Steps 4-5. + RootedValue onFulfilled(cx, Int32Value(PROMISE_HANDLER_AWAIT_FULFILLED)); + RootedValue onRejected(cx, Int32Value(PROMISE_HANDLER_AWAIT_REJECTED)); + + RootedObject incumbentGlobal(cx); + if (!GetObjectFromIncumbentGlobal(cx, &incumbentGlobal)) + return false; + + // Steps 6-7. + Rooted<PromiseReactionRecord*> reaction(cx, NewReactionRecord(cx, resultPromise, + onFulfilled, onRejected, + nullptr, nullptr, + incumbentGlobal)); + if (!reaction) + return false; + + reaction->setIsAwait(); + + // Step 8. + return PerformPromiseThenWithReaction(cx, promise, reaction); +} + // ES2016, 25.4.5.3. bool js::Promise_then(JSContext* cx, unsigned argc, Value* vp) { CallArgs args = CallArgsFromVp(argc, vp); // Step 1. RootedValue promiseVal(cx, args.thisv()); @@ -2128,16 +2265,23 @@ PerformPromiseThen(JSContext* cx, Handle // Step 7. Rooted<PromiseReactionRecord*> reaction(cx, NewReactionRecord(cx, resultPromise, onFulfilled, onRejected, resolve, reject, incumbentGlobal)); if (!reaction) return false; + return PerformPromiseThenWithReaction(cx, promise, reaction); +} + +static MOZ_MUST_USE bool +PerformPromiseThenWithReaction(JSContext* cx, Handle<PromiseObject*> promise, + Handle<PromiseReactionRecord*> reaction) +{ JS::PromiseState state = promise->state(); int32_t flags = promise->getFixedSlot(PromiseSlot_Flags).toInt32(); if (state == JS::PromiseState::Pending) { // Steps 5,6 (reordered). // Instead of creating separate reaction records for fulfillment and // rejection, we create a combined record. All places we use the record // can handle that. 
if (!AddPromiseReaction(cx, promise, reaction)) @@ -2440,16 +2584,17 @@ PromiseObject::dependentPromises(JSConte } return true; } bool PromiseObject::resolve(JSContext* cx, HandleValue resolutionValue) { + MOZ_ASSERT(!PromiseHasAnyFlag(*this, PROMISE_FLAG_ASYNC)); if (state() != JS::PromiseState::Pending) return true; RootedObject resolveFun(cx, GetResolveFunctionFromPromise(this)); RootedValue funVal(cx, ObjectValue(*resolveFun)); // For xray'd Promises, the resolve fun may have been created in another // compartment. For the call below to work in that case, wrap the @@ -2462,16 +2607,17 @@ PromiseObject::resolve(JSContext* cx, Ha RootedValue dummy(cx); return Call(cx, funVal, UndefinedHandleValue, args, &dummy); } bool PromiseObject::reject(JSContext* cx, HandleValue rejectionValue) { + MOZ_ASSERT(!PromiseHasAnyFlag(*this, PROMISE_FLAG_ASYNC)); if (state() != JS::PromiseState::Pending) return true; RootedValue funVal(cx, this->getFixedSlot(PromiseSlot_RejectFunction)); MOZ_ASSERT(IsCallable(funVal)); FixedInvokeArgs<1> args(cx); args[0].set(rejectionValue);
--- a/js/src/builtin/Promise.h +++ b/js/src/builtin/Promise.h @@ -11,30 +11,32 @@ #include "vm/NativeObject.h" namespace js { enum PromiseSlots { PromiseSlot_Flags = 0, PromiseSlot_ReactionsOrResult, PromiseSlot_RejectFunction, + PromiseSlot_AwaitGenerator = PromiseSlot_RejectFunction, PromiseSlot_AllocationSite, PromiseSlot_ResolutionSite, PromiseSlot_AllocationTime, PromiseSlot_ResolutionTime, PromiseSlot_Id, PromiseSlots, }; #define PROMISE_FLAG_RESOLVED 0x1 #define PROMISE_FLAG_FULFILLED 0x2 #define PROMISE_FLAG_HANDLED 0x4 #define PROMISE_FLAG_REPORTED 0x8 #define PROMISE_FLAG_DEFAULT_RESOLVE_FUNCTION 0x10 #define PROMISE_FLAG_DEFAULT_REJECT_FUNCTION 0x20 +#define PROMISE_FLAG_ASYNC 0x40 class AutoSetNewObjectMetadata; class PromiseObject : public NativeObject { public: static const unsigned RESERVED_SLOTS = PromiseSlots; static const Class class_; @@ -112,16 +114,28 @@ EnqueuePromiseReactions(JSContext* cx, H MOZ_MUST_USE JSObject* GetWaitForAllPromise(JSContext* cx, const JS::AutoObjectVector& promises); MOZ_MUST_USE JSObject* OriginalPromiseThen(JSContext* cx, Handle<PromiseObject*> promise, HandleValue onFulfilled, HandleValue onRejected); +MOZ_MUST_USE PromiseObject* +CreatePromiseObjectForAsync(JSContext* cx, HandleValue generatorVal); + +MOZ_MUST_USE bool +AsyncFunctionReturned(JSContext* cx, Handle<PromiseObject*> resultPromise, HandleValue value); + +MOZ_MUST_USE bool +AsyncFunctionThrown(JSContext* cx, Handle<PromiseObject*> resultPromise); + +MOZ_MUST_USE bool +AsyncFunctionAwait(JSContext* cx, Handle<PromiseObject*> resultPromise, HandleValue value); + /** * A PromiseTask represents a task that can be dispatched to a helper thread * (via StartPromiseTask), executed (by implementing PromiseTask::execute()), * and then resolved back on the original JSContext owner thread. * Because it contains a PersistentRooted, a PromiseTask will only be destroyed * on the JSContext's owner thread. */ class PromiseTask : public JS::AsyncTask
--- a/js/src/builtin/SelfHostingDefines.h +++ b/js/src/builtin/SelfHostingDefines.h @@ -88,19 +88,16 @@ #define REGEXP_FLAGS_SLOT 2 #define REGEXP_IGNORECASE_FLAG 0x01 #define REGEXP_GLOBAL_FLAG 0x02 #define REGEXP_MULTILINE_FLAG 0x04 #define REGEXP_STICKY_FLAG 0x08 #define REGEXP_UNICODE_FLAG 0x10 -#define ASYNC_WRAPPED_SLOT 1 -#define ASYNC_UNWRAPPED_SLOT 1 - #define MODULE_OBJECT_ENVIRONMENT_SLOT 2 #define MODULE_STATE_FAILED 0 #define MODULE_STATE_PARSED 1 #define MODULE_STATE_INSTANTIATED 2 #define MODULE_STATE_EVALUATED 3 #endif
--- a/js/src/frontend/BytecodeEmitter.cpp +++ b/js/src/frontend/BytecodeEmitter.cpp @@ -7130,59 +7130,49 @@ BytecodeEmitter::emitAsyncWrapperLambda( if (!emitIndex32(JSOP_LAMBDA, index)) return false; } return true; } bool -BytecodeEmitter::emitAsyncWrapper(unsigned index, bool needsHomeObject, bool isArrow) { +BytecodeEmitter::emitAsyncWrapper(unsigned index, bool needsHomeObject, bool isArrow) +{ // needsHomeObject can be true for propertyList for extended class. // In that case push both unwrapped and wrapped function, in order to // initialize home object of unwrapped function, and set wrapped function // as a property. // // lambda // unwrapped - // getintrinsic // unwrapped AsyncFunction_wrap - // undefined // unwrapped AsyncFunction_wrap undefined - // dupat 2 // unwrapped AsyncFunction_wrap undefined unwrapped - // call 1 // unwrapped wrapped + // dup // unwrapped unwrapped + // toasync // unwrapped wrapped // // Emitted code is surrounded by the following code. // // // classObj classCtor classProto // (emitted code) // classObj classCtor classProto unwrapped wrapped // swap // classObj classCtor classProto wrapped unwrapped // inithomeobject 1 // classObj classCtor classProto wrapped unwrapped // // initialize the home object of unwrapped // // with classProto here // pop // classObj classCtor classProto wrapped // inithiddenprop // classObj classCtor classProto wrapped // // initialize the property of the classProto // // with wrapped function here // pop // classObj classCtor classProto // // needsHomeObject is false for other cases, push wrapped function only. 
- if (needsHomeObject) { - if (!emitAsyncWrapperLambda(index, isArrow)) - return false; - } - if (!emitAtomOp(cx->names().AsyncFunction_wrap, JSOP_GETINTRINSIC)) - return false; - if (!emit1(JSOP_UNDEFINED)) + if (!emitAsyncWrapperLambda(index, isArrow)) return false; if (needsHomeObject) { - if (!emitDupAt(2)) - return false; - } else { - if (!emitAsyncWrapperLambda(index, isArrow)) - return false; - } - if (!emitCall(JSOP_CALL, 1)) + if (!emit1(JSOP_DUP)) + return false; + } + if (!emit1(JSOP_TOASYNC)) return false; return true; } bool BytecodeEmitter::emitDo(ParseNode* pn) { /* Emit an annotated nop so IonBuilder can recognize the 'do' loop. */
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/auto-regress/bug1315943.js @@ -0,0 +1,11 @@ +var s = "{}"; +for (var i = 0; i < 21; i++) s += s; +var g = newGlobal(); +var dbg = Debugger(g); +dbg.onDebuggerStatement = function(frame) { + var s = frame.eval("f").return.script; +}; +g.eval("line0 = Error().lineNumber;\n" + "debugger;\n" + // line0 + 1 + "function f(i) {\n" + // line0 + 2 + s + // line0 + 3 ... line0 + where - 2 + "}\n");
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/debug/Script-format-01.js @@ -0,0 +1,19 @@ +// Tests that JavaScript scripts have a "js" format and wasm scripts have a +// "wasm" format. + +var g = newGlobal(); +var dbg = new Debugger(g); + +var gotScript; +dbg.onNewScript = (script) => { + gotScript = script; +}; + +g.eval(`(() => {})()`); +assertEq(gotScript.format, "js"); + +if (!wasmIsSupported()) + quit(); + +g.eval(`o = new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary('(module (func) (export "" 0))')));`); +assertEq(gotScript.format, "wasm");
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/debug/bug1308578.js @@ -0,0 +1,10 @@ +// |jit-test| error: ReferenceError + +g = newGlobal(); +g.parent = this; +g.eval("new Debugger(parent).onExceptionUnwind = function () {}"); +a = new class extends Array { + constructor() { + for (;; ([] = p)) {} + } +}
--- a/js/src/jit-test/tests/ion/dce-with-rinstructions.js +++ b/js/src/jit-test/tests/ion/dce-with-rinstructions.js @@ -1,13 +1,14 @@ setJitCompilerOption("baseline.warmup.trigger", 10); setJitCompilerOption("ion.warmup.trigger", 20); var i; var config = getBuildConfiguration(); +var max = 200; // Check that we are able to remove the operation inside recover test functions (denoted by "rop..."), // when we inline the first version of uceFault, and ensure that the bailout is correct // when uceFault is replaced (which cause an invalidation bailout) var uceFault = function (i) { if (i > 98) uceFault = function (i) { return true; }; @@ -1286,33 +1287,22 @@ function rhypot_object_4args(i) { assertEq(x, Math.sqrt(i * i + (i + 1) * (i + 1) + (i + 2) * (i + 2) + (i + 3) * (i + 3))); assertRecoveredOnBailout(x, false); return i; } var uceFault_random = eval(uneval(uceFault).replace('uceFault', 'uceFault_random')); function rrandom(i) { // setRNGState() exists only in debug builds + if(config.debug) setRNGState(2, 1+i); - if(config.debug) { - setRNGState(2, 0); - var x = Math.random(); - if (uceFault_random(i) || uceFault_random(i)) { - setRNGState(2, 0); - assertEq(x, Math.random()); - } - assertRecoveredOnBailout(x, true); - } else { - var x = Math.random(); - if (uceFault_random(i) || uceFault_random(i)) { - Math.random(); - } - assertRecoveredOnBailout(x, true); - } - + var x = Math.random(); + if (uceFault_random(i) || uceFault_random(i)) + assertEq(x, config.debug ? 
setRNGState(2, 1+i) || Math.random() : x); + assertRecoveredOnBailout(x, true); return i; } var uceFault_sin_number = eval(uneval(uceFault).replace('uceFault', 'uceFault_sin_number')); function rsin_number(i) { var x = Math.sin(i); if (uceFault_sin_number(i) || uceFault_sin_number(i)) assertEq(x, Math.sin(i)); @@ -1348,17 +1338,18 @@ function rlog_object(i) { var x = Math.log(o); /* Evaluated with t == i, not t == 1000 */ t = 1000; if (uceFault_log_object(i) || uceFault_log_object(i)) assertEq(x, Math.log(99) /* log(99) */); assertRecoveredOnBailout(x, false); return i; } -for (i = 0; i < 100; i++) { +for (j = 100 - max; j < 100; j++) { + let i = j < 2 ? (Math.abs(j) % 50) + 2 : j; rbitnot_number(i); rbitnot_object(i); rbitand_number(i); rbitand_object(i); rbitor_number(i); rbitor_object(i); rbitxor_number(i); rbitxor_object(i);
--- a/js/src/jit-test/tests/ion/recover-objects.js +++ b/js/src/jit-test/tests/ion/recover-objects.js @@ -1,29 +1,35 @@ // |jit-test| test-join=--no-unboxed-objects; --ion-pgo=on // // Unboxed object optimization might not trigger in all cases, thus we ensure // that Scalar Replacement optimization is working well independently of the // object representation. +var max = 200; + // Ion eager fails the test below because we have not yet created any // template object in baseline before running the content of the top-level // function. -if (getJitCompilerOptions()["ion.warmup.trigger"] <= 90) - setJitCompilerOption("ion.warmup.trigger", 90); +if (getJitCompilerOptions()["ion.warmup.trigger"] <= max - 10) + setJitCompilerOption("ion.warmup.trigger", max - 10); + +// Force Inlining heuristics to always inline the functions which have the same +// number of use count. +setJitCompilerOption("ion.warmup.trigger", getJitCompilerOptions()["ion.warmup.trigger"]); // This test checks that we are able to remove the getprop & setprop with scalar // replacement, so we should not force inline caches, as this would skip the // generation of getprop & setprop instructions. if (getJitCompilerOptions()["ion.forceinlineCaches"]) setJitCompilerOption("ion.forceinlineCaches", 0); function resumeHere() {} var uceFault = function (i) { - if (i > 98) + if (i > max - 2) uceFault = function (i) { return true; }; return false; }; // Without "use script" in the inner function, the arguments might be // observable. function inline_notSoEmpty1(a, b, c, d) { @@ -79,17 +85,17 @@ function notSoEmpty2(i) { // This can only be recovered on bailout iff either we have type // information for the property access in the branch, or the branch is // removed before scalar replacement. assertRecoveredOnBailout(res, true); } // Check that we can recover objects with their content. 
var argFault_observeArg = function (i) { - if (i > 98) + if (i > max - 2) return inline_observeArg.arguments[0]; return { test : i }; }; function inline_observeArg(obj, i) { return argFault_observeArg(i); } function observeArg(i) { var obj = { test: i }; @@ -130,36 +136,36 @@ function withinIf(i) { obj = undefined; } assertEq(x, i); } // Check case where one successor can have multiple times the same predecessor. function unknownLoad(i) { var obj = { foo: i }; + // Unknown properties are inlined as undefined. assertEq(obj.bar, undefined); - // Unknown properties are using GetPropertyCache. - assertRecoveredOnBailout(obj, false); + assertRecoveredOnBailout(obj, true); } // Check with dynamic slots. function dynamicSlots(i) { var obj = { p0: i + 0, p1: i + 1, p2: i + 2, p3: i + 3, p4: i + 4, p5: i + 5, p6: i + 6, p7: i + 7, p8: i + 8, p9: i + 9, p10: i + 10, p11: i + 11, p12: i + 12, p13: i + 13, p14: i + 14, p15: i + 15, p16: i + 16, p17: i + 17, p18: i + 18, p19: i + 19, p20: i + 20, p21: i + 21, p22: i + 22, p23: i + 23, p24: i + 24, p25: i + 25, p26: i + 26, p27: i + 27, p28: i + 28, p29: i + 29, p30: i + 30, p31: i + 31, p32: i + 32, p33: i + 33, p34: i + 34, p35: i + 35, p36: i + 36, p37: i + 37, p38: i + 38, p39: i + 39, p40: i + 40, p41: i + 41, p42: i + 42, p43: i + 43, p44: i + 44, p45: i + 45, p46: i + 46, p47: i + 47, p48: i + 48, p49: i + 49, p50: i + 50 }; // Add a function call to capture a resumepoint at the end of the call or // inside the inlined block, such as the bailout does not rewind to the // beginning of the function. resumeHere(); bailout(); assertEq(obj.p0 + obj.p10 + obj.p20 + obj.p30 + obj.p40, 5 * i + 100); - assertRecoveredOnBailout(obj, true); + assertRecoveredOnBailout(obj, false); } // Check that we can correctly recover allocations of new objects. 
function Point(x, y) { this.x = x; this.y = y; } @@ -167,17 +173,17 @@ function Point(x, y) function createThisWithTemplate(i) { var p = new Point(i - 1, i + 1); bailout(); assertEq(p.y - p.x, 2); assertRecoveredOnBailout(p, true); } -for (var i = 0; i < 100; i++) { +for (var i = 0; i < max; i++) { notSoEmpty1(i); notSoEmpty2(i); observeArg(i); complexPhi(i); withinIf(i); unknownLoad(i); dynamicSlots(i); createThisWithTemplate(i);
rename from js/src/jit-test/tests/self-test/assertRecoveredOnBailout.js rename to js/src/jit-test/tests/self-test/assertRecoveredOnBailout-0.js
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/self-test/assertRecoveredOnBailout-1.js @@ -0,0 +1,32 @@ +// |jit-test| crash + +var opts = getJitCompilerOptions(); +if (!opts['ion.enable'] || !opts['baseline.enable'] || + opts["ion.forceinlineCaches"] || opts["ion.check-range-analysis"]) +{ + crash("Cannot test assertRecoveredOnBailout"); +} + +function g() { + return inIon(); +} + +// Wait until IonMonkey compilation finished. +while(!(res = g())); + +// Check that we entered Ion succesfully. +if (res !== true) + crash("Cannot enter IonMonkey"); + +// Test that assertRecoveredOnBailout fails as expected. +function f () { + var o = {}; + assertRecoveredOnBailout(o, false); + return inIon(); +} + +// Wait until IonMonkey compilation finished. +while(!(res = f())); + +// Ensure that we entered Ion. +assertEq(res, true);
--- a/js/src/jit-test/tests/wasm/js-reexport.js +++ b/js/src/jit-test/tests/wasm/js-reexport.js @@ -9,45 +9,55 @@ const Table = WebAssembly.Table; function accum(...args) { var sum = 0; for (var i = 0; i < args.length; i++) sum += args[i]; return sum; } var e = wasmEvalText(`(module - (import $a "" "a" (param i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32)) - (import $b "" "b" (param f32 f32 f32 f32 f32 f32 f32 f32 f32 f32) (result f32)) - (import $c "" "c" (param f64 f64 f64 f64 f64 f64 f64 f64 f64 f64) (result f64)) - (import $d "" "d" (param i32 f32 f64 i32 f32 f64 i32 f32 f64 i32) (result f64)) + (import $a "" "a" (param i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32 i32) (result i32)) + (import $b "" "b" (param f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32 f32) (result f32)) + (import $c "" "c" (param f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64 f64) (result f64)) + (import $d "" "d" (param i32 f32 f64 i32 f32 f64 i32 f32 f64 i32 f32 f64 i32 f32 f64 i32 f32 f64 i32 f32) (result f64)) (func (export "a") (result i32) i32.const 1 i32.const 2 i32.const 3 i32.const 4 i32.const 5 i32.const 6 i32.const 7 i32.const 8 i32.const 9 i32.const 10 + i32.const 11 i32.const 12 i32.const 13 i32.const 14 i32.const 15 + i32.const 16 i32.const 17 i32.const 18 i32.const 19 i32.const 20 call $a ) (func (export "b") (result f32) f32.const 1.1 f32.const 2.1 f32.const 3.1 f32.const 4.1 f32.const 5.1 f32.const 6.1 f32.const 7.1 f32.const 8.1 f32.const 9.1 f32.const 10.1 + f32.const 11.1 f32.const 12.1 f32.const 13.1 f32.const 14.1 f32.const 15.1 + f32.const 16.1 f32.const 17.1 f32.const 18.1 f32.const 19.1 f32.const 20.1 call $b ) (func (export "c") (result f64) f64.const 1.2 f64.const 2.2 f64.const 3.2 f64.const 4.2 f64.const 5.2 f64.const 6.2 f64.const 7.2 f64.const 8.2 f64.const 9.2 f64.const 10.2 + f64.const 11.2 f64.const 12.2 f64.const 13.2 f64.const 14.2 f64.const 15.2 + f64.const 
16.2 f64.const 17.2 f64.const 18.2 f64.const 19.2 f64.const 20.2 call $c ) (func (export "d") (result f64) - i32.const 1 f32.const 2.1 f64.const 3.1 i32.const 4 f32.const 5.1 - f64.const 6.1 i32.const 7 f32.const 8.3 f64.const 9.3 i32.const 10 + i32.const 1 f32.const 2.3 f64.const 3.3 i32.const 4 f32.const 5.3 + f64.const 6.3 i32.const 7 f32.const 8.3 f64.const 9.3 i32.const 10 + f32.const 11.3 f64.const 12.3 i32.const 13 f32.const 14.3 f64.const 15.3 + i32.const 16 f32.const 17.3 f64.const 18.3 i32.const 19 f32.const 20.3 call $d ) )`, {"":{a:accum, b:accum, c:accum, d:accum, e:accum}}).exports; -assertEq(e.a(), 55); -assertEq(e.b(), 56); -assertEq(e.c(), 57); -assertEq(e.d(), 56); + +const epsilon = .00001; +assertEq(e.a(), 210); +assertEq(Math.abs(e.b() - 212) < epsilon, true); +assertEq(Math.abs(e.c() - 214) < epsilon, true); +assertEq(Math.abs(e.d() - 213.9) < epsilon, true); setJitCompilerOption("baseline.warmup.trigger", 5); setJitCompilerOption("ion.warmup.trigger", 10); var e = wasmEvalText(`(module (import $a "" "a" (param i32 f64) (result f64)) (export "a" $a) )`, {"":{a:(a,b)=>a+b}}).exports;
--- a/js/src/jit/BaselineBailouts.cpp +++ b/js/src/jit/BaselineBailouts.cpp @@ -134,16 +134,17 @@ struct BaselineStackBuilder header_->valueR0 = UndefinedValue(); header_->setR1 = 0; header_->valueR1 = UndefinedValue(); header_->resumeFramePtr = nullptr; header_->resumeAddr = nullptr; header_->resumePC = nullptr; header_->monitorStub = nullptr; header_->numFrames = 0; + header_->checkGlobalDeclarationConflicts = false; return true; } MOZ_MUST_USE bool enlarge() { MOZ_ASSERT(buffer_ != nullptr); if (bufferTotal_ & mozilla::tl::MulOverflowMask<2>::value) return false; size_t newSize = bufferTotal_ * 2; @@ -407,16 +408,20 @@ struct BaselineStackBuilder BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr(); return virtualPointerAtStackOffset(priorOffset + extraOffset); #elif defined(JS_CODEGEN_NONE) MOZ_CRASH(); #else # error "Bad architecture!" #endif } + + void setCheckGlobalDeclarationConflicts() { + header_->checkGlobalDeclarationConflicts = true; + } }; // Ensure that all value locations are readable from the SnapshotIterator. // Remove RInstructionResults from the JitActivation if the frame got recovered // ahead of the bailout. class SnapshotIteratorForBailout : public SnapshotIterator { JitActivation* activation_; @@ -504,16 +509,26 @@ HasLiveIteratorAtStackDepth(JSScript* sc // stack. The iterator is below the result object. if (tn->kind == JSTRY_FOR_OF && stackDepth == tn->stackDepth - 1) return true; } return false; } +static bool +IsPrologueBailout(const SnapshotIterator& iter, const ExceptionBailoutInfo* excInfo) +{ + // If we are propagating an exception for debug mode, we will not resume + // into baseline code, but instead into HandleExceptionBaseline (i.e., + // never before the prologue). + return iter.pcOffset() == 0 && !iter.resumeAfter() && + (!excInfo || !excInfo->propagatingIonExceptionForDebugMode()); +} + // For every inline frame, we write out the following data: // // | ... | // +---------------+ // | Descr(???) 
| --- Descr size here is (PREV_FRAME_SIZE) // +---------------+ // | ReturnAddr | // -- +===============+ --- OVERWRITE STARTS HERE (START_STACK_ADDR) @@ -705,37 +720,37 @@ InitFromBailout(JSContext* cx, HandleScr } else { MOZ_ASSERT(v.isUndefined() || v.isMagic(JS_OPTIMIZED_OUT)); // Get env chain from function or script. if (fun) { // If pcOffset == 0, we may have to push a new call object, so // we leave envChain nullptr and enter baseline code before // the prologue. - // - // If we are propagating an exception for debug mode, we will - // not resume into baseline code, but instead into - // HandleExceptionBaseline, so *do* set the env chain here. - if (iter.pcOffset() != 0 || iter.resumeAfter() || - (excInfo && excInfo->propagatingIonExceptionForDebugMode())) - { + if (!IsPrologueBailout(iter, excInfo)) envChain = fun->environment(); - } } else if (script->module()) { envChain = script->module()->environment(); } else { // For global scripts without a non-syntactic env the env // chain is the script's global lexical environment (Ion does // not compile scripts with a non-syntactic global scope). // Also note that it's invalid to resume into the prologue in // this case because the prologue expects the env chain in R1 // for eval and global scripts. MOZ_ASSERT(!script->isForEval()); MOZ_ASSERT(!script->hasNonSyntacticScope()); envChain = &(script->global().lexicalEnvironment()); + + // We have possibly bailed out before Ion could do the global + // declaration conflicts check. Since it's invalid to resume + // into the prologue, set a flag so FinishBailoutToBaseline + // can do the conflict check. + if (IsPrologueBailout(iter, excInfo)) + builder.setCheckGlobalDeclarationConflicts(); } } // Make sure to add HAS_RVAL to flags here because setFlags() below // will clobber it. 
returnValue = iter.read(); flags |= BaselineFrame::HAS_RVAL; @@ -1602,17 +1617,17 @@ jit::BailoutIonToBaseline(JSContext* cx, if (handleException) break; MOZ_ASSERT(nextCallee); MOZ_ASSERT(callPC); caller = scr; callerPC = callPC; fun = nextCallee; - scr = fun->existingScriptForInlinedFunction(); + scr = fun->existingScript(); frameNo++; snapIter.nextInstruction(); } JitSpew(JitSpew_BaselineBailouts, " Done restoring frames"); BailoutKind bailoutKind = snapIter.bailoutKind(); @@ -1784,26 +1799,39 @@ jit::FinishBailoutToBaseline(BaselineBai // cleared at the end of this function. If we return false, we don't clear // it: the exception handler also needs it and will clear it for us. BaselineFrame* topFrame = GetTopBaselineFrame(cx); topFrame->setOverridePc(bailoutInfo->resumePC); uint32_t numFrames = bailoutInfo->numFrames; MOZ_ASSERT(numFrames > 0); BailoutKind bailoutKind = bailoutInfo->bailoutKind; + bool checkGlobalDeclarationConflicts = bailoutInfo->checkGlobalDeclarationConflicts; // Free the bailout buffer. js_free(bailoutInfo); bailoutInfo = nullptr; - // Ensure the frame has a call object if it needs one. If the env chain - // is nullptr, we will enter baseline code at the prologue so no need to do - // anything in that case. - if (topFrame->environmentChain() && !EnsureHasEnvironmentObjects(cx, topFrame)) - return false; + if (topFrame->environmentChain()) { + // Ensure the frame has a call object if it needs one. If the env chain + // is nullptr, we will enter baseline code at the prologue so no need to do + // anything in that case. + if (!EnsureHasEnvironmentObjects(cx, topFrame)) + return false; + + // If we bailed out before Ion could do the global declaration + // conflicts check, because we resume in the body instead of the + // prologue for global frames. 
+ if (checkGlobalDeclarationConflicts) { + Rooted<LexicalEnvironmentObject*> lexicalEnv(cx, &cx->global()->lexicalEnvironment()); + RootedScript script(cx, topFrame->script()); + if (!CheckGlobalDeclarationConflicts(cx, script, lexicalEnv, cx->global())) + return false; + } + } // Create arguments objects for bailed out frames, to maintain the invariant // that script->needsArgsObj() implies frame->hasArgsObj(). RootedScript innerScript(cx, nullptr); RootedScript outerScript(cx, nullptr); MOZ_ASSERT(cx->currentlyRunningInJit()); JitFrameIterator iter(cx);
--- a/js/src/jit/BaselineCompiler.cpp +++ b/js/src/jit/BaselineCompiler.cpp @@ -17,16 +17,17 @@ #include "jit/JitSpewer.h" #include "jit/Linker.h" #ifdef JS_ION_PERF # include "jit/PerfSpewer.h" #endif #include "jit/SharedICHelpers.h" #include "jit/VMFunctions.h" #include "js/UniquePtr.h" +#include "vm/AsyncFunction.h" #include "vm/EnvironmentObject.h" #include "vm/Interpreter.h" #include "vm/TraceLogging.h" #include "jsscriptinlines.h" #include "jit/BaselineFrameInfo-inl.h" #include "jit/MacroAssembler-inl.h" @@ -3802,16 +3803,37 @@ BaselineCompiler::emit_JSOP_TOID() return false; masm.bind(&done); frame.pop(); // Pop index. frame.push(R0); return true; } +typedef JSObject* (*ToAsyncFn)(JSContext*, HandleFunction); +static const VMFunction ToAsyncInfo = FunctionInfo<ToAsyncFn>(js::WrapAsyncFunction, "ToAsync"); + +bool +BaselineCompiler::emit_JSOP_TOASYNC() +{ + frame.syncStack(0); + masm.unboxObject(frame.addressOfStackValue(frame.peek(-1)), R0.scratchReg()); + + prepareVMCall(); + pushArg(R0.scratchReg()); + + if (!callVM(ToAsyncInfo)) + return false; + + masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, R0); + frame.pop(); + frame.push(R0); + return true; +} + typedef bool (*ThrowObjectCoercibleFn)(JSContext*, HandleValue); static const VMFunction ThrowObjectCoercibleInfo = FunctionInfo<ThrowObjectCoercibleFn>(ThrowObjectCoercible, "ThrowObjectCoercible"); bool BaselineCompiler::emit_JSOP_CHECKOBJCOERCIBLE() { frame.syncStack(0);
--- a/js/src/jit/BaselineCompiler.h +++ b/js/src/jit/BaselineCompiler.h @@ -188,16 +188,17 @@ namespace jit { _(JSOP_DEBUGLEAVELEXICALENV) \ _(JSOP_PUSHVARENV) \ _(JSOP_POPVARENV) \ _(JSOP_EXCEPTION) \ _(JSOP_DEBUGGER) \ _(JSOP_ARGUMENTS) \ _(JSOP_RUNONCE) \ _(JSOP_REST) \ + _(JSOP_TOASYNC) \ _(JSOP_TOID) \ _(JSOP_TOSTRING) \ _(JSOP_TABLESWITCH) \ _(JSOP_ITER) \ _(JSOP_MOREITER) \ _(JSOP_ISNOITER) \ _(JSOP_ENDITER) \ _(JSOP_GENERATOR) \
--- a/js/src/jit/BaselineJIT.h +++ b/js/src/jit/BaselineJIT.h @@ -587,16 +587,22 @@ struct BaselineBailoutInfo // the first stub, not the resumeAddr above. The resumeAddr // above, in this case, is pushed onto the stack so that the // TypeMonitor chain can tail-return into the main jitcode when done. ICStub* monitorStub; // Number of baseline frames to push on the stack. uint32_t numFrames; + // If Ion bailed out on a global script before it could perform the global + // declaration conflicts check. In such cases the baseline script is + // resumed at the first pc instead of the prologue, so an extra flag is + // needed to perform the check. + bool checkGlobalDeclarationConflicts; + // The bailout kind. BailoutKind bailoutKind; }; uint32_t BailoutIonToBaseline(JSContext* cx, JitActivation* activation, JitFrameIterator& iter, bool invalidate, BaselineBailoutInfo** bailoutInfo, const ExceptionBailoutInfo* exceptionInfo);
--- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -34,16 +34,17 @@ #include "jit/JitcodeMap.h" #include "jit/JitSpewer.h" #include "jit/Linker.h" #include "jit/Lowering.h" #include "jit/MIRGenerator.h" #include "jit/MoveEmitter.h" #include "jit/RangeAnalysis.h" #include "jit/SharedICHelpers.h" +#include "vm/AsyncFunction.h" #include "vm/MatchPairs.h" #include "vm/RegExpObject.h" #include "vm/RegExpStatics.h" #include "vm/TraceLogging.h" #include "vm/Unicode.h" #include "jsboolinlines.h" @@ -10449,16 +10450,26 @@ CodeGenerator::visitOutOfLineTypeOfV(Out masm.passABIArg(output); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::TypeOfObjectOperation)); masm.storeCallResult(output); restoreVolatile(output); masm.jump(ool->rejoin()); } +typedef JSObject* (*ToAsyncFn)(JSContext*, HandleFunction); +static const VMFunction ToAsyncInfo = FunctionInfo<ToAsyncFn>(js::WrapAsyncFunction, "ToAsync"); + +void +CodeGenerator::visitToAsync(LToAsync* lir) +{ + pushArg(ToRegister(lir->unwrapped())); + callVM(ToAsyncInfo, lir); +} + typedef bool (*ToIdFn)(JSContext*, HandleScript, jsbytecode*, HandleValue, MutableHandleValue); static const VMFunction ToIdInfo = FunctionInfo<ToIdFn>(ToIdOperation, "ToIdOperation"); void CodeGenerator::visitToIdV(LToIdV* lir) { Label notInt32;
--- a/js/src/jit/CodeGenerator.h +++ b/js/src/jit/CodeGenerator.h @@ -286,16 +286,17 @@ class CodeGenerator final : public CodeG void visitFunctionEnvironment(LFunctionEnvironment* lir); void visitCallGetProperty(LCallGetProperty* lir); void visitCallGetElement(LCallGetElement* lir); void visitCallSetElement(LCallSetElement* lir); void visitCallInitElementArray(LCallInitElementArray* lir); void visitThrow(LThrow* lir); void visitTypeOfV(LTypeOfV* lir); void visitOutOfLineTypeOfV(OutOfLineTypeOfV* ool); + void visitToAsync(LToAsync* lir); void visitToIdV(LToIdV* lir); template<typename T> void emitLoadElementT(LLoadElementT* lir, const T& source); void visitLoadElementT(LLoadElementT* lir); void visitLoadElementV(LLoadElementV* load); void visitLoadElementHole(LLoadElementHole* lir); void visitLoadUnboxedPointerV(LLoadUnboxedPointerV* lir); void visitLoadUnboxedPointerT(LLoadUnboxedPointerT* lir); void visitUnboxObjectOrNull(LUnboxObjectOrNull* lir);
--- a/js/src/jit/CompileInfo.h +++ b/js/src/jit/CompileInfo.h @@ -2,16 +2,18 @@ * vim: set ts=8 sts=4 et sw=4 tw=99: * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef jit_CompileInfo_h #define jit_CompileInfo_h +#include "mozilla/Maybe.h" + #include "jsfun.h" #include "jit/JitAllocPolicy.h" #include "jit/JitFrames.h" #include "jit/Registers.h" #include "vm/EnvironmentObject.h" namespace js { @@ -216,16 +218,33 @@ class CompileInfo } nimplicit_ = StartArgSlot(script) /* env chain and argument obj */ + (fun ? 1 : 0); /* this */ nargs_ = fun ? fun->nargs() : 0; nlocals_ = script->nfixed(); nstack_ = Max<unsigned>(script->nslots() - script->nfixed(), MinJITStackSize); nslots_ = nimplicit_ + nargs_ + nlocals_ + nstack_; + + // For derived class constructors, find and cache the frame slot for + // the .this binding. This slot is assumed to be always + // observable. See isObservableFrameSlot. + if (script->isDerivedClassConstructor()) { + MOZ_ASSERT(script->functionHasThisBinding()); + CompileRuntime* runtime = GetJitContext()->runtime; + for (BindingIter bi(script); bi; bi++) { + if (bi.name() != runtime->names().dotThis) + continue; + BindingLocation loc = bi.location(); + if (loc.kind() == BindingLocation::Kind::Frame) { + thisSlotForDerivedClassConstructor_ = mozilla::Some(localSlot(loc.slot())); + break; + } + } + } } explicit CompileInfo(unsigned nlocals) : script_(nullptr), fun_(nullptr), osrPc_(nullptr), analysisMode_(Analysis_None), scriptNeedsArgsObj_(false), mayReadFrameArgsDirectly_(false), inlineScriptTree_(nullptr) { nimplicit_ = 0; @@ -432,16 +451,23 @@ class CompileInfo bool isObservableFrameSlot(uint32_t slot) const { if (!funMaybeLazy()) return false; // The |this| value must always be observable. 
if (slot == thisSlot()) return true; + // The |this| frame slot in derived class constructors should never be + // optimized out, as a Debugger might need to perform TDZ checks on it + // via, e.g., an exceptionUnwind handler. The TDZ check is required + // for correctness if the handler decides to continue execution. + if (thisSlotForDerivedClassConstructor_ && *thisSlotForDerivedClassConstructor_ == slot) + return true; + if (funMaybeLazy()->needsSomeEnvironmentObject() && slot == environmentChainSlot()) return true; // If the function may need an arguments object, then make sure to // preserve the env chain, because it may be needed to construct the // arguments object during bailout. If we've already created an // arguments object (or got one via OSR), preserve that as well. if (hasArguments() && (slot == environmentChainSlot() || slot == argsObjSlot())) @@ -498,16 +524,17 @@ class CompileInfo } private: unsigned nimplicit_; unsigned nargs_; unsigned nlocals_; unsigned nstack_; unsigned nslots_; + mozilla::Maybe<unsigned> thisSlotForDerivedClassConstructor_; JSScript* script_; JSFunction* fun_; jsbytecode* osrPc_; AnalysisMode analysisMode_; // Whether a script needs an arguments object is unstable over compilation // since the arguments optimization could be marked as failed on the main // thread, so cache a value here and use it throughout for consistency.
--- a/js/src/jit/IonBuilder.cpp +++ b/js/src/jit/IonBuilder.cpp @@ -2098,16 +2098,19 @@ IonBuilder::inspectOpcode(JSOp op) case JSOP_OBJECT: return jsop_object(info().getObject(pc)); case JSOP_TYPEOF: case JSOP_TYPEOFEXPR: return jsop_typeof(); + case JSOP_TOASYNC: + return jsop_toasync(); + case JSOP_TOID: return jsop_toid(); case JSOP_LAMBDA: return jsop_lambda(info().getFunction(pc)); case JSOP_LAMBDA_ARROW: return jsop_lambda_arrow(info().getFunction(pc)); @@ -13565,16 +13568,30 @@ IonBuilder::jsop_typeof() current->add(ins); current->push(ins); return true; } bool +IonBuilder::jsop_toasync() +{ + MDefinition* unwrapped = current->pop(); + MOZ_ASSERT(unwrapped->type() == MIRType::Object); + + MToAsync* ins = MToAsync::New(alloc(), unwrapped); + + current->add(ins); + current->push(ins); + + return resumeAfter(ins); +} + +bool IonBuilder::jsop_toid() { // No-op if the index is an integer. if (current->peek(-1)->type() == MIRType::Int32) return true; MDefinition* index = current->pop(); MToId* ins = MToId::New(alloc(), index);
--- a/js/src/jit/IonBuilder.h +++ b/js/src/jit/IonBuilder.h @@ -764,16 +764,17 @@ class IonBuilder MOZ_MUST_USE bool jsop_initprop_getter_setter(PropertyName* name); MOZ_MUST_USE bool jsop_regexp(RegExpObject* reobj); MOZ_MUST_USE bool jsop_object(JSObject* obj); MOZ_MUST_USE bool jsop_lambda(JSFunction* fun); MOZ_MUST_USE bool jsop_lambda_arrow(JSFunction* fun); MOZ_MUST_USE bool jsop_functionthis(); MOZ_MUST_USE bool jsop_globalthis(); MOZ_MUST_USE bool jsop_typeof(); + MOZ_MUST_USE bool jsop_toasync(); MOZ_MUST_USE bool jsop_toid(); MOZ_MUST_USE bool jsop_iter(uint8_t flags); MOZ_MUST_USE bool jsop_itermore(); MOZ_MUST_USE bool jsop_isnoiter(); MOZ_MUST_USE bool jsop_iterend(); MOZ_MUST_USE bool jsop_in(); MOZ_MUST_USE bool jsop_instanceof(); MOZ_MUST_USE bool jsop_getaliasedvar(EnvironmentCoordinate ec);
--- a/js/src/jit/JitFrames.cpp +++ b/js/src/jit/JitFrames.cpp @@ -2356,17 +2356,17 @@ InlineFrameIterator::findNextFrame() si_.nextFrame(); calleeTemplate_ = &funval.toObject().as<JSFunction>(); // Inlined functions may be clones that still point to the lazy script // for the executed script, if they are clones. The actual script // exists though, just make sure the function points to it. - script_ = calleeTemplate_->existingScriptForInlinedFunction(); + script_ = calleeTemplate_->existingScript(); MOZ_ASSERT(script_->hasBaselineScript()); pc_ = script_->offsetToPC(si_.pcOffset()); } // The first time we do not know the number of frames, we only settle on the // last frame, and update the number of frames based on the number of // iteration that we have done.
--- a/js/src/jit/JitSpewer.cpp +++ b/js/src/jit/JitSpewer.cpp @@ -16,16 +16,17 @@ # include <unistd.h> #endif #include "jsprf.h" #include "jit/Ion.h" #include "jit/MIR.h" #include "jit/MIRGenerator.h" +#include "jit/MIRGraph.h" #include "threading/LockGuard.h" #include "vm/HelperThreads.h" #include "vm/MutexIDs.h" #ifndef JIT_SPEW_DIR # if defined(_WIN32) @@ -297,16 +298,23 @@ GraphSpewer::spewPass(const char* pass) c1Spewer_.spewPass(pass); jsonSpewer_.beginPass(pass); jsonSpewer_.spewMIR(graph_); jsonSpewer_.spewLIR(graph_); jsonSpewer_.endPass(); ionspewer.spewPass(this); + + // As this function is used for debugging, we ignore any of the previous + // failures and ensure there is enough ballast space, such that we do not + // exhaust the ballast space before running the next phase. + AutoEnterOOMUnsafeRegion oomUnsafe; + if (!graph_->alloc().ensureBallast()) + oomUnsafe.crash("Could not ensure enough ballast space after spewing graph information."); } void GraphSpewer::spewPass(const char* pass, BacktrackingAllocator* ra) { if (!isSpewing()) return;
--- a/js/src/jit/Lowering.cpp +++ b/js/src/jit/Lowering.cpp @@ -1172,16 +1172,24 @@ LIRGenerator::visitTypeOf(MTypeOf* ins) MDefinition* opd = ins->input(); MOZ_ASSERT(opd->type() == MIRType::Value); LTypeOfV* lir = new(alloc()) LTypeOfV(useBox(opd), tempToUnbox()); define(lir, ins); } void +LIRGenerator::visitToAsync(MToAsync* ins) +{ + LToAsync* lir = new(alloc()) LToAsync(useRegisterAtStart(ins->input())); + defineReturn(lir, ins); + assignSafepoint(lir, ins); +} + +void LIRGenerator::visitToId(MToId* ins) { LToIdV* lir = new(alloc()) LToIdV(useBox(ins->input()), tempDouble()); defineBox(lir, ins); assignSafepoint(lir, ins); } void
--- a/js/src/jit/Lowering.h +++ b/js/src/jit/Lowering.h @@ -116,16 +116,17 @@ class LIRGenerator : public LIRGenerator void visitGetDynamicName(MGetDynamicName* ins); void visitCallDirectEval(MCallDirectEval* ins); void visitTest(MTest* test); void visitGotoWithFake(MGotoWithFake* ins); void visitFunctionDispatch(MFunctionDispatch* ins); void visitObjectGroupDispatch(MObjectGroupDispatch* ins); void visitCompare(MCompare* comp); void visitTypeOf(MTypeOf* ins); + void visitToAsync(MToAsync* ins); void visitToId(MToId* ins); void visitBitNot(MBitNot* ins); void visitBitAnd(MBitAnd* ins); void visitBitOr(MBitOr* ins); void visitBitXor(MBitXor* ins); void visitLsh(MLsh* ins); void visitRsh(MRsh* ins); void visitUrsh(MUrsh* ins);
--- a/js/src/jit/MCallOptimize.cpp +++ b/js/src/jit/MCallOptimize.cpp @@ -2367,17 +2367,19 @@ IonBuilder::inlineTypedArray(CallInfo& c return InliningStatus_NotInlined; uint32_t len = AssertedCast<uint32_t>(providedLen); if (obj->length() != len) return InliningStatus_NotInlined; callInfo.setImplicitlyUsedUnchecked(); - ins = MNewTypedArray::New(alloc(), constraints(), obj, + MConstant* templateConst = MConstant::NewConstraintlessObject(alloc(), obj); + current->add(templateConst); + ins = MNewTypedArray::New(alloc(), constraints(), templateConst, obj->group()->initialHeap(constraints())); } current->add(ins); current->push(ins); if (!resumeAfter(ins)) return InliningStatus_Error;
--- a/js/src/jit/MIR.h +++ b/js/src/jit/MIR.h @@ -3360,56 +3360,51 @@ class MNewArrayDynamicLength return AliasSet::None(); } bool appendRoots(MRootList& roots) const override { return roots.append(templateObject_); } }; -class MNewTypedArray : public MNullaryInstruction -{ - CompilerGCPointer<TypedArrayObject*> templateObject_; +class MNewTypedArray + : public MUnaryInstruction, + public NoTypePolicy::Data +{ gc::InitialHeap initialHeap_; - MNewTypedArray(CompilerConstraintList* constraints, TypedArrayObject* templateObject, + MNewTypedArray(CompilerConstraintList* constraints, MConstant* templateConst, gc::InitialHeap initialHeap) - : templateObject_(templateObject), + : MUnaryInstruction(templateConst), initialHeap_(initialHeap) { - MOZ_ASSERT(!templateObject->isSingleton()); + MOZ_ASSERT(!templateObject()->isSingleton()); setResultType(MIRType::Object); - setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject)); + setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject())); } public: INSTRUCTION_HEADER(NewTypedArray) - - static MNewTypedArray* New(TempAllocator& alloc, - CompilerConstraintList* constraints, - TypedArrayObject* templateObject, - gc::InitialHeap initialHeap) - { - return new(alloc) MNewTypedArray(constraints, templateObject, initialHeap); - } + TRIVIAL_NEW_WRAPPERS TypedArrayObject* templateObject() const { - return templateObject_; + return &getOperand(0)->toConstant()->toObject().as<TypedArrayObject>(); } gc::InitialHeap initialHeap() const { return initialHeap_; } virtual AliasSet getAliasSet() const override { return AliasSet::None(); } - bool appendRoots(MRootList& roots) const override { - return roots.append(templateObject_); + MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override; + bool canRecoverOnBailout() const override { + return true; } }; class MNewTypedArrayDynamicLength : public MUnaryInstruction, public IntPolicy<0>::Data { CompilerObject templateObject_; @@ -5806,16 +5801,31 @@ 
class MTypeOf } MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override; bool canRecoverOnBailout() const override { return true; } }; +class MToAsync + : public MUnaryInstruction, + public SingleObjectPolicy::Data +{ + explicit MToAsync(MDefinition* unwrapped) + : MUnaryInstruction(unwrapped) + { + setResultType(MIRType::Object); + } + + public: + INSTRUCTION_HEADER(ToAsync) + TRIVIAL_NEW_WRAPPERS +}; + class MToId : public MUnaryInstruction, public BoxInputsPolicy::Data { explicit MToId(MDefinition* index) : MUnaryInstruction(index) { setResultType(MIRType::Value);
--- a/js/src/jit/MOpcodes.h +++ b/js/src/jit/MOpcodes.h @@ -74,16 +74,17 @@ namespace jit { _(Unreachable) \ _(EncodeSnapshot) \ _(AssertFloat32) \ _(AssertRecoveredOnBailout) \ _(GetDynamicName) \ _(CallDirectEval) \ _(BitNot) \ _(TypeOf) \ + _(ToAsync) \ _(ToId) \ _(BitAnd) \ _(BitOr) \ _(BitXor) \ _(Lsh) \ _(Rsh) \ _(Ursh) \ _(SignExtend) \
--- a/js/src/jit/Recover.cpp +++ b/js/src/jit/Recover.cpp @@ -1301,16 +1301,44 @@ RNewObject::recover(JSContext* cx, Snaps return false; result.setObject(*resultObject); iter.storeInstructionResult(result); return true; } bool +MNewTypedArray::writeRecoverData(CompactBufferWriter& writer) const +{ + MOZ_ASSERT(canRecoverOnBailout()); + writer.writeUnsigned(uint32_t(RInstruction::Recover_NewTypedArray)); + return true; +} + +RNewTypedArray::RNewTypedArray(CompactBufferReader& reader) +{ +} + +bool +RNewTypedArray::recover(JSContext* cx, SnapshotIterator& iter) const +{ + RootedObject templateObject(cx, &iter.read().toObject()); + RootedValue result(cx); + + uint32_t length = templateObject.as<TypedArrayObject>()->length(); + JSObject* resultObject = TypedArrayCreateWithTemplate(cx, templateObject, length); + if (!resultObject) + return false; + + result.setObject(*resultObject); + iter.storeInstructionResult(result); + return true; +} + +bool MNewArray::writeRecoverData(CompactBufferWriter& writer) const { MOZ_ASSERT(canRecoverOnBailout()); writer.writeUnsigned(uint32_t(RInstruction::Recover_NewArray)); writer.writeUnsigned(length()); return true; }
--- a/js/src/jit/Recover.h +++ b/js/src/jit/Recover.h @@ -95,16 +95,17 @@ namespace jit { _(RegExpSearcher) \ _(RegExpTester) \ _(StringReplace) \ _(TypeOf) \ _(ToDouble) \ _(ToFloat32) \ _(TruncateToInt32) \ _(NewObject) \ + _(NewTypedArray) \ _(NewArray) \ _(NewDerivedTypedObject) \ _(CreateThisWithTemplate) \ _(Lambda) \ _(SimdBox) \ _(ObjectState) \ _(ArrayState) \ _(AtomicIsLockFree) \ @@ -560,16 +561,24 @@ class RNewObject final : public RInstruc MNewObject::Mode mode_; public: RINSTRUCTION_HEADER_NUM_OP_(NewObject, 1) MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const; }; +class RNewTypedArray final : public RInstruction +{ + public: + RINSTRUCTION_HEADER_NUM_OP_(NewTypedArray, 1) + + MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const; +}; + class RNewArray final : public RInstruction { private: uint32_t count_; public: RINSTRUCTION_HEADER_NUM_OP_(NewArray, 1)
--- a/js/src/jit/ValueNumbering.cpp +++ b/js/src/jit/ValueNumbering.cpp @@ -722,17 +722,18 @@ ValueNumberer::visitDefinition(MDefiniti // The Nop is introduced to capture the result and make sure the operands // are not live anymore when there are no further uses. Though when // all operands are still needed the Nop doesn't decrease the liveness // and can get removed. MResumePoint* rp = nop->resumePoint(); if (rp && rp->numOperands() > 0 && rp->getOperand(rp->numOperands() - 1) == prev && - !nop->block()->lastIns()->isThrow()) + !nop->block()->lastIns()->isThrow() && + !prev->isAssertRecoveredOnBailout()) { size_t numOperandsLive = 0; for (size_t j = 0; j < prev->numOperands(); j++) { for (size_t i = 0; i < rp->numOperands(); i++) { if (prev->getOperand(j) == rp->getOperand(i)) { numOperandsLive++; break; }
--- a/js/src/jit/arm/Assembler-arm.cpp +++ b/js/src/jit/arm/Assembler-arm.cpp @@ -129,20 +129,18 @@ ABIArgGenerator::hardNext(MIRType type) stackOffset_ += sizeof(uint64_t); break; } current_ = ABIArg(Register::FromCode(intRegIndex_), Register::FromCode(intRegIndex_ + 1)); intRegIndex_ += 2; break; case MIRType::Float32: if (floatRegIndex_ == NumFloatArgRegs) { - static const uint32_t align = sizeof(double) - 1; - stackOffset_ = (stackOffset_ + align) & ~align; current_ = ABIArg(stackOffset_); - stackOffset_ += sizeof(uint64_t); + stackOffset_ += sizeof(uint32_t); break; } current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single)); floatRegIndex_++; break; case MIRType::Double: // Double register are composed of 2 float registers, thus we have to // skip any float register which cannot be used in a pair of float
--- a/js/src/jit/shared/LIR-shared.h +++ b/js/src/jit/shared/LIR-shared.h @@ -1588,16 +1588,29 @@ class LTypeOfV : public LInstructionHelp return getTemp(0); } MTypeOf* mir() const { return mir_->toTypeOf(); } }; +class LToAsync : public LCallInstructionHelper<1, 1, 0> +{ + public: + LIR_HEADER(ToAsync) + explicit LToAsync(const LAllocation& input) { + setOperand(0, input); + } + + const LAllocation* unwrapped() { + return getOperand(0); + } +}; + class LToIdV : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 1> { public: LIR_HEADER(ToIdV) LToIdV(const LBoxAllocation& input, const LDefinition& temp) { setBoxOperand(Input, input);
--- a/js/src/jit/shared/LOpcodes-shared.h +++ b/js/src/jit/shared/LOpcodes-shared.h @@ -354,16 +354,17 @@ _(ArgumentsLength) \ _(GetFrameArgument) \ _(SetFrameArgumentT) \ _(SetFrameArgumentC) \ _(SetFrameArgumentV) \ _(RunOncePrologue) \ _(Rest) \ _(TypeOfV) \ + _(ToAsync) \ _(ToIdV) \ _(Floor) \ _(FloorF) \ _(Ceil) \ _(CeilF) \ _(Round) \ _(RoundF) \ _(In) \
--- a/js/src/jit/x86/Assembler-x86.cpp +++ b/js/src/jit/x86/Assembler-x86.cpp @@ -16,21 +16,21 @@ ABIArgGenerator::ABIArgGenerator() current_() {} ABIArg ABIArgGenerator::next(MIRType type) { switch (type) { case MIRType::Int32: + case MIRType::Float32: case MIRType::Pointer: current_ = ABIArg(stackOffset_); stackOffset_ += sizeof(uint32_t); break; - case MIRType::Float32: // Float32 moves are actually double moves case MIRType::Double: current_ = ABIArg(stackOffset_); stackOffset_ += sizeof(uint64_t); break; case MIRType::Int64: current_ = ABIArg(stackOffset_); stackOffset_ += sizeof(uint64_t); break;
--- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -3082,16 +3082,34 @@ JS_DefineConstDoubles(JSContext* cx, Han return DefineConstScalar(cx, obj, cds); } JS_PUBLIC_API(bool) JS_DefineConstIntegers(JSContext* cx, HandleObject obj, const JSConstIntegerSpec* cis) { return DefineConstScalar(cx, obj, cis); } +bool +JSPropertySpec::getValue(JSContext* cx, MutableHandleValue vp) const +{ + MOZ_ASSERT(!isAccessor()); + + if (value.type == JSVAL_TYPE_STRING) { + RootedAtom atom(cx, Atomize(cx, value.string, strlen(value.string))); + if (!atom) + return false; + vp.setString(atom); + } else { + MOZ_ASSERT(value.type == JSVAL_TYPE_INT32); + vp.setInt32(value.int32); + } + + return true; +} + static JS::SymbolCode PropertySpecNameToSymbolCode(const char* name) { MOZ_ASSERT(JS::PropertySpecNameIsSymbol(name)); uintptr_t u = reinterpret_cast<uintptr_t>(name); return JS::SymbolCode(u - 1); } @@ -3144,21 +3162,20 @@ JS_DefineProperties(JSContext* cx, Handl if (!DefinePropertyById(cx, obj, id, JS::UndefinedHandleValue, ps->accessors.getter.native, ps->accessors.setter.native, ps->flags, 0)) { return false; } } } else { - RootedAtom atom(cx, Atomize(cx, ps->string.value, strlen(ps->string.value))); - if (!atom) + RootedValue v(cx); + if (!ps->getValue(cx, &v)) return false; - RootedValue v(cx, StringValue(atom)); if (!DefinePropertyById(cx, obj, id, v, NativeOpWrapper(nullptr), NativeOpWrapper(nullptr), ps->flags & ~JSPROP_INTERNAL_USE_BIT, 0)) { return false; } } } return true; @@ -6219,16 +6236,25 @@ JS_SetGlobalJitCompilerOption(JSContext* if (value == 0) { jit::JitOptions.forceInlineCaches = false; JitSpew(js::jit::JitSpew_IonScripts, "IonBuilder: Enable non-IC optimizations."); } else { jit::JitOptions.forceInlineCaches = true; JitSpew(js::jit::JitSpew_IonScripts, "IonBuilder: Disable non-IC optimizations."); } break; + case JSJITCOMPILER_ION_CHECK_RANGE_ANALYSIS: + if (value == 0) { + jit::JitOptions.checkRangeAnalysis = false; + JitSpew(js::jit::JitSpew_IonScripts, 
"IonBuilder: Enable range analysis checks."); + } else { + jit::JitOptions.checkRangeAnalysis = true; + JitSpew(js::jit::JitSpew_IonScripts, "IonBuilder: Disable range analysis checks."); + } + break; case JSJITCOMPILER_ION_ENABLE: if (value == 1) { JS::ContextOptionsRef(cx).setIon(true); JitSpew(js::jit::JitSpew_IonScripts, "Enable ion"); } else if (value == 0) { JS::ContextOptionsRef(cx).setIon(false); JitSpew(js::jit::JitSpew_IonScripts, "Disable ion"); } @@ -6290,16 +6316,19 @@ JS_GetGlobalJitCompilerOption(JSContext* case JSJITCOMPILER_ION_WARMUP_TRIGGER: *valueOut = jit::JitOptions.forcedDefaultIonWarmUpThreshold.isSome() ? jit::JitOptions.forcedDefaultIonWarmUpThreshold.ref() : jit::OptimizationInfo::CompilerWarmupThreshold; break; case JSJITCOMPILER_ION_FORCE_IC: *valueOut = jit::JitOptions.forceInlineCaches; break; + case JSJITCOMPILER_ION_CHECK_RANGE_ANALYSIS: + *valueOut = jit::JitOptions.checkRangeAnalysis; + break; case JSJITCOMPILER_ION_ENABLE: *valueOut = JS::ContextOptionsRef(cx).ion(); break; case JSJITCOMPILER_BASELINE_ENABLE: *valueOut = JS::ContextOptionsRef(cx).baseline(); break; case JSJITCOMPILER_OFFTHREAD_COMPILATION_ENABLE: *valueOut = rt->canUseOffthreadIonCompilation();
--- a/js/src/jsapi.h +++ b/js/src/jsapi.h @@ -1889,40 +1889,44 @@ typedef struct JSNativeWrapper { * are helper macros for defining such arrays. */ struct JSPropertySpec { struct SelfHostedWrapper { void* unused; const char* funname; }; - struct StringValueWrapper { - void* unused; - const char* value; + struct ValueWrapper { + uintptr_t type; + union { + const char* string; + int32_t int32; + }; }; const char* name; uint8_t flags; union { struct { union { JSNativeWrapper native; SelfHostedWrapper selfHosted; } getter; union { JSNativeWrapper native; SelfHostedWrapper selfHosted; } setter; } accessors; - StringValueWrapper string; + ValueWrapper value; }; bool isAccessor() const { return !(flags & JSPROP_INTERNAL_USE_BIT); } + bool getValue(JSContext* cx, JS::MutableHandleValue value) const; bool isSelfHosted() const { MOZ_ASSERT(isAccessor()); #ifdef DEBUG // Verify that our accessors match our JSPROP_GETTER flag. if (flags & JSPROP_GETTER) checkAccessorsAreSelfHosted(); @@ -1960,34 +1964,40 @@ namespace detail { /* NEVER DEFINED, DON'T USE. For use by JS_CAST_NATIVE_TO only. */ inline int CheckIsNative(JSNative native); /* NEVER DEFINED, DON'T USE. For use by JS_CAST_STRING_TO only. */ template<size_t N> inline int CheckIsCharacterLiteral(const char (&arr)[N]); +/* NEVER DEFINED, DON'T USE. For use by JS_CAST_INT32_TO only. */ +inline int CheckIsInt32(int32_t value); + /* NEVER DEFINED, DON'T USE. For use by JS_PROPERTYOP_GETTER only. */ inline int CheckIsGetterOp(JSGetterOp op); /* NEVER DEFINED, DON'T USE. For use by JS_PROPERTYOP_SETTER only. 
*/ inline int CheckIsSetterOp(JSSetterOp op); - } // namespace detail } // namespace JS #define JS_CAST_NATIVE_TO(v, To) \ (static_cast<void>(sizeof(JS::detail::CheckIsNative(v))), \ reinterpret_cast<To>(v)) #define JS_CAST_STRING_TO(s, To) \ (static_cast<void>(sizeof(JS::detail::CheckIsCharacterLiteral(s))), \ reinterpret_cast<To>(s)) +#define JS_CAST_INT32_TO(s, To) \ + (static_cast<void>(sizeof(JS::detail::CheckIsInt32(s))), \ + reinterpret_cast<To>(s)) + #define JS_CHECK_ACCESSOR_FLAGS(flags) \ (static_cast<mozilla::EnableIf<((flags) & ~(JSPROP_ENUMERATE | JSPROP_PERMANENT)) == 0>::Type>(0), \ (flags)) #define JS_PROPERTYOP_GETTER(v) \ (static_cast<void>(sizeof(JS::detail::CheckIsGetterOp(v))), \ reinterpret_cast<JSNative>(v)) @@ -1997,24 +2007,26 @@ inline int CheckIsSetterOp(JSSetterOp op #define JS_STUBGETTER JS_PROPERTYOP_GETTER(JS_PropertyStub) #define JS_STUBSETTER JS_PROPERTYOP_SETTER(JS_StrictPropertyStub) #define JS_PS_ACCESSOR_SPEC(name, getter, setter, flags, extraFlags) \ { name, uint8_t(JS_CHECK_ACCESSOR_FLAGS(flags) | extraFlags), \ { { getter, setter } } } -#define JS_PS_STRINGVALUE_SPEC(name, value, flags) \ +#define JS_PS_VALUE_SPEC(name, value, flags) \ { name, uint8_t(flags | JSPROP_INTERNAL_USE_BIT), \ - { { STRINGVALUE_WRAPPER(value), JSNATIVE_WRAPPER(nullptr) } } } + { { value, JSNATIVE_WRAPPER(nullptr) } } } #define SELFHOSTED_WRAPPER(name) \ { { nullptr, JS_CAST_STRING_TO(name, const JSJitInfo*) } } #define STRINGVALUE_WRAPPER(value) \ - { { nullptr, JS_CAST_STRING_TO(value, const JSJitInfo*) } } + { { reinterpret_cast<JSNative>(JSVAL_TYPE_STRING), JS_CAST_STRING_TO(value, const JSJitInfo*) } } +#define INT32VALUE_WRAPPER(value) \ + { { reinterpret_cast<JSNative>(JSVAL_TYPE_INT32), JS_CAST_INT32_TO(value, const JSJitInfo*) } } /* * JSPropertySpec uses JSNativeWrapper. These macros encapsulate the definition * of JSNative-backed JSPropertySpecs, by defining the JSNativeWrappers for * them. 
*/ #define JS_PSG(name, getter, flags) \ JS_PS_ACCESSOR_SPEC(name, JSNATIVE_WRAPPER(getter), JSNATIVE_WRAPPER(nullptr), flags, \ @@ -2028,20 +2040,22 @@ inline int CheckIsSetterOp(JSSetterOp op #define JS_SELF_HOSTED_GETSET(name, getterName, setterName, flags) \ JS_PS_ACCESSOR_SPEC(name, SELFHOSTED_WRAPPER(getterName), SELFHOSTED_WRAPPER(setterName), \ flags, JSPROP_SHARED | JSPROP_GETTER | JSPROP_SETTER) #define JS_SELF_HOSTED_SYM_GET(symbol, getterName, flags) \ JS_PS_ACCESSOR_SPEC(reinterpret_cast<const char*>(uint32_t(::JS::SymbolCode::symbol) + 1), \ SELFHOSTED_WRAPPER(getterName), JSNATIVE_WRAPPER(nullptr), flags, \ JSPROP_SHARED | JSPROP_GETTER) #define JS_STRING_PS(name, string, flags) \ - JS_PS_STRINGVALUE_SPEC(name, string, flags) + JS_PS_VALUE_SPEC(name, STRINGVALUE_WRAPPER(string), flags) #define JS_STRING_SYM_PS(symbol, string, flags) \ - JS_PS_STRINGVALUE_SPEC(reinterpret_cast<const char*>(uint32_t(::JS::SymbolCode::symbol) + 1), \ - string, flags) + JS_PS_VALUE_SPEC(reinterpret_cast<const char*>(uint32_t(::JS::SymbolCode::symbol) + 1), \ + STRINGVALUE_WRAPPER(string), flags) +#define JS_INT32_PS(name, value, flags) \ + JS_PS_VALUE_SPEC(name, INT32VALUE_WRAPPER(value), flags) #define JS_PS_END \ JS_PS_ACCESSOR_SPEC(nullptr, JSNATIVE_WRAPPER(nullptr), JSNATIVE_WRAPPER(nullptr), 0, 0) /** * To define a native function, set call to a JSNativeWrapper. To define a * self-hosted function, set selfHostedName to the name of a function * compiled during JSRuntime::initSelfHosting. */ @@ -3204,18 +3218,18 @@ JS_DeleteElement(JSContext* cx, JS::Hand * var result = []; * for (key in obj) * result.push(key); * return result; * * This is the closest thing we currently have to the ES6 [[Enumerate]] * internal method. * - * The JSIdArray returned by JS_Enumerate must be rooted to protect its - * contents from garbage collection. Use JS::AutoIdArray. + * The array of ids returned by JS_Enumerate must be rooted to protect its + * contents from garbage collection. 
Use JS::Rooted<JS::IdVector>. */ extern JS_PUBLIC_API(bool) JS_Enumerate(JSContext* cx, JS::HandleObject obj, JS::MutableHandle<JS::IdVector> props); /* * API for determining callability and constructability. [[Call]] and * [[Construct]] are internal methods that aren't present on all objects, so it * is useful to ask if they are there or not. The standard itself asks these @@ -5731,16 +5745,17 @@ JS_SetOffthreadIonCompilationEnabled(JSC #define JIT_COMPILER_OPTIONS(Register) \ Register(BASELINE_WARMUP_TRIGGER, "baseline.warmup.trigger") \ Register(ION_WARMUP_TRIGGER, "ion.warmup.trigger") \ Register(ION_GVN_ENABLE, "ion.gvn.enable") \ Register(ION_FORCE_IC, "ion.forceinlineCaches") \ Register(ION_ENABLE, "ion.enable") \ Register(ION_INTERRUPT_WITHOUT_SIGNAL, "ion.interrupt-without-signals") \ + Register(ION_CHECK_RANGE_ANALYSIS, "ion.check-range-analysis") \ Register(BASELINE_ENABLE, "baseline.enable") \ Register(OFFTHREAD_COMPILATION_ENABLE, "offthread-compilation.enable") \ Register(JUMP_THRESHOLD, "jump-threshold") \ Register(ASMJS_ATOMICS_ENABLE, "asmjs.atomics.enable") \ Register(WASM_TEST_MODE, "wasm.test-mode") \ Register(WASM_FOLD_OFFSETS, "wasm.fold-offsets") typedef enum JSJitCompilerOption {
--- a/js/src/jsexn.cpp +++ b/js/src/jsexn.cpp @@ -105,18 +105,18 @@ ErrorObject::errorClassSpec_ = { }; const ClassSpec ErrorObject::subErrorClassSpec_ = { ErrorObject::createConstructor, ErrorObject::createProto, nullptr, nullptr, - exception_methods, - exception_properties, + nullptr, + nullptr, nullptr, JSProto_Error }; const ClassSpec ErrorObject::nonGlobalErrorClassSpec_ = { ErrorObject::createConstructor, ErrorObject::createProto,
--- a/js/src/jsfriendapi.h +++ b/js/src/jsfriendapi.h @@ -639,44 +639,33 @@ inline const JSClass* GetObjectJSClass(JSObject* obj) { return js::Jsvalify(GetObjectClass(obj)); } JS_FRIEND_API(const Class*) ProtoKeyToClass(JSProtoKey key); -// Returns true if the standard class identified by |key| inherits from -// another standard class (in addition to Object) along its proto chain. -// -// In practice, this only returns true for Error subtypes. -inline bool -StandardClassIsDependent(JSProtoKey key) -{ - const Class* clasp = ProtoKeyToClass(key); - return clasp && clasp->specDefined() && clasp->specDependent(); -} - // Returns the key for the class inherited by a given standard class (that // is to say, the prototype of this standard class's prototype). // // You must be sure that this corresponds to a standard class with a cached // JSProtoKey before calling this function. In general |key| will match the // cached proto key, except in cases where multiple JSProtoKeys share a // JSClass. inline JSProtoKey -ParentKeyForStandardClass(JSProtoKey key) +InheritanceProtoKeyForStandardClass(JSProtoKey key) { // [Object] has nothing to inherit from. if (key == JSProto_Object) return JSProto_Null; - // If we're dependent, return the key of the class we depend on. - if (StandardClassIsDependent(key)) - return ProtoKeyToClass(key)->specParentKey(); + // If we're ClassSpec defined return the proto key from that + if (ProtoKeyToClass(key)->specDefined()) + return ProtoKeyToClass(key)->specInheritanceProtoKey(); // Otherwise, we inherit [Object]. return JSProto_Object; } JS_FRIEND_API(bool) IsFunctionObject(JSObject* obj);
--- a/js/src/jsfun.cpp +++ b/js/src/jsfun.cpp @@ -986,17 +986,17 @@ js::FunctionToString(JSContext* cx, Hand if (fun->isInterpretedLazy() && !fun->getOrCreateScript(cx)) return nullptr; if (IsAsmJSModule(fun)) return AsmJSModuleToString(cx, fun, !lambdaParen); if (IsAsmJSFunction(fun)) return AsmJSFunctionToString(cx, fun); - if (IsWrappedAsyncFunction(cx, fun)) { + if (IsWrappedAsyncFunction(fun)) { RootedFunction unwrapped(cx, GetUnwrappedAsyncFunction(fun)); return FunctionToString(cx, unwrapped, lambdaParen); } StringBuffer out(cx); RootedScript script(cx); if (fun->hasScript()) { @@ -1925,20 +1925,23 @@ js::Generator(JSContext* cx, unsigned ar bool js::AsyncFunctionConstructor(JSContext* cx, unsigned argc, Value* vp) { CallArgs args = CallArgsFromVp(argc, vp); if (!FunctionConstructor(cx, argc, vp, StarGenerator, AsyncFunction)) return false; - FixedInvokeArgs<1> args2(cx); - args2[0].set(args.rval()); - return CallSelfHostedFunction(cx, cx->names().AsyncFunction_wrap, - NullHandleValue, args2, args.rval()); + RootedFunction unwrapped(cx, &args.rval().toObject().as<JSFunction>()); + RootedObject wrapped(cx, WrapAsyncFunction(cx, unwrapped)); + if (!wrapped) + return false; + + args.rval().setObject(*wrapped); + return true; } bool JSFunction::isBuiltinFunctionConstructor() { return maybeNative() == Function || maybeNative() == Generator; }
--- a/js/src/jsfun.h +++ b/js/src/jsfun.h @@ -389,49 +389,56 @@ class JSFunction : public js::NativeObje // // There are several methods to get the script of an interpreted function: // // - For all interpreted functions, getOrCreateScript() will get the // JSScript, delazifying the function if necessary. This is the safest to // use, but has extra checks, requires a cx and may trigger a GC. // // - For inlined functions which may have a LazyScript but whose JSScript - // is known to exist, existingScriptForInlinedFunction() will get the - // script and delazify the function if necessary. + // is known to exist, existingScript() will get the script and delazify + // the function if necessary. If the function should not be delazified, + // use existingScriptNonDelazifying(). // // - For functions known to have a JSScript, nonLazyScript() will get it. JSScript* getOrCreateScript(JSContext* cx) { MOZ_ASSERT(isInterpreted()); MOZ_ASSERT(cx); if (isInterpretedLazy()) { JS::RootedFunction self(cx, this); if (!createScriptForLazilyInterpretedFunction(cx, self)) return nullptr; return self->nonLazyScript(); } return nonLazyScript(); } - JSScript* existingScriptForInlinedFunction() { + JSScript* existingScriptNonDelazifying() const { MOZ_ASSERT(isInterpreted()); if (isInterpretedLazy()) { // Get the script from the canonical function. Ion used the // canonical function to inline the script and because it has // Baseline code it has not been relazified. Note that we can't // use lazyScript->script_ here as it may be null in some cases, // see bug 976536. 
js::LazyScript* lazy = lazyScript(); JSFunction* fun = lazy->functionNonDelazifying(); MOZ_ASSERT(fun); - JSScript* script = fun->nonLazyScript(); + return fun->nonLazyScript(); + } + return nonLazyScript(); + } + JSScript* existingScript() { + MOZ_ASSERT(isInterpreted()); + if (isInterpretedLazy()) { if (shadowZone()->needsIncrementalBarrier()) - js::LazyScript::writeBarrierPre(lazy); - + js::LazyScript::writeBarrierPre(lazyScript()); + JSScript* script = existingScriptNonDelazifying(); flags_ &= ~INTERPRETED_LAZY; flags_ |= INTERPRETED; initScript(script); } return nonLazyScript(); } // The state of a JSFunction whose script errored out during bytecode
--- a/js/src/jsscript.h +++ b/js/src/jsscript.h @@ -1905,17 +1905,19 @@ class LazyScript : public gc::TenuredCel private: struct PackedView { // Assorted bits that should really be in ScriptSourceObject. uint32_t version : 8; uint32_t shouldDeclareArguments : 1; uint32_t hasThisBinding : 1; uint32_t isAsync : 1; + // The number of bits should match to NumClosedOverBindingsLimit. uint32_t numClosedOverBindings : 21; + // The number of bits should match to NumInnerFunctionsLimit. uint32_t numInnerFunctions : 20; uint32_t generatorKindBits : 2; // N.B. These are booleans but need to be uint32_t to pack correctly on MSVC. // If you add another boolean here, make sure to initialze it in // LazyScript::CreateRaw(). uint32_t strict : 1; @@ -1946,17 +1948,17 @@ class LazyScript : public gc::TenuredCel // Create a LazyScript without initializing the closedOverBindings and the // innerFunctions. To be GC-safe, the caller must initialize both vectors // with valid atoms and functions. static LazyScript* CreateRaw(ExclusiveContext* cx, HandleFunction fun, uint64_t packedData, uint32_t begin, uint32_t end, uint32_t lineno, uint32_t column); public: - static const uint32_t NumClosedOverBindingsLimit = 1 << 22; + static const uint32_t NumClosedOverBindingsLimit = 1 << 21; static const uint32_t NumInnerFunctionsLimit = 1 << 20; // Create a LazyScript and initialize closedOverBindings and innerFunctions // with the provided vectors. static LazyScript* Create(ExclusiveContext* cx, HandleFunction fun, const frontend::AtomVector& closedOverBindings, Handle<GCVector<JSFunction*, 8>> innerFunctions, JSVersion version, uint32_t begin, uint32_t end,
--- a/js/src/moz.build +++ b/js/src/moz.build @@ -63,16 +63,18 @@ CONFIGURE_SUBST_FILES += [ 'js-config', 'js.pc', ] CONFIGURE_DEFINE_FILES += [ 'js-confdefs.h', ] if not CONFIG['JS_STANDALONE']: + LIBRARY_DEFINES['MOZ_HAS_MOZGLUE'] = True + CONFIGURE_SUBST_FILES += [ '../../config/autoconf-js.mk', '../../config/emptyvars-js.mk', ] CONFIGURE_DEFINE_FILES += [ 'js-config.h', ] @@ -653,17 +655,16 @@ if CONFIG['ENABLE_INTL_API']: # Linking 'icu' will pull in the stubdata library, # which the shell doesn't want, so link the other bits. USE_LIBS += [ 'icui18n', 'icuuc', ] USE_LIBS += [ - 'fdlibm', 'nspr', 'zlib', ] if CONFIG['NIGHTLY_BUILD']: DEFINES['ENABLE_BINARYDATA'] = True if CONFIG['NIGHTLY_BUILD']: @@ -745,17 +746,16 @@ GENERATED_FILES += [('selfhosted.out.h', selfhosted = GENERATED_FILES[('selfhosted.out.h', 'selfhosted.js')] selfhosted.script = 'builtin/embedjs.py:generate_selfhosted' selfhosted.inputs = [ 'js.msg', 'builtin/TypedObjectConstants.h', 'builtin/SelfHostingDefines.h', 'builtin/Utilities.js', 'builtin/Array.js', - 'builtin/AsyncFunctions.js', 'builtin/Classes.js', 'builtin/Date.js', 'builtin/Error.js', 'builtin/Function.js', 'builtin/Generator.js', 'builtin/Intl.js', 'builtin/IntlData.js', 'builtin/IntlTzData.js',
--- a/js/src/shell/js.cpp +++ b/js/src/shell/js.cpp @@ -79,16 +79,17 @@ #include "perf/jsperf.h" #include "shell/jsoptparse.h" #include "shell/jsshell.h" #include "shell/OSObject.h" #include "threading/ConditionVariable.h" #include "threading/LockGuard.h" #include "threading/Thread.h" #include "vm/ArgumentsObject.h" +#include "vm/AsyncFunction.h" #include "vm/Compression.h" #include "vm/Debugger.h" #include "vm/HelperThreads.h" #include "vm/Monitor.h" #include "vm/MutexIDs.h" #include "vm/Shape.h" #include "vm/SharedArrayObject.h" #include "vm/StringBuffer.h" @@ -2293,16 +2294,20 @@ ValueToScript(JSContext* cx, HandleValue while (fun->isBoundFunction()) { JSObject* target = fun->getBoundFunctionTarget(); if (target && target->is<JSFunction>()) fun = &target->as<JSFunction>(); else break; } + // Get unwrapped async function. + if (IsWrappedAsyncFunction(fun)) + fun = GetUnwrappedAsyncFunction(fun); + if (!fun->isInterpreted()) { JS_ReportErrorNumberASCII(cx, my_GetErrorMessage, nullptr, JSSMSG_SCRIPTS_ONLY); return nullptr; } JSScript* script = fun->getOrCreateScript(cx); if (!script) return nullptr;
--- a/js/src/tests/lib/jittests.py +++ b/js/src/tests/lib/jittests.py @@ -115,16 +115,17 @@ class JitTest: self.test_also_noasmjs = False # True means run with and without asm.js # enabled. self.test_also_wasm_baseline = False # True means run with and and without # wasm baseline compiler enabled. self.test_also = [] # List of other configurations to test with. self.test_join = [] # List of other configurations to test with all existing variants. self.expect_error = '' # Errors to expect and consider passing self.expect_status = 0 # Exit status to expect from shell + self.expect_crash = False # Exit status or error output. self.is_module = False self.test_reflect_stringify = None # Reflect.stringify implementation to test # Expected by the test runner. Always true for jit-tests. self.enable = True def copy(self): t = JitTest(self.path) @@ -136,16 +137,17 @@ class JitTest: t.valgrind = self.valgrind t.tz_pacific = self.tz_pacific t.test_also_noasmjs = self.test_also_noasmjs t.test_also_wasm_baseline = self.test_also_noasmjs t.test_also = self.test_also t.test_join = self.test_join t.expect_error = self.expect_error t.expect_status = self.expect_status + t.expect_crash = self.expect_crash t.test_reflect_stringify = self.test_reflect_stringify t.enable = True t.is_module = self.is_module return t def copy_and_extend_jitflags(self, variant): t = self.copy() t.jitflags.extend(variant) @@ -226,16 +228,18 @@ class JitTest: if options.can_test_also_wasm_baseline: test.test_also.append(['--wasm-always-baseline']) elif name.startswith('test-also='): test.test_also.append([name[len('test-also='):]]) elif name.startswith('test-join='): test.test_join.append([name[len('test-join='):]]) elif name == 'module': test.is_module = True + elif name == 'crash': + test.expect_crash = True elif name.startswith('--'): # // |jit-test| --ion-gvn=off; --no-sse4 test.jitflags.append(name) else: print('{}: warning: unrecognized |jit-test| attribute' ' {}'.format(path, part)) if 
options.valgrind_all: @@ -365,16 +369,29 @@ def check_output(out, err, rc, timed_out for line in out.split('\n'): if line.startswith('Trace stats check failed'): return False for line in err.split('\n'): if 'Assertion failed:' in line: return False + if test.expect_crash: + if sys.platform == 'win32' and rc == 3 - 2 ** 31: + return True + + if sys.platform != 'win32' and rc == -11: + return True + + # When building with ASan enabled, ASan will convert the -11 returned + # value to 1. As a work-around we look for the error output which + # includes the crash reason. + if rc == 1 and ("Hit MOZ_CRASH" in err or "Assertion failure:" in err): + return True + if rc != test.expect_status: # Tests which expect a timeout check for exit code 6. # Sometimes 0 is returned on Windows for unknown reasons. # See bug 899697. if sys.platform in ['win32', 'cygwin'] and rc == 0: return True # Allow a non-zero exit code if we want to allow OOM, but only if we
--- a/js/src/vm/AsyncFunction.cpp +++ b/js/src/vm/AsyncFunction.cpp @@ -3,18 +3,19 @@ * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "vm/AsyncFunction.h" #include "jscompartment.h" -#include "builtin/SelfHostingDefines.h" +#include "builtin/Promise.h" #include "vm/GlobalObject.h" +#include "vm/Interpreter.h" #include "vm/SelfHosting.h" using namespace js; using namespace js::gc; /* static */ bool GlobalObject::initAsyncFunction(JSContext* cx, Handle<GlobalObject*> global) { @@ -41,78 +42,193 @@ GlobalObject::initAsyncFunction(JSContex if (!LinkConstructorAndPrototype(cx, asyncFunction, asyncFunctionProto)) return false; global->setReservedSlot(ASYNC_FUNCTION, ObjectValue(*asyncFunction)); global->setReservedSlot(ASYNC_FUNCTION_PROTO, ObjectValue(*asyncFunctionProto)); return true; } +static MOZ_MUST_USE bool AsyncFunctionStart(JSContext* cx, Handle<PromiseObject*> resultPromise, + HandleValue generatorVal); + +#define UNWRAPPED_ASYNC_WRAPPED_SLOT 1 +#define WRAPPED_ASYNC_UNWRAPPED_SLOT 0 + +// Async Functions proposal 1.1.8 and 1.2.14. +static bool +WrappedAsyncFunction(JSContext* cx, unsigned argc, Value* vp) +{ + CallArgs args = CallArgsFromVp(argc, vp); + + RootedFunction wrapped(cx, &args.callee().as<JSFunction>()); + RootedValue unwrappedVal(cx, wrapped->getExtendedSlot(WRAPPED_ASYNC_UNWRAPPED_SLOT)); + RootedFunction unwrapped(cx, &unwrappedVal.toObject().as<JSFunction>()); + RootedValue thisValue(cx, args.thisv()); + + // Step 2. + // Also does a part of 2.2 steps 1-2. + RootedValue generatorVal(cx); + InvokeArgs args2(cx); + if (!args2.init(cx, argc)) + return false; + for (size_t i = 0, len = argc; i < len; i++) + args2[i].set(args[i]); + if (Call(cx, unwrappedVal, thisValue, args2, &generatorVal)) { + // Step 1. 
+ Rooted<PromiseObject*> resultPromise(cx, CreatePromiseObjectForAsync(cx, generatorVal)); + if (!resultPromise) + return false; + + // Step 3. + if (!AsyncFunctionStart(cx, resultPromise, generatorVal)) + return false; + + // Step 5. + args.rval().setObject(*resultPromise); + return true; + } + + // Steps 1, 4. + RootedValue exc(cx); + if (!GetAndClearException(cx, &exc)) + return false; + RootedObject rejectPromise(cx, PromiseObject::unforgeableReject(cx, exc)); + if (!rejectPromise) + return false; + + // Step 5. + args.rval().setObject(*rejectPromise); + return true; +} + +// Async Functions proposal 2.1 steps 1, 3 (partially). +// In the spec it creates a function, but we create 2 functions `unwrapped` and +// `wrapped`. `unwrapped` is a generator that corresponds to +// the async function's body, replacing `await` with `yield`. `wrapped` is a +// function that is visible to the outside, and handles yielded values. +JSObject* +js::WrapAsyncFunction(JSContext* cx, HandleFunction unwrapped) +{ + MOZ_ASSERT(unwrapped->isStarGenerator()); + + // Create a new function with AsyncFunctionPrototype, reusing the name and + // the length of `unwrapped`. + + // Step 1. + RootedObject proto(cx, GlobalObject::getOrCreateAsyncFunctionPrototype(cx, cx->global())); + if (!proto) + return nullptr; + + RootedAtom funName(cx, unwrapped->name()); + uint16_t length; + if (!unwrapped->getLength(cx, &length)) + return nullptr; + + // Steps 3 (partially). + RootedFunction wrapped(cx, NewFunctionWithProto(cx, WrappedAsyncFunction, length, + JSFunction::NATIVE_FUN, nullptr, + funName, proto, + AllocKind::FUNCTION_EXTENDED, + TenuredObject)); + if (!wrapped) + return nullptr; + + // Link them to each other to make GetWrappedAsyncFunction and + // GetUnwrappedAsyncFunction work. 
+ unwrapped->setExtendedSlot(UNWRAPPED_ASYNC_WRAPPED_SLOT, ObjectValue(*wrapped)); + wrapped->setExtendedSlot(WRAPPED_ASYNC_UNWRAPPED_SLOT, ObjectValue(*unwrapped)); + + return wrapped; +} + +enum class ResumeKind { + Normal, + Throw +}; + +// Async Functions proposal 2.2 steps 3.f, 3.g. +// Async Functions proposal 2.2 steps 3.d-e, 3.g. +// Implemented in js/src/builtin/Promise.cpp + +// Async Functions proposal 2.2 steps 3-8, 2.4 steps 2-7, 2.5 steps 2-7. +static bool +AsyncFunctionResume(JSContext* cx, Handle<PromiseObject*> resultPromise, HandleValue generatorVal, + ResumeKind kind, HandleValue valueOrReason) +{ + // Execution context switching is handled in generator. + HandlePropertyName funName = kind == ResumeKind::Normal + ? cx->names().StarGeneratorNext + : cx->names().StarGeneratorThrow; + FixedInvokeArgs<1> args(cx); + args[0].set(valueOrReason); + RootedValue result(cx); + if (!CallSelfHostedFunction(cx, funName, generatorVal, args, &result)) + return AsyncFunctionThrown(cx, resultPromise); + + RootedObject resultObj(cx, &result.toObject()); + RootedValue doneVal(cx); + RootedValue value(cx); + if (!GetProperty(cx, resultObj, resultObj, cx->names().done, &doneVal)) + return false; + if (!GetProperty(cx, resultObj, resultObj, cx->names().value, &value)) + return false; + + if (doneVal.toBoolean()) + return AsyncFunctionReturned(cx, resultPromise, value); + + return AsyncFunctionAwait(cx, resultPromise, value); +} + +// Async Functions proposal 2.2 steps 3-8. +static MOZ_MUST_USE bool +AsyncFunctionStart(JSContext* cx, Handle<PromiseObject*> resultPromise, HandleValue generatorVal) +{ + return AsyncFunctionResume(cx, resultPromise, generatorVal, ResumeKind::Normal, UndefinedHandleValue); +} + +// Async Functions proposal 2.3 steps 1-8. +// Implemented in js/src/builtin/Promise.cpp + +// Async Functions proposal 2.4. 
+MOZ_MUST_USE bool +js::AsyncFunctionAwaitedFulfilled(JSContext* cx, Handle<PromiseObject*> resultPromise, + HandleValue generatorVal, HandleValue value) +{ + // Step 1 (implicit). + + // Steps 2-7. + return AsyncFunctionResume(cx, resultPromise, generatorVal, ResumeKind::Normal, value); +} + +// Async Functions proposal 2.5. +MOZ_MUST_USE bool +js::AsyncFunctionAwaitedRejected(JSContext* cx, Handle<PromiseObject*> resultPromise, + HandleValue generatorVal, HandleValue reason) +{ + // Step 1 (implicit). + + // Step 2-7. + return AsyncFunctionResume(cx, resultPromise, generatorVal, ResumeKind::Throw, reason); +} + JSFunction* js::GetWrappedAsyncFunction(JSFunction* unwrapped) { MOZ_ASSERT(unwrapped->isAsync()); - return &unwrapped->getExtendedSlot(ASYNC_WRAPPED_SLOT).toObject().as<JSFunction>(); + return &unwrapped->getExtendedSlot(UNWRAPPED_ASYNC_WRAPPED_SLOT).toObject().as<JSFunction>(); } JSFunction* -js::GetUnwrappedAsyncFunction(JSFunction* wrapper) +js::GetUnwrappedAsyncFunction(JSFunction* wrapped) { - JSFunction* unwrapped = &wrapper->getExtendedSlot(ASYNC_UNWRAPPED_SLOT).toObject().as<JSFunction>(); + MOZ_ASSERT(IsWrappedAsyncFunction(wrapped)); + JSFunction* unwrapped = &wrapped->getExtendedSlot(WRAPPED_ASYNC_UNWRAPPED_SLOT).toObject().as<JSFunction>(); MOZ_ASSERT(unwrapped->isAsync()); return unwrapped; } bool -js::IsWrappedAsyncFunction(JSContext* cx, JSFunction* wrapper) +js::IsWrappedAsyncFunction(JSFunction* fun) { - return IsSelfHostedFunctionWithName(wrapper, cx->names().AsyncWrapped); + return fun->maybeNative() == WrappedAsyncFunction; } -bool -js::CreateAsyncFunction(JSContext* cx, HandleFunction wrapper, HandleFunction unwrapped, - MutableHandleFunction result) -{ - // Create a new function with AsyncFunctionPrototype, reusing the script - // and the environment of `wrapper` function, and the name and the length - // of `unwrapped` function. 
- RootedObject proto(cx, GlobalObject::getOrCreateAsyncFunctionPrototype(cx, cx->global())); - RootedObject scope(cx, wrapper->environment()); - RootedAtom atom(cx, unwrapped->name()); - RootedFunction wrapped(cx, NewFunctionWithProto(cx, nullptr, 0, - JSFunction::INTERPRETED_LAMBDA, - scope, atom, proto, - AllocKind::FUNCTION_EXTENDED, TenuredObject)); - if (!wrapped) - return false; - - wrapped->initScript(wrapper->nonLazyScript()); - - // Link them each other to make GetWrappedAsyncFunction and - // GetUnwrappedAsyncFunction work. - unwrapped->setExtendedSlot(ASYNC_WRAPPED_SLOT, ObjectValue(*wrapped)); - wrapped->setExtendedSlot(ASYNC_UNWRAPPED_SLOT, ObjectValue(*unwrapped)); - - // The script of `wrapper` is self-hosted, so `wrapped` should also be - // set as self-hosted function. - wrapped->setIsSelfHostedBuiltin(); - - // Set LAZY_FUNCTION_NAME_SLOT to "AsyncWrapped" to make it detectable in - // IsWrappedAsyncFunction. - wrapped->setExtendedSlot(LAZY_FUNCTION_NAME_SLOT, StringValue(cx->names().AsyncWrapped)); - - // The length of the script of `wrapper` is different than the length of - // `unwrapped`. We should set actual length as resolved length, to avoid - // using the length of the script. - uint16_t length; - if (!unwrapped->getLength(cx, &length)) - return false; - - RootedValue lengthValue(cx, NumberValue(length)); - if (!DefineProperty(cx, wrapped, cx->names().length, lengthValue, - nullptr, nullptr, JSPROP_READONLY)) - { - return false; - } - - result.set(wrapped); - return true; -}
--- a/js/src/vm/AsyncFunction.h +++ b/js/src/vm/AsyncFunction.h @@ -11,20 +11,27 @@ #include "jsobj.h" namespace js { JSFunction* GetWrappedAsyncFunction(JSFunction* unwrapped); JSFunction* -GetUnwrappedAsyncFunction(JSFunction* wrapper); +GetUnwrappedAsyncFunction(JSFunction* wrapped); bool -IsWrappedAsyncFunction(JSContext* cx, JSFunction* wrapper); +IsWrappedAsyncFunction(JSFunction* fun); + +JSObject* +WrapAsyncFunction(JSContext* cx, HandleFunction unwrapped); -bool -CreateAsyncFunction(JSContext* cx, HandleFunction wrapper, HandleFunction unwrapped, - MutableHandleFunction result); +MOZ_MUST_USE bool +AsyncFunctionAwaitedFulfilled(JSContext* cx, Handle<PromiseObject*> resultPromise, + HandleValue generatorVal, HandleValue value); + +MOZ_MUST_USE bool +AsyncFunctionAwaitedRejected(JSContext* cx, Handle<PromiseObject*> resultPromise, + HandleValue generatorVal, HandleValue reason); } // namespace js #endif /* vm_AsyncFunction_h */
--- a/js/src/vm/CommonPropertyNames.h +++ b/js/src/vm/CommonPropertyNames.h @@ -25,17 +25,16 @@ macro(ArraySpeciesCreate, ArraySpeciesCreate, "ArraySpeciesCreate") \ macro(ArrayToLocaleString, ArrayToLocaleString, "ArrayToLocaleString") \ macro(ArrayType, ArrayType, "ArrayType") \ macro(ArrayValues, ArrayValues, "ArrayValues") \ macro(ArrayValuesAt, ArrayValuesAt, "ArrayValuesAt") \ macro(as, as, "as") \ macro(Async, Async, "Async") \ macro(AsyncFunction, AsyncFunction, "AsyncFunction") \ - macro(AsyncFunction_wrap, AsyncFunction_wrap, "AsyncFunction_wrap") \ macro(AsyncWrapped, AsyncWrapped, "AsyncWrapped") \ macro(async, async, "async") \ macro(await, await, "await") \ macro(Bool8x16, Bool8x16, "Bool8x16") \ macro(Bool16x8, Bool16x8, "Bool16x8") \ macro(Bool32x4, Bool32x4, "Bool32x4") \ macro(Bool64x2, Bool64x2, "Bool64x2") \ macro(boundWithSpace, boundWithSpace, "bound ") \ @@ -275,16 +274,18 @@ macro(setPrototypeOf, setPrototypeOf, "setPrototypeOf") \ macro(shape, shape, "shape") \ macro(size, size, "size") \ macro(source, source, "source") \ macro(SpeciesConstructor, SpeciesConstructor, "SpeciesConstructor") \ macro(stack, stack, "stack") \ macro(star, star, "*") \ macro(starDefaultStar, starDefaultStar, "*default*") \ + macro(StarGeneratorNext, StarGeneratorNext, "StarGeneratorNext") \ + macro(StarGeneratorThrow, StarGeneratorThrow, "StarGeneratorThrow") \ macro(startTimestamp, startTimestamp, "startTimestamp") \ macro(state, state, "state") \ macro(static, static_, "static") \ macro(std_Function_apply, std_Function_apply, "std_Function_apply") \ macro(sticky, sticky, "sticky") \ macro(StringIterator, StringIterator, "String Iterator") \ macro(strings, strings, "strings") \ macro(StructType, StructType, "StructType") \
--- a/js/src/vm/Debugger.cpp +++ b/js/src/vm/Debugger.cpp @@ -1556,19 +1556,18 @@ ParseResumptionValue(JSContext* cx, Hand static bool CheckResumptionValue(JSContext* cx, AbstractFramePtr frame, const Maybe<HandleValue>& maybeThisv, JSTrapStatus status, MutableHandleValue vp) { if (maybeThisv.isSome()) { const HandleValue& thisv = maybeThisv.ref(); if (status == JSTRAP_RETURN && vp.isPrimitive()) { if (vp.isUndefined()) { - if (thisv.isMagic(JS_UNINITIALIZED_LEXICAL)) { + if (thisv.isMagic(JS_UNINITIALIZED_LEXICAL)) return ThrowUninitializedThis(cx, frame); - } vp.set(thisv); } else { ReportValueError(cx, JSMSG_BAD_DERIVED_RETURN, JSDVG_IGNORE_STACK, vp, nullptr); return false; } } }
--- a/js/src/vm/EnvironmentObject.cpp +++ b/js/src/vm/EnvironmentObject.cpp @@ -1250,17 +1250,17 @@ EnvironmentIter::settle() #ifdef DEBUG if (si_) { if (hasSyntacticEnvironment()) { Scope* scope = si_.scope(); if (scope->is<LexicalScope>()) { MOZ_ASSERT(scope == &env_->as<LexicalEnvironmentObject>().scope()); } else if (scope->is<FunctionScope>()) { MOZ_ASSERT(scope->as<FunctionScope>().script() == - env_->as<CallObject>().callee().nonLazyScript()); + env_->as<CallObject>().callee().existingScriptNonDelazifying()); } else if (scope->is<VarScope>()) { MOZ_ASSERT(scope == &env_->as<VarEnvironmentObject>().scope()); } else if (scope->is<WithScope>()) { MOZ_ASSERT(scope == &env_->as<WithEnvironmentObject>().scope()); } else if (scope->is<EvalScope>()) { MOZ_ASSERT(scope == &env_->as<VarEnvironmentObject>().scope()); } else if (scope->is<GlobalScope>()) { MOZ_ASSERT(env_->is<GlobalObject>() || IsGlobalLexicalEnvironment(env_)); @@ -3128,17 +3128,17 @@ js::GetThisValueForDebuggerMaybeOptimize BindingLocation loc = bi.location(); if (loc.kind() == BindingLocation::Kind::Environment) { RootedObject callObj(cx, &ei.environment().as<CallObject>()); return GetProperty(cx, callObj, callObj, bi.name()->asPropertyName(), res); } if (loc.kind() == BindingLocation::Kind::Frame && ei.withinInitialFrame()) - res.set(frame.unaliasedLocal(bi.location().slot())); + res.set(frame.unaliasedLocal(loc.slot())); else res.setMagic(JS_OPTIMIZED_OUT); return true; } MOZ_CRASH("'this' binding must be found"); }
--- a/js/src/vm/GlobalObject.cpp +++ b/js/src/vm/GlobalObject.cpp @@ -223,21 +223,19 @@ GlobalObject::resolveConstructor(JSConte RootedValue ctorValue(cx, ObjectValue(*ctor)); if (!DefineProperty(cx, global, id, ctorValue, nullptr, nullptr, JSPROP_RESOLVING)) return false; } global->setConstructor(key, ObjectValue(*ctor)); } - // Define any specified functions and properties, unless we're a dependent - // standard class (in which case they live on the prototype), or we're - // operating on the self-hosting global, in which case we don't want any + // If we're operating on the self-hosting global, we don't want any // functions and properties on the builtins and their prototypes. - if (!StandardClassIsDependent(key) && !cx->runtime()->isSelfHostingGlobal(global)) { + if (!cx->runtime()->isSelfHostingGlobal(global)) { if (const JSFunctionSpec* funs = clasp->specPrototypeFunctions()) { if (!JS_DefineFunctions(cx, proto, funs)) return false; } if (const JSPropertySpec* props = clasp->specPrototypeProperties()) { if (!JS_DefineProperties(cx, proto, props)) return false; }
--- a/js/src/vm/GlobalObject.h +++ b/js/src/vm/GlobalObject.h @@ -981,20 +981,20 @@ GenericCreateConstructor(JSContext* cx, } inline JSObject* GenericCreatePrototype(JSContext* cx, JSProtoKey key) { MOZ_ASSERT(key != JSProto_Object); const Class* clasp = ProtoKeyToClass(key); MOZ_ASSERT(clasp); - JSProtoKey parentKey = ParentKeyForStandardClass(key); - if (!GlobalObject::ensureConstructor(cx, cx->global(), parentKey)) + JSProtoKey protoKey = InheritanceProtoKeyForStandardClass(key); + if (!GlobalObject::ensureConstructor(cx, cx->global(), protoKey)) return nullptr; - RootedObject parentProto(cx, &cx->global()->getPrototype(parentKey).toObject()); + RootedObject parentProto(cx, &cx->global()->getPrototype(protoKey).toObject()); return cx->global()->createBlankPrototypeInheriting(cx, clasp, parentProto); } inline JSProtoKey StandardProtoKeyOrNull(const JSObject* obj) { JSProtoKey key = JSCLASS_CACHED_PROTO_KEY(obj->getClass()); if (key == JSProto_Error)
--- a/js/src/vm/Interpreter.cpp +++ b/js/src/vm/Interpreter.cpp @@ -33,16 +33,17 @@ #include "jsscript.h" #include "jsstr.h" #include "builtin/Eval.h" #include "jit/AtomicOperations.h" #include "jit/BaselineJIT.h" #include "jit/Ion.h" #include "jit/IonAnalysis.h" +#include "vm/AsyncFunction.h" #include "vm/Debugger.h" #include "vm/GeneratorObject.h" #include "vm/Opcodes.h" #include "vm/Scope.h" #include "vm/Shape.h" #include "vm/Stopwatch.h" #include "vm/TraceLogging.h" @@ -1864,17 +1865,16 @@ CASE(EnableInterruptsPseudoOpcode) /* Commence executing the actual opcode. */ SANITY_CHECKS(); DISPATCH_TO(op); } /* Various 1-byte no-ops. */ CASE(JSOP_NOP) CASE(JSOP_NOP_DESTRUCTURING) -CASE(JSOP_UNUSED149) CASE(JSOP_UNUSED182) CASE(JSOP_UNUSED183) CASE(JSOP_UNUSED187) CASE(JSOP_UNUSED192) CASE(JSOP_UNUSED209) CASE(JSOP_UNUSED210) CASE(JSOP_UNUSED211) CASE(JSOP_UNUSED219) @@ -3475,16 +3475,28 @@ CASE(JSOP_LAMBDA_ARROW) if (!obj) goto error; MOZ_ASSERT(obj->staticPrototype()); REGS.sp[-1].setObject(*obj); } END_CASE(JSOP_LAMBDA_ARROW) +CASE(JSOP_TOASYNC) +{ + ReservedRooted<JSFunction*> unwrapped(&rootFunction0, + ®S.sp[-1].toObject().as<JSFunction>()); + JSObject* wrapped = WrapAsyncFunction(cx, unwrapped); + if (!wrapped) + goto error; + + REGS.sp[-1].setObject(*wrapped); +} +END_CASE(JSOP_TOASYNC) + CASE(JSOP_CALLEE) MOZ_ASSERT(REGS.fp()->isFunctionFrame()); PUSH_COPY(REGS.fp()->calleev()); END_CASE(JSOP_CALLEE) CASE(JSOP_INITPROP_GETTER) CASE(JSOP_INITHIDDENPROP_GETTER) CASE(JSOP_INITPROP_SETTER)
--- a/js/src/vm/Opcodes.h +++ b/js/src/vm/Opcodes.h @@ -1524,17 +1524,25 @@ 1234567890123456789012345678901234567890 * Push "new.target" * * Category: Variables and Scopes * Type: Arguments * Operands: * Stack: => new.target */ \ macro(JSOP_NEWTARGET, 148, "newtarget", NULL, 1, 0, 1, JOF_BYTE) \ - macro(JSOP_UNUSED149, 149, "unused149", NULL, 1, 0, 0, JOF_BYTE) \ + /* + * Pops the top of stack value as 'unwrapped', converts it to async + * function 'wrapped', and pushes 'wrapped' back on the stack. + * Category: Statements + * Type: Function + * Operands: + * Stack: unwrapped => wrapped + */ \ + macro(JSOP_TOASYNC, 149, "toasync", NULL, 1, 1, 1, JOF_BYTE) \ /* * Pops the top two values 'lval' and 'rval' from the stack, then pushes * the result of 'Math.pow(lval, rval)'. * Category: Operators * Type: Arithmetic Operators * Operands: * Stack: lval, rval => (lval ** rval) */ \
--- a/js/src/vm/SelfHosting.cpp +++ b/js/src/vm/SelfHosting.cpp @@ -33,17 +33,16 @@ #include "builtin/TypedObject.h" #include "builtin/WeakSetObject.h" #include "gc/Marking.h" #include "gc/Policy.h" #include "jit/AtomicOperations.h" #include "jit/InlinableNatives.h" #include "js/CharacterEncoding.h" #include "js/Date.h" -#include "vm/AsyncFunction.h" #include "vm/Compression.h" #include "vm/GeneratorObject.h" #include "vm/Interpreter.h" #include "vm/RegExpObject.h" #include "vm/String.h" #include "vm/StringBuffer.h" #include "vm/TypedArrayCommon.h" #include "vm/WrapperObject.h" @@ -1860,33 +1859,16 @@ js::ReportIncompatibleSelfHostedMethod(J } ++iter; } MOZ_ASSERT_UNREACHABLE("How did we not find a useful self-hosted frame?"); return false; } -bool -intrinsic_CreateAsyncFunction(JSContext* cx, unsigned argc, Value* vp) -{ - CallArgs args = CallArgsFromVp(argc, vp); - MOZ_ASSERT(args.length() == 2); - - RootedFunction wrapper(cx, &args[0].toObject().as<JSFunction>()); - RootedFunction unwrapped(cx, &args[1].toObject().as<JSFunction>()); - - RootedFunction wrapped(cx); - if (!CreateAsyncFunction(cx, wrapper, unwrapped, &wrapped)) - return false; - - args.rval().setObject(*wrapped); - return true; -} - /** * Returns the default locale as a well-formed, but not necessarily canonicalized, * BCP-47 language tag. 
*/ static bool intrinsic_RuntimeDefaultLocale(JSContext* cx, unsigned argc, Value* vp) { CallArgs args = CallArgsFromVp(argc, vp); @@ -2276,18 +2258,16 @@ static const JSFunctionSpec intrinsic_fu JS_FN("MakeDefaultConstructor", intrinsic_MakeDefaultConstructor, 2,0), JS_FN("_ConstructorForTypedArray", intrinsic_ConstructorForTypedArray, 1,0), JS_FN("_NameForTypedArray", intrinsic_NameForTypedArray, 1,0), JS_FN("DecompileArg", intrinsic_DecompileArg, 2,0), JS_FN("_FinishBoundFunctionInit", intrinsic_FinishBoundFunctionInit, 3,0), JS_FN("RuntimeDefaultLocale", intrinsic_RuntimeDefaultLocale, 0,0), JS_FN("AddContentTelemetry", intrinsic_AddContentTelemetry, 2,0), - JS_FN("CreateAsyncFunction", intrinsic_CreateAsyncFunction, 1,0), - JS_INLINABLE_FN("_IsConstructing", intrinsic_IsConstructing, 0,0, IntrinsicIsConstructing), JS_INLINABLE_FN("SubstringKernel", intrinsic_SubstringKernel, 3,0, IntrinsicSubstringKernel), JS_INLINABLE_FN("_DefineDataProperty", intrinsic_DefineDataProperty, 4,0, IntrinsicDefineDataProperty), JS_INLINABLE_FN("ObjectHasPrototype", intrinsic_ObjectHasPrototype, 2,0, IntrinsicObjectHasPrototype),
--- a/js/src/vm/Stack.cpp +++ b/js/src/vm/Stack.cpp @@ -106,17 +106,17 @@ AssertScopeMatchesEnvironment(Scope* sco !env->as<LexicalEnvironmentObject>().isSyntactic())) { MOZ_ASSERT(!IsSyntacticEnvironment(env)); env = &env->as<EnvironmentObject>().enclosingEnvironment(); } } else if (si.hasSyntacticEnvironment()) { switch (si.kind()) { case ScopeKind::Function: - MOZ_ASSERT(env->as<CallObject>().callee().nonLazyScript() == + MOZ_ASSERT(env->as<CallObject>().callee().existingScriptNonDelazifying() == si.scope()->as<FunctionScope>().script()); env = &env->as<CallObject>().enclosingEnvironment(); break; case ScopeKind::FunctionBodyVar: case ScopeKind::ParameterExpressionVar: MOZ_ASSERT(&env->as<VarEnvironmentObject>().scope() == si.scope()); env = &env->as<VarEnvironmentObject>().enclosingEnvironment();
--- a/js/src/vm/TypedArrayObject.cpp +++ b/js/src/vm/TypedArrayObject.cpp @@ -352,30 +352,16 @@ class TypedArrayObjectTemplate : public if (fun) fun->setJitInfo(&jit::JitInfo_TypedArrayConstructor); return fun; } static bool - finishClassInit(JSContext* cx, HandleObject ctor, HandleObject proto) - { - RootedValue bytesValue(cx, Int32Value(BYTES_PER_ELEMENT)); - if (!DefineProperty(cx, ctor, cx->names().BYTES_PER_ELEMENT, bytesValue, - nullptr, nullptr, JSPROP_PERMANENT | JSPROP_READONLY) || - !DefineProperty(cx, proto, cx->names().BYTES_PER_ELEMENT, bytesValue, - nullptr, nullptr, JSPROP_PERMANENT | JSPROP_READONLY)) - { - return false; - } - return true; - } - - static bool getOrCreateCreateArrayFromBufferFunction(JSContext* cx, MutableHandleValue fval) { RootedValue cache(cx, cx->global()->createArrayFromBuffer<NativeType>()); if (cache.isObject()) { MOZ_ASSERT(cache.toObject().is<JSFunction>()); fval.set(cache); return true; } @@ -2627,25 +2613,44 @@ static const ClassOps TypedArrayClassOps TypedArrayObject::trace, /* trace */ }; static const ClassExtension TypedArrayClassExtension = { nullptr, TypedArrayObject::objectMoved, }; +#define IMPL_TYPED_ARRAY_PROPERTIES(_type) \ +{ \ +JS_INT32_PS("BYTES_PER_ELEMENT", _type##Array::BYTES_PER_ELEMENT, \ + JSPROP_READONLY | JSPROP_PERMANENT), \ +JS_PS_END \ +} + +static const JSPropertySpec static_prototype_properties[Scalar::MaxTypedArrayViewType][2] = { + IMPL_TYPED_ARRAY_PROPERTIES(Int8), + IMPL_TYPED_ARRAY_PROPERTIES(Uint8), + IMPL_TYPED_ARRAY_PROPERTIES(Int16), + IMPL_TYPED_ARRAY_PROPERTIES(Uint16), + IMPL_TYPED_ARRAY_PROPERTIES(Int32), + IMPL_TYPED_ARRAY_PROPERTIES(Uint32), + IMPL_TYPED_ARRAY_PROPERTIES(Float32), + IMPL_TYPED_ARRAY_PROPERTIES(Float64), + IMPL_TYPED_ARRAY_PROPERTIES(Uint8Clamped) +}; + #define IMPL_TYPED_ARRAY_CLASS_SPEC(_type) \ { \ _type##Array::createConstructor, \ _type##Array::createPrototype, \ nullptr, \ - nullptr, \ + static_prototype_properties[Scalar::Type::_type], \ nullptr, \ + 
static_prototype_properties[Scalar::Type::_type], \ nullptr, \ - _type##Array::finishClassInit, \ JSProto_TypedArray \ } static const ClassSpec TypedArrayObjectClassSpecs[Scalar::MaxTypedArrayViewType] = { IMPL_TYPED_ARRAY_CLASS_SPEC(Int8), IMPL_TYPED_ARRAY_CLASS_SPEC(Uint8), IMPL_TYPED_ARRAY_CLASS_SPEC(Int16), IMPL_TYPED_ARRAY_CLASS_SPEC(Uint16),
--- a/js/src/wasm/WasmModule.cpp +++ b/js/src/wasm/WasmModule.cpp @@ -521,38 +521,47 @@ Module::extractCode(JSContext* cx, Mutab RootedValue value(cx, ObjectValue(*code)); if (!JS_DefineProperty(cx, result, "code", value, JSPROP_ENUMERATE)) return false; RootedObject segments(cx, NewDenseEmptyArray(cx)); if (!segments) return false; - for (auto p = metadata_->codeRanges.begin(); p != metadata_->codeRanges.end(); p++) { + for (const CodeRange& p : metadata_->codeRanges) { RootedObject segment(cx, NewObjectWithGivenProto<PlainObject>(cx, nullptr)); - value.setNumber((uint32_t)p->begin()); + if (!segment) + return false; + + value.setNumber((uint32_t)p.begin()); if (!JS_DefineProperty(cx, segment, "begin", value, JSPROP_ENUMERATE)) return false; - value.setNumber((uint32_t)p->end()); + + value.setNumber((uint32_t)p.end()); if (!JS_DefineProperty(cx, segment, "end", value, JSPROP_ENUMERATE)) return false; - value.setNumber((uint32_t)p->kind()); + + value.setNumber((uint32_t)p.kind()); if (!JS_DefineProperty(cx, segment, "kind", value, JSPROP_ENUMERATE)) return false; - if (p->isFunction()) { - value.setNumber((uint32_t)p->funcIndex()); + + if (p.isFunction()) { + value.setNumber((uint32_t)p.funcIndex()); if (!JS_DefineProperty(cx, segment, "funcIndex", value, JSPROP_ENUMERATE)) return false; - value.setNumber((uint32_t)p->funcNonProfilingEntry()); + + value.setNumber((uint32_t)p.funcNonProfilingEntry()); if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value, JSPROP_ENUMERATE)) return false; - value.setNumber((uint32_t)p->funcProfilingEpilogue()); + + value.setNumber((uint32_t)p.funcProfilingEpilogue()); if (!JS_DefineProperty(cx, segment, "funcBodyEnd", value, JSPROP_ENUMERATE)) return false; } + if (!NewbornArrayPush(cx, segments, ObjectValue(*segment))) return false; } value.setObject(*segments); if (!JS_DefineProperty(cx, result, "segments", value, JSPROP_ENUMERATE)) return false;
--- a/js/src/wasm/WasmStubs.cpp +++ b/js/src/wasm/WasmStubs.cpp @@ -334,18 +334,21 @@ StackCopy(MacroAssembler& masm, MIRType masm.store32(scratch, Address(dst.base, dst.offset + INT64LOW_OFFSET)); masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch); masm.store32(scratch, Address(dst.base, dst.offset + INT64HIGH_OFFSET)); #else Register64 scratch64(scratch); masm.load64(src, scratch64); masm.store64(scratch64, dst); #endif + } else if (type == MIRType::Float32) { + masm.loadFloat32(src, ScratchFloat32Reg); + masm.storeFloat32(ScratchFloat32Reg, dst); } else { - MOZ_ASSERT(IsFloatingPointType(type)); + MOZ_ASSERT(type == MIRType::Double); masm.loadDouble(src, ScratchDoubleReg); masm.storeDouble(ScratchDoubleReg, dst); } } typedef bool ToValue; static void
--- a/js/xpconnect/wrappers/XrayWrapper.cpp +++ b/js/xpconnect/wrappers/XrayWrapper.cpp @@ -426,20 +426,20 @@ TryResolvePropertyFromSpecs(JSContext* c } else { desc.setGetter(JS_CAST_NATIVE_TO(psMatch->accessors.getter.native.op, JSGetterOp)); desc.setSetter(JS_CAST_NATIVE_TO(psMatch->accessors.setter.native.op, JSSetterOp)); } desc.setAttributes(flags); } else { - RootedString atom(cx, JS_AtomizeString(cx, psMatch->string.value)); - if (!atom) + RootedValue v(cx); + if (!psMatch->getValue(cx, &v)) return false; - desc.value().setString(atom); + desc.value().set(v); desc.setAttributes(flags & ~JSPROP_INTERNAL_USE_BIT); } // The generic Xray machinery only defines non-own properties on the holder. // This is broken, and will be fixed at some point, but for now we need to // cache the value explicitly. See the corresponding call to // JS_GetPropertyById at the top of JSXrayTraits::resolveOwnProperty. //
--- a/js/xpconnect/wrappers/XrayWrapper.h +++ b/js/xpconnect/wrappers/XrayWrapper.h @@ -272,22 +272,22 @@ public: bool getPrototype(JSContext* cx, JS::HandleObject wrapper, JS::HandleObject target, JS::MutableHandleObject protop) { JS::RootedObject holder(cx, ensureHolder(cx, wrapper)); JSProtoKey key = getProtoKey(holder); if (isPrototype(holder)) { - JSProtoKey parentKey = js::ParentKeyForStandardClass(key); - if (parentKey == JSProto_Null) { + JSProtoKey protoKey = js::InheritanceProtoKeyForStandardClass(key); + if (protoKey == JSProto_Null) { protop.set(nullptr); return true; } - key = parentKey; + key = protoKey; } { JSAutoCompartment ac(cx, target); if (!JS_GetClassPrototype(cx, key, protop)) return false; } return JS_WrapObject(cx, protop);
--- a/layout/build/nsLayoutStatics.cpp +++ b/layout/build/nsLayoutStatics.cpp @@ -62,17 +62,16 @@ #include "DOMStorageObserver.h" #include "CacheObserver.h" #include "DisplayItemClip.h" #include "ActiveLayerTracker.h" #include "CounterStyleManager.h" #include "FrameLayerBuilder.h" #include "AnimationCommon.h" #include "LayerAnimationInfo.h" -#include "mozilla/dom/VideoDecoderManagerChild.h" #include "AudioChannelService.h" #include "mozilla/dom/PromiseDebugging.h" #include "mozilla/dom/WebCryptoThreadPool.h" #ifdef MOZ_XUL #include "nsXULPopupManager.h" #include "nsXULContentUtils.h" @@ -297,18 +296,16 @@ nsLayoutStatics::Initialize() #ifdef DEBUG nsStyleContext::Initialize(); mozilla::LayerAnimationInfo::Initialize(); #endif MediaDecoder::InitStatics(); - VideoDecoderManagerChild::Initialize(); - PromiseDebugging::Init(); mozilla::dom::devicestorage::DeviceStorageStatics::Initialize(); mozilla::dom::WebCryptoThreadPool::Initialize(); // NB: We initialize servo in nsAppRunner.cpp, because we need to do it after // creating the hidden DOM window to support some current stylo hacks. We
--- a/layout/generic/ReflowInput.cpp +++ b/layout/generic/ReflowInput.cpp @@ -1410,18 +1410,21 @@ ReflowInput::CalculateHypotheticalPositi // like a XUL box, etc. // Just use the placeholder's block-offset aHypotheticalPos.mBStart = placeholderOffset.B(wm); } // Second, determine the hypothetical box's mIStart. // How we determine the hypothetical box depends on whether the element // would have been inline-level or block-level - if (mStyleDisplay->IsOriginalDisplayInlineOutsideStyle()) { - // The placeholder represents the left edge of the hypothetical box + if (mStyleDisplay->IsOriginalDisplayInlineOutsideStyle() || + mFlags.mIOffsetsNeedCSSAlign) { + // The placeholder represents the IStart edge of the hypothetical box. + // (Or if mFlags.mIOffsetsNeedCSSAlign is set, it represents the IStart + // edge of the Alignment Container.) aHypotheticalPos.mIStart = placeholderOffset.I(wm); } else { aHypotheticalPos.mIStart = blockIStartContentEdge; } // The current coordinate space is that of the nearest block to the placeholder. 
// Convert to the coordinate space of the absolute containing block // One weird thing here is that for fixed-positioned elements we want to do @@ -1559,42 +1562,41 @@ ReflowInput::InitAbsoluteConstraints(nsP bool bStartIsAuto = styleOffset.GetBStartUnit(cbwm) == eStyleUnit_Auto; bool bEndIsAuto = styleOffset.GetBEndUnit(cbwm) == eStyleUnit_Auto; // If both 'left' and 'right' are 'auto' or both 'top' and 'bottom' are // 'auto', then compute the hypothetical box position where the element would // have been if it had been in the flow nsHypotheticalPosition hypotheticalPos; if ((iStartIsAuto && iEndIsAuto) || (bStartIsAuto && bEndIsAuto)) { + nsIFrame* placeholderFrame = + aPresContext->PresShell()->GetPlaceholderFrameFor(mFrame); + NS_ASSERTION(placeholderFrame, "no placeholder frame"); + + if (placeholderFrame->HasAnyStateBits( + PLACEHOLDER_STATICPOS_NEEDS_CSSALIGN)) { + DebugOnly<nsIFrame*> placeholderParent = placeholderFrame->GetParent(); + MOZ_ASSERT(placeholderParent, "shouldn't have unparented placeholders"); + MOZ_ASSERT(placeholderParent->IsFlexOrGridContainer(), + "This flag should only be set on grid/flex children"); + + // If the (as-yet unknown) static position will determine the inline + // and/or block offsets, set flags to note those offsets aren't valid + // until we can do CSS Box Alignment on the OOF frame. + mFlags.mIOffsetsNeedCSSAlign = (iStartIsAuto && iEndIsAuto); + mFlags.mBOffsetsNeedCSSAlign = (bStartIsAuto && bEndIsAuto); + } + if (mFlags.mStaticPosIsCBOrigin) { - // XXXdholbert This whole clause should be removed in bug 1269017, where - // we'll be making abpsos grid children share our CSS Box Alignment code. 
hypotheticalPos.mWritingMode = cbwm; hypotheticalPos.mIStart = nscoord(0); hypotheticalPos.mBStart = nscoord(0); } else { - nsIFrame* placeholderFrame = - aPresContext->PresShell()->GetPlaceholderFrameFor(mFrame); - NS_ASSERTION(placeholderFrame, "no placeholder frame"); CalculateHypotheticalPosition(aPresContext, placeholderFrame, cbrs, hypotheticalPos, aFrameType); - - if (placeholderFrame->HasAnyStateBits( - PLACEHOLDER_STATICPOS_NEEDS_CSSALIGN)) { - DebugOnly<nsIFrame*> placeholderParent = placeholderFrame->GetParent(); - MOZ_ASSERT(placeholderParent, "shouldn't have unparented placeholders"); - MOZ_ASSERT(placeholderParent->IsFlexOrGridContainer(), - "This flag should only be set on grid/flex children"); - - // If the (as-yet unknown) static position will determine the inline - // and/or block offsets, set flags to note those offsets aren't valid - // until we can do CSS Box Alignment on the OOF frame. - mFlags.mIOffsetsNeedCSSAlign = (iStartIsAuto && iEndIsAuto); - mFlags.mBOffsetsNeedCSSAlign = (bStartIsAuto && bEndIsAuto); - } } } // Initialize the 'left' and 'right' computed offsets // XXX Handle new 'static-position' value... // Size of the containing block in its writing mode LogicalSize cbSize = aCBSize;
--- a/layout/generic/ReflowInput.h +++ b/layout/generic/ReflowInput.h @@ -229,18 +229,16 @@ public: // When these bits are set, the offset values (IStart/IEnd, BStart/BEnd) // represent the "start" edge of the frame's CSS Box Alignment container // area, in that axis -- and these offsets need to be further-resolved // (with CSS Box Alignment) after we know the OOF frame's size. // NOTE: The "I" and "B" (for "Inline" and "Block") refer the axes of the // *containing block's writing-mode*, NOT mFrame's own writing-mode. This // is purely for convenience, since that's the writing-mode we're dealing // with when we set & react to these bits. - // XXXdholbert These new bits will probably make the "mStaticPosIsCBOrigin" - // bit obsolete -- consider removing it in bug 1269017. uint32_t mIOffsetsNeedCSSAlign:1; uint32_t mBOffsetsNeedCSSAlign:1; }; #ifdef DEBUG // Reflow trace methods. Defined in nsFrame.cpp so they have access // to the display-reflow infrastructure. static void* DisplayInitOffsetsEnter( @@ -724,18 +722,20 @@ public: // The caller wants shrink-wrap behavior (i.e. ComputeSizeFlags::eShrinkWrap // will be passed to ComputeSize()). COMPUTE_SIZE_SHRINK_WRAP = (1<<2), // The caller wants 'auto' bsize behavior (ComputeSizeFlags::eUseAutoBSize // will be be passed to ComputeSize()). COMPUTE_SIZE_USE_AUTO_BSIZE = (1<<3), - // The caller wants the abs.pos. static-position resolved at the origin - // of the containing block, i.e. at LogicalPoint(0, 0). + // The caller wants the abs.pos. static-position resolved at the origin of + // the containing block, i.e. at LogicalPoint(0, 0). (Note that this + // doesn't necessarily mean that (0, 0) is the *correct* static position + // for the frame in question.) STATIC_POS_IS_CB_ORIGIN = (1<<4), // Pass ComputeSizeFlags::eIClampMarginBoxMinSize to ComputeSize(). I_CLAMP_MARGIN_BOX_MIN_SIZE = (1<<5), // Pass ComputeSizeFlags::eBClampMarginBoxMinSize to ComputeSize(). B_CLAMP_MARGIN_BOX_MIN_SIZE = (1<<6), };
--- a/layout/generic/nsAbsoluteContainingBlock.cpp +++ b/layout/generic/nsAbsoluteContainingBlock.cpp @@ -360,29 +360,31 @@ GetPlaceholderContainer(nsPresContext* a * This function returns the offset of an abs/fixed-pos child's static * position, with respect to the "start" corner of its alignment container, * according to CSS Box Alignment. This function only operates in a single * axis at a time -- callers can choose which axis via the |aAbsPosCBAxis| * parameter. * * @param aKidReflowInput The ReflowInput for the to-be-aligned abspos child. * @param aKidSizeInAbsPosCBWM The child frame's size (after it's been given - * the opportunity to reflow), in terms of the - * containing block's WritingMode. + * the opportunity to reflow), in terms of + * aAbsPosCBWM. + * @param aAbsPosCBSize The abspos CB size, in terms of aAbsPosCBWM. * @param aPlaceholderContainer The parent of the child frame's corresponding * placeholder frame, cast to a nsContainerFrame. * (This will help us choose which alignment enum * we should use for the child.) * @param aAbsPosCBWM The child frame's containing block's WritingMode. * @param aAbsPosCBAxis The axis (of the containing block) that we should * be doing this computation for. */ static nscoord OffsetToAlignedStaticPos(const ReflowInput& aKidReflowInput, const LogicalSize& aKidSizeInAbsPosCBWM, + const LogicalSize& aAbsPosCBSize, nsContainerFrame* aPlaceholderContainer, WritingMode aAbsPosCBWM, LogicalAxis aAbsPosCBAxis) { if (!aPlaceholderContainer) { // (The placeholder container should be the thing that kicks this whole // process off, by setting PLACEHOLDER_STATICPOS_NEEDS_CSSALIGN. So it // should exist... but bail gracefully if it doesn't.) @@ -405,20 +407,40 @@ OffsetToAlignedStaticPos(const ReflowInp // writing-mode. LogicalAxis pcAxis = (pcWM.IsOrthogonalTo(aAbsPosCBWM) ? 
GetOrthogonalAxis(aAbsPosCBAxis) : aAbsPosCBAxis); nsIAtom* parentType = aPlaceholderContainer->GetType(); LogicalSize alignAreaSize(pcWM); if (parentType == nsGkAtoms::flexContainerFrame) { + // The alignment container is the flex container's content box: alignAreaSize = aPlaceholderContainer->GetLogicalSize(pcWM); LogicalMargin pcBorderPadding = aPlaceholderContainer->GetLogicalUsedBorderAndPadding(pcWM); alignAreaSize -= pcBorderPadding.Size(pcWM); + } else if (parentType == nsGkAtoms::gridContainerFrame) { + // This abspos elem's parent is a grid container. Per CSS Grid 10.1 & 10.2: + // - If the grid container *also* generates the abspos containing block (a + // grid area) for this abspos child, we use that abspos containing block as + // the alignment container, too. (And its size is aAbsPosCBSize.) + // - Otherwise, we use the grid's padding box as the alignment container. + // https://drafts.csswg.org/css-grid/#static-position + if (aPlaceholderContainer == aKidReflowInput.mCBReflowInput->mFrame) { + // The alignment container is the grid area that we're using as the + // absolute containing block. + alignAreaSize = aAbsPosCBSize.ConvertTo(pcWM, aAbsPosCBWM); + } else { + // The alignment container is a the grid container's padding box (which + // we can get by subtracting away its border from frame's size): + alignAreaSize = aPlaceholderContainer->GetLogicalSize(pcWM); + LogicalMargin pcBorder = + aPlaceholderContainer->GetLogicalUsedBorder(pcWM); + alignAreaSize -= pcBorder.Size(pcWM); + } } else { NS_ERROR("Unsupported container for abpsos CSS Box Alignment"); return 0; // (leave the child at the start of its alignment container) } nscoord alignAreaSizeInAxis = (pcAxis == eLogicalAxisInline) ? 
alignAreaSize.ISize(pcWM) : alignAreaSize.BSize(pcWM); @@ -517,16 +539,17 @@ nsAbsoluteContainingBlock::ResolveSizeDe aOffsets->IStart(outerWM) = logicalCBSizeOuterWM.ISize(outerWM) - aOffsets->IEnd(outerWM) - aMargin.IStartEnd(outerWM) - aKidSize.ISize(outerWM); } else if (aKidReflowInput.mFlags.mIOffsetsNeedCSSAlign) { placeholderContainer = GetPlaceholderContainer(aPresContext, aKidReflowInput.mFrame); nscoord offset = OffsetToAlignedStaticPos(aKidReflowInput, aKidSize, + logicalCBSizeOuterWM, placeholderContainer, outerWM, eLogicalAxisInline); // Shift IStart from its current position (at start corner of the // alignment container) by the returned offset. And set IEnd to the // distance between the kid's end edge to containing block's end edge. aOffsets->IStart(outerWM) += offset; aOffsets->IEnd(outerWM) = logicalCBSizeOuterWM.ISize(outerWM) - @@ -539,16 +562,17 @@ nsAbsoluteContainingBlock::ResolveSizeDe aOffsets->BEnd(outerWM) - aMargin.BStartEnd(outerWM) - aKidSize.BSize(outerWM); } else if (aKidReflowInput.mFlags.mBOffsetsNeedCSSAlign) { if (!placeholderContainer) { placeholderContainer = GetPlaceholderContainer(aPresContext, aKidReflowInput.mFrame); } nscoord offset = OffsetToAlignedStaticPos(aKidReflowInput, aKidSize, + logicalCBSizeOuterWM, placeholderContainer, outerWM, eLogicalAxisBlock); // Shift BStart from its current position (at start corner of the // alignment container) by the returned offset. And set BEnd to the // distance between the kid's end edge to containing block's end edge. aOffsets->BStart(outerWM) += offset; aOffsets->BEnd(outerWM) = logicalCBSizeOuterWM.BSize(outerWM) - @@ -605,18 +629,21 @@ nsAbsoluteContainingBlock::ReflowAbsolut "Must have a useful inline-size _somewhere_"); availISize = aReflowInput.ComputedSizeWithPadding(wm).ISize(wm); } uint32_t rsFlags = 0; if (aFlags & AbsPosReflowFlags::eIsGridContainerCB) { // When a grid container generates the abs.pos. CB for a *child* then - // the static-position is the CB origin (i.e. 
of the grid area rect). - // https://drafts.csswg.org/css-grid/#static-position + // the static position is determined via CSS Box Alignment within the + // abs.pos. CB (a grid area, i.e. a piece of the grid). In this scenario, + // due to the multiple coordinate spaces in play, we use a convenience flag + // to simply have the child's ReflowInput give it a static position at its + // abs.pos. CB origin, and then we'll align & offset it from there. nsIFrame* placeholder = aPresContext->PresShell()->GetPlaceholderFrameFor(aKidFrame); if (placeholder && placeholder->GetParent() == aDelegatingFrame) { rsFlags |= ReflowInput::STATIC_POS_IS_CB_ORIGIN; } } ReflowInput kidReflowInput(aPresContext, aReflowInput, aKidFrame, LogicalSize(wm, availISize,
--- a/layout/generic/nsGridContainerFrame.cpp +++ b/layout/generic/nsGridContainerFrame.cpp @@ -5201,17 +5201,27 @@ nsGridContainerFrame::ReflowInFlowChild( aChild->Properties().Delete(aProp); } }; SetProp(eLogicalAxisBlock, isOrthogonal ? IBaselinePadProperty() : BBaselinePadProperty()); SetProp(eLogicalAxisInline, isOrthogonal ? BBaselinePadProperty() : IBaselinePadProperty()); } else { - cb = aContentArea; + // By convention, for frames that perform CSS Box Alignment, we position + // placeholder children at the start corner of their alignment container, + // and in this case that's usually the grid's padding box. + // ("Usually" - the exception is when the grid *also* forms the + // abs.pos. containing block. In that case, the alignment container isn't + // the padding box -- it's some grid area instead. But that case doesn't + // require any special handling here, because we handle it later using a + // special flag (STATIC_POS_IS_CB_ORIGIN) which will make us ignore the + // placeholder's position entirely.) + cb = aContentArea - padStart; + aChild->AddStateBits(PLACEHOLDER_STATICPOS_NEEDS_CSSALIGN); } LogicalSize reflowSize(cb.Size(wm)); if (isConstrainedBSize) { reflowSize.BSize(wm) = toFragmentainerEnd; } LogicalSize childCBSize = reflowSize.ConvertTo(childWM, wm); @@ -5287,20 +5297,18 @@ nsGridContainerFrame::ReflowInFlowChild( auto justify = childRI.mStylePosition->UsedJustifySelf(containerSC); auto state = aGridItemInfo->mState[eLogicalAxisInline]; if (state & ItemState::eContentBaseline) { justify = (state & ItemState::eFirstBaseline) ? NS_STYLE_JUSTIFY_SELF_START : NS_STYLE_JUSTIFY_SELF_END; } nscoord cbsz = cb.ISize(wm); JustifySelf(*aGridItemInfo, justify, cbsz, wm, childRI, size, &childPos); - } else { - // Put a placeholder at the padding edge, in case an ancestor is its CB. - childPos -= padStart; - } + } // else, nsAbsoluteContainingBlock.cpp will handle align/justify-self. 
+ childRI.ApplyRelativePositioning(&childPos, aContainerSize); FinishReflowChild(aChild, pc, childSize, &childRI, childWM, childPos, aContainerSize, 0); ConsiderChildOverflow(aDesiredSize.mOverflowAreas, aChild); } nscoord nsGridContainerFrame::ReflowInFragmentainer(GridReflowInput& aState, @@ -6627,16 +6635,68 @@ nsGridContainerFrame::RemoveFrame(ChildL frameThatMayLie->GetPrevInFlow()); } while (frameThatMayLie); } #endif nsContainerFrame::RemoveFrame(aListID, aOldFrame); } +uint16_t +nsGridContainerFrame::CSSAlignmentForAbsPosChild(const ReflowInput& aChildRI, + LogicalAxis aLogicalAxis) const +{ + MOZ_ASSERT(aChildRI.mFrame->IsAbsolutelyPositioned(), + "This method should only be called for abspos children"); + + uint16_t alignment = (aLogicalAxis == eLogicalAxisInline) ? + aChildRI.mStylePosition->UsedJustifySelf(nullptr) : + aChildRI.mStylePosition->UsedAlignSelf(nullptr); + + // XXX strip off <overflow-position> bits until we implement it + // (bug 1311892) + alignment &= ~NS_STYLE_ALIGN_FLAG_BITS; + + // We group 'auto' with 'normal', because the spec says: + // "The 'auto' keyword is interpreted as 'normal' + // if the box is absolutely positioned [...]" + // https://drafts.csswg.org/css-align-3/#valdef-align-self-auto + // https://drafts.csswg.org/css-align-3/#valdef-justify-self-auto + if (alignment == NS_STYLE_ALIGN_AUTO || + alignment == NS_STYLE_ALIGN_NORMAL) { + // "the 'normal' keyword behaves as 'start' on replaced + // absolutely-positioned boxes, and behaves as 'stretch' on all other + // absolutely-positioned boxes." + // https://drafts.csswg.org/css-align/#align-abspos + // https://drafts.csswg.org/css-align/#justify-abspos + alignment = aChildRI.mFrame->IsFrameOfType(nsIFrame::eReplaced) ? 
+ NS_STYLE_ALIGN_START : NS_STYLE_ALIGN_STRETCH; + } else if (alignment == NS_STYLE_ALIGN_FLEX_START) { + alignment = NS_STYLE_ALIGN_START; + } else if (alignment == NS_STYLE_ALIGN_FLEX_END) { + alignment = NS_STYLE_ALIGN_END; + } else if (alignment == NS_STYLE_ALIGN_LEFT || + alignment == NS_STYLE_ALIGN_RIGHT) { + if (aLogicalAxis == eLogicalAxisInline) { + const bool isLeft = (alignment == NS_STYLE_ALIGN_LEFT); + WritingMode wm = GetWritingMode(); + alignment = (isLeft == wm.IsBidiLTR()) ? NS_STYLE_ALIGN_START + : NS_STYLE_ALIGN_END; + } else { + alignment = NS_STYLE_ALIGN_START; + } + } else if (alignment == NS_STYLE_ALIGN_BASELINE) { + alignment = NS_STYLE_ALIGN_START; + } else if (alignment == NS_STYLE_ALIGN_LAST_BASELINE) { + alignment = NS_STYLE_ALIGN_END; + } + + return alignment; +} + nscoord nsGridContainerFrame::SynthesizeBaseline( const FindItemInGridOrderResult& aGridOrderItem, LogicalAxis aAxis, BaselineSharingGroup aGroup, const nsSize& aCBPhysicalSize, nscoord aCBSize, WritingMode aCBWM)
--- a/layout/generic/nsGridContainerFrame.h +++ b/layout/generic/nsGridContainerFrame.h @@ -121,16 +121,19 @@ public: #endif // nsContainerFrame overrides bool DrainSelfOverflowList() override; void AppendFrames(ChildListID aListID, nsFrameList& aFrameList) override; void InsertFrames(ChildListID aListID, nsIFrame* aPrevFrame, nsFrameList& aFrameList) override; void RemoveFrame(ChildListID aListID, nsIFrame* aOldFrame) override; + uint16_t CSSAlignmentForAbsPosChild( + const ReflowInput& aChildRI, + mozilla::LogicalAxis aLogicalAxis) const override; #ifdef DEBUG void SetInitialChildList(ChildListID aListID, nsFrameList& aChildList) override; #endif /** * Return the containing block for aChild which MUST be an abs.pos. child
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-001-ref.html @@ -0,0 +1,100 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + } + .small > .container { + height: 2px; + width: 4px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + margin-left: 3px; + } + .big .alignStart { margin-top: 2px; } + .big .alignCenter { margin-top: 9px; } + .big .alignEnd { margin-top: 16px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -2px; } + .small .alignEnd { margin-top: -4px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div 
class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-001.html @@ -0,0 +1,100 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="flex-abspos-staticpos-align-self-001-ref.html"> + <style> + .container { + display: grid; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: 
start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-002-ref.html @@ -0,0 +1,98 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 32px; + width: 26px; + } + .small > .container { + height: 4px; + width: 8px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + } + .big .alignStart { margin-top: 0px; } + .big .alignCenter { margin-top: 13px; } + .big .alignEnd { margin-top: 26px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -1px; } + .small .alignEnd { margin-top: -2px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div 
class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-002.html @@ -0,0 +1,99 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a static-pos grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-002-ref.html"> + <style> + .container { + display: grid; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div 
class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-img-001-ref.html @@ -0,0 +1,127 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 40px; + width: 22px; + } + .small > .container { + height: 2px; + width: 4px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + margin-left: 3px; + display: block; + } + .big .alignStart { margin-top: 2px; } + .big .alignCenter { margin-top: 9px; } + .big .alignEnd { margin-top: 16px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -7px; } + .small .alignEnd { margin-top: -14px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img 
src="support/colors-8x16.png" + class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--right--></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div 
class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--right--></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-img-001.html @@ -0,0 +1,126 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos replaced children in a grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-img-001-ref.html"> + <style> + .container { + display: grid; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + height: 40px; + width: 22px; + grid: 2px 30px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: last baseline"></div> + <br> 
+ <!-- <self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: right"></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: last baseline"></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: end"></div> + <div 
class="container"><img src="support/colors-8x16.png" + style="align-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: right"></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-img-002-ref.html @@ -0,0 +1,125 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 42px; + width: 26px; + } + .small > .container { + height: 4px; + width: 8px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + display: block; + } + .big .alignStart { margin-top: 0px; } + .big .alignCenter { margin-top: 13px; } + .big .alignEnd { margin-top: 26px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -6px; } + .small .alignEnd { margin-top: -12px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img src="support/colors-8x16.png" + 
class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--right--></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div class="container"><img 
src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--right--></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-img-002.html @@ -0,0 +1,125 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos replaced children in a static-pos grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-img-002-ref.html"> + <style> + .container { + display: grid; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 40px; + width: 22px; + grid: 2px 30px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: last baseline"></div> + <br> + <!-- 
<self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: right"></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: last baseline"></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: end"></div> + <div class="container"><img 
src="support/colors-8x16.png" + style="align-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="align-self: right"></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-001-ref.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + } + .small > .container { + height: 2px; + width: 4px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + } + .big > .container > * { margin-left: 11px; } + .small > .container > * { margin-left: -7px; } + + .big .alignStart { margin-top: 2px; } + .big .alignCenter { margin-top: 9px; } + .big .alignEnd { margin-top: 16px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -2px; } + .small .alignEnd { margin-top: -4px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-001.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a RTL grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-rtl-001-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div 
style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-002-ref.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + } + .small > .container { + height: 2px; + width: 4px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + } + .big > .container > * { margin-left: 11px; } + .small > .container > * { margin-left: -7px; } + + .big .alignStart { margin-top: 2px; } + .big .alignCenter { margin-top: 9px; } + .big .alignEnd { margin-top: 16px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -2px; } + .small .alignEnd { margin-top: -4px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-002.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos LTR children in a RTL grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-rtl-002-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + direction: ltr; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div 
class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-003-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 32px; + width: 26px; + } + .small > .container { + height: 4px; + width: 8px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + } + .big > .container > * { margin-left: 18px; } + .small > .container > * { margin-left: 0px; } + + .big .alignStart { margin-top: 0px; } + .big .alignCenter { margin-top: 13px; } + .big .alignEnd { margin-top: 26px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -1px; } + .small .alignEnd { margin-top: -2px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div 
class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-003.html @@ -0,0 +1,100 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a RTL static-pos grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-rtl-003-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div 
style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-004-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 32px; + width: 26px; + } + .small > .container { + height: 4px; + width: 8px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + } + .big > .container > * { margin-left: 18px; } + .small > .container > * { margin-left: 0px; } + + .big .alignStart { margin-top: 0px; } + .big .alignCenter { margin-top: 13px; } + .big .alignEnd { margin-top: 26px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -1px; } + .small .alignEnd { margin-top: -2px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div 
class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-rtl-004.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos LTR children in a RTL static-pos grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-rtl-004-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + direction: ltr; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div 
class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-001-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + } + .small > .container { + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + margin-top: 3px; + } + .big .alignStart { margin-left: 22px; } + .big .alignCenter { margin-left: 15px; } + .big .alignEnd { margin-left: 8px; } + .small .alignStart { margin-left: -4px; } + .small .alignCenter { margin-left: -2px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-001.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a vertical-rl grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-vertWM-001-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div 
class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-002-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + } + .small > .container { + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + margin-top: 3px; + } + .big .alignStart { margin-left: 22px; } + .big .alignCenter { margin-left: 15px; } + .big .alignEnd { margin-left: 8px; } + .small .alignStart { margin-left: -4px; } + .small .alignCenter { margin-left: -2px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-002.html @@ -0,0 +1,103 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos horizontal-tb children in a vertical-rl grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-vertWM-002-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + writing-mode: horizontal-tb; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + 
<!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: 
right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-003-ref.html @@ -0,0 +1,99 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 32px; + height: 26px; + } + .small > .container { + width: 4px; + height: 8px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + } + .big .alignStart { margin-left: 26px; } + .big .alignCenter { margin-left: 13px; } + .big .alignEnd { margin-left: 0px; } + .small .alignStart { margin-left: -2px; } + .small .alignCenter { margin-left: -1px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div 
class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-003.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a vertical-rl static-pos grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-vertWM-003-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div 
class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-004-ref.html @@ -0,0 +1,99 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 32px; + height: 26px; + } + .small > .container { + width: 4px; + height: 8px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + } + .big .alignStart { margin-left: 26px; } + .big .alignCenter { margin-left: 13px; } + .big .alignEnd { margin-left: 0px; } + .small .alignStart { margin-left: -2px; } + .small .alignCenter { margin-left: -1px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div 
class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-align-self-vertWM-004.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos horizontal-tb children in a static-pos vertical-rl grid container, with various "align-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-align-self-vertWM-004-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 2px 20px 2px / 3px 14px 3px; + } + .small > .container { + grid: 0px 2px 0px / 3px 2px 3px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + writing-mode: horizontal-tb; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- 
<self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various align-self values, from + https://www.w3.org/TR/css-align-3/#propdef-align-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="align-self: auto"></div></div> + <div class="container"><div style="align-self: normal"></div></div> + <div class="container"><div style="align-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="align-self: baseline"></div></div> + <div class="container"><div style="align-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="align-self: center"></div></div> + <div class="container"><div style="align-self: start"></div></div> + <div class="container"><div style="align-self: end"></div></div> + <div class="container"><div style="align-self: self-start"></div></div> + <div class="container"><div style="align-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="align-self: flex-start"></div></div> + <div class="container"><div style="align-self: flex-end"></div></div> + <div class="container"><div style="align-self: left"></div></div> + <div class="container"><div style="align-self: right"></div></div> + 
<br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-001-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + } + .small > .container { + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + margin-top: 3px; + } + .big .alignStart { margin-left: 2px; } + .big .alignCenter { margin-left: 9px; } + .big .alignEnd { margin-left: 16px; } + .small .alignStart { margin-left: 0px; } + .small .alignCenter { margin-left: -2px; } + .small .alignEnd { margin-left: -4px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-001.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-001-ref.html"> + <style> + .container { + display: grid; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div 
style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + 
</div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-002-ref.html @@ -0,0 +1,99 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 32px; + height: 26px; + } + .small > .container { + width: 4px; + height: 8px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + } + .big .alignStart { margin-left: 0px; } + .big .alignCenter { margin-left: 13px; } + .big .alignEnd { margin-left: 26px; } + .small .alignStart { margin-left: 0px; } + .small .alignCenter { margin-left: -1px; } + .small .alignEnd { margin-left: -2px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div 
class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-002.html @@ -0,0 +1,100 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a static-pos grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-002-ref.html"> + <style> + .container { + display: grid; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: 
center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-img-001-ref.html @@ -0,0 +1,127 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 40px; + height: 22px; + } + .small > .container { + width: 2px; + height: 4px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + margin-top: 3px; + display: block; + } + .big .alignStart { margin-left: 2px; } + .big .alignCenter { margin-left: 13px; } + .big .alignEnd { margin-left: 24px; } + .small .alignStart { margin-left: 0px; } + .small .alignCenter { margin-left: -3px; } + .small .alignEnd { margin-left: -6px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img 
src="support/colors-8x16.png" + class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--right--></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div 
class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--right--></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-img-001.html @@ -0,0 +1,126 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos replaced children in a grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-img-001-ref.html"> + <style> + .container { + display: grid; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + width: 40px; + height: 22px; + grid: 3px 14px 3px / 2px 30px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: last 
baseline"></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: right"></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: last baseline"></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: start"></div> + <div class="container"><img 
src="support/colors-8x16.png" + style="justify-self: end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: right"></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-img-002-ref.html @@ -0,0 +1,125 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 42px; + height: 26px; + } + .small > .container { + width: 4px; + height: 8px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + display: block; + } + .big .alignStart { margin-left: 0px; } + .big .alignCenter { margin-left: 17px; } + .big .alignEnd { margin-left: 34px; } + .small .alignStart { margin-left: 0px; } + .small .alignCenter { margin-left: -2px; } + .small .alignEnd { margin-left: -4px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img src="support/colors-8x16.png" + 
class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--right--></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--auto--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--normal--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--stretch--></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--baseline--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--last baseline--></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignCenter"><!--center--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--end--></div> + <div class="container"><img 
src="support/colors-8x16.png" + class="alignStart"><!--self-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--self-end--></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--flex-start--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--flex-end--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignStart"><!--left--></div> + <div class="container"><img src="support/colors-8x16.png" + class="alignEnd"><!--right--></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-img-002.html @@ -0,0 +1,125 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos replaced children in a static-pos grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-img-002-ref.html"> + <style> + .container { + display: grid; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 40px; + height: 22px; + grid: 3px 14px 3px / 2px 30px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-bottom: 20px; /* to reduce overlap between overflowing images */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: last 
baseline"></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: right"></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: auto"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: normal"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: stretch"></div> + <br> + <!-- <baseline-position> --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: baseline"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: last baseline"></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: center"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: start"></div> + <div class="container"><img 
src="support/colors-8x16.png" + style="justify-self: end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: self-end"></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-start"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: flex-end"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: left"></div> + <div class="container"><img src="support/colors-8x16.png" + style="justify-self: right"></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-001-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + } + .small > .container { + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + margin-top: 3px; + } + .big .alignStart { margin-left: 22px; } + .big .alignCenter { margin-left: 15px; } + .big .alignEnd { margin-left: 8px; } + .small .alignStart { margin-left: -4px; } + .small .alignCenter { margin-left: -2px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-001.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a RTL grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-rtl-001-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div 
class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: 
right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-002-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + } + .small > .container { + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + margin-top: 3px; + } + .big .alignStart { margin-left: 22px; } + .big .alignCenter { margin-left: 15px; } + .big .alignEnd { margin-left: 8px; } + .small .alignStart { margin-left: -4px; } + .small .alignCenter { margin-left: -2px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-002.html @@ -0,0 +1,103 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos LTR children in a RTL grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-rtl-002-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + direction: ltr; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 
1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div 
style="justify-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-003-ref.html @@ -0,0 +1,99 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 32px; + height: 26px; + } + .small > .container { + width: 4px; + height: 8px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + } + .big .alignStart { margin-left: 26px; } + .big .alignCenter { margin-left: 13px; } + .big .alignEnd { margin-left: 0px; } + .small .alignStart { margin-left: -2px; } + .small .alignCenter { margin-left: -1px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div 
class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-003.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a RTL static-pos grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-rtl-003-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div 
class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: 
right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-004-ref.html @@ -0,0 +1,99 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 32px; + height: 26px; + } + .small > .container { + width: 4px; + height: 8px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + background: teal; + width: 6px; + height: 8px; + } + .big .alignStart { margin-left: 26px; } + .big .alignCenter { margin-left: 13px; } + .big .alignEnd { margin-left: 0px; } + .small .alignStart { margin-left: -2px; } + .small .alignCenter { margin-left: -1px; } + .small .alignEnd { margin-left: 0px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div 
class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignEnd"><!--self-start--></div></div> + <div class="container"><div class="alignStart"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignEnd"><!--left--></div></div> + <div class="container"><div class="alignStart"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-rtl-004.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos LTR children in a RTL static-pos grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-rtl-004-ref.html"> + <style> + .container { + display: grid; + direction: rtl; + padding: 2px 1px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + width: 30px; + height: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + width: 2px; + height: 4px; + margin-right: 10px; /* To avoid overlap between overflowing kids */ + } + + .container > * { + position: absolute; + direction: ltr; + grid-area: 2 / 2 / 3 / 3; + background: teal; + width: 6px; + height: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + 
<div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: 
right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-001-ref.html @@ -0,0 +1,103 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + } + .small > .container { + height: 2px; + width: 4px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + margin-top: 3px; + } + .big > .container > * { margin-left: 11px; } + .small > .container > * { margin-left: -7px; } + + .big .alignStart { margin-top: 2px; } + .big .alignCenter { margin-top: 9px; } + .big .alignEnd { margin-top: 16px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -2px; } + .small .alignEnd { margin-top: -4px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-001.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a vertical-rl grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-vertWM-001-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: 
center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-002-ref.html @@ -0,0 +1,103 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + } + .small > .container { + height: 2px; + width: 4px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + margin-top: 3px; + } + .big > .container > * { margin-left: 11px; } + .small > .container > * { margin-left: -7px; } + + .big .alignStart { margin-top: 2px; } + .big .alignCenter { margin-top: 9px; } + .big .alignEnd { margin-top: 16px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -2px; } + .small .alignEnd { margin-top: -4px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div 
class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> +</body> 
+</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-002.html @@ -0,0 +1,102 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos horizontal-tb children in a vertical-rl grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-vertWM-002-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + position: relative; + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + writing-mode: horizontal-tb; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div 
class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: 
right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-003-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 32px; + width: 26px; + } + .small > .container { + height: 4px; + width: 8px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + } + .big > .container > * { margin-left: 18px; } + .small > .container > * { margin-left: 0px; } + + .big .alignStart { margin-top: 0px; } + .big .alignCenter { margin-top: 13px; } + .big .alignEnd { margin-top: 26px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -1px; } + .small .alignEnd { margin-top: -2px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div 
class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-003.html @@ -0,0 +1,100 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos children in a vertical-rl static-pos grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-vertWM-003-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> 
+ <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-004-ref.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Reference</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <style> + .container { + display: block; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 32px; + width: 26px; + } + .small > .container { + height: 4px; + width: 8px; + } + + .container > * { + background: teal; + height: 6px; + width: 8px; + } + .big > .container > * { margin-left: 18px; } + .small > .container > * { margin-left: 0px; } + + .big .alignStart { margin-top: 0px; } + .big .alignCenter { margin-top: 13px; } + .big .alignEnd { margin-top: 26px; } + .small .alignStart { margin-top: 0px; } + .small .alignCenter { margin-top: -1px; } + .small .alignEnd { margin-top: -2px; } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div 
class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div class="alignStart"><!--auto--></div></div> + <div class="container"><div class="alignStart"><!--normal--></div></div> + <div class="container"><div class="alignStart"><!--stretch--></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div class="alignStart"><!--baseline--></div></div> + <div class="container"><div class="alignEnd"><!--last baseline--></div></div> + <br> + <!-- <self-position>, part 1: --> + <div class="container"><div class="alignCenter"><!--center--></div></div> + <div class="container"><div class="alignStart"><!--start--></div></div> + <div class="container"><div class="alignEnd"><!--end--></div></div> + <div class="container"><div class="alignStart"><!--self-start--></div></div> + <div class="container"><div class="alignEnd"><!--self-end--></div></div> + <br> + <!-- <self-position>, part 2: --> + <div class="container"><div class="alignStart"><!--flex-start--></div></div> + <div class="container"><div class="alignEnd"><!--flex-end--></div></div> + <div class="container"><div class="alignStart"><!--left--></div></div> + <div class="container"><div class="alignEnd"><!--right--></div></div> + <br> + </div> +</body> +</html>
new file mode 100644 --- /dev/null +++ b/layout/reftests/w3c-css/submitted/align3/grid-abspos-staticpos-justify-self-vertWM-004.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<!-- + Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ +--> +<html> +<head> + <meta charset="utf-8"> + <title>CSS Test: Static position of abspos horizontal-tb children in a vertical-rl static-pos grid container, with various "justify-self" values</title> + <link rel="author" title="Daniel Holbert" href="mailto:dholbert@mozilla.com"> + <link rel="help" href="https://drafts.csswg.org/css-align-3/#align-abspos-static"> + <link rel="match" href="grid-abspos-staticpos-justify-self-vertWM-004-ref.html"> + <style> + .container { + display: grid; + writing-mode: vertical-rl; + padding: 1px 2px; + border: 1px solid black; + background: yellow; + margin-bottom: 5px; + margin-right: 5px; + float: left; /* For testing in "rows" of containers */ + } + br { clear: both } + + .big > .container { + height: 30px; + width: 22px; + grid: 3px 14px 3px / 2px 20px 2px; + } + .small > .container { + grid: 3px 2px 3px / 0px 2px 0px; + height: 2px; + width: 4px; + } + + .container > * { + position: absolute; + writing-mode: horizontal-tb; + grid-area: 2 / 2 / 3 / 3; + background: teal; + height: 6px; + width: 8px; + } + </style> +</head> +<body> + <div class="big"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div 
class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: right"></div></div> + <br> + </div> + <div class="small"> + <!-- The various justify-self values, from + https://www.w3.org/TR/css-align-3/#propdef-justify-self --> + <!-- auto | normal | stretch --> + <div class="container"><div style="justify-self: auto"></div></div> + <div class="container"><div style="justify-self: normal"></div></div> + <div class="container"><div style="justify-self: stretch"></div></div> + <br> + <!-- <baseline-position> --> + <div class="container"><div style="justify-self: baseline"></div></div> + <div class="container"><div style="justify-self: last baseline"></div></div> + <br> + <!-- <self-position>, part 1 --> + <div class="container"><div style="justify-self: center"></div></div> + <div class="container"><div style="justify-self: start"></div></div> + <div class="container"><div style="justify-self: end"></div></div> + <div class="container"><div style="justify-self: self-start"></div></div> + <div class="container"><div style="justify-self: self-end"></div></div> + <br> + <!-- <self-position>, part 2 --> + <div class="container"><div style="justify-self: flex-start"></div></div> + <div class="container"><div style="justify-self: flex-end"></div></div> + <div class="container"><div style="justify-self: left"></div></div> + <div class="container"><div style="justify-self: 
right"></div></div> + <br> + </div> +</body> +</html>
--- a/layout/reftests/w3c-css/submitted/align3/reftest.list +++ b/layout/reftests/w3c-css/submitted/align3/reftest.list @@ -44,8 +44,34 @@ == flex-abspos-staticpos-justify-content-rtl-002.html flex-abspos-staticpos-justify-content-rtl-002-ref.html == flex-abspos-staticpos-justify-content-vertWM-001.html flex-abspos-staticpos-justify-content-vertWM-001-ref.html == flex-abspos-staticpos-justify-content-vertWM-002.html flex-abspos-staticpos-justify-content-vertWM-002-ref.html == flex-abspos-staticpos-justify-self-001.html flex-abspos-staticpos-justify-self-001-ref.html == flex-abspos-staticpos-margin-001.html flex-abspos-staticpos-margin-001-ref.html == flex-abspos-staticpos-margin-002.html flex-abspos-staticpos-margin-002-ref.html + +== grid-abspos-staticpos-align-self-001.html grid-abspos-staticpos-align-self-001-ref.html +== grid-abspos-staticpos-align-self-002.html grid-abspos-staticpos-align-self-002-ref.html +== grid-abspos-staticpos-align-self-img-001.html grid-abspos-staticpos-align-self-img-001-ref.html +== grid-abspos-staticpos-align-self-img-002.html grid-abspos-staticpos-align-self-img-002-ref.html +== grid-abspos-staticpos-align-self-rtl-001.html grid-abspos-staticpos-align-self-rtl-001-ref.html +== grid-abspos-staticpos-align-self-rtl-002.html grid-abspos-staticpos-align-self-rtl-002-ref.html +== grid-abspos-staticpos-align-self-rtl-003.html grid-abspos-staticpos-align-self-rtl-003-ref.html +== grid-abspos-staticpos-align-self-rtl-004.html grid-abspos-staticpos-align-self-rtl-004-ref.html +== grid-abspos-staticpos-align-self-vertWM-001.html grid-abspos-staticpos-align-self-vertWM-001-ref.html +== grid-abspos-staticpos-align-self-vertWM-002.html grid-abspos-staticpos-align-self-vertWM-002-ref.html +== grid-abspos-staticpos-align-self-vertWM-003.html grid-abspos-staticpos-align-self-vertWM-003-ref.html +== grid-abspos-staticpos-align-self-vertWM-004.html grid-abspos-staticpos-align-self-vertWM-004-ref.html + +== grid-abspos-staticpos-justify-self-001.html 
grid-abspos-staticpos-justify-self-001-ref.html +== grid-abspos-staticpos-justify-self-002.html grid-abspos-staticpos-justify-self-002-ref.html +== grid-abspos-staticpos-justify-self-img-001.html grid-abspos-staticpos-justify-self-img-001-ref.html +== grid-abspos-staticpos-justify-self-img-002.html grid-abspos-staticpos-justify-self-img-002-ref.html +== grid-abspos-staticpos-justify-self-rtl-001.html grid-abspos-staticpos-justify-self-rtl-001-ref.html +== grid-abspos-staticpos-justify-self-rtl-002.html grid-abspos-staticpos-justify-self-rtl-002-ref.html +== grid-abspos-staticpos-justify-self-rtl-003.html grid-abspos-staticpos-justify-self-rtl-003-ref.html +== grid-abspos-staticpos-justify-self-rtl-004.html grid-abspos-staticpos-justify-self-rtl-004-ref.html +== grid-abspos-staticpos-justify-self-vertWM-001.html grid-abspos-staticpos-justify-self-vertWM-001-ref.html +== grid-abspos-staticpos-justify-self-vertWM-002.html grid-abspos-staticpos-justify-self-vertWM-002-ref.html +== grid-abspos-staticpos-justify-self-vertWM-003.html grid-abspos-staticpos-justify-self-vertWM-003-ref.html +== grid-abspos-staticpos-justify-self-vertWM-004.html grid-abspos-staticpos-justify-self-vertWM-004-ref.html
copy from layout/reftests/w3c-css/submitted/images3/support/colors-8x16.png copy to layout/reftests/w3c-css/submitted/align3/support/colors-8x16.png
--- a/layout/style/nsStyleUtil.cpp +++ b/layout/style/nsStyleUtil.cpp @@ -768,14 +768,16 @@ nsStyleUtil::CSPAllowsInlineStyle(nsICon // query the nonce nsAutoString nonce; if (aContent) { aContent->GetAttr(kNameSpaceID_None, nsGkAtoms::nonce, nonce); } bool allowInlineStyle = true; rv = csp->GetAllowsInline(nsIContentPolicy::TYPE_STYLESHEET, - nonce, aStyleText, aLineNumber, + nonce, + false, // aParserCreated only applies to scripts + aStyleText, aLineNumber, &allowInlineStyle); NS_ENSURE_SUCCESS(rv, false); return allowInlineStyle; }
--- a/memory/build/moz.build +++ b/memory/build/moz.build @@ -4,16 +4,17 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. EXPORTS += [ 'mozmemory.h', 'mozmemory_wrap.h', ] +DEFINES['MOZ_HAS_MOZGLUE'] = True DEFINES['MOZ_MEMORY_IMPL'] = True if CONFIG['MOZ_REPLACE_MALLOC']: EXPORTS += [ 'malloc_decls.h', 'replace_malloc.h', 'replace_malloc_bridge.h', ]
--- a/memory/jemalloc/moz.build +++ b/memory/jemalloc/moz.build @@ -20,23 +20,23 @@ UNIFIED_SOURCES += [ 'src/src/mb.c', 'src/src/mutex.c', 'src/src/nstime.c', 'src/src/pages.c', 'src/src/prng.c', 'src/src/prof.c', 'src/src/quarantine.c', 'src/src/rtree.c', + 'src/src/spin.c', 'src/src/stats.c', 'src/src/tcache.c', 'src/src/ticker.c', 'src/src/tsd.c', 'src/src/util.c', - # FIXME do we ever want valgrind.c? - # 'src/src/valgrind.c', + 'src/src/witness.c', ] SOURCES += [ # This file cannot be built in unified mode because of symbol clash on arena_purge. 'src/src/ctl.c', ] # Only OSX needs the zone allocation implementation, @@ -63,16 +63,17 @@ if CONFIG['_MSC_VER']: if CONFIG['OS_TARGET'] == 'Linux': # For mremap DEFINES['_GNU_SOURCE'] = True if CONFIG['GNU_CC']: CFLAGS += ['-std=gnu99'] DEFINES['abort'] = 'moz_abort' +DEFINES['MOZ_HAS_MOZGLUE'] = True LOCAL_INCLUDES += [ '!src/include', 'src/include', ] # We allow warnings for third-party code that can be updated from upstream. ALLOW_COMPILER_WARNINGS = True
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/.appveyor.yml @@ -0,0 +1,28 @@ +version: '{build}' + +environment: + matrix: + - MSYSTEM: MINGW64 + CPU: x86_64 + MSVC: amd64 + - MSYSTEM: MINGW32 + CPU: i686 + MSVC: x86 + - MSYSTEM: MINGW64 + CPU: x86_64 + - MSYSTEM: MINGW32 + CPU: i686 + +install: + - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% + - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC% + - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc + - pacman --noconfirm -Suy mingw-w64-%CPU%-make + +build_script: + - bash -c "autoconf" + - bash -c "./configure" + - mingw32-make -j3 + - file lib/jemalloc.dll + - mingw32-make -j3 tests + - mingw32-make -k check
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/.travis.yml @@ -0,0 +1,29 @@ +language: c + +matrix: + include: + - os: linux + compiler: gcc + - os: linux + compiler: gcc + env: + - EXTRA_FLAGS=-m32 + addons: + apt: + packages: + - gcc-multilib + - os: osx + compiler: clang + - os: osx + compiler: clang + env: + - EXTRA_FLAGS=-m32 + +before_script: + - autoconf + - ./configure${EXTRA_FLAGS:+ CC="$CC $EXTRA_FLAGS"} + - make -j3 + - make -j3 tests + +script: + - make check
--- a/memory/jemalloc/src/COPYING +++ b/memory/jemalloc/src/COPYING @@ -1,15 +1,15 @@ Unless otherwise specified, files in the jemalloc source distribution are subject to the following license: -------------------------------------------------------------------------------- -Copyright (C) 2002-2015 Jason Evans <jasone@canonware.com>. +Copyright (C) 2002-2016 Jason Evans <jasone@canonware.com>. All rights reserved. Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2015 Facebook, Inc. All rights reserved. +Copyright (C) 2009-2016 Facebook, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice(s), this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice(s), this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
--- a/memory/jemalloc/src/ChangeLog +++ b/memory/jemalloc/src/ChangeLog @@ -1,14 +1,101 @@ Following are change highlights associated with official releases. Important bug fixes are all mentioned, but some internal enhancements are omitted here for brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 4.3.1 (November 7, 2016) + + Bug fixes: + - Fix a severe virtual memory leak. This regression was first released in + 4.3.0. (@interwq, @jasone) + - Refactor atomic and prng APIs to restore support for 32-bit platforms that + use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) + +* 4.3.0 (November 4, 2016) + + This is the first release that passes the test suite for multiple Windows + configurations, thanks in large part to @glandium setting up continuous + integration via AppVeyor (and Travis CI for Linux and OS X). + + New features: + - Add "J" (JSON) support to malloc_stats_print(). (@jasone) + - Add Cray compiler support. (@ronawho) + + Optimizations: + - Add/use adaptive spinning for bootstrapping and radix tree node + initialization. (@jasone) + + Bug fixes: + - Fix large allocation to search starting in the optimal size class heap, + which can substantially reduce virtual memory churn and fragmentation. This + regression was first released in 4.0.0. (@mjp41, @jasone) + - Fix stats.arenas.<i>.nthreads accounting. (@interwq) + - Fix and simplify decay-based purging. (@jasone) + - Make DSS (sbrk(2)-related) operations lockless, which resolves potential + deadlocks during thread exit. (@jasone) + - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, + @jasone) + - Fix over-sized allocation of arena_t (plus associated stats) data + structures. (@jasone, @interwq) + - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) + - Fix a Valgrind integration bug. (@ronawho) + - Disallow 0x5a junk filling when running in Valgrind. (@jasone) + - Fix a file descriptor leak on Linux. 
This regression was first released in + 4.2.0. (@vsarunas, @jasone) + - Fix static linking of jemalloc with glibc. (@djwatson) + - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This + works around other libraries' system call wrappers performing reentrant + allocation. (@kspinka, @Whissi, @jasone) + - Fix OS X default zone replacement to work with OS X 10.12. (@glandium, + @jasone) + - Fix cached memory management to avoid needless commit/decommit operations + during purging, which resolves permanent virtual memory map fragmentation + issues on Windows. (@mjp41, @jasone) + - Fix TSD fetches to avoid (recursive) allocation. This is relevant to + non-TLS and Windows configurations. (@jasone) + - Fix malloc_conf overriding to work on Windows. (@jasone) + - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) + +* 4.2.1 (June 8, 2016) + + Bug fixes: + - Fix bootstrapping issues for configurations that require allocation during + tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) + - Fix gettimeofday() version of nstime_update(). (@ronawho) + - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) + - Fix potential VM map fragmentation regression. (@jasone) + - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) + - Fix heap profiling context leaks in reallocation edge cases. (@jasone) + +* 4.2.0 (May 12, 2016) + + New features: + - Add the arena.<i>.reset mallctl, which makes it possible to discard all of + an arena's allocations in a single operation. (@jasone) + - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone) + - Add the --with-version configure option. (@jasone) + - Support --with-lg-page values larger than actual page size. (@jasone) + + Optimizations: + - Use pairing heaps rather than red-black trees for various hot data + structures. (@djwatson, @jasone) + - Streamline fast paths of rtree operations. 
(@jasone) + - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) + - Decommit unused virtual memory if the OS does not overcommit. (@jasone) + - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order + to avoid unfortunate interactions during fork(2). (@jasone) + + Bug fixes: + - Fix chunk accounting related to triggering gdump profiles. (@jasone) + - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) + - Scale leak report summary according to sampling probability. (@jasone) + * 4.1.1 (May 3, 2016) This bugfix release resolves a variety of mostly minor issues, though the bitmap fix is critical for 64-bit Windows. Bug fixes: - Fix the linear scan version of bitmap_sfu() to shift by the proper amount even when sizeof(long) is not the same as sizeof(void *), as on 64-bit @@ -16,17 +103,17 @@ brevity. Much more detail can be found - Fix hashing functions to avoid unaligned memory accesses (and resulting crashes). This is relevant at least to some ARM-based platforms. (@rkmisra) - Fix fork()-related lock rank ordering reversals. These reversals were unlikely to cause deadlocks in practice except when heap profiling was enabled and active. (@jasone) - Fix various chunk leaks in OOM code paths. (@jasone) - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) - - Fix MSVC-specific build/test issues. (@rustyx, yuslepukhin) + - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) - Fix a variety of test failures that were due to test fragility rather than core bugs. (@jasone) * 4.1.0 (February 28, 2016) This release is primarily about optimizations, but it also incorporates a lot of portability-motivated refactoring and enhancements. Many people worked on this release, to an extent that even with the omission here of minor changes @@ -75,24 +162,24 @@ brevity. Much more detail can be found up incremental huge reallocation. 
(@jasone) Incompatible changes: - Make opt.narenas unsigned rather than size_t. (@jasone) Bug fixes: - Fix stats.cactive accounting regression. (@rustyx, @jasone) - Handle unaligned keys in hash(). This caused problems for some ARM systems. - (@jasone, Christopher Ferris) + (@jasone, @cferris1000) - Refactor arenas array. In addition to fixing a fork-related deadlock, this makes arena lookups faster and simpler. (@jasone) - Move retained memory allocation out of the default chunk allocation function, to a location that gets executed even if the application installs a custom chunk allocation function. This resolves a virtual memory leak. (@buchgr) - - Fix a potential tsd cleanup leak. (Christopher Ferris, @jasone) + - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) - Fix run quantization. In practice this bug had no impact unless applications requested memory with alignment exceeding one page. (@jasone, @djwatson) - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) - jeprof: + Don't discard curl options if timeout is not defined. (@djwatson) + Detect failed profile fetches. (@djwatson) - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
--- a/memory/jemalloc/src/INSTALL +++ b/memory/jemalloc/src/INSTALL @@ -30,16 +30,20 @@ any of the following arguments (not a de --prefix=<install-root-dir> Set the base directory in which to install. For example: ./configure --prefix=/usr/local will cause files to be installed into /usr/local/include, /usr/local/lib, and /usr/local/man. +--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> + Use the specified version string rather than trying to generate one (if in + a git repository) or use existing the VERSION file (if present). + --with-rpath=<colon-separated-rpath> Embed one or more library paths, so that libjemalloc can find the libraries it is linked to. This works only on ELF-based systems. --with-mangling=<map> Mangle public symbols specified in <map> which is a comma-separated list of name:mangled pairs.
--- a/memory/jemalloc/src/Makefile.in +++ b/memory/jemalloc/src/Makefile.in @@ -19,21 +19,21 @@ DATADIR := $(DESTDIR)@DATADIR@ MANDIR := $(DESTDIR)@MANDIR@ srcroot := @srcroot@ objroot := @objroot@ abs_srcroot := @abs_srcroot@ abs_objroot := @abs_objroot@ # Build parameters. CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include -CFLAGS := @CFLAGS@ +EXTRA_CFLAGS := @EXTRA_CFLAGS@ +CFLAGS := @CFLAGS@ $(EXTRA_CFLAGS) LDFLAGS := @LDFLAGS@ EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ LIBS := @LIBS@ -TESTLIBS := @TESTLIBS@ RPATH_EXTRA := @RPATH_EXTRA@ SO := @so@ IMPORTLIB := @importlib@ O := @o@ A := @a@ EXE := @exe@ LIBPREFIX := @libprefix@ REV := @rev@ @@ -48,25 +48,29 @@ cfghdrs_out := @cfghdrs_out@ cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@) cfgoutputs_out := @cfgoutputs_out@ enable_autogen := @enable_autogen@ enable_code_coverage := @enable_code_coverage@ enable_prof := @enable_prof@ enable_valgrind := @enable_valgrind@ enable_zone_allocator := @enable_zone_allocator@ MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF +link_whole_archive := @link_whole_archive@ DSO_LDFLAGS = @DSO_LDFLAGS@ SOREV = @SOREV@ PIC_CFLAGS = @PIC_CFLAGS@ CTARGET = @CTARGET@ LDTARGET = @LDTARGET@ +TEST_LD_MODE = @TEST_LD_MODE@ MKLIB = @MKLIB@ AR = @AR@ ARFLAGS = @ARFLAGS@ CC_MM = @CC_MM@ +LM := @LM@ +INSTALL = @INSTALL@ ifeq (macho, $(ABI)) TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" else ifeq (pecoff, $(ABI)) TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib" else TEST_LIBRARY_PATH := @@ -95,20 +99,22 @@ C_SRCS := $(srcroot)src/jemalloc.c \ $(srcroot)src/mutex.c \ $(srcroot)src/nstime.c \ $(srcroot)src/pages.c \ $(srcroot)src/prng.c \ $(srcroot)src/prof.c \ $(srcroot)src/quarantine.c \ $(srcroot)src/rtree.c \ $(srcroot)src/stats.c \ + $(srcroot)src/spin.c \ $(srcroot)src/tcache.c \ $(srcroot)src/ticker.c \ $(srcroot)src/tsd.c \ - $(srcroot)src/util.c + $(srcroot)src/util.c \ + $(srcroot)src/witness.c ifeq ($(enable_valgrind), 1) C_SRCS += $(srcroot)src/valgrind.c 
endif ifeq ($(enable_zone_allocator), 1) C_SRCS += $(srcroot)src/zone.c endif ifeq ($(IMPORTLIB),$(SO)) STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A) @@ -117,42 +123,55 @@ ifdef PIC_CFLAGS STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A) else STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A) endif DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) ifneq ($(SOREV),$(SO)) DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) endif +ifeq (1, $(link_whole_archive)) +LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive +else +LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) +endif PC := $(objroot)jemalloc.pc MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html) DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3) DOCS := $(DOCS_HTML) $(DOCS_MAN3) C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c +ifeq (1, $(link_whole_archive)) +C_UTIL_INTEGRATION_SRCS := +else C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c -TESTS_UNIT := $(srcroot)test/unit/atomic.c \ +endif +TESTS_UNIT := \ + $(srcroot)test/unit/a0.c \ + $(srcroot)test/unit/arena_reset.c \ + $(srcroot)test/unit/atomic.c \ $(srcroot)test/unit/bitmap.c \ $(srcroot)test/unit/ckh.c \ $(srcroot)test/unit/decay.c \ $(srcroot)test/unit/fork.c \ $(srcroot)test/unit/hash.c \ $(srcroot)test/unit/junk.c \ $(srcroot)test/unit/junk_alloc.c \ $(srcroot)test/unit/junk_free.c \ $(srcroot)test/unit/lg_chunk.c \ $(srcroot)test/unit/mallctl.c \ $(srcroot)test/unit/math.c \ $(srcroot)test/unit/mq.c \ $(srcroot)test/unit/mtx.c \ + $(srcroot)test/unit/ph.c \ $(srcroot)test/unit/prng.c \ $(srcroot)test/unit/prof_accum.c \ 
$(srcroot)test/unit/prof_active.c \ $(srcroot)test/unit/prof_gdump.c \ $(srcroot)test/unit/prof_idump.c \ $(srcroot)test/unit/prof_reset.c \ $(srcroot)test/unit/prof_thread_name.c \ $(srcroot)test/unit/ql.c \ @@ -164,16 +183,17 @@ TESTS_UNIT := $(srcroot)test/unit/atomic $(srcroot)test/unit/SFMT.c \ $(srcroot)test/unit/size_classes.c \ $(srcroot)test/unit/smoothstep.c \ $(srcroot)test/unit/stats.c \ $(srcroot)test/unit/ticker.c \ $(srcroot)test/unit/nstime.c \ $(srcroot)test/unit/tsd.c \ $(srcroot)test/unit/util.c \ + $(srcroot)test/unit/witness.c \ $(srcroot)test/unit/zero.c TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ $(srcroot)test/integration/allocated.c \ $(srcroot)test/integration/sdallocx.c \ $(srcroot)test/integration/mallocx.c \ $(srcroot)test/integration/MALLOCX_ARENA.c \ $(srcroot)test/integration/overflow.c \ $(srcroot)test/integration/posix_memalign.c \ @@ -285,79 +305,79 @@ endif $(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS) $(STATIC_LIBS): @mkdir -p $(@D) $(AR) $(ARFLAGS)@AROUT@ $+ $(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) @mkdir -p $(@D) - $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS) + $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS) $(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) @mkdir -p $(@D) - $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(TESTLIBS) $(EXTRA_LDFLAGS) + $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -lpthread,$(LIBS))) $(LM) 
$(EXTRA_LDFLAGS) $(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) @mkdir -p $(@D) - $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS) + $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS) build_lib_shared: $(DSOS) build_lib_static: $(STATIC_LIBS) build_lib: build_lib_shared build_lib_static install_bin: - install -d $(BINDIR) + $(INSTALL) -d $(BINDIR) @for b in $(BINS); do \ - echo "install -m 755 $$b $(BINDIR)"; \ - install -m 755 $$b $(BINDIR); \ + echo "$(INSTALL) -m 755 $$b $(BINDIR)"; \ + $(INSTALL) -m 755 $$b $(BINDIR); \ done install_include: - install -d $(INCLUDEDIR)/jemalloc + $(INSTALL) -d $(INCLUDEDIR)/jemalloc @for h in $(C_HDRS); do \ - echo "install -m 644 $$h $(INCLUDEDIR)/jemalloc"; \ - install -m 644 $$h $(INCLUDEDIR)/jemalloc; \ + echo "$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc"; \ + $(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc; \ done install_lib_shared: $(DSOS) - install -d $(LIBDIR) - install -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR) + $(INSTALL) -d $(LIBDIR) + $(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR) ifneq ($(SOREV),$(SO)) ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO) endif install_lib_static: $(STATIC_LIBS) - install -d $(LIBDIR) + $(INSTALL) -d $(LIBDIR) @for l in $(STATIC_LIBS); do \ - echo "install -m 755 $$l $(LIBDIR)"; \ - install -m 755 $$l $(LIBDIR); \ + echo "$(INSTALL) -m 755 $$l $(LIBDIR)"; \ + $(INSTALL) -m 755 $$l $(LIBDIR); \ done install_lib_pc: $(PC) - install -d $(LIBDIR)/pkgconfig + $(INSTALL) -d $(LIBDIR)/pkgconfig @for l in $(PC); do \ - echo "install -m 644 $$l $(LIBDIR)/pkgconfig"; \ - install -m 644 $$l 
$(LIBDIR)/pkgconfig; \ + echo "$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig"; \ + $(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig; \ done install_lib: install_lib_shared install_lib_static install_lib_pc install_doc_html: - install -d $(DATADIR)/doc/jemalloc$(install_suffix) + $(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix) @for d in $(DOCS_HTML); do \ - echo "install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \ - install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \ + echo "$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \ + $(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \ done install_doc_man: - install -d $(MANDIR)/man3 + $(INSTALL) -d $(MANDIR)/man3 @for d in $(DOCS_MAN3); do \ - echo "install -m 644 $$d $(MANDIR)/man3"; \ - install -m 644 $$d $(MANDIR)/man3; \ + echo "$(INSTALL) -m 644 $$d $(MANDIR)/man3"; \ + $(INSTALL) -m 644 $$d $(MANDIR)/man3; \ done install_doc: install_doc_html install_doc_man install: install_bin install_include install_lib install_doc tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE)) tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE))
--- a/memory/jemalloc/src/README +++ b/memory/jemalloc/src/README @@ -12,9 +12,9 @@ repercussions for real world application The COPYING file contains copyright and licensing information. The INSTALL file contains information on how to configure, build, and install jemalloc. The ChangeLog file contains a brief summary of changes for each release. -URL: http://www.canonware.com/jemalloc/ +URL: http://jemalloc.net/
--- a/memory/jemalloc/src/VERSION +++ b/memory/jemalloc/src/VERSION @@ -1,1 +1,1 @@ -4.1.1-0-ge02b83cc5e3c4d30f93dba945162e3aa58d962d6 +4.3.1-0-g0110fa8451af905affd77c3bea0d545fee2251b2
--- a/memory/jemalloc/src/configure +++ b/memory/jemalloc/src/configure @@ -623,17 +623,16 @@ ac_subst_vars='LTLIBOBJS LIBOBJS cfgoutputs_out cfgoutputs_in cfghdrs_out cfghdrs_in enable_zone_allocator enable_tls enable_lazy_lock -TESTLIBS jemalloc_version_gid jemalloc_version_nrev jemalloc_version_bugfix jemalloc_version_minor jemalloc_version_major jemalloc_version enable_cache_oblivious enable_xmalloc @@ -653,26 +652,29 @@ enable_code_coverage AUTOCONF LD RANLIB INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM enable_autogen RPATH_EXTRA +LM CC_MM AROUT ARFLAGS MKLIB +TEST_LD_MODE LDTARGET CTARGET PIC_CFLAGS SOREV EXTRA_LDFLAGS DSO_LDFLAGS +link_whole_archive libprefix exe a o importlib so LD_PRELOAD_VAR RPATH @@ -684,16 +686,17 @@ host_cpu host build_os build_vendor build_cpu build EGREP GREP CPP +EXTRA_CFLAGS OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC XSLROOT @@ -778,16 +781,17 @@ enable_utrace enable_valgrind enable_xmalloc enable_cache_oblivious with_lg_tiny_min with_lg_quantum with_lg_page with_lg_page_sizes with_lg_size_class_group +with_version enable_lazy_lock enable_tls enable_zone_allocator ' ac_precious_vars='build_alias host_alias target_alias CC @@ -1466,16 +1470,18 @@ Optional Packages: --with-lg-quantum=<lg-quantum> Base 2 log of minimum allocation alignment --with-lg-page=<lg-page> Base 2 log of system page size --with-lg-page-sizes=<lg-page-sizes> Base 2 logs of system page sizes to support --with-lg-size-class-group=<lg-size-class-group> Base 2 log of size classes per doubling + --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> + Version string Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a nonstandard directory <lib dir> LIBS libraries to pass to the linker, e.g. -l<library> CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I<include dir> if @@ -3430,16 +3436,17 @@ if test "x$ac_cv_prog_cc_c89" != xno; th fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu + if test "x$GCC" != "xyes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5 $as_echo_n "checking whether compiler is MSVC... " >&6; } if ${je_cv_msvc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -3463,20 +3470,135 @@ else je_cv_msvc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5 $as_echo "$je_cv_msvc" >&6; } fi +je_cv_cray_prgenv_wrapper="" +if test "x${PE_ENV}" != "x" ; then + case "${CC}" in + CC|cc) + je_cv_cray_prgenv_wrapper="yes" + ;; + *) + ;; + esac +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is cray" >&5 +$as_echo_n "checking whether compiler is cray... " >&6; } +if ${je_cv_cray+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + +#ifndef _CRAYC + int fail-1; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cray=yes +else + je_cv_cray=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray" >&5 +$as_echo "$je_cv_cray" >&6; } + +if test "x${je_cv_cray}" = "xyes" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cray compiler version is 8.4" >&5 +$as_echo_n "checking whether cray compiler version is 8.4... " >&6; } +if ${je_cv_cray_84+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + +#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) + int fail-1; +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cray_84=yes +else + je_cv_cray_84=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray_84" >&5 +$as_echo "$je_cv_cray_84" >&6; } +fi + if test "x$CFLAGS" = "x" ; then no_CFLAGS="yes" if test "x$GCC" = "xyes" ; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu11" >&5 +$as_echo_n "checking whether compiler supports -std=gnu11... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-std=gnu11" +else + CFLAGS="${CFLAGS} -std=gnu11" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-std=gnu11 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then + cat >>confdefs.h <<_ACEOF +#define JEMALLOC_HAS_RESTRICT 1 +_ACEOF + + else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5 $as_echo_n "checking whether compiler supports -std=gnu99... 
" >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-std=gnu99" else CFLAGS="${CFLAGS} -std=gnu99" fi @@ -3502,21 +3624,22 @@ else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then - cat >>confdefs.h <<_ACEOF + if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then + cat >>confdefs.h <<_ACEOF #define JEMALLOC_HAS_RESTRICT 1 _ACEOF + fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5 $as_echo_n "checking whether compiler supports -Wall... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-Wall" else @@ -3617,16 +3740,52 @@ else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wsign-compare" >&5 +$as_echo_n "checking whether compiler supports -Wsign-compare... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-Wsign-compare" +else + CFLAGS="${CFLAGS} -Wsign-compare" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-Wsign-compare + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5 $as_echo_n "checking whether compiler supports -pipe... 
" >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then CFLAGS="-pipe" else CFLAGS="${CFLAGS} -pipe" fi @@ -3836,55 +3995,168 @@ else $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat" fi -fi -if test "x$EXTRA_CFLAGS" != "x" ; then - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports $EXTRA_CFLAGS" >&5 -$as_echo_n "checking whether compiler supports $EXTRA_CFLAGS... " >&6; } + if test "x$je_cv_cray" = "xyes" ; then + if test "x$je_cv_cray_84" = "xyes" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hipa2" >&5 +$as_echo_n "checking whether compiler supports -hipa2... " >&6; } TCFLAGS="${CFLAGS}" if test "x${CFLAGS}" = "x" ; then - CFLAGS="$EXTRA_CFLAGS" -else - CFLAGS="${CFLAGS} $EXTRA_CFLAGS" + CFLAGS="-hipa2" +else + CFLAGS="${CFLAGS} -hipa2" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-hipa2 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnognu" >&5 +$as_echo_n "checking whether compiler supports -hnognu... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-hnognu" +else + CFLAGS="${CFLAGS} -hnognu" fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - je_cv_cflags_appended=$EXTRA_CFLAGS + je_cv_cflags_appended=-hnognu { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else je_cv_cflags_appended= { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi + fi + if test "x$enable_cc_silence" != "xno" ; then + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=128" >&5 +$as_echo_n "checking whether compiler supports -hnomessage=128... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-hnomessage=128" +else + CFLAGS="${CFLAGS} -hnomessage=128" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-hnomessage=128 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=1357" >&5 +$as_echo_n "checking whether compiler supports -hnomessage=1357... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-hnomessage=1357" +else + CFLAGS="${CFLAGS} -hnomessage=1357" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-hnomessage=1357 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + fi + fi +fi + ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. @@ -4911,27 +5183,37 @@ cat >>confdefs.h <<_ACEOF LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" o="$ac_objext" a="a" exe="$ac_exeext" libprefix="lib" +link_whole_archive="0" DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' RPATH='-Wl,-rpath,$(1)' SOREV="${so}.${rev}" PIC_CFLAGS='-fPIC -DPIC' CTARGET='-o $@' LDTARGET='-o $@' +TEST_LD_MODE= EXTRA_LDFLAGS= ARFLAGS='crus' AROUT=' $@' CC_MM=1 +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + TEST_LD_MODE='-dynamic' +fi + +if test "x${je_cv_cray}" = "xyes" ; then + CC_MM= +fi + if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. set dummy ${ac_tool_prefix}ar; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } @@ -5018,65 +5300,64 @@ ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi else AR="$ac_cv_prog_AR" fi +CFLAGS="$CFLAGS" default_munmap="1" maps_coalesce="1" case "${host}" in *-*-darwin* | *-*-ios*) - CFLAGS="$CFLAGS" abi="macho" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" importlib="${so}" force_tls="0" DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" ;; *-*-freebsd*) - CFLAGS="$CFLAGS" abi="elf" + $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h + $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h force_lazy_lock="1" ;; *-*-dragonfly*) - CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h ;; *-*-openbsd*) - CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h force_tls="0" ;; *-*-bitrig*) - CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h ;; *-*-linux*) - CFLAGS="$CFLAGS" - CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" + CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" abi="elf" $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h + $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h + $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h default_munmap="0" ;; @@ -5095,28 +5376,27 @@ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - CFLAGS="$CFLAGS"; abi="elf" + abi="elf" else abi="aout" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5 $as_echo "$abi" >&6; } $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h ;; *-*-solaris2*) - CFLAGS="$CFLAGS" abi="elf" $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h RPATH='-Wl,-R,$(1)' CPPFLAGS="$CPPFLAGS 
-D_POSIX_PTHREAD_SEMANTICS" LIBS="$LIBS -lposix4 -lsocket -lnsl" ;; *-ibm-aix*) @@ -5125,33 +5405,33 @@ rm -f core conftest.err conftest.$ac_obj else LD_PRELOAD_VAR="LDR_PRELOAD" fi abi="xcoff" ;; *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" - force_lazy_lock="1" maps_coalesce="0" RPATH="" so="dll" if test "x$je_cv_msvc" = "xyes" ; then importlib="lib" DSO_LDFLAGS="-LD" EXTRA_LDFLAGS="-link -DEBUG" CTARGET='-Fo$@' LDTARGET='-Fe$@' AR='lib' ARFLAGS='-nologo -out:' AROUT='$@' CC_MM= else importlib="${so}" DSO_LDFLAGS="-shared" + link_whole_archive="1" fi a="lib" libprefix="" SOREV="${so}" PIC_CFLAGS="" ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5 @@ -5223,16 +5503,83 @@ cat >>confdefs.h <<_ACEOF + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5 +$as_echo_n "checking for library containing log... " >&6; } +if ${ac_cv_search_log+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char log (); +int +main () +{ +return log (); + ; + return 0; +} +_ACEOF +for ac_lib in '' m; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_log=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_log+:} false; then : + break +fi +done +if ${ac_cv_search_log+:} false; then : + +else + ac_cv_search_log=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5 +$as_echo "$ac_cv_search_log" >&6; } +ac_res=$ac_cv_search_log +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +else + as_fn_error $? "Missing math functions" "$LINENO" 5 +fi + +if test "x$ac_cv_search_log" != "xnone required" ; then + LM="$ac_cv_search_log" +else + LM= +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5 $as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; } if ${je_cv_attribute+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ static __attribute__((unused)) void foo(void){} @@ -5330,16 +5677,52 @@ else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-herror_on_warning" +else + CFLAGS="${CFLAGS} -herror_on_warning" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5 $as_echo_n "checking whether tls_model attribute is compilable... " >&6; } if ${je_cv_tls_model+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -5405,16 +5788,52 @@ else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-herror_on_warning" +else + CFLAGS="${CFLAGS} -herror_on_warning" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5 $as_echo_n "checking whether alloc_size attribute is compilable... 
" >&6; } if ${je_cv_alloc_size+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> @@ -5475,16 +5894,52 @@ else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-herror_on_warning" +else + CFLAGS="${CFLAGS} -herror_on_warning" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_gnu_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> @@ -5545,16 +6000,52 @@ else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } CFLAGS="${TCFLAGS}" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5 +$as_echo_n "checking whether compiler supports -herror_on_warning... 
" >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-herror_on_warning" +else + CFLAGS="${CFLAGS} -herror_on_warning" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-herror_on_warning + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5 $as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; } if ${je_cv_format_printf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> @@ -6647,18 +7138,18 @@ if test "x$backtrace_method" = "x" ; the backtrace_method="none (disabling profiling)" enable_prof="0" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking configured backtracing method" >&5 $as_echo_n "checking configured backtracing method... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5 $as_echo "$backtrace_method" >&6; } if test "x$enable_prof" = "x1" ; then - if test "x$abi" != "xpecoff"; then - LIBS="$LIBS -lm" + if test "x$LM" != "x" ; then + LIBS="$LIBS $LM" fi $as_echo "#define JEMALLOC_PROF " >>confdefs.h fi # Check whether --enable-tcache was given. @@ -6897,16 +7388,62 @@ fi if test "x$enable_cache_oblivious" = "x1" ; then $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS " >>confdefs.h fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_unreachable is compilable" >&5 +$as_echo_n "checking whether a program using __builtin_unreachable is compilable... 
" >&6; } +if ${je_cv_gcc_builtin_unreachable+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +void foo (void) { + __builtin_unreachable(); +} + +int +main () +{ + + { + foo(); + } + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_gcc_builtin_unreachable=yes +else + je_cv_gcc_builtin_unreachable=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_unreachable" >&5 +$as_echo "$je_cv_gcc_builtin_unreachable" >&6; } + +if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then + $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable" >>confdefs.h + +else + $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE abort" >>confdefs.h + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5 $as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; } if ${je_cv_gcc_builtin_ffsl+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -7122,33 +7659,46 @@ fi if test "${with_lg_size_class_group+set}" = set; then : withval=$with_lg_size_class_group; LG_SIZE_CLASS_GROUP="$with_lg_size_class_group" else LG_SIZE_CLASS_GROUP="2" fi -if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then - rm -f "${objroot}VERSION" - for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ - '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ - '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ - '[0-9][0-9].[0-9][0-9].[0-9]' \ - '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do - if test ! -e "${objroot}VERSION" ; then - (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null - if test $? 
-eq 0 ; then - mv "${objroot}VERSION.tmp" "${objroot}VERSION" - break - fi + +# Check whether --with-version was given. +if test "${with_version+set}" = set; then : + withval=$with_version; + echo "${with_version}" | grep '^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$' 2>&1 1>/dev/null + if test $? -ne 0 ; then + as_fn_error $? "${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid>" "$LINENO" 5 fi - done -fi -rm -f "${objroot}VERSION.tmp" + echo "$with_version" > "${objroot}VERSION" + +else + + if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then + for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ + '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do + (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null + if test $? -eq 0 ; then + mv "${objroot}VERSION.tmp" "${objroot}VERSION" + break + fi + done + fi + rm -f "${objroot}VERSION.tmp" + +fi + + if test ! -e "${objroot}VERSION" ; then if test ! -e "${srcroot}VERSION" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5 $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; } echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" else cp ${srcroot}VERSION ${objroot}VERSION fi @@ -7280,18 +7830,16 @@ else fi fi fi CPPFLAGS="$CPPFLAGS -D_REENTRANT" -SAVED_LIBS="${LIBS}" -LIBS= { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 $as_echo_n "checking for library containing clock_gettime... 
" >&6; } if ${ac_cv_search_clock_gettime+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -7335,21 +7883,316 @@ fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 $as_echo "$ac_cv_search_clock_gettime" >&6; } ac_res=$ac_cv_search_clock_gettime if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - TESTLIBS="${LIBS}" -fi - - -LIBS="${SAVED_LIBS}" + +fi + + +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + if test "$ac_cv_search_clock_gettime" != "-lrt"; then + SAVED_CFLAGS="${CFLAGS}" + + unset ac_cv_search_clock_gettime + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -dynamic" >&5 +$as_echo_n "checking whether compiler supports -dynamic... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-dynamic" +else + CFLAGS="${CFLAGS} -dynamic" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-dynamic + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 +$as_echo_n "checking for library containing clock_gettime... " >&6; } +if ${ac_cv_search_clock_gettime+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char clock_gettime (); +int +main () +{ +return clock_gettime (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_clock_gettime=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_clock_gettime+:} false; then : + break +fi +done +if ${ac_cv_search_clock_gettime+:} false; then : + +else + ac_cv_search_clock_gettime=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 +$as_echo "$ac_cv_search_clock_gettime" >&6; } +ac_res=$ac_cv_search_clock_gettime +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + + CFLAGS="${SAVED_CFLAGS}" + fi +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable" >&5 +$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable... " >&6; } +if ${je_cv_clock_monotonic_coarse+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include <time.h> + +int +main () +{ + + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_clock_monotonic_coarse=yes +else + je_cv_clock_monotonic_coarse=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic_coarse" >&5 +$as_echo "$je_cv_clock_monotonic_coarse" >&6; } + +if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1" >>confdefs.h + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable" >&5 +$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable... " >&6; } +if ${je_cv_clock_monotonic+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include <unistd.h> +#include <time.h> + +int +main () +{ + + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); +#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0 +# error _POSIX_MONOTONIC_CLOCK missing/invalid +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_clock_monotonic=yes +else + je_cv_clock_monotonic=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic" >&5 +$as_echo "$je_cv_clock_monotonic" >&6; } + +if test "x${je_cv_clock_monotonic}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1" >>confdefs.h + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mach_absolute_time() is compilable" >&5 +$as_echo_n "checking whether mach_absolute_time() is compilable... 
" >&6; } +if ${je_cv_mach_absolute_time+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include <mach/mach_time.h> + +int +main () +{ + + mach_absolute_time(); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_mach_absolute_time=yes +else + je_cv_mach_absolute_time=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mach_absolute_time" >&5 +$as_echo "$je_cv_mach_absolute_time" >&6; } + +if test "x${je_cv_mach_absolute_time}" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1" >>confdefs.h + +fi + +SAVED_CFLAGS="${CFLAGS}" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5 +$as_echo_n "checking whether compiler supports -Werror... " >&6; } +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="-Werror" +else + CFLAGS="${CFLAGS} -Werror" +fi +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ + + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + je_cv_cflags_appended=-Werror + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + je_cv_cflags_appended= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CFLAGS="${TCFLAGS}" + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether syscall(2) is compilable" >&5 +$as_echo_n "checking whether syscall(2) is compilable... " >&6; } +if ${je_cv_syscall+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include <sys/syscall.h> +#include <unistd.h> + +int +main () +{ + + syscall(SYS_write, 2, "hello", 5); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_syscall=yes +else + je_cv_syscall=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5 +$as_echo "$je_cv_syscall" >&6; } + +CFLAGS="${SAVED_CFLAGS}" +if test "x$je_cv_syscall" = "xyes" ; then + $as_echo "#define JEMALLOC_HAVE_SYSCALL " >>confdefs.h + +fi ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv" if test "x$ac_cv_func_secure_getenv" = xyes; then : have_secure_getenv="1" else have_secure_getenv="0" fi @@ -7407,20 +8250,29 @@ else enable_lazy_lock="1" fi else enable_lazy_lock="" fi -if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5 +if test "x${enable_lazy_lock}" = "x" ; then + if test "x${force_lazy_lock}" = "x1" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5 $as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; } - enable_lazy_lock="1" + enable_lazy_lock="1" + else + enable_lazy_lock="0" + fi +fi +if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no lazy-lock because thread creation monitoring is unimplemented" >&5 +$as_echo "Forcing no lazy-lock because thread creation monitoring is unimplemented" >&6; } + enable_lazy_lock="0" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$abi" != "xpecoff" ; then for ac_header in dlfcn.h do : ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default" if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF @@ -7480,18 
+8332,16 @@ else fi fi fi $as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h -else - enable_lazy_lock="0" fi # Check whether --enable-tls was given. if test "${enable_tls+set}" = set; then : enableval=$enable_tls; if test "x$enable_tls" = "xno" ; then enable_tls="0" else @@ -7882,16 +8732,56 @@ fi if test "x${je_cv_builtin_clz}" = "xyes" ; then $as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ " >>confdefs.h fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin os_unfair_lock_*() is compilable" >&5 +$as_echo_n "checking whether Darwin os_unfair_lock_*() is compilable... " >&6; } +if ${je_cv_os_unfair_lock+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include <os/lock.h> + +int +main () +{ + + os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; + os_unfair_lock_lock(&lock); + os_unfair_lock_unlock(&lock); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + je_cv_os_unfair_lock=yes +else + je_cv_os_unfair_lock=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_os_unfair_lock" >&5 +$as_echo "$je_cv_os_unfair_lock" >&6; } + +if test "x${je_cv_os_unfair_lock}" = "xyes" ; then + $as_echo "#define JEMALLOC_OS_UNFAIR_LOCK " >>confdefs.h + +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSSpin*() is compilable" >&5 $as_echo_n "checking whether Darwin OSSpin*() is compilable... " >&6; } if ${je_cv_osspin+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -9784,26 +10674,26 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG : ${CONFIG}" >&5 $as_echo "CONFIG : ${CONFIG}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5 $as_echo "CC : ${CC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS : ${CFLAGS}" >&5 $as_echo "CFLAGS : ${CFLAGS}" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&5 +$as_echo "EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5 $as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5 $as_echo "LDFLAGS : ${LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5 $as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5 $as_echo "LIBS : ${LIBS}" >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: TESTLIBS : ${TESTLIBS}" >&5 -$as_echo "TESTLIBS : ${TESTLIBS}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5 $as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5 $as_echo "" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC : ${XSLTPROC}" >&5 $as_echo "XSLTPROC : ${XSLTPROC}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT : ${XSLROOT}" >&5 $as_echo "XSLROOT : ${XSLROOT}" >&6; }
--- a/memory/jemalloc/src/configure.ac +++ b/memory/jemalloc/src/configure.ac @@ -113,54 +113,108 @@ fi XSLROOT="${DEFAULT_XSLROOT}" ) AC_SUBST([XSLROOT]) dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise, dnl just prevent autoconf from molesting CFLAGS. CFLAGS=$CFLAGS AC_PROG_CC + if test "x$GCC" != "xyes" ; then AC_CACHE_CHECK([whether compiler is MSVC], [je_cv_msvc], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [ #ifndef _MSC_VER int fail[-1]; #endif ])], [je_cv_msvc=yes], [je_cv_msvc=no])]) fi +dnl check if a cray prgenv wrapper compiler is being used +je_cv_cray_prgenv_wrapper="" +if test "x${PE_ENV}" != "x" ; then + case "${CC}" in + CC|cc) + je_cv_cray_prgenv_wrapper="yes" + ;; + *) + ;; + esac +fi + +AC_CACHE_CHECK([whether compiler is cray], + [je_cv_cray], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#ifndef _CRAYC + int fail[-1]; +#endif +])], + [je_cv_cray=yes], + [je_cv_cray=no])]) + +if test "x${je_cv_cray}" = "xyes" ; then + AC_CACHE_CHECK([whether cray compiler version is 8.4], + [je_cv_cray_84], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) + int fail[-1]; +#endif +])], + [je_cv_cray_84=yes], + [je_cv_cray_84=no])]) +fi + if test "x$CFLAGS" = "x" ; then no_CFLAGS="yes" if test "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-std=gnu99]) - if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then +dnl JE_CFLAGS_APPEND([-std=gnu99]) + JE_CFLAGS_APPEND([-std=gnu11]) + if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) + else + JE_CFLAGS_APPEND([-std=gnu99]) + if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) + fi fi JE_CFLAGS_APPEND([-Wall]) JE_CFLAGS_APPEND([-Werror=declaration-after-statement]) JE_CFLAGS_APPEND([-Wshorten-64-to-32]) + JE_CFLAGS_APPEND([-Wsign-compare]) JE_CFLAGS_APPEND([-pipe]) JE_CFLAGS_APPEND([-g3]) elif test "x$je_cv_msvc" = "xyes" ; then CC="$CC -nologo" 
JE_CFLAGS_APPEND([-Zi]) JE_CFLAGS_APPEND([-MT]) JE_CFLAGS_APPEND([-W3]) JE_CFLAGS_APPEND([-FS]) CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat" fi + if test "x$je_cv_cray" = "xyes" ; then + dnl cray compiler 8.4 has an inlining bug + if test "x$je_cv_cray_84" = "xyes" ; then + JE_CFLAGS_APPEND([-hipa2]) + JE_CFLAGS_APPEND([-hnognu]) + fi + if test "x$enable_cc_silence" != "xno" ; then + dnl ignore unreachable code warning + JE_CFLAGS_APPEND([-hnomessage=128]) + dnl ignore redefinition of "malloc", "free", etc warning + JE_CFLAGS_APPEND([-hnomessage=1357]) + fi + fi fi -dnl Append EXTRA_CFLAGS to CFLAGS, if defined. -if test "x$EXTRA_CFLAGS" != "x" ; then - JE_CFLAGS_APPEND([$EXTRA_CFLAGS]) -fi +AC_SUBST([EXTRA_CFLAGS]) AC_PROG_CPP AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0]) if test "x${ac_cv_big_endian}" = "x1" ; then AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ]) fi if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then @@ -257,103 +311,110 @@ AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" o="$ac_objext" a="a" exe="$ac_exeext" libprefix="lib" +link_whole_archive="0" DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' RPATH='-Wl,-rpath,$(1)' SOREV="${so}.${rev}" PIC_CFLAGS='-fPIC -DPIC' CTARGET='-o $@' LDTARGET='-o $@' +TEST_LD_MODE= EXTRA_LDFLAGS= ARFLAGS='crus' AROUT=' $@' CC_MM=1 +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + TEST_LD_MODE='-dynamic' +fi + +if test "x${je_cv_cray}" = "xyes" ; then + CC_MM= +fi + AN_MAKEVAR([AR], [AC_PROG_AR]) AN_PROGRAM([ar], [AC_PROG_AR]) AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)]) AC_PROG_AR dnl Platform-specific settings. abi and RPATH can probably be determined dnl programmatically, but doing so is error-prone, which makes it generally dnl not worth the trouble. 
dnl dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the dnl definitions need to be seen before any headers are included, which is a pain dnl to make happen otherwise. +CFLAGS="$CFLAGS" default_munmap="1" maps_coalesce="1" case "${host}" in *-*-darwin* | *-*-ios*) - CFLAGS="$CFLAGS" abi="macho" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" importlib="${so}" force_tls="0" DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" ;; *-*-freebsd*) - CFLAGS="$CFLAGS" abi="elf" + AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) force_lazy_lock="1" ;; *-*-dragonfly*) - CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-openbsd*) - CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) force_tls="0" ;; *-*-bitrig*) - CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-linux*) - CFLAGS="$CFLAGS" + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" abi="elf" AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) default_munmap="0" ;; *-*-netbsd*) AC_MSG_CHECKING([ABI]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[#ifdef __ELF__ /* ELF */ #else #error aout #endif ]])], - [CFLAGS="$CFLAGS"; abi="elf"], + [abi="elf"], [abi="aout"]) AC_MSG_RESULT([$abi]) AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-solaris2*) - CFLAGS="$CFLAGS" abi="elf" AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH='-Wl,-R,$(1)' dnl Solaris needs this for sigwait(). 
CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" LIBS="$LIBS -lposix4 -lsocket -lnsl" ;; *-ibm-aix*) @@ -364,33 +425,33 @@ case "${host}" in dnl 32bit AIX LD_PRELOAD_VAR="LDR_PRELOAD" fi abi="xcoff" ;; *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" - force_lazy_lock="1" maps_coalesce="0" RPATH="" so="dll" if test "x$je_cv_msvc" = "xyes" ; then importlib="lib" DSO_LDFLAGS="-LD" EXTRA_LDFLAGS="-link -DEBUG" CTARGET='-Fo$@' LDTARGET='-Fe$@' AR='lib' ARFLAGS='-nologo -out:' AROUT='$@' CC_MM= else importlib="${so}" DSO_LDFLAGS="-shared" + link_whole_archive="1" fi a="lib" libprefix="" SOREV="${so}" PIC_CFLAGS="" ;; *) AC_MSG_RESULT([Unsupported operating system: ${host}]) @@ -418,75 +479,90 @@ AC_SUBST([abi]) AC_SUBST([RPATH]) AC_SUBST([LD_PRELOAD_VAR]) AC_SUBST([so]) AC_SUBST([importlib]) AC_SUBST([o]) AC_SUBST([a]) AC_SUBST([exe]) AC_SUBST([libprefix]) +AC_SUBST([link_whole_archive]) AC_SUBST([DSO_LDFLAGS]) AC_SUBST([EXTRA_LDFLAGS]) AC_SUBST([SOREV]) AC_SUBST([PIC_CFLAGS]) AC_SUBST([CTARGET]) AC_SUBST([LDTARGET]) +AC_SUBST([TEST_LD_MODE]) AC_SUBST([MKLIB]) AC_SUBST([ARFLAGS]) AC_SUBST([AROUT]) AC_SUBST([CC_MM]) +dnl Determine whether libm must be linked to use e.g. log(3). +AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])]) +if test "x$ac_cv_search_log" != "xnone required" ; then + LM="$ac_cv_search_log" +else + LM= +fi +AC_SUBST(LM) + JE_COMPILABLE([__attribute__ syntax], [static __attribute__((unused)) void foo(void){}], [], [je_cv_attribute]) if test "x${je_cv_attribute}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then JE_CFLAGS_APPEND([-fvisibility=hidden]) fi fi dnl Check for tls_model attribute support (clang 3.0 still lacks support). 
SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_APPEND([-herror_on_warning]) JE_COMPILABLE([tls_model attribute], [], [static __thread int __attribute__((tls_model("initial-exec"), unused)) foo; foo = 0;], [je_cv_tls_model]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_tls_model}" = "xyes" ; then AC_DEFINE([JEMALLOC_TLS_MODEL], [__attribute__((tls_model("initial-exec")))]) else AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) fi dnl Check for alloc_size attribute support. SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_APPEND([-herror_on_warning]) JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>], [void *foo(size_t size) __attribute__((alloc_size(1)));], [je_cv_alloc_size]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_alloc_size}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ]) fi dnl Check for format(gnu_printf, ...) attribute support. SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_APPEND([-herror_on_warning]) JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>], [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));], [je_cv_format_gnu_printf]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_format_gnu_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ]) fi dnl Check for format(printf, ...) attribute support. SAVED_CFLAGS="${CFLAGS}" JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_APPEND([-herror_on_warning]) JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>], [void *foo(const char *format, ...) 
__attribute__((format(printf, 1, 2)));], [je_cv_format_printf]) CFLAGS="${SAVED_CFLAGS}" if test "x${je_cv_format_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ]) fi @@ -871,19 +947,19 @@ fi if test "x$backtrace_method" = "x" ; then backtrace_method="none (disabling profiling)" enable_prof="0" fi AC_MSG_CHECKING([configured backtracing method]) AC_MSG_RESULT([$backtrace_method]) if test "x$enable_prof" = "x1" ; then - if test "x$abi" != "xpecoff"; then - dnl Heap profiling uses the log(3) function. - LIBS="$LIBS -lm" + dnl Heap profiling uses the log(3) function. + if test "x$LM" != "x" ; then + LIBS="$LIBS $LM" fi AC_DEFINE([JEMALLOC_PROF], [ ]) fi AC_SUBST([enable_prof]) dnl Enable thread-specific caching by default. AC_ARG_ENABLE([tcache], @@ -1042,16 +1118,33 @@ fi ], [enable_cache_oblivious="1"] ) if test "x$enable_cache_oblivious" = "x1" ; then AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ]) fi AC_SUBST([enable_cache_oblivious]) + + +JE_COMPILABLE([a program using __builtin_unreachable], [ +void foo (void) { + __builtin_unreachable(); +} +], [ + { + foo(); + } +], [je_cv_gcc_builtin_unreachable]) +if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable]) +else + AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort]) +fi + dnl ============================================================================ dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found. dnl One of those two functions should (theoretically) exist on all platforms dnl that jemalloc currently has a chance of functioning on without modification. dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if dnl ffsl() or __builtin_ffsl() are defined, respectively. 
JE_COMPILABLE([a program using __builtin_ffsl], [ #include <stdio.h> @@ -1167,37 +1260,46 @@ AC_ARG_WITH([lg_size_class_group], [Base 2 log of size classes per doubling])], [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"], [LG_SIZE_CLASS_GROUP="2"]) dnl ============================================================================ dnl jemalloc configuration. dnl -dnl Set VERSION if source directory is inside a git repository. -if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then - dnl Pattern globs aren't powerful enough to match both single- and - dnl double-digit version numbers, so iterate over patterns to support up to - dnl version 99.99.99 without any accidental matches. - rm -f "${objroot}VERSION" - for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ - '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ - '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ - '[0-9][0-9].[0-9][0-9].[0-9]' \ - '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do - if test ! -e "${objroot}VERSION" ; then - (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null - if test $? -eq 0 ; then - mv "${objroot}VERSION.tmp" "${objroot}VERSION" - break - fi +AC_ARG_WITH([version], + [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>], + [Version string])], + [ + echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null + if test $? -ne 0 ; then + AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid>]) fi - done -fi -rm -f "${objroot}VERSION.tmp" + echo "$with_version" > "${objroot}VERSION" + ], [ + dnl Set VERSION if source directory is inside a git repository. + if test "x`test ! 
\"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then + dnl Pattern globs aren't powerful enough to match both single- and + dnl double-digit version numbers, so iterate over patterns to support up + dnl to version 99.99.99 without any accidental matches. + for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ + '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do + (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null + if test $? -eq 0 ; then + mv "${objroot}VERSION.tmp" "${objroot}VERSION" + break + fi + done + fi + rm -f "${objroot}VERSION.tmp" + ]) + if test ! -e "${objroot}VERSION" ; then if test ! -e "${srcroot}VERSION" ; then AC_MSG_RESULT( [Missing VERSION file, and unable to generate it; creating bogus VERSION]) echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" else cp ${srcroot}VERSION ${objroot}VERSION fi @@ -1224,23 +1326,86 @@ if test "x$abi" != "xpecoff" ; then dnl first, but try libc too before failing. AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"], [AC_SEARCH_LIBS([pthread_create], , , AC_MSG_ERROR([libpthread is missing]))]) fi CPPFLAGS="$CPPFLAGS -D_REENTRANT" -dnl Check whether clock_gettime(2) is in libc or librt. This function is only -dnl used in test code, so save the result to TESTLIBS to avoid poluting LIBS. -SAVED_LIBS="${LIBS}" -LIBS= -AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"]) -AC_SUBST([TESTLIBS]) -LIBS="${SAVED_LIBS}" +dnl Check whether clock_gettime(2) is in libc or librt. +AC_SEARCH_LIBS([clock_gettime], [rt]) + +dnl Cray wrapper compiler often adds `-lrt` when using `-static`. 
Check with +dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + if test "$ac_cv_search_clock_gettime" != "-lrt"; then + SAVED_CFLAGS="${CFLAGS}" + + unset ac_cv_search_clock_gettime + JE_CFLAGS_APPEND([-dynamic]) + AC_SEARCH_LIBS([clock_gettime], [rt]) + + CFLAGS="${SAVED_CFLAGS}" + fi +fi + +dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific). +JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [ +#include <time.h> +], [ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); +], [je_cv_clock_monotonic_coarse]) +if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE]) +fi + +dnl check for CLOCK_MONOTONIC. +JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [ +#include <unistd.h> +#include <time.h> +], [ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); +#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0 +# error _POSIX_MONOTONIC_CLOCK missing/invalid +#endif +], [je_cv_clock_monotonic]) +if test "x${je_cv_clock_monotonic}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC]) +fi + +dnl Check for mach_absolute_time(). +JE_COMPILABLE([mach_absolute_time()], [ +#include <mach/mach_time.h> +], [ + mach_absolute_time(); +], [je_cv_mach_absolute_time]) +if test "x${je_cv_mach_absolute_time}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME]) +fi + +dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS X +dnl 10.12's deprecation warning prevents use. +SAVED_CFLAGS="${CFLAGS}" +JE_CFLAGS_APPEND([-Werror]) +JE_COMPILABLE([syscall(2)], [ +#include <sys/syscall.h> +#include <unistd.h> +], [ + syscall(SYS_write, 2, "hello", 5); +], + [je_cv_syscall]) +CFLAGS="${SAVED_CFLAGS}" +if test "x$je_cv_syscall" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_SYSCALL], [ ]) +fi dnl Check if the GNU-specific secure_getenv function exists. 
AC_CHECK_FUNC([secure_getenv], [have_secure_getenv="1"], [have_secure_getenv="0"] ) if test "x$have_secure_getenv" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ]) @@ -1286,31 +1451,37 @@ AC_ARG_ENABLE([lazy_lock], [if test "x$enable_lazy_lock" = "xno" ; then enable_lazy_lock="0" else enable_lazy_lock="1" fi ], [enable_lazy_lock=""] ) -if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then - AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) - enable_lazy_lock="1" +if test "x${enable_lazy_lock}" = "x" ; then + if test "x${force_lazy_lock}" = "x1" ; then + AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) + enable_lazy_lock="1" + else + enable_lazy_lock="0" + fi +fi +if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then + AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented]) + enable_lazy_lock="0" fi if test "x$enable_lazy_lock" = "x1" ; then if test "x$abi" != "xpecoff" ; then AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])]) AC_CHECK_FUNC([dlsym], [], [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [AC_MSG_ERROR([libdl is missing])]) ]) fi AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) -else - enable_lazy_lock="0" fi AC_SUBST([enable_lazy_lock]) AC_ARG_ENABLE([tls], [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])], if test "x$enable_tls" = "xno" ; then enable_tls="0" else @@ -1489,16 +1660,30 @@ AC_CACHE_CHECK([for __builtin_clz], [je_cv_builtin_clz=yes], [je_cv_builtin_clz=no])]) if test "x${je_cv_builtin_clz}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ]) fi dnl ============================================================================ +dnl Check for os_unfair_lock operations as provided on Darwin. 
+ +JE_COMPILABLE([Darwin os_unfair_lock_*()], [ +#include <os/lock.h> +], [ + os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; + os_unfair_lock_lock(&lock); + os_unfair_lock_unlock(&lock); +], [je_cv_os_unfair_lock]) +if test "x${je_cv_os_unfair_lock}" = "xyes" ; then + AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ]) +fi + +dnl ============================================================================ dnl Check for spinlock(3) operations as provided on Darwin. JE_COMPILABLE([Darwin OSSpin*()], [ #include <libkern/OSAtomic.h> #include <inttypes.h> ], [ OSSpinLock lock = 0; OSSpinLockLock(&lock); @@ -1732,21 +1917,21 @@ dnl ==================================== dnl Print out the results of configuration. AC_MSG_RESULT([===============================================================================]) AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) AC_MSG_RESULT([library revision : ${rev}]) AC_MSG_RESULT([]) AC_MSG_RESULT([CONFIG : ${CONFIG}]) AC_MSG_RESULT([CC : ${CC}]) AC_MSG_RESULT([CFLAGS : ${CFLAGS}]) +AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}]) AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) AC_MSG_RESULT([LIBS : ${LIBS}]) -AC_MSG_RESULT([TESTLIBS : ${TESTLIBS}]) AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) AC_MSG_RESULT([]) AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) AC_MSG_RESULT([]) AC_MSG_RESULT([PREFIX : ${PREFIX}]) AC_MSG_RESULT([BINDIR : ${BINDIR}]) AC_MSG_RESULT([DATADIR : ${DATADIR}])
--- a/memory/jemalloc/src/doc/html.xsl.in +++ b/memory/jemalloc/src/doc/html.xsl.in @@ -1,4 +1,5 @@ <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> <xsl:import href="@XSLROOT@/html/docbook.xsl"/> <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/> + <xsl:output method="xml" encoding="utf-8"/> </xsl:stylesheet>
--- a/memory/jemalloc/src/doc/jemalloc.xml.in +++ b/memory/jemalloc/src/doc/jemalloc.xml.in @@ -47,17 +47,17 @@ <refname>malloc_usable_size</refname> --> <refpurpose>general purpose memory allocation functions</refpurpose> </refnamediv> <refsect1 id="library"> <title>LIBRARY</title> <para>This manual describes jemalloc @jemalloc_version@. More information can be found at the <ulink - url="http://www.canonware.com/jemalloc/">jemalloc website</ulink>.</para> + url="http://jemalloc.net/">jemalloc website</ulink>.</para> </refsect1> <refsynopsisdiv> <title>SYNOPSIS</title> <funcsynopsis> <funcsynopsisinfo>#include <<filename class="headerfile">jemalloc/jemalloc.h</filename>></funcsynopsisinfo> <refsect2> <title>Standard API</title> <funcprototype> @@ -175,73 +175,73 @@ </refsect2> </funcsynopsis> </refsynopsisdiv> <refsect1 id="description"> <title>DESCRIPTION</title> <refsect2> <title>Standard API</title> - <para>The <function>malloc<parameter/></function> function allocates + <para>The <function>malloc()</function> function allocates <parameter>size</parameter> bytes of uninitialized memory. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object.</para> - <para>The <function>calloc<parameter/></function> function allocates + <para>The <function>calloc()</function> function allocates space for <parameter>number</parameter> objects, each <parameter>size</parameter> bytes in length. 
The result is identical to - calling <function>malloc<parameter/></function> with an argument of + calling <function>malloc()</function> with an argument of <parameter>number</parameter> * <parameter>size</parameter>, with the exception that the allocated memory is explicitly initialized to zero bytes.</para> - <para>The <function>posix_memalign<parameter/></function> function + <para>The <function>posix_memalign()</function> function allocates <parameter>size</parameter> bytes of memory such that the allocation's base address is a multiple of <parameter>alignment</parameter>, and returns the allocation in the value pointed to by <parameter>ptr</parameter>. The requested <parameter>alignment</parameter> must be a power of 2 at least as large as <code language="C">sizeof(<type>void *</type>)</code>.</para> - <para>The <function>aligned_alloc<parameter/></function> function + <para>The <function>aligned_alloc()</function> function allocates <parameter>size</parameter> bytes of memory such that the allocation's base address is a multiple of <parameter>alignment</parameter>. The requested <parameter>alignment</parameter> must be a power of 2. Behavior is undefined if <parameter>size</parameter> is not an integral multiple of <parameter>alignment</parameter>.</para> - <para>The <function>realloc<parameter/></function> function changes the + <para>The <function>realloc()</function> function changes the size of the previously allocated memory referenced by <parameter>ptr</parameter> to <parameter>size</parameter> bytes. The contents of the memory are unchanged up to the lesser of the new and old sizes. If the new size is larger, the contents of the newly allocated portion of the memory are undefined. Upon success, the memory referenced by <parameter>ptr</parameter> is freed and a pointer to the newly allocated memory is returned. 
Note that - <function>realloc<parameter/></function> may move the memory allocation, + <function>realloc()</function> may move the memory allocation, resulting in a different return value than <parameter>ptr</parameter>. If <parameter>ptr</parameter> is <constant>NULL</constant>, the - <function>realloc<parameter/></function> function behaves identically to - <function>malloc<parameter/></function> for the specified size.</para> + <function>realloc()</function> function behaves identically to + <function>malloc()</function> for the specified size.</para> - <para>The <function>free<parameter/></function> function causes the + <para>The <function>free()</function> function causes the allocated memory referenced by <parameter>ptr</parameter> to be made available for future allocations. If <parameter>ptr</parameter> is <constant>NULL</constant>, no action occurs.</para> </refsect2> <refsect2> <title>Non-standard API</title> - <para>The <function>mallocx<parameter/></function>, - <function>rallocx<parameter/></function>, - <function>xallocx<parameter/></function>, - <function>sallocx<parameter/></function>, - <function>dallocx<parameter/></function>, - <function>sdallocx<parameter/></function>, and - <function>nallocx<parameter/></function> functions all have a + <para>The <function>mallocx()</function>, + <function>rallocx()</function>, + <function>xallocx()</function>, + <function>sallocx()</function>, + <function>dallocx()</function>, + <function>sdallocx()</function>, and + <function>nallocx()</function> functions all have a <parameter>flags</parameter> argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (<code language="C">|</code>) operations to specify one or more of the following: <variablelist> <varlistentry id="MALLOCX_LG_ALIGN"> <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>) </constant></term> @@ -302,87 +302,87 @@ <parameter>a</parameter>. 
This macro has no effect for regions that were allocated via an arena other than the one specified. This macro does not validate that <parameter>a</parameter> specifies an arena index in the valid range.</para></listitem> </varlistentry> </variablelist> </para> - <para>The <function>mallocx<parameter/></function> function allocates at + <para>The <function>mallocx()</function> function allocates at least <parameter>size</parameter> bytes of memory, and returns a pointer to the base address of the allocation. Behavior is undefined if <parameter>size</parameter> is <constant>0</constant>.</para> - <para>The <function>rallocx<parameter/></function> function resizes the + <para>The <function>rallocx()</function> function resizes the allocation at <parameter>ptr</parameter> to be at least <parameter>size</parameter> bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location. Behavior is undefined if <parameter>size</parameter> is <constant>0</constant>.</para> - <para>The <function>xallocx<parameter/></function> function resizes the + <para>The <function>xallocx()</function> function resizes the allocation at <parameter>ptr</parameter> in place to be at least <parameter>size</parameter> bytes, and returns the real size of the allocation. If <parameter>extra</parameter> is non-zero, an attempt is made to resize the allocation to be at least <code language="C">(<parameter>size</parameter> + <parameter>extra</parameter>)</code> bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize. 
Behavior is undefined if <parameter>size</parameter> is <constant>0</constant>, or if <code language="C">(<parameter>size</parameter> + <parameter>extra</parameter> > <constant>SIZE_T_MAX</constant>)</code>.</para> - <para>The <function>sallocx<parameter/></function> function returns the + <para>The <function>sallocx()</function> function returns the real size of the allocation at <parameter>ptr</parameter>.</para> - <para>The <function>dallocx<parameter/></function> function causes the + <para>The <function>dallocx()</function> function causes the memory referenced by <parameter>ptr</parameter> to be made available for future allocations.</para> - <para>The <function>sdallocx<parameter/></function> function is an - extension of <function>dallocx<parameter/></function> with a + <para>The <function>sdallocx()</function> function is an + extension of <function>dallocx()</function> with a <parameter>size</parameter> parameter to allow the caller to pass in the allocation size as an optimization. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by - <function>nallocx<parameter/></function> or - <function>sallocx<parameter/></function>.</para> + <function>nallocx()</function> or + <function>sallocx()</function>.</para> - <para>The <function>nallocx<parameter/></function> function allocates no + <para>The <function>nallocx()</function> function allocates no memory, but it performs the same size computation as the - <function>mallocx<parameter/></function> function, and returns the real + <function>mallocx()</function> function, and returns the real size of the allocation that would result from the equivalent - <function>mallocx<parameter/></function> function call, or + <function>mallocx()</function> function call, or <constant>0</constant> if the inputs exceed the maximum supported size class and/or alignment. 
Behavior is undefined if <parameter>size</parameter> is <constant>0</constant>.</para> - <para>The <function>mallctl<parameter/></function> function provides a + <para>The <function>mallctl()</function> function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions. The period-separated <parameter>name</parameter> argument specifies a location in a tree-structured namespace; see the <xref linkend="mallctl_namespace" xrefstyle="template:%t"/> section for documentation on the tree contents. To read a value, pass a pointer via <parameter>oldp</parameter> to adequate space to contain the value, and a pointer to its length via <parameter>oldlenp</parameter>; otherwise pass <constant>NULL</constant> and <constant>NULL</constant>. Similarly, to write a value, pass a pointer to the value via <parameter>newp</parameter>, and its length via <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant> and <constant>0</constant>.</para> - <para>The <function>mallctlnametomib<parameter/></function> function + <para>The <function>mallctlnametomib()</function> function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name - to a “Management Information Base” (MIB) that can be passed - repeatedly to <function>mallctlbymib<parameter/></function>. Upon - successful return from <function>mallctlnametomib<parameter/></function>, + to a <quote>Management Information Base</quote> (MIB) that can be passed + repeatedly to <function>mallctlbymib()</function>. Upon + successful return from <function>mallctlnametomib()</function>, <parameter>mibp</parameter> contains an array of <parameter>*miblenp</parameter> integers, where <parameter>*miblenp</parameter> is the lesser of the number of components in <parameter>name</parameter> and the input value of <parameter>*miblenp</parameter>. 
Thus it is possible to pass a <parameter>*miblenp</parameter> that is smaller than the number of period-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB. For name @@ -405,67 +405,68 @@ for (i = 0; i < nbins; i++) { size_t bin_size; mib[2] = i; len = sizeof(bin_size); mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); /* Do something with bin_size... */ }]]></programlisting></para> - <para>The <function>malloc_stats_print<parameter/></function> function - writes human-readable summary statistics via the - <parameter>write_cb</parameter> callback function pointer and - <parameter>cbopaque</parameter> data passed to - <parameter>write_cb</parameter>, or - <function>malloc_message<parameter/></function> if - <parameter>write_cb</parameter> is <constant>NULL</constant>. This - function can be called repeatedly. General information that never - changes during execution can be omitted by specifying "g" as a character + <para>The <function>malloc_stats_print()</function> function writes + summary statistics via the <parameter>write_cb</parameter> callback + function pointer and <parameter>cbopaque</parameter> data passed to + <parameter>write_cb</parameter>, or <function>malloc_message()</function> + if <parameter>write_cb</parameter> is <constant>NULL</constant>. The + statistics are presented in human-readable form unless <quote>J</quote> is + specified as a character within the <parameter>opts</parameter> string, in + which case the statistics are presented in <ulink + url="http://www.json.org/">JSON format</ulink>. This function can be + called repeatedly. General information that never changes during + execution can be omitted by specifying <quote>g</quote> as a character within the <parameter>opts</parameter> string. 
Note that - <function>malloc_message<parameter/></function> uses the - <function>mallctl*<parameter/></function> functions internally, so - inconsistent statistics can be reported if multiple threads use these - functions simultaneously. If <option>--enable-stats</option> is - specified during configuration, “m” and “a” can - be specified to omit merged arena and per arena statistics, respectively; - “b”, “l”, and “h” can be specified to - omit per size class statistics for bins, large objects, and huge objects, - respectively. Unrecognized characters are silently ignored. Note that - thread caching may prevent some statistics from being completely up to - date, since extra locking would be required to merge counters that track - thread cache operations. - </para> + <function>malloc_message()</function> uses the + <function>mallctl*()</function> functions internally, so inconsistent + statistics can be reported if multiple threads use these functions + simultaneously. If <option>--enable-stats</option> is specified during + configuration, <quote>m</quote> and <quote>a</quote> can be specified to + omit merged arena and per arena statistics, respectively; + <quote>b</quote>, <quote>l</quote>, and <quote>h</quote> can be specified + to omit per size class statistics for bins, large objects, and huge + objects, respectively. Unrecognized characters are silently ignored. + Note that thread caching may prevent some statistics from being completely + up to date, since extra locking would be required to merge counters that + track thread cache operations.</para> - <para>The <function>malloc_usable_size<parameter/></function> function + <para>The <function>malloc_usable_size()</function> function returns the usable size of the allocation pointed to by <parameter>ptr</parameter>. The return value may be larger than the size that was requested during allocation. 
The - <function>malloc_usable_size<parameter/></function> function is not a - mechanism for in-place <function>realloc<parameter/></function>; rather + <function>malloc_usable_size()</function> function is not a + mechanism for in-place <function>realloc()</function>; rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported - by <function>malloc_usable_size<parameter/></function> should not be + by <function>malloc_usable_size()</function> should not be depended on, since such behavior is entirely implementation-dependent. </para> </refsect2> </refsect1> <refsect1 id="tuning"> <title>TUNING</title> <para>Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time.</para> <para>The string specified via <option>--with-malloc-conf</option>, the string pointed to by the global variable <varname>malloc_conf</varname>, the - “name” of the file referenced by the symbolic link named + <quote>name</quote> of the file referenced by the symbolic link named <filename class="symlink">/etc/malloc.conf</filename>, and the value of the environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in that order, from left to right as options. Note that <varname>malloc_conf</varname> may be read before - <function>main<parameter/></function> is entered, so the declaration of + <function>main()</function> is entered, so the declaration of <varname>malloc_conf</varname> should specify an initializer that contains the final value to be read by jemalloc. 
<option>--with-malloc-conf</option> and <varname>malloc_conf</varname> are compile-time mechanisms, whereas <filename class="symlink">/etc/malloc.conf</filename> and <envar>MALLOC_CONF</envar> can be safely set any time prior to program invocation.</para> <para>An options string is a comma-separated list of option:value pairs. @@ -535,33 +536,33 @@ for (i = 0; i < nbins; i++) { up to the nearest power of two that is at least <code language="C">sizeof(<type>double</type>)</code>. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the <link linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), and - huge size classes extend from the chunk size up to one size class less than - the full address space size.</para> + huge size classes extend from the chunk size up to the largest size class + that does not exceed <constant>PTRDIFF_MAX</constant>.</para> <para>Allocations are packed tightly together, which can be an issue for multi-threaded applications. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating.</para> - <para>The <function>realloc<parameter/></function>, - <function>rallocx<parameter/></function>, and - <function>xallocx<parameter/></function> functions may resize allocations + <para>The <function>realloc()</function>, + <function>rallocx()</function>, and + <function>xallocx()</function> functions may resize allocations without moving them under limited circumstances. 
Unlike the - <function>*allocx<parameter/></function> API, the standard API does not + <function>*allocx()</function> API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call - <function>realloc<parameter/></function> to grow e.g. a 9-byte allocation to + <function>realloc()</function> to grow e.g. a 9-byte allocation to 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage trivially succeeds in place as long as the pre-size and post-size both round up to the same size class. No other API guarantees are made regarding in-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre-size and post-size are both large or both huge. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see <link @@ -654,17 +655,17 @@ for (i = 0; i < nbins; i++) { <entry>128 KiB</entry> <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry> </row> <row> <entry>256 KiB</entry> <entry>[1280 KiB, 1536 KiB, 1792 KiB]</entry> </row> <row> - <entry morerows="6">Huge</entry> + <entry morerows="8">Huge</entry> <entry>256 KiB</entry> <entry>[2 MiB]</entry> </row> <row> <entry>512 KiB</entry> <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry> </row> <row> @@ -682,24 +683,32 @@ for (i = 0; i < nbins; i++) { <row> <entry>8 MiB</entry> <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry> </row> <row> <entry>...</entry> <entry>...</entry> </row> + <row> + <entry>512 PiB</entry> + <entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry> + </row> + <row> + <entry>1 EiB</entry> + <entry>[5 EiB, 6 EiB, 7 EiB]</entry> + </row> </tbody> </tgroup> </table> </refsect1> <refsect1 id="mallctl_namespace"> <title>MALLCTL NAMESPACE</title> <para>The following names are defined in the namespace accessible via the - <function>mallctl*<parameter/></function> functions. 
Value types are + <function>mallctl*()</function> functions. Value types are specified in parentheses, their readable/writable statuses are encoded as <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or <literal>--</literal>, and required build configuration flags follow, if any. A name element encoded as <literal><i></literal> or <literal><j></literal> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection. In the case of <mallctl>stats.arenas.<i>.*</mallctl>, <literal><i></literal> equal to <link @@ -720,17 +729,17 @@ for (i = 0; i < nbins; i++) { <varlistentry id="epoch"> <term> <mallctl>epoch</mallctl> (<type>uint64_t</type>) <literal>rw</literal> </term> <listitem><para>If a value is passed in, refresh the data from which - the <function>mallctl*<parameter/></function> functions report values, + the <function>mallctl*()</function> functions report values, and increment the epoch. Return the current epoch. This is useful for detecting whether another thread caused a refresh.</para></listitem> </varlistentry> <varlistentry id="config.cache_oblivious"> <term> <mallctl>config.cache_oblivious</mallctl> (<type>bool</type>) @@ -904,22 +913,22 @@ for (i = 0; i < nbins; i++) { </term> <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle> <manvolnum>2</manvolnum></citerefentry>) allocation precedence as related to <citerefentry><refentrytitle>mmap</refentrytitle> <manvolnum>2</manvolnum></citerefentry> allocation. The following settings are supported if <citerefentry><refentrytitle>sbrk</refentrytitle> <manvolnum>2</manvolnum></citerefentry> is supported by the operating - system: “disabled”, “primary”, and - “secondary”; otherwise only “disabled” is - supported. The default is “secondary” if + system: <quote>disabled</quote>, <quote>primary</quote>, and + <quote>secondary</quote>; otherwise only <quote>disabled</quote> is + supported. 
The default is <quote>secondary</quote> if <citerefentry><refentrytitle>sbrk</refentrytitle> <manvolnum>2</manvolnum></citerefentry> is supported by the operating - system; “disabled” otherwise. + system; <quote>disabled</quote> otherwise. </para></listitem> </varlistentry> <varlistentry id="opt.lg_chunk"> <term> <mallctl>opt.lg_chunk</mallctl> (<type>size_t</type>) <literal>r-</literal> @@ -1000,50 +1009,51 @@ for (i = 0; i < nbins; i++) { <varlistentry id="opt.stats_print"> <term> <mallctl>opt.stats_print</mallctl> (<type>bool</type>) <literal>r-</literal> </term> <listitem><para>Enable/disable statistics printing at exit. If - enabled, the <function>malloc_stats_print<parameter/></function> + enabled, the <function>malloc_stats_print()</function> function is called at program exit via an <citerefentry><refentrytitle>atexit</refentrytitle> <manvolnum>3</manvolnum></citerefentry> function. If <option>--enable-stats</option> is specified during configuration, this has the potential to cause deadlock for a multi-threaded process that exits while one or more threads are executing in the memory allocation - functions. Furthermore, <function>atexit<parameter/></function> may + functions. Furthermore, <function>atexit()</function> may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls - <function>atexit<parameter/></function>, so this option is not + <function>atexit()</function>, so this option is not universally usable (though the application can register its own - <function>atexit<parameter/></function> function with equivalent + <function>atexit()</function> function with equivalent functionality). Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development. 
This option is disabled by default.</para></listitem> </varlistentry> <varlistentry id="opt.junk"> <term> <mallctl>opt.junk</mallctl> (<type>const char *</type>) <literal>r-</literal> [<option>--enable-fill</option>] </term> - <listitem><para>Junk filling. If set to "alloc", each byte of - uninitialized allocated memory will be initialized to - <literal>0xa5</literal>. If set to "free", all deallocated memory will - be initialized to <literal>0x5a</literal>. If set to "true", both - allocated and deallocated memory will be initialized, and if set to - "false", junk filling be disabled entirely. This is intended for - debugging and will impact performance negatively. This option is - "false" by default unless <option>--enable-debug</option> is specified - during configuration, in which case it is "true" by default unless + <listitem><para>Junk filling. If set to <quote>alloc</quote>, each byte + of uninitialized allocated memory will be initialized to + <literal>0xa5</literal>. If set to <quote>free</quote>, all deallocated + memory will be initialized to <literal>0x5a</literal>. If set to + <quote>true</quote>, both allocated and deallocated memory will be + initialized, and if set to <quote>false</quote>, junk filling be + disabled entirely. This is intended for debugging and will impact + performance negatively. This option is <quote>false</quote> by default + unless <option>--enable-debug</option> is specified during + configuration, in which case it is <quote>true</quote> by default unless running inside <ulink url="http://valgrind.org/">Valgrind</ulink>.</para></listitem> </varlistentry> <varlistentry id="opt.quarantine"> <term> <mallctl>opt.quarantine</mallctl> (<type>size_t</type>) @@ -1088,18 +1098,18 @@ for (i = 0; i < nbins; i++) { <mallctl>opt.zero</mallctl> (<type>bool</type>) <literal>r-</literal> [<option>--enable-fill</option>] </term> <listitem><para>Zero filling enabled/disabled. 
If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so - <function>realloc<parameter/></function> and - <function>rallocx<parameter/></function> calls do not zero memory that + <function>realloc()</function> and + <function>rallocx()</function> calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default. </para></listitem> </varlistentry> <varlistentry id="opt.utrace"> <term> <mallctl>opt.utrace</mallctl> @@ -1312,21 +1322,21 @@ malloc_conf = "xmalloc:true";]]></progra </term> <listitem><para>Use an <citerefentry><refentrytitle>atexit</refentrytitle> <manvolnum>3</manvolnum></citerefentry> function to dump final memory usage to a file named according to the pattern <filename><prefix>.<pid>.<seq>.f.heap</filename>, where <literal><prefix></literal> is controlled by the <link linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> - option. Note that <function>atexit<parameter/></function> may allocate + option. Note that <function>atexit()</function> may allocate memory during application initialization and then deadlock internally - when jemalloc in turn calls <function>atexit<parameter/></function>, so + when jemalloc in turn calls <function>atexit()</function>, so this option is not universally usable (though the application can - register its own <function>atexit<parameter/></function> function with + register its own <function>atexit()</function> function with equivalent functionality). 
This option is disabled by default.</para></listitem> </varlistentry> <varlistentry id="opt.prof_leak"> <term> <mallctl>opt.prof_leak</mallctl> (<type>bool</type>) @@ -1375,17 +1385,17 @@ malloc_conf = "xmalloc:true";]]></progra (<type>uint64_t *</type>) <literal>r-</literal> [<option>--enable-stats</option>] </term> <listitem><para>Get a pointer to the the value that is returned by the <link linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link> mallctl. This is useful for avoiding the overhead of repeated - <function>mallctl*<parameter/></function> calls.</para></listitem> + <function>mallctl*()</function> calls.</para></listitem> </varlistentry> <varlistentry id="thread.deallocated"> <term> <mallctl>thread.deallocated</mallctl> (<type>uint64_t</type>) <literal>r-</literal> [<option>--enable-stats</option>] @@ -1402,17 +1412,17 @@ malloc_conf = "xmalloc:true";]]></progra (<type>uint64_t *</type>) <literal>r-</literal> [<option>--enable-stats</option>] </term> <listitem><para>Get a pointer to the the value that is returned by the <link linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link> mallctl. This is useful for avoiding the overhead of repeated - <function>mallctl*<parameter/></function> calls.</para></listitem> + <function>mallctl*()</function> calls.</para></listitem> </varlistentry> <varlistentry id="thread.tcache.enabled"> <term> <mallctl>thread.tcache.enabled</mallctl> (<type>bool</type>) <literal>rw</literal> [<option>--enable-tcache</option>] @@ -1545,16 +1555,33 @@ malloc_conf = "xmalloc:true";]]></progra arena <i>, or for all arenas if <i> equals <link linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. 
The proportion of unused dirty pages to be purged depends on the current time; see <link linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for details.</para></listitem> </varlistentry> + <varlistentry id="arena.i.reset"> + <term> + <mallctl>arena.<i>.reset</mallctl> + (<type>void</type>) + <literal>--</literal> + </term> + <listitem><para>Discard all of the arena's extant allocations. This + interface can only be used with arenas created via <link + linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link>. None + of the arena's discarded/cached allocations may be accessed afterward. As + part of this requirement, all thread caches which were used to + allocate/deallocate in conjunction with the arena must be flushed + beforehand. This interface cannot be used if running inside Valgrind, + nor if the <link linkend="opt.quarantine">quarantine</link> size is + non-zero.</para></listitem> + </varlistentry> + <varlistentry id="arena.i.dss"> <term> <mallctl>arena.<i>.dss</mallctl> (<type>const char *</type>) <literal>rw</literal> </term> <listitem><para>Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals @@ -2156,16 +2183,35 @@ typedef struct { allocator. This is a multiple of the chunk size, and is larger than <link linkend="stats.active"><mallctl>stats.active</mallctl></link>. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and <link linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem> </varlistentry> + <varlistentry id="stats.retained"> + <term> + <mallctl>stats.retained</mallctl> + (<type>size_t</type>) + <literal>r-</literal> + [<option>--enable-stats</option>] + </term> + <listitem><para>Total number of bytes in virtual memory mappings that + were retained rather than being returned to the operating system via + e.g. 
<citerefentry><refentrytitle>munmap</refentrytitle> + <manvolnum>2</manvolnum></citerefentry>. Retained virtual memory is + typically untouched, decommitted, or purged, so it has no strongly + associated physical memory (see <link + linkend="arena.i.chunk_hooks">chunk hooks</link> for details). Retained + memory is excluded from mapped memory statistics, e.g. <link + linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>. + </para></listitem> + </varlistentry> + <varlistentry id="stats.arenas.i.dss"> <term> <mallctl>stats.arenas.<i>.dss</mallctl> (<type>const char *</type>) <literal>r-</literal> </term> <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle> <manvolnum>2</manvolnum></citerefentry>) allocation precedence as @@ -2236,16 +2282,28 @@ typedef struct { <mallctl>stats.arenas.<i>.mapped</mallctl> (<type>size_t</type>) <literal>r-</literal> [<option>--enable-stats</option>] </term> <listitem><para>Number of mapped bytes.</para></listitem> </varlistentry> + <varlistentry id="stats.arenas.i.retained"> + <term> + <mallctl>stats.arenas.<i>.retained</mallctl> + (<type>size_t</type>) + <literal>r-</literal> + [<option>--enable-stats</option>] + </term> + <listitem><para>Number of retained bytes. See <link + linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for + details.</para></listitem> + </varlistentry> + <varlistentry id="stats.arenas.i.metadata.mapped"> <term> <mallctl>stats.arenas.<i>.metadata.mapped</mallctl> (<type>size_t</type>) <literal>r-</literal> [<option>--enable-stats</option>] </term> <listitem><para>Number of mapped bytes in arena chunk headers, which @@ -2673,17 +2731,17 @@ MAPPED_LIBRARIES: <title>DEBUGGING MALLOC PROBLEMS</title> <para>When debugging, it is a good idea to configure/build jemalloc with the <option>--enable-debug</option> and <option>--enable-fill</option> options, and recompile the program with suitable options and symbols for debugger support. 
When so configured, jemalloc incorporates a wide variety of run-time assertions that catch application errors such as double-free, write-after-free, etc.</para> - <para>Programs often accidentally depend on “uninitialized” + <para>Programs often accidentally depend on <quote>uninitialized</quote> memory actually being filled with zero bytes. Junk filling (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps. Conversely, zero filling (see the <link linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates the symptoms of such bugs. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs.</para> @@ -2702,39 +2760,39 @@ MAPPED_LIBRARIES: dumping core. If the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most warnings are treated as errors.</para> <para>The <varname>malloc_message</varname> variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the <constant>STDERR_FILENO</constant> file descriptor is not suitable for this. - <function>malloc_message<parameter/></function> takes the + <function>malloc_message()</function> takes the <parameter>cbopaque</parameter> pointer argument that is <constant>NULL</constant> unless overridden by the arguments in a call to - <function>malloc_stats_print<parameter/></function>, followed by a string + <function>malloc_stats_print()</function>, followed by a string pointer. 
Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock.</para> <para>All messages are prefixed by - “<computeroutput><jemalloc>: </computeroutput>”.</para> + <quote><computeroutput><jemalloc>: </computeroutput></quote>.</para> </refsect1> <refsect1 id="return_values"> <title>RETURN VALUES</title> <refsect2> <title>Standard API</title> - <para>The <function>malloc<parameter/></function> and - <function>calloc<parameter/></function> functions return a pointer to the + <para>The <function>malloc()</function> and + <function>calloc()</function> functions return a pointer to the allocated memory if successful; otherwise a <constant>NULL</constant> pointer is returned and <varname>errno</varname> is set to <errorname>ENOMEM</errorname>.</para> - <para>The <function>posix_memalign<parameter/></function> function + <para>The <function>posix_memalign()</function> function returns the value 0 if successful; otherwise it returns an error value. - The <function>posix_memalign<parameter/></function> function will fail + The <function>posix_memalign()</function> function will fail if: <variablelist> <varlistentry> <term><errorname>EINVAL</errorname></term> <listitem><para>The <parameter>alignment</parameter> parameter is not a power of 2 at least as large as <code language="C">sizeof(<type>void *</type>)</code>. @@ -2743,75 +2801,75 @@ MAPPED_LIBRARIES: <varlistentry> <term><errorname>ENOMEM</errorname></term> <listitem><para>Memory allocation error.</para></listitem> </varlistentry> </variablelist> </para> - <para>The <function>aligned_alloc<parameter/></function> function returns + <para>The <function>aligned_alloc()</function> function returns a pointer to the allocated memory if successful; otherwise a <constant>NULL</constant> pointer is returned and <varname>errno</varname> is set. 
The - <function>aligned_alloc<parameter/></function> function will fail if: + <function>aligned_alloc()</function> function will fail if: <variablelist> <varlistentry> <term><errorname>EINVAL</errorname></term> <listitem><para>The <parameter>alignment</parameter> parameter is not a power of 2. </para></listitem> </varlistentry> <varlistentry> <term><errorname>ENOMEM</errorname></term> <listitem><para>Memory allocation error.</para></listitem> </varlistentry> </variablelist> </para> - <para>The <function>realloc<parameter/></function> function returns a + <para>The <function>realloc()</function> function returns a pointer, possibly identical to <parameter>ptr</parameter>, to the allocated memory if successful; otherwise a <constant>NULL</constant> pointer is returned, and <varname>errno</varname> is set to <errorname>ENOMEM</errorname> if the error was the result of an - allocation failure. The <function>realloc<parameter/></function> + allocation failure. The <function>realloc()</function> function always leaves the original buffer intact when an error occurs. </para> - <para>The <function>free<parameter/></function> function returns no + <para>The <function>free()</function> function returns no value.</para> </refsect2> <refsect2> <title>Non-standard API</title> - <para>The <function>mallocx<parameter/></function> and - <function>rallocx<parameter/></function> functions return a pointer to + <para>The <function>mallocx()</function> and + <function>rallocx()</function> functions return a pointer to the allocated memory if successful; otherwise a <constant>NULL</constant> pointer is returned to indicate insufficient contiguous memory was available to service the allocation request. 
</para> - <para>The <function>xallocx<parameter/></function> function returns the + <para>The <function>xallocx()</function> function returns the real size of the resulting resized allocation pointed to by <parameter>ptr</parameter>, which is a value less than <parameter>size</parameter> if the allocation could not be adequately grown in place. </para> - <para>The <function>sallocx<parameter/></function> function returns the + <para>The <function>sallocx()</function> function returns the real size of the allocation pointed to by <parameter>ptr</parameter>. </para> - <para>The <function>nallocx<parameter/></function> returns the real size + <para>The <function>nallocx()</function> returns the real size that would result from a successful equivalent - <function>mallocx<parameter/></function> function call, or zero if + <function>mallocx()</function> function call, or zero if insufficient memory is available to perform the size computation. </para> - <para>The <function>mallctl<parameter/></function>, - <function>mallctlnametomib<parameter/></function>, and - <function>mallctlbymib<parameter/></function> functions return 0 on + <para>The <function>mallctl()</function>, + <function>mallctlnametomib()</function>, and + <function>mallctlbymib()</function> functions return 0 on success; otherwise they return an error value. 
The functions will fail if: <variablelist> <varlistentry> <term><errorname>EINVAL</errorname></term> <listitem><para><parameter>newp</parameter> is not <constant>NULL</constant>, and <parameter>newlen</parameter> is too @@ -2837,23 +2895,23 @@ MAPPED_LIBRARIES: <listitem><para>A memory allocation failure occurred.</para></listitem> </varlistentry> <varlistentry> <term><errorname>EFAULT</errorname></term> <listitem><para>An interface with side effects failed in some way - not directly related to <function>mallctl*<parameter/></function> + not directly related to <function>mallctl*()</function> read/write processing.</para></listitem> </varlistentry> </variablelist> </para> - <para>The <function>malloc_usable_size<parameter/></function> function + <para>The <function>malloc_usable_size()</function> function returns the usable size of the allocation pointed to by <parameter>ptr</parameter>. </para> </refsect2> </refsect1> <refsect1 id="environment"> <title>ENVIRONMENT</title> <para>The following environment variable affects the execution of the allocation functions: @@ -2891,18 +2949,18 @@ malloc_conf = "lg_chunk:24";]]></program <manvolnum>3</manvolnum></citerefentry>, <citerefentry><refentrytitle>atexit</refentrytitle> <manvolnum>3</manvolnum></citerefentry>, <citerefentry><refentrytitle>getpagesize</refentrytitle> <manvolnum>3</manvolnum></citerefentry></para> </refsect1> <refsect1 id="standards"> <title>STANDARDS</title> - <para>The <function>malloc<parameter/></function>, - <function>calloc<parameter/></function>, - <function>realloc<parameter/></function>, and - <function>free<parameter/></function> functions conform to ISO/IEC - 9899:1990 (“ISO C90”).</para> + <para>The <function>malloc()</function>, + <function>calloc()</function>, + <function>realloc()</function>, and + <function>free()</function> functions conform to ISO/IEC + 9899:1990 (<quote>ISO C90</quote>).</para> - <para>The <function>posix_memalign<parameter/></function> function conforms - to IEEE 
Std 1003.1-2001 (“POSIX.1”).</para> + <para>The <function>posix_memalign()</function> function conforms + to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para> </refsect1> </refentry>
--- a/memory/jemalloc/src/doc/stylesheet.xsl +++ b/memory/jemalloc/src/doc/stylesheet.xsl @@ -1,7 +1,10 @@ <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> <xsl:param name="funcsynopsis.style">ansi</xsl:param> - <xsl:param name="function.parens" select="1"/> + <xsl:param name="function.parens" select="0"/> + <xsl:template match="function"> + <xsl:call-template name="inline.monoseq"/> + </xsl:template> <xsl:template match="mallctl"> - "<xsl:call-template name="inline.monoseq"/>" + <quote><xsl:call-template name="inline.monoseq"/></quote> </xsl:template> </xsl:stylesheet>
--- a/memory/jemalloc/src/include/jemalloc/internal/arena.h +++ b/memory/jemalloc/src/include/jemalloc/internal/arena.h @@ -31,21 +31,23 @@ typedef enum { } purge_mode_t; #define PURGE_DEFAULT purge_mode_ratio /* Default decay time in seconds. */ #define DECAY_TIME_DEFAULT 10 /* Number of event ticks between time checks. */ #define DECAY_NTICKS_PER_UPDATE 1000 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t; +typedef struct arena_avail_links_s arena_avail_links_t; typedef struct arena_run_s arena_run_t; typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t; typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t; typedef struct arena_chunk_s arena_chunk_t; typedef struct arena_bin_info_s arena_bin_info_t; +typedef struct arena_decay_s arena_decay_t; typedef struct arena_bin_s arena_bin_t; typedef struct arena_s arena_t; typedef struct arena_tdata_s arena_tdata_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS @@ -148,39 +150,39 @@ struct arena_runs_dirty_link_s { /* * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just * like arena_chunk_map_bits_t. Two separate arrays are stored within each * chunk header in order to improve cache locality. */ struct arena_chunk_map_misc_s { /* - * Linkage for run trees. There are two disjoint uses: + * Linkage for run heaps. There are two disjoint uses: * - * 1) arena_t's runs_avail tree. + * 1) arena_t's runs_avail heaps. * 2) arena_run_t conceptually uses this linkage for in-use non-full * runs, rather than directly embedding linkage. */ - rb_node(arena_chunk_map_misc_t) rb_link; + phn(arena_chunk_map_misc_t) ph_link; union { /* Linkage for list of dirty runs. */ arena_runs_dirty_link_t rd; /* Profile counters, used for large object runs. */ union { void *prof_tctx_pun; prof_tctx_t *prof_tctx; }; /* Small region run metadata. 
*/ arena_run_t run; }; }; -typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t; +typedef ph(arena_chunk_map_misc_t) arena_run_heap_t; #endif /* JEMALLOC_ARENA_STRUCTS_A */ #ifdef JEMALLOC_ARENA_STRUCTS_B /* Arena chunk header. */ struct arena_chunk_s { /* * A pointer to the arena that owns the chunk is stored within the node. * This field as a whole is used by chunks_rtree to support both @@ -251,53 +253,104 @@ struct arena_bin_info_s { * bin. */ bitmap_info_t bitmap_info; /* Offset of first region in a run for this bin's size class. */ uint32_t reg0_offset; }; +struct arena_decay_s { + /* + * Approximate time in seconds from the creation of a set of unused + * dirty pages until an equivalent set of unused dirty pages is purged + * and/or reused. + */ + ssize_t time; + /* time / SMOOTHSTEP_NSTEPS. */ + nstime_t interval; + /* + * Time at which the current decay interval logically started. We do + * not actually advance to a new epoch until sometime after it starts + * because of scheduling and computation delays, and it is even possible + * to completely skip epochs. In all cases, during epoch advancement we + * merge all relevant activity into the most recently recorded epoch. + */ + nstime_t epoch; + /* Deadline randomness generator. */ + uint64_t jitter_state; + /* + * Deadline for current epoch. This is the sum of interval and per + * epoch jitter which is a uniform random variable in [0..interval). + * Epochs always advance by precise multiples of interval, but we + * randomize the deadline to reduce the likelihood of arenas purging in + * lockstep. + */ + nstime_t deadline; + /* + * Number of dirty pages at beginning of current epoch. During epoch + * advancement we use the delta between arena->decay.ndirty and + * arena->ndirty to determine how many dirty pages, if any, were + * generated. 
+ */ + size_t ndirty; + /* + * Trailing log of how many unused dirty pages were generated during + * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last + * element is the most recent epoch. Corresponding epoch times are + * relative to epoch. + */ + size_t backlog[SMOOTHSTEP_NSTEPS]; +}; + struct arena_bin_s { /* * All operations on runcur, runs, and stats require that lock be * locked. Run allocation/deallocation are protected by the arena lock, * which may be acquired while holding one or more bin locks, but not * vise versa. */ malloc_mutex_t lock; /* * Current run being used to service allocations of this bin's size * class. */ arena_run_t *runcur; /* - * Tree of non-full runs. This tree is used when looking for an + * Heap of non-full runs. This heap is used when looking for an * existing run when runcur is no longer usable. We choose the * non-full run that is lowest in memory; this policy tends to keep * objects packed well, and it can also help reduce the number of * almost-empty chunks. */ - arena_run_tree_t runs; + arena_run_heap_t runs; /* Bin statistics. */ malloc_bin_stats_t stats; }; struct arena_s { /* This arena's index within the arenas array. */ unsigned ind; /* - * Number of threads currently assigned to this arena. This field is - * synchronized via atomic operations. + * Number of threads currently assigned to this arena, synchronized via + * atomic operations. Each thread has two distinct assignments, one for + * application-serving allocation, and the other for internal metadata + * allocation. Internal metadata must not be allocated from arenas + * created via the arenas.extend mallctl, because the arena.<i>.reset + * mallctl indiscriminately discards all allocations for the affected + * arena. + * + * 0: Application allocation. + * 1: Internal metadata allocation. 
*/ - unsigned nthreads; + unsigned nthreads[2]; /* * There are three classes of arena operations from a locking * perspective: * 1) Thread assignment (modifies nthreads) is synchronized via atomics. * 2) Bin-related operations are protected by bin locks. * 3) Chunk- and run-related operations are protected by this mutex. */ @@ -312,20 +365,24 @@ struct arena_s { ql_head(tcache_t) tcache_ql; uint64_t prof_accumbytes; /* * PRNG state for cache index randomization of large allocation base * pointers. */ - uint64_t offset_state; + size_t offset_state; dss_prec_t dss_prec; + + /* Extant arena chunks. */ + ql_head(extent_node_t) achunks; + /* * In order to avoid rapid chunk allocation/deallocation when an arena * oscillates right on the cusp of needing a new chunk, cache the most * recently freed chunk. The spare is left in the arena's chunk trees * until it is deleted. * * There is one spare chunk per arena, rather than one spare total, in * order to avoid interactions between multiple threads that could make @@ -376,62 +433,18 @@ struct arena_s { * | | \-------/ \-------/ | | * | | | | * | | | | * \------------/ \---------/ */ arena_runs_dirty_link_t runs_dirty; extent_node_t chunks_cache; - /* - * Approximate time in seconds from the creation of a set of unused - * dirty pages until an equivalent set of unused dirty pages is purged - * and/or reused. - */ - ssize_t decay_time; - /* decay_time / SMOOTHSTEP_NSTEPS. */ - nstime_t decay_interval; - /* - * Time at which the current decay interval logically started. We do - * not actually advance to a new epoch until sometime after it starts - * because of scheduling and computation delays, and it is even possible - * to completely skip epochs. In all cases, during epoch advancement we - * merge all relevant activity into the most recently recorded epoch. - */ - nstime_t decay_epoch; - /* decay_deadline randomness generator. */ - uint64_t decay_jitter_state; - /* - * Deadline for current epoch. 
This is the sum of decay_interval and - * per epoch jitter which is a uniform random variable in - * [0..decay_interval). Epochs always advance by precise multiples of - * decay_interval, but we randomize the deadline to reduce the - * likelihood of arenas purging in lockstep. - */ - nstime_t decay_deadline; - /* - * Number of dirty pages at beginning of current epoch. During epoch - * advancement we use the delta between decay_ndirty and ndirty to - * determine how many dirty pages, if any, were generated, and record - * the result in decay_backlog. - */ - size_t decay_ndirty; - /* - * Memoized result of arena_decay_backlog_npages_limit() corresponding - * to the current contents of decay_backlog, i.e. the limit on how many - * pages are allowed to exist for the decay epochs. - */ - size_t decay_backlog_npages_limit; - /* - * Trailing log of how many unused dirty pages were generated during - * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last - * element is the most recent epoch. Corresponding epoch times are - * relative to decay_epoch. - */ - size_t decay_backlog[SMOOTHSTEP_NSTEPS]; + /* Decay-based purging state. */ + arena_decay_t decay; /* Extant huge allocations. */ ql_head(extent_node_t) huge; /* Synchronizes all huge allocation/update/deallocation. */ malloc_mutex_t huge_mtx; /* * Trees of chunks that were previously allocated (trees differ only in @@ -452,20 +465,22 @@ struct arena_s { /* User-configurable chunk hook functions. */ chunk_hooks_t chunk_hooks; /* bins is used to store trees of free regions. */ arena_bin_t bins[NBINS]; /* - * Quantized address-ordered trees of this arena's available runs. The - * trees are used for first-best-fit run allocation. + * Size-segregated address-ordered heaps of this arena's available runs, + * used for first-best-fit run allocation. Runs are quantized, i.e. + * they reside in the last heap which corresponds to a size class less + * than or equal to the run size. 
*/ - arena_run_tree_t runs_avail[1]; /* Dynamically sized. */ + arena_run_heap_t runs_avail[NPSIZES]; }; /* Used in conjunction with tsd for fast arena-related context lookup. */ struct arena_tdata_s { ticker_t decay_ticker; }; #endif /* JEMALLOC_ARENA_STRUCTS_B */ @@ -487,142 +502,155 @@ extern ssize_t opt_lg_dirty_mult; extern ssize_t opt_decay_time; extern arena_bin_info_t arena_bin_info[NBINS]; extern size_t map_bias; /* Number of arena chunk header pages. */ extern size_t map_misc_offset; extern size_t arena_maxrun; /* Max run size for arenas. */ extern size_t large_maxclass; /* Max large size class. */ -extern size_t run_quantize_max; /* Max run_quantize_*() input. */ extern unsigned nlclasses; /* Number of large size classes. */ extern unsigned nhclasses; /* Number of huge size classes. */ #ifdef JEMALLOC_JET typedef size_t (run_quantize_t)(size_t); extern run_quantize_t *run_quantize_floor; extern run_quantize_t *run_quantize_ceil; #endif void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache); void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool cache); -extent_node_t *arena_node_alloc(arena_t *arena); -void arena_node_dalloc(arena_t *arena, extent_node_t *node); -void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, - bool *zero); -void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize); -void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, - size_t oldsize, size_t usize); -void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, - size_t oldsize, size_t usize); -bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, - size_t oldsize, size_t usize, bool *zero); -ssize_t arena_lg_dirty_mult_get(arena_t *arena); -bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult); -ssize_t arena_decay_time_get(arena_t *arena); -bool arena_decay_time_set(arena_t *arena, ssize_t decay_time); -void arena_maybe_purge(arena_t *arena); -void 
arena_purge(arena_t *arena, bool all); -void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, - szind_t binind, uint64_t prof_accumbytes); +extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena); +void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node); +void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool *zero); +void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t usize); +void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize); +void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize); +bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize, bool *zero); +ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); +bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, + ssize_t lg_dirty_mult); +ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena); +bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time); +void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all); +void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena); +void arena_reset(tsd_t *tsd, arena_t *arena); +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, + tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero); #ifdef JEMALLOC_JET typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, uint8_t); extern arena_redzone_corruption_t *arena_redzone_corruption; typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; #else void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); #endif void arena_quarantine_junk_small(void *ptr, size_t usize); -void *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t 
ind, bool zero); -void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, - bool zero, tcache_t *tcache); -void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, +void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind, + bool zero); +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t ind, bool zero); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache); -void arena_prof_promoted(const void *ptr, size_t size); -void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size); +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind); #ifdef JEMALLOC_JET typedef void (arena_dalloc_junk_large_t)(void *, size_t); extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; #else void arena_dalloc_junk_large(void *ptr, size_t usize); #endif -void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr); -void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr); +void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr); #ifdef JEMALLOC_JET typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); extern arena_ralloc_junk_large_t 
*arena_ralloc_junk_large; #endif -bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, bool zero); +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t size, size_t extra, bool zero); void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache); -dss_prec_t arena_dss_prec_get(arena_t *arena); -bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); +dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena); +bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec); ssize_t arena_lg_dirty_mult_default_get(void); bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); ssize_t arena_decay_time_default_get(void); bool arena_decay_time_default_set(ssize_t decay_time); -void arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, + unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, + ssize_t *decay_time, size_t *nactive, size_t *ndirty); +void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, - size_t *nactive, size_t *ndirty); -void arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss, - ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats); -unsigned arena_nthreads_get(arena_t *arena); -void arena_nthreads_inc(arena_t *arena); -void arena_nthreads_dec(arena_t *arena); -arena_t *arena_new(unsigned ind); -bool arena_boot(void); -void arena_prefork0(arena_t *arena); -void arena_prefork1(arena_t *arena); -void arena_prefork2(arena_t *arena); -void arena_prefork3(arena_t *arena); -void arena_postfork_parent(arena_t *arena); -void arena_postfork_child(arena_t *arena); + size_t *nactive, size_t *ndirty, 
arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, + malloc_huge_stats_t *hstats); +unsigned arena_nthreads_get(arena_t *arena, bool internal); +void arena_nthreads_inc(arena_t *arena, bool internal); +void arena_nthreads_dec(arena_t *arena, bool internal); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind); +void arena_boot(void); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE -arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk, +arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind); -arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk, +const arena_chunk_map_bits_t *arena_bitselm_get_const( + const arena_chunk_t *chunk, size_t pageind); +arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind); +const arena_chunk_map_misc_t *arena_miscelm_get_const( + const arena_chunk_t *chunk, size_t pageind); size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm); -void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm); +void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm); arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd); arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run); -size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbitsp_read(size_t *mapbitsp); -size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); +size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind); +const size_t 
*arena_mapbitsp_get_const(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbitsp_read(const size_t *mapbitsp); +size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind); size_t arena_mapbits_size_decode(size_t mapbits); -size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, +size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); -szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); -size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk, + size_t pageind); +szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind); void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); size_t arena_mapbits_size_encode(size_t size); void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags); void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t size); void arena_mapbits_internal_set(arena_chunk_t *chunk, 
size_t pageind, size_t flags); @@ -632,73 +660,89 @@ void arena_mapbits_large_binind_set(aren szind_t binind); void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, szind_t binind, size_t flags); void arena_metadata_allocated_add(arena_t *arena, size_t size); void arena_metadata_allocated_sub(arena_t *arena, size_t size); size_t arena_metadata_allocated_get(arena_t *arena); bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes); szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr); -prof_tctx_t *arena_prof_tctx_get(const void *ptr); -void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); -void arena_prof_tctx_reset(const void *ptr, size_t usize, +prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx); -void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks); -void arena_decay_tick(tsd_t *tsd, arena_t *arena); -void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, +void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks); +void arena_decay_tick(tsdn_t *tsdn, arena_t *arena); +void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool slow_path); arena_t *arena_aalloc(const void *ptr); -size_t arena_salloc(const void *ptr, bool demote); -void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); -void arena_sdalloc(tsd_t *tsd, 
void *ptr, size_t size, tcache_t *tcache); +size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote); +void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); +void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) # ifdef JEMALLOC_ARENA_INLINE_A JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t * -arena_bitselm_get(arena_chunk_t *chunk, size_t pageind) +arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind) { assert(pageind >= map_bias); assert(pageind < chunk_npages); return (&chunk->map_bits[pageind-map_bias]); } +JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t * +arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind)); +} + JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * -arena_miscelm_get(arena_chunk_t *chunk, size_t pageind) +arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind) { assert(pageind >= map_bias); assert(pageind < chunk_npages); return ((arena_chunk_map_misc_t *)((uintptr_t)chunk + (uintptr_t)map_misc_offset) + pageind-map_bias); } +JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t * +arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind)); +} + JEMALLOC_ALWAYS_INLINE size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk + map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias; assert(pageind >= map_bias); assert(pageind < chunk_npages); return (pageind); } JEMALLOC_ALWAYS_INLINE void * -arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm) +arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm) { arena_chunk_t *chunk = (arena_chunk_t 
*)CHUNK_ADDR2BASE(miscelm); size_t pageind = arena_miscelm_to_pageind(miscelm); return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE))); } JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * @@ -721,34 +765,41 @@ arena_run_to_miscelm(arena_run_t *run) assert(arena_miscelm_to_pageind(miscelm) >= map_bias); assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); return (miscelm); } JEMALLOC_ALWAYS_INLINE size_t * -arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind) { - return (&arena_bitselm_get(chunk, pageind)->bits); + return (&arena_bitselm_get_mutable(chunk, pageind)->bits); +} + +JEMALLOC_ALWAYS_INLINE const size_t * +arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind)); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbitsp_read(size_t *mapbitsp) +arena_mapbitsp_read(const size_t *mapbitsp) { return (*mapbitsp); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind) { - return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); + return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind))); } JEMALLOC_ALWAYS_INLINE size_t arena_mapbits_size_decode(size_t mapbits) { size_t size; #if CHUNK_MAP_SIZE_SHIFT > 0 @@ -758,103 +809,103 @@ arena_mapbits_size_decode(size_t mapbits #else size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; #endif return (size); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_size_get(arena_chunk_t *chunk, 
size_t pageind) +arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); return (arena_mapbits_size_decode(mapbits)); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == CHUNK_MAP_ALLOCATED); return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); } JEMALLOC_ALWAYS_INLINE szind_t -arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; szind_t binind; mapbits = arena_mapbits_get(chunk, pageind); binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; assert(binind < NBINS || binind == BININD_INVALID); return (binind); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); return (mapbits & CHUNK_MAP_DIRTY); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); return (mapbits & CHUNK_MAP_UNZEROED); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = 
arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); return (mapbits & CHUNK_MAP_DECOMMITTED); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_LARGE); } JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) +arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind) { size_t mapbits; mapbits = arena_mapbits_get(chunk, pageind); return (mapbits & CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void @@ -880,82 +931,82 @@ arena_mapbits_size_encode(size_t size) assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); return (mapbits); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) { - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); assert((size & PAGE_MASK) == 0); assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | CHUNK_MAP_BININD_INVALID | flags); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, size_t size) { - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); assert((size & PAGE_MASK) == 0); assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | (mapbits & ~CHUNK_MAP_SIZE_MASK)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t 
flags) { - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); assert((flags & CHUNK_MAP_UNZEROED) == flags); arena_mapbitsp_write(mapbitsp, flags); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, size_t flags) { - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); assert((size & PAGE_MASK) == 0); assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, szind_t binind) { - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); size_t mapbits = arena_mapbitsp_read(mapbitsp); assert(binind <= BININD_INVALID); assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS + large_pad); arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | (binind << CHUNK_MAP_BININD_SHIFT)); } JEMALLOC_ALWAYS_INLINE void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, szind_t binind, size_t flags) { - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind); assert(binind < BININD_INVALID); assert(pageind - runind >= map_bias); assert((flags & CHUNK_MAP_UNZEROED) == flags); arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) | (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED); } @@ -1002,30 +1053,30 @@ arena_prof_accum_locked(arena_t *arena, cassert(config_prof); if (likely(prof_interval == 0)) return (false); return (arena_prof_accum_impl(arena, accumbytes)); } 
JEMALLOC_INLINE bool -arena_prof_accum(arena_t *arena, uint64_t accumbytes) +arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { cassert(config_prof); if (likely(prof_interval == 0)) return (false); { bool ret; - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ret = arena_prof_accum_impl(arena, accumbytes); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (ret); } } JEMALLOC_ALWAYS_INLINE szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits) { szind_t binind; @@ -1033,35 +1084,35 @@ arena_ptr_small_binind_get(const void *p binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; if (config_debug) { arena_chunk_t *chunk; arena_t *arena; size_t pageind; size_t actual_mapbits; size_t rpages_ind; - arena_run_t *run; + const arena_run_t *run; arena_bin_t *bin; szind_t run_binind, actual_binind; arena_bin_info_t *bin_info; - arena_chunk_map_misc_t *miscelm; - void *rpages; + const arena_chunk_map_misc_t *miscelm; + const void *rpages; assert(binind != BININD_INVALID); assert(binind < NBINS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena = extent_node_arena_get(&chunk->node); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; actual_mapbits = arena_mapbits_get(chunk, pageind); assert(mapbits == actual_mapbits); assert(arena_mapbits_large_get(chunk, pageind) == 0); assert(arena_mapbits_allocated_get(chunk, pageind) != 0); rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - miscelm = arena_miscelm_get(chunk, rpages_ind); + miscelm = arena_miscelm_get_const(chunk, rpages_ind); run = &miscelm->run; run_binind = run->binind; bin = &arena->bins[run_binind]; actual_binind = (szind_t)(bin - arena->bins); assert(run_binind == actual_binind); bin_info = &arena_bin_info[actual_binind]; rpages = arena_miscelm_to_rpages(miscelm); assert(((uintptr_t)ptr - ((uintptr_t)rpages + @@ -1151,44 +1202,45 @@ arena_run_regind(arena_run_t *run, arena } 
assert(diff == regind * interval); assert(regind < bin_info->nregs); return (regind); } JEMALLOC_INLINE prof_tctx_t * -arena_prof_tctx_get(const void *ptr) +arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr) { prof_tctx_t *ret; arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t mapbits = arena_mapbits_get(chunk, pageind); assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) ret = (prof_tctx_t *)(uintptr_t)1U; else { - arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk, - pageind); + arena_chunk_map_misc_t *elm = + arena_miscelm_get_mutable(chunk, pageind); ret = atomic_read_p(&elm->prof_tctx_pun); } } else - ret = huge_prof_tctx_get(ptr); + ret = huge_prof_tctx_get(tsdn, ptr); return (ret); } JEMALLOC_INLINE void -arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { arena_chunk_t *chunk; cassert(config_prof); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { @@ -1197,34 +1249,34 @@ arena_prof_tctx_set(const void *ptr, siz assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx > (uintptr_t)1U)) { arena_chunk_map_misc_t *elm; assert(arena_mapbits_large_get(chunk, pageind) != 0); - elm = arena_miscelm_get(chunk, pageind); + elm = arena_miscelm_get_mutable(chunk, pageind); atomic_write_p(&elm->prof_tctx_pun, tctx); } else { /* * tctx must always be initialized for large runs. * Assert that the surrounding conditional logic is * equivalent to checking whether ptr refers to a large * run. 
*/ assert(arena_mapbits_large_get(chunk, pageind) == 0); } } else - huge_prof_tctx_set(ptr, tctx); + huge_prof_tctx_set(tsdn, ptr, tctx); } JEMALLOC_INLINE void -arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, - prof_tctx_t *old_tctx) +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *old_tctx) { cassert(config_prof); assert(ptr != NULL); if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr && (uintptr_t)old_tctx > (uintptr_t)1U))) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); @@ -1233,83 +1285,86 @@ arena_prof_tctx_reset(const void *ptr, s arena_chunk_map_misc_t *elm; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); assert(arena_mapbits_large_get(chunk, pageind) != 0); - elm = arena_miscelm_get(chunk, pageind); + elm = arena_miscelm_get_mutable(chunk, pageind); atomic_write_p(&elm->prof_tctx_pun, (prof_tctx_t *)(uintptr_t)1U); } else - huge_prof_tctx_reset(ptr); + huge_prof_tctx_reset(tsdn, ptr); } } JEMALLOC_ALWAYS_INLINE void -arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks) +arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { + tsd_t *tsd; ticker_t *decay_ticker; - if (unlikely(tsd == NULL)) + if (unlikely(tsdn_null(tsdn))) return; + tsd = tsdn_tsd(tsdn); decay_ticker = decay_ticker_get(tsd, arena->ind); if (unlikely(decay_ticker == NULL)) return; if (unlikely(ticker_ticks(decay_ticker, nticks))) - arena_purge(arena, false); + arena_purge(tsdn, arena, false); } JEMALLOC_ALWAYS_INLINE void -arena_decay_tick(tsd_t *tsd, arena_t *arena) +arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { - arena_decay_ticks(tsd, arena, 1); + arena_decay_ticks(tsdn, arena, 1); } JEMALLOC_ALWAYS_INLINE void * -arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero, +arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool 
slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); assert(size != 0); if (likely(tcache != NULL)) { if (likely(size <= SMALL_MAXCLASS)) { - return (tcache_alloc_small(tsd, arena, tcache, size, - ind, zero, slow_path)); + return (tcache_alloc_small(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); } if (likely(size <= tcache_maxclass)) { - return (tcache_alloc_large(tsd, arena, tcache, size, - ind, zero, slow_path)); + return (tcache_alloc_large(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); } /* (size > tcache_maxclass) case falls through. */ assert(size > tcache_maxclass); } - return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache)); + return (arena_malloc_hard(tsdn, arena, size, ind, zero)); } JEMALLOC_ALWAYS_INLINE arena_t * arena_aalloc(const void *ptr) { arena_chunk_t *chunk; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) return (extent_node_arena_get(&chunk->node)); else return (huge_aalloc(ptr)); } /* Return the size of the allocation pointed to by ptr. */ JEMALLOC_ALWAYS_INLINE size_t -arena_salloc(const void *ptr, bool demote) +arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; size_t pageind; szind_t binind; assert(ptr != NULL); @@ -1342,112 +1397,120 @@ arena_salloc(const void *ptr, bool demot * object). 
*/ assert(arena_mapbits_large_get(chunk, pageind) != 0 || arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)) == binind); ret = index2size(binind); } } else - ret = huge_salloc(ptr); + ret = huge_salloc(tsdn, ptr); return (ret); } JEMALLOC_ALWAYS_INLINE void -arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) +arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) { arena_chunk_t *chunk; size_t pageind, mapbits; + assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; mapbits = arena_mapbits_get(chunk, pageind); assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* Small allocation. */ if (likely(tcache != NULL)) { szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); - tcache_dalloc_small(tsd, tcache, ptr, binind, - slow_path); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + binind, slow_path); } else { - arena_dalloc_small(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr, pageind); + arena_dalloc_small(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr, pageind); } } else { size_t size = arena_mapbits_large_size_get(chunk, pageind); assert(config_cache_oblivious || ((uintptr_t)ptr & PAGE_MASK) == 0); if (likely(tcache != NULL) && size - large_pad <= tcache_maxclass) { - tcache_dalloc_large(tsd, tcache, ptr, size - - large_pad, slow_path); + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + size - large_pad, slow_path); } else { - arena_dalloc_large(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr); + arena_dalloc_large(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr); } } } else - huge_dalloc(tsd, ptr, tcache); + huge_dalloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE void -arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) +arena_sdalloc(tsdn_t *tsdn, void *ptr, 
size_t size, tcache_t *tcache, + bool slow_path) { arena_chunk_t *chunk; + assert(!tsdn_null(tsdn) || tcache == NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { if (config_prof && opt_prof) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; assert(arena_mapbits_allocated_get(chunk, pageind) != 0); if (arena_mapbits_large_get(chunk, pageind) != 0) { /* * Make sure to use promoted size, not request * size. */ size = arena_mapbits_large_size_get(chunk, pageind) - large_pad; } } - assert(s2u(size) == s2u(arena_salloc(ptr, false))); + assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false))); if (likely(size <= SMALL_MAXCLASS)) { /* Small allocation. */ if (likely(tcache != NULL)) { szind_t binind = size2index(size); - tcache_dalloc_small(tsd, tcache, ptr, binind, - true); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + binind, slow_path); } else { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_dalloc_small(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr, pageind); + arena_dalloc_small(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr, pageind); } } else { assert(config_cache_oblivious || ((uintptr_t)ptr & PAGE_MASK) == 0); if (likely(tcache != NULL) && size <= tcache_maxclass) { - tcache_dalloc_large(tsd, tcache, ptr, size, - true); + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + size, slow_path); } else { - arena_dalloc_large(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr); + arena_dalloc_large(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr); } } } else - huge_dalloc(tsd, ptr, tcache); + huge_dalloc(tsdn, ptr); } # endif /* JEMALLOC_ARENA_INLINE_B */ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/base.h +++ b/memory/jemalloc/src/include/jemalloc/internal/base.h @@ -4,21 +4,22 @@ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -void *base_alloc(size_t size); -void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped); +void *base_alloc(tsdn_t *tsdn, size_t size); +void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, + size_t *mapped); bool base_boot(void); -void base_prefork(void); -void base_postfork_parent(void); -void base_postfork_child(void); +void base_prefork(tsdn_t *tsdn); +void base_postfork_parent(tsdn_t *tsdn); +void base_postfork_child(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/bitmap.h +++ b/memory/jemalloc/src/include/jemalloc/internal/bitmap.h @@ -12,18 +12,18 @@ typedef unsigned long bitmap_t; /* Number of bits per group. */ #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) #define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) /* * Do some analysis on how big the bitmap is before we use a tree. For a brute - * force linear search, if we would have to call ffsl more than 2^3 times, use a - * tree instead. + * force linear search, if we would have to call ffs_lu() more than 2^3 times, + * use a tree instead. */ #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 # define USE_TREE #endif /* Number of groups required to store a given number of bits. */ #define BITMAP_BITS2GROUPS(nbits) \ ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) @@ -218,17 +218,17 @@ bitmap_sfu(bitmap_t *bitmap, const bitma } #else i = 0; g = bitmap[0]; while ((bit = ffs_lu(g)) == 0) { i++; g = bitmap[i]; } - bit = (bit - 1) + (i << LG_BITMAP_GROUP_NBITS); + bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); #endif bitmap_set(bitmap, binfo, bit); return (bit); } JEMALLOC_INLINE void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
--- a/memory/jemalloc/src/include/jemalloc/internal/chunk.h +++ b/memory/jemalloc/src/include/jemalloc/internal/chunk.h @@ -43,38 +43,39 @@ extern const char *opt_dss; extern rtree_t chunks_rtree; extern size_t chunksize; extern size_t chunksize_mask; /* (chunksize - 1). */ extern size_t chunk_npages; extern const chunk_hooks_t chunk_hooks_default; -chunk_hooks_t chunk_hooks_get(arena_t *arena); -chunk_hooks_t chunk_hooks_set(arena_t *arena, +chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); +chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks); -bool chunk_register(const void *chunk, const extent_node_t *node); +bool chunk_register(tsdn_t *tsdn, const void *chunk, + const extent_node_t *node); void chunk_deregister(const void *chunk, const extent_node_t *node); void *chunk_alloc_base(size_t size); -void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, bool *zero, - bool dalloc_node); -void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); -void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, bool committed); -void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, bool zeroed, bool committed); -bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t size, size_t offset, size_t length); +void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit, bool dalloc_node); +void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit); +void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool 
committed); +void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed, + bool committed); +bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, + size_t length); bool chunk_boot(void); -void chunk_prefork(void); -void chunk_postfork_parent(void); -void chunk_postfork_child(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE extent_node_t *chunk_lookup(const void *chunk, bool dependent); #endif
--- a/memory/jemalloc/src/include/jemalloc/internal/chunk_dss.h +++ b/memory/jemalloc/src/include/jemalloc/internal/chunk_dss.h @@ -18,22 +18,20 @@ typedef enum { extern const char *dss_prec_names[]; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS dss_prec_t chunk_dss_prec_get(void); bool chunk_dss_prec_set(dss_prec_t dss_prec); -void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit); +void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit); bool chunk_in_dss(void *chunk); -bool chunk_dss_boot(void); -void chunk_dss_prefork(void); -void chunk_dss_postfork_parent(void); -void chunk_dss_postfork_child(void); +bool chunk_dss_mergeable(void *chunk_a, void *chunk_b); +void chunk_dss_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/ctl.h +++ b/memory/jemalloc/src/include/jemalloc/internal/ctl.h @@ -16,23 +16,24 @@ struct ctl_node_s { }; struct ctl_named_node_s { struct ctl_node_s node; const char *name; /* If (nchildren == 0), this is a terminal node. */ unsigned nchildren; const ctl_node_t *children; - int (*ctl)(const size_t *, size_t, void *, size_t *, - void *, size_t); + int (*ctl)(tsd_t *, const size_t *, size_t, void *, + size_t *, void *, size_t); }; struct ctl_indexed_node_s { struct ctl_node_s node; - const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, + size_t); }; struct ctl_arena_stats_s { bool initialized; unsigned nthreads; const char *dss; ssize_t lg_dirty_mult; ssize_t decay_time; @@ -55,34 +56,36 @@ struct ctl_arena_stats_s { }; struct ctl_stats_s { size_t allocated; size_t active; size_t metadata; size_t resident; size_t mapped; + size_t retained; unsigned narenas; ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
*/ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen); -int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); +int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen); +int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, + size_t *miblenp); -int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen); +int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); -void ctl_prefork(void); -void ctl_postfork_parent(void); -void ctl_postfork_child(void); +void ctl_prefork(tsdn_t *tsdn); +void ctl_postfork_parent(tsdn_t *tsdn); +void ctl_postfork_child(tsdn_t *tsdn); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ name); \ abort(); \
--- a/memory/jemalloc/src/include/jemalloc/internal/extent.h +++ b/memory/jemalloc/src/include/jemalloc/internal/extent.h @@ -43,17 +43,17 @@ struct extent_node_s { /* Linkage for arena's runs_dirty and chunks_cache rings. */ arena_runs_dirty_link_t rd; qr(extent_node_t) cc_link; union { /* Linkage for the size/address-ordered tree. */ rb_node(extent_node_t) szad_link; - /* Linkage for arena's huge and node_cache lists. */ + /* Linkage for arena's achunks, huge, and node_cache lists. */ ql_elm(extent_node_t) ql_link; }; /* Linkage for the address-ordered tree. */ rb_node(extent_node_t) ad_link; }; typedef rb_tree(extent_node_t) extent_tree_t;
--- a/memory/jemalloc/src/include/jemalloc/internal/huge.h +++ b/memory/jemalloc/src/include/jemalloc/internal/huge.h @@ -4,33 +4,32 @@ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero, - tcache_t *tcache); -void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, - bool zero, tcache_t *tcache); -bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, +void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); +void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero); +bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero); void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, size_t alignment, bool zero, tcache_t *tcache); #ifdef JEMALLOC_JET typedef void (huge_dalloc_junk_t)(void *, size_t); extern huge_dalloc_junk_t *huge_dalloc_junk; #endif -void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache); +void huge_dalloc(tsdn_t *tsdn, void *ptr); arena_t *huge_aalloc(const void *ptr); -size_t huge_salloc(const void *ptr); -prof_tctx_t *huge_prof_tctx_get(const void *ptr); -void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx); -void huge_prof_tctx_reset(const void *ptr); +size_t huge_salloc(tsdn_t *tsdn, const void *ptr); +prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx); +void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ 
/******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in +++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in @@ -156,17 +156,20 @@ static const bool config_cache_oblivious #ifdef JEMALLOC_ZONE #include <mach/mach_error.h> #include <mach/mach_init.h> #include <mach/vm_map.h> #include <malloc/malloc.h> #endif +#include "jemalloc/internal/ph.h" +#ifndef __PGI #define RB_COMPACT +#endif #include "jemalloc/internal/rb.h" #include "jemalloc/internal/qr.h" #include "jemalloc/internal/ql.h" /* * jemalloc can conceptually be broken into components (arena, tcache, etc.), * but there are circular dependencies that cannot be broken without * substantial performance degradation. In order to reduce the effect on @@ -179,16 +182,19 @@ static const bool config_cache_oblivious * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. * JEMALLOC_H_INLINES : Inline functions. */ /******************************************************************************/ #define JEMALLOC_H_TYPES #include "jemalloc/internal/jemalloc_internal_macros.h" +/* Page size index type. */ +typedef unsigned pszind_t; + /* Size class index type. 
*/ typedef unsigned szind_t; /* * Flags bits: * * a: arena * t: tcache @@ -228,17 +234,17 @@ typedef unsigned szind_t; # define LG_QUANTUM 4 # endif # ifdef __ia64__ # define LG_QUANTUM 4 # endif # ifdef __alpha__ # define LG_QUANTUM 4 # endif -# if (defined(__sparc64__) || defined(__sparcv9)) +# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) # define LG_QUANTUM 4 # endif # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) # define LG_QUANTUM 4 # endif # ifdef __arm__ # define LG_QUANTUM 3 # endif @@ -252,16 +258,19 @@ typedef unsigned szind_t; # define LG_QUANTUM 3 # endif # ifdef __or1k__ # define LG_QUANTUM 3 # endif # ifdef __powerpc__ # define LG_QUANTUM 4 # endif +# ifdef __riscv__ +# define LG_QUANTUM 4 +# endif # ifdef __s390__ # define LG_QUANTUM 4 # endif # ifdef __SH4__ # define LG_QUANTUM 4 # endif # ifdef __tile__ # define LG_QUANTUM 4 @@ -355,23 +364,25 @@ typedef unsigned szind_t; #else # define VARIABLE_ARRAY(type, name, count) type name[(count)] #endif #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/smoothstep.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/witness.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/rtree.h" @@ -386,23 +397,25 @@ typedef unsigned szind_t; #undef JEMALLOC_H_TYPES /******************************************************************************/ #define JEMALLOC_H_STRUCTS 
#include "jemalloc/internal/nstime.h" #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/smoothstep.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/witness.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #define JEMALLOC_ARENA_STRUCTS_A #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_STRUCTS_A #include "jemalloc/internal/extent.h" #define JEMALLOC_ARENA_STRUCTS_B @@ -435,66 +448,77 @@ extern bool opt_xmalloc; extern bool opt_zero; extern unsigned opt_narenas; extern bool in_valgrind; /* Number of CPUs. */ extern unsigned ncpus; +/* Number of arenas used for automatic multiplexing of threads and arenas. */ +extern unsigned narenas_auto; + /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. */ extern arena_t **arenas; /* + * pind2sz_tab encodes the same information as could be computed by + * pind2sz_compute(). + */ +extern size_t const pind2sz_tab[NPSIZES]; +/* * index2size_tab encodes the same information as could be computed (at * unacceptable cost in some code paths) by index2size_compute(). */ -extern size_t const index2size_tab[NSIZES+1]; +extern size_t const index2size_tab[NSIZES]; /* * size2index_tab is a compact lookup table that rounds request sizes up to * size classes. In order to reduce cache footprint, the table is compressed, * and all accesses are via size2index(). 
*/ extern uint8_t const size2index_tab[]; +arena_t *a0get(void); void *a0malloc(size_t size); void a0dalloc(void *ptr); void *bootstrap_malloc(size_t size); void *bootstrap_calloc(size_t num, size_t size); void bootstrap_free(void *ptr); -arena_t *arenas_extend(unsigned ind); unsigned narenas_total_get(void); -arena_t *arena_init(unsigned ind); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind); arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); -arena_t *arena_choose_hard(tsd_t *tsd); +arena_t *arena_choose_hard(tsd_t *tsd, bool internal); void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); void thread_allocated_cleanup(tsd_t *tsd); void thread_deallocated_cleanup(tsd_t *tsd); +void iarena_cleanup(tsd_t *tsd); void arena_cleanup(tsd_t *tsd); void arenas_tdata_cleanup(tsd_t *tsd); void narenas_tdata_cleanup(tsd_t *tsd); void arenas_tdata_bypass_cleanup(tsd_t *tsd); void jemalloc_prefork(void); void jemalloc_postfork_parent(void); void jemalloc_postfork_child(void); #include "jemalloc/internal/nstime.h" #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/smoothstep.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/witness.h" #include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/bitmap.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/arena.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" @@ -509,67 +533,154 @@ void jemalloc_postfork_child(void); #undef JEMALLOC_H_EXTERNS /******************************************************************************/ #define JEMALLOC_H_INLINES #include 
"jemalloc/internal/nstime.h" #include "jemalloc/internal/valgrind.h" #include "jemalloc/internal/util.h" #include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/spin.h" #include "jemalloc/internal/prng.h" #include "jemalloc/internal/ticker.h" #include "jemalloc/internal/ckh.h" #include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/smoothstep.h" #include "jemalloc/internal/stats.h" #include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" #include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" #include "jemalloc/internal/mb.h" #include "jemalloc/internal/extent.h" #include "jemalloc/internal/base.h" #include "jemalloc/internal/rtree.h" #include "jemalloc/internal/pages.h" #include "jemalloc/internal/chunk.h" #include "jemalloc/internal/huge.h" #ifndef JEMALLOC_ENABLE_INLINE +pszind_t psz2ind(size_t psz); +size_t pind2sz_compute(pszind_t pind); +size_t pind2sz_lookup(pszind_t pind); +size_t pind2sz(pszind_t pind); +size_t psz2u(size_t psz); szind_t size2index_compute(size_t size); szind_t size2index_lookup(size_t size); szind_t size2index(size_t size); size_t index2size_compute(szind_t index); size_t index2size_lookup(szind_t index); size_t index2size(szind_t index); size_t s2u_compute(size_t size); size_t s2u_lookup(size_t size); size_t s2u(size_t size); size_t sa2u(size_t size, size_t alignment); +arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); arena_t *arena_choose(tsd_t *tsd, arena_t *arena); +arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing); -arena_t *arena_get(unsigned ind, bool init_if_missing); +arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_INLINE pszind_t +psz2ind(size_t psz) +{ + + if (unlikely(psz > 
HUGE_MAXCLASS)) + return (NPSIZES); + { + pszind_t x = lg_floor((psz<<1)-1); + pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - + (LG_SIZE_CLASS_GROUP + LG_PAGE); + pszind_t grp = shift << LG_SIZE_CLASS_GROUP; + + pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + pszind_t ind = grp + mod; + return (ind); + } +} + +JEMALLOC_INLINE size_t +pind2sz_compute(pszind_t pind) +{ + + { + size_t grp = pind >> LG_SIZE_CLASS_GROUP; + size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_PAGE + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_PAGE-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t sz = grp_size + mod_size; + return (sz); + } +} + +JEMALLOC_INLINE size_t +pind2sz_lookup(pszind_t pind) +{ + size_t ret = (size_t)pind2sz_tab[pind]; + assert(ret == pind2sz_compute(pind)); + return (ret); +} + +JEMALLOC_INLINE size_t +pind2sz(pszind_t pind) +{ + + assert(pind < NPSIZES); + return (pind2sz_lookup(pind)); +} + +JEMALLOC_INLINE size_t +psz2u(size_t psz) +{ + + if (unlikely(psz > HUGE_MAXCLASS)) + return (0); + { + size_t x = lg_floor((psz<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (psz + delta_mask) & ~delta_mask; + return (usize); + } +} + JEMALLOC_INLINE szind_t size2index_compute(size_t size) { + if (unlikely(size > HUGE_MAXCLASS)) + return (NSIZES); #if (NTBINS != 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); return (lg_ceil < lg_tmin ? 
0 : lg_ceil - lg_tmin); } #endif { - szind_t x = unlikely(ZI(size) < 0) ? ((size<<1) ? - (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1)) - : lg_floor((size<<1)-1); + szind_t x = lg_floor((size<<1)-1); szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); szind_t grp = shift << LG_SIZE_CLASS_GROUP; szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t delta_inverse_mask = ZI(-1) << lg_delta; @@ -645,28 +756,28 @@ index2size(szind_t index) assert(index < NSIZES); return (index2size_lookup(index)); } JEMALLOC_ALWAYS_INLINE size_t s2u_compute(size_t size) { + if (unlikely(size > HUGE_MAXCLASS)) + return (0); #if (NTBINS > 0) if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : (ZU(1) << lg_ceil)); } #endif { - size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ? - (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1)) - : lg_floor((size<<1)-1); + size_t x = lg_floor((size<<1)-1); size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; size_t delta = ZU(1) << lg_delta; size_t delta_mask = delta - 1; size_t usize = (size + delta_mask) & ~delta_mask; return (usize); } } @@ -775,29 +886,44 @@ sa2u(size_t size, size_t alignment) /* size_t overflow. */ return (0); } return (usize); } /* Choose an arena based on a per-thread value. */ JEMALLOC_INLINE arena_t * -arena_choose(tsd_t *tsd, arena_t *arena) +arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { arena_t *ret; if (arena != NULL) return (arena); - if (unlikely((ret = tsd_arena_get(tsd)) == NULL)) - ret = arena_choose_hard(tsd); + ret = internal ? 
tsd_iarena_get(tsd) : tsd_arena_get(tsd); + if (unlikely(ret == NULL)) + ret = arena_choose_hard(tsd, internal); return (ret); } +JEMALLOC_INLINE arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, false)); +} + +JEMALLOC_INLINE arena_t * +arena_ichoose(tsd_t *tsd, arena_t *arena) +{ + + return (arena_choose_impl(tsd, arena, true)); +} + JEMALLOC_INLINE arena_tdata_t * arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) { arena_tdata_t *tdata; arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); if (unlikely(arenas_tdata == NULL)) { /* arenas_tdata hasn't been initialized yet. */ @@ -814,27 +940,27 @@ arena_tdata_get(tsd_t *tsd, unsigned ind tdata = &arenas_tdata[ind]; if (likely(tdata != NULL) || !refresh_if_missing) return (tdata); return (arena_tdata_get_hard(tsd, ind)); } JEMALLOC_INLINE arena_t * -arena_get(unsigned ind, bool init_if_missing) +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { arena_t *ret; assert(ind <= MALLOCX_ARENA_MAX); ret = arenas[ind]; if (unlikely(ret == NULL)) { ret = atomic_read_p((void *)&arenas[ind]); if (init_if_missing && unlikely(ret == NULL)) - ret = arena_init(ind); + ret = arena_init(tsdn, ind); } return (ret); } JEMALLOC_INLINE ticker_t * decay_ticker_get(tsd_t *tsd, unsigned ind) { arena_tdata_t *tdata; @@ -858,171 +984,151 @@ decay_ticker_get(tsd_t *tsd, unsigned in #define JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/arena.h" #undef JEMALLOC_ARENA_INLINE_B #include "jemalloc/internal/hash.h" #include "jemalloc/internal/quarantine.h" #ifndef JEMALLOC_ENABLE_INLINE arena_t *iaalloc(const void *ptr); -size_t isalloc(const void *ptr, bool demote); -void *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, +size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); +void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); -void *imalloct(tsd_t *tsd, size_t 
size, szind_t ind, tcache_t *tcache, - arena_t *arena); -void *imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path); -void *icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, - arena_t *arena); -void *icalloc(tsd_t *tsd, size_t size, szind_t ind); -void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, + bool slow_path); +void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena); -void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); -size_t ivsalloc(const void *ptr, bool demote); +size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); size_t u2rz(size_t usize); -size_t p2rz(const void *ptr); -void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata, +size_t p2rz(tsdn_t *tsdn, const void *ptr); +void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, bool slow_path); -void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache); void idalloc(tsd_t *tsd, void *ptr); void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); -void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); -void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); +void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); +void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, + bool slow_path); void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *iralloc(tsd_t *tsd, void *ptr, size_t 
oldsize, size_t size, size_t alignment, bool zero); -bool ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, +bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_ALWAYS_INLINE arena_t * iaalloc(const void *ptr) { assert(ptr != NULL); return (arena_aalloc(ptr)); } /* * Typical usage: + * tsdn_t *tsdn = [...] * void *ptr = [...] - * size_t sz = isalloc(ptr, config_prof); + * size_t sz = isalloc(tsdn, ptr, config_prof); */ JEMALLOC_ALWAYS_INLINE size_t -isalloc(const void *ptr, bool demote) +isalloc(tsdn_t *tsdn, const void *ptr, bool demote) { assert(ptr != NULL); /* Demotion only makes sense if config_prof is true. */ assert(config_prof || !demote); - return (arena_salloc(ptr, demote)); + return (arena_salloc(tsdn, ptr, demote)); } JEMALLOC_ALWAYS_INLINE void * -iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache, +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path) { void *ret; assert(size != 0); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path); + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(ret, + arena_metadata_allocated_add(iaalloc(ret), + isalloc(tsdn, ret, config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) +{ + + return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), + false, NULL, slow_path)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t 
*arena) +{ + void *ret; + + assert(usize != 0); + assert(usize == sa2u(usize, alignment)); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); + + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, config_prof)); } return (ret); } JEMALLOC_ALWAYS_INLINE void * -imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena) -{ - - return (iallocztm(tsd, size, ind, false, tcache, false, arena, true)); -} - -JEMALLOC_ALWAYS_INLINE void * -imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path) -{ - - return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false, - NULL, slow_path)); -} - -JEMALLOC_ALWAYS_INLINE void * -icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena) -{ - - return (iallocztm(tsd, size, ind, true, tcache, false, arena, true)); -} - -JEMALLOC_ALWAYS_INLINE void * -icalloc(tsd_t *tsd, size_t size, szind_t ind) -{ - - return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false, - NULL, true)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, bool is_metadata, arena_t *arena) -{ - void *ret; - - assert(usize != 0); - assert(usize == sa2u(usize, alignment)); - - ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache); - assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); - if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(ret, - config_prof)); - } - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { - return (ipallocztm(tsd, usize, 
alignment, zero, tcache, false, arena)); + return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); } JEMALLOC_ALWAYS_INLINE void * ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { - return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true), - false, NULL)); + return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd, true), false, NULL)); } JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(const void *ptr, bool demote) +ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) { extent_node_t *node; /* Return 0 if ptr is not within a chunk managed by jemalloc. */ node = chunk_lookup(ptr, false); if (node == NULL) return (0); /* Only arena chunks should be looked up via interior pointers. */ assert(extent_node_addr_get(node) == ptr || extent_node_achunk_get(node)); - return (isalloc(ptr, demote)); + return (isalloc(tsdn, ptr, demote)); } JEMALLOC_INLINE size_t u2rz(size_t usize) { size_t ret; if (usize <= SMALL_MAXCLASS) { @@ -1030,107 +1136,104 @@ u2rz(size_t usize) ret = arena_bin_info[binind].redzone_size; } else ret = 0; return (ret); } JEMALLOC_INLINE size_t -p2rz(const void *ptr) +p2rz(tsdn_t *tsdn, const void *ptr) { - size_t usize = isalloc(ptr, false); + size_t usize = isalloc(tsdn, ptr, false); return (u2rz(usize)); } JEMALLOC_ALWAYS_INLINE void -idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata, +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, bool slow_path) { assert(ptr != NULL); + assert(!is_metadata || tcache == NULL); + assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); if (config_stats && is_metadata) { - arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr, + arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, config_prof)); } - arena_dalloc(tsd, ptr, tcache, slow_path); -} - -JEMALLOC_ALWAYS_INLINE void -idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache) -{ - - idalloctm(tsd, ptr, tcache, false, true); + arena_dalloc(tsdn, ptr, tcache, 
slow_path); } JEMALLOC_ALWAYS_INLINE void idalloc(tsd_t *tsd, void *ptr) { - idalloctm(tsd, ptr, tcache_get(tsd, false), false, true); + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); } JEMALLOC_ALWAYS_INLINE void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { if (slow_path && config_fill && unlikely(opt_quarantine)) quarantine(tsd, ptr); else - idalloctm(tsd, ptr, tcache, false, slow_path); + idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); } JEMALLOC_ALWAYS_INLINE void -isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path) { - arena_sdalloc(tsd, ptr, size, tcache); + arena_sdalloc(tsdn, ptr, size, tcache, slow_path); } JEMALLOC_ALWAYS_INLINE void -isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) +isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) { - if (config_fill && unlikely(opt_quarantine)) + if (slow_path && config_fill && unlikely(opt_quarantine)) quarantine(tsd, ptr); else - isdalloct(tsd, ptr, size, tcache); + isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); } JEMALLOC_ALWAYS_INLINE void * iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { void *p; size_t usize, copysize; usize = sa2u(size + extra, alignment); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return (NULL); - p = ipalloct(tsd, usize, alignment, zero, tcache, arena); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); if (p == NULL) { if (extra == 0) return (NULL); /* Try again, without extra this time. 
*/ usize = sa2u(size, alignment); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return (NULL); - p = ipalloct(tsd, usize, alignment, zero, tcache, arena); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, + arena); if (p == NULL) return (NULL); } /* * Copy at most size bytes (not size+extra), since the caller has no * expectation that the extra bytes will be reliably preserved. */ copysize = (size < oldsize) ? size : oldsize; memcpy(p, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); + isqalloc(tsd, ptr, oldsize, tcache, true); return (p); } JEMALLOC_ALWAYS_INLINE void * iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { @@ -1156,30 +1259,30 @@ iralloc(tsd_t *tsd, void *ptr, size_t ol bool zero) { return (iralloct(tsd, ptr, oldsize, size, alignment, zero, tcache_get(tsd, true), NULL)); } JEMALLOC_ALWAYS_INLINE bool -ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra, +ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero) { assert(ptr != NULL); assert(size != 0); if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) != 0) { /* Existing object alignment is inadequate. */ return (true); } - return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero)); + return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero)); } #endif #include "jemalloc/internal/prof.h" #undef JEMALLOC_H_INLINES /******************************************************************************/ #endif /* JEMALLOC_INTERNAL_H */
--- a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_decls.h +++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_decls.h @@ -12,18 +12,28 @@ # if !defined(__pnacl__) && !defined(__native_client__) # include <sys/syscall.h> # if !defined(SYS_write) && defined(__NR_write) # define SYS_write __NR_write # endif # include <sys/uio.h> # endif # include <pthread.h> +# ifdef JEMALLOC_OS_UNFAIR_LOCK +# include <os/lock.h> +# endif +# ifdef JEMALLOC_GLIBC_MALLOC_HOOK +# include <sched.h> +# endif # include <errno.h> # include <sys/time.h> +# include <time.h> +# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# include <mach/mach_time.h> +# endif #endif #include <sys/types.h> #include <limits.h> #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif #include <stdarg.h>
--- a/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal_defs.h.in @@ -56,32 +56,55 @@ #undef JEMALLOC_HAVE_BUILTIN_CLZ /* * Defined if madvise(2) is available. */ #undef JEMALLOC_HAVE_MADVISE /* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +#undef JEMALLOC_OS_UNFAIR_LOCK + +/* * Defined if OSSpin*() functions are available, as provided by Darwin, and * documented in the spinlock(3) manual page. */ #undef JEMALLOC_OSSPIN +/* Defined if syscall(2) is available. */ +#undef JEMALLOC_HAVE_SYSCALL + /* * Defined if secure_getenv(3) is available. */ #undef JEMALLOC_HAVE_SECURE_GETENV /* * Defined if issetugid(2) is available. */ #undef JEMALLOC_HAVE_ISSETUGID /* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC + +/* + * Defined if mach_absolute_time() is available. + */ +#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME + +/* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc * bootstrapping will cause recursion into the pthreads library. Therefore, if * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in * malloc_tsd. */ #undef JEMALLOC_MALLOC_THREAD_CLEANUP @@ -184,16 +207,22 @@ * of mmap()/munmap() calls will cause virtual memory map holes. */ #undef JEMALLOC_MUNMAP /* TLS is used to map arenas and magazine caches to threads. */ #undef JEMALLOC_TLS /* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#undef JEMALLOC_INTERNAL_UNREACHABLE + +/* * ffs*() functions to use for bitmapping. Don't use these directly; instead, * use ffs_*() from util.h. 
*/ #undef JEMALLOC_INTERNAL_FFSLL #undef JEMALLOC_INTERNAL_FFSL #undef JEMALLOC_INTERNAL_FFS /* @@ -210,16 +239,25 @@ /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ #undef JEMALLOC_ZONE #undef JEMALLOC_ZONE_VERSION /* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* * Methods for purging unused pages differ between operating systems. * * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, * such that new pages will be demand-zeroed if * the address region is later touched. * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being * unused, such that they will be discarded rather * than swapped out.
--- a/memory/jemalloc/src/include/jemalloc/internal/mb.h +++ b/memory/jemalloc/src/include/jemalloc/internal/mb.h @@ -37,27 +37,27 @@ mb_write(void) asm volatile ("pusha;" "xor %%eax,%%eax;" "cpuid;" "popa;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); -#else +# else /* * This is hopefully enough to keep the compiler from reordering * instructions around this one. */ asm volatile ("nop;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); -#endif +# endif } #elif (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE void mb_write(void) { asm volatile ("sfence" : /* Outputs. */ @@ -99,17 +99,17 @@ mb_write(void) * This is much slower than a simple memory barrier, but the semantics of mutex * unlock make this work. */ JEMALLOC_INLINE void mb_write(void) { malloc_mutex_t mtx; - malloc_mutex_init(&mtx); - malloc_mutex_lock(&mtx); - malloc_mutex_unlock(&mtx); + malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT); + malloc_mutex_lock(TSDN_NULL, &mtx); + malloc_mutex_unlock(TSDN_NULL, &mtx); } #endif #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/mutex.h +++ b/memory/jemalloc/src/include/jemalloc/internal/mutex.h @@ -1,111 +1,147 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct malloc_mutex_s malloc_mutex_t; #ifdef _WIN32 # define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_INITIALIZER \ + {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0} +# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} #else # if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \ + WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} # else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} +# define MALLOC_MUTEX_INITIALIZER \ + {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} # endif #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct malloc_mutex_s { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 SRWLOCK lock; # else CRITICAL_SECTION lock; # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; #else pthread_mutex_t lock; #endif + witness_t witness; }; #endif /* 
JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #else # undef isthreaded /* Undo private_namespace.h definition. */ # define isthreaded true #endif -bool malloc_mutex_init(malloc_mutex_t *mutex); -void malloc_mutex_prefork(malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(malloc_mutex_t *mutex); -bool mutex_boot(void); +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); +bool malloc_mutex_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(malloc_mutex_t *mutex); -void malloc_mutex_unlock(malloc_mutex_t *mutex); +void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE void -malloc_mutex_lock(malloc_mutex_t *mutex) +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) { + witness_assert_not_owner(tsdn, &mutex->witness); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 AcquireSRWLockExclusive(&mutex->lock); # else EnterCriticalSection(&mutex->lock); # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_lock(&mutex->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mutex->lock); #else pthread_mutex_lock(&mutex->lock); #endif + witness_lock(tsdn, 
&mutex->witness); } } JEMALLOC_INLINE void -malloc_mutex_unlock(malloc_mutex_t *mutex) +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) { + witness_unlock(tsdn, &mutex->witness); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 ReleaseSRWLockExclusive(&mutex->lock); # else LeaveCriticalSection(&mutex->lock); # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_unlock(&mutex->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mutex->lock); #else pthread_mutex_unlock(&mutex->lock); #endif } } + +JEMALLOC_INLINE void +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + if (isthreaded) + witness_assert_owner(tsdn, &mutex->witness); +} + +JEMALLOC_INLINE void +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) +{ + + if (isthreaded) + witness_assert_not_owner(tsdn, &mutex->witness); +} #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/nstime.h +++ b/memory/jemalloc/src/include/jemalloc/internal/nstime.h @@ -1,18 +1,15 @@ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES -#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \ - && _POSIX_MONOTONIC_CLOCK >= 0 - typedef struct nstime_s nstime_t; /* Maximum supported number of seconds (~584 years). */ -#define NSTIME_SEC_MAX 18446744072 +#define NSTIME_SEC_MAX KQU(18446744072) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct nstime_s { uint64_t ns; }; @@ -29,19 +26,22 @@ uint64_t nstime_nsec(const nstime_t *tim void nstime_copy(nstime_t *time, const nstime_t *source); int nstime_compare(const nstime_t *a, const nstime_t *b); void nstime_add(nstime_t *time, const nstime_t *addend); void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); void nstime_imultiply(nstime_t *time, uint64_t multiplier); void nstime_idivide(nstime_t *time, uint64_t divisor); uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); #ifdef JEMALLOC_JET +typedef bool (nstime_monotonic_t)(void); +extern nstime_monotonic_t *nstime_monotonic; typedef bool (nstime_update_t)(nstime_t *); extern nstime_update_t *nstime_update; #else +bool nstime_monotonic(void); bool nstime_update(nstime_t *time); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */
--- a/memory/jemalloc/src/include/jemalloc/internal/pages.h +++ b/memory/jemalloc/src/include/jemalloc/internal/pages.h @@ -4,23 +4,24 @@ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -void *pages_map(void *addr, size_t size); +void *pages_map(void *addr, size_t size, bool *commit); void pages_unmap(void *addr, size_t size); void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, - size_t size); + size_t size, bool *commit); bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge(void *addr, size_t size); +void pages_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/include/jemalloc/internal/ph.h @@ -0,0 +1,345 @@ +/* + * A Pairing Heap implementation. + * + * "The Pairing Heap: A New Form of Self-Adjusting Heap" + * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf + * + * With auxiliary twopass list, described in a follow on paper. + * + * "Pairing Heaps: Experiments and Analysis" + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf + * + ******************************************************************************* + */ + +#ifndef PH_H_ +#define PH_H_ + +/* Node structure. */ +#define phn(a_type) \ +struct { \ + a_type *phn_prev; \ + a_type *phn_next; \ + a_type *phn_lchild; \ +} + +/* Root structure. */ +#define ph(a_type) \ +struct { \ + a_type *ph_root; \ +} + +/* Internal utility macros. */ +#define phn_lchild_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_lchild) +#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ + a_phn->a_field.phn_lchild = a_lchild; \ +} while (0) + +#define phn_next_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_next) +#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ + a_phn->a_field.phn_prev = a_prev; \ +} while (0) + +#define phn_prev_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_prev) +#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ + a_phn->a_field.phn_next = a_next; \ +} while (0) + +#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ + a_type *phn0child; \ + \ + assert(a_phn0 != NULL); \ + assert(a_phn1 != NULL); \ + assert(a_cmp(a_phn0, a_phn1) <= 0); \ + \ + phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ + phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ + phn_next_set(a_type, a_field, a_phn1, phn0child); \ + if (phn0child != NULL) \ + phn_prev_set(a_type, a_field, phn0child, a_phn1); \ + phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ +} while (0) + +#define phn_merge(a_type, a_field, a_phn0, a_phn1, 
a_cmp, r_phn) do { \ + if (a_phn0 == NULL) \ + r_phn = a_phn1; \ + else if (a_phn1 == NULL) \ + r_phn = a_phn0; \ + else if (a_cmp(a_phn0, a_phn1) < 0) { \ + phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ + a_cmp); \ + r_phn = a_phn0; \ + } else { \ + phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ + a_cmp); \ + r_phn = a_phn1; \ + } \ +} while (0) + +#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *head = NULL; \ + a_type *tail = NULL; \ + a_type *phn0 = a_phn; \ + a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ + \ + /* \ + * Multipass merge, wherein the first two elements of a FIFO \ + * are repeatedly merged, and each result is appended to the \ + * singly linked FIFO, until the FIFO contains only a single \ + * element. We start with a sibling list but no reference to \ + * its tail, so we do a single pass over the sibling list to \ + * populate the FIFO. \ + */ \ + if (phn1 != NULL) { \ + a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ + if (phnrest != NULL) \ + phn_prev_set(a_type, a_field, phnrest, NULL); \ + phn_prev_set(a_type, a_field, phn0, NULL); \ + phn_next_set(a_type, a_field, phn0, NULL); \ + phn_prev_set(a_type, a_field, phn1, NULL); \ + phn_next_set(a_type, a_field, phn1, NULL); \ + phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ + head = tail = phn0; \ + phn0 = phnrest; \ + while (phn0 != NULL) { \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + phnrest = phn_next_get(a_type, a_field, \ + phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, \ + phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, \ + NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + phn_prev_set(a_type, a_field, phn1, \ + NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = phnrest; \ + } else { \ + 
phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = NULL; \ + } \ + } \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + while (true) { \ + head = phn_next_get(a_type, a_field, \ + phn1); \ + assert(phn_prev_get(a_type, a_field, \ + phn0) == NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + assert(phn_prev_get(a_type, a_field, \ + phn1) == NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + if (head == NULL) \ + break; \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, \ + phn0); \ + } \ + } \ + } \ + r_phn = phn0; \ +} while (0) + +#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ + a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ + if (phn != NULL) { \ + phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_prev_set(a_type, a_field, phn, NULL); \ + ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ + assert(phn_next_get(a_type, a_field, phn) == NULL); \ + phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ + a_ph->ph_root); \ + } \ +} while (0) + +#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ + if (lchild == NULL) \ + r_phn = NULL; \ + else { \ + ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ + r_phn); \ + } \ +} while (0) + +/* + * The ph_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to ph_gen(). 
+ */ +#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ +a_attr void a_prefix##new(a_ph_type *ph); \ +a_attr bool a_prefix##empty(a_ph_type *ph); \ +a_attr a_type *a_prefix##first(a_ph_type *ph); \ +a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ +a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ +a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); + +/* + * The ph_gen() macro generates a type-specific pairing heap implementation, + * based on the above cpp macros. + */ +#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_ph_type *ph) \ +{ \ + \ + memset(ph, 0, sizeof(ph(a_type))); \ +} \ +a_attr bool \ +a_prefix##empty(a_ph_type *ph) \ +{ \ + \ + return (ph->ph_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_ph_type *ph) \ +{ \ + \ + if (ph->ph_root == NULL) \ + return (NULL); \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + return (ph->ph_root); \ +} \ +a_attr void \ +a_prefix##insert(a_ph_type *ph, a_type *phn) \ +{ \ + \ + memset(&phn->a_field, 0, sizeof(phn(a_type))); \ + \ + /* \ + * Treat the root as an aux list during insertion, and lazily \ + * merge during a_prefix##remove_first(). For elements that \ + * are inserted, then removed via a_prefix##remove() before the \ + * aux list is ever processed, this makes insert/remove \ + * constant-time, whereas eager merging would make insert \ + * O(log n). 
\ + */ \ + if (ph->ph_root == NULL) \ + ph->ph_root = phn; \ + else { \ + phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ + a_field, ph->ph_root)); \ + if (phn_next_get(a_type, a_field, ph->ph_root) != \ + NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, ph->ph_root), \ + phn); \ + } \ + phn_prev_set(a_type, a_field, phn, ph->ph_root); \ + phn_next_set(a_type, a_field, ph->ph_root, phn); \ + } \ +} \ +a_attr a_type * \ +a_prefix##remove_first(a_ph_type *ph) \ +{ \ + a_type *ret; \ + \ + if (ph->ph_root == NULL) \ + return (NULL); \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + \ + ret = ph->ph_root; \ + \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + \ + return (ret); \ +} \ +a_attr void \ +a_prefix##remove(a_ph_type *ph, a_type *phn) \ +{ \ + a_type *replace, *parent; \ + \ + /* \ + * We can delete from aux list without merging it, but we need \ + * to merge if we are dealing with the root node. \ + */ \ + if (ph->ph_root == phn) { \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + if (ph->ph_root == phn) { \ + ph_merge_children(a_type, a_field, ph->ph_root, \ + a_cmp, ph->ph_root); \ + return; \ + } \ + } \ + \ + /* Get parent (if phn is leftmost child) before mutating. */ \ + if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ + if (phn_lchild_get(a_type, a_field, parent) != phn) \ + parent = NULL; \ + } \ + /* Find a possible replacement node, and link to parent. */ \ + ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ + /* Set next/prev for sibling linked list. 
*/ \ + if (replace != NULL) { \ + if (parent != NULL) { \ + phn_prev_set(a_type, a_field, replace, parent); \ + phn_lchild_set(a_type, a_field, parent, \ + replace); \ + } else { \ + phn_prev_set(a_type, a_field, replace, \ + phn_prev_get(a_type, a_field, phn)); \ + if (phn_prev_get(a_type, a_field, phn) != \ + NULL) { \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + replace); \ + } \ + } \ + phn_next_set(a_type, a_field, replace, \ + phn_next_get(a_type, a_field, phn)); \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + replace); \ + } \ + } else { \ + if (parent != NULL) { \ + a_type *next = phn_next_get(a_type, a_field, \ + phn); \ + phn_lchild_set(a_type, a_field, parent, next); \ + if (next != NULL) { \ + phn_prev_set(a_type, a_field, next, \ + parent); \ + } \ + } else { \ + assert(phn_prev_get(a_type, a_field, phn) != \ + NULL); \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + phn_next_get(a_type, a_field, phn)); \ + } \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + phn_prev_get(a_type, a_field, phn)); \ + } \ + } \ +} + +#endif /* PH_H_ */
--- a/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt +++ b/memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt @@ -1,46 +1,48 @@ a0dalloc +a0get a0malloc arena_aalloc arena_alloc_junk_small arena_basic_stats_merge arena_bin_index arena_bin_info -arena_bitselm_get +arena_bitselm_get_const +arena_bitselm_get_mutable arena_boot arena_choose arena_choose_hard +arena_choose_impl arena_chunk_alloc_huge arena_chunk_cache_maybe_insert arena_chunk_cache_maybe_remove arena_chunk_dalloc_huge arena_chunk_ralloc_huge_expand arena_chunk_ralloc_huge_shrink arena_chunk_ralloc_huge_similar arena_cleanup arena_dalloc arena_dalloc_bin arena_dalloc_bin_junked_locked arena_dalloc_junk_large -arena_dalloc_junk_large_impl arena_dalloc_junk_small -arena_dalloc_junk_small_impl arena_dalloc_large arena_dalloc_large_junked_locked arena_dalloc_small arena_decay_tick arena_decay_ticks arena_decay_time_default_get arena_decay_time_default_set arena_decay_time_get arena_decay_time_set arena_dss_prec_get arena_dss_prec_set arena_get +arena_ichoose arena_init arena_lg_dirty_mult_default_get arena_lg_dirty_mult_default_set arena_lg_dirty_mult_get arena_lg_dirty_mult_set arena_malloc arena_malloc_hard arena_malloc_large @@ -57,26 +59,28 @@ arena_mapbits_large_size_get arena_mapbits_size_decode arena_mapbits_size_encode arena_mapbits_small_runind_get arena_mapbits_small_set arena_mapbits_unallocated_set arena_mapbits_unallocated_size_get arena_mapbits_unallocated_size_set arena_mapbits_unzeroed_get -arena_mapbitsp_get +arena_mapbitsp_get_const +arena_mapbitsp_get_mutable arena_mapbitsp_read arena_mapbitsp_write arena_maxrun arena_maybe_purge arena_metadata_allocated_add arena_metadata_allocated_get arena_metadata_allocated_sub arena_migrate -arena_miscelm_get +arena_miscelm_get_const +arena_miscelm_get_mutable arena_miscelm_to_pageind arena_miscelm_to_rpages arena_new arena_node_alloc arena_node_dalloc arena_nthreads_dec arena_nthreads_get arena_nthreads_inc @@ -97,16 
+101,17 @@ arena_prof_tctx_set arena_ptr_small_binind_get arena_purge arena_quarantine_junk_small arena_ralloc arena_ralloc_junk_large arena_ralloc_no_move arena_rd_to_miscelm arena_redzone_corruption +arena_reset arena_run_regind arena_run_to_miscelm arena_salloc arena_sdalloc arena_stats_merge arena_tcache_fill_small arena_tdata_get arena_tdata_get_hard @@ -158,30 +163,25 @@ chunk_alloc_dss chunk_alloc_mmap chunk_alloc_wrapper chunk_boot chunk_dalloc_cache chunk_dalloc_mmap chunk_dalloc_wrapper chunk_deregister chunk_dss_boot -chunk_dss_postfork_child -chunk_dss_postfork_parent +chunk_dss_mergeable chunk_dss_prec_get chunk_dss_prec_set -chunk_dss_prefork chunk_hooks_default chunk_hooks_get chunk_hooks_set chunk_in_dss chunk_lookup chunk_npages -chunk_postfork_child -chunk_postfork_parent -chunk_prefork chunk_purge_wrapper chunk_register chunks_rtree chunksize chunksize_mask ckh_count ckh_delete ckh_insert @@ -282,24 +282,21 @@ huge_malloc huge_palloc huge_prof_tctx_get huge_prof_tctx_reset huge_prof_tctx_set huge_ralloc huge_ralloc_no_move huge_salloc iaalloc +ialloc iallocztm -icalloc -icalloct +iarena_cleanup idalloc -idalloct idalloctm -imalloc -imalloct in_valgrind index2size index2size_compute index2size_lookup index2size_tab ipalloc ipalloct ipallocztm @@ -315,16 +312,19 @@ ivsalloc ixalloc jemalloc_postfork_child jemalloc_postfork_parent jemalloc_prefork large_maxclass lg_floor lg_prof_sample malloc_cprintf +malloc_mutex_assert_not_owner +malloc_mutex_assert_owner +malloc_mutex_boot malloc_mutex_init malloc_mutex_lock malloc_mutex_postfork_child malloc_mutex_postfork_parent malloc_mutex_prefork malloc_mutex_unlock malloc_printf malloc_snprintf @@ -336,37 +336,37 @@ malloc_tsd_dalloc malloc_tsd_malloc malloc_tsd_no_cleanup malloc_vcprintf malloc_vsnprintf malloc_write map_bias map_misc_offset mb_write -mutex_boot +narenas_auto narenas_tdata_cleanup narenas_total_get ncpus nhbins nhclasses nlclasses nstime_add nstime_compare nstime_copy nstime_divide 
nstime_idivide nstime_imultiply nstime_init nstime_init2 +nstime_monotonic nstime_ns nstime_nsec nstime_sec nstime_subtract nstime_update -nstime_update_impl opt_abort opt_decay_time opt_dss opt_junk opt_junk_alloc opt_junk_free opt_lg_chunk opt_lg_dirty_mult @@ -386,27 +386,39 @@ opt_purge opt_quarantine opt_redzone opt_stats_print opt_tcache opt_utrace opt_xmalloc opt_zero p2rz +pages_boot pages_commit pages_decommit pages_map pages_purge pages_trim pages_unmap +pind2sz +pind2sz_compute +pind2sz_lookup +pind2sz_tab pow2_ceil_u32 pow2_ceil_u64 pow2_ceil_zu -prng_lg_range -prng_range +prng_lg_range_u32 +prng_lg_range_u64 +prng_lg_range_zu +prng_range_u32 +prng_range_u64 +prng_range_zu +prng_state_next_u32 +prng_state_next_u64 +prng_state_next_zu prof_active prof_active_get prof_active_get_unlocked prof_active_set prof_alloc_prep prof_alloc_rollback prof_backtrace prof_boot0 @@ -445,22 +457,23 @@ prof_tdata_get prof_tdata_init prof_tdata_reinit prof_thread_active_get prof_thread_active_init_get prof_thread_active_init_set prof_thread_active_set prof_thread_name_get prof_thread_name_set +psz2ind +psz2u purge_mode_names quarantine quarantine_alloc_hook quarantine_alloc_hook_work quarantine_cleanup -register_zone rtree_child_read rtree_child_read_hard rtree_child_tryread rtree_delete rtree_get rtree_new rtree_node_valid rtree_set @@ -468,37 +481,36 @@ rtree_start_level rtree_subkey rtree_subtree_read rtree_subtree_read_hard rtree_subtree_tryread rtree_val_read rtree_val_write run_quantize_ceil run_quantize_floor -run_quantize_max s2u s2u_compute s2u_lookup sa2u set_errno size2index size2index_compute size2index_lookup size2index_tab +spin_adaptive +spin_init stats_cactive stats_cactive_add stats_cactive_get stats_cactive_sub stats_print tcache_alloc_easy tcache_alloc_large tcache_alloc_small tcache_alloc_small_hard -tcache_arena_associate -tcache_arena_dissociate tcache_arena_reassociate tcache_bin_flush_large tcache_bin_flush_small tcache_bin_info tcache_boot 
tcache_cleanup tcache_create tcache_dalloc_large @@ -534,29 +546,35 @@ tsd_arenas_tdata_bypass_set tsd_arenas_tdata_bypassp_get tsd_arenas_tdata_get tsd_arenas_tdata_set tsd_arenas_tdatap_get tsd_boot tsd_boot0 tsd_boot1 tsd_booted +tsd_booted_get tsd_cleanup tsd_cleanup_wrapper tsd_fetch +tsd_fetch_impl tsd_get +tsd_get_allocates +tsd_iarena_get +tsd_iarena_set +tsd_iarenap_get +tsd_initialized +tsd_init_check_recursion +tsd_init_finish +tsd_init_head tsd_narenas_tdata_get tsd_narenas_tdata_set tsd_narenas_tdatap_get tsd_wrapper_get tsd_wrapper_set -tsd_initialized -tsd_init_check_recursion -tsd_init_finish -tsd_init_head tsd_nominal tsd_prof_tdata_get tsd_prof_tdata_set tsd_prof_tdatap_get tsd_quarantine_get tsd_quarantine_set tsd_quarantinep_get tsd_set @@ -569,13 +587,40 @@ tsd_tcachep_get tsd_thread_allocated_get tsd_thread_allocated_set tsd_thread_allocatedp_get tsd_thread_deallocated_get tsd_thread_deallocated_set tsd_thread_deallocatedp_get tsd_tls tsd_tsd +tsd_tsdn +tsd_witness_fork_get +tsd_witness_fork_set +tsd_witness_forkp_get +tsd_witnesses_get +tsd_witnesses_set +tsd_witnessesp_get +tsdn_fetch +tsdn_null +tsdn_tsd u2rz valgrind_freelike_block valgrind_make_mem_defined valgrind_make_mem_noaccess valgrind_make_mem_undefined +witness_assert_lockless +witness_assert_not_owner +witness_assert_owner +witness_fork_cleanup +witness_init +witness_lock +witness_lock_error +witness_lockless_error +witness_not_owner_error +witness_owner +witness_owner_error +witness_postfork_child +witness_postfork_parent +witness_prefork +witness_unlock +witnesses_cleanup +zone_register
--- a/memory/jemalloc/src/include/jemalloc/internal/prng.h +++ b/memory/jemalloc/src/include/jemalloc/internal/prng.h @@ -14,66 +14,194 @@ * * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * * This choice of m has the disadvantage that the quality of the bits is * proportional to bit position. For example, the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. */ -#define PRNG_A UINT64_C(6364136223846793005) -#define PRNG_C UINT64_C(1442695040888963407) + +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) + +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE -uint64_t prng_lg_range(uint64_t *state, unsigned lg_range); -uint64_t prng_range(uint64_t *state, uint64_t range); +uint32_t prng_state_next_u32(uint32_t state); +uint64_t prng_state_next_u64(uint64_t state); +size_t prng_state_next_zu(size_t state); + +uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, + bool atomic); +uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range); +size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic); + +uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic); +uint64_t prng_range_u64(uint64_t *state, uint64_t range); +size_t prng_range_zu(size_t *state, size_t range, bool atomic); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_)) +JEMALLOC_ALWAYS_INLINE uint32_t +prng_state_next_u32(uint32_t state) +{ + + 
return ((state * PRNG_A_32) + PRNG_C_32); +} + JEMALLOC_ALWAYS_INLINE uint64_t -prng_lg_range(uint64_t *state, unsigned lg_range) +prng_state_next_u64(uint64_t state) +{ + + return ((state * PRNG_A_64) + PRNG_C_64); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_state_next_zu(size_t state) +{ + +#if LG_SIZEOF_PTR == 2 + return ((state * PRNG_A_32) + PRNG_C_32); +#elif LG_SIZEOF_PTR == 3 + return ((state * PRNG_A_64) + PRNG_C_64); +#else +#error Unsupported pointer size +#endif +} + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) { - uint64_t ret; + uint32_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 32); + + if (atomic) { + uint32_t state0; + + do { + state0 = atomic_read_uint32(state); + state1 = prng_state_next_u32(state0); + } while (atomic_cas_uint32(state, state0, state1)); + } else { + state1 = prng_state_next_u32(*state); + *state = state1; + } + ret = state1 >> (32 - lg_range); + + return (ret); +} + +/* 64-bit atomic operations cannot be supported on all relevant platforms. 
*/ +JEMALLOC_ALWAYS_INLINE uint64_t +prng_lg_range_u64(uint64_t *state, unsigned lg_range) +{ + uint64_t ret, state1; assert(lg_range > 0); assert(lg_range <= 64); - ret = (*state * PRNG_A) + PRNG_C; - *state = ret; - ret >>= (64 - lg_range); + state1 = prng_state_next_u64(*state); + *state = state1; + ret = state1 >> (64 - lg_range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) +{ + size_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); + + if (atomic) { + size_t state0; + + do { + state0 = atomic_read_z(state); + state1 = prng_state_next_zu(state0); + } while (atomic_cas_z(state, state0, state1)); + } else { + state1 = prng_state_next_zu(*state); + *state = state1; + } + ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_range_u32(uint32_t *state, uint32_t range, bool atomic) +{ + uint32_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u32(state, lg_range, atomic); + } while (ret >= range); return (ret); } JEMALLOC_ALWAYS_INLINE uint64_t -prng_range(uint64_t *state, uint64_t range) +prng_range_u64(uint64_t *state, uint64_t range) { uint64_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { - ret = prng_lg_range(state, lg_range); + ret = prng_lg_range_u64(state, lg_range); + } while (ret >= range); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_range_zu(size_t *state, size_t range, bool atomic) +{ + size_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). 
*/ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_zu(state, lg_range, atomic); } while (ret >= range); return (ret); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/prof.h +++ b/memory/jemalloc/src/include/jemalloc/internal/prof.h @@ -276,76 +276,76 @@ extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. */ extern size_t lg_prof_sample; void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); -void prof_malloc_sample_object(const void *ptr, size_t usize, +void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); #ifdef JEMALLOC_JET size_t prof_tdata_count(void); size_t prof_bt_count(void); const prof_cnt_t *prof_cnt_all(void); typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *prof_dump_open; -typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *); +typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); extern prof_dump_header_t *prof_dump_header; #endif -void prof_idump(void); -bool prof_mdump(const char *filename); -void prof_gdump(void); +void prof_idump(tsdn_t *tsdn); +bool prof_mdump(tsd_t *tsd, const char *filename); +void prof_gdump(tsdn_t *tsdn); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); void prof_tdata_cleanup(tsd_t *tsd); -const char *prof_thread_name_get(void); -bool prof_active_get(void); -bool prof_active_set(bool active); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); +const char *prof_thread_name_get(tsd_t *tsd); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); -bool prof_thread_active_get(void); -bool prof_thread_active_set(bool active); -bool prof_thread_active_init_get(void); -bool prof_thread_active_init_set(bool 
active_init); -bool prof_gdump_get(void); -bool prof_gdump_set(bool active); +bool prof_thread_active_get(tsd_t *tsd); +bool prof_thread_active_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); -bool prof_boot2(void); -void prof_prefork0(void); -void prof_prefork1(void); -void prof_postfork_parent(void); -void prof_postfork_child(void); +bool prof_boot2(tsd_t *tsd); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool prof_active_get_unlocked(void); bool prof_gdump_get_unlocked(void); prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); +prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *tctx); bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, prof_tdata_t **tdata_out); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update); -prof_tctx_t *prof_tctx_get(const void *ptr); -void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); -void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, +void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); -void prof_malloc_sample_object(const void *ptr, size_t usize, - prof_tctx_t *tctx); -void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_realloc(tsd_t 
*tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx); void prof_free(tsd_t *tsd, const void *ptr, size_t usize); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) JEMALLOC_ALWAYS_INLINE bool @@ -393,44 +393,44 @@ prof_tdata_get(tsd_t *tsd, bool create) } assert(tdata == NULL || tdata->attached); } return (tdata); } JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_tctx_get(const void *ptr) +prof_tctx_get(tsdn_t *tsdn, const void *ptr) { cassert(config_prof); assert(ptr != NULL); - return (arena_prof_tctx_get(ptr)); + return (arena_prof_tctx_get(tsdn, ptr)); } JEMALLOC_ALWAYS_INLINE void -prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) +prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); - arena_prof_tctx_set(ptr, usize, tctx); + arena_prof_tctx_set(tsdn, ptr, usize, tctx); } JEMALLOC_ALWAYS_INLINE void -prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, +prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx) { cassert(config_prof); assert(ptr != NULL); - arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx); + arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); } JEMALLOC_ALWAYS_INLINE bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, prof_tdata_t **tdata_out) { prof_tdata_t *tdata; @@ -475,72 +475,73 @@ prof_alloc_prep(tsd_t *tsd, size_t usize prof_backtrace(&bt); ret = prof_lookup(tsd, &bt); } return (ret); } JEMALLOC_ALWAYS_INLINE void -prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx) +prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); - assert(usize == isalloc(ptr, true)); + assert(usize == isalloc(tsdn, ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) - 
prof_malloc_sample_object(ptr, usize, tctx); + prof_malloc_sample_object(tsdn, ptr, usize, tctx); else - prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U); + prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); } JEMALLOC_ALWAYS_INLINE void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) { bool sampled, old_sampled; cassert(config_prof); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); if (prof_active && !updated && ptr != NULL) { - assert(usize == isalloc(ptr, true)); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (prof_sample_accum_update(tsd, usize, true, NULL)) { /* * Don't sample. The usize passed to prof_alloc_prep() * was larger than what actually got allocated, so a * backtrace was captured for this allocation, even * though its actual usize was insufficient to cross the * sample threshold. */ + prof_alloc_rollback(tsd, tctx, true); tctx = (prof_tctx_t *)(uintptr_t)1U; } } sampled = ((uintptr_t)tctx > (uintptr_t)1U); old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); if (unlikely(sampled)) - prof_malloc_sample_object(ptr, usize, tctx); + prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); else - prof_tctx_reset(ptr, usize, old_ptr, old_tctx); + prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); if (unlikely(old_sampled)) prof_free_sampled_object(tsd, old_usize, old_tctx); } JEMALLOC_ALWAYS_INLINE void prof_free(tsd_t *tsd, const void *ptr, size_t usize) { - prof_tctx_t *tctx = prof_tctx_get(ptr); + prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); cassert(config_prof); - assert(usize == isalloc(ptr, true)); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_free_sampled_object(tsd, usize, tctx); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/rtree.h +++ b/memory/jemalloc/src/include/jemalloc/internal/rtree.h @@ -10,19 +10,20 @@ typedef struct rtree_node_elm_s rtree_no typedef struct rtree_level_s rtree_level_t; typedef struct rtree_s rtree_t; /* * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the * machine address width. */ #define LG_RTREE_BITS_PER_LEVEL 4 -#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL) +#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) +/* Maximum rtree height. */ #define RTREE_HEIGHT_MAX \ - ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) + ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) /* Used for two-stage lock-free node initialization. */ #define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) /* * The node allocation callback function's argument is the number of contiguous * rtree_node_elm_t structures to allocate, and the resulting memory must be * zeroed. @@ -106,85 +107,91 @@ rtree_node_elm_t *rtree_child_read_hard( /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); bool rtree_node_valid(rtree_node_elm_t *node); -rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm); +rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, + bool dependent); rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, - unsigned level); + unsigned level, bool dependent); extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent); void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val); -rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level); -rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level); +rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, 
unsigned level, + bool dependent); +rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, + bool dependent); extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -JEMALLOC_INLINE unsigned +JEMALLOC_ALWAYS_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key) { unsigned start_level; if (unlikely(key == 0)) return (rtree->height - 1); start_level = rtree->start_level[lg_floor(key) >> LG_RTREE_BITS_PER_LEVEL]; assert(start_level < rtree->height); return (start_level); } -JEMALLOC_INLINE uintptr_t +JEMALLOC_ALWAYS_INLINE uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) { return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - rtree->levels[level].cumbits)) & ((ZU(1) << rtree->levels[level].bits) - 1)); } -JEMALLOC_INLINE bool +JEMALLOC_ALWAYS_INLINE bool rtree_node_valid(rtree_node_elm_t *node) { return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); } -JEMALLOC_INLINE rtree_node_elm_t * -rtree_child_tryread(rtree_node_elm_t *elm) +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_child_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *child; /* Double-checked read (first read may be stale. 
*/ child = elm->child; - if (!rtree_node_valid(child)) + if (!dependent && !rtree_node_valid(child)) child = atomic_read_p(&elm->pun); + assert(!dependent || child != NULL); return (child); } -JEMALLOC_INLINE rtree_node_elm_t * -rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, + bool dependent) { rtree_node_elm_t *child; - child = rtree_child_tryread(elm); - if (unlikely(!rtree_node_valid(child))) + child = rtree_child_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(child))) child = rtree_child_read_hard(rtree, elm, level); + assert(!dependent || child != NULL); return (child); } -JEMALLOC_INLINE extent_node_t * +JEMALLOC_ALWAYS_INLINE extent_node_t * rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) { if (dependent) { /* * Reading a val on behalf of a pointer to a valid allocation is * guaranteed to be a clean read even without synchronization, * because the rtree update became visible in memory before the @@ -203,91 +210,156 @@ rtree_val_read(rtree_t *rtree, rtree_nod JEMALLOC_INLINE void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) { atomic_write_p(&elm->pun, val); } -JEMALLOC_INLINE rtree_node_elm_t * -rtree_subtree_tryread(rtree_t *rtree, unsigned level) +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) { rtree_node_elm_t *subtree; /* Double-checked read (first read may be stale. 
*/ subtree = rtree->levels[level].subtree; - if (!rtree_node_valid(subtree)) + if (!dependent && unlikely(!rtree_node_valid(subtree))) subtree = atomic_read_p(&rtree->levels[level].subtree_pun); + assert(!dependent || subtree != NULL); return (subtree); } -JEMALLOC_INLINE rtree_node_elm_t * -rtree_subtree_read(rtree_t *rtree, unsigned level) +JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * +rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) { rtree_node_elm_t *subtree; - subtree = rtree_subtree_tryread(rtree, level); - if (unlikely(!rtree_node_valid(subtree))) + subtree = rtree_subtree_tryread(rtree, level, dependent); + if (!dependent && unlikely(!rtree_node_valid(subtree))) subtree = rtree_subtree_read_hard(rtree, level); + assert(!dependent || subtree != NULL); return (subtree); } -JEMALLOC_INLINE extent_node_t * +JEMALLOC_ALWAYS_INLINE extent_node_t * rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) { uintptr_t subkey; - unsigned i, start_level; - rtree_node_elm_t *node, *child; + unsigned start_level; + rtree_node_elm_t *node; start_level = rtree_start_level(rtree, key); - for (i = start_level, node = rtree_subtree_tryread(rtree, start_level); - /**/; i++, node = child) { - if (!dependent && unlikely(!rtree_node_valid(node))) - return (NULL); - subkey = rtree_subkey(rtree, key, i); - if (i == rtree->height - 1) { - /* - * node is a leaf, so it contains values rather than - * child pointers. 
- */ - return (rtree_val_read(rtree, &node[subkey], - dependent)); - } - assert(i < rtree->height - 1); - child = rtree_child_tryread(&node[subkey]); + node = rtree_subtree_tryread(rtree, start_level, dependent); +#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) + switch (start_level + RTREE_GET_BIAS) { +#define RTREE_GET_SUBTREE(level) \ + case level: \ + assert(level < (RTREE_HEIGHT_MAX-1)); \ + if (!dependent && unlikely(!rtree_node_valid(node))) \ + return (NULL); \ + subkey = rtree_subkey(rtree, key, level - \ + RTREE_GET_BIAS); \ + node = rtree_child_tryread(&node[subkey], dependent); \ + /* Fall through. */ +#define RTREE_GET_LEAF(level) \ + case level: \ + assert(level == (RTREE_HEIGHT_MAX-1)); \ + if (!dependent && unlikely(!rtree_node_valid(node))) \ + return (NULL); \ + subkey = rtree_subkey(rtree, key, level - \ + RTREE_GET_BIAS); \ + /* \ + * node is a leaf, so it contains values rather than \ + * child pointers. \ + */ \ + return (rtree_val_read(rtree, &node[subkey], \ + dependent)); +#if RTREE_HEIGHT_MAX > 1 + RTREE_GET_SUBTREE(0) +#endif +#if RTREE_HEIGHT_MAX > 2 + RTREE_GET_SUBTREE(1) +#endif +#if RTREE_HEIGHT_MAX > 3 + RTREE_GET_SUBTREE(2) +#endif +#if RTREE_HEIGHT_MAX > 4 + RTREE_GET_SUBTREE(3) +#endif +#if RTREE_HEIGHT_MAX > 5 + RTREE_GET_SUBTREE(4) +#endif +#if RTREE_HEIGHT_MAX > 6 + RTREE_GET_SUBTREE(5) +#endif +#if RTREE_HEIGHT_MAX > 7 + RTREE_GET_SUBTREE(6) +#endif +#if RTREE_HEIGHT_MAX > 8 + RTREE_GET_SUBTREE(7) +#endif +#if RTREE_HEIGHT_MAX > 9 + RTREE_GET_SUBTREE(8) +#endif +#if RTREE_HEIGHT_MAX > 10 + RTREE_GET_SUBTREE(9) +#endif +#if RTREE_HEIGHT_MAX > 11 + RTREE_GET_SUBTREE(10) +#endif +#if RTREE_HEIGHT_MAX > 12 + RTREE_GET_SUBTREE(11) +#endif +#if RTREE_HEIGHT_MAX > 13 + RTREE_GET_SUBTREE(12) +#endif +#if RTREE_HEIGHT_MAX > 14 + RTREE_GET_SUBTREE(13) +#endif +#if RTREE_HEIGHT_MAX > 15 + RTREE_GET_SUBTREE(14) +#endif +#if RTREE_HEIGHT_MAX > 16 +# error Unsupported RTREE_HEIGHT_MAX +#endif + RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) 
+#undef RTREE_GET_SUBTREE +#undef RTREE_GET_LEAF + default: not_reached(); } +#undef RTREE_GET_BIAS not_reached(); } JEMALLOC_INLINE bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) { uintptr_t subkey; unsigned i, start_level; rtree_node_elm_t *node, *child; start_level = rtree_start_level(rtree, key); - node = rtree_subtree_read(rtree, start_level); + node = rtree_subtree_read(rtree, start_level, false); if (node == NULL) return (true); for (i = start_level; /**/; i++, node = child) { subkey = rtree_subkey(rtree, key, i); if (i == rtree->height - 1) { /* * node is a leaf, so it contains values rather than * child pointers. */ rtree_val_write(rtree, &node[subkey], val); return (false); } assert(i + 1 < rtree->height); - child = rtree_child_read(rtree, &node[subkey], i); + child = rtree_child_read(rtree, &node[subkey], i, false); if (child == NULL) return (true); } not_reached(); } #endif #endif /* JEMALLOC_H_INLINES */
--- a/memory/jemalloc/src/include/jemalloc/internal/size_classes.sh +++ b/memory/jemalloc/src/include/jemalloc/internal/size_classes.sh @@ -43,16 +43,31 @@ lg() { size_class() { index=$1 lg_grp=$2 lg_delta=$3 ndelta=$4 lg_p=$5 lg_kmax=$6 + if [ ${lg_delta} -ge ${lg_p} ] ; then + psz="yes" + else + pow2 ${lg_p}; p=${pow2_result} + pow2 ${lg_grp}; grp=${pow2_result} + pow2 ${lg_delta}; delta=${pow2_result} + sz=$((${grp} + ${delta} * ${ndelta})) + npgs=$((${sz} / ${p})) + if [ ${sz} -eq $((${npgs} * ${p})) ] ; then + psz="yes" + else + psz="no" + fi + fi + lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} if [ ${pow2_result} -lt ${ndelta} ] ; then rem="yes" else rem="no" fi lg_size=${lg_grp} @@ -69,54 +84,59 @@ size_class() { bin="no" fi if [ ${lg_size} -lt ${lg_kmax} \ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then lg_delta_lookup=${lg_delta} else lg_delta_lookup="no" fi - printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup} + printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup} # Defined upon return: + # - psz ("yes" or "no") + # - bin ("yes" or "no") # - lg_delta_lookup (${lg_delta} or "no") - # - bin ("yes" or "no") } sep_line() { - echo " \\" + echo " \\" } size_classes() { lg_z=$1 lg_q=$2 lg_t=$3 lg_p=$4 lg_g=$5 pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} pow2 ${lg_g}; g=${pow2_result} echo "#define SIZE_CLASSES \\" - echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\" + echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\" ntbins=0 nlbins=0 lg_tiny_maxclass='"NA"' nbins=0 + npsizes=0 # Tiny size classes. 
ndelta=0 index=0 lg_grp=${lg_t} lg_delta=${lg_grp} while [ ${lg_grp} -lt ${lg_q} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) fi + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) fi ntbins=$((${ntbins} + 1)) lg_tiny_maxclass=${lg_grp} # Final written value is correct. index=$((${index} + 1)) lg_delta=${lg_grp} lg_grp=$((${lg_grp} + 1)) @@ -128,21 +148,27 @@ size_classes() { # The first size class has an unusual encoding, because the size has to be # split between grp and delta*ndelta. lg_grp=$((${lg_grp} - 1)) ndelta=1 size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi fi while [ ${ndelta} -lt ${g} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi done # All remaining groups. 
lg_grp=$((${lg_grp} + ${lg_g})) while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do sep_line ndelta=1 if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then @@ -152,16 +178,19 @@ size_classes() { fi while [ ${ndelta} -le ${ndelta_limit} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) # Final written value is correct: lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" fi + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) # Final written value is correct: small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" if [ ${lg_g} -gt 0 ] ; then lg_large_minclass=$((${lg_grp} + 1)) else lg_large_minclass=$((${lg_grp} + 2)) @@ -178,16 +207,17 @@ size_classes() { echo nsizes=${index} # Defined upon completion: # - ntbins # - nlbins # - nbins # - nsizes + # - npsizes # - lg_tiny_maxclass # - lookup_maxclass # - small_maxclass # - lg_large_minclass # - huge_maxclass } cat <<EOF @@ -195,30 +225,31 @@ cat <<EOF /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to * be defined prior to inclusion, and it in turn defines: * * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. - * SIZE_CLASSES: Complete table of - * SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) - * tuples. + * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, + * bin, lg_delta_lookup) tuples. * index: Size class index. * lg_grp: Lg group base size (no deltas added). * lg_delta: Lg delta to previous size class. * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta + * psz: 'yes' if a multiple of the page size, 'no' otherwise. * bin: 'yes' if a small bin size class, 'no' otherwise. 
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no' * otherwise. * NTBINS: Number of tiny bins. * NLBINS: Number of bins supported by the lookup table. * NBINS: Number of small size class bins. * NSIZES: Number of size classes. + * NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE). * LG_TINY_MAXCLASS: Lg of maximum tiny size class. * LOOKUP_MAXCLASS: Maximum size class included in lookup table. * SMALL_MAXCLASS: Maximum small size class. * LG_LARGE_MINCLASS: Lg of minimum large size class. * HUGE_MAXCLASS: Maximum (huge) size class. */ #define LG_SIZE_CLASS_GROUP ${lg_g} @@ -233,16 +264,17 @@ for lg_z in ${lg_zarr} ; do for lg_p in ${lg_parr} ; do echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})" size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g} echo "#define SIZE_CLASSES_DEFINED" echo "#define NTBINS ${ntbins}" echo "#define NLBINS ${nlbins}" echo "#define NBINS ${nbins}" echo "#define NSIZES ${nsizes}" + echo "#define NPSIZES ${npsizes}" echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}" echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}" echo "#define SMALL_MAXCLASS ${small_maxclass}" echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}" echo "#define HUGE_MAXCLASS ${huge_maxclass}" echo "#endif" echo done
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/include/jemalloc/internal/spin.h @@ -0,0 +1,51 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct spin_s spin_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct spin_s { + unsigned iteration; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void spin_init(spin_t *spin); +void spin_adaptive(spin_t *spin); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_)) +JEMALLOC_INLINE void +spin_init(spin_t *spin) +{ + + spin->iteration = 0; +} + +JEMALLOC_INLINE void +spin_adaptive(spin_t *spin) +{ + volatile uint64_t i; + + for (i = 0; i < (KQU(1) << spin->iteration); i++) + CPU_SPINWAIT; + + if (spin->iteration < 63) + spin->iteration++; +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ +
--- a/memory/jemalloc/src/include/jemalloc/internal/stats.h +++ b/memory/jemalloc/src/include/jemalloc/internal/stats.h @@ -98,16 +98,24 @@ struct malloc_huge_stats_s { size_t curhchunks; }; struct arena_stats_s { /* Number of bytes currently mapped. */ size_t mapped; /* + * Number of bytes currently retained as a side effect of munmap() being + * disabled/bypassed. Retained bytes are technically mapped (though + * always decommitted or purged), but they are excluded from the mapped + * statistic (above). + */ + size_t retained; + + /* * Total number of purge sweeps, total number of madvise calls made, * and total pages purged in order to keep dirty unused memory under * control. */ uint64_t npurge; uint64_t nmadvise; uint64_t purged;
--- a/memory/jemalloc/src/include/jemalloc/internal/tcache.h +++ b/memory/jemalloc/src/include/jemalloc/internal/tcache.h @@ -125,37 +125,35 @@ extern size_t tcache_maxclass; * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are * completely disjoint from this data structure. tcaches starts off as a sparse * array, so it has no physical memory footprint until individual pages are * touched. This allows the entire array to be allocated the first time an * explicit tcache is created without a disproportionate impact on memory usage. */ extern tcaches_t *tcaches; -size_t tcache_salloc(const void *ptr); +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); -void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, bool *tcache_success); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, unsigned rem); void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); -void tcache_arena_associate(tcache_t *tcache, arena_t *arena); -void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, - arena_t *newarena); -void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena); +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, + arena_t *oldarena, arena_t *newarena); tcache_t *tcache_get_hard(tsd_t *tsd); -tcache_t *tcache_create(tsd_t *tsd, arena_t *arena); +tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); void tcache_cleanup(tsd_t *tsd); void tcache_enabled_cleanup(tsd_t *tsd); -void tcache_stats_merge(tcache_t *tcache, arena_t *arena); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); bool tcaches_create(tsd_t *tsd, unsigned *r_ind); void tcaches_flush(tsd_t *tsd, unsigned ind); void tcaches_destroy(tsd_t *tsd, unsigned ind); 
-bool tcache_boot(void); +bool tcache_boot(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache); void tcache_flush(void); @@ -292,30 +290,30 @@ tcache_alloc_small(tsd_t *tsd, arena_t * ret = tcache_alloc_easy(tbin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { bool tcache_hard_success; arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); - ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind, - &tcache_hard_success); + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, + tbin, binind, &tcache_hard_success); if (tcache_hard_success == false) return (NULL); } assert(ret); /* * Only compute usize if required. The checks in the following if * statement are all static. */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = index2size(binind); - assert(tcache_salloc(ret) == usize); + assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (unlikely(opt_zero)) @@ -353,17 +351,17 @@ tcache_alloc_large(tsd_t *tsd, arena_t * /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. 
*/ arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); - ret = arena_malloc_large(tsd, arena, binind, zero); + ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); if (ret == NULL) return (NULL); } else { size_t usize JEMALLOC_CC_SILENCE_INIT(0); /* Only compute usize on demand */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { @@ -376,19 +374,20 @@ tcache_alloc_large(tsd_t *tsd, arena_t * (arena_chunk_t *)CHUNK_ADDR2BASE(ret); size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE); arena_mapbits_large_binind_set(chunk, pageind, BININD_INVALID); } if (likely(!zero)) { if (slow_path && config_fill) { - if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); - else if (unlikely(opt_zero)) + if (unlikely(opt_junk_alloc)) { + memset(ret, JEMALLOC_ALLOC_JUNK, + usize); + } else if (unlikely(opt_zero)) memset(ret, 0, usize); } } else memset(ret, 0, usize); if (config_stats) tbin->tstats.nrequests++; if (config_prof) @@ -401,17 +400,17 @@ tcache_alloc_large(tsd_t *tsd, arena_t * JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; - assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); if (slow_path && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { tcache_bin_flush_small(tsd, tcache, tbin, binind, @@ -428,18 +427,18 @@ JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, bool slow_path) { szind_t binind; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert((size & PAGE_MASK) == 0); - assert(tcache_salloc(ptr) > SMALL_MAXCLASS); - assert(tcache_salloc(ptr) <= tcache_maxclass); + 
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); binind = size2index(size); if (slow_path && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_large(ptr, size); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; @@ -453,16 +452,18 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE tcache_t * tcaches_get(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; - if (unlikely(elm->tcache == NULL)) - elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL)); + if (unlikely(elm->tcache == NULL)) { + elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, + NULL)); + } return (elm->tcache); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/tsd.h +++ b/memory/jemalloc/src/include/jemalloc/internal/tsd.h @@ -8,16 +8,19 @@ typedef bool (*malloc_tsd_cleanup_t)(voi #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) typedef struct tsd_init_block_s tsd_init_block_t; typedef struct tsd_init_head_s tsd_init_head_t; #endif typedef struct tsd_s tsd_t; +typedef struct tsdn_s tsdn_t; + +#define TSDN_NULL ((tsdn_t *)0) typedef enum { tsd_state_uninitialized, tsd_state_nominal, tsd_state_purgatory, tsd_state_reincarnated } tsd_state_t; @@ -39,17 +42,18 @@ typedef enum { * In example.c: * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, * example_tsd_cleanup) * * The result is a set of generated functions, e.g.: * * bool example_tsd_boot(void) {...} - * example_t *example_tsd_get() {...} + * bool example_tsd_booted_get(void) {...} + * example_t *example_tsd_get(bool init) {...} * void example_tsd_set(example_t *val) {...} * * Note that all of the functions deal in terms of (a_type *) rather than * (a_type) so that it is possible to support non-pointer types (unlike * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is * cast to (void *). This means that the cleanup function needs to cast the * function argument to (a_type *), then dereference the resulting pointer to * access fields, e.g. @@ -93,18 +97,20 @@ typedef struct { \ /* malloc_tsd_protos(). */ #define malloc_tsd_protos(a_attr, a_name, a_type) \ a_attr bool \ a_name##tsd_boot0(void); \ a_attr void \ a_name##tsd_boot1(void); \ a_attr bool \ a_name##tsd_boot(void); \ +a_attr bool \ +a_name##tsd_booted_get(void); \ a_attr a_type * \ -a_name##tsd_get(void); \ +a_name##tsd_get(bool init); \ a_attr void \ a_name##tsd_set(a_type *val); /* malloc_tsd_externs(). 
*/ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##tsd_tls; \ extern __thread bool a_name##tsd_initialized; \ @@ -196,19 +202,31 @@ a_name##tsd_boot1(void) \ /* Do nothing. */ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (false); \ +} \ /* Get/set. */ \ a_attr a_type * \ -a_name##tsd_get(void) \ +a_name##tsd_get(bool init) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ @@ -241,19 +259,31 @@ a_name##tsd_boot1(void) \ /* Do nothing. */ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (false); \ +} \ /* Get/set. 
*/ \ a_attr a_type * \ -a_name##tsd_get(void) \ +a_name##tsd_get(bool init) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ @@ -302,24 +332,24 @@ a_name##tsd_wrapper_set(a_name##tsd_wrap \ if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ -a_name##tsd_wrapper_get(void) \ +a_name##tsd_wrapper_get(bool init) \ { \ DWORD error = GetLastError(); \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ TlsGetValue(a_name##tsd_tsd); \ SetLastError(error); \ \ - if (unlikely(wrapper == NULL)) { \ + if (init && unlikely(wrapper == NULL)) { \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ wrapper->initialized = false; \ @@ -363,33 +393,47 @@ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (true); \ +} \ /* Get/set. 
*/ \ a_attr a_type * \ -a_name##tsd_get(void) \ +a_name##tsd_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ + wrapper = a_name##tsd_wrapper_get(init); \ + if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ + return (NULL); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ + wrapper = a_name##tsd_wrapper_get(true); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #else #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ @@ -423,22 +467,22 @@ a_name##tsd_wrapper_set(a_name##tsd_wrap if (pthread_setspecific(a_name##tsd_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ -a_name##tsd_wrapper_get(void) \ +a_name##tsd_wrapper_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ pthread_getspecific(a_name##tsd_tsd); \ \ - if (unlikely(wrapper == NULL)) { \ + if (init && unlikely(wrapper == NULL)) { \ tsd_init_block_t block; \ wrapper = tsd_init_check_recursion( \ &a_name##tsd_init_head, &block); \ if (wrapper) \ return (wrapper); \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ block.data = wrapper; \ @@ -485,33 +529,47 @@ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ +a_attr bool \ +a_name##tsd_get_allocates(void) \ +{ \ + \ + return (true); \ +} \ /* Get/set. 
*/ \ a_attr a_type * \ -a_name##tsd_get(void) \ +a_name##tsd_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ + wrapper = a_name##tsd_wrapper_get(init); \ + if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ + return (NULL); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ + wrapper = a_name##tsd_wrapper_get(true); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ @@ -531,108 +589,146 @@ struct tsd_init_head_s { #endif #define MALLOC_TSD \ /* O(name, type) */ \ O(tcache, tcache_t *) \ O(thread_allocated, uint64_t) \ O(thread_deallocated, uint64_t) \ O(prof_tdata, prof_tdata_t *) \ + O(iarena, arena_t *) \ O(arena, arena_t *) \ O(arenas_tdata, arena_tdata_t *) \ O(narenas_tdata, unsigned) \ O(arenas_tdata_bypass, bool) \ O(tcache_enabled, tcache_enabled_t) \ O(quarantine, quarantine_t *) \ + O(witnesses, witness_list_t) \ + O(witness_fork, bool) \ #define TSD_INITIALIZER { \ tsd_state_uninitialized, \ NULL, \ 0, \ 0, \ NULL, \ NULL, \ NULL, \ + NULL, \ 0, \ false, \ tcache_enabled_default, \ - NULL \ + NULL, \ + ql_head_initializer(witnesses), \ + false \ } struct tsd_s { tsd_state_t state; #define O(n, t) \ t n; MALLOC_TSD #undef O }; +/* + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. 
+ */ +struct tsdn_s { + tsd_t tsd; +}; + static const tsd_t tsd_initializer = TSD_INITIALIZER; malloc_tsd_types(, tsd_t) #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_no_cleanup(void *arg); void malloc_tsd_cleanup_register(bool (*f)(void)); -bool malloc_tsd_boot0(void); +tsd_t *malloc_tsd_boot0(void); void malloc_tsd_boot1(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); #endif void tsd_cleanup(void *arg); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) +tsd_t *tsd_fetch_impl(bool init); tsd_t *tsd_fetch(void); +tsdn_t *tsd_tsdn(tsd_t *tsd); bool tsd_nominal(tsd_t *tsd); #define O(n, t) \ t *tsd_##n##p_get(tsd_t *tsd); \ t tsd_##n##_get(tsd_t *tsd); \ void tsd_##n##_set(tsd_t *tsd, t n); MALLOC_TSD #undef O +tsdn_t *tsdn_fetch(void); +bool tsdn_null(const tsdn_t *tsdn); +tsd_t *tsdn_tsd(tsdn_t *tsdn); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) malloc_tsd_externs(, tsd_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) JEMALLOC_ALWAYS_INLINE tsd_t * -tsd_fetch(void) +tsd_fetch_impl(bool init) { - tsd_t *tsd = tsd_get(); + tsd_t *tsd = tsd_get(init); + + if (!init && tsd_get_allocates() && tsd == NULL) + return (NULL); + assert(tsd != NULL); if (unlikely(tsd->state != tsd_state_nominal)) { if (tsd->state == tsd_state_uninitialized) { tsd->state = tsd_state_nominal; /* Trigger cleanup handler registration. 
*/ tsd_set(tsd); } else if (tsd->state == tsd_state_purgatory) { tsd->state = tsd_state_reincarnated; tsd_set(tsd); } else assert(tsd->state == tsd_state_reincarnated); } return (tsd); } +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch(void) +{ + + return (tsd_fetch_impl(true)); +} + +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsd_tsdn(tsd_t *tsd) +{ + + return ((tsdn_t *)tsd); +} + JEMALLOC_INLINE bool tsd_nominal(tsd_t *tsd) { return (tsd->state == tsd_state_nominal); } #define O(n, t) \ @@ -654,12 +750,38 @@ JEMALLOC_ALWAYS_INLINE void \ tsd_##n##_set(tsd_t *tsd, t n) \ { \ \ assert(tsd->state == tsd_state_nominal); \ tsd->n = n; \ } MALLOC_TSD #undef O + +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsdn_fetch(void) +{ + + if (!tsd_booted_get()) + return (NULL); + + return (tsd_tsdn(tsd_fetch_impl(false))); +} + +JEMALLOC_ALWAYS_INLINE bool +tsdn_null(const tsdn_t *tsdn) +{ + + return (tsdn == NULL); +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsdn_tsd(tsdn_t *tsdn) +{ + + assert(!tsdn_null(tsdn)); + + return (&tsdn->tsd); +} #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/internal/util.h +++ b/memory/jemalloc/src/include/jemalloc/internal/util.h @@ -35,16 +35,20 @@ #define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ #define MALLOC_PRINTF_BUFSIZE 4096 +/* Junk fill patterns. */ +#define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) +#define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) + /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* * Silence compiler warnings due to uninitialized values. This is used @@ -52,40 +56,30 @@ * uninitialized. */ #ifdef JEMALLOC_CC_SILENCE # define JEMALLOC_CC_SILENCE_INIT(v) = v #else # define JEMALLOC_CC_SILENCE_INIT(v) #endif -#define JEMALLOC_GNUC_PREREQ(major, minor) \ - (!defined(__clang__) && \ - (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))) -#ifndef __has_builtin -# define __has_builtin(builtin) (0) -#endif -#define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \ - (defined(__clang__) && __has_builtin(builtin)) - #ifdef __GNUC__ # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) -# if JEMALLOC_GNUC_PREREQ(4, 6) || \ - JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable) -# define unreachable() __builtin_unreachable() -# else -# define unreachable() -# endif #else # define likely(x) !!(x) # define unlikely(x) !!(x) -# define unreachable() #endif +#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure +#endif + +#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() + #include "jemalloc/internal/assert.h" /* Use to assert a particular configuration, e.g., cassert(config_debug). 
*/ #define cassert(c) do { \ if (unlikely(!(c))) \ not_reached(); \ } while (0) @@ -101,19 +95,19 @@ int buferror(int err, char *buf, size_t uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base); void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math. */ -int malloc_vsnprintf(char *str, size_t size, const char *format, +size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); -int malloc_snprintf(char *str, size_t size, const char *format, ...) +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); #endif /* JEMALLOC_H_EXTERNS */
--- a/memory/jemalloc/src/include/jemalloc/internal/valgrind.h +++ b/memory/jemalloc/src/include/jemalloc/internal/valgrind.h @@ -25,25 +25,27 @@ if (unlikely(in_valgrind)) \ valgrind_make_mem_defined(ptr, usize); \ } while (0) /* * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro * calls must be embedded in macros rather than in functions so that when * Valgrind reports errors, there are no extra stack frames in the backtraces. */ -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ - if (unlikely(in_valgrind && cond)) \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ +#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \ + if (unlikely(in_valgrind && cond)) { \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \ + zero); \ + } \ } while (0) -#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do { \ if (unlikely(in_valgrind)) { \ - size_t rzsize = p2rz(ptr); \ + size_t rzsize = p2rz(tsdn, ptr); \ \ if (!maybe_moved || ptr == old_ptr) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ usize, rzsize); \ if (zero && old_usize < usize) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ old_usize), usize - old_usize); \ @@ -76,18 +78,18 @@ if (unlikely(in_valgrind)) \ valgrind_freelike_block(ptr, rzsize); \ } while (0) #else #define RUNNING_ON_VALGRIND ((unsigned)0) #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ +#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, 
tsdn, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do {} while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/include/jemalloc/internal/witness.h @@ -0,0 +1,266 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct witness_s witness_t; +typedef unsigned witness_rank_t; +typedef ql_head(witness_t) witness_list_t; +typedef int witness_comp_t (const witness_t *, const witness_t *); + +/* + * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by + * the witness machinery. + */ +#define WITNESS_RANK_OMIT 0U + +#define WITNESS_RANK_INIT 1U +#define WITNESS_RANK_CTL 1U +#define WITNESS_RANK_ARENAS 2U + +#define WITNESS_RANK_PROF_DUMP 3U +#define WITNESS_RANK_PROF_BT2GCTX 4U +#define WITNESS_RANK_PROF_TDATAS 5U +#define WITNESS_RANK_PROF_TDATA 6U +#define WITNESS_RANK_PROF_GCTX 7U + +#define WITNESS_RANK_ARENA 8U +#define WITNESS_RANK_ARENA_CHUNKS 9U +#define WITNESS_RANK_ARENA_NODE_CACHE 10 + +#define WITNESS_RANK_BASE 11U + +#define WITNESS_RANK_LEAF 0xffffffffU +#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF +#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF +#define WITNESS_RANK_DSS WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF + +#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}} + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct witness_s { + /* Name, used for printing lock order reversal messages. */ + const char *name; + + /* + * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses + * must be acquired in order of increasing rank. 
+ */ + witness_rank_t rank; + + /* + * If two witnesses are of equal rank and they have the samp comp + * function pointer, it is called as a last attempt to differentiate + * between witnesses of equal rank. + */ + witness_comp_t *comp; + + /* Linkage for thread's currently owned locks. */ + ql_elm(witness_t) link; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp); +#ifdef JEMALLOC_JET +typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); +extern witness_lock_error_t *witness_lock_error; +#else +void witness_lock_error(const witness_list_t *witnesses, + const witness_t *witness); +#endif +#ifdef JEMALLOC_JET +typedef void (witness_owner_error_t)(const witness_t *); +extern witness_owner_error_t *witness_owner_error; +#else +void witness_owner_error(const witness_t *witness); +#endif +#ifdef JEMALLOC_JET +typedef void (witness_not_owner_error_t)(const witness_t *); +extern witness_not_owner_error_t *witness_not_owner_error; +#else +void witness_not_owner_error(const witness_t *witness); +#endif +#ifdef JEMALLOC_JET +typedef void (witness_lockless_error_t)(const witness_list_t *); +extern witness_lockless_error_t *witness_lockless_error; +#else +void witness_lockless_error(const witness_list_t *witnesses); +#endif + +void witnesses_cleanup(tsd_t *tsd); +void witness_fork_cleanup(tsd_t *tsd); +void witness_prefork(tsd_t *tsd); +void witness_postfork_parent(tsd_t *tsd); +void witness_postfork_child(tsd_t *tsd); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool witness_owner(tsd_t *tsd, const witness_t *witness); +void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness); +void 
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness); +void witness_assert_lockless(tsdn_t *tsdn); +void witness_lock(tsdn_t *tsdn, witness_t *witness); +void witness_unlock(tsdn_t *tsdn, witness_t *witness); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) +JEMALLOC_INLINE bool +witness_owner(tsd_t *tsd, const witness_t *witness) +{ + witness_list_t *witnesses; + witness_t *w; + + witnesses = tsd_witnessesp_get(tsd); + ql_foreach(w, witnesses, link) { + if (w == witness) + return (true); + } + + return (false); +} + +JEMALLOC_INLINE void +witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) +{ + tsd_t *tsd; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + if (witness_owner(tsd, witness)) + return; + witness_owner_error(witness); +} + +JEMALLOC_INLINE void +witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + witnesses = tsd_witnessesp_get(tsd); + ql_foreach(w, witnesses, link) { + if (w == witness) + witness_not_owner_error(witness); + } +} + +JEMALLOC_INLINE void +witness_assert_lockless(tsdn_t *tsdn) +{ + tsd_t *tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + + witnesses = tsd_witnessesp_get(tsd); + w = ql_last(witnesses, link); + if (w != NULL) + witness_lockless_error(witnesses); +} + +JEMALLOC_INLINE void +witness_lock(tsdn_t *tsdn, witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + witness_assert_not_owner(tsdn, witness); + 
+ witnesses = tsd_witnessesp_get(tsd); + w = ql_last(witnesses, link); + if (w == NULL) { + /* No other locks; do nothing. */ + } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) { + /* Forking, and relaxed ranking satisfied. */ + } else if (w->rank > witness->rank) { + /* Not forking, rank order reversal. */ + witness_lock_error(witnesses, witness); + } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != + witness->comp || w->comp(w, witness) > 0)) { + /* + * Missing/incompatible comparison function, or comparison + * function indicates rank order reversal. + */ + witness_lock_error(witnesses, witness); + } + + ql_elm_new(witness, link); + ql_tail_insert(witnesses, witness, link); +} + +JEMALLOC_INLINE void +witness_unlock(tsdn_t *tsdn, witness_t *witness) +{ + tsd_t *tsd; + witness_list_t *witnesses; + + if (!config_debug) + return; + + if (tsdn_null(tsdn)) + return; + tsd = tsdn_tsd(tsdn); + if (witness->rank == WITNESS_RANK_OMIT) + return; + + /* + * Check whether owner before removal, rather than relying on + * witness_assert_owner() to abort, so that unit tests can test this + * function's failure mode without causing undefined behavior. + */ + if (witness_owner(tsd, witness)) { + witnesses = tsd_witnessesp_get(tsd); + ql_remove(witnesses, witness, link); + } else + witness_assert_owner(tsdn, witness); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/
--- a/memory/jemalloc/src/include/jemalloc/jemalloc_macros.h.in +++ b/memory/jemalloc/src/include/jemalloc/jemalloc_macros.h.in @@ -8,33 +8,33 @@ #define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ #define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ #define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ #define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" # define MALLOCX_LG_ALIGN(la) ((int)(la)) # if LG_SIZEOF_PTR == 2 -# define MALLOCX_ALIGN(a) ((int)(ffs(a)-1)) +# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) # else # define MALLOCX_ALIGN(a) \ - ((int)(((a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ - ffs((int)((a)>>32))+31)) + ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) # endif # define MALLOCX_ZERO ((int)0x40) /* * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 * encodes MALLOCX_TCACHE_NONE. */ # define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) # define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) /* * Bias arena index bits so that 0 encodes "use an automatically chosen arena". */ -# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20)) +# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) # define JEMALLOC_CXX_THROW throw() #else # define JEMALLOC_CXX_THROW #endif #if _MSC_VER
--- a/memory/jemalloc/src/jemalloc.pc.in +++ b/memory/jemalloc/src/jemalloc.pc.in @@ -1,12 +1,12 @@ prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ install_suffix=@install_suffix@ Name: jemalloc Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. -URL: http://www.canonware.com/jemalloc +URL: http://jemalloc.net/ Version: @jemalloc_version@ Cflags: -I${includedir} Libs: -L${libdir} -ljemalloc${install_suffix}
--- a/memory/jemalloc/src/msvc/ReadMe.txt +++ b/memory/jemalloc/src/msvc/ReadMe.txt @@ -12,13 +12,13 @@ 1. Install Cygwin with at least the foll 2. Install Visual Studio 2015 with Visual C++ 3. Add Cygwin\bin to the PATH environment variable 4. Open "VS2015 x86 Native Tools Command Prompt" (note: x86/x64 doesn't matter at this point) 5. Generate header files: - sh -c "./autogen.sh CC=cl --enable-lazy-lock=no" + sh -c "CC=cl ./autogen.sh" 6. Now the project can be opened and built in Visual Studio: msvc\jemalloc_vc2015.sln
--- a/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj +++ b/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -51,34 +51,37 @@ <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" /> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" /> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h" /> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" /> <ClInclude 
Include="..\..\..\..\include\jemalloc\internal\tcache.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" /> - <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h" /> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" /> <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" /> @@ -104,21 +107,23 @@ <ClCompile Include="..\..\..\..\src\mb.c" /> <ClCompile Include="..\..\..\..\src\mutex.c" /> <ClCompile Include="..\..\..\..\src\nstime.c" /> <ClCompile Include="..\..\..\..\src\pages.c" /> <ClCompile Include="..\..\..\..\src\prng.c" /> <ClCompile Include="..\..\..\..\src\prof.c" /> <ClCompile Include="..\..\..\..\src\quarantine.c" /> <ClCompile Include="..\..\..\..\src\rtree.c" /> + <ClCompile Include="..\..\..\..\src\spin.c" /> <ClCompile Include="..\..\..\..\src\stats.c" /> <ClCompile Include="..\..\..\..\src\tcache.c" /> <ClCompile Include="..\..\..\..\src\ticker.c" /> <ClCompile Include="..\..\..\..\src\tsd.c" /> <ClCompile Include="..\..\..\..\src\util.c" /> + <ClCompile Include="..\..\..\..\src\witness.c" /> </ItemGroup> <PropertyGroup Label="Globals"> <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid> <Keyword>Win32Proj</Keyword> <RootNamespace>jemalloc</RootNamespace> <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion> </PropertyGroup> <Import 
Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> @@ -245,85 +250,86 @@ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'"> <ClCompile> <PrecompiledHeader> </PrecompiledHeader> <WarningLevel>Level3</WarningLevel> <Optimization>Disabled</Optimization> <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> </Link> </ItemDefinitionGroup> <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'"> <ClCompile> <PrecompiledHeader> </PrecompiledHeader> <WarningLevel>Level3</WarningLevel> <Optimization>Disabled</Optimization> <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> </Link> </ItemDefinitionGroup> <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> <ClCompile> <PrecompiledHeader> </PrecompiledHeader> 
<WarningLevel>Level3</WarningLevel> <Optimization>Disabled</Optimization> <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> </Link> </ItemDefinitionGroup> <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'"> <ClCompile> <PrecompiledHeader> </PrecompiledHeader> <WarningLevel>Level3</WarningLevel> <Optimization>Disabled</Optimization> <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> - <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> + <DebugInformationFormat>OldStyle</DebugInformationFormat> + <MinimalRebuild>false</MinimalRebuild> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> </Link> </ItemDefinitionGroup> <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'"> <ClCompile> <WarningLevel>Level3</WarningLevel> <PrecompiledHeader> </PrecompiledHeader> <Optimization>MaxSpeed</Optimization> <FunctionLevelLinking>true</FunctionLevelLinking> 
<IntrinsicFunctions>true</IntrinsicFunctions> <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> <EnableCOMDATFolding>true</EnableCOMDATFolding> <OptimizeReferences>true</OptimizeReferences> </Link> @@ -334,17 +340,17 @@ <PrecompiledHeader> </PrecompiledHeader> <Optimization>MaxSpeed</Optimization> <FunctionLevelLinking>true</FunctionLevelLinking> <IntrinsicFunctions>true</IntrinsicFunctions> <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <RuntimeLibrary>MultiThreaded</RuntimeLibrary> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> <EnableCOMDATFolding>true</EnableCOMDATFolding> <OptimizeReferences>true</OptimizeReferences> </Link> @@ -354,17 +360,17 @@ <WarningLevel>Level3</WarningLevel> <PrecompiledHeader> </PrecompiledHeader> <Optimization>MaxSpeed</Optimization> <FunctionLevelLinking>true</FunctionLevelLinking> <IntrinsicFunctions>true</IntrinsicFunctions> 
<AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> <EnableCOMDATFolding>true</EnableCOMDATFolding> <OptimizeReferences>true</OptimizeReferences> </Link> @@ -375,18 +381,18 @@ <PrecompiledHeader> </PrecompiledHeader> <Optimization>MaxSpeed</Optimization> <FunctionLevelLinking>true</FunctionLevelLinking> <IntrinsicFunctions>true</IntrinsicFunctions> <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions> <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <RuntimeLibrary>MultiThreaded</RuntimeLibrary> - <DisableSpecificWarnings>4090;4146;4244;4267;4334</DisableSpecificWarnings> - <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName> + <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings> + <DebugInformationFormat>OldStyle</DebugInformationFormat> </ClCompile> <Link> <SubSystem>Windows</SubSystem> <GenerateDebugInformation>true</GenerateDebugInformation> <EnableCOMDATFolding>true</EnableCOMDATFolding> <OptimizeReferences>true</OptimizeReferences> </Link> </ItemDefinitionGroup>
--- a/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters +++ b/memory/jemalloc/src/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -102,16 +102,19 @@ <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h"> <Filter>Header Files\internal</Filter> </ClInclude> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h"> + <Filter>Header Files\internal</Filter> + </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h"> <Filter>Header Files\internal</Filter> @@ -138,32 +141,38 @@ <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h"> <Filter>Header Files\internal</Filter> </ClInclude> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h"> + <Filter>Header Files\internal</Filter> + </ClInclude> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h"> + <Filter>Header Files\internal</Filter> + </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h"> <Filter>Header 
Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h"> <Filter>Header Files\internal</Filter> </ClInclude> - <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h"> + <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h"> <Filter>Header Files\internal</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h"> <Filter>Header Files\msvc_compat</Filter> </ClInclude> <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h"> <Filter>Header Files\msvc_compat</Filter> </ClInclude> @@ -233,25 +242,31 @@ <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="..\..\..\..\src\quarantine.c"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="..\..\..\..\src\rtree.c"> <Filter>Source Files</Filter> </ClCompile> + <ClCompile Include="..\..\..\..\src\spin.c"> + <Filter>Source Files</Filter> + </ClCompile> <ClCompile Include="..\..\..\..\src\stats.c"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="..\..\..\..\src\tcache.c"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="..\..\..\..\src\ticker.c"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="..\..\..\..\src\tsd.c"> <Filter>Source Files</Filter> </ClCompile> <ClCompile Include="..\..\..\..\src\util.c"> <Filter>Source Files</Filter> </ClCompile> + <ClCompile Include="..\..\..\..\src\witness.c"> + <Filter>Source Files</Filter> + </ClCompile> </ItemGroup> -</Project> \ No newline at end of file +</Project>
--- a/memory/jemalloc/src/src/arena.c +++ b/memory/jemalloc/src/src/arena.c @@ -16,37 +16,33 @@ ssize_t opt_decay_time = DECAY_TIME_DEF static ssize_t decay_time_default; arena_bin_info_t arena_bin_info[NBINS]; size_t map_bias; size_t map_misc_offset; size_t arena_maxrun; /* Max run size for arenas. */ size_t large_maxclass; /* Max large size class. */ -size_t run_quantize_max; /* Max run_quantize_*() input. */ -static size_t small_maxrun; /* Max run size for small size classes. */ -static bool *small_run_tab; /* Valid small run page multiples. */ -static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */ -static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */ unsigned nlclasses; /* Number of large size classes. */ unsigned nhclasses; /* Number of huge size classes. */ -static szind_t runs_avail_bias; /* Size index for first runs_avail tree. */ -static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */ /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. 
*/ -static void arena_purge_to_limit(arena_t *arena, size_t ndirty_limit); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned, bool decommitted); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); +static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk); +static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, + size_t ndirty_limit); +static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, + bool dirty, bool cleaned, bool decommitted); +static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); /******************************************************************************/ JEMALLOC_INLINE_C size_t arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm) { @@ -67,199 +63,142 @@ arena_run_addr_comp(const arena_chunk_ma uintptr_t b_miscelm = (uintptr_t)b; assert(a != NULL); assert(b != NULL); return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, - rb_link, arena_run_addr_comp) - +/* Generate pairing heap functions. */ +ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t, + ph_link, arena_run_addr_comp) + +#ifdef JEMALLOC_JET +#undef run_quantize_floor +#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor) +#endif static size_t -run_quantize_floor_compute(size_t size) +run_quantize_floor(size_t size) { - size_t qsize; + size_t ret; + pszind_t pind; + + assert(size > 0); + assert(size <= HUGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); assert(size != 0); assert(size == PAGE_CEILING(size)); - /* Don't change sizes that are valid small run sizes. 
*/ - if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) + pind = psz2ind(size - large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. + */ return (size); - - /* - * Round down to the nearest run size that can actually be requested - * during normal large allocation. Add large_pad so that cache index - * randomization can offset the allocation from the page boundary. - */ - qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; - if (qsize <= SMALL_MAXCLASS + large_pad) - return (run_quantize_floor_compute(size - large_pad)); - assert(qsize <= size); - return (qsize); + } + ret = pind2sz(pind - 1) + large_pad; + assert(ret <= size); + return (ret); } - +#ifdef JEMALLOC_JET +#undef run_quantize_floor +#define run_quantize_floor JEMALLOC_N(run_quantize_floor) +run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor); +#endif + +#ifdef JEMALLOC_JET +#undef run_quantize_ceil +#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil) +#endif static size_t -run_quantize_ceil_compute_hard(size_t size) +run_quantize_ceil(size_t size) { - size_t large_run_size_next; - - assert(size != 0); - assert(size == PAGE_CEILING(size)); - - /* - * Return the next quantized size greater than the input size. - * Quantized sizes comprise the union of run sizes that back small - * region runs, and run sizes that back large regions with no explicit - * alignment constraints. 
- */ - - if (size > SMALL_MAXCLASS) { - large_run_size_next = PAGE_CEILING(index2size(size2index(size - - large_pad) + 1) + large_pad); - } else - large_run_size_next = SIZE_T_MAX; - if (size >= small_maxrun) - return (large_run_size_next); - - while (true) { - size += PAGE; - assert(size <= small_maxrun); - if (small_run_tab[size >> LG_PAGE]) { - if (large_run_size_next < size) - return (large_run_size_next); - return (size); - } - } -} - -static size_t -run_quantize_ceil_compute(size_t size) -{ - size_t qsize = run_quantize_floor_compute(size); - - if (qsize < size) { + size_t ret; + + assert(size > 0); + assert(size <= HUGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); + + ret = run_quantize_floor(size); + if (ret < size) { /* * Skip a quantization that may have an adequately large run, * because under-sized runs may be mixed in. This only happens * when an unusual size is requested, i.e. for aligned * allocation, and is just one of several places where linear * search would potentially find sufficiently aligned available * memory somewhere lower. 
*/ - qsize = run_quantize_ceil_compute_hard(qsize); + ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad; } - return (qsize); -} - -#ifdef JEMALLOC_JET -#undef run_quantize_floor -#define run_quantize_floor JEMALLOC_N(run_quantize_floor_impl) -#endif -static size_t -run_quantize_floor(size_t size) -{ - size_t ret; - - assert(size > 0); - assert(size <= run_quantize_max); - assert((size & PAGE_MASK) == 0); - - ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1]; - assert(ret == run_quantize_floor_compute(size)); - return (ret); -} -#ifdef JEMALLOC_JET -#undef run_quantize_floor -#define run_quantize_floor JEMALLOC_N(run_quantize_floor) -run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl); -#endif - -#ifdef JEMALLOC_JET -#undef run_quantize_ceil -#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl) -#endif -static size_t -run_quantize_ceil(size_t size) -{ - size_t ret; - - assert(size > 0); - assert(size <= run_quantize_max); - assert((size & PAGE_MASK) == 0); - - ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1]; - assert(ret == run_quantize_ceil_compute(size)); return (ret); } #ifdef JEMALLOC_JET #undef run_quantize_ceil #define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) -run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl); +run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil); #endif -static arena_run_tree_t * -arena_runs_avail_get(arena_t *arena, szind_t ind) -{ - - assert(ind >= runs_avail_bias); - assert(ind - runs_avail_bias < runs_avail_nclasses); - - return (&arena->runs_avail[ind - runs_avail_bias]); -} - static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get( - arena_miscelm_get(chunk, pageind)))); + pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( + arena_miscelm_get_const(chunk, pageind)))); assert(npages == (arena_mapbits_unallocated_size_get(chunk, 
pageind) >> LG_PAGE)); - arena_run_tree_insert(arena_runs_avail_get(arena, ind), - arena_miscelm_get(chunk, pageind)); + assert((npages << LG_PAGE) < chunksize); + assert(pind2sz(pind) <= chunksize); + arena_run_heap_insert(&arena->runs_avail[pind], + arena_miscelm_get_mutable(chunk, pageind)); } static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get( - arena_miscelm_get(chunk, pageind)))); + pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get( + arena_miscelm_get_const(chunk, pageind)))); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); - arena_run_tree_remove(arena_runs_avail_get(arena, ind), - arena_miscelm_get(chunk, pageind)); + assert((npages << LG_PAGE) < chunksize); + assert(pind2sz(pind) <= chunksize); + arena_run_heap_remove(&arena->runs_avail[pind], + arena_miscelm_get_mutable(chunk, pageind)); } static void arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == CHUNK_MAP_DIRTY); qr_new(&miscelm->rd, rd_link); qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); arena->ndirty += npages; } static void arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, size_t npages) { - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE)); assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); 
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == CHUNK_MAP_DIRTY); qr_remove(&miscelm->rd, rd_link); @@ -584,220 +523,237 @@ arena_chunk_init_spare(arena_t *arena) arena_maxrun); assert(arena_mapbits_dirty_get(chunk, map_bias) == arena_mapbits_dirty_get(chunk, chunk_npages-1)); return (chunk); } static bool -arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) +arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + bool zero) { /* * The extent node notion of "committed" doesn't directly apply to * arena chunks. Arbitrarily mark them as committed. The commit state * of runs is tracked individually, and upon chunk deallocation the * entire chunk is in a consistent commit state. */ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); extent_node_achunk_set(&chunk->node, true); - return (chunk_register(chunk, &chunk->node)); + return (chunk_register(tsdn, chunk, &chunk->node)); } static arena_chunk_t * -arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - bool *zero, bool *commit) +arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) { arena_chunk_t *chunk; - malloc_mutex_unlock(&arena->lock); - - chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, - chunksize, chunksize, zero, commit); + malloc_mutex_unlock(tsdn, &arena->lock); + + chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, + NULL, chunksize, chunksize, zero, commit); if (chunk != NULL && !*commit) { /* Commit header. 
*/ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind)) { - chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, - chunksize, *zero, *commit); + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, + (void *)chunk, chunksize, *zero, *commit); chunk = NULL; } } - if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { + if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) { if (!*commit) { /* Undo commit of header. */ chunk_hooks->decommit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind); } - chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk, chunksize, *zero, *commit); chunk = NULL; } - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); return (chunk); } static arena_chunk_t * -arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) +arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, + bool *commit) { arena_chunk_t *chunk; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, - chunksize, zero, true); + chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, + chunksize, zero, commit, true); if (chunk != NULL) { - if (arena_chunk_register(arena, chunk, *zero)) { - chunk_dalloc_cache(arena, &chunk_hooks, chunk, + if (arena_chunk_register(tsdn, arena, chunk, *zero)) { + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, chunksize, true); return (NULL); } - *commit = true; } if (chunk == NULL) { - chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, - zero, commit); + chunk = arena_chunk_alloc_internal_hard(tsdn, arena, + &chunk_hooks, zero, commit); } if (config_stats && chunk != NULL) { arena->stats.mapped += chunksize; arena->stats.metadata_mapped += (map_bias << LG_PAGE); } return (chunk); } static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) +arena_chunk_init_hard(tsdn_t *tsdn, arena_t 
*arena) { arena_chunk_t *chunk; bool zero, commit; size_t flag_unzeroed, flag_decommitted, i; assert(arena->spare == NULL); zero = false; commit = false; - chunk = arena_chunk_alloc_internal(arena, &zero, &commit); + chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit); if (chunk == NULL) return (NULL); /* * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted - * chunk. + * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed + * or decommitted chunk. */ flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, flag_unzeroed | flag_decommitted); /* * There is no need to initialize the internal page map entries unless * the chunk is not zeroed. */ if (!zero) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( - (void *)arena_bitselm_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_bitselm_get(chunk, - chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, - map_bias+1))); + (void *)arena_bitselm_get_const(chunk, map_bias+1), + (size_t)((uintptr_t)arena_bitselm_get_const(chunk, + chunk_npages-1) - + (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); for (i = map_bias+1; i < chunk_npages-1; i++) arena_mapbits_internal_set(chunk, i, flag_unzeroed); } else { JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) - arena_bitselm_get(chunk, chunk_npages-1) - - (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); + *)arena_bitselm_get_const(chunk, map_bias+1), + (size_t)((uintptr_t)arena_bitselm_get_const(chunk, + chunk_npages-1) - + (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1))); if (config_debug) { for (i = map_bias+1; i < chunk_npages-1; i++) { assert(arena_mapbits_unzeroed_get(chunk, i) == flag_unzeroed); } } } arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, 
flag_unzeroed); return (chunk); } static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) +arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) { arena_chunk_t *chunk; if (arena->spare != NULL) chunk = arena_chunk_init_spare(arena); else { - chunk = arena_chunk_init_hard(arena); + chunk = arena_chunk_init_hard(tsdn, arena); if (chunk == NULL) return (NULL); } + ql_elm_new(&chunk->node, ql_link); + ql_tail_insert(&arena->achunks, &chunk->node, ql_link); arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); return (chunk); } static void -arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) +arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) { + bool committed; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + + chunk_deregister(chunk, &chunk->node); + + committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0); + if (!committed) { + /* + * Decommit the header. Mark the chunk as decommitted even if + * header decommit fails, since treating a partially committed + * chunk as committed has a high potential for causing later + * access of decommitted memory. 
+ */ + chunk_hooks = chunk_hooks_get(tsdn, arena); + chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE, + arena->ind); + } + + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize, + committed); + + if (config_stats) { + arena->stats.mapped -= chunksize; + arena->stats.metadata_mapped -= (map_bias << LG_PAGE); + } +} + +static void +arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare) +{ + + assert(arena->spare != spare); + + if (arena_mapbits_dirty_get(spare, map_bias) != 0) { + arena_run_dirty_remove(arena, spare, map_bias, + chunk_npages-map_bias); + } + + arena_chunk_discard(tsdn, arena, spare); +} + +static void +arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) +{ + arena_chunk_t *spare; assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == arena_maxrun); assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == arena_maxrun); assert(arena_mapbits_dirty_get(chunk, map_bias) == arena_mapbits_dirty_get(chunk, chunk_npages-1)); assert(arena_mapbits_decommitted_get(chunk, map_bias) == arena_mapbits_decommitted_get(chunk, chunk_npages-1)); /* Remove run from runs_avail, so that the arena does not use it. */ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - bool committed; - - arena->spare = chunk; - if (arena_mapbits_dirty_get(spare, map_bias) != 0) { - arena_run_dirty_remove(arena, spare, map_bias, - chunk_npages-map_bias); - } - - chunk_deregister(spare, &spare->node); - - committed = (arena_mapbits_decommitted_get(spare, map_bias) == - 0); - if (!committed) { - /* - * Decommit the header. 
Mark the chunk as decommitted - * even if header decommit fails, since treating a - * partially committed chunk as committed has a high - * potential for causing later access of decommitted - * memory. - */ - chunk_hooks = chunk_hooks_get(arena); - chunk_hooks.decommit(spare, chunksize, 0, map_bias << - LG_PAGE, arena->ind); - } - - chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare, - chunksize, committed); - - if (config_stats) { - arena->stats.mapped -= chunksize; - arena->stats.metadata_mapped -= (map_bias << LG_PAGE); - } - } else - arena->spare = chunk; + ql_remove(&arena->achunks, &chunk->node, ql_link); + spare = arena->spare; + arena->spare = chunk; + if (spare != NULL) + arena_spare_discard(tsdn, arena, spare); } static void arena_huge_malloc_stats_update(arena_t *arena, size_t usize) { szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); @@ -830,16 +786,27 @@ arena_huge_dalloc_stats_update(arena_t * arena->stats.ndalloc_huge++; arena->stats.allocated_huge -= usize; arena->stats.hstats[index].ndalloc++; arena->stats.hstats[index].curhchunks--; } static void +arena_huge_reset_stats_cancel(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge++; + arena->stats.hstats[index].ndalloc--; +} + +static void arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) { szind_t index = size2index(usize) - nlclasses - NBINS; cassert(config_stats); arena->stats.ndalloc_huge--; arena->stats.allocated_huge += usize; @@ -860,270 +827,275 @@ arena_huge_ralloc_stats_update_undo(aren size_t usize) { arena_huge_dalloc_stats_update_undo(arena, oldsize); arena_huge_malloc_stats_update_undo(arena, usize); } extent_node_t * -arena_node_alloc(arena_t *arena) +arena_node_alloc(tsdn_t *tsdn, arena_t *arena) { extent_node_t *node; - malloc_mutex_lock(&arena->node_cache_mtx); + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); node = 
ql_last(&arena->node_cache, ql_link); if (node == NULL) { - malloc_mutex_unlock(&arena->node_cache_mtx); - return (base_alloc(sizeof(extent_node_t))); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); + return (base_alloc(tsdn, sizeof(extent_node_t))); } ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); return (node); } void -arena_node_dalloc(arena_t *arena, extent_node_t *node) +arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) { - malloc_mutex_lock(&arena->node_cache_mtx); + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); ql_elm_new(node, ql_link); ql_tail_insert(&arena->node_cache, node, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); } static void * -arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - size_t usize, size_t alignment, bool *zero, size_t csize) +arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero, + size_t csize) { void *ret; bool commit = true; - ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment, - zero, &commit); + ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, + alignment, zero, &commit); if (ret == NULL) { /* Revert optimistic stats updates. 
*/ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_malloc_stats_update_undo(arena, usize); arena->stats.mapped -= usize; } arena_nactive_sub(arena, usize >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } return (ret); } void * -arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, - bool *zero) +arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool *zero) { void *ret; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize = CHUNK_CEILING(usize); - - malloc_mutex_lock(&arena->lock); + bool commit = true; + + malloc_mutex_lock(tsdn, &arena->lock); /* Optimistically update stats. */ if (config_stats) { arena_huge_malloc_stats_update(arena, usize); arena->stats.mapped += usize; } arena_nactive_add(arena, usize >> LG_PAGE); - ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, - zero, true); - malloc_mutex_unlock(&arena->lock); + ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, + alignment, zero, &commit, true); + malloc_mutex_unlock(tsdn, &arena->lock); if (ret == NULL) { - ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, - alignment, zero, csize); + ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, + usize, alignment, zero, csize); } return (ret); } void -arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) +arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize; csize = CHUNK_CEILING(usize); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_dalloc_stats_update(arena, usize); arena->stats.mapped -= usize; } arena_nactive_sub(arena, usize >> LG_PAGE); - chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); - malloc_mutex_unlock(&arena->lock); + chunk_dalloc_cache(tsdn, arena, 
&chunk_hooks, chunk, csize, true); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) +arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize) { assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); assert(oldsize != usize); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) arena_huge_ralloc_stats_update(arena, oldsize, usize); if (oldsize < usize) arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); else arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) +arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize) { size_t udiff = oldsize - usize; size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_ralloc_stats_update(arena, oldsize, usize); if (cdiff != 0) arena->stats.mapped -= cdiff; } arena_nactive_sub(arena, udiff >> LG_PAGE); if (cdiff != 0) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(usize)); - chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, + true); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } static bool -arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, - size_t udiff, size_t cdiff) +arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, + bool *zero, void 
*nchunk, size_t udiff, size_t cdiff) { bool err; bool commit = true; - err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, - zero, &commit) == NULL); + err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, + chunksize, zero, &commit) == NULL); if (err) { /* Revert optimistic stats updates. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_ralloc_stats_update_undo(arena, oldsize, usize); arena->stats.mapped -= cdiff; } arena_nactive_sub(arena, udiff >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_wrapper(arena, chunk_hooks, nchunk, cdiff, *zero, - true); + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, + *zero, true); err = true; } return (err); } bool -arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, - size_t usize, bool *zero) +arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, + size_t oldsize, size_t usize, bool *zero) { bool err; - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); size_t udiff = usize - oldsize; size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); - - malloc_mutex_lock(&arena->lock); + bool commit = true; + + malloc_mutex_lock(tsdn, &arena->lock); /* Optimistically update stats. 
*/ if (config_stats) { arena_huge_ralloc_stats_update(arena, oldsize, usize); arena->stats.mapped += cdiff; } arena_nactive_add(arena, udiff >> LG_PAGE); - err = (chunk_alloc_cache(arena, &chunk_hooks, nchunk, cdiff, chunksize, - zero, true) == NULL); - malloc_mutex_unlock(&arena->lock); + err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, + chunksize, zero, &commit, true) == NULL); + malloc_mutex_unlock(tsdn, &arena->lock); if (err) { - err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, - chunk, oldsize, usize, zero, nchunk, udiff, + err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, + &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff, cdiff); } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_wrapper(arena, &chunk_hooks, nchunk, cdiff, *zero, - true); + chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, + *zero, true); err = true; } return (err); } /* * Do first-best-fit run selection, i.e. select the lowest run that best fits. * Run sizes are indexed, so not all candidate runs are necessarily exactly the * same size. 
*/ static arena_run_t * arena_run_first_best_fit(arena_t *arena, size_t size) { - szind_t ind, i; - - ind = size2index(run_quantize_ceil(size)); - for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) { - arena_chunk_map_misc_t *miscelm = arena_run_tree_first( - arena_runs_avail_get(arena, i)); + pszind_t pind, i; + + pind = psz2ind(run_quantize_ceil(size)); + + for (i = pind; pind2sz(i) <= chunksize; i++) { + arena_chunk_map_misc_t *miscelm = arena_run_heap_first( + &arena->runs_avail[i]); if (miscelm != NULL) return (&miscelm->run); } return (NULL); } static arena_run_t * arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) { - arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); + arena_run_t *run = arena_run_first_best_fit(arena, size); if (run != NULL) { if (arena_run_split_large(arena, run, size, zero)) run = NULL; } return (run); } static arena_run_t * -arena_run_alloc_large(arena_t *arena, size_t size, bool zero) +arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) { arena_chunk_t *chunk; arena_run_t *run; assert(size <= arena_maxrun); assert(size == PAGE_CEILING(size)); /* Search the arena's chunks for the lowest best fit. */ run = arena_run_alloc_large_helper(arena, size, zero); if (run != NULL) return (run); /* * No usable runs. Create a new chunk from which to allocate the run. 
*/ - chunk = arena_chunk_alloc(arena); + chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; + run = &arena_miscelm_get_mutable(chunk, map_bias)->run; if (arena_run_split_large(arena, run, size, zero)) run = NULL; return (run); } /* * arena_chunk_alloc() failed, but another thread may have made * sufficient memory available while this one dropped arena->lock in @@ -1139,36 +1111,36 @@ arena_run_alloc_small_helper(arena_t *ar if (run != NULL) { if (arena_run_split_small(arena, run, size, binind)) run = NULL; } return (run); } static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) +arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) { arena_chunk_t *chunk; arena_run_t *run; assert(size <= arena_maxrun); assert(size == PAGE_CEILING(size)); assert(binind != BININD_INVALID); /* Search the arena's chunks for the lowest best fit. */ run = arena_run_alloc_small_helper(arena, size, binind); if (run != NULL) return (run); /* * No usable runs. Create a new chunk from which to allocate the run. 
*/ - chunk = arena_chunk_alloc(arena); + chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; + run = &arena_miscelm_get_mutable(chunk, map_bias)->run; if (arena_run_split_small(arena, run, size, binind)) run = NULL; return (run); } /* * arena_chunk_alloc() failed, but another thread may have made * sufficient memory available while this one dropped arena->lock in @@ -1181,70 +1153,70 @@ static bool arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) { return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3)); } ssize_t -arena_lg_dirty_mult_get(arena_t *arena) +arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena) { ssize_t lg_dirty_mult; - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); lg_dirty_mult = arena->lg_dirty_mult; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (lg_dirty_mult); } bool -arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) +arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult) { if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) return (true); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena->lg_dirty_mult = lg_dirty_mult; - arena_maybe_purge(arena); - malloc_mutex_unlock(&arena->lock); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } static void arena_decay_deadline_init(arena_t *arena) { assert(opt_purge == purge_mode_decay); /* * Generate a new deadline that is uniformly random within the next * epoch after the current one. 
*/ - nstime_copy(&arena->decay_deadline, &arena->decay_epoch); - nstime_add(&arena->decay_deadline, &arena->decay_interval); - if (arena->decay_time > 0) { + nstime_copy(&arena->decay.deadline, &arena->decay.epoch); + nstime_add(&arena->decay.deadline, &arena->decay.interval); + if (arena->decay.time > 0) { nstime_t jitter; - nstime_init(&jitter, prng_range(&arena->decay_jitter_state, - nstime_ns(&arena->decay_interval))); - nstime_add(&arena->decay_deadline, &jitter); + nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state, + nstime_ns(&arena->decay.interval))); + nstime_add(&arena->decay.deadline, &jitter); } } static bool arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time) { assert(opt_purge == purge_mode_decay); - return (nstime_compare(&arena->decay_deadline, time) <= 0); + return (nstime_compare(&arena->decay.deadline, time) <= 0); } static size_t arena_decay_backlog_npages_limit(const arena_t *arena) { static const uint64_t h_steps[] = { #define STEP(step, h, x, y) \ h, @@ -1259,144 +1231,163 @@ arena_decay_backlog_npages_limit(const a /* * For each element of decay_backlog, multiply by the corresponding * fixed-point smoothstep decay factor. Sum the products, then divide * to round down to the nearest whole number of pages. */ sum = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) - sum += arena->decay_backlog[i] * h_steps[i]; - npages_limit_backlog = (sum >> SMOOTHSTEP_BFP); + sum += arena->decay.backlog[i] * h_steps[i]; + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); return (npages_limit_backlog); } static void -arena_decay_epoch_advance(arena_t *arena, const nstime_t *time) +arena_decay_backlog_update_last(arena_t *arena) +{ + size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ? 
+ arena->ndirty - arena->decay.ndirty : 0; + arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; +} + +static void +arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64) { - uint64_t nadvance; + + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; + + assert((uint64_t)nadvance_z == nadvance_u64); + + memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } + + arena_decay_backlog_update_last(arena); +} + +static void +arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time) +{ + uint64_t nadvance_u64; nstime_t delta; - size_t ndirty_delta; assert(opt_purge == purge_mode_decay); assert(arena_decay_deadline_reached(arena, time)); nstime_copy(&delta, time); - nstime_subtract(&delta, &arena->decay_epoch); - nadvance = nstime_divide(&delta, &arena->decay_interval); - assert(nadvance > 0); - - /* Add nadvance decay intervals to epoch. */ - nstime_copy(&delta, &arena->decay_interval); - nstime_imultiply(&delta, nadvance); - nstime_add(&arena->decay_epoch, &delta); + nstime_subtract(&delta, &arena->decay.epoch); + nadvance_u64 = nstime_divide(&delta, &arena->decay.interval); + assert(nadvance_u64 > 0); + + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &arena->decay.interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&arena->decay.epoch, &delta); /* Set a new deadline. */ arena_decay_deadline_init(arena); /* Update the backlog. 
*/ - if (nadvance >= SMOOTHSTEP_NSTEPS) { - memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) * - sizeof(size_t)); - } else { - memmove(arena->decay_backlog, &arena->decay_backlog[nadvance], - (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t)); - if (nadvance > 1) { - memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS - - nadvance], 0, (nadvance-1) * sizeof(size_t)); - } - } - ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty - - arena->decay_ndirty : 0; - arena->decay_ndirty = arena->ndirty; - arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta; - arena->decay_backlog_npages_limit = - arena_decay_backlog_npages_limit(arena); + arena_decay_backlog_update(arena, nadvance_u64); } -static size_t -arena_decay_npages_limit(arena_t *arena) +static void +arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena) { - size_t npages_limit; - - assert(opt_purge == purge_mode_decay); - - npages_limit = arena->decay_backlog_npages_limit; - - /* Add in any dirty pages created during the current epoch. 
*/ - if (arena->ndirty > arena->decay_ndirty) - npages_limit += arena->ndirty - arena->decay_ndirty; - - return (npages_limit); + size_t ndirty_limit = arena_decay_backlog_npages_limit(arena); + + if (arena->ndirty > ndirty_limit) + arena_purge_to_limit(tsdn, arena, ndirty_limit); + arena->decay.ndirty = arena->ndirty; +} + +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time) +{ + + arena_decay_epoch_advance_helper(arena, time); + arena_decay_epoch_advance_purge(tsdn, arena); } static void arena_decay_init(arena_t *arena, ssize_t decay_time) { - arena->decay_time = decay_time; + arena->decay.time = decay_time; if (decay_time > 0) { - nstime_init2(&arena->decay_interval, decay_time, 0); - nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS); + nstime_init2(&arena->decay.interval, decay_time, 0); + nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS); } - nstime_init(&arena->decay_epoch, 0); - nstime_update(&arena->decay_epoch); - arena->decay_jitter_state = (uint64_t)(uintptr_t)arena; + nstime_init(&arena->decay.epoch, 0); + nstime_update(&arena->decay.epoch); + arena->decay.jitter_state = (uint64_t)(uintptr_t)arena; arena_decay_deadline_init(arena); - arena->decay_ndirty = arena->ndirty; - arena->decay_backlog_npages_limit = 0; - memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); + arena->decay.ndirty = arena->ndirty; + memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); } static bool arena_decay_time_valid(ssize_t decay_time) { - return (decay_time >= -1 && decay_time <= NSTIME_SEC_MAX); + if (decay_time < -1) + return (false); + if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) + return (true); + return (false); } ssize_t -arena_decay_time_get(arena_t *arena) +arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) { ssize_t decay_time; - malloc_mutex_lock(&arena->lock); - decay_time = arena->decay_time; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_lock(tsdn, 
&arena->lock); + decay_time = arena->decay.time; + malloc_mutex_unlock(tsdn, &arena->lock); return (decay_time); } bool -arena_decay_time_set(arena_t *arena, ssize_t decay_time) +arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) { if (!arena_decay_time_valid(decay_time)) return (true); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); /* * Restart decay backlog from scratch, which may cause many dirty pages * to be immediately purged. It would conceptually be possible to map * the old backlog onto the new backlog, but there is no justification * for such complexity since decay_time changes are intended to be * infrequent, either between the {-1, 0, >0} states, or a one-time * arbitrary change during initial arena configuration. */ arena_decay_init(arena, decay_time); - arena_maybe_purge(arena); - malloc_mutex_unlock(&arena->lock); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } static void -arena_maybe_purge_ratio(arena_t *arena) +arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena) { assert(opt_purge == purge_mode_ratio); /* Don't purge if the option is disabled. */ if (arena->lg_dirty_mult < 0) return; @@ -1409,67 +1400,76 @@ arena_maybe_purge_ratio(arena_t *arena) if (threshold < chunk_npages) threshold = chunk_npages; /* * Don't purge unless the number of purgeable pages exceeds the * threshold. */ if (arena->ndirty <= threshold) return; - arena_purge_to_limit(arena, threshold); + arena_purge_to_limit(tsdn, arena, threshold); } } static void -arena_maybe_purge_decay(arena_t *arena) +arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) { nstime_t time; - size_t ndirty_limit; assert(opt_purge == purge_mode_decay); /* Purge all or nothing if the option is disabled. 
*/ - if (arena->decay_time <= 0) { - if (arena->decay_time == 0) - arena_purge_to_limit(arena, 0); + if (arena->decay.time <= 0) { + if (arena->decay.time == 0) + arena_purge_to_limit(tsdn, arena, 0); return; } - nstime_copy(&time, &arena->decay_epoch); - if (unlikely(nstime_update(&time))) { - /* Time went backwards. Force an epoch advance. */ - nstime_copy(&time, &arena->decay_deadline); + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch, + &time) > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&arena->decay.epoch, &time); + arena_decay_deadline_init(arena); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&arena->decay.epoch, &time) <= 0); } - if (arena_decay_deadline_reached(arena, &time)) - arena_decay_epoch_advance(arena, &time); - - ndirty_limit = arena_decay_npages_limit(arena); - /* - * Don't try to purge unless the number of purgeable pages exceeds the - * current limit. + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances. 
*/ - if (arena->ndirty <= ndirty_limit) - return; - arena_purge_to_limit(arena, ndirty_limit); + if (arena_decay_deadline_reached(arena, &time)) + arena_decay_epoch_advance(tsdn, arena, &time); } void -arena_maybe_purge(arena_t *arena) +arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) { /* Don't recursively purge. */ if (arena->purging) return; if (opt_purge == purge_mode_ratio) - arena_maybe_purge_ratio(arena); + arena_maybe_purge_ratio(tsdn, arena); else - arena_maybe_purge_decay(arena); + arena_maybe_purge_decay(tsdn, arena); } static size_t arena_dirty_count(arena_t *arena) { size_t ndirty = 0; arena_runs_dirty_link_t *rdelm; extent_node_t *chunkselm; @@ -1497,51 +1497,52 @@ arena_dirty_count(arena_t *arena) } ndirty += npages; } return (ndirty); } static size_t -arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { arena_runs_dirty_link_t *rdelm, *rdelm_next; extent_node_t *chunkselm; size_t nstashed = 0; /* Stash runs/chunks according to ndirty_limit. */ for (rdelm = qr_next(&arena->runs_dirty, rd_link), chunkselm = qr_next(&arena->chunks_cache, cc_link); rdelm != &arena->runs_dirty; rdelm = rdelm_next) { size_t npages; rdelm_next = qr_next(rdelm, rd_link); if (rdelm == &chunkselm->rd) { extent_node_t *chunkselm_next; - bool zero; + bool zero, commit; UNUSED void *chunk; npages = extent_node_size_get(chunkselm) >> LG_PAGE; if (opt_purge == purge_mode_decay && arena->ndirty - (nstashed + npages) < ndirty_limit) break; chunkselm_next = qr_next(chunkselm, cc_link); /* * Allocate. chunkselm remains valid due to the * dalloc_node=false argument to chunk_alloc_cache(). 
*/ zero = false; - chunk = chunk_alloc_cache(arena, chunk_hooks, + commit = false; + chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks, extent_node_addr_get(chunkselm), extent_node_size_get(chunkselm), chunksize, &zero, - false); + &commit, false); assert(chunk == extent_node_addr_get(chunkselm)); assert(zero == extent_node_zeroed_get(chunkselm)); extent_node_dirty_insert(chunkselm, purge_runs_sentinel, purge_chunks_sentinel); assert(npages == (extent_node_size_get(chunkselm) >> LG_PAGE)); chunkselm = chunkselm_next; } else { @@ -1563,17 +1564,17 @@ arena_stash_dirty(arena_t *arena, chunk_ assert(arena_mapbits_dirty_get(chunk, pageind) == arena_mapbits_dirty_get(chunk, pageind+npages-1)); /* * If purging the spare chunk's run, make it available * prior to allocation. */ if (chunk == arena->spare) - arena_chunk_alloc(arena); + arena_chunk_alloc(tsdn, arena); /* Temporarily allocate the free dirty run. */ arena_run_split_large(arena, run, run_size, false); /* Stash. */ if (false) qr_new(rdelm, rd_link); /* Redundant. 
*/ else { assert(qr_next(rdelm, rd_link) == rdelm); @@ -1587,29 +1588,29 @@ arena_stash_dirty(arena_t *arena, chunk_ ndirty_limit) break; } return (nstashed); } static size_t -arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { size_t npurged, nmadvise; arena_runs_dirty_link_t *rdelm; extent_node_t *chunkselm; if (config_stats) nmadvise = 0; npurged = 0; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); for (rdelm = qr_next(purge_runs_sentinel, rd_link), chunkselm = qr_next(purge_chunks_sentinel, cc_link); rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { size_t npages; if (rdelm == &chunkselm->rd) { /* * Don't actually purge the chunk here because 1) @@ -1638,17 +1639,17 @@ arena_purge_stashed(arena_t *arena, chun assert(!arena_mapbits_decommitted_get(chunk, pageind+npages-1)); decommitted = !chunk_hooks->decommit(chunk, chunksize, pageind << LG_PAGE, npages << LG_PAGE, arena->ind); if (decommitted) { flag_unzeroed = 0; flags = CHUNK_MAP_DECOMMITTED; } else { - flag_unzeroed = chunk_purge_wrapper(arena, + flag_unzeroed = chunk_purge_wrapper(tsdn, arena, chunk_hooks, chunk, chunksize, pageind << LG_PAGE, run_size) ? 
CHUNK_MAP_UNZEROED : 0; flags = flag_unzeroed; } arena_mapbits_large_set(chunk, pageind+npages-1, 0, flags); arena_mapbits_large_set(chunk, pageind, run_size, flags); @@ -1669,28 +1670,28 @@ arena_purge_stashed(arena_t *arena, chun flag_unzeroed); } } npurged += npages; if (config_stats) nmadvise++; } - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena->stats.nmadvise += nmadvise; arena->stats.purged += npurged; } return (npurged); } static void -arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { arena_runs_dirty_link_t *rdelm, *rdelm_next; extent_node_t *chunkselm; /* Deallocate chunks/runs. */ for (rdelm = qr_next(purge_runs_sentinel, rd_link), @@ -1700,48 +1701,49 @@ arena_unstash_purged(arena_t *arena, chu if (rdelm == &chunkselm->rd) { extent_node_t *chunkselm_next = qr_next(chunkselm, cc_link); void *addr = extent_node_addr_get(chunkselm); size_t size = extent_node_size_get(chunkselm); bool zeroed = extent_node_zeroed_get(chunkselm); bool committed = extent_node_committed_get(chunkselm); extent_node_dirty_remove(chunkselm); - arena_node_dalloc(arena, chunkselm); + arena_node_dalloc(tsdn, arena, chunkselm); chunkselm = chunkselm_next; - chunk_dalloc_wrapper(arena, chunk_hooks, addr, size, - zeroed, committed); + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, + size, zeroed, committed); } else { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); arena_chunk_map_misc_t *miscelm = arena_rd_to_miscelm(rdelm); size_t pageind = arena_miscelm_to_pageind(miscelm); bool decommitted = (arena_mapbits_decommitted_get(chunk, pageind) != 0); arena_run_t *run = &miscelm->run; qr_remove(rdelm, rd_link); - arena_run_dalloc(arena, run, false, true, decommitted); + arena_run_dalloc(tsdn, arena, run, false, true, + decommitted); } } } 
/* * NB: ndirty_limit is interpreted differently depending on opt_purge: * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the * desired state: * (arena->ndirty <= ndirty_limit) * - purge_mode_decay: Purge as many dirty runs/chunks as possible without * violating the invariant: * (arena->ndirty >= ndirty_limit) */ static void -arena_purge_to_limit(arena_t *arena, size_t ndirty_limit) +arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) { - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); size_t npurge, npurged; arena_runs_dirty_link_t purge_runs_sentinel; extent_node_t purge_chunks_sentinel; arena->purging = true; /* * Calls to arena_dirty_count() are disabled even for debug builds @@ -1752,43 +1754,187 @@ arena_purge_to_limit(arena_t *arena, siz assert(ndirty == arena->ndirty); } assert(opt_purge != purge_mode_ratio || (arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0); qr_new(&purge_runs_sentinel, rd_link); extent_node_dirty_linkage_init(&purge_chunks_sentinel); - npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit, + npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit, &purge_runs_sentinel, &purge_chunks_sentinel); if (npurge == 0) goto label_return; - npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, - &purge_chunks_sentinel); + npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks, + &purge_runs_sentinel, &purge_chunks_sentinel); assert(npurged == npurge); - arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, + arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, &purge_chunks_sentinel); if (config_stats) arena->stats.npurge++; label_return: arena->purging = false; } void -arena_purge(arena_t *arena, bool all) +arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) { - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (all) - 
arena_purge_to_limit(arena, 0); + arena_purge_to_limit(tsdn, arena, 0); else - arena_maybe_purge(arena); - malloc_mutex_unlock(&arena->lock); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); +} + +static void +arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) +{ + size_t pageind, npages; + + cassert(config_prof); + assert(opt_prof); + + /* + * Iterate over the allocated runs and remove profiled allocations from + * the sample set. + */ + for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { + if (arena_mapbits_allocated_get(chunk, pageind) != 0) { + if (arena_mapbits_large_get(chunk, pageind) != 0) { + void *ptr = (void *)((uintptr_t)chunk + (pageind + << LG_PAGE)); + size_t usize = isalloc(tsd_tsdn(tsd), ptr, + config_prof); + + prof_free(tsd, ptr, usize); + npages = arena_mapbits_large_size_get(chunk, + pageind) >> LG_PAGE; + } else { + /* Skip small run. */ + size_t binind = arena_mapbits_binind_get(chunk, + pageind); + arena_bin_info_t *bin_info = + &arena_bin_info[binind]; + npages = bin_info->run_size >> LG_PAGE; + } + } else { + /* Skip unallocated run. */ + npages = arena_mapbits_unallocated_size_get(chunk, + pageind) >> LG_PAGE; + } + assert(pageind + npages <= chunk_npages); + } +} + +void +arena_reset(tsd_t *tsd, arena_t *arena) +{ + unsigned i; + extent_node_t *node; + + /* + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. 
+ */ + + /* Remove large allocations from prof sample set. */ + if (config_prof && opt_prof) { + ql_foreach(node, &arena->achunks, ql_link) { + arena_achunk_prof_reset(tsd, arena, + extent_node_addr_get(node)); + } + } + + /* Reset curruns for large size classes. */ + if (config_stats) { + for (i = 0; i < nlclasses; i++) + arena->stats.lstats[i].curruns = 0; + } + + /* Huge allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); + for (node = ql_last(&arena->huge, ql_link); node != NULL; node = + ql_last(&arena->huge, ql_link)) { + void *ptr = extent_node_addr_get(node); + size_t usize; + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + if (config_stats || (config_prof && opt_prof)) + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + /* Remove huge allocation from prof sample set. */ + if (config_prof && opt_prof) + prof_free(tsd, ptr, usize); + huge_dalloc(tsd_tsdn(tsd), ptr); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); + /* Cancel out unwanted effects on stats. */ + if (config_stats) + arena_huge_reset_stats_cancel(arena, usize); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); + + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); + + /* Bins. */ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + bin->runcur = NULL; + arena_run_heap_new(&bin->runs); + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curruns = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + + /* + * Re-initialize runs_dirty such that the chunks_cache and runs_dirty + * chains directly correspond. + */ + qr_new(&arena->runs_dirty, rd_link); + for (node = qr_next(&arena->chunks_cache, cc_link); + node != &arena->chunks_cache; node = qr_next(node, cc_link)) { + qr_new(&node->rd, rd_link); + qr_meld(&arena->runs_dirty, &node->rd, rd_link); + } + + /* Arena chunks. 
*/ + for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = + ql_last(&arena->achunks, ql_link)) { + ql_remove(&arena->achunks, node, ql_link); + arena_chunk_discard(tsd_tsdn(tsd), arena, + extent_node_addr_get(node)); + } + + /* Spare. */ + if (arena->spare != NULL) { + arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); + arena->spare = NULL; + } + + assert(!arena->purging); + arena->nactive = 0; + + for (i = 0; i < NPSIZES; i++) + arena_run_heap_new(&arena->runs_avail[i]); + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } static void arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, size_t flag_decommitted) { size_t size = *p_size; @@ -1895,18 +2041,18 @@ arena_run_size_get(arena_t *arena, arena arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; size = bin_info->run_size; } return (size); } static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, - bool decommitted) +arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, + bool cleaned, bool decommitted) { arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); miscelm = arena_run_to_miscelm(run); run_ind = arena_miscelm_to_pageind(miscelm); @@ -1956,33 +2102,33 @@ arena_run_dalloc(arena_t *arena, arena_r if (dirty) arena_run_dirty_insert(arena, chunk, run_ind, run_pages); /* Deallocate chunk if it is now completely unused. */ if (size == arena_maxrun) { assert(run_ind == map_bias); assert(run_pages == (arena_maxrun >> LG_PAGE)); - arena_chunk_dalloc(arena, chunk); + arena_chunk_dalloc(tsdn, arena, chunk); } /* * It is okay to do dirty page processing here even if the chunk was * deallocated above, since in that case it is the spare. 
Waiting * until after possible chunk deallocation to do dirty processing * allows for an old spare to be fully deallocated, thus decreasing the * chances of spuriously crossing the dirty page purging threshold. */ if (dirty) - arena_maybe_purge(arena); + arena_maybe_purge(tsdn, arena); } static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) +arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); size_t pageind = arena_miscelm_to_pageind(miscelm); size_t head_npages = (oldsize - newsize) >> LG_PAGE; size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? CHUNK_MAP_UNZEROED : 0; @@ -2007,22 +2153,23 @@ arena_run_trim_head(arena_t *arena, aren pageind+head_npages+tail_npages-1) == 0); assert(arena_mapbits_dirty_get(chunk, pageind+head_npages+tail_npages-1) == flag_dirty); } arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages))); - arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); + arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != + 0)); } static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) +arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); size_t pageind = arena_miscelm_to_pageind(miscelm); size_t head_npages = newsize >> LG_PAGE; size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 
size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? CHUNK_MAP_UNZEROED : 0; @@ -2049,94 +2196,74 @@ arena_run_trim_tail(arena_t *arena, aren pageind+head_npages+tail_npages-1) == 0); assert(arena_mapbits_dirty_get(chunk, pageind+head_npages+tail_npages-1) == flag_dirty); } arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages))); - tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); + tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); tail_run = &tail_miscelm->run; - arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != - 0)); -} - -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); - if (miscelm != NULL) - return (&miscelm->run); - - return (NULL); + arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted + != 0)); } static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); - - arena_run_tree_insert(&bin->runs, miscelm); -} - -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - - assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); - - arena_run_tree_remove(&bin->runs, miscelm); + arena_run_heap_insert(&bin->runs, miscelm); } static arena_run_t * arena_bin_nonfull_run_tryget(arena_bin_t *bin) { - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; - } - return (run); + arena_chunk_map_misc_t *miscelm; + + miscelm = arena_run_heap_remove_first(&bin->runs); + if (miscelm == NULL) + return (NULL); + if (config_stats) + bin->stats.reruns++; + + return (&miscelm->run); } static arena_run_t 
* -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) +arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) { arena_run_t *run; szind_t binind; arena_bin_info_t *bin_info; /* Look for a usable run. */ run = arena_bin_nonfull_run_tryget(bin); if (run != NULL) return (run); /* No existing runs have any space available. */ binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_small(arena, bin_info->run_size, binind); + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); if (run != NULL) { /* Initialize run internals. */ run->binind = binind; run->nfree = bin_info->nregs; bitmap_init(run->bitmap, &bin_info->bitmap_info); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); /********************************/ - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if (run != NULL) { if (config_stats) { bin->stats.nruns++; bin->stats.curruns++; } return (run); } @@ -2149,26 +2276,26 @@ arena_bin_nonfull_run_get(arena_t *arena if (run != NULL) return (run); return (NULL); } /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) { szind_t binind; arena_bin_info_t *bin_info; arena_run_t *run; binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); + run = arena_bin_nonfull_run_get(tsdn, arena, bin); if (bin->runcur != NULL && bin->runcur->nfree > 0) { /* * Another thread updated runcur while this one ran without the * bin lock in arena_bin_nonfull_run_get(). 
*/ void *ret; assert(bin->runcur->nfree > 0); ret = arena_run_reg_alloc(bin->runcur, bin_info); @@ -2179,55 +2306,56 @@ arena_bin_malloc_hard(arena_t *arena, ar * arena_run_alloc_small() may have allocated run, or * it may have pulled run from the bin's run tree. * Therefore it is unsafe to make any assumptions about * how run has previously been used, and * arena_bin_lower_run() must be called, as if a region * were just deallocated from the run. */ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else + if (run->nfree == bin_info->nregs) { + arena_dalloc_bin_run(tsdn, arena, chunk, run, + bin); + } else arena_bin_lower_run(arena, chunk, run, bin); } return (ret); } if (run == NULL) return (NULL); bin->runcur = run; assert(bin->runcur->nfree > 0); return (arena_run_reg_alloc(bin->runcur, bin_info)); } void -arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; arena_bin_t *bin; assert(tbin->ncached == 0); - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) + prof_idump(tsdn); bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tbin->lg_fill_div); i < nfill; i++) { arena_run_t *run; void *ptr; if ((run = bin->runcur) != NULL && run->nfree > 0) ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); else - ptr = arena_bin_malloc_hard(arena, bin); + ptr = arena_bin_malloc_hard(tsdn, arena, bin); if (ptr == NULL) { /* * OOM. tbin->avail isn't yet filled down to its first * element, so the successful allocations (if any) must * be moved just before tbin->avail before bailing out. 
*/ if (i > 0) { memmove(tbin->avail - i, tbin->avail - nfill, @@ -2244,111 +2372,112 @@ arena_tcache_fill_small(tsd_t *tsd, aren } if (config_stats) { bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; bin->stats.curregs += i; bin->stats.nfills++; tbin->tstats.nrequests = 0; } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); } void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) { + size_t redzone_size = bin_info->redzone_size; + if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); + memset((void *)((uintptr_t)ptr - redzone_size), + JEMALLOC_ALLOC_JUNK, redzone_size); + memset((void *)((uintptr_t)ptr + bin_info->reg_size), + JEMALLOC_ALLOC_JUNK, redzone_size); } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); + memset((void *)((uintptr_t)ptr - redzone_size), + JEMALLOC_ALLOC_JUNK, bin_info->reg_interval); } } #ifdef JEMALLOC_JET #undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) +#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption) #endif static void arena_redzone_corruption(void *ptr, size_t usize, bool after, size_t offset, uint8_t byte) { malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", after ? 
"after" : "before", ptr, usize, byte); } #ifdef JEMALLOC_JET #undef arena_redzone_corruption #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(arena_redzone_corruption_impl); + JEMALLOC_N(n_arena_redzone_corruption); #endif static void arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) { bool error = false; if (opt_junk_alloc) { size_t size = bin_info->reg_size; size_t redzone_size = bin_info->redzone_size; size_t i; for (i = 1; i <= redzone_size; i++) { uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { + if (*byte != JEMALLOC_ALLOC_JUNK) { error = true; arena_redzone_corruption(ptr, size, false, i, *byte); if (reset) - *byte = 0xa5; + *byte = JEMALLOC_ALLOC_JUNK; } } for (i = 0; i < redzone_size; i++) { uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { + if (*byte != JEMALLOC_ALLOC_JUNK) { error = true; arena_redzone_corruption(ptr, size, true, i, *byte); if (reset) - *byte = 0xa5; + *byte = JEMALLOC_ALLOC_JUNK; } } } if (opt_abort && error) abort(); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) +#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) #endif void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) { size_t redzone_size = bin_info->redzone_size; arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, + memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, bin_info->reg_interval); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_small #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(arena_dalloc_junk_small_impl); + JEMALLOC_N(n_arena_dalloc_junk_small); #endif void arena_quarantine_junk_small(void *ptr, size_t usize) { szind_t binind; 
arena_bin_info_t *bin_info; cassert(config_fill); @@ -2357,46 +2486,46 @@ arena_quarantine_junk_small(void *ptr, s assert(usize <= SMALL_MAXCLASS); binind = size2index(usize); bin_info = &arena_bin_info[binind]; arena_redzones_validate(ptr, bin_info, true); } static void * -arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; arena_bin_t *bin; size_t usize; arena_run_t *run; assert(binind < NBINS); bin = &arena->bins[binind]; usize = index2size(binind); - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if ((run = bin->runcur) != NULL && run->nfree > 0) ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); else - ret = arena_bin_malloc_hard(arena, bin); + ret = arena_bin_malloc_hard(tsdn, arena, bin); if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); return (NULL); } if (config_stats) { bin->stats.nmalloc++; bin->stats.nrequests++; bin->stats.curregs++; } - malloc_mutex_unlock(&bin->lock); - if (config_prof && !isthreaded && arena_prof_accum(arena, usize)) - prof_idump(); + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) + prof_idump(tsdn); if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (unlikely(opt_zero)) memset(ret, 0, usize); @@ -2406,48 +2535,49 @@ arena_malloc_small(tsd_t *tsd, arena_t * if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); memset(ret, 0, usize); } - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) +arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; size_t usize; 
uintptr_t random_offset; arena_run_t *run; arena_chunk_map_misc_t *miscelm; UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); /* Large allocation. */ usize = index2size(binind); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_cache_oblivious) { uint64_t r; /* * Compute a uniformly distributed offset within the first page * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 * for 4 KiB pages and 64-byte cachelines. */ - r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE); + r = prng_lg_range_zu(&arena->offset_state, LG_PAGE - + LG_CACHELINE, false); random_offset = ((uintptr_t)r) << LG_CACHELINE; } else random_offset = 0; - run = arena_run_alloc_large(arena, usize + large_pad, zero); + run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); if (run == NULL) { - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } miscelm = arena_run_to_miscelm(run); ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + random_offset); if (config_stats) { szind_t index = binind - NBINS; @@ -2455,236 +2585,241 @@ arena_malloc_large(tsd_t *tsd, arena_t * arena->stats.nrequests_large++; arena->stats.allocated_large += usize; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } if (config_prof) idump = arena_prof_accum_locked(arena, usize); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (config_prof && idump) - prof_idump(); + prof_idump(tsdn); if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); + memset(ret, JEMALLOC_ALLOC_JUNK, usize); else if (unlikely(opt_zero)) memset(ret, 0, usize); } } - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, - bool zero, tcache_t *tcache) +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, 
size_t size, szind_t ind, + bool zero) { - arena = arena_choose(tsd, arena); + assert(!tsdn_null(tsdn) || arena != NULL); + + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL)) return (NULL); if (likely(size <= SMALL_MAXCLASS)) - return (arena_malloc_small(tsd, arena, ind, zero)); + return (arena_malloc_small(tsdn, arena, ind, zero)); if (likely(size <= large_maxclass)) - return (arena_malloc_large(tsd, arena, ind, zero)); - return (huge_malloc(tsd, arena, index2size(ind), zero, tcache)); + return (arena_malloc_large(tsdn, arena, ind, zero)); + return (huge_malloc(tsdn, arena, index2size(ind), zero)); } /* Only handles large allocations that require more than page alignment. */ static void * -arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, +arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { void *ret; size_t alloc_size, leadsize, trailsize; arena_run_t *run; arena_chunk_t *chunk; arena_chunk_map_misc_t *miscelm; void *rpages; + assert(!tsdn_null(tsdn) || arena != NULL); assert(usize == PAGE_CEILING(usize)); - arena = arena_choose(tsd, arena); + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL)) return (NULL); alignment = PAGE_CEILING(alignment); alloc_size = usize + large_pad + alignment - PAGE; - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_large(arena, alloc_size, false); + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_large(tsdn, arena, alloc_size, false); if (run == NULL) { - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); miscelm = arena_run_to_miscelm(run); rpages = arena_miscelm_to_rpages(miscelm); leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - (uintptr_t)rpages; assert(alloc_size >= leadsize + usize); trailsize = alloc_size - leadsize - usize 
- large_pad; if (leadsize != 0) { arena_chunk_map_misc_t *head_miscelm = miscelm; arena_run_t *head_run = run; - miscelm = arena_miscelm_get(chunk, + miscelm = arena_miscelm_get_mutable(chunk, arena_miscelm_to_pageind(head_miscelm) + (leadsize >> LG_PAGE)); run = &miscelm->run; - arena_run_trim_head(arena, chunk, head_run, alloc_size, + arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, alloc_size - leadsize); } if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, run, usize + large_pad + + arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + trailsize, usize + large_pad, false); } if (arena_run_init_large(arena, run, usize + large_pad, zero)) { size_t run_ind = arena_miscelm_to_pageind(arena_run_to_miscelm(run)); bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); bool decommitted = (arena_mapbits_decommitted_get(chunk, run_ind) != 0); assert(decommitted); /* Cause of OOM. */ - arena_run_dalloc(arena, run, dirty, false, decommitted); - malloc_mutex_unlock(&arena->lock); + arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } ret = arena_miscelm_to_rpages(miscelm); if (config_stats) { szind_t index = size2index(usize) - NBINS; arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += usize; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (config_fill && !zero) { if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); + memset(ret, JEMALLOC_ALLOC_JUNK, usize); else if (unlikely(opt_zero)) memset(ret, 0, usize); } - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) 
{ void *ret; if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { /* Small; alignment doesn't require special run placement. */ - ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true); } else if (usize <= large_maxclass && alignment <= PAGE) { /* * Large; alignment doesn't require special run placement. * However, the cached pointer may be at a random offset from * the base of the run, so do some bit manipulation to retrieve * the base. */ - ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true); if (config_cache_oblivious) ret = (void *)((uintptr_t)ret & ~PAGE_MASK); } else { if (likely(usize <= large_maxclass)) { - ret = arena_palloc_large(tsd, arena, usize, alignment, + ret = arena_palloc_large(tsdn, arena, usize, alignment, zero); } else if (likely(alignment <= chunksize)) - ret = huge_malloc(tsd, arena, usize, zero, tcache); + ret = huge_malloc(tsdn, arena, usize, zero); else { - ret = huge_palloc(tsd, arena, usize, alignment, zero, - tcache); + ret = huge_palloc(tsdn, arena, usize, alignment, zero); } } return (ret); } void -arena_prof_promoted(const void *ptr, size_t size) +arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) { arena_chunk_t *chunk; size_t pageind; szind_t binind; cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); assert(size <= SMALL_MAXCLASS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; binind = size2index(size); assert(binind < NBINS); arena_mapbits_large_binind_set(chunk, pageind, binind); - assert(isalloc(ptr, 
false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == size); + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == size); } static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { /* Dissociate run from bin. */ if (run == bin->runcur) bin->runcur = NULL; else { szind_t binind = arena_bin_index(extent_node_arena_get( &chunk->node), bin); arena_bin_info_t *bin_info = &arena_bin_info[binind]; + /* + * The following block's conditional is necessary because if the + * run only contains one region, then it never gets inserted + * into the non-full runs tree. + */ if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. - */ - arena_bin_runs_remove(bin, run); + arena_chunk_map_misc_t *miscelm = + arena_run_to_miscelm(run); + + arena_run_heap_remove(&bin->runs, miscelm); } } } static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) +arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin) { assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == - NULL); - - malloc_mutex_unlock(&bin->lock); + + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(&arena->lock); - arena_run_dalloc(arena, run, true, false, false); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_dalloc(tsdn, arena, run, true, false, false); + malloc_mutex_unlock(tsdn, &arena->lock); /****************************/ - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if (config_stats) bin->stats.curruns--; } static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { @@ -2701,112 +2836,113 @@ 
arena_bin_lower_run(arena_t *arena, aren bin->runcur = run; if (config_stats) bin->stats.reruns++; } else arena_bin_runs_insert(bin, run); } static void -arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm, bool junked) +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) { size_t pageind, rpages_ind; arena_run_t *run; arena_bin_t *bin; arena_bin_info_t *bin_info; szind_t binind; pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; + run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; binind = run->binind; bin = &arena->bins[binind]; bin_info = &arena_bin_info[binind]; if (!junked && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, bin_info); arena_run_reg_dalloc(run, ptr); if (run->nfree == bin_info->nregs) { arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); + arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); } else if (run->nfree == 1 && run != bin->runcur) arena_bin_lower_run(arena, chunk, run, bin); if (config_stats) { bin->stats.ndalloc++; bin->stats.curregs--; } } void -arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm) +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) { - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); } void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm) { arena_run_t *run; arena_bin_t *bin; size_t rpages_ind; rpages_ind = 
pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; + run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; bin = &arena->bins[run->binind]; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); - malloc_mutex_unlock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); + malloc_mutex_unlock(tsdn, &bin->lock); } void -arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) +arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind) { arena_chunk_map_bits_t *bitselm; if (config_debug) { /* arena_ptr_small_binind_get() does extra sanity checking. */ assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)) != BININD_INVALID); } - bitselm = arena_bitselm_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); - arena_decay_tick(tsd, arena); + bitselm = arena_bitselm_get_mutable(chunk, pageind); + arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); + arena_decay_tick(tsdn, arena); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) +#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) #endif void arena_dalloc_junk_large(void *ptr, size_t usize) { if (config_fill && unlikely(opt_junk_free)) - memset(ptr, 0x5a, usize); + memset(ptr, JEMALLOC_FREE_JUNK, usize); } #ifdef JEMALLOC_JET #undef arena_dalloc_junk_large #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(arena_dalloc_junk_large_impl); + JEMALLOC_N(n_arena_dalloc_junk_large); #endif static void -arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, - void *ptr, bool junked) +arena_dalloc_large_locked_impl(tsdn_t *tsdn, 
arena_t *arena, + arena_chunk_t *chunk, void *ptr, bool junked) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); arena_run_t *run = &miscelm->run; if (config_fill || config_stats) { size_t usize = arena_mapbits_large_size_get(chunk, pageind) - large_pad; if (!junked) arena_dalloc_junk_large(ptr, usize); @@ -2815,53 +2951,55 @@ arena_dalloc_large_locked_impl(arena_t * arena->stats.ndalloc_large++; arena->stats.allocated_large -= usize; arena->stats.lstats[index].ndalloc++; arena->stats.lstats[index].curruns--; } } - arena_run_dalloc(arena, run, true, false, false); + arena_run_dalloc(tsdn, arena, run, true, false, false); } void -arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr) +arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr) { - arena_dalloc_large_locked_impl(arena, chunk, ptr, true); + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); } void -arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr) +arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr) { - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked_impl(arena, chunk, ptr, false); - malloc_mutex_unlock(&arena->lock); - arena_decay_tick(tsd, arena); + malloc_mutex_lock(tsdn, &arena->lock); + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); + malloc_mutex_unlock(tsdn, &arena->lock); + arena_decay_tick(tsdn, arena); } static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) +arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t size) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = 
arena_miscelm_get(chunk, pageind); + arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, + pageind); arena_run_t *run = &miscelm->run; assert(size < oldsize); /* * Shrink the run, and make trailing pages available for other * allocations. */ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + large_pad, true); if (config_stats) { szind_t oldindex = size2index(oldsize) - NBINS; szind_t index = size2index(size) - NBINS; arena->stats.ndalloc_large++; arena->stats.allocated_large -= oldsize; arena->stats.lstats[oldindex].ndalloc++; @@ -2869,32 +3007,32 @@ arena_ralloc_large_shrink(arena_t *arena arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t usize_min, size_t usize_max, bool zero) +arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t npages = (oldsize + large_pad) >> LG_PAGE; size_t followsize; assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - large_pad); /* Try to extend the run. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, pageind+npages) != 0) goto label_fail; followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); if (oldsize + followsize >= usize_min) { /* * The next run is available and sufficiently large. 
Split the * following run, then merge the first part with the existing @@ -2907,17 +3045,17 @@ arena_ralloc_large_grow(arena_t *arena, while (oldsize + followsize < usize) usize = index2size(size2index(usize)-1); assert(usize >= usize_min); assert(usize >= oldsize); splitsize = usize - oldsize; if (splitsize == 0) goto label_fail; - run = &arena_miscelm_get(chunk, pageind+npages)->run; + run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; if (arena_run_split_large(arena, run, splitsize, zero)) goto label_fail; if (config_cache_oblivious && zero) { /* * Zero the trailing bytes of the original allocation's * last page, since they are in an indeterminate state. * There will always be trailing bytes, because ptr's @@ -2964,87 +3102,88 @@ arena_ralloc_large_grow(arena_t *arena, arena->stats.nmalloc_large++; arena->stats.nrequests_large++; arena->stats.allocated_large += size; arena->stats.lstats[index].nmalloc++; arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } label_fail: - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (true); } #ifdef JEMALLOC_JET #undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) +#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) #endif static void arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) { if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, + memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, old_usize - usize); } } #ifdef JEMALLOC_JET #undef arena_ralloc_junk_large #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(arena_ralloc_junk_large_impl); + JEMALLOC_N(n_arena_ralloc_junk_large); #endif /* * Try to resize a large allocation, in order to 
avoid copying. This will * always fail if growing an object, and the following run is already in use. */ static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, +arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { arena_chunk_t *chunk; arena_t *arena; if (oldsize == usize_max) { /* Current size class is compatible and maximal. */ return (false); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); arena = extent_node_arena_get(&chunk->node); if (oldsize < usize_max) { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, - usize_min, usize_max, zero); + bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, + oldsize, usize_min, usize_max, zero); if (config_fill && !ret && !zero) { if (unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, - isalloc(ptr, config_prof) - oldsize); + memset((void *)((uintptr_t)ptr + oldsize), + JEMALLOC_ALLOC_JUNK, + isalloc(tsdn, ptr, config_prof) - oldsize); } else if (unlikely(opt_zero)) { memset((void *)((uintptr_t)ptr + oldsize), 0, - isalloc(ptr, config_prof) - oldsize); + isalloc(tsdn, ptr, config_prof) - oldsize); } } return (ret); } assert(oldsize > usize_max); /* Fill before shrinking in order avoid a race. */ arena_ralloc_junk_large(ptr, oldsize, usize_max); - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); + arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); return (false); } bool -arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { size_t usize_min, usize_max; /* Calls with non-zero extra had to clamp extra. 
*/ assert(extra == 0 || size + extra <= HUGE_MAXCLASS); if (unlikely(size > HUGE_MAXCLASS)) @@ -3064,42 +3203,42 @@ arena_ralloc_no_move(tsd_t *tsd, void *p oldsize); if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) != size2index(oldsize)) && (size > oldsize || usize_max < oldsize)) return (true); } else { if (usize_max <= SMALL_MAXCLASS) return (true); - if (arena_ralloc_large(ptr, oldsize, usize_min, + if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, usize_max, zero)) return (true); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena_decay_tick(tsd, extent_node_arena_get(&chunk->node)); + arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); return (false); } else { - return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min, + return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, usize_max, zero)); } } static void * -arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment == 0) - return (arena_malloc(tsd, arena, usize, size2index(usize), zero, - tcache, true)); + return (arena_malloc(tsdn, arena, usize, size2index(usize), + zero, tcache, true)); usize = sa2u(usize, alignment); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return (NULL); - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); } void * arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t usize; @@ -3107,65 +3246,66 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, usize = s2u(size); if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) return (NULL); if (likely(usize <= large_maxclass)) { size_t copysize; /* Try to avoid moving the allocation. 
*/ - if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero)) + if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, + zero)) return (ptr); /* * size and oldsize are different enough that we need to move * the object. In that case, fall back to allocating new space * and copying. */ - ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, - zero, tcache); + ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, + alignment, zero, tcache); if (ret == NULL) return (NULL); /* * Junk/zero-filling were already done by * ipalloc()/arena_malloc(). */ copysize = (usize < oldsize) ? usize : oldsize; JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); + isqalloc(tsd, ptr, oldsize, tcache, true); } else { ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, zero, tcache); } return (ret); } dss_prec_t -arena_dss_prec_get(arena_t *arena) +arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) { dss_prec_t ret; - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (ret); } bool -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) +arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) { if (!have_dss) return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } ssize_t arena_lg_dirty_mult_default_get(void) { return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); @@ -3203,51 +3343,53 @@ arena_decay_time_default_set(ssize_t dec } static void arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty) { - *nthreads += arena_nthreads_get(arena); + 
*nthreads += arena_nthreads_get(arena, false); *dss = dss_prec_names[arena->dss_prec]; *lg_dirty_mult = arena->lg_dirty_mult; - *decay_time = arena->decay_time; + *decay_time = arena->decay.time; *nactive += arena->nactive; *ndirty += arena->ndirty; } void -arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss, - ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, - size_t *ndirty) +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty) { - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, decay_time, nactive, ndirty); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss, - ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, + size_t *nactive, size_t *ndirty, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, + malloc_huge_stats_t *hstats) { unsigned i; cassert(config_stats); - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, decay_time, nactive, ndirty); astats->mapped += arena->stats.mapped; + astats->retained += arena->stats.retained; astats->npurge += arena->stats.npurge; astats->nmadvise += arena->stats.nmadvise; astats->purged += arena->stats.purged; astats->metadata_mapped += arena->stats.metadata_mapped; astats->metadata_allocated += arena_metadata_allocated_get(arena); astats->allocated_large += 
arena->stats.allocated_large; astats->nmalloc_large += arena->stats.nmalloc_large; astats->ndalloc_large += arena->stats.ndalloc_large; @@ -3263,95 +3405,91 @@ arena_stats_merge(arena_t *arena, unsign lstats[i].curruns += arena->stats.lstats[i].curruns; } for (i = 0; i < nhclasses; i++) { hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].nrequests += bin->stats.nrequests; bstats[i].curregs += bin->stats.curregs; if (config_tcache) { bstats[i].nfills += bin->stats.nfills; bstats[i].nflushes += bin->stats.nflushes; } bstats[i].nruns += bin->stats.nruns; bstats[i].reruns += bin->stats.reruns; bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); } } unsigned -arena_nthreads_get(arena_t *arena) +arena_nthreads_get(arena_t *arena, bool internal) { - return (atomic_read_u(&arena->nthreads)); -} - -void -arena_nthreads_inc(arena_t *arena) -{ - - atomic_add_u(&arena->nthreads, 1); + return (atomic_read_u(&arena->nthreads[internal])); } void -arena_nthreads_dec(arena_t *arena) +arena_nthreads_inc(arena_t *arena, bool internal) { - atomic_sub_u(&arena->nthreads, 1); + atomic_add_u(&arena->nthreads[internal], 1); +} + +void +arena_nthreads_dec(arena_t *arena, bool internal) +{ + + atomic_sub_u(&arena->nthreads[internal], 1); } arena_t * -arena_new(unsigned ind) +arena_new(tsdn_t *tsdn, unsigned ind) { arena_t *arena; - size_t arena_size; unsigned i; - arena_bin_t *bin; - - /* Compute arena size to incorporate sufficient runs_avail elements. 
*/ - arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) * - runs_avail_nclasses); + /* * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly * because there is no way to clean up if base_alloc() OOMs. */ if (config_stats) { - arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) + - QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + - nhclasses) * sizeof(malloc_huge_stats_t)); + arena = (arena_t *)base_alloc(tsdn, + CACHELINE_CEILING(sizeof(arena_t)) + + QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t))) + + (nhclasses * sizeof(malloc_huge_stats_t))); } else - arena = (arena_t *)base_alloc(arena_size); + arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t)); if (arena == NULL) return (NULL); arena->ind = ind; - arena->nthreads = 0; - if (malloc_mutex_init(&arena->lock)) + arena->nthreads[0] = arena->nthreads[1] = 0; + if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) return (NULL); if (config_stats) { memset(&arena->stats, 0, sizeof(arena_stats_t)); arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(arena_size)); + + CACHELINE_CEILING(sizeof(arena_t))); memset(arena->stats.lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(arena_size) + + + CACHELINE_CEILING(sizeof(arena_t)) + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); memset(arena->stats.hstats, 0, nhclasses * sizeof(malloc_huge_stats_t)); if (config_tcache) ql_new(&arena->tcache_ql); } if (config_prof) @@ -3361,59 +3499,66 @@ arena_new(unsigned ind) /* * A nondeterministic seed based on the address of arena reduces * the likelihood of lockstep non-uniform cache index * utilization among identical concurrent processes, but at the * cost of test repeatability. For debug builds, instead use a * deterministic seed. */ arena->offset_state = config_debug ? 
ind : - (uint64_t)(uintptr_t)arena; + (size_t)(uintptr_t)arena; } arena->dss_prec = chunk_dss_prec_get(); + ql_new(&arena->achunks); + arena->spare = NULL; arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); arena->purging = false; arena->nactive = 0; arena->ndirty = 0; - for(i = 0; i < runs_avail_nclasses; i++) - arena_run_tree_new(&arena->runs_avail[i]); + for (i = 0; i < NPSIZES; i++) + arena_run_heap_new(&arena->runs_avail[i]); + qr_new(&arena->runs_dirty, rd_link); qr_new(&arena->chunks_cache, cc_link); if (opt_purge == purge_mode_decay) arena_decay_init(arena, arena_decay_time_default_get()); ql_new(&arena->huge); - if (malloc_mutex_init(&arena->huge_mtx)) + if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", + WITNESS_RANK_ARENA_HUGE)) return (NULL); extent_tree_szad_new(&arena->chunks_szad_cached); extent_tree_ad_new(&arena->chunks_ad_cached); extent_tree_szad_new(&arena->chunks_szad_retained); extent_tree_ad_new(&arena->chunks_ad_retained); - if (malloc_mutex_init(&arena->chunks_mtx)) + if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", + WITNESS_RANK_ARENA_CHUNKS)) return (NULL); ql_new(&arena->node_cache); - if (malloc_mutex_init(&arena->node_cache_mtx)) + if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", + WITNESS_RANK_ARENA_NODE_CACHE)) return (NULL); arena->chunk_hooks = chunk_hooks_default; /* Initialize bins. */ for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) + arena_bin_t *bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock, "arena_bin", + WITNESS_RANK_ARENA_BIN)) return (NULL); bin->runcur = NULL; - arena_run_tree_new(&bin->runs); + arena_run_heap_new(&bin->runs); if (config_stats) memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); } return (arena); } /* @@ -3500,98 +3645,40 @@ bin_info_run_size_calc(arena_bin_info_t assert(actual_run_size == s2u(actual_run_size)); /* Copy final settings. 
*/ bin_info->run_size = actual_run_size; bin_info->nregs = actual_nregs; bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs * bin_info->reg_interval) - pad_size + bin_info->redzone_size); - if (actual_run_size > small_maxrun) - small_maxrun = actual_run_size; - assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs * bin_info->reg_interval) + pad_size == bin_info->run_size); } static void bin_info_init(void) { arena_bin_info_t *bin_info; #define BIN_INFO_INIT_bin_yes(index, size) \ bin_info = &arena_bin_info[index]; \ bin_info->reg_size = size; \ bin_info_run_size_calc(bin_info); \ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); #define BIN_INFO_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) SIZE_CLASSES #undef BIN_INFO_INIT_bin_yes #undef BIN_INFO_INIT_bin_no #undef SC } -static bool -small_run_size_init(void) -{ - - assert(small_maxrun != 0); - - small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >> - LG_PAGE)); - if (small_run_tab == NULL) - return (true); - -#define TAB_INIT_bin_yes(index, size) { \ - arena_bin_info_t *bin_info = &arena_bin_info[index]; \ - small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ - } -#define TAB_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) - SIZE_CLASSES -#undef TAB_INIT_bin_yes -#undef TAB_INIT_bin_no -#undef SC - - return (false); -} - -static bool -run_quantize_init(void) -{ - unsigned i; - - run_quantize_max = chunksize + large_pad; - - run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) * - (run_quantize_max >> LG_PAGE)); - if (run_quantize_floor_tab == NULL) - return (true); - - run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) * - 
(run_quantize_max >> LG_PAGE)); - if (run_quantize_ceil_tab == NULL) - return (true); - - for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) { - size_t run_size = i << LG_PAGE; - - run_quantize_floor_tab[i-1] = - run_quantize_floor_compute(run_size); - run_quantize_ceil_tab[i-1] = - run_quantize_ceil_compute(run_size); - } - - return (false); -} - -bool +void arena_boot(void) { unsigned i; arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); arena_decay_time_default_set(opt_decay_time); /* @@ -3629,75 +3716,66 @@ arena_boot(void) */ large_maxclass = arena_maxrun; } assert(large_maxclass > 0); nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); nhclasses = NSIZES - nlclasses - NBINS; bin_info_init(); - if (small_run_size_init()) - return (true); - if (run_quantize_init()) - return (true); - - runs_avail_bias = size2index(PAGE); - runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias; - - return (false); +} + +void +arena_prefork0(tsdn_t *tsdn, arena_t *arena) +{ + + malloc_mutex_prefork(tsdn, &arena->lock); } void -arena_prefork0(arena_t *arena) +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(&arena->lock); + malloc_mutex_prefork(tsdn, &arena->chunks_mtx); } void -arena_prefork1(arena_t *arena) +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(&arena->chunks_mtx); + malloc_mutex_prefork(tsdn, &arena->node_cache_mtx); } void -arena_prefork2(arena_t *arena) -{ - - malloc_mutex_prefork(&arena->node_cache_mtx); -} - -void -arena_prefork3(arena_t *arena) +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { unsigned i; for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); - malloc_mutex_prefork(&arena->huge_mtx); + malloc_mutex_prefork(tsdn, &arena->bins[i].lock); + malloc_mutex_prefork(tsdn, &arena->huge_mtx); } void -arena_postfork_parent(arena_t *arena) +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; - malloc_mutex_postfork_parent(&arena->huge_mtx); + 
malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx); for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->node_cache_mtx); - malloc_mutex_postfork_parent(&arena->chunks_mtx); - malloc_mutex_postfork_parent(&arena->lock); + malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->lock); } void -arena_postfork_child(arena_t *arena) +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; - malloc_mutex_postfork_child(&arena->huge_mtx); + malloc_mutex_postfork_child(tsdn, &arena->huge_mtx); for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->node_cache_mtx); - malloc_mutex_postfork_child(&arena->chunks_mtx); - malloc_mutex_postfork_child(&arena->lock); + malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_child(tsdn, &arena->lock); }
--- a/memory/jemalloc/src/src/base.c +++ b/memory/jemalloc/src/src/base.c @@ -8,57 +8,59 @@ static malloc_mutex_t base_mtx; static extent_tree_t base_avail_szad; static extent_node_t *base_nodes; static size_t base_allocated; static size_t base_resident; static size_t base_mapped; /******************************************************************************/ -/* base_mtx must be held. */ static extent_node_t * -base_node_try_alloc(void) +base_node_try_alloc(tsdn_t *tsdn) { extent_node_t *node; + malloc_mutex_assert_owner(tsdn, &base_mtx); + if (base_nodes == NULL) return (NULL); node = base_nodes; base_nodes = *(extent_node_t **)node; JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); return (node); } -/* base_mtx must be held. */ static void -base_node_dalloc(extent_node_t *node) +base_node_dalloc(tsdn_t *tsdn, extent_node_t *node) { + malloc_mutex_assert_owner(tsdn, &base_mtx); + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); *(extent_node_t **)node = base_nodes; base_nodes = node; } -/* base_mtx must be held. */ static extent_node_t * -base_chunk_alloc(size_t minsize) +base_chunk_alloc(tsdn_t *tsdn, size_t minsize) { extent_node_t *node; size_t csize, nsize; void *addr; + malloc_mutex_assert_owner(tsdn, &base_mtx); assert(minsize != 0); - node = base_node_try_alloc(); + node = base_node_try_alloc(tsdn); /* Allocate enough space to also carve a node out if necessary. */ nsize = (node == NULL) ? 
CACHELINE_CEILING(sizeof(extent_node_t)) : 0; csize = CHUNK_CEILING(minsize + nsize); addr = chunk_alloc_base(csize); if (addr == NULL) { if (node != NULL) - base_node_dalloc(node); + base_node_dalloc(tsdn, node); return (NULL); } base_mapped += csize; if (node == NULL) { node = (extent_node_t *)addr; addr = (void *)((uintptr_t)addr + nsize); csize -= nsize; if (config_stats) { @@ -71,104 +73,105 @@ base_chunk_alloc(size_t minsize) } /* * base_alloc() guarantees demand-zeroed memory, in order to make multi-page * sparse data structures such as radix tree nodes efficient with respect to * physical memory usage. */ void * -base_alloc(size_t size) +base_alloc(tsdn_t *tsdn, size_t size) { void *ret; size_t csize, usize; extent_node_t *node; extent_node_t key; /* * Round size up to nearest multiple of the cacheline size, so that * there is no chance of false cache line sharing. */ csize = CACHELINE_CEILING(size); usize = s2u(csize); extent_node_init(&key, NULL, NULL, usize, false, false); - malloc_mutex_lock(&base_mtx); + malloc_mutex_lock(tsdn, &base_mtx); node = extent_tree_szad_nsearch(&base_avail_szad, &key); if (node != NULL) { /* Use existing space. */ extent_tree_szad_remove(&base_avail_szad, node); } else { /* Try to allocate more space. */ - node = base_chunk_alloc(csize); + node = base_chunk_alloc(tsdn, csize); } if (node == NULL) { ret = NULL; goto label_return; } ret = extent_node_addr_get(node); if (extent_node_size_get(node) > csize) { extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); extent_node_size_set(node, extent_node_size_get(node) - csize); extent_tree_szad_insert(&base_avail_szad, node); } else - base_node_dalloc(node); + base_node_dalloc(tsdn, node); if (config_stats) { base_allocated += csize; /* * Add one PAGE to base_resident for every page boundary that is * crossed by the new allocation. 
*/ base_resident += PAGE_CEILING((uintptr_t)ret + csize) - PAGE_CEILING((uintptr_t)ret); } JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); label_return: - malloc_mutex_unlock(&base_mtx); + malloc_mutex_unlock(tsdn, &base_mtx); return (ret); } void -base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) +base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, + size_t *mapped) { - malloc_mutex_lock(&base_mtx); + malloc_mutex_lock(tsdn, &base_mtx); assert(base_allocated <= base_resident); assert(base_resident <= base_mapped); *allocated = base_allocated; *resident = base_resident; *mapped = base_mapped; - malloc_mutex_unlock(&base_mtx); + malloc_mutex_unlock(tsdn, &base_mtx); } bool base_boot(void) { - if (malloc_mutex_init(&base_mtx)) + if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE)) return (true); extent_tree_szad_new(&base_avail_szad); base_nodes = NULL; return (false); } void -base_prefork(void) +base_prefork(tsdn_t *tsdn) { - malloc_mutex_prefork(&base_mtx); + malloc_mutex_prefork(tsdn, &base_mtx); } void -base_postfork_parent(void) +base_postfork_parent(tsdn_t *tsdn) { - malloc_mutex_postfork_parent(&base_mtx); + malloc_mutex_postfork_parent(tsdn, &base_mtx); } void -base_postfork_child(void) +base_postfork_child(tsdn_t *tsdn) { - malloc_mutex_postfork_child(&base_mtx); + malloc_mutex_postfork_child(tsdn, &base_mtx); }
--- a/memory/jemalloc/src/src/bitmap.c +++ b/memory/jemalloc/src/src/bitmap.c @@ -69,44 +69,41 @@ bitmap_init(bitmap_t *bitmap, const bitm } } #else /* USE_TREE */ void bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { - size_t i; assert(nbits > 0); assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); - i = nbits >> LG_BITMAP_GROUP_NBITS; - if (nbits % BITMAP_GROUP_NBITS != 0) - i++; - binfo->ngroups = i; + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); binfo->nbits = nbits; } static size_t bitmap_info_ngroups(const bitmap_info_t *binfo) { return (binfo->ngroups); } void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t extra; memset(bitmap, 0xffU, bitmap_size(binfo)); - extra = (binfo->nbits % (binfo->ngroups * BITMAP_GROUP_NBITS)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; if (extra != 0) - bitmap[binfo->ngroups - 1] >>= (BITMAP_GROUP_NBITS - extra); + bitmap[binfo->ngroups - 1] >>= extra; } #endif /* USE_TREE */ size_t bitmap_size(const bitmap_info_t *binfo) {
--- a/memory/jemalloc/src/src/chunk.c +++ b/memory/jemalloc/src/src/chunk.c @@ -44,47 +44,48 @@ const chunk_hooks_t chunk_hooks_default }; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ -static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *chunk, size_t size, bool zeroed, bool committed); +static void chunk_record(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, + extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed, + bool committed); /******************************************************************************/ static chunk_hooks_t chunk_hooks_get_locked(arena_t *arena) { return (arena->chunk_hooks); } chunk_hooks_t -chunk_hooks_get(arena_t *arena) +chunk_hooks_get(tsdn_t *tsdn, arena_t *arena) { chunk_hooks_t chunk_hooks; - malloc_mutex_lock(&arena->chunks_mtx); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); chunk_hooks = chunk_hooks_get_locked(arena); - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (chunk_hooks); } chunk_hooks_t -chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) +chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks) { chunk_hooks_t old_chunk_hooks; - malloc_mutex_lock(&arena->chunks_mtx); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); old_chunk_hooks = arena->chunk_hooks; /* * Copy each field atomically so that it is impossible for readers to * see partially updated pointers. There are places where readers only * need one hook function pointer (therefore no need to copy the * entirety of arena->chunk_hooks), and stale reads do not affect * correctness, so they perform unlocked reads. 
*/ @@ -99,52 +100,53 @@ chunk_hooks_set(arena_t *arena, const ch ATOMIC_COPY_HOOK(alloc); ATOMIC_COPY_HOOK(dalloc); ATOMIC_COPY_HOOK(commit); ATOMIC_COPY_HOOK(decommit); ATOMIC_COPY_HOOK(purge); ATOMIC_COPY_HOOK(split); ATOMIC_COPY_HOOK(merge); #undef ATOMIC_COPY_HOOK - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (old_chunk_hooks); } static void -chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, - bool locked) +chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks, bool locked) { static const chunk_hooks_t uninitialized_hooks = CHUNK_HOOKS_INITIALIZER; if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == 0) { *chunk_hooks = locked ? chunk_hooks_get_locked(arena) : - chunk_hooks_get(arena); + chunk_hooks_get(tsdn, arena); } } static void -chunk_hooks_assure_initialized_locked(arena_t *arena, +chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks) { - chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true); + chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true); } static void -chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks) +chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena, + chunk_hooks_t *chunk_hooks) { - chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false); + chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false); } bool -chunk_register(const void *chunk, const extent_node_t *node) +chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node) { assert(extent_node_addr_get(node) == chunk); if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node)) return (true); if (config_prof && opt_prof) { size_t size = extent_node_size_get(node); @@ -154,17 +156,17 @@ chunk_register(const void *chunk, const while (cur > high && atomic_cas_z(&highchunks, high, cur)) { /* * Don't refresh cur, because it may 
have decreased * since this thread lost the highchunks update race. */ high = atomic_read_z(&highchunks); } if (cur > high && prof_gdump_get_unlocked()) - prof_gdump(); + prof_gdump(tsdn); } return (false); } void chunk_deregister(const void *chunk, const extent_node_t *node) { @@ -192,17 +194,17 @@ chunk_first_best_fit(arena_t *arena, ext assert(size == CHUNK_CEILING(size)); extent_node_init(&key, arena, NULL, size, false, false); return (extent_tree_szad_nsearch(chunks_szad, &key)); } static void * -chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, +chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, bool dalloc_node) { void *ret; extent_node_t *node; size_t alloc_size, leadsize, trailsize; bool zeroed, committed; @@ -214,30 +216,30 @@ chunk_recycle(arena_t *arena, chunk_hook * we're operating on a specific chunk. */ assert(dalloc_node || new_addr != NULL); alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize)); /* Beware size_t wrap-around. 
*/ if (alloc_size < size) return (NULL); - malloc_mutex_lock(&arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(arena, chunk_hooks); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); if (new_addr != NULL) { extent_node_t key; extent_node_init(&key, arena, new_addr, alloc_size, false, false); node = extent_tree_ad_search(chunks_ad, &key); } else { node = chunk_first_best_fit(arena, chunks_szad, chunks_ad, alloc_size); } if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < size)) { - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (NULL); } leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), alignment) - (uintptr_t)extent_node_addr_get(node); assert(new_addr == NULL || leadsize == 0); assert(extent_node_size_get(node) >= leadsize + size); trailsize = extent_node_size_get(node) - leadsize - size; ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); @@ -246,17 +248,17 @@ chunk_recycle(arena_t *arena, chunk_hook *zero = true; committed = extent_node_committed_get(node); if (committed) *commit = true; /* Split the lead. */ if (leadsize != 0 && chunk_hooks->split(extent_node_addr_get(node), extent_node_size_get(node), leadsize, size, false, arena->ind)) { - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); return (NULL); } /* Remove node from the tree. */ extent_tree_szad_remove(chunks_szad, node); extent_tree_ad_remove(chunks_ad, node); arena_chunk_cache_maybe_remove(arena, node, cache); if (leadsize != 0) { /* Insert the leading space as a smaller chunk. */ @@ -266,96 +268,98 @@ chunk_recycle(arena_t *arena, chunk_hook arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; } if (trailsize != 0) { /* Split the trail. 
*/ if (chunk_hooks->split(ret, size + trailsize, size, trailsize, false, arena->ind)) { if (dalloc_node && node != NULL) - arena_node_dalloc(arena, node); - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, - cache, ret, size + trailsize, zeroed, committed); + arena_node_dalloc(tsdn, arena, node); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, chunks_szad, + chunks_ad, cache, ret, size + trailsize, zeroed, + committed); return (NULL); } /* Insert the trailing space as a smaller chunk. */ if (node == NULL) { - node = arena_node_alloc(arena); + node = arena_node_alloc(tsdn, arena); if (node == NULL) { - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, - chunks_ad, cache, ret, size + trailsize, - zeroed, committed); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, + chunks_szad, chunks_ad, cache, ret, size + + trailsize, zeroed, committed); return (NULL); } } extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), trailsize, zeroed, committed); extent_tree_szad_insert(chunks_szad, node); extent_tree_ad_insert(chunks_ad, node); arena_chunk_cache_maybe_insert(arena, node, cache); node = NULL; } if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, - ret, size, zeroed, committed); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); + chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad, + cache, ret, size, zeroed, committed); return (NULL); } - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); assert(dalloc_node || node != NULL); if (dalloc_node && node != NULL) - arena_node_dalloc(arena, node); + arena_node_dalloc(tsdn, arena, node); if (*zero) { if (!zeroed) memset(ret, 0, size); else if (config_debug) { size_t i; 
size_t *p = (size_t *)(uintptr_t)ret; - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); for (i = 0; i < size / sizeof(size_t); i++) assert(p[i] == 0); } + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); } return (ret); } /* * If the caller specifies (!*zero), it is still possible to receive zeroed * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes * advantage of this to avoid demanding zeroed chunks, but taking advantage of * them if they are returned. */ static void * -chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit, dss_prec_t dss_prec) +chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { void *ret; assert(size != 0); assert((size & chunksize_mask) == 0); assert(alignment != 0); assert((alignment & chunksize_mask) == 0); /* "primary" dss. */ if (have_dss && dss_prec == dss_prec_primary && (ret = - chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != - NULL) + chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) return (ret); /* mmap. */ if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) != NULL) return (ret); /* "secondary" dss. */ if (have_dss && dss_prec == dss_prec_secondary && (ret = - chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != - NULL) + chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) return (ret); /* All strategies for allocation failed. 
*/ return (NULL); } void * chunk_alloc_base(size_t size) @@ -375,123 +379,148 @@ chunk_alloc_base(size_t size) return (NULL); if (config_valgrind) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } void * -chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, - size_t size, size_t alignment, bool *zero, bool dalloc_node) +chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, + bool dalloc_node) { void *ret; - bool commit; assert(size != 0); assert((size & chunksize_mask) == 0); assert(alignment != 0); assert((alignment & chunksize_mask) == 0); - commit = true; - ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached, - &arena->chunks_ad_cached, true, new_addr, size, alignment, zero, - &commit, dalloc_node); + ret = chunk_recycle(tsdn, arena, chunk_hooks, + &arena->chunks_szad_cached, &arena->chunks_ad_cached, true, + new_addr, size, alignment, zero, commit, dalloc_node); if (ret == NULL) return (NULL); - assert(commit); if (config_valgrind) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } static arena_t * -chunk_arena_get(unsigned arena_ind) +chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind) { arena_t *arena; - arena = arena_get(arena_ind, false); + arena = arena_get(tsdn, arena_ind, false); /* * The arena we're allocating on behalf of must have been initialized * already. 
*/ assert(arena != NULL); return (arena); } static void * -chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit, unsigned arena_ind) +chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; - arena_t *arena; - arena = chunk_arena_get(arena_ind); - ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit, - arena->dss_prec); + ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero, + commit, arena->dss_prec); if (ret == NULL) return (NULL); if (config_valgrind) JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); return (ret); } static void * -chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit) +chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit, unsigned arena_ind) { + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = chunk_arena_get(tsdn, arena_ind); + + return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment, + zero, commit)); +} + +static void * +chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) +{ + void *ret; assert(size != 0); assert((size & chunksize_mask) == 0); assert(alignment != 0); assert((alignment & chunksize_mask) == 0); - return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained, - &arena->chunks_ad_retained, false, new_addr, size, alignment, zero, - commit, true)); + ret = chunk_recycle(tsdn, arena, chunk_hooks, + &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, + new_addr, size, alignment, zero, commit, true); + + if (config_stats && ret != NULL) + arena->stats.retained -= size; + + return (ret); } void * -chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, - size_t size, size_t 
alignment, bool *zero, bool *commit) +chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; - chunk_hooks_assure_initialized(arena, chunk_hooks); + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); - ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size, + ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size, alignment, zero, commit); if (ret == NULL) { - ret = chunk_hooks->alloc(new_addr, size, alignment, zero, - commit, arena->ind); + if (chunk_hooks->alloc == chunk_alloc_default) { + /* Call directly to propagate tsdn. */ + ret = chunk_alloc_default_impl(tsdn, arena, new_addr, + size, alignment, zero, commit); + } else { + ret = chunk_hooks->alloc(new_addr, size, alignment, + zero, commit, arena->ind); + } + if (ret == NULL) return (NULL); + + if (config_valgrind && chunk_hooks->alloc != + chunk_alloc_default) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); } - if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); return (ret); } static void -chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, +chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed, bool committed) { bool unzeroed; extent_node_t *node, *prev; extent_node_t key; assert(!cache || !zeroed); unzeroed = cache || !zeroed; JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); - malloc_mutex_lock(&arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(arena, chunk_hooks); + malloc_mutex_lock(tsdn, &arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks); extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, false, false); node = extent_tree_ad_nsearch(chunks_ad, &key); /* Try to coalesce forward. 
*/ if (node != NULL && extent_node_addr_get(node) == extent_node_addr_get(&key) && extent_node_committed_get(node) == committed && !chunk_hooks->merge(chunk, size, extent_node_addr_get(node), extent_node_size_get(node), false, @@ -506,27 +535,27 @@ chunk_record(arena_t *arena, chunk_hooks extent_node_addr_set(node, chunk); extent_node_size_set(node, size + extent_node_size_get(node)); extent_node_zeroed_set(node, extent_node_zeroed_get(node) && !unzeroed); extent_tree_szad_insert(chunks_szad, node); arena_chunk_cache_maybe_insert(arena, node, cache); } else { /* Coalescing forward failed, so insert a new node. */ - node = arena_node_alloc(arena); + node = arena_node_alloc(tsdn, arena); if (node == NULL) { /* * Node allocation failed, which is an exceedingly * unlikely failure. Leak chunk after making sure its * pages have already been purged, so that this is only * a virtual memory leak. */ if (cache) { - chunk_purge_wrapper(arena, chunk_hooks, chunk, - size, 0, size); + chunk_purge_wrapper(tsdn, arena, chunk_hooks, + chunk, size, 0, size); } goto label_return; } extent_node_init(node, arena, chunk, size, !unzeroed, committed); extent_tree_ad_insert(chunks_ad, node); extent_tree_szad_insert(chunks_szad, node); arena_chunk_cache_maybe_insert(arena, node, cache); @@ -552,71 +581,88 @@ chunk_record(arena_t *arena, chunk_hooks extent_node_addr_set(node, extent_node_addr_get(prev)); extent_node_size_set(node, extent_node_size_get(prev) + extent_node_size_get(node)); extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && extent_node_zeroed_get(node)); extent_tree_szad_insert(chunks_szad, node); arena_chunk_cache_maybe_insert(arena, node, cache); - arena_node_dalloc(arena, prev); + arena_node_dalloc(tsdn, arena, prev); } label_return: - malloc_mutex_unlock(&arena->chunks_mtx); + malloc_mutex_unlock(tsdn, &arena->chunks_mtx); } void -chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool committed) +chunk_dalloc_cache(tsdn_t 
*tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, bool committed) { assert(chunk != NULL); assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(size != 0); assert((size & chunksize_mask) == 0); - chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached, + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached, &arena->chunks_ad_cached, true, chunk, size, false, committed); - arena_maybe_purge(arena); + arena_maybe_purge(tsdn, arena); +} + +static bool +chunk_dalloc_default_impl(void *chunk, size_t size) +{ + + if (!have_dss || !chunk_in_dss(chunk)) + return (chunk_dalloc_mmap(chunk, size)); + return (true); } static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, unsigned arena_ind) { - if (!have_dss || !chunk_in_dss(chunk)) - return (chunk_dalloc_mmap(chunk, size)); - return (true); + return (chunk_dalloc_default_impl(chunk, size)); } void -chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool zeroed, bool committed) +chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, bool zeroed, bool committed) { + bool err; assert(chunk != NULL); assert(CHUNK_ADDR2BASE(chunk) == chunk); assert(size != 0); assert((size & chunksize_mask) == 0); - chunk_hooks_assure_initialized(arena, chunk_hooks); + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); /* Try to deallocate. */ - if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind)) + if (chunk_hooks->dalloc == chunk_dalloc_default) { + /* Call directly to propagate tsdn. */ + err = chunk_dalloc_default_impl(chunk, size); + } else + err = chunk_hooks->dalloc(chunk, size, committed, arena->ind); + + if (!err) return; /* Try to decommit; purge if that fails. 
*/ if (committed) { committed = chunk_hooks->decommit(chunk, size, 0, size, arena->ind); } zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, arena->ind); - chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained, + chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, chunk, size, zeroed, committed); + + if (config_stats) + arena->stats.retained += size; } static bool chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset), @@ -643,52 +689,59 @@ chunk_purge_default(void *chunk, size_t assert(length != 0); assert((length & PAGE_MASK) == 0); return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset), length)); } bool -chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, size_t offset, size_t length) +chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t offset, size_t length) { - chunk_hooks_assure_initialized(arena, chunk_hooks); + chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks); return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); } static bool chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { if (!maps_coalesce) return (true); return (false); } static bool +chunk_merge_default_impl(void *chunk_a, void *chunk_b) +{ + + if (!maps_coalesce) + return (true); + if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b)) + return (true); + + return (false); +} + +static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, bool committed, unsigned arena_ind) { - if (!maps_coalesce) - return (true); - if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b)) - return (true); - - return (false); + return (chunk_merge_default_impl(chunk_a, chunk_b)); } static 
rtree_node_elm_t * chunks_rtree_node_alloc(size_t nelms) { - return ((rtree_node_elm_t *)base_alloc(nelms * + return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms * sizeof(rtree_node_elm_t))); } bool chunk_boot(void) { #ifdef _WIN32 SYSTEM_INFO info; @@ -715,37 +768,16 @@ chunk_boot(void) #endif /* Set variables according to the value of opt_lg_chunk. */ chunksize = (ZU(1) << opt_lg_chunk); assert(chunksize >= PAGE); chunksize_mask = chunksize - 1; chunk_npages = (chunksize >> LG_PAGE); - if (have_dss && chunk_dss_boot()) - return (true); + if (have_dss) + chunk_dss_boot(); if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk), chunks_rtree_node_alloc, NULL)) return (true); return (false); } - -void -chunk_prefork(void) -{ - - chunk_dss_prefork(); -} - -void -chunk_postfork_parent(void) -{ - - chunk_dss_postfork_parent(); -} - -void -chunk_postfork_child(void) -{ - - chunk_dss_postfork_child(); -}
--- a/memory/jemalloc/src/src/chunk_dss.c +++ b/memory/jemalloc/src/src/chunk_dss.c @@ -5,30 +5,29 @@ const char *dss_prec_names[] = { "disabled", "primary", "secondary", "N/A" }; -/* Current dss precedence default, used when creating new arenas. */ -static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; - /* - * Protects sbrk() calls. This avoids malloc races among threads, though it - * does not protect against races with threads that call sbrk() directly. + * Current dss precedence default, used when creating new arenas. NB: This is + * stored as unsigned rather than dss_prec_t because in principle there's no + * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use + * atomic operations to synchronize the setting. */ -static malloc_mutex_t dss_mtx; +static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT; /* Base address of the DSS. */ static void *dss_base; -/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ -static void *dss_prev; -/* Current upper limit on DSS addresses. */ +/* Atomic boolean indicating whether the DSS is exhausted. */ +static unsigned dss_exhausted; +/* Atomic current upper limit on DSS addresses. 
*/ static void *dss_max; /******************************************************************************/ static void * chunk_dss_sbrk(intptr_t increment) { @@ -42,71 +41,92 @@ chunk_dss_sbrk(intptr_t increment) dss_prec_t chunk_dss_prec_get(void) { dss_prec_t ret; if (!have_dss) return (dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - ret = dss_prec_default; - malloc_mutex_unlock(&dss_mtx); + ret = (dss_prec_t)atomic_read_u(&dss_prec_default); return (ret); } bool chunk_dss_prec_set(dss_prec_t dss_prec) { if (!have_dss) return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - dss_prec_default = dss_prec; - malloc_mutex_unlock(&dss_mtx); + atomic_write_u(&dss_prec_default, (unsigned)dss_prec); return (false); } +static void * +chunk_dss_max_update(void *new_addr) +{ + void *max_cur; + spin_t spinner; + + /* + * Get the current end of the DSS as max_cur and assure that dss_max is + * up to date. + */ + spin_init(&spinner); + while (true) { + void *max_prev = atomic_read_p(&dss_max); + + max_cur = chunk_dss_sbrk(0); + if ((uintptr_t)max_prev > (uintptr_t)max_cur) { + /* + * Another thread optimistically updated dss_max. Wait + * for it to finish. + */ + spin_adaptive(&spinner); + continue; + } + if (!atomic_cas_p(&dss_max, max_prev, max_cur)) + break; + } + /* Fixed new_addr can only be supported if it is at the edge of DSS. */ + if (new_addr != NULL && max_cur != new_addr) + return (NULL); + + return (max_cur); +} + void * -chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit) +chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) { cassert(have_dss); assert(size > 0 && (size & chunksize_mask) == 0); assert(alignment > 0 && (alignment & chunksize_mask) == 0); /* * sbrk() uses a signed increment argument, so take care not to * interpret a huge allocation request as a negative increment. 
*/ if ((intptr_t)size < 0) return (NULL); - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - + if (!atomic_read_u(&dss_exhausted)) { /* * The loop is necessary to recover from races with other * threads that are using the DSS for something other than * malloc. */ - do { - void *ret, *cpad, *dss_next; + while (true) { + void *ret, *cpad, *max_cur, *dss_next, *dss_prev; size_t gap_size, cpad_size; intptr_t incr; - /* Avoid an unnecessary system call. */ - if (new_addr != NULL && dss_max != new_addr) - break; - /* Get the current end of the DSS. */ - dss_max = chunk_dss_sbrk(0); - - /* Make sure the earlier condition still holds. */ - if (new_addr != NULL && dss_max != new_addr) - break; + max_cur = chunk_dss_max_update(new_addr); + if (max_cur == NULL) + goto label_oom; /* * Calculate how much padding is necessary to * chunk-align the end of the DSS. */ gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & chunksize_mask; /* @@ -115,100 +135,103 @@ chunk_alloc_dss(arena_t *arena, void *ne * recycled for later use. */ cpad = (void *)((uintptr_t)dss_max + gap_size); ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, alignment); cpad_size = (uintptr_t)ret - (uintptr_t)cpad; dss_next = (void *)((uintptr_t)ret + size); if ((uintptr_t)ret < (uintptr_t)dss_max || - (uintptr_t)dss_next < (uintptr_t)dss_max) { - /* Wrap-around. */ - malloc_mutex_unlock(&dss_mtx); - return (NULL); - } + (uintptr_t)dss_next < (uintptr_t)dss_max) + goto label_oom; /* Wrap-around. */ incr = gap_size + cpad_size + size; + + /* + * Optimistically update dss_max, and roll back below if + * sbrk() fails. No other thread will try to extend the + * DSS while dss_max is greater than the current DSS + * max reported by sbrk(0). + */ + if (atomic_cas_p(&dss_max, max_cur, dss_next)) + continue; + + /* Try to allocate. */ dss_prev = chunk_dss_sbrk(incr); - if (dss_prev == dss_max) { + if (dss_prev == max_cur) { /* Success. 
*/ - dss_max = dss_next; - malloc_mutex_unlock(&dss_mtx); if (cpad_size != 0) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - chunk_dalloc_wrapper(arena, + chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, cpad, cpad_size, false, true); } if (*zero) { JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( ret, size); memset(ret, 0, size); } if (!*commit) *commit = pages_decommit(ret, size); return (ret); } - } while (dss_prev != (void *)-1); + + /* + * Failure, whether due to OOM or a race with a raw + * sbrk() call from outside the allocator. Try to roll + * back optimistic dss_max update; if rollback fails, + * it's due to another caller of this function having + * succeeded since this invocation started, in which + * case rollback is not necessary. + */ + atomic_cas_p(&dss_max, dss_next, max_cur); + if (dss_prev == (void *)-1) { + /* OOM. */ + atomic_write_u(&dss_exhausted, (unsigned)true); + goto label_oom; + } + } } - malloc_mutex_unlock(&dss_mtx); +label_oom: + return (NULL); +} - return (NULL); +static bool +chunk_in_dss_helper(void *chunk, void *max) +{ + + return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk < + (uintptr_t)max); } bool chunk_in_dss(void *chunk) { - bool ret; cassert(have_dss); - malloc_mutex_lock(&dss_mtx); - if ((uintptr_t)chunk >= (uintptr_t)dss_base - && (uintptr_t)chunk < (uintptr_t)dss_max) - ret = true; - else - ret = false; - malloc_mutex_unlock(&dss_mtx); - - return (ret); + return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max))); } bool +chunk_dss_mergeable(void *chunk_a, void *chunk_b) +{ + void *max; + + cassert(have_dss); + + max = atomic_read_p(&dss_max); + return (chunk_in_dss_helper(chunk_a, max) == + chunk_in_dss_helper(chunk_b, max)); +} + +void chunk_dss_boot(void) { cassert(have_dss); - if (malloc_mutex_init(&dss_mtx)) - return (true); dss_base = chunk_dss_sbrk(0); - dss_prev = dss_base; + dss_exhausted = (unsigned)(dss_base == (void *)-1); dss_max = dss_base; - - return (false); -} - -void 
-chunk_dss_prefork(void) -{ - - if (have_dss) - malloc_mutex_prefork(&dss_mtx); -} - -void -chunk_dss_postfork_parent(void) -{ - - if (have_dss) - malloc_mutex_postfork_parent(&dss_mtx); -} - -void -chunk_dss_postfork_child(void) -{ - - if (have_dss) - malloc_mutex_postfork_child(&dss_mtx); } /******************************************************************************/
--- a/memory/jemalloc/src/src/chunk_mmap.c +++ b/memory/jemalloc/src/src/chunk_mmap.c @@ -11,28 +11,26 @@ chunk_alloc_mmap_slow(size_t size, size_ alloc_size = size + alignment - PAGE; /* Beware size_t wrap-around. */ if (alloc_size < size) return (NULL); do { void *pages; size_t leadsize; - pages = pages_map(NULL, alloc_size); + pages = pages_map(NULL, alloc_size, commit); if (pages == NULL) return (NULL); leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size); + ret = pages_trim(pages, alloc_size, leadsize, size, commit); } while (ret == NULL); assert(ret != NULL); *zero = true; - if (!*commit) - *commit = pages_decommit(ret, size); return (ret); } void * chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit) { void *ret; @@ -49,30 +47,28 @@ chunk_alloc_mmap(void *new_addr, size_t * Optimistically try mapping precisely the right amount before falling * back to the slow method, with the expectation that the optimistic * approach works most of the time. */ assert(alignment != 0); assert((alignment & chunksize_mask) == 0); - ret = pages_map(new_addr, size); + ret = pages_map(new_addr, size, commit); if (ret == NULL || ret == new_addr) return (ret); assert(new_addr == NULL); offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); if (offset != 0) { pages_unmap(ret, size); return (chunk_alloc_mmap_slow(size, alignment, zero, commit)); } assert(ret != NULL); *zero = true; - if (!*commit) - *commit = pages_decommit(ret, size); return (ret); } bool chunk_dalloc_mmap(void *chunk, size_t size) { if (config_munmap)
--- a/memory/jemalloc/src/src/ckh.c +++ b/memory/jemalloc/src/src/ckh.c @@ -94,17 +94,18 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t { ckhc_t *cell; unsigned offset, i; /* * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. */ - offset = (unsigned)prng_lg_range(&ckh->prng_state, LG_CKH_BUCKET_CELLS); + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; if (cell->key == NULL) { cell->key = key; cell->data = data; ckh->count++; return (false); @@ -136,17 +137,17 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_ /* * Choose a random item within the bucket to evict. This is * critical to correct function, because without (eventually) * evicting all items within a bucket during iteration, it * would be possible to get stuck in an infinite loop if there * were an item for which both hashes indicated the same * bucket. */ - i = (unsigned)prng_lg_range(&ckh->prng_state, + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); /* Swap cell->{key,data} and {key,data} (evict). */ tkey = cell->key; tdata = cell->data; cell->key = key; cell->data = data; key = tkey; data = tdata; @@ -265,35 +266,35 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) size_t usize; lg_curcells++; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { ret = true; goto label_return; } - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, - true, NULL); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; } /* Swap in new table. 
*/ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: return (ret); } @@ -309,41 +310,41 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) * It is possible (though unlikely, given well behaved hashes) that the * table rebuild will fail. */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return; - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't * prevent this or future operations from proceeding. */ return; } /* Swap in new table. */ ttab = ckh->tab; ckh->tab = tab; tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), tab, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif return; } /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT ckh->nshrinkfails++; #endif } bool @@ -386,18 +387,18 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t m ckh->hash = hash; ckh->keycomp = keycomp; usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { ret = true; goto label_return; } - ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; } ret = false; label_return: return (ret); @@ -416,19 +417,19 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) " nrelocs: %"FMTu64"\n", __func__, ckh, (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks, (unsigned long long)ckh->nshrinkfails, (unsigned long long)ckh->ninserts, (unsigned long long)ckh->nrelocs); #endif - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true); if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); } size_t ckh_count(ckh_t *ckh) { assert(ckh != NULL);
--- a/memory/jemalloc/src/src/ctl.c +++ b/memory/jemalloc/src/src/ctl.c @@ -37,35 +37,35 @@ ctl_indexed_node(const ctl_node_t *node) return (!node->named ? (const ctl_indexed_node_t *)node : NULL); } /******************************************************************************/ /* Function prototypes for non-inline static functions. */ #define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); #define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); static bool ctl_arena_init(ctl_arena_stats_t *astats); static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, +static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena); static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); +static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i); +static bool ctl_grow(tsdn_t *tsdn); +static void ctl_refresh(tsdn_t *tsdn); +static bool ctl_init(tsdn_t *tsdn); +static int ctl_lookup(tsdn_t *tsdn, const char *name, + ctl_node_t const **nodesp, size_t *mibp, size_t *depthp); CTL_PROTO(version) CTL_PROTO(epoch) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_prof_name) CTL_PROTO(thread_prof_active) CTL_PROTO(thread_arena) @@ -112,19 +112,20 @@ CTL_PROTO(opt_lg_prof_sample) 
CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_accum) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) -static void arena_i_purge(unsigned arena_ind, bool all); +static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all); CTL_PROTO(arena_i_purge) CTL_PROTO(arena_i_decay) +CTL_PROTO(arena_i_reset) CTL_PROTO(arena_i_dss) CTL_PROTO(arena_i_lg_dirty_mult) CTL_PROTO(arena_i_decay_time) CTL_PROTO(arena_i_chunk_hooks) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_run_size) @@ -186,28 +187,30 @@ CTL_PROTO(stats_arenas_i_hchunks_j_curhc INDEX_PROTO(stats_arenas_i_hchunks_j) CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_dss) CTL_PROTO(stats_arenas_i_lg_dirty_mult) CTL_PROTO(stats_arenas_i_decay_time) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_retained) CTL_PROTO(stats_arenas_i_npurge) CTL_PROTO(stats_arenas_i_nmadvise) CTL_PROTO(stats_arenas_i_purged) CTL_PROTO(stats_arenas_i_metadata_mapped) CTL_PROTO(stats_arenas_i_metadata_allocated) INDEX_PROTO(stats_arenas_i) CTL_PROTO(stats_cactive) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) CTL_PROTO(stats_metadata) CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) /******************************************************************************/ /* mallctl tree. */ /* Maximum tree depth. 
*/ #define CTL_MAX_DEPTH 6 #define NAME(n) {true}, n @@ -294,16 +297,17 @@ static const ctl_named_node_t tcache_nod {NAME("create"), CTL(tcache_create)}, {NAME("flush"), CTL(tcache_flush)}, {NAME("destroy"), CTL(tcache_destroy)} }; static const ctl_named_node_t arena_i_node[] = { {NAME("purge"), CTL(arena_i_purge)}, {NAME("decay"), CTL(arena_i_decay)}, + {NAME("reset"), CTL(arena_i_reset)}, {NAME("dss"), CTL(arena_i_dss)}, {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, {NAME("decay_time"), CTL(arena_i_decay_time)}, {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} }; static const ctl_named_node_t super_arena_i_node[] = { {NAME(""), CHILD(named, arena_i)} }; @@ -451,16 +455,17 @@ static const ctl_indexed_node_t stats_ar static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("dss"), CTL(stats_arenas_i_dss)}, {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, {NAME("decay_time"), CTL(stats_arenas_i_decay_time)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, {NAME("npurge"), CTL(stats_arenas_i_npurge)}, {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, {NAME("purged"), CTL(stats_arenas_i_purged)}, {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("large"), CHILD(named, stats_arenas_i_large)}, {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, @@ -477,16 +482,17 @@ static const ctl_indexed_node_t stats_ar static const ctl_named_node_t stats_node[] = { {NAME("cactive"), CTL(stats_cactive)}, {NAME("allocated"), CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("metadata"), CTL(stats_metadata)}, {NAME("resident"), CTL(stats_resident)}, {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, 
{NAME("arenas"), CHILD(indexed, stats_arenas)} }; static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, {NAME("thread"), CHILD(named, thread)}, {NAME("config"), CHILD(named, config)}, @@ -549,51 +555,52 @@ ctl_arena_clear(ctl_arena_stats_t *astat memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); memset(astats->hstats, 0, nhclasses * sizeof(malloc_huge_stats_t)); } } static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena) { unsigned i; if (config_stats) { - arena_stats_merge(arena, &cstats->nthreads, &cstats->dss, + arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time, &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats, cstats->hstats); for (i = 0; i < NBINS; i++) { cstats->allocated_small += cstats->bstats[i].curregs * index2size(i); cstats->nmalloc_small += cstats->bstats[i].nmalloc; cstats->ndalloc_small += cstats->bstats[i].ndalloc; cstats->nrequests_small += cstats->bstats[i].nrequests; } } else { - arena_basic_stats_merge(arena, &cstats->nthreads, &cstats->dss, - &cstats->lg_dirty_mult, &cstats->decay_time, + arena_basic_stats_merge(tsdn, arena, &cstats->nthreads, + &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time, &cstats->pactive, &cstats->pdirty); } } static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) { unsigned i; sstats->nthreads += astats->nthreads; sstats->pactive += astats->pactive; sstats->pdirty += astats->pdirty; if (config_stats) { sstats->astats.mapped += astats->astats.mapped; + sstats->astats.retained += astats->astats.retained; sstats->astats.npurge += astats->astats.npurge; sstats->astats.nmadvise += astats->astats.nmadvise; sstats->astats.purged += astats->astats.purged; sstats->astats.metadata_mapped += astats->astats.metadata_mapped; 
sstats->astats.metadata_allocated += astats->astats.metadata_allocated; @@ -644,34 +651,34 @@ ctl_arena_stats_smerge(ctl_arena_stats_t sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks; } } } static void -ctl_arena_refresh(arena_t *arena, unsigned i) +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i) { ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; ctl_arena_clear(astats); - ctl_arena_stats_amerge(astats, arena); + ctl_arena_stats_amerge(tsdn, astats, arena); /* Merge into sum stats as well. */ ctl_arena_stats_smerge(sstats, astats); } static bool -ctl_grow(void) +ctl_grow(tsdn_t *tsdn) { ctl_arena_stats_t *astats; /* Initialize new arena. */ - if (arena_init(ctl_stats.narenas) == NULL) + if (arena_init(tsdn, ctl_stats.narenas) == NULL) return (true); /* Allocate extended arena stats. */ astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t)); if (astats == NULL) return (true); @@ -696,68 +703,71 @@ ctl_grow(void) a0dalloc(ctl_stats.arenas); ctl_stats.arenas = astats; ctl_stats.narenas++; return (false); } static void -ctl_refresh(void) +ctl_refresh(tsdn_t *tsdn) { unsigned i; VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). 
*/ ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); for (i = 0; i < ctl_stats.narenas; i++) - tarenas[i] = arena_get(i, false); + tarenas[i] = arena_get(tsdn, i, false); for (i = 0; i < ctl_stats.narenas; i++) { bool initialized = (tarenas[i] != NULL); ctl_stats.arenas[i].initialized = initialized; if (initialized) - ctl_arena_refresh(tarenas[i], i); + ctl_arena_refresh(tsdn, tarenas[i], i); } if (config_stats) { size_t base_allocated, base_resident, base_mapped; - base_stats_get(&base_allocated, &base_resident, &base_mapped); + base_stats_get(tsdn, &base_allocated, &base_resident, + &base_mapped); ctl_stats.allocated = ctl_stats.arenas[ctl_stats.narenas].allocated_small + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; ctl_stats.active = (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); ctl_stats.metadata = base_allocated + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + ctl_stats.arenas[ctl_stats.narenas].astats .metadata_allocated; ctl_stats.resident = base_resident + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + ((ctl_stats.arenas[ctl_stats.narenas].pactive + ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); ctl_stats.mapped = base_mapped + ctl_stats.arenas[ctl_stats.narenas].astats.mapped; + ctl_stats.retained = + ctl_stats.arenas[ctl_stats.narenas].astats.retained; } ctl_epoch++; } static bool -ctl_init(void) +ctl_init(tsdn_t *tsdn) { bool ret; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); if (!ctl_initialized) { /* * Allocate space for one extra arena stats element, which * contains summed stats across all arenas. 
*/ ctl_stats.narenas = narenas_total_get(); ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc( (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); @@ -789,29 +799,29 @@ ctl_init(void) ret = true; goto label_return; } } } ctl_stats.arenas[ctl_stats.narenas].initialized = true; ctl_epoch = 0; - ctl_refresh(); + ctl_refresh(tsdn); ctl_initialized = true; } ret = false; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); return (ret); } static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; const ctl_named_node_t *node; elm = name; /* Equivalent to strchrnul(). */ @@ -853,17 +863,17 @@ ctl_lookup(const char *name, ctl_node_t /* Children are indexed. */ index = malloc_strtoumax(elm, NULL, 10); if (index == UINTMAX_MAX || index > SIZE_T_MAX) { ret = ENOENT; goto label_return; } inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } if (nodesp != NULL) nodesp[i] = (const ctl_node_t *)node; mibp[i] = (size_t)index; @@ -897,71 +907,71 @@ ctl_lookup(const char *name, ctl_node_t } ret = 0; label_return: return (ret); } int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); if 
(ret != 0) goto label_return; node = ctl_named_node(nodes[depth-1]); if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); else { /* The name refers to a partial path through the ctl tree. */ ret = ENOENT; } label_return: return(ret); } int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) +ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp) { int ret; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsdn)) { ret = EAGAIN; goto label_return; } - ret = ctl_lookup(name, NULL, mibp, miblenp); + ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp); label_return: return(ret); } int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) { ret = EAGAIN; goto label_return; } /* Iterate down the tree. */ node = super_root_node; for (i = 0; i < miblen; i++) { assert(node); @@ -973,67 +983,67 @@ ctl_bymib(const size_t *mib, size_t mibl goto label_return; } node = ctl_named_children(node, mib[i]); } else { const ctl_indexed_node_t *inode; /* Indexed element. */ inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; } } } /* Call the ctl function. */ if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); else { /* Partial MIB. 
*/ ret = ENOENT; } label_return: return(ret); } bool ctl_boot(void) { - if (malloc_mutex_init(&ctl_mtx)) + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) return (true); ctl_initialized = false; return (false); } void -ctl_prefork(void) +ctl_prefork(tsdn_t *tsdn) { - malloc_mutex_prefork(&ctl_mtx); + malloc_mutex_prefork(tsdn, &ctl_mtx); } void -ctl_postfork_parent(void) +ctl_postfork_parent(tsdn_t *tsdn) { - malloc_mutex_postfork_parent(&ctl_mtx); + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void -ctl_postfork_child(void) +ctl_postfork_child(tsdn_t *tsdn) { - malloc_mutex_postfork_child(&ctl_mtx); + malloc_mutex_postfork_child(tsdn, &ctl_mtx); } /******************************************************************************/ /* *_ctl() functions. */ #define READONLY() do { \ if (newp != NULL || newlen != 0) { \ ret = EPERM; \ @@ -1080,85 +1090,85 @@ ctl_postfork_child(void) } while (0) /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. 
*/ #define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if (!(c)) \ return (ENOENT); \ if (l) \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return (ret); \ } #define CTL_RO_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if (!(c)) \ return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return (ret); \ } #define CTL_RO_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ return (ret); \ } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate during the call. 
*/ #define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ if (!(c)) \ return (ENOENT); \ READONLY(); \ oldval = (v); \ @@ -1166,56 +1176,54 @@ n##_ctl(const size_t *mib, size_t miblen \ ret = 0; \ label_return: \ return (ret); \ } #define CTL_RO_NL_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return (ret); \ } #define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ - tsd_t *tsd; \ \ if (!(c)) \ return (ENOENT); \ READONLY(); \ - tsd = tsd_fetch(); \ oldval = (m(tsd)); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ return (ret); \ } #define CTL_RO_CONFIG_GEN(n, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) \ { \ int ret; \ t oldval; \ \ READONLY(); \ oldval = n; \ READ(oldval, t); \ \ @@ -1224,31 +1232,31 @@ label_return: \ return (ret); \ } /******************************************************************************/ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t 
newlen) +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); if (newp != NULL) - ctl_refresh(); + ctl_refresh(tsd_tsdn(tsd)); READ(ctl_epoch, uint64_t); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } /******************************************************************************/ CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) CTL_RO_CONFIG_GEN(config_debug, bool) CTL_RO_CONFIG_GEN(config_fill, bool) @@ -1293,77 +1301,75 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_acc CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int -thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; arena_t *oldarena; unsigned newind, oldind; - tsd = tsd_fetch(); oldarena = arena_choose(tsd, NULL); if (oldarena == NULL) return (EAGAIN); - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); newind = oldind = oldarena->ind; WRITE(newind, unsigned); READ(oldind, unsigned); if (newind != oldind) { arena_t *newarena; if (newind >= ctl_stats.narenas) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } /* Initialize arena if necessary. 
*/ - newarena = arena_get(newind, true); + newarena = arena_get(tsd_tsdn(tsd), newind, true); if (newarena == NULL) { ret = EAGAIN; goto label_return; } /* Set new arena/tcache associations. */ arena_migrate(tsd, oldind, newind); if (config_tcache) { tcache_t *tcache = tsd_tcache_get(tsd); if (tcache != NULL) { - tcache_arena_reassociate(tcache, oldarena, - newarena); + tcache_arena_reassociate(tsd_tsdn(tsd), tcache, + oldarena, newarena); } } } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, uint64_t *) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, uint64_t) CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, tsd_thread_deallocatedp_get, uint64_t *) static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_tcache) return (ENOENT); oldval = tcache_enabled_get(); @@ -1377,18 +1383,18 @@ thread_tcache_enabled_ctl(const size_t * READ(oldval, bool); ret = 0; label_return: return (ret); } static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_tcache) return (ENOENT); READONLY(); WRITEONLY(); @@ -1396,146 +1402,133 @@ thread_tcache_flush_ctl(const size_t *mi tcache_flush(); ret = 0; label_return: return (ret); } static int -thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, 
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (!config_prof) return (ENOENT); READ_XOR_WRITE(); if (newp != NULL) { - tsd_t *tsd; - if (newlen != sizeof(const char *)) { ret = EINVAL; goto label_return; } - tsd = tsd_fetch(); - if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != 0) goto label_return; } else { - const char *oldname = prof_thread_name_get(); + const char *oldname = prof_thread_name_get(tsd); READ(oldname, const char *); } ret = 0; label_return: return (ret); } static int -thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) return (ENOENT); - oldval = prof_thread_active_get(); + oldval = prof_thread_active_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - if (prof_thread_active_set(*(bool *)newp)) { + if (prof_thread_active_set(tsd, *(bool *)newp)) { ret = EAGAIN; goto label_return; } } READ(oldval, bool); ret = 0; label_return: return (ret); } /******************************************************************************/ static int -tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); - tsd = tsd_fetch(); - - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (tcaches_create(tsd, &tcache_ind)) { ret = EFAULT; goto label_return; } READ(tcache_ind, unsigned); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void 
*newp, size_t newlen) +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); - tsd = tsd_fetch(); - WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_flush(tsd, tcache_ind); ret = 0; label_return: return (ret); } static int -tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; if (!config_tcache) return (ENOENT); - tsd = tsd_fetch(); - WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); if (tcache_ind == UINT_MAX) { ret = EFAULT; goto label_return; } tcaches_destroy(tsd, tcache_ind); @@ -1543,97 +1536,131 @@ tcache_destroy_ctl(const size_t *mib, si ret = 0; label_return: return (ret); } /******************************************************************************/ static void -arena_i_purge(unsigned arena_ind, bool all) +arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all) { - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); { unsigned narenas = ctl_stats.narenas; if (arena_ind == narenas) { unsigned i; VARIABLE_ARRAY(arena_t *, tarenas, narenas); for (i = 0; i < narenas; i++) - tarenas[i] = arena_get(i, false); + tarenas[i] = arena_get(tsdn, i, false); /* * No further need to hold ctl_mtx, since narenas and * tarenas contain everything needed below. 
*/ - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); for (i = 0; i < narenas; i++) { if (tarenas[i] != NULL) - arena_purge(tarenas[i], all); + arena_purge(tsdn, tarenas[i], all); } } else { arena_t *tarena; assert(arena_ind < narenas); - tarena = arena_get(arena_ind, false); + tarena = arena_get(tsdn, arena_ind, false); /* No further need to hold ctl_mtx. */ - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); if (tarena != NULL) - arena_purge(tarena, all); + arena_purge(tsdn, tarena, all); } } } static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; READONLY(); WRITEONLY(); - arena_i_purge((unsigned)mib[1], true); + arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true); ret = 0; label_return: return (ret); } static int -arena_i_decay_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; READONLY(); WRITEONLY(); - arena_i_purge((unsigned)mib[1], false); + arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false); ret = 0; label_return: return (ret); } static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind; + arena_t *arena; + + READONLY(); + WRITEONLY(); + + if ((config_valgrind && unlikely(in_valgrind)) || (config_fill && + unlikely(opt_quarantine))) { + ret = EFAULT; + goto label_return; + } + + arena_ind = (unsigned)mib[1]; + if (config_debug) { + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + assert(arena_ind < ctl_stats.narenas); + 
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + } + assert(arena_ind >= opt_narenas); + + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + + arena_reset(tsd, arena); + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *dss = NULL; unsigned arena_ind = (unsigned)mib[1]; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); if (dss != NULL) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strcmp(dss_prec_names[i], dss) == 0) { dss_prec = i; @@ -1644,210 +1671,213 @@ arena_i_dss_ctl(const size_t *mib, size_ if (!match) { ret = EINVAL; goto label_return; } } if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arena_get(arena_ind, false); + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL || (dss_prec != dss_prec_limit && - arena_dss_prec_set(arena, dss_prec))) { + arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) { ret = EFAULT; goto label_return; } - dss_prec_old = arena_dss_prec_get(arena); + dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena); } else { if (dss_prec != dss_prec_limit && chunk_dss_prec_set(dss_prec)) { ret = EFAULT; goto label_return; } dss_prec_old = chunk_dss_prec_get(); } dss = dss_prec_names[dss_prec_old]; READ(dss, const char *); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind = (unsigned)mib[1]; arena_t *arena; - arena = arena_get(arena_ind, 
false); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { ret = EFAULT; goto label_return; } if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_get(arena); + size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } - if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { + if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return (ret); } static int -arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp, +arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind = (unsigned)mib[1]; arena_t *arena; - arena = arena_get(arena_ind, false); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { ret = EFAULT; goto label_return; } if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_decay_time_get(arena); + size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena); READ(oldval, ssize_t); } if (newp != NULL) { if (newlen != sizeof(ssize_t)) { ret = EINVAL; goto label_return; } - if (arena_decay_time_set(arena, *(ssize_t *)newp)) { + if (arena_decay_time_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } } ret = 0; label_return: return (ret); } static int -arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned arena_ind = (unsigned)mib[1]; arena_t *arena; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); if (arena_ind < narenas_total_get() && (arena = - arena_get(arena_ind, false)) != NULL) { + arena_get(tsd_tsdn(tsd), 
arena_ind, false)) != NULL) { if (newp != NULL) { chunk_hooks_t old_chunk_hooks, new_chunk_hooks; WRITE(new_chunk_hooks, chunk_hooks_t); - old_chunk_hooks = chunk_hooks_set(arena, + old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena, &new_chunk_hooks); READ(old_chunk_hooks, chunk_hooks_t); } else { - chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); + chunk_hooks_t old_chunk_hooks = + chunk_hooks_get(tsd_tsdn(tsd), arena); READ(old_chunk_hooks, chunk_hooks_t); } } else { ret = EFAULT; goto label_return; } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { - const ctl_named_node_t * ret; + const ctl_named_node_t *ret; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); if (i > ctl_stats.narenas) { ret = NULL; goto label_return; } ret = super_arena_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); return (ret); } /******************************************************************************/ static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; goto label_return; } narenas = ctl_stats.narenas; READ(narenas, unsigned); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, +arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned 
nread, i; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { ret = EINVAL; nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas; } else { ret = 0; nread = ctl_stats.narenas; } for (i = 0; i < nread; i++) ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } static int -arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) +arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (oldp != NULL && oldlenp != NULL) { size_t oldval = arena_lg_dirty_mult_default_get(); READ(oldval, ssize_t); } if (newp != NULL) { @@ -1862,17 +1892,17 @@ arenas_lg_dirty_mult_ctl(const size_t *m } ret = 0; label_return: return (ret); } static int -arenas_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp, +arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; if (oldp != NULL && oldlenp != NULL) { size_t oldval = arena_decay_time_default_get(); READ(oldval, ssize_t); } @@ -1896,187 +1926,185 @@ CTL_RO_NL_GEN(arenas_quantum, QUANTUM, s CTL_RO_NL_GEN(arenas_page, PAGE, size_t) CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) +arenas_bin_i_index(tsdn_t 
*tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > NBINS) return (NULL); return (super_arenas_bin_i_node); } CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t) static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) +arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > nlclasses) return (NULL); return (super_arenas_lrun_i_node); } CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]), size_t) static const ctl_named_node_t * -arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) +arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { if (i > nhclasses) return (NULL); return (super_arenas_hchunk_i_node); } static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); - if (ctl_grow()) { + if (ctl_grow(tsd_tsdn(tsd))) { ret = EAGAIN; goto label_return; } narenas = ctl_stats.narenas - 1; READ(narenas, unsigned); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); return (ret); } /******************************************************************************/ static int -prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = 
prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); + } else + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) return (ENOENT); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - oldval = prof_thread_active_init_set(*(bool *)newp); + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); } else - oldval = prof_thread_active_init_get(); + oldval = prof_active_get(tsd_tsdn(tsd)); READ(oldval, bool); ret = 0; label_return: return (ret); } static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + const char *filename = NULL; + + if (!config_prof) + return (ENOENT); + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_mdump(tsd, filename)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return (ret); +} + +static int +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; if (!config_prof) return (ENOENT); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - oldval = prof_active_set(*(bool *)newp); + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); } else - oldval = prof_active_get(); + oldval = prof_gdump_get(tsd_tsdn(tsd)); READ(oldval, bool); ret = 0; label_return: return (ret); } static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const char *filename = NULL; - - if (!config_prof) - return (ENOENT); - - WRITEONLY(); - WRITE(filename, const 
char *); - - if (prof_mdump(filename)) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: - return (ret); -} - -static int -prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - oldval = prof_gdump_set(*(bool *)newp); - } else - oldval = prof_gdump_get(); - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; - tsd_t *tsd; if (!config_prof) return (ENOENT); WRITEONLY(); WRITE(lg_sample, size_t); if (lg_sample >= (sizeof(uint64_t) << 3)) lg_sample = (sizeof(uint64_t) << 3) - 1; - tsd = tsd_fetch(); - prof_reset(tsd, lg_sample); ret = 0; label_return: return (ret); } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) @@ -2085,27 +2113,30 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_samp /******************************************************************************/ CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t) CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, ssize_t) 
CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time, ssize_t) CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + ctl_stats.arenas[mib[2]].astats.retained, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) @@ -2152,17 +2183,18 @@ CTL_RO_CGEN(config_stats && config_tcach CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { if (j > NBINS) return (NULL); return (super_stats_arenas_i_bins_j_node); } CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, @@ -2170,17 +2202,18 @@ CTL_RO_CGEN(config_stats, stats_arenas_i CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) CTL_RO_CGEN(config_stats, 
stats_arenas_i_lruns_j_curruns, ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) +stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { if (j > nlclasses) return (NULL); return (super_stats_arenas_i_lruns_j_node); } CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, @@ -2189,32 +2222,33 @@ CTL_RO_CGEN(config_stats, stats_arenas_i ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */ uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) static const ctl_named_node_t * -stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) +stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { if (j > nhclasses) return (NULL); return (super_stats_arenas_i_hchunks_j_node); } static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { const ctl_named_node_t * ret; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { ret = NULL; goto label_return; } ret = super_stats_arenas_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); + malloc_mutex_unlock(tsdn, &ctl_mtx); return (ret); }
--- a/memory/jemalloc/src/src/huge.c +++ b/memory/jemalloc/src/src/huge.c @@ -10,96 +10,109 @@ huge_node_get(const void *ptr) node = chunk_lookup(ptr, true); assert(!extent_node_achunk_get(node)); return (node); } static bool -huge_node_set(const void *ptr, extent_node_t *node) +huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node) { assert(extent_node_addr_get(node) == ptr); assert(!extent_node_achunk_get(node)); - return (chunk_register(ptr, node)); + return (chunk_register(tsdn, ptr, node)); +} + +static void +huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node) +{ + bool err; + + err = huge_node_set(tsdn, ptr, node); + assert(!err); } static void huge_node_unset(const void *ptr, const extent_node_t *node) { chunk_deregister(ptr, node); } void * -huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero, - tcache_t *tcache) +huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { assert(usize == s2u(usize)); - return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache)); + return (huge_palloc(tsdn, arena, usize, chunksize, zero)); } void * -huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, - bool zero, tcache_t *tcache) +huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero) { void *ret; size_t ausize; + arena_t *iarena; extent_node_t *node; bool is_zeroed; /* Allocate one or more contiguous chunks for this request. */ + assert(!tsdn_null(tsdn) || arena != NULL); + ausize = sa2u(usize, alignment); if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS)) return (NULL); assert(ausize >= chunksize); /* Allocate an extent node with which to track the chunk. */ - node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)), - CACHELINE, false, tcache, true, arena); + iarena = (!tsdn_null(tsdn)) ? 
arena_ichoose(tsdn_tsd(tsdn), NULL) : a0get(); + node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)), + CACHELINE, false, NULL, true, iarena); if (node == NULL) return (NULL); /* * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that * it is possible to make correct junk/zero fill decisions below. */ is_zeroed = zero; - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena, - usize, alignment, &is_zeroed)) == NULL) { - idalloctm(tsd, node, tcache, true, true); + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); + if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, + arena, usize, alignment, &is_zeroed)) == NULL) { + idalloctm(tsdn, node, NULL, true, true); return (NULL); } extent_node_init(node, arena, ret, usize, is_zeroed, true); - if (huge_node_set(ret, node)) { - arena_chunk_dalloc_huge(arena, ret, usize); - idalloctm(tsd, node, tcache, true, true); + if (huge_node_set(tsdn, ret, node)) { + arena_chunk_dalloc_huge(tsdn, arena, ret, usize); + idalloctm(tsdn, node, NULL, true, true); return (NULL); } /* Insert node into huge. 
*/ - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); ql_elm_new(node, ql_link); ql_tail_insert(&arena->huge, node, ql_link); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed) memset(ret, 0, usize); } else if (config_fill && unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); + memset(ret, JEMALLOC_ALLOC_JUNK, usize); - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } #ifdef JEMALLOC_JET #undef huge_dalloc_junk #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) #endif static void @@ -107,28 +120,28 @@ huge_dalloc_junk(void *ptr, size_t usize { if (config_fill && have_dss && unlikely(opt_junk_free)) { /* * Only bother junk filling if the chunk isn't about to be * unmapped. */ if (!config_munmap || (have_dss && chunk_in_dss(ptr))) - memset(ptr, 0x5a, usize); + memset(ptr, JEMALLOC_FREE_JUNK, usize); } } #ifdef JEMALLOC_JET #undef huge_dalloc_junk #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); #endif static void -huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) +huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize_min, size_t usize_max, bool zero) { size_t usize, usize_next; extent_node_t *node; arena_t *arena; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; bool pre_zeroed, post_zeroed; /* Increase usize to incorporate extra. */ @@ -142,307 +155,319 @@ huge_ralloc_no_move_similar(void *ptr, s node = huge_node_get(ptr); arena = extent_node_arena_get(node); pre_zeroed = extent_node_zeroed_get(node); /* Fill if necessary (shrinking). 
*/ if (oldsize > usize) { size_t sdiff = oldsize - usize; if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff); + memset((void *)((uintptr_t)ptr + usize), + JEMALLOC_FREE_JUNK, sdiff); post_zeroed = false; } else { - post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, - ptr, CHUNK_CEILING(oldsize), usize, sdiff); + post_zeroed = !chunk_purge_wrapper(tsdn, arena, + &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize, + sdiff); } } else post_zeroed = pre_zeroed; - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); /* Update the size of the huge allocation. */ + huge_node_unset(ptr, node); assert(extent_node_size_get(node) != usize); extent_node_size_set(node, usize); + huge_node_reset(tsdn, ptr, node); /* Update zeroed. */ extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); - arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize); + arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize); /* Fill if necessary (growing). 
*/ if (oldsize < usize) { if (zero || (config_fill && unlikely(opt_zero))) { if (!pre_zeroed) { memset((void *)((uintptr_t)ptr + oldsize), 0, usize - oldsize); } } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - - oldsize); + memset((void *)((uintptr_t)ptr + oldsize), + JEMALLOC_ALLOC_JUNK, usize - oldsize); } } } static bool -huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) +huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize) { extent_node_t *node; arena_t *arena; chunk_hooks_t chunk_hooks; size_t cdiff; bool pre_zeroed, post_zeroed; node = huge_node_get(ptr); arena = extent_node_arena_get(node); pre_zeroed = extent_node_zeroed_get(node); - chunk_hooks = chunk_hooks_get(arena); + chunk_hooks = chunk_hooks_get(tsdn, arena); assert(oldsize > usize); /* Split excess chunks. */ cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize), CHUNK_CEILING(usize), cdiff, true, arena->ind)) return (true); if (oldsize > usize) { size_t sdiff = oldsize - usize; if (config_fill && unlikely(opt_junk_free)) { huge_dalloc_junk((void *)((uintptr_t)ptr + usize), sdiff); post_zeroed = false; } else { - post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, - CHUNK_ADDR2BASE((uintptr_t)ptr + usize), - CHUNK_CEILING(oldsize), + post_zeroed = !chunk_purge_wrapper(tsdn, arena, + &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + + usize), CHUNK_CEILING(oldsize), CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); } } else post_zeroed = pre_zeroed; - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); /* Update the size of the huge allocation. */ + huge_node_unset(ptr, node); extent_node_size_set(node, usize); + huge_node_reset(tsdn, ptr, node); /* Update zeroed. 
*/ extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); /* Zap the excess chunks. */ - arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize); + arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize); return (false); } static bool -huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) { +huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize, bool zero) { extent_node_t *node; arena_t *arena; bool is_zeroed_subchunk, is_zeroed_chunk; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); is_zeroed_subchunk = extent_node_zeroed_get(node); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); /* - * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so - * that it is possible to make correct junk/zero fill decisions below. + * Use is_zeroed_chunk to detect whether the trailing memory is zeroed, + * update extent's zeroed field, and zero as necessary. */ - is_zeroed_chunk = zero; - - if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize, + is_zeroed_chunk = false; + if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize, &is_zeroed_chunk)) return (true); - malloc_mutex_lock(&arena->huge_mtx); - /* Update the size of the huge allocation. 
*/ + malloc_mutex_lock(tsdn, &arena->huge_mtx); + huge_node_unset(ptr, node); extent_node_size_set(node, usize); - malloc_mutex_unlock(&arena->huge_mtx); + extent_node_zeroed_set(node, extent_node_zeroed_get(node) && + is_zeroed_chunk); + huge_node_reset(tsdn, ptr, node); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed_subchunk) { memset((void *)((uintptr_t)ptr + oldsize), 0, CHUNK_CEILING(oldsize) - oldsize); } if (!is_zeroed_chunk) { memset((void *)((uintptr_t)ptr + CHUNK_CEILING(oldsize)), 0, usize - CHUNK_CEILING(oldsize)); } } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - - oldsize); + memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK, + usize - oldsize); } return (false); } bool -huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, +huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { assert(s2u(oldsize) == oldsize); /* The following should have been caught by callers. */ assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS); /* Both allocations must be huge to avoid a move. */ if (oldsize < chunksize || usize_max < chunksize) return (true); if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { /* Attempt to expand the allocation in-place. */ - if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, + if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max, zero)) { - arena_decay_tick(tsd, huge_aalloc(ptr)); + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } /* Try again, this time with usize_min. 
*/ if (usize_min < usize_max && CHUNK_CEILING(usize_min) > - CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr, - oldsize, usize_min, zero)) { - arena_decay_tick(tsd, huge_aalloc(ptr)); + CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn, + ptr, oldsize, usize_min, zero)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } } /* * Avoid moving the allocation if the existing chunk size accommodates * the new size. */ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { - huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max, - zero); - arena_decay_tick(tsd, huge_aalloc(ptr)); + huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min, + usize_max, zero); + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } /* Attempt to shrink the allocation in-place. */ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) { - if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) { - arena_decay_tick(tsd, huge_aalloc(ptr)); + if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize, + usize_max)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } } return (true); } static void * -huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) +huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero) { if (alignment <= chunksize) - return (huge_malloc(tsd, arena, usize, zero, tcache)); - return (huge_palloc(tsd, arena, usize, alignment, zero, tcache)); + return (huge_malloc(tsdn, arena, usize, zero)); + return (huge_palloc(tsdn, arena, usize, alignment, zero)); } void * -huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) +huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t copysize; /* The following 
should have been caught by callers. */ assert(usize > 0 && usize <= HUGE_MAXCLASS); /* Try to avoid moving the allocation. */ - if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero)) + if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize, + zero)) return (ptr); /* * usize and oldsize are different enough that we need to use a * different size class. In that case, fall back to allocating new * space and copying. */ - ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero, - tcache); + ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment, + zero); if (ret == NULL) return (NULL); copysize = (usize < oldsize) ? usize : oldsize; memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); + isqalloc(tsd, ptr, oldsize, tcache, true); return (ret); } void -huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) +huge_dalloc(tsdn_t *tsdn, void *ptr) { extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); huge_node_unset(ptr, node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); ql_remove(&arena->huge, node, ql_link); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); huge_dalloc_junk(extent_node_addr_get(node), extent_node_size_get(node)); - arena_chunk_dalloc_huge(extent_node_arena_get(node), + arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node), extent_node_addr_get(node), extent_node_size_get(node)); - idalloctm(tsd, node, tcache, true, true); + idalloctm(tsdn, node, NULL, true, true); - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); } arena_t * huge_aalloc(const void *ptr) { return (extent_node_arena_get(huge_node_get(ptr))); } size_t -huge_salloc(const void *ptr) +huge_salloc(tsdn_t *tsdn, const void *ptr) { size_t size; extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + 
malloc_mutex_lock(tsdn, &arena->huge_mtx); size = extent_node_size_get(node); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); return (size); } prof_tctx_t * -huge_prof_tctx_get(const void *ptr) +huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr) { prof_tctx_t *tctx; extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); tctx = extent_node_prof_tctx_get(node); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); return (tctx); } void -huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx) +huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); extent_node_prof_tctx_set(node, tctx); - malloc_mutex_unlock(&arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); } void -huge_prof_tctx_reset(const void *ptr) +huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr) { - huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U); + huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U); }
--- a/memory/jemalloc/src/src/jemalloc.c +++ b/memory/jemalloc/src/src/jemalloc.c @@ -1,16 +1,20 @@ #define JEMALLOC_C_ #include "jemalloc/internal/jemalloc_internal.h" /******************************************************************************/ /* Data. */ /* Runtime configuration options. */ -const char *je_malloc_conf JEMALLOC_ATTR(weak); +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; bool opt_abort = #ifdef JEMALLOC_DEBUG true #else false #endif ; const char *opt_junk = @@ -55,49 +59,60 @@ static malloc_mutex_t arenas_lock; * * arenas[0..narenas_auto) are used for automatic multiplexing of threads and * arenas. arenas[narenas_auto..narenas_total) are only used if the application * takes some action to create them and allocate from them. */ arena_t **arenas; static unsigned narenas_total; /* Use narenas_total_*(). */ static arena_t *a0; /* arenas[0]; read-only after initialization. */ -static unsigned narenas_auto; /* Read-only after initialization. */ +unsigned narenas_auto; /* Read-only after initialization. */ typedef enum { malloc_init_uninitialized = 3, malloc_init_a0_initialized = 2, malloc_init_recursible = 1, malloc_init_initialized = 0 /* Common case --> jnz. */ } malloc_init_t; static malloc_init_t malloc_init_state = malloc_init_uninitialized; -/* 0 should be the common case. Set to true to trigger initialization. */ +/* False should be the common case. Set to true to trigger initialization. */ static bool malloc_slow = true; -/* When malloc_slow != 0, set the corresponding bits for sanity check. */ +/* When malloc_slow is true, set the corresponding bits for sanity check. */ enum { flag_opt_junk_alloc = (1U), flag_opt_junk_free = (1U << 1), flag_opt_quarantine = (1U << 2), flag_opt_zero = (1U << 3), flag_opt_utrace = (1U << 4), flag_in_valgrind = (1U << 5), flag_opt_xmalloc = (1U << 6) }; static uint8_t malloc_slow_flags; -/* Last entry for overflow detection only. 
*/ JEMALLOC_ALIGNED(CACHELINE) -const size_t index2size_tab[NSIZES+1] = { -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ +const size_t pind2sz_tab[NPSIZES] = { +#define PSZ_yes(lg_grp, ndelta, lg_delta) \ + (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))), +#define PSZ_no(lg_grp, ndelta, lg_delta) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ + PSZ_##psz(lg_grp, ndelta, lg_delta) + SIZE_CLASSES +#undef PSZ_yes +#undef PSZ_no +#undef SC +}; + +JEMALLOC_ALIGNED(CACHELINE) +const size_t index2size_tab[NSIZES] = { +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), SIZE_CLASSES #undef SC - ZU(0) }; JEMALLOC_ALIGNED(CACHELINE) const uint8_t size2index_tab[] = { #if LG_TINY_MIN == 0 #warning "Dangerous LG_TINY_MIN" #define S2B_0(i) i, #elif LG_TINY_MIN == 1 @@ -156,17 +171,17 @@ const uint8_t size2index_tab[] = { #endif #if LG_TINY_MIN < 10 #define S2B_10(i) S2B_9(i) S2B_9(i) #endif #if LG_TINY_MIN < 11 #define S2B_11(i) S2B_10(i) S2B_10(i) #endif #define S2B_no(i) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ S2B_##lg_delta_lookup(index) SIZE_CLASSES #undef S2B_3 #undef S2B_4 #undef S2B_5 #undef S2B_6 #undef S2B_7 #undef S2B_8 @@ -207,17 +222,17 @@ static void WINAPI * e.g. setup chunk hooks, it may end up running before this one, * and malloc_init_hard will crash trying to lock the uninitialized * lock. So we force an initialization of the lock in * malloc_init_hard as well. We don't try to care about atomicity * of the accessed to the init_lock_initialized boolean, since it * really only matters early in the process creation, before any * separate thread normally starts doing anything. 
*/ if (!init_lock_initialized) - malloc_mutex_init(&init_lock); + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); init_lock_initialized = true; } #ifdef _MSC_VER # pragma section(".CRT$XCU", read) JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) static const void (WINAPI *init_init_lock)(void) = _init_init_lock; #endif @@ -302,36 +317,43 @@ malloc_init(void) if (unlikely(!malloc_initialized()) && malloc_init_hard()) return (true); malloc_thread_init(); return (false); } /* - * The a0*() functions are used instead of i[mcd]alloc() in situations that + * The a0*() functions are used instead of i{d,}alloc() in situations that * cannot tolerate TLS variable access. */ static void * a0ialloc(size_t size, bool zero, bool is_metadata) { if (unlikely(malloc_init_a0())) return (NULL); - return (iallocztm(NULL, size, size2index(size), zero, false, - is_metadata, arena_get(0, false), true)); + return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, + is_metadata, arena_get(TSDN_NULL, 0, true), true)); } static void a0idalloc(void *ptr, bool is_metadata) { - idalloctm(NULL, ptr, false, is_metadata, true); + idalloctm(TSDN_NULL, ptr, false, is_metadata, true); +} + +arena_t * +a0get(void) +{ + + return (a0); } void * a0malloc(size_t size) { return (a0ialloc(size, false, true)); } @@ -408,85 +430,93 @@ unsigned narenas_total_get(void) { return (atomic_read_u(&narenas_total)); } /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * -arena_init_locked(unsigned ind) +arena_init_locked(tsdn_t *tsdn, unsigned ind) { arena_t *arena; assert(ind <= narenas_total_get()); if (ind > MALLOCX_ARENA_MAX) return (NULL); if (ind == narenas_total_get()) narenas_total_inc(); /* * Another thread may have already initialized arenas[ind] if it's an * auto arena. */ - arena = arena_get(ind, false); + arena = arena_get(tsdn, ind, false); if (arena != NULL) { assert(ind < narenas_auto); return (arena); } /* Actually initialize the arena. 
*/ - arena = arena_new(ind); + arena = arena_new(tsdn, ind); arena_set(ind, arena); return (arena); } arena_t * -arena_init(unsigned ind) +arena_init(tsdn_t *tsdn, unsigned ind) { arena_t *arena; - malloc_mutex_lock(&arenas_lock); - arena = arena_init_locked(ind); - malloc_mutex_unlock(&arenas_lock); + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind); + malloc_mutex_unlock(tsdn, &arenas_lock); return (arena); } static void -arena_bind(tsd_t *tsd, unsigned ind) +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; - arena = arena_get(ind, false); - arena_nthreads_inc(arena); - - if (tsd_nominal(tsd)) + if (!tsd_nominal(tsd)) + return; + + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); + + if (internal) + tsd_iarena_set(tsd, arena); + else tsd_arena_set(tsd, arena); } void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; - oldarena = arena_get(oldind, false); - newarena = arena_get(newind, false); - arena_nthreads_dec(oldarena); - arena_nthreads_inc(newarena); + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); } static void -arena_unbind(tsd_t *tsd, unsigned ind) +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; - arena = arena_get(ind, false); - arena_nthreads_dec(arena); - tsd_arena_set(tsd, NULL); + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + if (internal) + tsd_iarena_set(tsd, NULL); + else + tsd_arena_set(tsd, NULL); } arena_tdata_t * arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { arena_tdata_t *tdata, *arenas_tdata_old; arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); unsigned narenas_tdata_old, i; @@ -557,72 +587,99 @@ arena_tdata_get_hard(tsd_t *tsd, unsigne label_return: if (arenas_tdata_old != 
NULL) a0dalloc(arenas_tdata_old); return (tdata); } /* Slow path, called only by arena_choose(). */ arena_t * -arena_choose_hard(tsd_t *tsd) +arena_choose_hard(tsd_t *tsd, bool internal) { - arena_t *ret; + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (narenas_auto > 1) { - unsigned i, choose, first_null; - - choose = 0; + unsigned i, j, choose[2], first_null; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. + */ + + for (j = 0; j < 2; j++) + choose[j] = 0; + first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(arena_get(0, false) != NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { - if (arena_get(i, false) != NULL) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ - if (arena_nthreads_get(arena_get(i, false)) < - arena_nthreads_get(arena_get(choose, - false))) - choose = i; + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) + choose[j] = i; + } } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized * arena, in case all extant arenas are in use. * * NB: It is possible for there to be * discontinuities in terms of initialized * versus uninitialized arenas, due to the * "thread.arena" mallctl. */ first_null = i; } } - if (arena_nthreads_get(arena_get(choose, false)) == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arena_get(choose, false); - } else { - /* Initialize a new arena. 
*/ - choose = first_null; - ret = arena_init_locked(choose); - if (ret == NULL) { - malloc_mutex_unlock(&arenas_lock); - return (NULL); + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. */ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j]); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return (NULL); + } + if (!!j == internal) + ret = arena; } + arena_bind(tsd, choose[j], !!j); } - arena_bind(tsd, choose); - malloc_mutex_unlock(&arenas_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); } else { - ret = arena_get(0, false); - arena_bind(tsd, 0); + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); } return (ret); } void thread_allocated_cleanup(tsd_t *tsd) { @@ -633,23 +690,33 @@ thread_allocated_cleanup(tsd_t *tsd) void thread_deallocated_cleanup(tsd_t *tsd) { /* Do nothing. */ } void +iarena_cleanup(tsd_t *tsd) +{ + arena_t *iarena; + + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) + arena_unbind(tsd, iarena->ind, true); +} + +void arena_cleanup(tsd_t *tsd) { arena_t *arena; arena = tsd_arena_get(tsd); if (arena != NULL) - arena_unbind(tsd, arena->ind); + arena_unbind(tsd, arena->ind, false); } void arenas_tdata_cleanup(tsd_t *tsd) { arena_tdata_t *arenas_tdata; /* Prevent tsd->arenas_tdata from being (re)created. */ @@ -676,41 +743,44 @@ arenas_tdata_bypass_cleanup(tsd_t *tsd) /* Do nothing. */ } static void stats_print_atexit(void) { if (config_tcache && config_stats) { + tsdn_t *tsdn; unsigned narenas, i; + tsdn = tsdn_fetch(); + /* * Merge stats from extant threads. 
This is racy, since * individual threads do not lock when recording tcache stats * events. As a consequence, the final stats may be slightly * out of date by the time they are reported, if other threads * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arena_get(i, false); + arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; /* * tcache_stats_merge() locks bins, so if any * code is introduced that acquires both arena * and bin locks in the opposite order, * deadlocks may result. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); + tcache_stats_merge(tsdn, tcache, arena); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } } } je_malloc_stats_print(NULL, NULL, NULL); } /* * End miscellaneous support functions. @@ -737,16 +807,30 @@ static unsigned malloc_ncpus(void) { long result; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif return ((result == -1) ? 
1 : (unsigned)result); } static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, @@ -1096,31 +1180,53 @@ malloc_conf_init(void) CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", -1, (sizeof(size_t) << 3) - 1) CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1, NSTIME_SEC_MAX); CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) if (config_fill) { if (CONF_MATCH("junk")) { if (CONF_MATCH_VALUE("true")) { - opt_junk = "true"; - opt_junk_alloc = opt_junk_free = - true; + if (config_valgrind && + unlikely(in_valgrind)) { + malloc_conf_error( + "Deallocation-time " + "junk filling cannot " + "be enabled while " + "running inside " + "Valgrind", k, klen, v, + vlen); + } else { + opt_junk = "true"; + opt_junk_alloc = true; + opt_junk_free = true; + } } else if (CONF_MATCH_VALUE("false")) { opt_junk = "false"; opt_junk_alloc = opt_junk_free = false; } else if (CONF_MATCH_VALUE("alloc")) { opt_junk = "alloc"; opt_junk_alloc = true; opt_junk_free = false; } else if (CONF_MATCH_VALUE("free")) { - opt_junk = "free"; - opt_junk_alloc = false; - opt_junk_free = true; + if (config_valgrind && + unlikely(in_valgrind)) { + malloc_conf_error( + "Deallocation-time " + "junk filling cannot " + "be enabled while " + "running inside " + "Valgrind", k, klen, v, + vlen); + } else { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } } else { malloc_conf_error( "Invalid conf value", k, klen, v, vlen); } continue; } CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", @@ -1181,149 +1287,137 @@ malloc_conf_init(void) #undef CONF_HANDLE_BOOL #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P } } } -/* init_lock must be held. 
*/ static bool malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == malloc_init_recursible)) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. */ return (false); } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { + spin_t spinner; + /* Busy-wait until the initializing thread completes. */ + spin_init(&spinner); do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); } while (!malloc_initialized()); return (false); } #endif return (true); } -/* init_lock must be held. */ static bool -malloc_init_hard_a0_locked(void) +malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; if (config_prof) prof_boot0(); malloc_conf_init(); if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) abort(); } } + pages_boot(); if (base_boot()) return (true); if (chunk_boot()) return (true); if (ctl_boot()) return (true); if (config_prof) prof_boot1(); - if (arena_boot()) + arena_boot(); + if (config_tcache && tcache_boot(TSDN_NULL)) return (true); - if (config_tcache && tcache_boot()) - return (true); - if (malloc_mutex_init(&arenas_lock)) + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) return (true); /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ narenas_auto = 1; narenas_total_set(narenas_auto); arenas = &a0; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); /* * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). 
*/ - if (arena_init(0) == NULL) + if (arena_init(TSDN_NULL, 0) == NULL) return (true); + malloc_init_state = malloc_init_a0_initialized; + return (false); } static bool malloc_init_hard_a0(void) { bool ret; - malloc_mutex_lock(&init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); ret = malloc_init_hard_a0_locked(); - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (ret); } -/* - * Initialize data structures which may trigger recursive allocation. - * - * init_lock must be held. - */ +/* Initialize data structures which may trigger recursive allocation. */ static bool malloc_init_hard_recursible(void) { - bool ret = false; malloc_init_state = malloc_init_recursible; - malloc_mutex_unlock(&init_lock); - - /* LinuxThreads' pthread_setspecific() allocates. */ - if (malloc_tsd_boot0()) { - ret = true; - goto label_return; - } ncpus = malloc_ncpus(); #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ && !defined(_WIN32) && !defined(__native_client__)) /* LinuxThreads' pthread_atfork() allocates. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { - ret = true; malloc_write("<jemalloc>: Error in pthread_atfork()\n"); if (opt_abort) abort(); + return (true); } #endif -label_return: - malloc_mutex_lock(&init_lock); - return (ret); + return (false); } -/* init_lock must be held. */ static bool -malloc_init_hard_finish(void) +malloc_init_hard_finish(tsdn_t *tsdn) { - if (mutex_boot()) + if (malloc_mutex_boot()) return (true); if (opt_narenas == 0) { /* * For SMP systems, create more than one arena per CPU by * default. */ if (ncpus > 1) @@ -1338,181 +1432,202 @@ malloc_init_hard_finish(void) if (narenas_auto > MALLOCX_ARENA_MAX) { narenas_auto = MALLOCX_ARENA_MAX; malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", narenas_auto); } narenas_total_set(narenas_auto); /* Allocate and initialize arenas. 
*/ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * + arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * (MALLOCX_ARENA_MAX+1)); if (arenas == NULL) return (true); /* Copy the pointer to the one arena that was already initialized. */ arena_set(0, a0); malloc_init_state = malloc_init_initialized; malloc_slow_flag_init(); return (false); } static bool malloc_init_hard(void) { + tsd_t *tsd; #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif - malloc_mutex_lock(&init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); if (!malloc_init_hard_needed()) { - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (false); } if (malloc_init_state != malloc_init_a0_initialized && malloc_init_hard_a0_locked()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (malloc_init_hard_recursible()) { - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (true); } - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) + return (true); + if (malloc_init_hard_recursible()) + return (true); + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + + if (config_prof && prof_boot2(tsd)) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); return (true); } - if (malloc_init_hard_finish()) { - malloc_mutex_unlock(&init_lock); + if (malloc_init_hard_finish(tsd_tsdn(tsd))) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); return (true); } - malloc_mutex_unlock(&init_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); malloc_tsd_boot1(); return (false); } /* * End initialization functions. */ /******************************************************************************/ /* * Begin malloc(3)-compatible functions. 
*/ static void * -imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, +ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, prof_tctx_t *tctx, bool slow_path) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { szind_t ind_large = size2index(LARGE_MINCLASS); - p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path); + p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else - p = imalloc(tsd, usize, ind, slow_path); + p = ialloc(tsd, usize, ind, zero, slow_path); return (p); } JEMALLOC_ALWAYS_INLINE_C void * -imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path) +ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) { void *p; prof_tctx_t *tctx; tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path); + p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); else - p = imalloc(tsd, usize, ind, slow_path); + p = ialloc(tsd, usize, ind, zero, slow_path); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); return (p); } +/* + * ialloc_body() is inlined so that fast and slow paths are generated separately + * with statically known slow_path. + * + * This function guarantees that *tsdn is non-NULL on success. 
+ */ JEMALLOC_ALWAYS_INLINE_C void * -imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path) +ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, + bool slow_path) { + tsd_t *tsd; szind_t ind; - if (slow_path && unlikely(malloc_init())) + if (slow_path && unlikely(malloc_init())) { + *tsdn = NULL; return (NULL); - *tsd = tsd_fetch(); + } + + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); + ind = size2index(size); if (unlikely(ind >= NSIZES)) return (NULL); if (config_stats || (config_prof && opt_prof) || (slow_path && config_valgrind && unlikely(in_valgrind))) { *usize = index2size(ind); assert(*usize > 0 && *usize <= HUGE_MAXCLASS); } if (config_prof && opt_prof) - return (imalloc_prof(*tsd, *usize, ind, slow_path)); - - return (imalloc(*tsd, size, ind, slow_path)); + return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); + + return (ialloc(tsd, size, ind, zero, slow_path)); } JEMALLOC_ALWAYS_INLINE_C void -imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path) +ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func, + bool update_errno, bool slow_path) { + + assert(!tsdn_null(tsdn) || ret == NULL); + if (unlikely(ret == NULL)) { if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in malloc(): " - "out of memory\n"); + malloc_printf("<jemalloc>: Error in %s(): out of " + "memory\n", func); abort(); } - set_errno(ENOMEM); + if (update_errno) + set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; + assert(usize == isalloc(tsdn, ret, config_prof)); + *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; } + witness_assert_lockless(tsdn); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { void *ret; - tsd_t *tsd; + tsdn_t 
*tsdn; size_t usize JEMALLOC_CC_SILENCE_INIT(0); if (size == 0) size = 1; if (likely(!malloc_slow)) { - /* - * imalloc_body() is inlined so that fast and slow paths are - * generated separately with statically known slow_path. - */ - ret = imalloc_body(size, &tsd, &usize, false); - imalloc_post_check(ret, tsd, usize, false); + ret = ialloc_body(size, false, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "malloc", true, false); } else { - ret = imalloc_body(size, &tsd, &usize, true); - imalloc_post_check(ret, tsd, usize, true); + ret = ialloc_body(size, false, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "malloc", true, true); UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); } return (ret); } static void * imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, prof_tctx_t *tctx) @@ -1521,17 +1636,17 @@ imemalign_prof_sample(tsd_t *tsd, size_t if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else p = ipalloc(tsd, usize, alignment, false); return (p); } JEMALLOC_ALWAYS_INLINE_C void * imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) @@ -1543,37 +1658,39 @@ imemalign_prof(tsd_t *tsd, size_t alignm if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = imemalign_prof_sample(tsd, alignment, usize, tctx); else p = ipalloc(tsd, usize, alignment, false); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); return (p); } JEMALLOC_ATTR(nonnull(1)) static int imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) { int ret; tsd_t *tsd; size_t usize; void *result; 
assert(min_alignment != 0); if (unlikely(malloc_init())) { + tsd = NULL; result = NULL; goto label_oom; } tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); if (size == 0) size = 1; /* Make sure that alignment is a large enough power of 2. */ if (unlikely(((alignment - 1) & alignment) != 0 || (alignment < min_alignment))) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error allocating " @@ -1598,196 +1715,129 @@ imemalign(void **memptr, size_t alignmen if (unlikely(result == NULL)) goto label_oom; assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); *memptr = result; ret = 0; label_return: if (config_stats && likely(result != NULL)) { - assert(usize == isalloc(result, config_prof)); + assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, size, result); + JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, + false); + witness_assert_lockless(tsd_tsdn(tsd)); return (ret); label_oom: assert(result == NULL); if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error allocating aligned memory: " "out of memory\n"); abort(); } ret = ENOMEM; + witness_assert_lockless(tsd_tsdn(tsd)); goto label_return; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) je_posix_memalign(void **memptr, size_t alignment, size_t size) { - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); + int ret; + + ret = imemalign(memptr, alignment, size, sizeof(void *)); + return (ret); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) je_aligned_alloc(size_t alignment, size_t size) { void *ret; int err; if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { ret = NULL; set_errno(err); } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - 
false); + return (ret); } -static void * -icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - szind_t ind_large = size2index(LARGE_MINCLASS); - p = icalloc(tsd, LARGE_MINCLASS, ind_large); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = icalloc(tsd, usize, ind); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind) -{ - void *p; - prof_tctx_t *tctx; - - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = icalloc_prof_sample(tsd, usize, ind, tctx); - else - p = icalloc(tsd, usize, ind); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, usize, tctx); - - return (p); -} - JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) je_calloc(size_t num, size_t size) { void *ret; - tsd_t *tsd; + tsdn_t *tsdn; size_t num_size; - szind_t ind; size_t usize JEMALLOC_CC_SILENCE_INIT(0); - if (unlikely(malloc_init())) { - num_size = 0; - ret = NULL; - goto label_return; - } - tsd = tsd_fetch(); - num_size = num * size; if (unlikely(num_size == 0)) { if (num == 0 || size == 0) num_size = 1; - else { - ret = NULL; - goto label_return; - } + else + num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ /* * Try to avoid division here. We know that it isn't possible to * overflow during multiplication if neither operand uses any of the * most significant half of the bits in a size_t. */ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << - 2))) && (num_size / size != num))) { - /* size_t overflow. 
*/ - ret = NULL; - goto label_return; - } - - ind = size2index(num_size); - if (unlikely(ind >= NSIZES)) { - ret = NULL; - goto label_return; - } - if (config_prof && opt_prof) { - usize = index2size(ind); - ret = icalloc_prof(tsd, usize, ind); + 2))) && (num_size / size != num))) + num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */ + + if (likely(!malloc_slow)) { + ret = ialloc_body(num_size, true, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "calloc", true, false); } else { - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = index2size(ind); - ret = icalloc(tsd, num_size, ind); + ret = ialloc_body(num_size, true, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "calloc", true, true); + UTRACE(0, num_size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true); } -label_return: - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); return (ret); } static void * irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, prof_tctx_t *tctx) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); return (p); } JEMALLOC_ALWAYS_INLINE_C void * irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(old_ptr); + old_tctx = 
prof_tctx_get(tsd_tsdn(tsd), old_ptr); tctx = prof_alloc_prep(tsd, usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); else p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); @@ -1799,134 +1849,154 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, } JEMALLOC_INLINE_C void ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { size_t usize; UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + witness_assert_lockless(tsd_tsdn(tsd)); + assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); prof_free(tsd, ptr, usize); } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; if (likely(!slow_path)) iqalloc(tsd, ptr, tcache, false); else { if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); + rzsize = p2rz(tsd_tsdn(tsd), ptr); iqalloc(tsd, ptr, tcache, true); JEMALLOC_VALGRIND_FREE(ptr, rzsize); } } JEMALLOC_INLINE_C void -isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache) +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + witness_assert_lockless(tsd_tsdn(tsd)); + assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); if (config_prof && opt_prof) prof_free(tsd, ptr, usize); if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); - isqalloc(tsd, ptr, usize, tcache); + rzsize = p2rz(tsd_tsdn(tsd), ptr); + isqalloc(tsd, ptr, usize, tcache, slow_path); JEMALLOC_VALGRIND_FREE(ptr, rzsize); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR 
JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t size) { void *ret; - tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL); + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); if (unlikely(size == 0)) { if (ptr != NULL) { + tsd_t *tsd; + /* realloc(ptr, 0) is equivalent to free(ptr). */ UTRACE(ptr, 0, 0); tsd = tsd_fetch(); ifree(tsd, ptr, tcache_get(tsd, false), true); return (NULL); } size = 1; } if (likely(ptr != NULL)) { + tsd_t *tsd; + assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); + witness_assert_lockless(tsd_tsdn(tsd)); + + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); + if (config_valgrind && unlikely(in_valgrind)) { + old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : + u2rz(old_usize); + } if (config_prof && opt_prof) { usize = s2u(size); ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? NULL : irealloc_prof(tsd, ptr, old_usize, usize); } else { if (config_stats || (config_valgrind && unlikely(in_valgrind))) usize = s2u(size); ret = iralloc(tsd, ptr, old_usize, size, 0, false); } + tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). 
*/ if (likely(!malloc_slow)) - ret = imalloc_body(size, &tsd, &usize, false); + ret = ialloc_body(size, false, &tsdn, &usize, false); else - ret = imalloc_body(size, &tsd, &usize, true); + ret = ialloc_body(size, false, &tsdn, &usize, true); + assert(!tsdn_null(tsdn) || ret == NULL); } if (unlikely(ret == NULL)) { if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in realloc(): " "out of memory\n"); abort(); } set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret, config_prof)); + tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize, + JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize, old_rzsize, true, false); + witness_assert_lockless(tsdn); return (ret); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) { UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { tsd_t *tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); if (likely(!malloc_slow)) ifree(tsd, ptr, tcache_get(tsd, false), false); else ifree(tsd, ptr, tcache_get(tsd, false), true); + witness_assert_lockless(tsd_tsdn(tsd)); } } /* * End malloc(3)-compatible functions. 
*/ /******************************************************************************/ /* @@ -1937,31 +2007,29 @@ je_free(void *ptr) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_memalign(size_t alignment, size_t size) { void *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); return (ret); } #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) je_valloc(size_t size) { void *ret JEMALLOC_CC_SILENCE_INIT(NULL); if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); return (ret); } #endif /* * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has * #define je_malloc malloc */ @@ -1981,28 +2049,51 @@ je_valloc(size_t size) */ JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; # endif + +#ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +#define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. 
*/ +#define PREALIAS(je_fn) ALIAS(je_fn) +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +void __libc_free(void* ptr) PREALIAS(je_free); +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +int __posix_memalign(void** r, size_t a, size_t s) + PREALIAS(je_posix_memalign); +#undef PREALIAS +#undef ALIAS + +#endif + #endif /* * End non-standard override functions. */ /******************************************************************************/ /* * Begin non-standard functions. */ JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, +imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) { if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { *alignment = 0; *usize = s2u(size); } else { *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); @@ -2015,192 +2106,191 @@ imallocx_flags_decode_hard(tsd_t *tsd, s if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) *tcache = NULL; else *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else *tcache = tcache_get(tsd, true); if ((flags & MALLOCX_ARENA_MASK) != 0) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); - *arena = arena_get(arena_ind, true); + *arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(*arena == NULL)) return (true); } else *arena = NULL; return (false); } -JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, - size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) -{ - - if (likely(flags == 0)) { - *usize = s2u(size); - if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS)) - return (true); - *alignment = 0; - *zero = false; - *tcache = tcache_get(tsd, true); - *arena = NULL; - return 
(false); - } else { - return (imallocx_flags_decode_hard(tsd, size, flags, usize, - alignment, zero, tcache, arena)); - } -} - JEMALLOC_ALWAYS_INLINE_C void * -imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) +imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena, bool slow_path) { szind_t ind; if (unlikely(alignment != 0)) - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); ind = size2index(usize); assert(ind < NSIZES); - if (unlikely(zero)) - return (icalloct(tsd, usize, ind, tcache, arena)); - return (imalloct(tsd, usize, ind, tcache, arena)); + return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena, + slow_path)); } static void * -imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) +imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena, bool slow_path) { void *p; if (usize <= SMALL_MAXCLASS) { assert(((alignment == 0) ? 
s2u(LARGE_MINCLASS) : sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); - p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache, - arena); + p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero, + tcache, arena, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); - } else - p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena); + arena_prof_promoted(tsdn, p, usize); + } else { + p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, + slow_path); + } return (p); } JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) +imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) { void *p; size_t alignment; bool zero; tcache_t *tcache; arena_t *arena; prof_tctx_t *tctx; if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, &zero, &tcache, &arena))) return (NULL); tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); - if (likely((uintptr_t)tctx == (uintptr_t)1U)) - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); - else if ((uintptr_t)tctx > (uintptr_t)1U) { - p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache, - arena); + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); } else p = NULL; if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(p, *usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, *usize, tctx); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); } JEMALLOC_ALWAYS_INLINE_C void * -imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) +imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, + bool slow_path) { void *p; size_t alignment; bool zero; 
tcache_t *tcache; arena_t *arena; + if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, + &zero, &tcache, &arena))) + return (NULL); + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache, + arena, slow_path); + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + return (p); +} + +/* This function guarantees that *tsdn is non-NULL on success. */ +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize, + bool slow_path) +{ + tsd_t *tsd; + + if (slow_path && unlikely(malloc_init())) { + *tsdn = NULL; + return (NULL); + } + + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); + if (likely(flags == 0)) { szind_t ind = size2index(size); if (unlikely(ind >= NSIZES)) return (NULL); - if (config_stats || (config_valgrind && - unlikely(in_valgrind))) { + if (config_stats || (config_prof && opt_prof) || (slow_path && + config_valgrind && unlikely(in_valgrind))) { *usize = index2size(ind); assert(*usize > 0 && *usize <= HUGE_MAXCLASS); } - return (imalloc(tsd, size, ind, true)); + + if (config_prof && opt_prof) { + return (ialloc_prof(tsd, *usize, ind, false, + slow_path)); + } + + return (ialloc(tsd, size, ind, false, slow_path)); } - if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize, - &alignment, &zero, &tcache, &arena))) - return (NULL); - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - return (p); + if (config_prof && opt_prof) + return (imallocx_prof(tsd, size, flags, usize, slow_path)); + + return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_mallocx(size_t size, int flags) { - tsd_t *tsd; + tsdn_t *tsdn; void *p; size_t usize; assert(size != 0); - if (unlikely(malloc_init())) - goto label_oom; - 
tsd = tsd_fetch(); - - if (config_prof && opt_prof) - p = imallocx_prof(tsd, size, flags, &usize); - else - p = imallocx_no_prof(tsd, size, flags, &usize); - if (unlikely(p == NULL)) - goto label_oom; - - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; + if (likely(!malloc_slow)) { + p = imallocx_body(size, flags, &tsdn, &usize, false); + ialloc_post_check(p, tsdn, usize, "mallocx", false, false); + } else { + p = imallocx_body(size, flags, &tsdn, &usize, true); + ialloc_post_check(p, tsdn, usize, "mallocx", false, true); + UTRACE(0, size, p); + JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize, + MALLOCX_ZERO_GET(flags)); } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags)); + return (p); -label_oom: - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in mallocx(): out of memory\n"); - abort(); - } - UTRACE(0, size, 0); - return (NULL); } static void * irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, prof_tctx_t *tctx) { void *p; if (tctx == NULL) return (NULL); if (usize <= SMALL_MAXCLASS) { p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, zero, tcache, arena); if (p == NULL) return (NULL); - arena_prof_promoted(p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else { p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, tcache, arena); } return (p); } @@ -2209,42 +2299,42 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t alignment, size_t *usize, bool zero, tcache_t *tcache, arena_t *arena) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(old_ptr); - tctx = prof_alloc_prep(tsd, *usize, prof_active, true); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); if 
(unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, alignment, zero, tcache, arena, tctx); } else { p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero, tcache, arena); } if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); + prof_alloc_rollback(tsd, tctx, false); return (NULL); } if (p == old_ptr && alignment != 0) { /* * The allocation did not move, so it is possible that the size * class is smaller than would guarantee the requested * alignment, and that the alignment constraint was * serendipitously satisfied. Additionally, old_usize may not * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. */ - *usize = isalloc(p, config_prof); + *usize = isalloc(tsd_tsdn(tsd), p, config_prof); } - prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr, + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, old_usize, old_tctx); return (p); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) @@ -2260,34 +2350,35 @@ je_rallocx(void *ptr, size_t size, int f arena_t *arena; tcache_t *tcache; assert(ptr != NULL); assert(size != 0); assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); - arena = arena_get(arena_ind, true); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(arena == NULL)) goto label_oom; } else arena = NULL; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; else tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else tcache = tcache_get(tsd, true); - old_usize = isalloc(ptr, config_prof); + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); if (config_valgrind && 
unlikely(in_valgrind)) old_rzsize = u2rz(old_usize); if (config_prof && opt_prof) { usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) goto label_oom; p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, @@ -2295,74 +2386,76 @@ je_rallocx(void *ptr, size_t size, int f if (unlikely(p == NULL)) goto label_oom; } else { p = iralloct(tsd, ptr, old_usize, size, alignment, zero, tcache, arena); if (unlikely(p == NULL)) goto label_oom; if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = isalloc(p, config_prof); + usize = isalloc(tsd_tsdn(tsd), p, config_prof); } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize, - old_rzsize, false, zero); + JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr, + old_usize, old_rzsize, false, zero); + witness_assert_lockless(tsd_tsdn(tsd)); return (p); label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); abort(); } UTRACE(ptr, size, 0); + witness_assert_lockless(tsd_tsdn(tsd)); return (NULL); } JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t usize; - if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero)) + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) return (old_usize); - usize = isalloc(ptr, config_prof); + usize = isalloc(tsdn, ptr, config_prof); return (usize); } static size_t -ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t 
extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; if (tctx == NULL) return (old_usize); - usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment, + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, zero); return (usize); } JEMALLOC_ALWAYS_INLINE_C size_t ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t usize_max, usize; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. * Therefore, compute its maximum possible value and use that in * prof_alloc_prep() to decide whether to capture a backtrace. * prof_realloc() will use the actual usize to decide whether to sample. */ if (alignment == 0) { usize_max = s2u(size+extra); @@ -2377,21 +2470,21 @@ ixallocx_prof(tsd_t *tsd, void *ptr, siz * case allocation succeeds. 
*/ usize_max = HUGE_MAXCLASS; } } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra, - alignment, zero, tctx); + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); } else { - usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, - alignment, zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); return (usize); } prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, old_tctx); @@ -2408,18 +2501,19 @@ je_xallocx(void *ptr, size_t size, size_ bool zero = flags & MALLOCX_ZERO; assert(ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); - - old_usize = isalloc(ptr, config_prof); + witness_assert_lockless(tsd_tsdn(tsd)); + + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); /* * The API explicitly absolves itself of protecting against (size + * extra) numerical overflow, but we may need to clamp extra to avoid * exceeding HUGE_MAXCLASS. 
* * Ordinarily, size limit checking is handled deeper down, but here we * have to check as part of (size + extra) clamping, since we need the @@ -2434,180 +2528,231 @@ je_xallocx(void *ptr, size_t size, size_ if (config_valgrind && unlikely(in_valgrind)) old_rzsize = u2rz(old_usize); if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, alignment, zero); } else { - usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, - alignment, zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } if (unlikely(usize == old_usize)) goto label_not_resized; if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } - JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize, - old_rzsize, false, zero); + JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr, + old_usize, old_rzsize, false, zero); label_not_resized: UTRACE(ptr, size, ptr); + witness_assert_lockless(tsd_tsdn(tsd)); return (usize); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, int flags) { size_t usize; + tsdn_t *tsdn; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + if (config_ivsalloc) - usize = ivsalloc(ptr, config_prof); + usize = ivsalloc(tsdn, ptr, config_prof); else - usize = isalloc(ptr, config_prof); - + usize = isalloc(tsdn, ptr, config_prof); + + witness_assert_lockless(tsdn); return (usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags) { tsd_t *tsd; tcache_t *tcache; assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; else tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else tcache = 
tcache_get(tsd, false); UTRACE(ptr, 0, 0); - ifree(tsd_fetch(), ptr, tcache, true); + if (likely(!malloc_slow)) + ifree(tsd, ptr, tcache, false); + else + ifree(tsd, ptr, tcache, true); + witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_ALWAYS_INLINE_C size_t -inallocx(size_t size, int flags) +inallocx(tsdn_t *tsdn, size_t size, int flags) { size_t usize; + witness_assert_lockless(tsdn); + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) usize = s2u(size); else usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + witness_assert_lockless(tsdn); return (usize); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags) { tsd_t *tsd; tcache_t *tcache; size_t usize; assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - usize = inallocx(size, flags); - assert(usize == isalloc(ptr, config_prof)); - tsd = tsd_fetch(); + usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof)); + + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; else tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); } else tcache = tcache_get(tsd, false); UTRACE(ptr, 0, 0); - isfree(tsd, ptr, usize, tcache); + if (likely(!malloc_slow)) + isfree(tsd, ptr, usize, tcache, false); + else + isfree(tsd, ptr, usize, tcache, true); + witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { size_t usize; + tsdn_t *tsdn; assert(size != 0); if (unlikely(malloc_init())) return (0); - usize = inallocx(size, flags); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + + usize = inallocx(tsdn, size, flags); if (unlikely(usize > HUGE_MAXCLASS)) return (0); + witness_assert_lockless(tsdn); return (usize); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t 
newlen) { + int ret; + tsd_t *tsd; if (unlikely(malloc_init())) return (EAGAIN); - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + witness_assert_lockless(tsd_tsdn(tsd)); + return (ret); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { + int ret; + tsdn_t *tsdn; if (unlikely(malloc_init())) return (EAGAIN); - return (ctl_nametomib(name, mibp, miblenp)); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + ret = ctl_nametomib(tsdn, name, mibp, miblenp); + witness_assert_lockless(tsdn); + return (ret); } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsd_t *tsd; if (unlikely(malloc_init())) return (EAGAIN); - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + witness_assert_lockless(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + witness_assert_lockless(tsd_tsdn(tsd)); + return (ret); } JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { - + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); stats_print(write_cb, cbopaque, opts); + witness_assert_lockless(tsdn); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; + tsdn_t *tsdn; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); + ret = ivsalloc(tsdn, ptr, config_prof); else - ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof); - + ret = (ptr == NULL) ? 
0 : isalloc(tsdn, ptr, config_prof); + + witness_assert_lockless(tsdn); return (ret); } /* * End non-standard functions. */ /******************************************************************************/ /* @@ -2623,112 +2768,130 @@ je_malloc_usable_size(JEMALLOC_USABLE_SI * partially initialized the allocator. Ordinarily jemalloc prevents * fork/malloc races via the following functions it registers during * initialization using pthread_atfork(), but of course that does no good if * the allocator isn't fully initialized at fork time. The following library * constructor is a partial solution to this problem. It may still be possible * to trigger the deadlock described above, but doing so would involve forking * via a library constructor that runs before jemalloc's runs. */ +#ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void jemalloc_constructor(void) { malloc_init(); } +#endif #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_prefork(void) #else JEMALLOC_EXPORT void _malloc_prefork(void) #endif { + tsd_t *tsd; unsigned i, j, narenas; arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) return; #endif assert(malloc_initialized()); + tsd = tsd_fetch(); + narenas = narenas_total_get(); + witness_prefork(tsd); /* Acquire all mutexes in a safe order. 
*/ - ctl_prefork(); - malloc_mutex_prefork(&arenas_lock); - prof_prefork0(); + ctl_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + prof_prefork0(tsd_tsdn(tsd)); for (i = 0; i < 3; i++) { for (j = 0; j < narenas; j++) { - if ((arena = arena_get(j, false)) != NULL) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { switch (i) { - case 0: arena_prefork0(arena); break; - case 1: arena_prefork1(arena); break; - case 2: arena_prefork2(arena); break; + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; default: not_reached(); } } } } - base_prefork(); - chunk_prefork(); + base_prefork(tsd_tsdn(tsd)); for (i = 0; i < narenas; i++) { - if ((arena = arena_get(i, false)) != NULL) - arena_prefork3(arena); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_prefork3(tsd_tsdn(tsd), arena); } - prof_prefork1(); + prof_prefork1(tsd_tsdn(tsd)); } #ifndef JEMALLOC_MUTEX_INIT_CB void jemalloc_postfork_parent(void) #else JEMALLOC_EXPORT void _malloc_postfork(void) #endif { + tsd_t *tsd; unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB if (!malloc_initialized()) return; #endif assert(malloc_initialized()); + tsd = tsd_fetch(); + + witness_postfork_parent(tsd); /* Release all mutexes, now that fork() has completed. 
*/ - chunk_postfork_parent(); - base_postfork_parent(); + base_postfork_parent(tsd_tsdn(tsd)); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - if ((arena = arena_get(i, false)) != NULL) - arena_postfork_parent(arena); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_parent(tsd_tsdn(tsd), arena); } - prof_postfork_parent(); - malloc_mutex_postfork_parent(&arenas_lock); - ctl_postfork_parent(); + prof_postfork_parent(tsd_tsdn(tsd)); + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + ctl_postfork_parent(tsd_tsdn(tsd)); } void jemalloc_postfork_child(void) { + tsd_t *tsd; unsigned i, narenas; assert(malloc_initialized()); + tsd = tsd_fetch(); + + witness_postfork_child(tsd); /* Release all mutexes, now that fork() has completed. */ - chunk_postfork_child(); - base_postfork_child(); + base_postfork_child(tsd_tsdn(tsd)); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - if ((arena = arena_get(i, false)) != NULL) - arena_postfork_child(arena); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_child(tsd_tsdn(tsd), arena); } - prof_postfork_child(); - malloc_mutex_postfork_child(&arenas_lock); - ctl_postfork_child(); + prof_postfork_child(tsd_tsdn(tsd)); + malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); + ctl_postfork_child(tsd_tsdn(tsd)); } /******************************************************************************/
--- a/memory/jemalloc/src/src/mutex.c +++ b/memory/jemalloc/src/src/mutex.c @@ -64,27 +64,29 @@ pthread_create(pthread_t *__restrict thr /******************************************************************************/ #ifdef JEMALLOC_MUTEX_INIT_CB JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif bool -malloc_mutex_init(malloc_mutex_t *mutex) +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, _CRT_SPINCOUNT)) return (true); # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) if (postpone_init) { mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, @@ -98,51 +100,54 @@ malloc_mutex_init(malloc_mutex_t *mutex) return (true); pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); return (true); } pthread_mutexattr_destroy(&attr); #endif + if (config_debug) + witness_init(&mutex->witness, name, rank, NULL); return (false); } void -malloc_mutex_prefork(malloc_mutex_t *mutex) +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { - malloc_mutex_lock(mutex); + malloc_mutex_lock(tsdn, mutex); } void -malloc_mutex_postfork_parent(malloc_mutex_t *mutex) +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { - malloc_mutex_unlock(mutex); + malloc_mutex_unlock(tsdn, mutex); } void -malloc_mutex_postfork_child(malloc_mutex_t *mutex) +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB - malloc_mutex_unlock(mutex); + malloc_mutex_unlock(tsdn, mutex); #else - if (malloc_mutex_init(mutex)) { + if 
(malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank)) { malloc_printf("<jemalloc>: Error re-initializing mutex in " "child\n"); if (opt_abort) abort(); } #endif } bool -mutex_boot(void) +malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, bootstrap_calloc) != 0) return (true);
--- a/memory/jemalloc/src/src/nstime.c +++ b/memory/jemalloc/src/src/nstime.c @@ -92,57 +92,103 @@ uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor) { assert(divisor->ns != 0); return (time->ns / divisor->ns); } +#ifdef _WIN32 +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + FILETIME ft; + uint64_t ticks_100ns; + + GetSystemTimeAsFileTime(&ft); + ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + nstime_init(time, ticks_100ns * 100); +} +#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif JEMALLOC_HAVE_CLOCK_MONOTONIC +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) +{ + + nstime_init(time, mach_absolute_time()); +} +#else +# define NSTIME_MONOTONIC false +static void +nstime_get(nstime_t *time) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); +} +#endif + +#ifdef JEMALLOC_JET +#undef nstime_monotonic +#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic) +#endif +bool +nstime_monotonic(void) +{ + + return (NSTIME_MONOTONIC); +#undef NSTIME_MONOTONIC +} +#ifdef JEMALLOC_JET +#undef nstime_monotonic +#define nstime_monotonic JEMALLOC_N(nstime_monotonic) +nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic); +#endif + #ifdef JEMALLOC_JET #undef nstime_update -#define nstime_update JEMALLOC_N(nstime_update_impl) +#define nstime_update JEMALLOC_N(n_nstime_update) #endif bool nstime_update(nstime_t *time) { nstime_t old_time; nstime_copy(&old_time, time); - -#ifdef _WIN32 - { - FILETIME 
ft; - uint64_t ticks; - GetSystemTimeAsFileTime(&ft); - ticks = (((uint64_t)ft.dwHighDateTime) << 32) | - ft.dwLowDateTime; - time->ns = ticks * 100; - } -#elif JEMALLOC_CLOCK_GETTIME - { - struct timespec ts; - - if (sysconf(_SC_MONOTONIC_CLOCK) > 0) - clock_gettime(CLOCK_MONOTONIC, &ts); - else - clock_gettime(CLOCK_REALTIME, &ts); - time->ns = ts.tv_sec * BILLION + ts.tv_nsec; - } -#else - struct timeval tv; - gettimeofday(&tv, NULL); - time->ns = tv.tv_sec * BILLION + tv.tv_usec * 1000; -#endif + nstime_get(time); /* Handle non-monotonic clocks. */ if (unlikely(nstime_compare(&old_time, time) > 0)) { nstime_copy(time, &old_time); return (true); } return (false); } #ifdef JEMALLOC_JET #undef nstime_update #define nstime_update JEMALLOC_N(nstime_update) -nstime_update_t *nstime_update = JEMALLOC_N(nstime_update_impl); +nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update); #endif
--- a/memory/jemalloc/src/src/pages.c +++ b/memory/jemalloc/src/src/pages.c @@ -1,34 +1,54 @@ #define JEMALLOC_PAGES_C_ #include "jemalloc/internal/jemalloc_internal.h" +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#include <sys/sysctl.h> +#endif + +/******************************************************************************/ +/* Data. */ + +#ifndef _WIN32 +# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) +# define PAGES_PROT_DECOMMIT (PROT_NONE) +static int mmap_flags; +#endif +static bool os_overcommits; + /******************************************************************************/ void * -pages_map(void *addr, size_t size) +pages_map(void *addr, size_t size, bool *commit) { void *ret; assert(size != 0); + if (os_overcommits) + *commit = true; + #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is * given, it fails and returns NULL. */ - ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, + ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), PAGE_READWRITE); #else /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ - ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, - -1, 0); + { + int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + + ret = mmap(addr, size, prot, mmap_flags, -1, 0); + } assert(ret != NULL); if (ret == MAP_FAILED) ret = NULL; else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. 
*/ @@ -62,27 +82,28 @@ pages_unmap(void *addr, size_t size) #endif "(): %s\n", buf); if (opt_abort) abort(); } } void * -pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) +pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, + bool *commit) { void *ret = (void *)((uintptr_t)addr + leadsize); assert(alloc_size >= leadsize + size); #ifdef _WIN32 { void *new_addr; pages_unmap(addr, alloc_size); - new_addr = pages_map(ret, size); + new_addr = pages_map(ret, size, commit); if (new_addr == ret) return (ret); if (new_addr) pages_unmap(new_addr, size); return (NULL); } #else { @@ -96,41 +117,40 @@ pages_trim(void *addr, size_t alloc_size } #endif } static bool pages_commit_impl(void *addr, size_t size, bool commit) { -#ifndef _WIN32 - /* - * The following decommit/commit implementation is functional, but - * always disabled because it doesn't add value beyong improved - * debugging (at the cost of extra system calls) on systems that - * overcommit. - */ - if (false) { - int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE; - void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON | - MAP_FIXED, -1, 0); + if (os_overcommits) + return (true); + +#ifdef _WIN32 + return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, + PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); +#else + { + int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, + -1, 0); if (result == MAP_FAILED) return (true); if (result != addr) { /* * We succeeded in mapping memory, but not in the right * place. */ pages_unmap(result, size); return (true); } return (false); } #endif - return (true); } bool pages_commit(void *addr, size_t size) { return (pages_commit_impl(addr, size, true)); } @@ -166,8 +186,88 @@ pages_purge(void *addr, size_t size) # undef JEMALLOC_MADV_ZEROS #else /* Last resort no-op. 
*/ unzeroed = true; #endif return (unzeroed); } +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +static bool +os_overcommits_sysctl(void) +{ + int vm_overcommit; + size_t sz; + + sz = sizeof(vm_overcommit); + if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) + return (false); /* Error. */ + + return ((vm_overcommit & 0x3) == 0); +} +#endif + +#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* + * Use syscall(2) rather than {open,read,close}(2) when possible to avoid + * reentry during bootstrapping if another library has interposed system call + * wrappers. + */ +static bool +os_overcommits_proc(void) +{ + int fd; + char buf[1]; + ssize_t nread; + +#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_open) + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); +#else + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); +#endif + if (fd == -1) + return (false); /* Error. */ + +#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_read) + nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf)); +#else + nread = read(fd, &buf, sizeof(buf)); +#endif + +#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 1) + return (false); /* Error. */ + /* + * /proc/sys/vm/overcommit_memory meanings: + * 0: Heuristic overcommit. + * 1: Always overcommit. + * 2: Never overcommit. + */ + return (buf[0] == '0' || buf[0] == '1'); +} +#endif + +void +pages_boot(void) +{ + +#ifndef _WIN32 + mmap_flags = MAP_PRIVATE | MAP_ANON; +#endif + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + os_overcommits = os_overcommits_sysctl(); +#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) + os_overcommits = os_overcommits_proc(); +# ifdef MAP_NORESERVE + if (os_overcommits) + mmap_flags |= MAP_NORESERVE; +# endif +#else + os_overcommits = false; +#endif +}
--- a/memory/jemalloc/src/src/prof.c +++ b/memory/jemalloc/src/src/prof.c @@ -116,23 +116,23 @@ static int prof_dump_fd; static bool prof_booted = false; /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ -static bool prof_tctx_should_destroy(prof_tctx_t *tctx); +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(prof_tdata_t *tdata, +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); -static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /******************************************************************************/ /* Red-black trees. */ JEMALLOC_INLINE_C int prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; @@ -208,56 +208,57 @@ prof_alloc_rollback(tsd_t *tsd, prof_tct * programs. 
*/ tdata = prof_tdata_get(tsd, true); if (tdata != NULL) prof_sample_threshold_update(tdata); } if ((uintptr_t)tctx > (uintptr_t)1U) { - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) prof_tctx_destroy(tsd, tctx); else - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } void -prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { - prof_tctx_set(ptr, usize, tctx); + prof_tctx_set(tsdn, ptr, usize, tctx); - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsdn, tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { tctx->cnts.accumobjs++; tctx->cnts.accumbytes += usize; } tctx->prepared = false; - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsdn, tctx->tdata->lock); } void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) prof_tctx_destroy(tsd, tctx); else - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } void bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); @@ -272,42 +273,42 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tda cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); if (tdata != NULL) { assert(!tdata->enq); tdata->enq = true; } - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); } JEMALLOC_INLINE_C void prof_leave(tsd_t *tsd, prof_tdata_t 
*tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; assert(tdata->enq); tdata->enq = false; idump = tdata->enq_idump; tdata->enq_idump = false; gdump = tdata->enq_gdump; tdata->enq_gdump = false; if (idump) - prof_idump(); + prof_idump(tsd_tsdn(tsd)); if (gdump) - prof_gdump(); + prof_gdump(tsd_tsdn(tsd)); } } #ifdef JEMALLOC_PROF_LIBUNWIND void prof_backtrace(prof_bt_t *bt) { int nframes; @@ -541,24 +542,25 @@ prof_gctx_mutex_choose(void) static malloc_mutex_t * prof_tdata_mutex_choose(uint64_t thr_uid) { return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); } static prof_gctx_t * -prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size, - size2index(size), false, tcache_get(tsd, true), true, NULL, true); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); if (gctx == NULL) return (NULL); gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ gctx->nlimbo = 1; @@ -580,42 +582,43 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_t /* * Check that gctx is still unused by any thread cache before destroying * it. prof_lookup() increments gctx->nlimbo in order to avoid a race * condition with this function, as does prof_tctx_destroy() in order to * avoid a race between the main body of prof_tctx_destroy() and entry * into this function. 
*/ prof_enter(tsd, tdata_self); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) not_reached(); prof_leave(tsd, tdata_self); /* Destroy gctx. */ - malloc_mutex_unlock(gctx->lock); - idalloctm(tsd, gctx, tcache_get(tsd, false), true, true); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true); } else { /* * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). */ gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); } } -/* tctx->tdata->lock must be held. */ static bool -prof_tctx_should_destroy(prof_tctx_t *tctx) +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + if (opt_prof_accum) return (false); if (tctx->cnts.curobjs != 0) return (false); if (tctx->prepared) return (false); return (true); } @@ -628,35 +631,36 @@ prof_gctx_should_destroy(prof_gctx_t *gc return (false); if (!tctx_tree_empty(&gctx->tctxs)) return (false); if (gctx->nlimbo != 0) return (false); return (true); } -/* tctx->tdata->lock is held upon entry, and released before return. 
*/ static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); assert(tctx->cnts.accumobjs == 0); assert(tctx->cnts.accumbytes == 0); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tdata, false); - malloc_mutex_unlock(tdata->lock); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: tctx_tree_remove(&gctx->tctxs, tctx); destroy_tctx = true; if (prof_gctx_should_destroy(gctx)) { /* * Increment gctx->nlimbo in order to keep another * thread from winning the race to destroy gctx while @@ -686,27 +690,29 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_ destroy_tctx = false; destroy_gctx = false; break; default: not_reached(); destroy_tctx = false; destroy_gctx = false; } - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); if (destroy_gctx) { prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, tdata); } + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + if (destroy_tdata) prof_tdata_destroy(tsd, tdata, false); if (destroy_tctx) - idalloctm(tsd, tctx, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true); } static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { prof_gctx_t *p; @@ -716,38 +722,37 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t prof_bt_t *p; void *v; } btkey; bool new_gctx; prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has 
never been seen before. Insert it. */ - gctx.p = prof_gctx_create(tsd, bt); + gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); if (gctx.v == NULL) { prof_leave(tsd, tdata); return (true); } btkey.p = &gctx.p->bt; if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { /* OOM. */ prof_leave(tsd, tdata); - idalloctm(tsd, gctx.v, tcache_get(tsd, false), true, - true); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true); return (true); } new_gctx = true; } else { /* * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ - malloc_mutex_lock(gctx.p->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; - malloc_mutex_unlock(gctx.p->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); new_gctx = false; } prof_leave(tsd, tdata); *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; return (false); @@ -764,88 +769,86 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) bool not_found; cassert(config_prof); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return (NULL); - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); if (!not_found) /* Note double negative! */ ret.p->prepared = true; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { - tcache_t *tcache; void *btkey; prof_gctx_t *gctx; bool new_gctx, error; /* * This thread's cache lacks bt. Look for it in the global * cache. */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, &new_gctx)) return (NULL); /* Link a prof_tctx_t into gctx for this thread. 
*/ - tcache = tcache_get(tsd, true); - ret.v = iallocztm(tsd, sizeof(prof_tctx_t), - size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL, - true); + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); if (ret.p == NULL) { if (new_gctx) prof_gctx_try_destroy(tsd, tdata, gctx, tdata); return (NULL); } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; ret.p->thr_discrim = tdata->thr_discrim; memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); ret.p->gctx = gctx; ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (error) { if (new_gctx) prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - idalloctm(tsd, ret.v, tcache, true, true); + idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true); return (NULL); } - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } return (ret.p); } +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. 
gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ void prof_sample_threshold_update(prof_tdata_t *tdata) { - /* - * The body of this function is compiled out unless heap profiling is - * enabled, so that it is possible to compile jemalloc with floating - * point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a - * workaround for versions of glibc that don't properly save/restore - * floating point registers during dynamic lazy symbol loading (which - * internally calls into whatever malloc implementation happens to be - * integrated into the application). Note that some compilers (e.g. - * gcc 4.8) may use floating point registers for fast memory moves, so - * jemalloc must be compiled with such optimizations disabled (e.g. - * -mno-sse) in order for the workaround to be complete. 
- */ #ifdef JEMALLOC_PROF uint64_t r; double u; if (!config_prof) return; if (lg_prof_sample == 0) { @@ -866,17 +869,17 @@ prof_sample_threshold_update(prof_tdata_ * For more information on the math, see: * * Non-Uniform Random Variate Generation * Luc Devroye * Springer-Verlag, New York, 1986 * pp 500 * (http://luc.devroye.org/rnbookindex.html) */ - r = prng_lg_range(&tdata->prng_state, 53); + r = prng_lg_range_u64(&tdata->prng_state, 53); u = (double)r * (1.0/9007199254740992.0L); tdata->bytes_until_sample = (uint64_t)(log(u) / log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + (uint64_t)1U; #endif } #ifdef JEMALLOC_JET @@ -889,21 +892,23 @@ prof_tdata_count_iter(prof_tdata_tree_t return (NULL); } size_t prof_tdata_count(void) { size_t tdata_count = 0; + tsdn_t *tsdn; - malloc_mutex_lock(&tdatas_mtx); + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, (void *)&tdata_count); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsdn, &tdatas_mtx); return (tdata_count); } #endif #ifdef JEMALLOC_JET size_t prof_bt_count(void) @@ -912,19 +917,19 @@ prof_bt_count(void) tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return (0); - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); return (bt_count); } #endif #ifdef JEMALLOC_JET #undef prof_dump_open #define prof_dump_open JEMALLOC_N(prof_dump_open_impl) @@ -1027,30 +1032,31 @@ prof_dump_printf(bool propagate_err, con va_start(ap, format); malloc_vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); ret = prof_dump_write(propagate_err, buf); return (ret); } -/* tctx->tdata->lock is held. 
*/ static void -prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { - malloc_mutex_lock(tctx->gctx->lock); + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); return; case prof_tctx_state_nominal: tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { tdata->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; @@ -1059,81 +1065,93 @@ prof_tctx_merge_tdata(prof_tctx_t *tctx, } break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: not_reached(); } } -/* gctx->lock is held. */ static void -prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; if (opt_prof_accum) { gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; } } -/* tctx->gctx is held. */ static prof_tctx_t * prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. 
*/ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tctx, tctx->gctx); + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); break; default: not_reached(); } return (NULL); } -/* gctx->lock is held. */ +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { - bool propagate_err = *(bool *)arg; + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: case prof_tctx_state_nominal: /* Not captured by this dump. */ break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - if (prof_dump_printf(propagate_err, + if (prof_dump_printf(arg->propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes)) return (tctx); break; default: not_reached(); } return (NULL); } -/* tctx->gctx is held. */ static prof_tctx_t * prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. 
*/ break; case prof_tctx_state_dumping: tctx->state = prof_tctx_state_nominal; break; case prof_tctx_state_purgatory: @@ -1144,46 +1162,53 @@ prof_tctx_finish_iter(prof_tctx_tree_t * } ret = NULL; label_return: return (ret); } static void -prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsdn, gctx->lock); /* * Increment nlimbo so that gctx won't go away before dump. * Additionally, link gctx into the dump list so that it is included in * prof_dump()'s second pass. */ gctx->nlimbo++; gctx_tree_insert(gctxs, gctx); memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsdn, gctx->lock); } +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { - size_t *leak_ngctx = (size_t *)arg; + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; - malloc_mutex_lock(gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL); + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); if (gctx->cnt_summed.curobjs != 0) - (*leak_ngctx)++; - malloc_mutex_unlock(gctx->lock); + arg->leak_ngctx++; + malloc_mutex_unlock(arg->tsdn, gctx->lock); return (NULL); } static void prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); @@ -1192,74 +1217,82 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_t /* * Standard tree iteration won't work here, because as soon as we * decrement gctx->nlimbo and unlock gctx, another thread can * concurrently destroy it, which will corrupt the tree. 
Therefore, * tear down the tree one node at a time during iteration. */ while ((gctx = gctx_tree_first(gctxs)) != NULL) { gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); { prof_tctx_t *next; next = NULL; do { prof_tctx_t *to_destroy = tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, NULL); + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); if (to_destroy != NULL) { next = tctx_tree_next(&gctx->tctxs, to_destroy); tctx_tree_remove(&gctx->tctxs, to_destroy); - idalloctm(tsd, to_destroy, - tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, true, true); } else next = NULL; } while (next != NULL); } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } else - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } } +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { - prof_cnt_t *cnt_all = (prof_cnt_t *)arg; + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; union { prof_tctx_t *p; void *v; } tctx; tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, &tctx.v);) - prof_tctx_merge_tdata(tctx.p, tdata); + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); - cnt_all->curobjs += tdata->cnt_summed.curobjs; - cnt_all->curbytes += tdata->cnt_summed.curbytes; + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += 
tdata->cnt_summed.curbytes; if (opt_prof_accum) { - cnt_all->accumobjs += tdata->cnt_summed.accumobjs; - cnt_all->accumbytes += tdata->cnt_summed.accumbytes; + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } } else tdata->dumping = false; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(arg->tsdn, tdata->lock); return (NULL); } static prof_tdata_t * prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { bool propagate_err = *(bool *)arg; @@ -1278,48 +1311,49 @@ prof_tdata_dump_iter(prof_tdata_tree_t * return (NULL); } #ifdef JEMALLOC_JET #undef prof_dump_header #define prof_dump_header JEMALLOC_N(prof_dump_header_impl) #endif static bool -prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) +prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { bool ret; if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) return (true); - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsdn, &tdatas_mtx); return (ret); } #ifdef JEMALLOC_JET #undef prof_dump_header #define prof_dump_header JEMALLOC_N(prof_dump_header) prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); #endif -/* gctx->lock is held. 
*/ static bool -prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, - prof_gctx_tree_t *gctxs) +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); /* Avoid dumping such gctx's that have no useful data. */ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { assert(gctx->cnt_summed.curobjs == 0); assert(gctx->cnt_summed.curbytes == 0); assert(gctx->cnt_summed.accumobjs == 0); assert(gctx->cnt_summed.accumbytes == 0); @@ -1343,18 +1377,20 @@ prof_dump_gctx(bool propagate_err, prof_ "\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { ret = true; goto label_return; } + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&propagate_err) != NULL) { + (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } ret = false; label_return: return (ret); } @@ -1437,129 +1473,165 @@ prof_dump_maps(bool propagate_err) ret = false; label_return: if (mfd != -1) close(mfd); return (ret); } +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. 
+ */ if (cnt_all->curbytes != 0) { - malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %" - FMTu64" object%s, %zu context%s\n", - cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", - cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", - leak_ngctx, (leak_ngctx != 1) ? "s" : ""); + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); malloc_printf( "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", filename); } +#endif } +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { prof_gctx_t *ret; - bool propagate_err = *(bool *)arg; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(arg->tsdn, gctx->lock); - if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) { + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { ret = gctx; goto label_return; } ret = NULL; label_return: - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(arg->tsdn, gctx->lock); return (ret); } static bool prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) { prof_tdata_t *tdata; - prof_cnt_t cnt_all; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; size_t 
tabind; union { prof_gctx_t *p; void *v; } gctx; - size_t leak_ngctx; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; prof_gctx_tree_t gctxs; cassert(config_prof); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (true); - malloc_mutex_lock(&prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); prof_enter(tsd, tdata); /* * Put gctx's in limbo and clear their counters in preparation for * summing. */ gctx_tree_new(&gctxs); for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) - prof_dump_gctx_prep(gctx.p, &gctxs); + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs); /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(&tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all); - malloc_mutex_unlock(&tdatas_mtx); + prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)&prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); /* Merge tctx stats into gctx's. */ - leak_ngctx = 0; - gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx); + prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg.leak_ngctx = 0; + gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, + (void *)&prof_gctx_merge_iter_arg); prof_leave(tsd, tdata); /* Create dump file. */ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) goto label_open_close_error; /* Dump profile header. */ - if (prof_dump_header(propagate_err, &cnt_all)) + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg.cnt_all)) goto label_write_error; /* Dump per gctx profile stats. 
*/ + prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg.propagate_err = propagate_err; if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, - (void *)&propagate_err) != NULL) + (void *)&prof_gctx_dump_iter_arg) != NULL) goto label_write_error; /* Dump /proc/<pid>/maps if possible. */ if (prof_dump_maps(propagate_err)) goto label_write_error; if (prof_dump_close(propagate_err)) goto label_open_close_error; prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); - if (leakcheck) - prof_leakcheck(&cnt_all, leak_ngctx, filename); - + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } return (false); label_write_error: prof_dump_close(propagate_err); label_open_close_error: prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); return (true); } #define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) #define VSEQ_INVALID UINT64_C(0xffffffffffffffff) static void prof_dump_filename(char *filename, char v, uint64_t vseq) { @@ -1589,101 +1661,99 @@ prof_fdump(void) cassert(config_prof); assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); if (!prof_booted) return; tsd = tsd_fetch(); - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, opt_prof_leak); } void -prof_idump(void) +prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn)) return; - tsd = tsd_fetch(); + tsd = tsdn_tsd(tsdn); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return; if (tdata->enq) { tdata->enq_idump = true; return; } if (opt_prof_prefix[0] != '\0') { char 
filename[PATH_MAX + 1]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } bool -prof_mdump(const char *filename) +prof_mdump(tsd_t *tsd, const char *filename) { - tsd_t *tsd; char filename_buf[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); if (!opt_prof || !prof_booted) return (true); - tsd = tsd_fetch(); if (filename == NULL) { /* No filename specified, so automatically generate one. */ if (opt_prof_prefix[0] == '\0') return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } return (prof_dump(tsd, true, filename, false)); } void -prof_gdump(void) +prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn)) return; - tsd = tsd_fetch(); + tsd = tsdn_tsd(tsdn); tdata = prof_tdata_get(tsd, false); if (tdata == NULL) return; if (tdata->enq) { tdata->enq_gdump = true; return; } if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } static void prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; @@ -1702,274 +1772,280 @@ prof_bt_keycomp(const void *k1, const vo cassert(config_prof); if (bt1->len != bt2->len) return (false); return (memcmp(bt1->vec, 
bt2->vec, bt1->len * sizeof(void *)) == 0); } JEMALLOC_INLINE_C uint64_t -prof_thr_uid_alloc(void) +prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; - malloc_mutex_lock(&next_thr_uid_mtx); + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); thr_uid = next_thr_uid; next_thr_uid++; - malloc_mutex_unlock(&next_thr_uid_mtx); + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); return (thr_uid); } static prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, char *thread_name, bool active) { prof_tdata_t *tdata; - tcache_t *tcache; cassert(config_prof); /* Initialize an empty cache for this thread. */ - tcache = tcache_get(tsd, true); - tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), - size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true); + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); if (tdata == NULL) return (NULL); tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; tdata->thr_discrim = thr_discrim; tdata->thread_name = thread_name; tdata->attached = true; tdata->expired = false; tdata->tctx_uid_next = 0; - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloctm(tsd, tdata, tcache, true, true); + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); return (NULL); } tdata->prng_state = (uint64_t)(uintptr_t)tdata; prof_sample_threshold_update(tdata); tdata->enq = false; tdata->enq_idump = false; tdata->enq_gdump = false; tdata->dumping = false; tdata->active = active; - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); return (tdata); } prof_tdata_t * prof_tdata_init(tsd_t *tsd) { - return 
(prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL, - prof_thread_active_init_get())); + return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd)))); } -/* tdata->lock must be held. */ static bool -prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached) +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { if (tdata->attached && !even_if_attached) return (false); if (ckh_count(&tdata->bt2tctx) != 0) return (false); return (true); } -/* tdatas_mtx must be held. */ +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) +{ + + malloc_mutex_assert_owner(tsdn, tdata->lock); + + return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); +} + static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { - tcache_t *tcache; - assert(prof_tdata_should_destroy(tdata, even_if_attached)); - assert(tsd_prof_tdata_get(tsd) != tdata); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_remove(&tdatas, tdata); - tcache = tcache_get(tsd, false); + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + if (tdata->thread_name != NULL) - idalloctm(tsd, tdata->thread_name, tcache, true, true); + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd, tdata, tcache, true, true); + idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true); } static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } static void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), 
tdata->lock); if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tdata, true); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. */ if (!destroy_tdata) tdata->attached = false; tsd_prof_tdata_set(tsd, NULL); } else destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (destroy_tdata) prof_tdata_destroy(tsd, tdata, true); } prof_tdata_t * prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? - prof_thread_name_alloc(tsd, tdata->thread_name) : NULL; + prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; bool active = tdata->active; prof_tdata_detach(tsd, tdata); return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, active)); } static bool -prof_tdata_expire(prof_tdata_t *tdata) +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsdn, tdata->lock); if (!tdata->expired) { tdata->expired = true; destroy_tdata = tdata->attached ? false : - prof_tdata_should_destroy(tdata, false); + prof_tdata_should_destroy(tsdn, tdata, false); } else destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsdn, tdata->lock); return (destroy_tdata); } static prof_tdata_t * prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; - return (prof_tdata_expire(tdata) ? tdata : NULL); + return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); } void prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; assert(lg_sample < (sizeof(uint64_t) << 3)); - malloc_mutex_lock(&prof_dump_mtx); - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); lg_prof_sample = lg_sample; next = NULL; do { prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, NULL); + prof_tdata_reset_iter, (void *)tsd); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); } else next = NULL; } while (next != NULL); - malloc_mutex_unlock(&tdatas_mtx); - malloc_mutex_unlock(&prof_dump_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); } void prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; if (!config_prof) return; tdata = tsd_prof_tdata_get(tsd); if (tdata != NULL) prof_tdata_detach(tsd, tdata); } bool -prof_active_get(void) +prof_active_get(tsdn_t *tsdn) { bool prof_active_current; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_current = prof_active; - malloc_mutex_unlock(&prof_active_mtx); + malloc_mutex_unlock(tsdn, &prof_active_mtx); return (prof_active_current); } bool -prof_active_set(bool active) +prof_active_set(tsdn_t *tsdn, bool active) { bool prof_active_old; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_old = prof_active; prof_active = active; - malloc_mutex_unlock(&prof_active_mtx); + malloc_mutex_unlock(tsdn, &prof_active_mtx); return (prof_active_old); } const char * -prof_thread_name_get(void) +prof_thread_name_get(tsd_t *tsd) { - tsd_t *tsd; prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (""); return (tdata->thread_name != NULL ? 
tdata->thread_name : ""); } static char * -prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { char *ret; size_t size; if (thread_name == NULL) return (NULL); size = strlen(thread_name) + 1; if (size == 1) return (""); - ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd, - true), true, NULL, true); + ret = iallocztm(tsdn, size, size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); if (ret == NULL) return (NULL); memcpy(ret, thread_name, size); return (ret); } int prof_thread_name_set(tsd_t *tsd, const char *thread_name) @@ -1986,100 +2062,95 @@ prof_thread_name_set(tsd_t *tsd, const c if (thread_name == NULL) return (EFAULT); for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; if (!isgraph(c) && !isblank(c)) return (EFAULT); } - s = prof_thread_name_alloc(tsd, thread_name); + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); if (s == NULL) return (EAGAIN); if (tdata->thread_name != NULL) { - idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false), - true, true); + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true); tdata->thread_name = NULL; } if (strlen(s) > 0) tdata->thread_name = s; return (0); } bool -prof_thread_active_get(void) +prof_thread_active_get(tsd_t *tsd) { - tsd_t *tsd; prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (false); return (tdata->active); } bool -prof_thread_active_set(bool active) +prof_thread_active_set(tsd_t *tsd, bool active) { - tsd_t *tsd; prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); if (tdata == NULL) return (true); tdata->active = active; return (false); } bool -prof_thread_active_init_get(void) +prof_thread_active_init_get(tsdn_t *tsdn) { bool active_init; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init = 
prof_thread_active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return (active_init); } bool -prof_thread_active_init_set(bool active_init) +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { bool active_init_old; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); return (active_init_old); } bool -prof_gdump_get(void) +prof_gdump_get(tsdn_t *tsdn) { bool prof_gdump_current; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_current = prof_gdump_val; - malloc_mutex_unlock(&prof_gdump_mtx); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return (prof_gdump_current); } bool -prof_gdump_set(bool gdump) +prof_gdump_set(tsdn_t *tsdn, bool gdump) { bool prof_gdump_old; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; - malloc_mutex_unlock(&prof_gdump_mtx); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); return (prof_gdump_old); } void prof_boot0(void) { cassert(config_prof); @@ -2110,81 +2181,90 @@ prof_boot1(void) if (opt_lg_prof_interval >= 0) { prof_interval = (((uint64_t)1U) << opt_lg_prof_interval); } } } bool -prof_boot2(void) +prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { - tsd_t *tsd; unsigned i; lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; - if (malloc_mutex_init(&prof_active_mtx)) + if (malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE)) return (true); prof_gdump_val = opt_prof_gdump; - if (malloc_mutex_init(&prof_gdump_mtx)) + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP)) return (true); 
prof_thread_active_init = opt_prof_thread_active_init; - if (malloc_mutex_init(&prof_thread_active_init_mtx)) + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) return (true); - tsd = tsd_fetch(); if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) return (true); - if (malloc_mutex_init(&bt2gctx_mtx)) + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX)) return (true); tdata_tree_new(&tdatas); - if (malloc_mutex_init(&tdatas_mtx)) + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS)) return (true); next_thr_uid = 0; - if (malloc_mutex_init(&next_thr_uid_mtx)) + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID)) return (true); - if (malloc_mutex_init(&prof_dump_seq_mtx)) + if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", + WITNESS_RANK_PROF_DUMP_SEQ)) return (true); - if (malloc_mutex_init(&prof_dump_mtx)) + if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP)) return (true); if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); if (opt_abort) abort(); } - gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + PROF_NCTX_LOCKS * sizeof(malloc_mutex_t)); if (gctx_locks == NULL) return (true); for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&gctx_locks[i])) + if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", + WITNESS_RANK_PROF_GCTX)) return (true); } - tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS * - sizeof(malloc_mutex_t)); + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t)); if (tdata_locks == NULL) return (true); for (i = 0; i < PROF_NTDATA_LOCKS; i++) { - if 
(malloc_mutex_init(&tdata_locks[i])) + if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", + WITNESS_RANK_PROF_TDATA)) return (true); } } #ifdef JEMALLOC_PROF_LIBGCC /* * Cause the backtracing machinery to allocate its internal state * before enabling profiling. @@ -2193,82 +2273,83 @@ prof_boot2(void) #endif prof_booted = true; return (false); } void -prof_prefork0(void) +prof_prefork0(tsdn_t *tsdn) { if (opt_prof) { unsigned i; - malloc_mutex_prefork(&prof_dump_mtx); - malloc_mutex_prefork(&bt2gctx_mtx); - malloc_mutex_prefork(&tdatas_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_prefork(&tdata_locks[i]); + malloc_mutex_prefork(tsdn, &tdata_locks[i]); for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_prefork(&gctx_locks[i]); + malloc_mutex_prefork(tsdn, &gctx_locks[i]); } } void -prof_prefork1(void) +prof_prefork1(tsdn_t *tsdn) { if (opt_prof) { - malloc_mutex_prefork(&prof_active_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - malloc_mutex_prefork(&prof_gdump_mtx); - malloc_mutex_prefork(&next_thr_uid_mtx); - malloc_mutex_prefork(&prof_thread_active_init_mtx); + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); } } void -prof_postfork_parent(void) +prof_postfork_parent(tsdn_t *tsdn) { if (opt_prof) { unsigned i; - malloc_mutex_postfork_parent(&prof_thread_active_init_mtx); - malloc_mutex_postfork_parent(&next_thr_uid_mtx); - malloc_mutex_postfork_parent(&prof_gdump_mtx); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&prof_active_mtx); + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + 
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&gctx_locks[i]); + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_parent(&tdata_locks[i]); - malloc_mutex_postfork_parent(&tdatas_mtx); - malloc_mutex_postfork_parent(&bt2gctx_mtx); - malloc_mutex_postfork_parent(&prof_dump_mtx); + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void -prof_postfork_child(void) +prof_postfork_child(tsdn_t *tsdn) { if (opt_prof) { unsigned i; - malloc_mutex_postfork_child(&prof_thread_active_init_mtx); - malloc_mutex_postfork_child(&next_thr_uid_mtx); - malloc_mutex_postfork_child(&prof_gdump_mtx); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&prof_active_mtx); + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&gctx_locks[i]); + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_child(&tdata_locks[i]); - malloc_mutex_postfork_child(&tdatas_mtx); - malloc_mutex_postfork_child(&bt2gctx_mtx); - malloc_mutex_postfork_child(&prof_dump_mtx); + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } 
/******************************************************************************/
--- a/memory/jemalloc/src/src/quarantine.c +++ b/memory/jemalloc/src/src/quarantine.c @@ -8,34 +8,32 @@ #define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) #define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) #define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); -static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine); -static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, +static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine); +static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound); /******************************************************************************/ static quarantine_t * -quarantine_init(tsd_t *tsd, size_t lg_maxobjs) +quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs) { quarantine_t *quarantine; size_t size; - assert(tsd_nominal(tsd)); - size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)); - quarantine = (quarantine_t *)iallocztm(tsd, size, size2index(size), - false, tcache_get(tsd, true), true, NULL, true); + quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size), + false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (quarantine == NULL) return (NULL); quarantine->curbytes = 0; quarantine->curobjs = 0; quarantine->first = 0; quarantine->lg_maxobjs = lg_maxobjs; return (quarantine); @@ -44,35 +42,35 @@ quarantine_init(tsd_t *tsd, size_t lg_ma void quarantine_alloc_hook_work(tsd_t *tsd) { quarantine_t *quarantine; if (!tsd_nominal(tsd)) return; - quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT); + quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT); /* * Check again whether quarantine has been initialized, because * quarantine_init() may have triggered recursive 
initialization. */ if (tsd_quarantine_get(tsd) == NULL) tsd_quarantine_set(tsd, quarantine); else - idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); } static quarantine_t * quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) { quarantine_t *ret; - ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1); + ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1); if (ret == NULL) { - quarantine_drain_one(tsd, quarantine); + quarantine_drain_one(tsd_tsdn(tsd), quarantine); return (quarantine); } ret->curbytes = quarantine->curbytes; ret->curobjs = quarantine->curobjs; if (quarantine->first + quarantine->curobjs <= (ZU(1) << quarantine->lg_maxobjs)) { /* objs ring buffer data are contiguous. */ @@ -84,63 +82,63 @@ quarantine_grow(tsd_t *tsd, quarantine_t quarantine->first; size_t ncopy_b = quarantine->curobjs - ncopy_a; memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a * sizeof(quarantine_obj_t)); memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * sizeof(quarantine_obj_t)); } - idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true); + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); tsd_quarantine_set(tsd, ret); return (ret); } static void -quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine) +quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine) { quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; - assert(obj->usize == isalloc(obj->ptr, config_prof)); - idalloctm(tsd, obj->ptr, NULL, false, true); + assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof)); + idalloctm(tsdn, obj->ptr, NULL, false, true); quarantine->curbytes -= obj->usize; quarantine->curobjs--; quarantine->first = (quarantine->first + 1) & ((ZU(1) << quarantine->lg_maxobjs) - 1); } static void -quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound) +quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound) { while 
(quarantine->curbytes > upper_bound && quarantine->curobjs > 0) - quarantine_drain_one(tsd, quarantine); + quarantine_drain_one(tsdn, quarantine); } void quarantine(tsd_t *tsd, void *ptr) { quarantine_t *quarantine; - size_t usize = isalloc(ptr, config_prof); + size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); cassert(config_fill); assert(opt_quarantine); if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { - idalloctm(tsd, ptr, NULL, false, true); + idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); return; } /* * Drain one or more objects if the quarantine size limit would be * exceeded by appending ptr. */ if (quarantine->curbytes + usize > opt_quarantine) { size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine - usize : 0; - quarantine_drain(tsd, quarantine, upper_bound); + quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound); } /* Grow the quarantine ring buffer if it's full. */ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) quarantine = quarantine_grow(tsd, quarantine); /* quarantine_grow() must free a slot if it fails to grow. */ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); /* Append ptr if its size doesn't exceed the quarantine size. */ if (quarantine->curbytes + usize <= opt_quarantine) { @@ -155,31 +153,31 @@ quarantine(tsd_t *tsd, void *ptr) /* * Only do redzone validation if Valgrind isn't in * operation. 
*/ if ((!config_valgrind || likely(!in_valgrind)) && usize <= SMALL_MAXCLASS) arena_quarantine_junk_small(ptr, usize); else - memset(ptr, 0x5a, usize); + memset(ptr, JEMALLOC_FREE_JUNK, usize); } } else { assert(quarantine->curbytes == 0); - idalloctm(tsd, ptr, NULL, false, true); + idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true); } } void quarantine_cleanup(tsd_t *tsd) { quarantine_t *quarantine; if (!config_fill) return; quarantine = tsd_quarantine_get(tsd); if (quarantine != NULL) { - quarantine_drain(tsd, quarantine, 0); - idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true); + quarantine_drain(tsd_tsdn(tsd), quarantine, 0); + idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true); tsd_quarantine_set(tsd, NULL); } }
--- a/memory/jemalloc/src/src/rtree.c +++ b/memory/jemalloc/src/src/rtree.c @@ -10,16 +10,18 @@ hmin(unsigned ha, unsigned hb) /* Only the most significant bits of keys passed to rtree_[gs]et() are used. */ bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, rtree_node_dalloc_t *dalloc) { unsigned bits_in_leaf, height, i; + assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) / + RTREE_BITS_PER_LEVEL)); assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL : (bits % RTREE_BITS_PER_LEVEL); if (bits > bits_in_leaf) { height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) height++; @@ -89,22 +91,25 @@ rtree_delete(rtree_t *rtree) } static rtree_node_elm_t * rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) { rtree_node_elm_t *node; if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { + spin_t spinner; + /* * Another thread is already in the process of initializing. * Spin-wait until initialization is complete. */ + spin_init(&spinner); do { - CPU_SPINWAIT; + spin_adaptive(&spinner); node = atomic_read_p((void **)elmp); } while (node == RTREE_NODE_INITIALIZING); } else { node = rtree->alloc(ZU(1) << rtree->levels[level].bits); if (node == NULL) return (NULL); atomic_write_p((void **)elmp, node); } @@ -118,10 +123,10 @@ rtree_subtree_read_hard(rtree_t *rtree, return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); } rtree_node_elm_t * rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) { - return (rtree_node_init(rtree, level, &elm->child)); + return (rtree_node_init(rtree, level+1, &elm->child)); }
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/src/spin.c @@ -0,0 +1,2 @@ +#define JEMALLOC_SPIN_C_ +#include "jemalloc/internal/jemalloc_internal.h"
--- a/memory/jemalloc/src/src/stats.c +++ b/memory/jemalloc/src/src/stats.c @@ -28,95 +28,116 @@ /******************************************************************************/ /* Data. */ bool opt_stats_print = false; size_t stats_cactive = 0; /******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_hchunks_print( - void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large, bool huge); - -/******************************************************************************/ static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) + bool json, bool large, bool huge, unsigned i) { size_t page; - bool config_tcache, in_gap; + bool config_tcache, in_gap, in_gap_prev; unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { + CTL_GET("arenas.nbins", &nbins, unsigned); + if (json) { malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util nfills nflushes newruns" - " reruns\n"); + "\t\t\t\t\"bins\": [\n"); } else { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util newruns reruns\n"); + CTL_GET("config.tcache", &config_tcache, bool); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs" + " curruns regs pgs util nfills" + " nflushes newruns reruns\n"); + } else { + 
malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs" + " curruns regs pgs util newruns" + " reruns\n"); + } } - CTL_GET("arenas.nbins", &nbins, unsigned); for (j = 0, in_gap = false; j < nbins; j++) { uint64_t nruns; + size_t reg_size, run_size, curregs; + size_t curruns; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreruns; CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, uint64_t); - if (nruns == 0) - in_gap = true; - else { - size_t reg_size, run_size, curregs, availregs, milli; - size_t curruns; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - char util[6]; /* "x.yyy". */ + in_gap_prev = in_gap; + in_gap = (nruns == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t); - if (in_gap) { + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + if (config_tcache) { + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, + &nfills, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, + &nflushes, uint64_t); + } + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns, + size_t); + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curregs\": %zu,\n" + "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n", + nmalloc, + ndalloc, + curregs, + 
nrequests); + if (config_tcache) { malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; + "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n", + nfills, + nflushes); } - CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); - CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); - CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, - &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, - &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, - &curregs, size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, - &nrequests, uint64_t); - if (config_tcache) { - CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, - j, &nfills, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", - i, j, &nflushes, uint64_t); - } - CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, - &reruns, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, - &curruns, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curruns\": %zu\n" + "\t\t\t\t\t}%s\n", + nreruns, + curruns, + (j + 1 < nbins) ? "," : ""); + } else if (!in_gap) { + size_t availregs, milli; + char util[6]; /* "x.yyy". */ availregs = nregs * curruns; milli = (availregs != 0) ? 
(1000 * curregs) / availregs : 1000; assert(milli <= 1000); if (milli < 10) { malloc_snprintf(util, sizeof(util), "0.00%zu", milli); @@ -133,250 +154,925 @@ stats_arena_bins_print(void (*write_cb)( malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64 " %12"FMTu64" %12"FMTu64" %12zu" " %12zu %4u %3zu %-5s %12"FMTu64 " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", reg_size, j, curregs * reg_size, nmalloc, ndalloc, nrequests, curregs, curruns, nregs, run_size / page, util, nfills, nflushes, - nruns, reruns); + nruns, nreruns); } else { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64 " %12"FMTu64" %12"FMTu64" %12zu" " %12zu %4u %3zu %-5s %12"FMTu64 " %12"FMTu64"\n", reg_size, j, curregs * reg_size, nmalloc, ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nruns, reruns); + run_size / page, util, nruns, nreruns); } } } - if (in_gap) { + if (json) { malloc_cprintf(write_cb, cbopaque, - " ---\n"); + "\t\t\t\t]%s\n", (large || huge) ? "," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } } } static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) + bool json, bool huge, unsigned i) { unsigned nbins, nlruns, j; - bool in_gap; + bool in_gap, in_gap_prev; - malloc_cprintf(write_cb, cbopaque, - "large: size ind allocated nmalloc ndalloc" - " nrequests curruns\n"); CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"lruns\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: size ind allocated nmalloc" + " ndalloc nrequests curruns\n"); + } for (j = 0, in_gap = false; j < nlruns; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t run_size, curruns; CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, uint64_t); 
CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, - &curruns, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns, + size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curruns\": %zu\n" + "\t\t\t\t\t}%s\n", + curruns, + (j + 1 < nlruns) ? "," : ""); + } else if (!in_gap) { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64" %12zu\n", run_size, nbins + j, curruns * run_size, nmalloc, ndalloc, nrequests, curruns); } } - if (in_gap) { + if (json) { malloc_cprintf(write_cb, cbopaque, - " ---\n"); + "\t\t\t\t]%s\n", huge ? 
"," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } } } static void stats_arena_hchunks_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i) + void *cbopaque, bool json, unsigned i) { unsigned nbins, nlruns, nhchunks, j; - bool in_gap; + bool in_gap, in_gap_prev; - malloc_cprintf(write_cb, cbopaque, - "huge: size ind allocated nmalloc ndalloc" - " nrequests curhchunks\n"); CTL_GET("arenas.nbins", &nbins, unsigned); CTL_GET("arenas.nlruns", &nlruns, unsigned); CTL_GET("arenas.nhchunks", &nhchunks, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"hchunks\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "huge: size ind allocated nmalloc" + " ndalloc nrequests curhchunks\n"); + } for (j = 0, in_gap = false; j < nhchunks; j++) { uint64_t nmalloc, ndalloc, nrequests; size_t hchunk_size, curhchunks; CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, &nmalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, &ndalloc, uint64_t); CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, - j, &curhchunks, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j, + &curhchunks, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curhchunks\": %zu\n" + "\t\t\t\t\t}%s\n", + curhchunks, + (j + 1 < nhchunks) ? 
"," : ""); + } else if (!in_gap) { malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 " %12"FMTu64" %12zu\n", hchunk_size, nbins + nlruns + j, curhchunks * hchunk_size, nmalloc, ndalloc, nrequests, curhchunks); } } - if (in_gap) { + if (json) { malloc_cprintf(write_cb, cbopaque, - " ---\n"); + "\t\t\t\t]\n"); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } } } static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large, bool huge) + bool json, unsigned i, bool bins, bool large, bool huge) { unsigned nthreads; const char *dss; ssize_t lg_dirty_mult, decay_time; - size_t page, pactive, pdirty, mapped; + size_t page, pactive, pdirty, mapped, retained; size_t metadata_mapped, metadata_allocated; uint64_t npurge, nmadvise, purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests; size_t huge_allocated; uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; CTL_GET("arenas.page", &page, size_t); CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nthreads\": %u,\n", nthreads); + } else { + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + } + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dss\": \"%s\",\n", dss); + } else { + malloc_cprintf(write_cb, cbopaque, + "dss allocation precedence: %s\n", dss); + } + CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); - if (opt_purge == purge_mode_ratio) { - if (lg_dirty_mult >= 0) { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: %u:1\n", - 
(1U << lg_dirty_mult)); - } else { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: N/A\n"); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult); + } else { + if (opt_purge == purge_mode_ratio) { + if (lg_dirty_mult >= 0) { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: %u:1\n", + (1U << lg_dirty_mult)); + } else { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: N/A\n"); + } } } + CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t); - if (opt_purge == purge_mode_decay) { - if (decay_time >= 0) { - malloc_cprintf(write_cb, cbopaque, "decay time: %zd\n", - decay_time); - } else - malloc_cprintf(write_cb, cbopaque, "decay time: N/A\n"); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"decay_time\": %zd,\n", decay_time); + } else { + if (opt_purge == purge_mode_decay) { + if (decay_time >= 0) { + malloc_cprintf(write_cb, cbopaque, + "decay time: %zd\n", decay_time); + } else { + malloc_cprintf(write_cb, cbopaque, + "decay time: N/A\n"); + } + } } + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64", " - "purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pactive\": %zu,\n", pactive); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pdirty\": %zu,\n", pdirty); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"purged\": %"FMTu64",\n", purged); + } else 
{ + malloc_cprintf(write_cb, cbopaque, + "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64 + ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged); + } - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc" - " nrequests\n"); CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, size_t); CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"small\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc" + " ndalloc nrequests\n"); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated, small_nmalloc, small_ndalloc, + small_nrequests); + } + CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, size_t); CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); + if (json) { + 
malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"large\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + large_allocated, large_nmalloc, large_ndalloc, + large_nrequests); + } + CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "huge: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated + large_allocated + huge_allocated, - small_nmalloc + large_nmalloc + huge_nmalloc, - small_ndalloc + large_ndalloc + huge_ndalloc, - small_nrequests + large_nrequests + huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "active: %12zu\n", pactive * page); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"huge\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests); + + malloc_cprintf(write_cb, cbopaque, 
+ "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "huge: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated + large_allocated + huge_allocated, + small_nmalloc + large_nmalloc + huge_nmalloc, + small_ndalloc + large_ndalloc + huge_ndalloc, + small_nrequests + large_nrequests + huge_nrequests); + } + if (!json) { + malloc_cprintf(write_cb, cbopaque, + "active: %12zu\n", pactive * page); + } + CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "mapped: %12zu\n", mapped); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"mapped\": %zu,\n", mapped); + } else { + malloc_cprintf(write_cb, cbopaque, + "mapped: %12zu\n", mapped); + } + + CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"retained\": %zu,\n", retained); + } else { + malloc_cprintf(write_cb, cbopaque, + "retained: %12zu\n", retained); + } + CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, size_t); CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, size_t); - malloc_cprintf(write_cb, cbopaque, - "metadata: mapped: %zu, allocated: %zu\n", - metadata_mapped, metadata_allocated); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"metadata\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "metadata: mapped: %zu, allocated: %zu\n", + metadata_mapped, metadata_allocated); + } + + if (bins) { + stats_arena_bins_print(write_cb, cbopaque, json, large, huge, + i); + } + if (large) + 
stats_arena_lruns_print(write_cb, cbopaque, json, huge, i); + if (huge) + stats_arena_hchunks_print(write_cb, cbopaque, json, i); +} + +static void +stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool merged, bool unmerged) +{ + const char *cpv; + bool bv; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv; + size_t sv, bsz, usz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"version\": \"%s\",\n", cpv); + } else + malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); + + /* config. */ +#define CONFIG_WRITE_BOOL_JSON(n, c) \ + if (json) { \ + CTL_GET("config."#n, &bv, bool); \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \ + (c)); \ + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"config\": {\n"); + } + + CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") + + CTL_GET("config.debug", &bv, bool); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"debug\": %s,\n", bv ? "true" : "false"); + } else { + malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", + bv ? 
"enabled" : "disabled"); + } + + CONFIG_WRITE_BOOL_JSON(fill, ",") + CONFIG_WRITE_BOOL_JSON(lazy_lock, ",") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"malloc_conf\": \"%s\",\n", + config_malloc_conf); + } else { + malloc_cprintf(write_cb, cbopaque, + "config.malloc_conf: \"%s\"\n", config_malloc_conf); + } + + CONFIG_WRITE_BOOL_JSON(munmap, ",") + CONFIG_WRITE_BOOL_JSON(prof, ",") + CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",") + CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",") + CONFIG_WRITE_BOOL_JSON(stats, ",") + CONFIG_WRITE_BOOL_JSON(tcache, ",") + CONFIG_WRITE_BOOL_JSON(tls, ",") + CONFIG_WRITE_BOOL_JSON(utrace, ",") + CONFIG_WRITE_BOOL_JSON(valgrind, ",") + CONFIG_WRITE_BOOL_JSON(xmalloc, "") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } +#undef CONFIG_WRITE_BOOL_JSON + + /* opt. */ +#define OPT_WRITE_BOOL(n, c) \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s\n", bv ? "true" : "false"); \ + } \ + } +#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ + bool bv2; \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ + je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s ("#m": %s)\n", bv ? "true" \ + : "false", bv2 ? 
"true" : "false"); \ + } \ + } \ +} +#define OPT_WRITE_UNSIGNED(n, c) \ + if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %u%s\n", uv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %u\n", uv); \ + } \ + } +#define OPT_WRITE_SIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zu\n", sv); \ + } \ + } +#define OPT_WRITE_SSIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd\n", ssv); \ + } \ + } +#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ + ssize_t ssv2; \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ + je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd ("#m": %zd)\n", \ + ssv, ssv2); \ + } \ + } \ +} +#define OPT_WRITE_CHAR_P(n, c) \ + if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": \"%s\"\n", cpv); \ + } \ + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"opt\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "Run-time option settings:\n"); + } + OPT_WRITE_BOOL(abort, ",") + OPT_WRITE_SIZE_T(lg_chunk, ",") + OPT_WRITE_CHAR_P(dss, ",") + OPT_WRITE_UNSIGNED(narenas, ",") + OPT_WRITE_CHAR_P(purge, ",") + if (json || opt_purge == purge_mode_ratio) { + 
OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, + arenas.lg_dirty_mult, ",") + } + if (json || opt_purge == purge_mode_decay) { + OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",") + } + OPT_WRITE_CHAR_P(junk, ",") + OPT_WRITE_SIZE_T(quarantine, ",") + OPT_WRITE_BOOL(redzone, ",") + OPT_WRITE_BOOL(zero, ",") + OPT_WRITE_BOOL(utrace, ",") + OPT_WRITE_BOOL(xmalloc, ",") + OPT_WRITE_BOOL(tcache, ",") + OPT_WRITE_SSIZE_T(lg_tcache_max, ",") + OPT_WRITE_BOOL(prof, ",") + OPT_WRITE_CHAR_P(prof_prefix, ",") + OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") + OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, + ",") + OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",") + OPT_WRITE_BOOL(prof_accum, ",") + OPT_WRITE_SSIZE_T(lg_prof_interval, ",") + OPT_WRITE_BOOL(prof_gdump, ",") + OPT_WRITE_BOOL(prof_final, ",") + OPT_WRITE_BOOL(prof_leak, ",") + /* + * stats_print is always emitted, so as long as stats_print comes last + * it's safe to unconditionally omit the comma here (rather than having + * to conditionally omit it elsewhere depending on configuration). + */ + OPT_WRITE_BOOL(stats_print, "") + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } + +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_BOOL_MUTABLE +#undef OPT_WRITE_SIZE_T +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_CHAR_P + + /* arenas. 
*/ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"arenas\": {\n"); + } + + CTL_GET("arenas.narenas", &uv, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"narenas\": %u,\n", uv); + } else + malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); - if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); - if (huge) - stats_arena_hchunks_print(write_cb, cbopaque, i); + CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv); + } else if (opt_purge == purge_mode_ratio) { + if (ssv >= 0) { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: " + "%u:1\n", (1U << ssv)); + } else { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: " + "N/A\n"); + } + } + CTL_GET("arenas.decay_time", &ssv, ssize_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"decay_time\": %zd,\n", ssv); + } else if (opt_purge == purge_mode_decay) { + malloc_cprintf(write_cb, cbopaque, + "Unused dirty page decay time: %zd%s\n", + ssv, (ssv < 0) ? 
" (no decay)" : ""); + } + + CTL_GET("arenas.quantum", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"quantum\": %zu,\n", sv); + } else + malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); + + CTL_GET("arenas.page", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"page\": %zu,\n", sv); + } else + malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"tcache_max\": %zu,\n", sv); + } else { + malloc_cprintf(write_cb, cbopaque, + "Maximum thread-cached size class: %zu\n", sv); + } + } + + if (json) { + unsigned nbins, nlruns, nhchunks, i; + + CTL_GET("arenas.nbins", &nbins, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nbins\": %u,\n", nbins); + + CTL_GET("arenas.nhbins", &uv, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nhbins\": %u,\n", uv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"bin\": [\n"); + for (i = 0; i < nbins; i++) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu,\n", sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v); + + CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"run_size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nbins) ? 
"," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t],\n"); + + CTL_GET("arenas.nlruns", &nlruns, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nlruns\": %u,\n", nlruns); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lrun\": [\n"); + for (i = 0; i < nlruns; i++) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t],\n"); + + CTL_GET("arenas.nhchunks", &nhchunks, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nhchunks\": %u,\n", nhchunks); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"hchunk\": [\n"); + for (i = 0; i < nhchunks; i++) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t]\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } + + /* prof. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"prof\": {\n"); + + CTL_GET("prof.thread_active_init", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" : + "false"); + + CTL_GET("prof.active", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.gdump", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"gdump\": %s,\n", bv ? 
"true" : "false"); + + CTL_GET("prof.interval", &u64v, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"interval\": %"FMTu64",\n", u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_sample\": %zd\n", ssv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (config_stats || merged || unmerged) ? "," : + ""); + } +} + +static void +stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool merged, bool unmerged, bool bins, bool large, bool huge) +{ + size_t *cactive; + size_t allocated, active, metadata, resident, mapped, retained; + + CTL_GET("stats.cactive", &cactive, size_t *); + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive)); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"allocated\": %zu,\n", allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %zu,\n", active); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"metadata\": %zu,\n", metadata); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"resident\": %zu,\n", resident); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"mapped\": %zu,\n", mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"retained\": %zu\n", retained); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (merged || unmerged) ? 
"," : ""); + } else { + malloc_cprintf(write_cb, cbopaque, + "Allocated: %zu, active: %zu, metadata: %zu," + " resident: %zu, mapped: %zu, retained: %zu\n", + allocated, active, metadata, resident, mapped, retained); + malloc_cprintf(write_cb, cbopaque, + "Current active ceiling: %zu\n", + atomic_read_z(cactive)); + } + + if (merged || unmerged) { + unsigned narenas; + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats.arenas\": {\n"); + } + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + VARIABLE_ARRAY(bool, initialized, narenas); + size_t isz; + unsigned i, j, ninitialized; + + isz = sizeof(bool) * narenas; + xmallctl("arenas.initialized", (void *)initialized, + &isz, NULL, 0); + for (i = ninitialized = 0; i < narenas; i++) { + if (initialized[i]) + ninitialized++; + } + + /* Merged stats. */ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"merged\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "\nMerged arenas stats:\n"); + } + stats_arena_print(write_cb, cbopaque, json, + narenas, bins, large, huge); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t}%s\n", (ninitialized > 1) ? + "," : ""); + } + } + + /* Unmerged stats. */ + for (i = j = 0; i < narenas; i++) { + if (initialized[i]) { + if (json) { + j++; + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t\"%u\": {\n", i); + } else { + malloc_cprintf(write_cb, + cbopaque, "\narenas[%u]:\n", + i); + } + stats_arena_print(write_cb, cbopaque, + json, i, bins, large, huge); + if (json) { + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t}%s\n", (j < + ninitialized) ? 
"," : ""); + } + } + } + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t}\n"); + } + } } void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { int err; uint64_t epoch; size_t u64sz; + bool json = false; bool general = true; bool merged = true; bool unmerged = true; bool bins = true; bool large = true; bool huge = true; /* @@ -400,16 +1096,19 @@ stats_print(void (*write_cb)(void *, con abort(); } if (opts != NULL) { unsigned i; for (i = 0; opts[i] != '\0'; i++) { switch (opts[i]) { + case 'J': + json = true; + break; case 'g': general = false; break; case 'm': merged = false; break; case 'a': unmerged = false; @@ -423,250 +1122,32 @@ stats_print(void (*write_cb)(void *, con case 'h': huge = false; break; default:; } } } - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, usz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - usz = sizeof(unsigned); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); - malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); - malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", - bv ? "enabled" : "disabled"); - malloc_cprintf(write_cb, cbopaque, - "config.malloc_conf: \"%s\"\n", config_malloc_conf); - -#define OPT_WRITE_BOOL(n) \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s\n", bv ? "true" : "false"); \ - } -#define OPT_WRITE_BOOL_MUTABLE(n, m) { \ - bool bv2; \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \ - je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s ("#m": %s)\n", bv ? "true" \ - : "false", bv2 ? 
"true" : "false"); \ - } \ -} -#define OPT_WRITE_UNSIGNED(n) \ - if (je_mallctl("opt."#n, &uv, &usz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %u\n", uv); \ - } -#define OPT_WRITE_SIZE_T(n) \ - if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \ - ssize_t ssv2; \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \ - je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd ("#m": %zd)\n", \ - ssv, ssv2); \ - } \ -} -#define OPT_WRITE_CHAR_P(n) \ - if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": \"%s\"\n", cpv); \ - } - + if (json) { malloc_cprintf(write_cb, cbopaque, - "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_UNSIGNED(narenas) - OPT_WRITE_CHAR_P(purge) - if (opt_purge == purge_mode_ratio) { - OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, - arenas.lg_dirty_mult) - } - if (opt_purge == purge_mode_decay) - OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_CHAR_P(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active) - OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, - prof.thread_active_init) - OPT_WRITE_SSIZE_T(lg_prof_sample) - OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - 
OPT_WRITE_BOOL(prof_leak) - -#undef OPT_WRITE_BOOL -#undef OPT_WRITE_BOOL_MUTABLE -#undef OPT_WRITE_SIZE_T -#undef OPT_WRITE_SSIZE_T -#undef OPT_WRITE_CHAR_P - - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); - - CTL_GET("arenas.narenas", &uv, unsigned); - malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); - - CTL_GET("arenas.quantum", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", - sv); - - CTL_GET("arenas.page", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); - - CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); - if (opt_purge == purge_mode_ratio) { - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: " - "%u:1\n", (1U << ssv)); - } else { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: " - "N/A\n"); - } - } - CTL_GET("arenas.decay_time", &ssv, ssize_t); - if (opt_purge == purge_mode_decay) { - malloc_cprintf(write_cb, cbopaque, - "Unused dirty page decay time: %zd%s\n", - ssv, (ssv < 0) ? 
" (no decay)" : ""); - } - if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) { - malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); - } - if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) { - CTL_GET("prof.lg_sample", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"FMTu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); - - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"FMTu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); - } - } - CTL_GET("opt.lg_chunk", &sv, size_t); + "{\n" + "\t\"jemalloc\": {\n"); + } else { malloc_cprintf(write_cb, cbopaque, - "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv); + "___ Begin jemalloc statistics ___\n"); } + if (general) + stats_general_print(write_cb, cbopaque, json, merged, unmerged); if (config_stats) { - size_t *cactive; - size_t allocated, active, metadata, resident, mapped; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.metadata", &metadata, size_t); - CTL_GET("stats.resident", &resident, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, metadata: %zu," - " resident: %zu, mapped: %zu\n", - allocated, active, metadata, resident, mapped); + stats_print_helper(write_cb, cbopaque, json, merged, unmerged, + bins, large, huge); + } + if (json) { malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", - atomic_read_z(cactive)); - - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - 
xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; - } - - if (ninitialized > 1 || !unmerged) { - /* Print merged arena stats. */ - malloc_cprintf(write_cb, cbopaque, - "\nMerged arenas stats:\n"); - stats_arena_print(write_cb, cbopaque, - narenas, bins, large, huge); - } - } - } - - if (unmerged) { - unsigned narenas; - - /* Print stats for each arena. */ - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - - for (i = 0; i < narenas; i++) { - if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); - stats_arena_print(write_cb, - cbopaque, i, bins, large, - huge); - } - } - } - } + "\t}\n" + "}\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "--- End jemalloc statistics ---\n"); } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); }
--- a/memory/jemalloc/src/src/tcache.c +++ b/memory/jemalloc/src/src/tcache.c @@ -18,20 +18,21 @@ tcaches_t *tcaches; /* Index of first element within tcaches that has never been used. */ static unsigned tcaches_past; /* Head of singly linked list tracking available tcaches elements. */ static tcaches_t *tcaches_avail; /******************************************************************************/ -size_t tcache_salloc(const void *ptr) +size_t +tcache_salloc(tsdn_t *tsdn, const void *ptr) { - return (arena_salloc(ptr, false)); + return (arena_salloc(tsdn, ptr, false)); } void tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { szind_t binind = tcache->next_gc_bin; tcache_bin_t *tbin = &tcache->tbins[binind]; tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; @@ -65,22 +66,22 @@ tcache_event_hard(tsd_t *tsd, tcache_t * tbin->low_water = tbin->ncached; tcache->next_gc_bin++; if (tcache->next_gc_bin == nhbins) tcache->next_gc_bin = 0; } void * -tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, bool *tcache_success) { void *ret; - arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ? + arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ? tcache->prof_accumbytes : 0); if (config_prof) tcache->prof_accumbytes = 0; ret = tcache_alloc_easy(tbin, tcache_success); return (ret); } @@ -101,66 +102,67 @@ tcache_bin_flush_small(tsd_t *tsd, tcach for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena bin associated with the first object. 
*/ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( *(tbin->avail - 1)); arena_t *bin_arena = extent_node_arena_get(&chunk->node); arena_bin_t *bin = &bin_arena->bins[binind]; if (config_prof && bin_arena == arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) + prof_idump(tsd_tsdn(tsd)); tcache->prof_accumbytes = 0; } - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (config_stats && bin_arena == arena) { assert(!merged_stats); merged_stats = true; bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } ndeferred = 0; for (i = 0; i < nflush; i++) { ptr = *(tbin->avail - 1 - i); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (extent_node_arena_get(&chunk->node) == bin_arena) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_chunk_map_bits_t *bitselm = - arena_bitselm_get(chunk, pageind); - arena_dalloc_bin_junked_locked(bin_arena, chunk, - ptr, bitselm); + arena_bitselm_get_mutable(chunk, pageind); + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, chunk, ptr, bitselm); } else { /* * This object was allocated via a different * arena bin than the one that is currently * locked. Stash the object, so that it can be * handled in a future pass. */ *(tbin->avail - 1 - ndeferred) = ptr; ndeferred++; } } - malloc_mutex_unlock(&bin->lock); - arena_decay_ticks(tsd, bin_arena, nflush - ndeferred); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. 
*/ arena_bin_t *bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; } @@ -183,17 +185,17 @@ tcache_bin_flush_large(tsd_t *tsd, tcach /* Lock the arena associated with the first object. */ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( *(tbin->avail - 1)); arena_t *locked_arena = extent_node_arena_get(&chunk->node); UNUSED bool idump; if (config_prof) idump = false; - malloc_mutex_lock(&locked_arena->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock); if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { idump = arena_prof_accum_locked(arena, tcache->prof_accumbytes); tcache->prof_accumbytes = 0; } if (config_stats) { merged_stats = true; @@ -206,136 +208,138 @@ tcache_bin_flush_large(tsd_t *tsd, tcach } ndeferred = 0; for (i = 0; i < nflush; i++) { ptr = *(tbin->avail - 1 - i); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (extent_node_arena_get(&chunk->node) == locked_arena) { - arena_dalloc_large_junked_locked(locked_arena, - chunk, ptr); + arena_dalloc_large_junked_locked(tsd_tsdn(tsd), + locked_arena, chunk, ptr); } else { /* * This object was allocated via a different * arena than the one that is currently locked. * Stash the object, so that it can be handled * in a future pass. 
*/ *(tbin->avail - 1 - ndeferred) = ptr; ndeferred++; } } - malloc_mutex_unlock(&locked_arena->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock); if (config_prof && idump) - prof_idump(); - arena_decay_ticks(tsd, locked_arena, nflush - ndeferred); + prof_idump(tsd_tsdn(tsd)); + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[binind - NBINS].nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * sizeof(void *)); tbin->ncached = rem; if ((int)tbin->ncached < tbin->low_water) tbin->low_water = tbin->ncached; } -void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) +static void +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } } -void -tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena) -{ - - tcache_arena_dissociate(tcache, oldarena); - tcache_arena_associate(tcache, newarena); -} - -void -tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { if (config_stats) { /* Unlink from list of extant tcaches. 
*/ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_debug) { bool in_ql = false; tcache_t *iter; ql_foreach(iter, &arena->tcache_ql, link) { if (iter == tcache) { in_ql = true; break; } } assert(in_ql); } ql_remove(&arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, arena); - malloc_mutex_unlock(&arena->lock); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->lock); } } +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, + arena_t *newarena) +{ + + tcache_arena_dissociate(tsdn, tcache, oldarena); + tcache_arena_associate(tsdn, tcache, newarena); +} + tcache_t * tcache_get_hard(tsd_t *tsd) { arena_t *arena; if (!tcache_enabled_get()) { if (tsd_nominal(tsd)) tcache_enabled_set(false); /* Memoize. */ return (NULL); } arena = arena_choose(tsd, NULL); if (unlikely(arena == NULL)) return (NULL); - return (tcache_create(tsd, arena)); + return (tcache_create(tsd_tsdn(tsd), arena)); } tcache_t * -tcache_create(tsd_t *tsd, arena_t *arena) +tcache_create(tsdn_t *tsdn, arena_t *arena) { tcache_t *tcache; size_t size, stack_offset; unsigned i; size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); stack_offset = size; size += stack_nelms * sizeof(void *); /* Avoid false cacheline sharing. 
*/ size = sa2u(size, CACHELINE); - tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, - arena_get(0, false)); + tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); if (tcache == NULL) return (NULL); - tcache_arena_associate(tcache, arena); + tcache_arena_associate(tsdn, tcache, arena); ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); for (i = 0; i < nhbins; i++) { tcache->tbins[i].lg_fill_div = 1; stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); /* @@ -352,48 +356,48 @@ tcache_create(tsd_t *tsd, arena_t *arena static void tcache_destroy(tsd_t *tsd, tcache_t *tcache) { arena_t *arena; unsigned i; arena = arena_choose(tsd, NULL); - tcache_arena_dissociate(tcache, arena); + tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena); for (i = 0; i < NBINS; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_flush_small(tsd, tcache, tbin, i, 0); if (config_stats && tbin->tstats.nrequests != 0) { arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } } for (; i < nhbins; i++) { tcache_bin_t *tbin = &tcache->tbins[i]; tcache_bin_flush_large(tsd, tbin, i, 0, tcache); if (config_stats && tbin->tstats.nrequests != 0) { - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); arena->stats.nrequests_large += tbin->tstats.nrequests; arena->stats.lstats[i - NBINS].nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } } if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); + arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) + prof_idump(tsd_tsdn(tsd)); - idalloctm(tsd, tcache, false, true, true); 
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true); } void tcache_cleanup(tsd_t *tsd) { tcache_t *tcache; if (!config_tcache) @@ -407,59 +411,64 @@ tcache_cleanup(tsd_t *tsd) void tcache_enabled_cleanup(tsd_t *tsd) { /* Do nothing. */ } -/* Caller must own arena->lock. */ void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); + malloc_mutex_assert_owner(tsdn, &arena->lock); + /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; tcache_bin_t *tbin = &tcache->tbins[i]; arena->stats.nrequests_large += tbin->tstats.nrequests; lstats->nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } } bool tcaches_create(tsd_t *tsd, unsigned *r_ind) { + arena_t *arena; tcache_t *tcache; tcaches_t *elm; if (tcaches == NULL) { - tcaches = base_alloc(sizeof(tcache_t *) * + tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1)); if (tcaches == NULL) return (true); } if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) return (true); - tcache = tcache_create(tsd, arena_get(0, false)); + arena = arena_ichoose(tsd, NULL); + if (unlikely(arena == NULL)) + return (true); + tcache = tcache_create(tsd_tsdn(tsd), arena); if (tcache == NULL) return (true); if (tcaches_avail != NULL) { elm = tcaches_avail; tcaches_avail = tcaches_avail->next; elm->tcache = tcache; *r_ind = (unsigned)(elm - tcaches); @@ -495,17 +504,17 @@ tcaches_destroy(tsd_t *tsd, unsigned ind { tcaches_t *elm = &tcaches[ind]; tcaches_elm_flush(tsd, elm); elm->next = 
tcaches_avail; tcaches_avail = elm; } bool -tcache_boot(void) +tcache_boot(tsdn_t *tsdn) { unsigned i; /* * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is * known. */ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) @@ -513,17 +522,17 @@ tcache_boot(void) else if ((1U << opt_lg_tcache_max) > large_maxclass) tcache_maxclass = large_maxclass; else tcache_maxclass = (1U << opt_lg_tcache_max); nhbins = size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. */ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * + tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins * sizeof(tcache_bin_info_t)); if (tcache_bin_info == NULL) return (true); stack_nelms = 0; for (i = 0; i < NBINS; i++) { if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MIN;
--- a/memory/jemalloc/src/src/tsd.c +++ b/memory/jemalloc/src/src/tsd.c @@ -72,17 +72,17 @@ tsd_cleanup(void *arg) { tsd_t *tsd = (tsd_t *)arg; switch (tsd->state) { case tsd_state_uninitialized: /* Do nothing. */ break; case tsd_state_nominal: -#define O(n, t) \ +#define O(n, t) \ n##_cleanup(tsd); MALLOC_TSD #undef O tsd->state = tsd_state_purgatory; tsd_set(tsd); break; case tsd_state_purgatory: /* @@ -101,25 +101,27 @@ MALLOC_TSD tsd->state = tsd_state_purgatory; tsd_set(tsd); break; default: not_reached(); } } -bool +tsd_t * malloc_tsd_boot0(void) { + tsd_t *tsd; ncleanups = 0; if (tsd_boot0()) - return (true); - *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = true; - return (false); + return (NULL); + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return (tsd); } void malloc_tsd_boot1(void) { tsd_boot1(); *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false; @@ -164,32 +166,32 @@ BOOL (WINAPI *const tls_callback)(HINSTA !defined(_WIN32)) void * tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(&head->lock); + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); return (iter->data); } } /* Insert block into list. */ ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); return (NULL); } void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { - malloc_mutex_lock(&head->lock); + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); } #endif
--- a/memory/jemalloc/src/src/util.c +++ b/memory/jemalloc/src/src/util.c @@ -9,16 +9,17 @@ } \ } while (0) #define not_reached() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Unreachable code reached\n"); \ abort(); \ } \ + unreachable(); \ } while (0) #define not_implemented() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Not implemented\n"); \ abort(); \ } \ } while (0) @@ -43,17 +44,17 @@ static char *x2s(uintmax_t x, bool alt_f /******************************************************************************/ /* malloc_message() setup. */ static void wrtmessage(void *cbopaque, const char *s) { -#ifdef SYS_write +#if defined(JEMALLOC_HAVE_SYSCALL) && defined(SYS_write) /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary * on FreeBSD; most operating systems do not have this problem though. * * syscall() returns long or int, depending on platform, so capture the * unused result in the widest plausible type to avoid compiler * warnings. @@ -309,20 +310,19 @@ x2s(uintmax_t x, bool alt_form, bool upp if (alt_form) { s -= 2; (*slen_p) += 2; memcpy(s, uppercase ? "0X" : "0x", 2); } return (s); } -int +size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { - int ret; size_t i; const char *f; #define APPEND_C(c) do { \ if (i < size) \ str[i] = (c); \ i++; \ } while (0) @@ -403,16 +403,18 @@ malloc_vsnprintf(char *str, size_t size, case '%': { bool alt_form = false; bool left_justify = false; bool plus_space = false; bool plus_plus = false; int prec = -1; int width = -1; unsigned char len = '?'; + char *s; + size_t slen; f++; /* Flags. */ while (true) { switch (*f) { case '#': assert(!alt_form); alt_form = true; @@ -493,18 +495,16 @@ malloc_vsnprintf(char *str, size_t size, case 'q': case 'j': case 't': case 'z': len = *f; f++; break; default: break; } /* Conversion specifier. 
*/ switch (*f) { - char *s; - size_t slen; case '%': /* %% */ APPEND_C(*f); f++; break; case 'd': case 'i': { intmax_t val JEMALLOC_CC_SILENCE_INIT(0); char buf[D2S_BUFSIZE]; @@ -580,31 +580,29 @@ malloc_vsnprintf(char *str, size_t size, break; }} } label_out: if (i < size) str[i] = '\0'; else str[size - 1] = '\0'; - assert(i < INT_MAX); - ret = (int)i; #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC - return (ret); + return (i); } JEMALLOC_FORMAT_PRINTF(3, 4) -int +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) { - int ret; + size_t ret; va_list ap; va_start(ap, format); ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); return (ret); }
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/src/witness.c @@ -0,0 +1,136 @@ +#define JEMALLOC_WITNESS_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +void +witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp) +{ + + witness->name = name; + witness->rank = rank; + witness->comp = comp; +} + +#ifdef JEMALLOC_JET +#undef witness_lock_error +#define witness_lock_error JEMALLOC_N(n_witness_lock_error) +#endif +void +witness_lock_error(const witness_list_t *witnesses, const witness_t *witness) +{ + witness_t *w; + + malloc_printf("<jemalloc>: Lock rank order reversal:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf(" %s(%u)\n", witness->name, witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_lock_error +#define witness_lock_error JEMALLOC_N(witness_lock_error) +witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_owner_error +#define witness_owner_error JEMALLOC_N(n_witness_owner_error) +#endif +void +witness_owner_error(const witness_t *witness) +{ + + malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_owner_error +#define witness_owner_error JEMALLOC_N(witness_owner_error) +witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error); +#endif + +#ifdef JEMALLOC_JET +#undef witness_not_owner_error +#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error) +#endif +void +witness_not_owner_error(const witness_t *witness) +{ + + malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_not_owner_error +#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) +witness_not_owner_error_t *witness_not_owner_error = + JEMALLOC_N(n_witness_not_owner_error); +#endif + 
+#ifdef JEMALLOC_JET +#undef witness_lockless_error +#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error) +#endif +void +witness_lockless_error(const witness_list_t *witnesses) +{ + witness_t *w; + + malloc_printf("<jemalloc>: Should not own any locks:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf("\n"); + abort(); +} +#ifdef JEMALLOC_JET +#undef witness_lockless_error +#define witness_lockless_error JEMALLOC_N(witness_lockless_error) +witness_lockless_error_t *witness_lockless_error = + JEMALLOC_N(n_witness_lockless_error); +#endif + +void +witnesses_cleanup(tsd_t *tsd) +{ + + witness_assert_lockless(tsd_tsdn(tsd)); + + /* Do nothing. */ +} + +void +witness_fork_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +witness_prefork(tsd_t *tsd) +{ + + tsd_witness_fork_set(tsd, true); +} + +void +witness_postfork_parent(tsd_t *tsd) +{ + + tsd_witness_fork_set(tsd, false); +} + +void +witness_postfork_child(tsd_t *tsd) +{ +#ifndef JEMALLOC_MUTEX_INIT_CB + witness_list_t *witnesses; + + witnesses = tsd_witnessesp_get(tsd); + ql_new(witnesses); +#endif + tsd_witness_fork_set(tsd, false); +}
--- a/memory/jemalloc/src/src/zone.c +++ b/memory/jemalloc/src/src/zone.c @@ -1,25 +1,26 @@ #include "jemalloc/internal/jemalloc_internal.h" #ifndef JEMALLOC_ZONE # error "This source file is for zones on Darwin (OS X)." #endif /* - * The malloc_default_purgeable_zone function is only available on >= 10.6. + * The malloc_default_purgeable_zone() function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ -static malloc_zone_t zone; -static struct malloc_introspection_t zone_introspect; +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ static size_t zone_size(malloc_zone_t *zone, void *ptr); static void *zone_malloc(malloc_zone_t *zone, size_t size); static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); static void *zone_valloc(malloc_zone_t *zone, size_t size); @@ -51,17 +52,17 @@ zone_size(malloc_zone_t *zone, void *ptr * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If * we knew that all pointers were owned by *some* zone, we could split * our zone into two parts, and use one as the default allocator and * the other as the default deallocator/reallocator. Since that will * not work in practice, we must check all pointers to assure that they * reside within a mapped chunk before determining size. 
*/ - return (ivsalloc(ptr, config_prof)); + return (ivsalloc(tsdn_fetch(), ptr, config_prof)); } static void * zone_malloc(malloc_zone_t *zone, size_t size) { return (je_malloc(size)); } @@ -82,29 +83,29 @@ zone_valloc(malloc_zone_t *zone, size_t return (ret); } static void zone_free(malloc_zone_t *zone, void *ptr) { - if (ivsalloc(ptr, config_prof) != 0) { + if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) { je_free(ptr); return; } free(ptr); } static void * zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { - if (ivsalloc(ptr, config_prof) != 0) + if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) return (je_realloc(ptr, size)); return (realloc(ptr, size)); } #if (JEMALLOC_ZONE_VERSION >= 5) static void * zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) @@ -118,17 +119,17 @@ zone_memalign(malloc_zone_t *zone, size_ #endif #if (JEMALLOC_ZONE_VERSION >= 6) static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { size_t alloc_size; - alloc_size = ivsalloc(ptr, config_prof); + alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof); if (alloc_size != 0) { assert(alloc_size == size); je_free(ptr); return; } free(ptr); } @@ -159,118 +160,171 @@ zone_force_lock(malloc_zone_t *zone) if (isthreaded) jemalloc_prefork(); } static void zone_force_unlock(malloc_zone_t *zone) { + /* + * Call jemalloc_postfork_child() rather than + * jemalloc_postfork_parent(), because this function is executed by both + * parent and child. The parent can tolerate having state + * reinitialized, but the child cannot unlock mutexes that were locked + * by the parent. + */ if (isthreaded) - jemalloc_postfork_parent(); + jemalloc_postfork_child(); } -JEMALLOC_ATTR(constructor) -void -register_zone(void) +static void +zone_init(void) { - /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. 
- */ - malloc_zone_t *default_zone = malloc_default_zone(); - malloc_zone_t *purgeable_zone = NULL; - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { - return; - } - - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; + jemalloc_zone.size = (void *)zone_size; + jemalloc_zone.malloc = (void *)zone_malloc; + jemalloc_zone.calloc = (void *)zone_calloc; + jemalloc_zone.valloc = (void *)zone_valloc; + jemalloc_zone.free = (void *)zone_free; + jemalloc_zone.realloc = (void *)zone_realloc; + jemalloc_zone.destroy = (void *)zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = NULL; + jemalloc_zone.batch_free = NULL; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = JEMALLOC_ZONE_VERSION; #if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; + jemalloc_zone.memalign = zone_memalign; #endif #if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.free_definite_size = zone_free_definite_size; #endif #if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; + jemalloc_zone.pressure_relief = NULL; #endif - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; + jemalloc_zone_introspect.enumerator = NULL; + jemalloc_zone_introspect.good_size = (void 
*)zone_good_size; + jemalloc_zone_introspect.check = NULL; + jemalloc_zone_introspect.print = NULL; + jemalloc_zone_introspect.log = NULL; + jemalloc_zone_introspect.force_lock = (void *)zone_force_lock; + jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock; + jemalloc_zone_introspect.statistics = NULL; #if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; + jemalloc_zone_introspect.zone_locked = NULL; #endif #if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; -#else - zone_introspect.enumerate_unavailable_without_blocks = NULL; + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; +# ifdef __BLOCKS__ + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; +# else + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; +# endif #endif -#endif +} + +static malloc_zone_t * +zone_default_get(void) +{ + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. This - * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. 
That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. */ - if (malloc_default_purgeable_zone != NULL) - purgeable_zone = malloc_default_purgeable_zone(); + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } - /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); + if (num_zones) + return (zones[0]); + + return (malloc_default_zone()); +} + +/* As written, this function can only promote jemalloc_zone. */ +static void +zone_promote(void) +{ + malloc_zone_t *zone; do { - default_zone = malloc_default_zone(); /* * Unregister and reregister the default zone. On OSX >= 10.6, * unregistering takes the last registered zone and places it * at the location of the specified zone. Unregistering the * default zone thus makes the last registered one the default. * On OSX < 10.6, unregistering shifts all registered zones. * The first registered zone then becomes the default. */ malloc_zone_unregister(default_zone); malloc_zone_register(default_zone); + /* * On OSX 10.6, having the default purgeable zone appear before * the default zone makes some things crash because it thinks it * owns the default zone allocated pointers. We thus * unregister/re-register it in order to ensure it's always * after the default zone. On OSX < 10.6, there is no purgeable * zone, so this does nothing. On OSX >= 10.6, unregistering * replaces the purgeable zone with the last registered zone * above, i.e. the default zone. Registering it again then puts * it at the end, obviously after the default zone. 
*/ - if (purgeable_zone) { + if (purgeable_zone != NULL) { malloc_zone_unregister(purgeable_zone); malloc_zone_register(purgeable_zone); } - } while (malloc_default_zone() != &zone); + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); } + +JEMALLOC_ATTR(constructor) +void +zone_register(void) +{ + + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. + */ + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) + return; + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so + * malloc_default_purgeable_zone() is called beforehand so that the + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); + + /* Register the custom zone. At this point it won't be the default. */ + zone_init(); + malloc_zone_register(&jemalloc_zone); + + /* Promote the custom zone to be default. */ + zone_promote(); +}
--- a/memory/jemalloc/src/test/include/test/jemalloc_test.h.in +++ b/memory/jemalloc/src/test/include/test/jemalloc_test.h.in @@ -14,49 +14,16 @@ #ifdef _WIN32 # include <windows.h> # include "msvc_compat/windows_extra.h" #else # include <pthread.h> #endif -/******************************************************************************/ -/* - * Define always-enabled assertion macros, so that test assertions execute even - * if assertions are disabled in the library code. These definitions must - * exist prior to including "jemalloc/internal/util.h". - */ -#define assert(e) do { \ - if (!(e)) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) - -#define not_reached() do { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ -} while (0) - -#define not_implemented() do { \ - malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ -} while (0) - -#define assert_not_implemented(e) do { \ - if (!(e)) \ - not_implemented(); \ -} while (0) - #include "test/jemalloc_test_defs.h" #ifdef JEMALLOC_OSSPIN # include <libkern/OSAtomic.h> #endif #if defined(HAVE_ALTIVEC) && !defined(__APPLE__) # include <altivec.h> @@ -81,16 +48,24 @@ * essentially identical code within the test infrastructure). 
*/ #elif defined(JEMALLOC_INTEGRATION_TEST) # define JEMALLOC_MANGLE # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/internal/jemalloc_internal_defs.h" # include "jemalloc/internal/jemalloc_internal_macros.h" +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; + # define JEMALLOC_N(n) @private_namespace@##n # include "jemalloc/internal/private_namespace.h" # define JEMALLOC_H_TYPES # define JEMALLOC_H_STRUCTS # define JEMALLOC_H_EXTERNS # define JEMALLOC_H_INLINES # include "jemalloc/internal/nstime.h" @@ -144,8 +119,45 @@ #include "test/math.h" #include "test/mtx.h" #include "test/mq.h" #include "test/test.h" #include "test/timer.h" #include "test/thd.h" #define MEXP 19937 #include "test/SFMT.h" + +/******************************************************************************/ +/* + * Define always-enabled assertion macros, so that test assertions execute even + * if assertions are disabled in the library code. + */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented + +#define assert(e) do { \ + if (!(e)) { \ + malloc_printf( \ + "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + malloc_printf( \ + "<jemalloc>: %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define not_implemented() do { \ + malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define assert_not_implemented(e) do { \ + if (!(e)) \ + not_implemented(); \ +} while (0)
--- a/memory/jemalloc/src/test/include/test/mtx.h +++ b/memory/jemalloc/src/test/include/test/mtx.h @@ -3,16 +3,18 @@ * is unfortunate, but there are allocator bootstrapping considerations that * would leak into the test infrastructure if malloc_mutex were used directly * in tests. */ typedef struct { #ifdef _WIN32 CRITICAL_SECTION lock; +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #else pthread_mutex_t lock; #endif } mtx_t; bool mtx_init(mtx_t *mtx);
--- a/memory/jemalloc/src/test/include/test/test.h +++ b/memory/jemalloc/src/test/include/test/test.h @@ -306,24 +306,28 @@ f(void) \ goto label_test_end; \ label_test_end: \ p_test_fini(); \ } #define test(...) \ p_test(__VA_ARGS__, NULL) +#define test_no_malloc_init(...) \ + p_test_no_malloc_init(__VA_ARGS__, NULL) + #define test_skip_if(e) do { \ if (e) { \ test_skip("%s:%s:%d: Test skipped: (%s)", \ __func__, __FILE__, __LINE__, #e); \ goto label_test_end; \ } \ } while (0) void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); /* For private use by macros. */ test_status_t p_test(test_t *t, ...); +test_status_t p_test_no_malloc_init(test_t *t, ...); void p_test_init(const char *name); void p_test_fini(void); void p_test_fail(const char *prefix, const char *message);
--- a/memory/jemalloc/src/test/integration/aligned_alloc.c +++ b/memory/jemalloc/src/test/integration/aligned_alloc.c @@ -1,14 +1,25 @@ #include "test/jemalloc_test.h" #define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 +#define MAXALIGN (((size_t)1) << 23) + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) +{ + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; alignment = 0; set_errno(0); @@ -69,16 +80,17 @@ TEST_BEGIN(test_oom_errors) assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 size_t alignment, size, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; @@ -105,17 +117,19 @@ TEST_BEGIN(test_alignment_and_size) } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } + purge(); } +#undef NITER } TEST_END int main(void) { return (test(
--- a/memory/jemalloc/src/test/integration/mallocx.c +++ b/memory/jemalloc/src/test/integration/mallocx.c @@ -1,10 +1,14 @@ #include "test/jemalloc_test.h" +#ifdef JEMALLOC_FILL +const char *malloc_conf = "junk:false"; +#endif + static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, @@ -41,16 +45,29 @@ get_size_impl(const char *cmd, size_t in static size_t get_huge_size(size_t ind) { return (get_size_impl("arenas.hchunk.0.size", ind)); } +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) +{ + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + TEST_BEGIN(test_overflow) { size_t hugemax; hugemax = get_huge_size(get_nhuge()-1); assert_ptr_null(mallocx(hugemax+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1); @@ -64,17 +81,17 @@ TEST_BEGIN(test_overflow) assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); } TEST_END TEST_BEGIN(test_oom) { - size_t hugemax, size, alignment; + size_t hugemax; bool oom; void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. 
*/ @@ -87,66 +104,73 @@ TEST_BEGIN(test_oom) } assert_true(oom, "Expected OOM during series of calls to mallocx(size=%zu, 0)", hugemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { if (ptrs[i] != NULL) dallocx(ptrs[i], 0); } + purge(); #if LG_SIZEOF_PTR == 3 - size = ZU(0x8000000000000000); - alignment = ZU(0x8000000000000000); + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x8000000000000000ULL)), + "Expected OOM for mallocx()"); + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x80000000)), + "Expected OOM for mallocx()"); #else - size = ZU(0x80000000); - alignment = ZU(0x80000000); + assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), + "Expected OOM for mallocx()"); #endif - assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)), - "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size, - alignment); } TEST_END TEST_BEGIN(test_basic) { -#define MAXSZ (((size_t)1) << 26) +#define MAXSZ (((size_t)1) << 23) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { size_t nsz, rsz; void *p; nsz = nallocx(sz, 0); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); rsz = sallocx(p, 0); assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); dallocx(p, 0); p = mallocx(sz, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, MALLOCX_ZERO); - assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error", + nsz); rsz = sallocx(p, 0); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); dallocx(p, 0); + 
purge(); } #undef MAXSZ } TEST_END TEST_BEGIN(test_alignment_and_size) { -#define MAXALIGN (((size_t)1) << 25) +#define MAXALIGN (((size_t)1) << 23) #define NITER 4 size_t nsz, rsz, sz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; @@ -186,16 +210,17 @@ TEST_BEGIN(test_alignment_and_size) } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { dallocx(ps[i], 0); ps[i] = NULL; } } } + purge(); } #undef MAXALIGN #undef NITER } TEST_END int main(void)
--- a/memory/jemalloc/src/test/integration/posix_memalign.c +++ b/memory/jemalloc/src/test/integration/posix_memalign.c @@ -1,14 +1,25 @@ #include "test/jemalloc_test.h" #define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 +#define MAXALIGN (((size_t)1) << 23) + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) +{ + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; for (alignment = 0; alignment < sizeof(void *); alignment++) { assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, @@ -61,16 +72,17 @@ TEST_BEGIN(test_oom_errors) assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; @@ -99,17 +111,19 @@ TEST_BEGIN(test_alignment_and_size) } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } + purge(); } +#undef NITER } TEST_END int main(void) { return (test(
--- a/memory/jemalloc/src/test/integration/xallocx.c +++ b/memory/jemalloc/src/test/integration/xallocx.c @@ -1,10 +1,14 @@ #include "test/jemalloc_test.h" +#ifdef JEMALLOC_FILL +const char *malloc_conf = "junk:false"; +#endif + /* * Use a separate arena for xallocx() extension/contraction tests so that * internal allocation e.g. by heap profiling can't interpose allocations where * xallocx() would ordinarily be able to extend. */ static unsigned arena_ind(void) {
--- a/memory/jemalloc/src/test/src/mtx.c +++ b/memory/jemalloc/src/test/src/mtx.c @@ -6,16 +6,18 @@ bool mtx_init(mtx_t *mtx) { #ifdef _WIN32 if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) return (true); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mtx->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mtx->lock = 0; #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) return (true); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); @@ -28,39 +30,44 @@ mtx_init(mtx_t *mtx) return (false); } void mtx_fini(mtx_t *mtx) { #ifdef _WIN32 +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) #elif (defined(JEMALLOC_OSSPIN)) #else pthread_mutex_destroy(&mtx->lock); #endif } void mtx_lock(mtx_t *mtx) { #ifdef _WIN32 EnterCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_lock(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mtx->lock); #else pthread_mutex_lock(&mtx->lock); #endif } void mtx_unlock(mtx_t *mtx) { #ifdef _WIN32 LeaveCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_unlock(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mtx->lock); #else pthread_mutex_unlock(&mtx->lock); #endif }
--- a/memory/jemalloc/src/test/src/test.c +++ b/memory/jemalloc/src/test/src/test.c @@ -55,53 +55,79 @@ p_test_init(const char *name) void p_test_fini(void) { test_counts[test_status]++; malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); } -test_status_t -p_test(test_t *t, ...) +static test_status_t +p_test_impl(bool do_malloc_init, test_t *t, va_list ap) { test_status_t ret; - va_list ap; - /* - * Make sure initialization occurs prior to running tests. Tests are - * special because they may use internal facilities prior to triggering - * initialization as a side effect of calling into the public API. This - * is a final safety that works even if jemalloc_constructor() doesn't - * run, as for MSVC builds. - */ - if (nallocx(1, 0) == 0) { - malloc_printf("Initialization error"); - return (test_status_fail); + if (do_malloc_init) { + /* + * Make sure initialization occurs prior to running tests. + * Tests are special because they may use internal facilities + * prior to triggering initialization as a side effect of + * calling into the public API. + */ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return (test_status_fail); + } } ret = test_status_pass; - va_start(ap, t); for (; t != NULL; t = va_arg(ap, test_t *)) { t(); if (test_status > ret) ret = test_status; } - va_end(ap); malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", test_status_string(test_status_pass), test_counts[test_status_pass], test_count, test_status_string(test_status_skip), test_counts[test_status_skip], test_count, test_status_string(test_status_fail), test_counts[test_status_fail], test_count); return (ret); } +test_status_t +p_test(test_t *t, ...) +{ + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(true, t, ap); + va_end(ap); + + return (ret); +} + +test_status_t +p_test_no_malloc_init(test_t *t, ...) 
+{ + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(false, t, ap); + va_end(ap); + + return (ret); +} + void p_test_fail(const char *prefix, const char *message) { malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); test_status = test_status_fail; }
--- a/memory/jemalloc/src/test/src/timer.c +++ b/memory/jemalloc/src/test/src/timer.c @@ -27,19 +27,18 @@ timer_usec(const timedelta_t *timer) } void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { uint64_t t0 = timer_usec(a); uint64_t t1 = timer_usec(b); uint64_t mult; - unsigned i = 0; - unsigned j; - int n; + size_t i = 0; + size_t j, n; /* Whole. */ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); i += n; if (i >= buflen) return; mult = 1; for (j = 0; j < n; j++)
--- a/memory/jemalloc/src/test/stress/microbench.c +++ b/memory/jemalloc/src/test/stress/microbench.c @@ -1,12 +1,13 @@ #include "test/jemalloc_test.h" JEMALLOC_INLINE_C void -time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void)) +time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, + void (*func)(void)) { uint64_t i; for (i = 0; i < nwarmup; i++) func(); timer_start(timer); for (i = 0; i < niter; i++) func();
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/test/unit/a0.c @@ -0,0 +1,19 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_a0) +{ + void *p; + + p = a0malloc(1); + assert_ptr_not_null(p, "Unexpected a0malloc() error"); + a0dalloc(p); +} +TEST_END + +int +main(void) +{ + + return (test_no_malloc_init( + test_a0)); +}
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/test/unit/arena_reset.c @@ -0,0 +1,159 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = "prof:true,lg_prof_sample:0"; +#endif + +static unsigned +get_nsizes_impl(const char *cmd) +{ + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return (ret); +} + +static unsigned +get_nsmall(void) +{ + + return (get_nsizes_impl("arenas.nbins")); +} + +static unsigned +get_nlarge(void) +{ + + return (get_nsizes_impl("arenas.nlruns")); +} + +static unsigned +get_nhuge(void) +{ + + return (get_nsizes_impl("arenas.nhchunks")); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) +{ + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
failure", cmd, ind); + + return (ret); +} + +static size_t +get_small_size(size_t ind) +{ + + return (get_size_impl("arenas.bin.0.size", ind)); +} + +static size_t +get_large_size(size_t ind) +{ + + return (get_size_impl("arenas.lrun.0.size", ind)); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +TEST_BEGIN(test_arena_reset) +{ +#define NHUGE 4 + unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i; + size_t sz, miblen; + void **ptrs; + int flags; + size_t mib[3]; + tsdn_t *tsdn; + + test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill + && unlikely(opt_quarantine))); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + nsmall = get_nsmall(); + nlarge = get_nlarge(); + nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge(); + nptrs = nsmall + nlarge + nhuge; + ptrs = (void **)malloc(nptrs * sizeof(void *)); + assert_ptr_not_null(ptrs, "Unexpected malloc() failure"); + + /* Allocate objects with a wide range of sizes. */ + for (i = 0; i < nsmall; i++) { + sz = get_small_size(i); + ptrs[i] = mallocx(sz, flags); + assert_ptr_not_null(ptrs[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + for (i = 0; i < nlarge; i++) { + sz = get_large_size(i); + ptrs[nsmall + i] = mallocx(sz, flags); + assert_ptr_not_null(ptrs[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + for (i = 0; i < nhuge; i++) { + sz = get_huge_size(i); + ptrs[nsmall + nlarge + i] = mallocx(sz, flags); + assert_ptr_not_null(ptrs[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + + tsdn = tsdn_fetch(); + + /* Verify allocations. */ + for (i = 0; i < nptrs; i++) { + assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0, + "Allocation should have queryable size"); + } + + /* Reset. 
*/ + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + + /* Verify allocations no longer exist. */ + for (i = 0; i < nptrs; i++) { + assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0, + "Allocation should no longer exist"); + } + + free(ptrs); +} +TEST_END + +int +main(void) +{ + + return (test( + test_arena_reset)); +}
--- a/memory/jemalloc/src/test/unit/bitmap.c +++ b/memory/jemalloc/src/test/unit/bitmap.c @@ -96,17 +96,17 @@ TEST_END TEST_BEGIN(test_bitmap_sfu) { size_t i; for (i = 1; i <= BITMAP_MAXBITS; i++) { bitmap_info_t binfo; bitmap_info_init(&binfo, i); { - ssize_t j; + size_t j; bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_size(&binfo)); bitmap_init(bitmap, &binfo); /* Iteratively set bits starting at the beginning. */ for (j = 0; j < i; j++) { assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, "First unset bit should be just after " @@ -114,17 +114,17 @@ TEST_BEGIN(test_bitmap_sfu) } assert_true(bitmap_full(bitmap, &binfo), "All bits should be set"); /* * Iteratively unset bits starting at the end, and * verify that bitmap_sfu() reaches the unset bits. */ - for (j = i - 1; j >= 0; j--) { + for (j = i - 1; j < i; j--) { /* (i..0] */ bitmap_unset(bitmap, &binfo, j); assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, "First unset bit should the bit previously " "unset"); bitmap_unset(bitmap, &binfo, j); } assert_false(bitmap_get(bitmap, &binfo, 0), "Bit should be unset");
--- a/memory/jemalloc/src/test/unit/ckh.c +++ b/memory/jemalloc/src/test/unit/ckh.c @@ -2,18 +2,18 @@ TEST_BEGIN(test_new_delete) { tsd_t *tsd; ckh_t ckh; tsd = tsd_fetch(); - assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), - "Unexpected ckh_new() error"); + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); } TEST_END @@ -27,18 +27,18 @@ TEST_BEGIN(test_count_insert_search_remo "a string.", "A string." }; const char *missing = "A string not in the hash table."; size_t i; tsd = tsd_fetch(); - assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), - "Unexpected ckh_new() error"); + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); /* Insert. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { ckh_insert(tsd, &ckh, strs[i], strs[i]); assert_zu_eq(ckh_count(&ckh), i+1,
--- a/memory/jemalloc/src/test/unit/decay.c +++ b/memory/jemalloc/src/test/unit/decay.c @@ -1,26 +1,34 @@ #include "test/jemalloc_test.h" const char *malloc_conf = "purge:decay,decay_time:1"; +static nstime_monotonic_t *nstime_monotonic_orig; static nstime_update_t *nstime_update_orig; static unsigned nupdates_mock; static nstime_t time_mock; -static bool nonmonotonic_mock; +static bool monotonic_mock; + +static bool +nstime_monotonic_mock(void) +{ + + return (monotonic_mock); +} static bool nstime_update_mock(nstime_t *time) { nupdates_mock++; - if (!nonmonotonic_mock) + if (monotonic_mock) nstime_copy(time, &time_mock); - return (nonmonotonic_mock); + return (!monotonic_mock); } TEST_BEGIN(test_decay_ticks) { ticker_t *decay_ticker; unsigned tick0, tick1; size_t sz, huge0, large0; void *p; @@ -240,30 +248,33 @@ TEST_BEGIN(test_decay_ticker) for (i = 0; i < NPS; i++) { ps[i] = mallocx(large, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); - nonmonotonic_mock = false; + monotonic_mock = true; + nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (i = 0; i < NPS; i++) { dallocx(ps[i], flags); nupdates0 = nupdates_mock; assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.decay failure"); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } + nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; nstime_init(&time, 0); nstime_update(&time); nstime_init2(&decay_time, opt_decay_time, 0); nstime_copy(&deadline, &time); nstime_add(&deadline, &decay_time); do { @@ -311,19 +322,21 @@ TEST_BEGIN(test_decay_nonmonotonic) "Unexpected mallctl failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge0, &sz, NULL, 0), config_stats ? 
0 : ENOENT, "Unexpected mallctl result"); nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); - nonmonotonic_mock = true; + monotonic_mock = false; + nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (i = 0; i < NPS; i++) { ps[i] = mallocx(large0, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } for (i = 0; i < NPS; i++) { @@ -337,18 +350,19 @@ TEST_BEGIN(test_decay_nonmonotonic) assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge1, &sz, NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); if (config_stats) - assert_u64_gt(npurge1, npurge0, "Expected purging to occur"); + assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); + nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; #undef NPS } TEST_END int main(void) {
--- a/memory/jemalloc/src/test/unit/fork.c +++ b/memory/jemalloc/src/test/unit/fork.c @@ -9,31 +9,50 @@ TEST_BEGIN(test_fork) #ifndef _WIN32 void *p; pid_t pid; p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); pid = fork(); + + free(p); + + p = malloc(64); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + free(p); + if (pid == -1) { /* Error. */ test_fail("Unexpected fork() failure"); } else if (pid == 0) { /* Child. */ - exit(0); + _exit(0); } else { int status; /* Parent. */ - free(p); - do { + while (true) { if (waitpid(pid, &status, 0) == -1) test_fail("Unexpected waitpid() failure"); - } while (!WIFEXITED(status) && !WIFSIGNALED(status)); + if (WIFSIGNALED(status)) { + test_fail("Unexpected child termination due to " + "signal %d", WTERMSIG(status)); + break; + } + if (WIFEXITED(status)) { + if (WEXITSTATUS(status) != 0) { + test_fail( + "Unexpected child exit value %d", + WEXITSTATUS(status)); + } + break; + } + } } #else test_skip("fork(2) is irrelevant to Windows"); #endif } TEST_END int
--- a/memory/jemalloc/src/test/unit/junk.c +++ b/memory/jemalloc/src/test/unit/junk.c @@ -24,32 +24,32 @@ watch_junking(void *p) static void arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) { size_t i; arena_dalloc_junk_small_orig(ptr, bin_info); for (i = 0; i < bin_info->reg_size; i++) { - assert_c_eq(((char *)ptr)[i], 0x5a, + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, bin_info->reg_size); } if (ptr == watch_for_junking) saw_junking = true; } static void arena_dalloc_junk_large_intercept(void *ptr, size_t usize) { size_t i; arena_dalloc_junk_large_orig(ptr, usize); for (i = 0; i < usize; i++) { - assert_c_eq(((char *)ptr)[i], 0x5a, + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, usize); } if (ptr == watch_for_junking) saw_junking = true; } static void @@ -64,55 +64,55 @@ huge_dalloc_junk_intercept(void *ptr, si */ if (ptr == watch_for_junking) saw_junking = true; } static void test_junk(size_t sz_min, size_t sz_max) { - char *s; + uint8_t *s; size_t sz_prev, sz, i; if (opt_junk_free) { arena_dalloc_junk_small_orig = arena_dalloc_junk_small; arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; arena_dalloc_junk_large_orig = arena_dalloc_junk_large; arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; huge_dalloc_junk_orig = huge_dalloc_junk; huge_dalloc_junk = huge_dalloc_junk_intercept; } sz_prev = 0; - s = (char *)mallocx(sz_min, 0); + s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { - assert_c_eq(s[0], 'a', + assert_u_eq(s[0], 'a', "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); - assert_c_eq(s[sz_prev-1], 'a', + assert_u_eq(s[sz_prev-1], 'a', "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i 
= sz_prev; i < sz; i++) { if (opt_junk_alloc) { - assert_c_eq(s[i], 0xa5, + assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK, "Newly allocated byte %zu/%zu isn't " "junk-filled", i, sz); } s[i] = 'a'; } if (xallocx(s, sz+1, 0, 0) == sz) { watch_junking(s); - s = (char *)rallocx(s, sz+1, 0); + s = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be junk-filled", sz); } } @@ -239,16 +239,15 @@ TEST_BEGIN(test_junk_redzone) arena_redzone_corruption = arena_redzone_corruption_orig; } TEST_END int main(void) { - assert(!config_fill || opt_junk_alloc || opt_junk_free); return (test( test_junk_small, test_junk_large, test_junk_huge, test_junk_large_ralloc_shrink, test_junk_redzone)); }
--- a/memory/jemalloc/src/test/unit/junk_alloc.c +++ b/memory/jemalloc/src/test/unit/junk_alloc.c @@ -1,3 +1,3 @@ -#define JEMALLOC_TEST_JUNK_OPT "junk:alloc" +#define JEMALLOC_TEST_JUNK_OPT "junk:alloc" #include "junk.c" #undef JEMALLOC_TEST_JUNK_OPT
--- a/memory/jemalloc/src/test/unit/junk_free.c +++ b/memory/jemalloc/src/test/unit/junk_free.c @@ -1,3 +1,3 @@ -#define JEMALLOC_TEST_JUNK_OPT "junk:free" +#define JEMALLOC_TEST_JUNK_OPT "junk:free" #include "junk.c" #undef JEMALLOC_TEST_JUNK_OPT
--- a/memory/jemalloc/src/test/unit/math.c +++ b/memory/jemalloc/src/test/unit/math.c @@ -1,15 +1,19 @@ #include "test/jemalloc_test.h" #define MAX_REL_ERR 1.0e-9 #define MAX_ABS_ERR 1.0e-9 #include <float.h> +#ifdef __PGI +#undef INFINITY +#endif + #ifndef INFINITY #define INFINITY (DBL_MAX + DBL_MAX) #endif static bool double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { double rel_err;
--- a/memory/jemalloc/src/test/unit/nstime.c +++ b/memory/jemalloc/src/test/unit/nstime.c @@ -171,16 +171,23 @@ TEST_BEGIN(test_nstime_divide) nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_subtract(&nsta, &nstc); assert_u64_eq(nstime_divide(&nsta, &nstb), 9, "Incorrect division result"); } TEST_END +TEST_BEGIN(test_nstime_monotonic) +{ + + nstime_monotonic(); +} +TEST_END + TEST_BEGIN(test_nstime_update) { nstime_t nst; nstime_init(&nst, 0); assert_false(nstime_update(&nst), "Basic time update failed."); @@ -193,17 +200,16 @@ TEST_BEGIN(test_nstime_update) { nstime_t nst0; nstime_copy(&nst0, &nst); assert_true(nstime_update(&nst), "Update should detect time roll-back."); assert_d_eq(nstime_compare(&nst, &nst0), 0, "Time should not have been modified"); } - } TEST_END int main(void) { return (test( @@ -211,10 +217,11 @@ main(void) test_nstime_init2, test_nstime_copy, test_nstime_compare, test_nstime_add, test_nstime_subtract, test_nstime_imultiply, test_nstime_idivide, test_nstime_divide, + test_nstime_monotonic, test_nstime_update)); }
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/test/unit/ph.c @@ -0,0 +1,290 @@ +#include "test/jemalloc_test.h" + +typedef struct node_s node_t; + +struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + phn(node_t) link; + uint64_t key; +}; + +static int +node_cmp(const node_t *a, const node_t *b) +{ + int ret; + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the heap, so force an + * arbitrary ordering for non-identical items with equal keys. + */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return (ret); +} + +static int +node_cmp_magic(const node_t *a, const node_t *b) { + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + return (node_cmp(a, b)); +} + +typedef ph(node_t) heap_t; +ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); + +static void +node_print(const node_t *node, unsigned depth) +{ + unsigned i; + node_t *leftmost_child, *sibling; + + for (i = 0; i < depth; i++) + malloc_printf("\t"); + malloc_printf("%2"FMTu64"\n", node->key); + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) + return; + node_print(leftmost_child, depth + 1); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + node_print(sibling, depth + 1); + } +} + +static void +heap_print(const heap_t *heap) +{ + node_t *auxelm; + + malloc_printf("vvv heap %p vvv\n", heap); + if (heap->ph_root == NULL) + goto label_return; + + node_print(heap->ph_root, 0); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + node_print(auxelm, 0); + } + +label_return: + malloc_printf("^^^ heap %p 
^^^\n", heap); +} + +static unsigned +node_validate(const node_t *node, const node_t *parent) +{ + unsigned nnodes = 1; + node_t *leftmost_child, *sibling; + + if (parent != NULL) { + assert_d_ge(node_cmp_magic(node, parent), 0, + "Child is less than parent"); + } + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) + return (nnodes); + assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), + (void *)node, "Leftmost child does not link to node"); + nnodes += node_validate(leftmost_child, node); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, sibling)), sibling, + "sibling's prev doesn't link to sibling"); + nnodes += node_validate(sibling, node); + } + return (nnodes); +} + +static unsigned +heap_validate(const heap_t *heap) +{ + unsigned nnodes = 0; + node_t *auxelm; + + if (heap->ph_root == NULL) + goto label_return; + + nnodes += node_validate(heap->ph_root, NULL); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + nnodes += node_validate(auxelm, NULL); + } + +label_return: + if (false) + heap_print(heap); + return (nnodes); +} + +TEST_BEGIN(test_ph_empty) +{ + heap_t heap; + + heap_new(&heap); + assert_true(heap_empty(&heap), "Heap should be empty"); + assert_ptr_null(heap_first(&heap), "Unexpected node"); +} +TEST_END + +static void +node_remove(heap_t *heap, node_t *node) +{ + + heap_remove(heap, node); + + node->magic = 0; +} + +static node_t * +node_remove_first(heap_t *heap) +{ + node_t *node = heap_remove_first(heap); + node->magic = 0; + return (node); +} + +TEST_BEGIN(test_ph_random) +{ +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t 
*sfmt; + uint64_t bag[NNODES]; + heap_t heap; + node_t nodes[NNODES]; + unsigned i, j, k; + + sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) + bag[j] = j; + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) + bag[j] = NNODES - j - 1; + break; + default: + for (j = 0; j < NNODES; j++) + bag[j] = gen_rand64_range(sfmt, NNODES); + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize heap and nodes. */ + heap_new(&heap); + assert_u_eq(heap_validate(&heap), 0, + "Incorrect node count"); + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + heap_insert(&heap, &nodes[k]); + if (i % 13 == 12) { + /* Trigger merging. */ + assert_ptr_not_null(heap_first(&heap), + "Heap should not be empty"); + } + assert_u_eq(heap_validate(&heap), k + 1, + "Incorrect node count"); + } + + assert_false(heap_empty(&heap), + "Heap should not be empty"); + + /* Remove nodes. 
*/ + switch (i % 4) { + case 0: + for (k = 0; k < j; k++) { + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + node_remove(&heap, &nodes[k]); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + case 1: + for (k = j; k > 0; k--) { + node_remove(&heap, &nodes[k-1]); + assert_u_eq(heap_validate(&heap), k - 1, + "Incorrect node count"); + } + break; + case 2: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = node_remove_first(&heap); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + prev = node; + } + break; + } case 3: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = heap_first(&heap); + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + node_remove(&heap, node); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + prev = node; + } + break; + } default: + not_reached(); + } + + assert_ptr_null(heap_first(&heap), + "Heap should be empty"); + assert_true(heap_empty(&heap), "Heap should be empty"); + } + } + fini_gen_rand(sfmt); +#undef NNODES +#undef SEED +} +TEST_END + +int +main(void) +{ + + return (test( + test_ph_empty, + test_ph_random)); +}
--- a/memory/jemalloc/src/test/unit/prng.c +++ b/memory/jemalloc/src/test/unit/prng.c @@ -1,68 +1,263 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_prng_lg_range) +static void +test_prng_lg_range_u32(bool atomic) +{ + uint32_t sa, sb, ra, rb; + unsigned lg_range; + + sa = 42; + ra = prng_lg_range_u32(&sa, 32, atomic); + sa = 42; + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_eq(ra, rb, + "Repeated generation should produce repeated results"); + + sb = 42; + rb = prng_lg_range_u32(&sb, 32, atomic); + assert_u32_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + sa = 42; + ra = prng_lg_range_u32(&sa, 32, atomic); + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_ne(ra, rb, + "Full-width results must not immediately repeat"); + + sa = 42; + ra = prng_lg_range_u32(&sa, 32, atomic); + for (lg_range = 31; lg_range > 0; lg_range--) { + sb = 42; + rb = prng_lg_range_u32(&sb, lg_range, atomic); + assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_u32_eq(rb, (ra >> (32 - lg_range)), + "Expected high order bits of full-width result, " + "lg_range=%u", lg_range); + } +} + +static void +test_prng_lg_range_u64(void) { uint64_t sa, sb, ra, rb; unsigned lg_range; sa = 42; - ra = prng_lg_range(&sa, 64); + ra = prng_lg_range_u64(&sa, 64); sa = 42; - rb = prng_lg_range(&sa, 64); + rb = prng_lg_range_u64(&sa, 64); assert_u64_eq(ra, rb, "Repeated generation should produce repeated results"); sb = 42; - rb = prng_lg_range(&sb, 64); + rb = prng_lg_range_u64(&sb, 64); assert_u64_eq(ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; - ra = prng_lg_range(&sa, 64); - rb = prng_lg_range(&sa, 64); + ra = prng_lg_range_u64(&sa, 64); + rb = prng_lg_range_u64(&sa, 64); assert_u64_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; - ra = prng_lg_range(&sa, 64); + ra = prng_lg_range_u64(&sa, 64); for (lg_range = 63; lg_range > 0; 
lg_range--) { sb = 42; - rb = prng_lg_range(&sb, lg_range); + rb = prng_lg_range_u64(&sb, lg_range); assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_u64_eq(rb, (ra >> (64 - lg_range)), "Expected high order bits of full-width result, " "lg_range=%u", lg_range); } } + +static void +test_prng_lg_range_zu(bool atomic) +{ + size_t sa, sb, ra, rb; + unsigned lg_range; + + sa = 42; + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + sa = 42; + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Repeated generation should produce repeated results"); + + sb = 42; + rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + sa = 42; + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_ne(ra, rb, + "Full-width results must not immediately repeat"); + + sa = 42; + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; + lg_range--) { + sb = 42; + rb = prng_lg_range_zu(&sb, lg_range, atomic); + assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - + lg_range)), "Expected high order bits of full-width " + "result, lg_range=%u", lg_range); + } +} + +TEST_BEGIN(test_prng_lg_range_u32_nonatomic) +{ + + test_prng_lg_range_u32(false); +} TEST_END -TEST_BEGIN(test_prng_range) +TEST_BEGIN(test_prng_lg_range_u32_atomic) +{ + + test_prng_lg_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_u64_nonatomic) +{ + + test_prng_lg_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_zu_nonatomic) +{ + + test_prng_lg_range_zu(false); +} +TEST_END + 
+TEST_BEGIN(test_prng_lg_range_zu_atomic) +{ + + test_prng_lg_range_zu(true); +} +TEST_END + +static void +test_prng_range_u32(bool atomic) +{ + uint32_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + uint32_t s; + unsigned rep; + + s = range; + for (rep = 0; rep < NREPS; rep++) { + uint32_t r = prng_range_u32(&s, range, atomic); + + assert_u32_lt(r, range, "Out of range"); + } + } +} + +static void +test_prng_range_u64(void) { uint64_t range; #define MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { uint64_t s; unsigned rep; s = range; for (rep = 0; rep < NREPS; rep++) { - uint64_t r = prng_range(&s, range); + uint64_t r = prng_range_u64(&s, range); assert_u64_lt(r, range, "Out of range"); } } } + +static void +test_prng_range_zu(bool atomic) +{ + size_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + size_t s; + unsigned rep; + + s = range; + for (rep = 0; rep < NREPS; rep++) { + size_t r = prng_range_zu(&s, range, atomic); + + assert_zu_lt(r, range, "Out of range"); + } + } +} + +TEST_BEGIN(test_prng_range_u32_nonatomic) +{ + + test_prng_range_u32(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_u32_atomic) +{ + + test_prng_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_range_u64_nonatomic) +{ + + test_prng_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_nonatomic) +{ + + test_prng_range_zu(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_atomic) +{ + + test_prng_range_zu(true); +} TEST_END int main(void) { return (test( - test_prng_lg_range, - test_prng_range)); + test_prng_lg_range_u32_nonatomic, + test_prng_lg_range_u32_atomic, + test_prng_lg_range_u64_nonatomic, + test_prng_lg_range_zu_nonatomic, + test_prng_lg_range_zu_atomic, + test_prng_range_u32_nonatomic, + 
test_prng_range_u32_atomic, + test_prng_range_u64_nonatomic, + test_prng_range_zu_nonatomic, + test_prng_range_zu_atomic)); }
--- a/memory/jemalloc/src/test/unit/prof_reset.c +++ b/memory/jemalloc/src/test/unit/prof_reset.c @@ -89,17 +89,18 @@ TEST_BEGIN(test_prof_reset_basic) "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); } TEST_END bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool -prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all) +prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { prof_dump_header_intercepted = true; memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); return (false); }
--- a/memory/jemalloc/src/test/unit/run_quantize.c +++ b/memory/jemalloc/src/test/unit/run_quantize.c @@ -106,17 +106,17 @@ TEST_BEGIN(test_monotonic) "Unexpected mallctl failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlruns", &nlruns, &sz, NULL, 0), 0, "Unexpected mallctl failure"); floor_prev = 0; ceil_prev = 0; - for (i = 1; i < run_quantize_max >> LG_PAGE; i++) { + for (i = 1; i <= chunksize >> LG_PAGE; i++) { size_t run_size, floor, ceil; run_size = i << LG_PAGE; floor = run_quantize_floor(run_size); ceil = run_quantize_ceil(run_size); assert_zu_le(floor, run_size, "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
--- a/memory/jemalloc/src/test/unit/size_classes.c +++ b/memory/jemalloc/src/test/unit/size_classes.c @@ -75,38 +75,110 @@ TEST_BEGIN(test_size_classes) "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class-1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class), "s2u() does not compute same size class"); } TEST_END +TEST_BEGIN(test_psize_classes) +{ + size_t size_class, max_size_class; + pszind_t pind, max_pind; + + max_size_class = get_max_size_class(); + max_pind = psz2ind(max_size_class); + + for (pind = 0, size_class = pind2sz(pind); pind < max_pind || + size_class < max_size_class; pind++, size_class = + pind2sz(pind)) { + assert_true(pind < max_pind, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + assert_true(size_class < max_size_class, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + + assert_u_eq(pind, psz2ind(size_class), + "psz2ind() does not reverse pind2sz(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, psz2ind(size_class), + pind2sz(psz2ind(size_class))); + assert_zu_eq(size_class, pind2sz(psz2ind(size_class)), + "pind2sz() does not reverse psz2ind(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, psz2ind(size_class), + pind2sz(psz2ind(size_class))); + + assert_u_eq(pind+1, psz2ind(size_class+1), + "Next size_class does not round up properly"); + + assert_zu_eq(size_class, (pind > 0) ? 
+ psz2u(pind2sz(pind-1)+1) : psz2u(1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class-1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class), + "psz2u() does not compute same size class"); + assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1), + "psz2u() does not round up to next size class"); + } + + assert_u_eq(pind, psz2ind(pind2sz(pind)), + "psz2ind() does not reverse pind2sz()"); + assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)), + "pind2sz() does not reverse psz2ind()"); + + assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class-1), + "psz2u() does not round up to size class"); + assert_zu_eq(size_class, psz2u(size_class), + "psz2u() does not compute same size class"); +} +TEST_END + TEST_BEGIN(test_overflow) { size_t max_size_class; max_size_class = get_max_size_class(); - assert_u_ge(size2index(max_size_class+1), NSIZES, - "size2index() should return >= NSIZES on overflow"); - assert_u_ge(size2index(ZU(PTRDIFF_MAX)+1), NSIZES, - "size2index() should return >= NSIZES on overflow"); - assert_u_ge(size2index(SIZE_T_MAX), NSIZES, - "size2index() should return >= NSIZES on overflow"); + assert_u_eq(size2index(max_size_class+1), NSIZES, + "size2index() should return NSIZES on overflow"); + assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES, + "size2index() should return NSIZES on overflow"); + assert_u_eq(size2index(SIZE_T_MAX), NSIZES, + "size2index() should return NSIZES on overflow"); - assert_zu_gt(s2u(max_size_class+1), HUGE_MAXCLASS, - "s2u() should return > HUGE_MAXCLASS for unsupported size"); - assert_zu_gt(s2u(ZU(PTRDIFF_MAX)+1), HUGE_MAXCLASS, - "s2u() should return > HUGE_MAXCLASS for unsupported size"); + assert_zu_eq(s2u(max_size_class+1), 0, + "s2u() should return 0 for unsupported size"); + assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0, + "s2u() should return 0 for unsupported 
size"); assert_zu_eq(s2u(SIZE_T_MAX), 0, "s2u() should return 0 on overflow"); + + assert_u_eq(psz2ind(max_size_class+1), NPSIZES, + "psz2ind() should return NPSIZES on overflow"); + assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES, + "psz2ind() should return NPSIZES on overflow"); + assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES, + "psz2ind() should return NPSIZES on overflow"); + + assert_zu_eq(psz2u(max_size_class+1), 0, + "psz2u() should return 0 for unsupported size"); + assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0, + "psz2u() should return 0 for unsupported size"); + assert_zu_eq(psz2u(SIZE_T_MAX), 0, + "psz2u() should return 0 on overflow"); } TEST_END int main(void) { return (test( test_size_classes, + test_psize_classes, test_overflow)); }
--- a/memory/jemalloc/src/test/unit/stats.c +++ b/memory/jemalloc/src/test/unit/stats.c @@ -215,21 +215,21 @@ TEST_BEGIN(test_stats_arenas_large) assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); - assert_zu_gt(nmalloc, 0, + assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); - assert_zu_ge(nmalloc, ndalloc, + assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); - assert_zu_gt(nrequests, 0, + assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_huge) @@ -257,19 +257,19 @@ TEST_BEGIN(test_stats_arenas_huge) assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); - assert_zu_gt(nmalloc, 0, + assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); - assert_zu_ge(nmalloc, ndalloc, + assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_bins)
--- a/memory/jemalloc/src/test/unit/tsd.c +++ b/memory/jemalloc/src/test/unit/tsd.c @@ -53,28 +53,28 @@ malloc_tsd_data(, data_, data_t, DATA_IN malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup) static void * thd_start(void *arg) { data_t d = (data_t)(uintptr_t)arg; void *p; - assert_x_eq(*data_tsd_get(), DATA_INIT, + assert_x_eq(*data_tsd_get(true), DATA_INIT, "Initial tsd get should return initialization value"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() failure"); data_tsd_set(&d); - assert_x_eq(*data_tsd_get(), d, + assert_x_eq(*data_tsd_get(true), d, "After tsd set, tsd get should return value that was set"); d = 0; - assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg, + assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg, "Resetting local data should have no effect on tsd"); free(p); return (NULL); } TEST_BEGIN(test_tsd_main_thread) { @@ -94,14 +94,19 @@ TEST_BEGIN(test_tsd_sub_thread) "Cleanup function should have executed"); } TEST_END int main(void) { + /* Core tsd bootstrapping must happen prior to data_tsd_boot(). */ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return (test_status_fail); + } data_tsd_boot(); return (test( test_tsd_main_thread, test_tsd_sub_thread)); }
--- a/memory/jemalloc/src/test/unit/util.c +++ b/memory/jemalloc/src/test/unit/util.c @@ -1,35 +1,35 @@ #include "test/jemalloc_test.h" #define TEST_POW2_CEIL(t, suf, pri) do { \ unsigned i, pow2; \ t x; \ \ - assert_zu_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ + assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ \ for (i = 0; i < sizeof(t) * 8; i++) { \ - assert_zu_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) << i, \ - "Unexpected result"); \ + assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ + << i, "Unexpected result"); \ } \ \ for (i = 2; i < sizeof(t) * 8; i++) { \ - assert_zu_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ ((t)1) << i, "Unexpected result"); \ } \ \ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ - assert_zu_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ ((t)1) << (i+1), "Unexpected result"); \ } \ \ for (pow2 = 1; pow2 < 25; pow2++) { \ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ x++) { \ - assert_zu_eq(pow2_ceil_##suf(x), \ + assert_##suf##_eq(pow2_ceil_##suf(x), \ ((t)1) << pow2, \ "Unexpected result, x=%"pri, x); \ } \ } \ } while (0) TEST_BEGIN(test_pow2_ceil_u64) { @@ -155,24 +155,24 @@ TEST_BEGIN(test_malloc_strtoumax) } } TEST_END TEST_BEGIN(test_malloc_snprintf_truncated) { #define BUFLEN 15 char buf[BUFLEN]; - int result; + size_t result; size_t len; -#define TEST(expected_str_untruncated, ...) do { \ +#define TEST(expected_str_untruncated, ...) 
do { \ result = malloc_snprintf(buf, len, __VA_ARGS__); \ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ "Unexpected string inequality (\"%s\" vs \"%s\")", \ - buf, expected_str_untruncated); \ - assert_d_eq(result, strlen(expected_str_untruncated), \ + buf, expected_str_untruncated); \ + assert_zu_eq(result, strlen(expected_str_untruncated), \ "Unexpected result"); \ } while (0) for (len = 1; len < BUFLEN; len++) { TEST("012346789", "012346789"); TEST("a0123b", "a%sb", "0123"); TEST("a01234567", "a%s%s", "0123", "4567"); TEST("a0123 ", "a%-6s", "0123"); @@ -188,21 +188,21 @@ TEST_BEGIN(test_malloc_snprintf_truncate #undef TEST } TEST_END TEST_BEGIN(test_malloc_snprintf) { #define BUFLEN 128 char buf[BUFLEN]; - int result; + size_t result; #define TEST(expected_str, ...) do { \ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ assert_str_eq(buf, expected_str, "Unexpected output"); \ - assert_d_eq(result, strlen(expected_str), "Unexpected result"); \ + assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ } while (0) TEST("hello", "hello"); TEST("50%, 100%", "50%%, %d%%", 100); TEST("a0123b", "a%sb", "0123");
new file mode 100644 --- /dev/null +++ b/memory/jemalloc/src/test/unit/witness.c @@ -0,0 +1,278 @@ +#include "test/jemalloc_test.h" + +static witness_lock_error_t *witness_lock_error_orig; +static witness_owner_error_t *witness_owner_error_orig; +static witness_not_owner_error_t *witness_not_owner_error_orig; +static witness_lockless_error_t *witness_lockless_error_orig; + +static bool saw_lock_error; +static bool saw_owner_error; +static bool saw_not_owner_error; +static bool saw_lockless_error; + +static void +witness_lock_error_intercept(const witness_list_t *witnesses, + const witness_t *witness) +{ + + saw_lock_error = true; +} + +static void +witness_owner_error_intercept(const witness_t *witness) +{ + + saw_owner_error = true; +} + +static void +witness_not_owner_error_intercept(const witness_t *witness) +{ + + saw_not_owner_error = true; +} + +static void +witness_lockless_error_intercept(const witness_list_t *witnesses) +{ + + saw_lockless_error = true; +} + +static int +witness_comp(const witness_t *a, const witness_t *b) +{ + + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + return (strcmp(a->name, b->name)); +} + +static int +witness_comp_reverse(const witness_t *a, const witness_t *b) +{ + + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + return (-strcmp(a->name, b->name)); +} + +TEST_BEGIN(test_witness) +{ + witness_t a, b; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + + tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, NULL); + witness_assert_not_owner(tsdn, &a); + witness_lock(tsdn, &a); + witness_assert_owner(tsdn, &a); + + witness_init(&b, "b", 2, NULL); + witness_assert_not_owner(tsdn, &b); + witness_lock(tsdn, &b); + witness_assert_owner(tsdn, &b); + + witness_unlock(tsdn, &a); + witness_unlock(tsdn, &b); + + witness_assert_lockless(tsdn); +} +TEST_END + +TEST_BEGIN(test_witness_comp) +{ + witness_t a, b, c, d; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + 
+ tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, witness_comp); + witness_assert_not_owner(tsdn, &a); + witness_lock(tsdn, &a); + witness_assert_owner(tsdn, &a); + + witness_init(&b, "b", 1, witness_comp); + witness_assert_not_owner(tsdn, &b); + witness_lock(tsdn, &b); + witness_assert_owner(tsdn, &b); + witness_unlock(tsdn, &b); + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_init(&c, "c", 1, witness_comp_reverse); + witness_assert_not_owner(tsdn, &c); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(tsdn, &c); + assert_true(saw_lock_error, "Expected witness lock error"); + witness_unlock(tsdn, &c); + + saw_lock_error = false; + + witness_init(&d, "d", 1, NULL); + witness_assert_not_owner(tsdn, &d); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(tsdn, &d); + assert_true(saw_lock_error, "Expected witness lock error"); + witness_unlock(tsdn, &d); + + witness_unlock(tsdn, &a); + + witness_assert_lockless(tsdn); + + witness_lock_error = witness_lock_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_reversal) +{ + witness_t a, b; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, NULL); + witness_init(&b, "b", 2, NULL); + + witness_lock(tsdn, &b); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(tsdn, &a); + assert_true(saw_lock_error, "Expected witness lock error"); + + witness_unlock(tsdn, &a); + witness_unlock(tsdn, &b); + + witness_assert_lockless(tsdn); + + witness_lock_error = witness_lock_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_recursive) +{ + witness_t a; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + + 
witness_not_owner_error_orig = witness_not_owner_error; + witness_not_owner_error = witness_not_owner_error_intercept; + saw_not_owner_error = false; + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, NULL); + + witness_lock(tsdn, &a); + assert_false(saw_lock_error, "Unexpected witness lock error"); + assert_false(saw_not_owner_error, "Unexpected witness not owner error"); + witness_lock(tsdn, &a); + assert_true(saw_lock_error, "Expected witness lock error"); + assert_true(saw_not_owner_error, "Expected witness not owner error"); + + witness_unlock(tsdn, &a); + + witness_assert_lockless(tsdn); + + witness_owner_error = witness_owner_error_orig; + witness_lock_error = witness_lock_error_orig; + +} +TEST_END + +TEST_BEGIN(test_witness_unlock_not_owned) +{ + witness_t a; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + + witness_owner_error_orig = witness_owner_error; + witness_owner_error = witness_owner_error_intercept; + saw_owner_error = false; + + tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, NULL); + + assert_false(saw_owner_error, "Unexpected owner error"); + witness_unlock(tsdn, &a); + assert_true(saw_owner_error, "Expected owner error"); + + witness_assert_lockless(tsdn); + + witness_owner_error = witness_owner_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_lockful) +{ + witness_t a; + tsdn_t *tsdn; + + test_skip_if(!config_debug); + + witness_lockless_error_orig = witness_lockless_error; + witness_lockless_error = witness_lockless_error_intercept; + saw_lockless_error = false; + + tsdn = tsdn_fetch(); + + witness_assert_lockless(tsdn); + + witness_init(&a, "a", 1, NULL); + + assert_false(saw_lockless_error, "Unexpected lockless error"); + witness_assert_lockless(tsdn); + + witness_lock(tsdn, &a); + witness_assert_lockless(tsdn); + 
assert_true(saw_lockless_error, "Expected lockless error"); + + witness_unlock(tsdn, &a); + + witness_assert_lockless(tsdn); + + witness_lockless_error = witness_lockless_error_orig; +} +TEST_END + +int +main(void) +{ + + return (test( + test_witness, + test_witness_comp, + test_witness_reversal, + test_witness_recursive, + test_witness_unlock_not_owned, + test_witness_lockful)); +}
--- a/memory/jemalloc/src/test/unit/zero.c +++ b/memory/jemalloc/src/test/unit/zero.c @@ -3,49 +3,51 @@ #ifdef JEMALLOC_FILL const char *malloc_conf = "abort:false,junk:false,zero:true,redzone:false,quarantine:0"; #endif static void test_zero(size_t sz_min, size_t sz_max) { - char *s; + uint8_t *s; size_t sz_prev, sz, i; +#define MAGIC ((uint8_t)0x61) sz_prev = 0; - s = (char *)mallocx(sz_min, 0); + s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { - assert_c_eq(s[0], 'a', + assert_u_eq(s[0], MAGIC, "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); - assert_c_eq(s[sz_prev-1], 'a', + assert_u_eq(s[sz_prev-1], MAGIC, "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { - assert_c_eq(s[i], 0x0, + assert_u_eq(s[i], 0x0, "Newly allocated byte %zu/%zu isn't zero-filled", i, sz); - s[i] = 'a'; + s[i] = MAGIC; } if (xallocx(s, sz+1, 0, 0) == sz) { - s = (char *)rallocx(s, sz+1, 0); + s = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); } } dallocx(s, 0); +#undef MAGIC } TEST_BEGIN(test_zero_small) { test_skip_if(!config_fill); test_zero(1, SMALL_MAXCLASS-1); }
--- a/memory/jemalloc/upstream.info +++ b/memory/jemalloc/upstream.info @@ -1,2 +1,2 @@ UPSTREAM_REPO=https://github.com/jemalloc/jemalloc -UPSTREAM_COMMIT=4.1.1 +UPSTREAM_COMMIT=4.3.1
--- a/memory/mozjemalloc/moz.build +++ b/memory/mozjemalloc/moz.build @@ -23,16 +23,17 @@ if not CONFIG['MOZ_JEMALLOC4']: # For non release/esr builds, enable (some) fatal jemalloc assertions. This # helps us catch memory errors. if CONFIG['MOZ_UPDATE_CHANNEL'] not in ('release', 'esr'): DEFINES['MOZ_JEMALLOC_HARD_ASSERTS'] = True DEFINES['abort'] = 'moz_abort' +DEFINES['MOZ_HAS_MOZGLUE'] = True DEFINES['MOZ_JEMALLOC_IMPL'] = True #XXX: PGO on Linux causes problems here # See bug 419470 if CONFIG['OS_TARGET'] == 'Linux': NO_PGO = True LOCAL_INCLUDES += [
--- a/memory/replace/logalloc/replay/moz.build +++ b/memory/replace/logalloc/replay/moz.build @@ -3,16 +3,17 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. Program('logalloc-replay') SOURCES += [ '../FdPrintf.cpp', + '/mfbt/Assertions.cpp', 'Replay.cpp', ] LOCAL_INCLUDES += [ '..', ] # Link replace-malloc and the default allocator.
new file mode 100644 --- /dev/null +++ b/mfbt/Assertions.cpp @@ -0,0 +1,17 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/Types.h" + +/* + * The crash reason is defined as a global variable here rather than in the + * crash reporter itself to make it available to all code, even libraries like + * JS that don't link with the crash reporter directly. This value will only + * be consumed if the crash reporter is used by the target application. + */ + +MOZ_BEGIN_EXTERN_C +MOZ_EXPORT const char* gMozCrashReason = nullptr; +MOZ_END_EXTERN_C
--- a/mfbt/Assertions.h +++ b/mfbt/Assertions.h @@ -13,33 +13,38 @@ #define MOZ_DUMP_ASSERTION_STACK #endif #include "mozilla/Attributes.h" #include "mozilla/Compiler.h" #include "mozilla/Likely.h" #include "mozilla/MacroArgs.h" #include "mozilla/StaticAnalysisFunctions.h" +#include "mozilla/Types.h" #ifdef MOZ_DUMP_ASSERTION_STACK #include "nsTraceRefcnt.h" #endif -#if defined(MOZ_CRASHREPORTER) && defined(MOZILLA_INTERNAL_API) && \ - !defined(MOZILLA_EXTERNAL_LINKAGE) && defined(__cplusplus) -namespace CrashReporter { -// This declaration is present here as well as in nsExceptionHandler.h -// nsExceptionHandler.h is not directly included in this file as it includes -// windows.h, which can cause problems when it is imported into some files due -// to the number of macros defined. -// XXX If you change this definition - also change the definition in -// nsExceptionHandler.h -void AnnotateMozCrashReason(const char* aReason); -} // namespace CrashReporter +#if defined(MOZ_HAS_MOZGLUE) || defined(MOZILLA_INTERNAL_API) +/* + * The crash reason set by MOZ_CRASH_ANNOTATE is consumed by the crash reporter + * if present. It is declared here (and defined in Assertions.cpp) to make it + * available to all code, even libraries that don't link with the crash reporter + * directly. + */ +MOZ_BEGIN_EXTERN_C +extern MFBT_DATA const char* gMozCrashReason; +MOZ_END_EXTERN_C -# define MOZ_CRASH_ANNOTATE(...) CrashReporter::AnnotateMozCrashReason(__VA_ARGS__) +static inline void +AnnotateMozCrashReason(const char* reason) +{ + gMozCrashReason = reason; +} +# define MOZ_CRASH_ANNOTATE(...) AnnotateMozCrashReason(__VA_ARGS__) #else # define MOZ_CRASH_ANNOTATE(...) do { /* nothing */ } while (0) #endif #include <stddef.h> #include <stdio.h> #include <stdlib.h> #ifdef WIN32
--- a/mfbt/objs.mozbuild +++ b/mfbt/objs.mozbuild @@ -1,15 +1,16 @@ # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- # vim: set filetype=python: # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. mfbt_src_lcppsrcs = [ + 'Assertions.cpp', 'ChaosMode.cpp', 'double-conversion/bignum-dtoa.cc', 'double-conversion/bignum.cc', 'double-conversion/cached-powers.cc', 'double-conversion/diy-fp.cc', 'double-conversion/double-conversion.cc', 'double-conversion/fast-dtoa.cc', 'double-conversion/fixed-dtoa.cc',
--- a/mobile/android/base/java/org/mozilla/gecko/GeckoApp.java +++ b/mobile/android/base/java/org/mozilla/gecko/GeckoApp.java @@ -108,16 +108,17 @@ import android.widget.Toast; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.lang.ref.WeakReference; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Locale; @@ -147,31 +148,35 @@ public abstract class GeckoApp public static final String ACTION_LOAD = "org.mozilla.gecko.LOAD"; public static final String ACTION_INIT_PW = "org.mozilla.gecko.INIT_PW"; public static final String ACTION_SWITCH_TAB = "org.mozilla.gecko.SWITCH_TAB"; public static final String INTENT_REGISTER_STUMBLER_LISTENER = "org.mozilla.gecko.STUMBLER_REGISTER_LOCAL_LISTENER"; public static final String EXTRA_STATE_BUNDLE = "stateBundle"; + public static final String LAST_SELECTED_TAB = "lastSelectedTab"; + public static final String PREFS_ALLOW_STATE_BUNDLE = "allowStateBundle"; public static final String PREFS_VERSION_CODE = "versionCode"; public static final String PREFS_WAS_STOPPED = "wasStopped"; public static final String PREFS_CRASHED_COUNT = "crashedCount"; public static final String PREFS_CLEANUP_TEMP_FILES = "cleanupTempFiles"; public static final String SAVED_STATE_IN_BACKGROUND = "inBackground"; public static final String SAVED_STATE_PRIVATE_SESSION = "privateSession"; // Delay before running one-time "cleanup" tasks that may be needed // after a version upgrade. 
private static final int CLEANUP_DEFERRAL_SECONDS = 15; private static boolean sAlreadyLoaded; + private static WeakReference<GeckoApp> lastActiveGeckoApp; + protected RelativeLayout mRootLayout; protected RelativeLayout mMainLayout; protected RelativeLayout mGeckoLayout; private OrientationEventListener mCameraOrientationEventListener; public List<GeckoAppShell.AppStateListener> mAppStateListeners = new LinkedList<GeckoAppShell.AppStateListener>(); protected MenuPanel mMenuPanel; protected Menu mMenu; @@ -196,16 +201,18 @@ public abstract class GeckoApp private final HashMap<String, PowerManager.WakeLock> mWakeLocks = new HashMap<String, PowerManager.WakeLock>(); protected boolean mLastSessionCrashed; protected boolean mShouldRestore; private boolean mSessionRestoreParsingFinished = false; private EventDispatcher eventDispatcher; + private int lastSelectedTabId = -1; + private static final class LastSessionParser extends SessionParser { private JSONArray tabs; private JSONObject windowObject; private boolean isExternalURL; private boolean selectNextTab; private boolean tabsWereSkipped; private boolean tabsWereProcessed; @@ -572,16 +579,22 @@ public abstract class GeckoApp } @Override protected void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); outState.putBoolean(SAVED_STATE_IN_BACKGROUND, isApplicationInBackground()); outState.putString(SAVED_STATE_PRIVATE_SESSION, mPrivateBrowsingSession); + outState.putInt(LAST_SELECTED_TAB, lastSelectedTabId); + } + + @Override + protected void onRestoreInstanceState(final Bundle inState) { + lastSelectedTabId = inState.getInt(LAST_SELECTED_TAB); } public void addTab() { } public void addPrivateTab() { } public void showNormalTabs() { } @@ -1993,43 +2006,46 @@ public abstract class GeckoApp if (!TextUtils.isEmpty(uri)) { passedUri = uri; } else { passedUri = null; } if (ACTION_LOAD.equals(action)) { Tabs.getInstance().loadUrl(intent.getDataString()); + lastSelectedTabId = -1; } else if 
(Intent.ACTION_VIEW.equals(action)) { processActionViewIntent(new Runnable() { @Override public void run() { final String url = intent.getDataString(); int flags = Tabs.LOADURL_NEW_TAB | Tabs.LOADURL_USER_ENTERED | Tabs.LOADURL_EXTERNAL; if (isFirstTab) { flags |= Tabs.LOADURL_FIRST_AFTER_ACTIVITY_UNHIDDEN; } Tabs.getInstance().loadUrlWithIntentExtras(url, intent, flags); } }); + lastSelectedTabId = -1; } else if (ACTION_HOMESCREEN_SHORTCUT.equals(action)) { mLayerView.loadUri(uri, GeckoView.LOAD_SWITCH_TAB); } else if (Intent.ACTION_SEARCH.equals(action)) { mLayerView.loadUri(uri, GeckoView.LOAD_NEW_TAB); } else if (NotificationHelper.HELPER_BROADCAST_ACTION.equals(action)) { NotificationHelper.getInstance(getApplicationContext()).handleNotificationIntent(intent); } else if (ACTION_LAUNCH_SETTINGS.equals(action)) { // Check if launched from data reporting notification. Intent settingsIntent = new Intent(GeckoApp.this, GeckoPreferences.class); // Copy extras. settingsIntent.putExtras(intent.getUnsafe()); startActivity(settingsIntent); } else if (ACTION_SWITCH_TAB.equals(action)) { final int tabId = intent.getIntExtra("TabId", -1); Tabs.getInstance().selectTab(tabId); + lastSelectedTabId = -1; } recordStartupActionTelemetry(passedUri, action); } /** * Handles getting a URI from an intent in a way that is backwards- * compatible with our previous implementations. @@ -2055,16 +2071,20 @@ public abstract class GeckoApp // Undo whatever we did in onPause. 
super.onResume(); if (mIsAbortingAppLaunch) { return; } GeckoAppShell.setGeckoInterface(this); + if (lastSelectedTabId >= 0 && (lastActiveGeckoApp == null || lastActiveGeckoApp.get() != this)) { + Tabs.getInstance().selectTab(lastSelectedTabId); + } + int newOrientation = getResources().getConfiguration().orientation; if (GeckoScreenOrientation.getInstance().update(newOrientation)) { refreshChrome(); } if (mAppStateListeners != null) { for (GeckoAppShell.AppStateListener listener : mAppStateListeners) { listener.onResume(); @@ -2129,16 +2149,19 @@ public abstract class GeckoApp @Override public void onPause() { if (mIsAbortingAppLaunch) { super.onPause(); return; } + lastSelectedTabId = Tabs.getInstance().getSelectedTab().getId(); + lastActiveGeckoApp = new WeakReference<GeckoApp>(this); + final HealthRecorder rec = mHealthRecorder; final Context context = this; // In some way it's sad that Android will trigger StrictMode warnings // here as the whole point is to save to disk while the activity is not // interacting with the user. ThreadUtils.postToBackgroundThread(new Runnable() { @Override
--- a/mobile/android/base/java/org/mozilla/gecko/customtabs/CustomTabsActivity.java +++ b/mobile/android/base/java/org/mozilla/gecko/customtabs/CustomTabsActivity.java @@ -30,42 +30,57 @@ import org.mozilla.gecko.util.NativeJSOb import org.mozilla.gecko.util.ThreadUtils; import java.lang.reflect.Field; import static android.support.customtabs.CustomTabsIntent.EXTRA_TOOLBAR_COLOR; public class CustomTabsActivity extends GeckoApp implements Tabs.OnTabsChangedListener { private static final String LOGTAG = "CustomTabsActivity"; + private static final String SAVED_TOOLBAR_COLOR = "SavedToolbarColor"; + private static final String SAVED_TOOLBAR_TITLE = "SavedToolbarTitle"; private static final int NO_COLOR = -1; private Toolbar toolbar; private ActionBar actionBar; private int tabId = -1; private boolean useDomainTitle = true; + private int toolbarColor; + private String toolbarTitle; + @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); + + if (savedInstanceState != null) { + toolbarColor = savedInstanceState.getInt(SAVED_TOOLBAR_COLOR, NO_COLOR); + toolbarTitle = savedInstanceState.getString(SAVED_TOOLBAR_TITLE, AppConstants.MOZ_APP_BASENAME); + } else { + toolbarColor = NO_COLOR; + toolbarTitle = AppConstants.MOZ_APP_BASENAME; + } + Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar); updateActionBarWithToolbar(toolbar); try { // Since we don't create the Toolbar's TextView ourselves, this seems // to be the only way of changing the ellipsize setting. Field f = toolbar.getClass().getDeclaredField("mTitleTextView"); f.setAccessible(true); TextView textView = (TextView) f.get(toolbar); textView.setEllipsize(TextUtils.TruncateAt.START); } catch (Exception e) { // If we can't ellipsize at the start of the title, we shouldn't display the host // so as to avoid displaying a misleadingly truncated host. 
Log.w(LOGTAG, "Failed to get Toolbar TextView, using default title."); useDomainTitle = false; } actionBar = getSupportActionBar(); + actionBar.setTitle(toolbarTitle); updateToolbarColor(toolbar); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { onBackPressed(); } }); @@ -102,23 +117,32 @@ public class CustomTabsActivity extends if (msg == Tabs.TabEvents.LOCATION_CHANGE) { tabId = tab.getId(); final Uri uri = Uri.parse(tab.getURL()); String title = null; if (uri != null) { title = uri.getHost(); } if (!useDomainTitle || title == null || title.isEmpty()) { - actionBar.setTitle(AppConstants.MOZ_APP_BASENAME); + toolbarTitle = AppConstants.MOZ_APP_BASENAME; } else { - actionBar.setTitle(title); + toolbarTitle = title; } + actionBar.setTitle(toolbarTitle); } } + @Override + protected void onSaveInstanceState(Bundle outState) { + super.onSaveInstanceState(outState); + + outState.putInt(SAVED_TOOLBAR_COLOR, toolbarColor); + outState.putString(SAVED_TOOLBAR_TITLE, toolbarTitle); + } + public boolean onOptionsItemSelected(MenuItem item) { switch (item.getItemId()) { case android.R.id.home: finish(); return true; } return super.onOptionsItemSelected(item); } @@ -127,20 +151,24 @@ public class CustomTabsActivity extends setSupportActionBar(toolbar); final ActionBar ab = getSupportActionBar(); if (ab != null) { ab.setDisplayHomeAsUpEnabled(true); } } private void updateToolbarColor(final Toolbar toolbar) { - final int color = getIntent().getIntExtra(EXTRA_TOOLBAR_COLOR, NO_COLOR); - if (color == NO_COLOR) { - return; + if (toolbarColor == NO_COLOR) { + final int color = getIntent().getIntExtra(EXTRA_TOOLBAR_COLOR, NO_COLOR); + if (color == NO_COLOR) { + return; + } + toolbarColor = color; } - toolbar.setBackgroundColor(color); + + toolbar.setBackgroundColor(toolbarColor); final Window window = getWindow(); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { 
window.addFlags(WindowManager.LayoutParams.FLAG_DRAWS_SYSTEM_BAR_BACKGROUNDS); - window.setStatusBarColor(ColorUtil.darken(color, 0.25)); + window.setStatusBarColor(ColorUtil.darken(toolbarColor, 0.25)); } } }
--- a/modules/fdlibm/src/moz.build +++ b/modules/fdlibm/src/moz.build @@ -3,17 +3,17 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. EXPORTS += [ 'fdlibm.h', ] -FINAL_LIBRARY = 'fdlibm' +FINAL_LIBRARY = 'js' if CONFIG['GNU_CXX']: CXXFLAGS += [ '-Wno-parentheses', '-Wno-sign-compare', ] if CONFIG['CLANG_CXX']:
--- a/modules/libpref/init/all.js +++ b/modules/libpref/init/all.js @@ -1980,16 +1980,17 @@ pref("network.proxy.proxy_over_tls", pref("network.proxy.no_proxies_on", "localhost, 127.0.0.1"); pref("network.proxy.failover_timeout", 1800); // 30 minutes pref("network.online", true); //online/offline pref("network.cookie.cookieBehavior", 0); // 0-Accept, 1-dontAcceptForeign, 2-dontAcceptAny, 3-limitForeign #ifdef ANDROID pref("network.cookie.cookieBehavior", 0); // Keep the old default of accepting all cookies #endif pref("network.cookie.thirdparty.sessionOnly", false); +pref("network.cookie.leave-secure-alone", true); pref("network.cookie.lifetimePolicy", 0); // 0-accept, 1-dontUse 2-acceptForSession, 3-acceptForNDays pref("network.cookie.prefsMigrated", false); pref("network.cookie.lifetime.days", 90); // Ignored unless network.cookie.lifetimePolicy is 3. // The PAC file to load. Ignored unless network.proxy.type is 2. pref("network.proxy.autoconfig_url", ""); // Strip off paths when sending URLs to PAC scripts pref("network.proxy.autoconfig_url.include_path", false); @@ -2130,16 +2131,17 @@ pref("signed.applets.codebase_principal_ pref("security.checkloaduri", true); pref("security.xpconnect.plugin.unrestricted", true); // security-sensitive dialogs should delay button enabling. In milliseconds. pref("security.dialog_enable_delay", 1000); pref("security.notification_enable_delay", 500); pref("security.csp.enable", true); pref("security.csp.experimentalEnabled", false); +pref("security.csp.enableStrictDynamic", true); // Default Content Security Policy to apply to signed contents. pref("security.signed_content.CSP.default", "script-src 'self'; style-src 'self'"); // Mixed content blocking pref("security.mixed_content.block_active_content", false); pref("security.mixed_content.block_display_content", false);
--- a/mozglue/build/moz.build +++ b/mozglue/build/moz.build @@ -86,16 +86,17 @@ if not CONFIG['JS_STANDALONE']: 'cpuacct.c', ] USE_LIBS += [ 'mfbt', ] DEFINES['IMPL_MFBT'] = True +LIBRARY_DEFINES['MOZ_HAS_MOZGLUE'] = True LDFLAGS += CONFIG['MOZ_GLUE_WRAP_LDFLAGS'] if CONFIG['OS_TARGET'] == 'Darwin': # On OSX 10.10.3, a dead lock happens in some cases involving dynamic # symbol resolution for symbols that jemalloc itself uses. While it # might be possible to find a way to avoid all such symbol resolutions, # it's currently not possible because at the very least there's a call
--- a/mozglue/linker/tests/moz.build +++ b/mozglue/linker/tests/moz.build @@ -7,16 +7,17 @@ DIST_INSTALL = False SimplePrograms([ 'TestZip', ]) LOCAL_INCLUDES += ['..'] USE_LIBS += [ 'linker', + 'mfbt', ] OS_LIBS += CONFIG['MOZ_ZLIB_LIBS'] DISABLE_STL_WRAPPING = True PYTHON_UNIT_TESTS += ['run_test_zip.py'] if CONFIG['GNU_CXX']: CXXFLAGS += ['-Wno-error=shadow']
--- a/netwerk/cache2/CacheFile.cpp +++ b/netwerk/cache2/CacheFile.cpp @@ -2295,22 +2295,19 @@ CacheFile::InitIndexEntry() { MOZ_ASSERT(mHandle); if (mHandle->IsDoomed()) return NS_OK; nsresult rv; - // Bug 1201042 - will pass OriginAttributes directly. - rv = CacheFileIOManager::InitIndexEntry(mHandle, - mMetadata->OriginAttributes().mAppId, - mMetadata->IsAnonymous(), - mMetadata->OriginAttributes().mInIsolatedMozBrowser, - mPinned); + rv = CacheFileIOManager::InitIndexEntry( + mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()), + mMetadata->IsAnonymous(), mPinned); NS_ENSURE_SUCCESS(rv, rv); uint32_t expTime; mMetadata->GetExpirationTime(&expTime); uint32_t frecency; mMetadata->GetFrecency(&frecency);
--- a/netwerk/cache2/CacheFileIOManager.cpp +++ b/netwerk/cache2/CacheFileIOManager.cpp @@ -960,22 +960,22 @@ public: protected: RefPtr<CacheFileHandle> mHandle; nsCString mNewName; nsCOMPtr<CacheFileIOListener> mCallback; }; class InitIndexEntryEvent : public Runnable { public: - InitIndexEntryEvent(CacheFileHandle *aHandle, uint32_t aAppId, - bool aAnonymous, bool aInIsolatedMozBrowser, bool aPinning) + InitIndexEntryEvent(CacheFileHandle *aHandle, + OriginAttrsHash aOriginAttrsHash, bool aAnonymous, + bool aPinning) : mHandle(aHandle) - , mAppId(aAppId) + , mOriginAttrsHash(aOriginAttrsHash) , mAnonymous(aAnonymous) - , mInIsolatedMozBrowser(aInIsolatedMozBrowser) , mPinning(aPinning) { MOZ_COUNT_CTOR(InitIndexEntryEvent); } protected: ~InitIndexEntryEvent() { @@ -984,34 +984,34 @@ protected: public: NS_IMETHOD Run() override { if (mHandle->IsClosed() || mHandle->IsDoomed()) { return NS_OK; } - CacheIndex::InitEntry(mHandle->Hash(), mAppId, mAnonymous, mInIsolatedMozBrowser, mPinning); + CacheIndex::InitEntry(mHandle->Hash(), mOriginAttrsHash, mAnonymous, + mPinning); // We cannot set the filesize before we init the entry. If we're opening // an existing entry file, frecency and expiration time will be set after // parsing the entry file, but we must set the filesize here since nobody is // going to set it if there is no write to the file. uint32_t sizeInK = mHandle->FileSizeInK(); CacheIndex::UpdateEntry(mHandle->Hash(), nullptr, nullptr, &sizeInK); return NS_OK; } protected: RefPtr<CacheFileHandle> mHandle; - uint32_t mAppId; - bool mAnonymous; - bool mInIsolatedMozBrowser; - bool mPinning; + OriginAttrsHash mOriginAttrsHash; + bool mAnonymous; + bool mPinning; }; class UpdateIndexEntryEvent : public Runnable { public: UpdateIndexEntryEvent(CacheFileHandle *aHandle, const uint32_t *aFrecency, const uint32_t *aExpirationTime) : mHandle(aHandle) , mHasFrecency(false) @@ -3484,38 +3484,37 @@ CacheFileIOManager::FindTrashDirToRemove // trash directories next time. 
mFailedTrashDirs.Clear(); return NS_ERROR_NOT_AVAILABLE; } // static nsresult CacheFileIOManager::InitIndexEntry(CacheFileHandle *aHandle, - uint32_t aAppId, + OriginAttrsHash aOriginAttrsHash, bool aAnonymous, - bool aInIsolatedMozBrowser, bool aPinning) { - LOG(("CacheFileIOManager::InitIndexEntry() [handle=%p, appId=%u, anonymous=%d" - ", inIsolatedMozBrowser=%d, pinned=%d]", aHandle, aAppId, aAnonymous, - aInIsolatedMozBrowser, aPinning)); + LOG(("CacheFileIOManager::InitIndexEntry() [handle=%p, originAttrsHash=%llx, " + "anonymous=%d, pinning=%d]", aHandle, aOriginAttrsHash, aAnonymous, + aPinning)); nsresult rv; RefPtr<CacheFileIOManager> ioMan = gInstance; if (aHandle->IsClosed() || !ioMan) { return NS_ERROR_NOT_INITIALIZED; } if (aHandle->IsSpecialFile()) { return NS_ERROR_UNEXPECTED; } RefPtr<InitIndexEntryEvent> ev = - new InitIndexEntryEvent(aHandle, aAppId, aAnonymous, aInIsolatedMozBrowser, aPinning); + new InitIndexEntryEvent(aHandle, aOriginAttrsHash, aAnonymous, aPinning); rv = ioMan->mIOThread->Dispatch(ev, aHandle->mPriority ? CacheIOThread::WRITE_PRIORITY : CacheIOThread::WRITE); NS_ENSURE_SUCCESS(rv, rv); return NS_OK; }
--- a/netwerk/cache2/CacheFileIOManager.h +++ b/netwerk/cache2/CacheFileIOManager.h @@ -2,16 +2,17 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef CacheFileIOManager__h__ #define CacheFileIOManager__h__ #include "CacheIOThread.h" #include "CacheStorageService.h" +#include "CacheHashUtils.h" #include "nsIEventTarget.h" #include "nsITimer.h" #include "nsCOMPtr.h" #include "mozilla/Atomics.h" #include "mozilla/SHA1.h" #include "mozilla/StaticPtr.h" #include "mozilla/TimeStamp.h" #include "nsTArray.h" @@ -325,19 +326,18 @@ public: const nsACString &aNewName, CacheFileIOListener *aCallback); static nsresult EvictIfOverLimit(); static nsresult EvictAll(); static nsresult EvictByContext(nsILoadContextInfo *aLoadContextInfo, bool aPinning); static nsresult InitIndexEntry(CacheFileHandle *aHandle, - uint32_t aAppId, + OriginAttrsHash aOriginAttrsHash, bool aAnonymous, - bool aInIsolatedMozBrowser, bool aPinning); static nsresult UpdateIndexEntry(CacheFileHandle *aHandle, const uint32_t *aFrecency, const uint32_t *aExpirationTime); static nsresult UpdateIndexEntry(); enum EEnumerateMode {
--- a/netwerk/cache2/CacheHashUtils.cpp +++ b/netwerk/cache2/CacheHashUtils.cpp @@ -1,14 +1,15 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "CacheHashUtils.h" +#include "mozilla/BasePrincipal.h" #include "plstr.h" namespace mozilla { namespace net { /** * CacheHash::Hash(const char * key, uint32_t initval) * @@ -182,10 +183,24 @@ CacheHash::GetHash() CacheHash::Hash16_t CacheHash::GetHash16() { Hash32_t hash = GetHash(); return (hash & 0xFFFF); } +OriginAttrsHash +GetOriginAttrsHash(const mozilla::OriginAttributes &aOA) +{ + nsAutoCString suffix; + aOA.CreateSuffix(suffix); + + SHA1Sum sum; + SHA1Sum::Hash hash; + sum.update(suffix.BeginReading(), suffix.Length()); + sum.finish(hash); + + return BigEndian::readUint64(&hash); +} + } // namespace net } // namespace mozilla
--- a/netwerk/cache2/CacheHashUtils.h +++ b/netwerk/cache2/CacheHashUtils.h @@ -16,16 +16,19 @@ PR_htonl((reinterpret_cast<const uint32_t *>(x))[2]), \ PR_htonl((reinterpret_cast<const uint32_t *>(x))[3]), \ PR_htonl((reinterpret_cast<const uint32_t *>(x))[4]) #define SHA1STRING(x) \ (nsPrintfCString("%08x%08x%08x%08x%08x", LOGSHA1(x)).get()) namespace mozilla { + +class OriginAttributes; + namespace net { class CacheHash : public nsISupports { public: NS_DECL_THREADSAFE_ISUPPORTS typedef uint16_t Hash16_t; @@ -49,13 +52,16 @@ private: uint32_t mA, mB, mC; uint8_t mPos; uint32_t mBuf; uint8_t mBufPos; uint32_t mLength; bool mFinalized; }; +typedef uint64_t OriginAttrsHash; + +OriginAttrsHash GetOriginAttrsHash(const mozilla::OriginAttributes &aOA); } // namespace net } // namespace mozilla #endif
--- a/netwerk/cache2/CacheIndex.cpp +++ b/netwerk/cache2/CacheIndex.cpp @@ -22,17 +22,17 @@ #include <algorithm> #include "mozilla/Telemetry.h" #include "mozilla/Unused.h" #define kMinUnwrittenChanges 300 #define kMinDumpInterval 20000 // in milliseconds #define kMaxBufSize 16384 -#define kIndexVersion 0x00000001 +#define kIndexVersion 0x00000002 #define kUpdateIndexStartDelay 50000 // in milliseconds #define INDEX_NAME "index" #define TEMP_INDEX_NAME "index.tmp" #define JOURNAL_NAME "index.log" namespace mozilla { namespace net { @@ -704,24 +704,23 @@ CacheIndex::EnsureEntryExists(const SHA1 index->WriteIndexToDiskIfNeeded(); return NS_OK; } // static nsresult CacheIndex::InitEntry(const SHA1Sum::Hash *aHash, - uint32_t aAppId, + OriginAttrsHash aOriginAttrsHash, bool aAnonymous, - bool aInIsolatedMozBrowser, bool aPinned) { - LOG(("CacheIndex::InitEntry() [hash=%08x%08x%08x%08x%08x, appId=%u, " - "anonymous=%d, inIsolatedMozBrowser=%d, pinned=%d]", LOGSHA1(aHash), - aAppId, aAnonymous, aInIsolatedMozBrowser, aPinned)); + LOG(("CacheIndex::InitEntry() [hash=%08x%08x%08x%08x%08x, " + "originAttrsHash=%llx, anonymous=%d, pinned=%d]", LOGSHA1(aHash), + aOriginAttrsHash, aAnonymous, aPinned)); MOZ_ASSERT(CacheFileIOManager::IsOnIOThread()); StaticMutexAutoLock lock(sLock); RefPtr<CacheIndex> index = gInstance; if (!index) { @@ -744,17 +743,17 @@ CacheIndex::InitEntry(const SHA1Sum::Has } if (index->mState == READY || index->mState == UPDATING || index->mState == BUILDING) { MOZ_ASSERT(index->mPendingUpdates.Count() == 0); MOZ_ASSERT(entry); MOZ_ASSERT(entry->IsFresh()); - if (IsCollision(entry, aAppId, aAnonymous, aInIsolatedMozBrowser)) { + if (IsCollision(entry, aOriginAttrsHash, aAnonymous)) { index->mIndexNeedsUpdate = true; // TODO Does this really help in case of collision? 
reinitEntry = true; } else { if (entry->IsInitialized()) { return NS_OK; } } } else { @@ -762,28 +761,28 @@ CacheIndex::InitEntry(const SHA1Sum::Has DebugOnly<bool> removed = updated && updated->IsRemoved(); MOZ_ASSERT(updated || !removed); MOZ_ASSERT(updated || entry); if (updated) { MOZ_ASSERT(updated->IsFresh()); - if (IsCollision(updated, aAppId, aAnonymous, aInIsolatedMozBrowser)) { + if (IsCollision(updated, aOriginAttrsHash, aAnonymous)) { index->mIndexNeedsUpdate = true; reinitEntry = true; } else { if (updated->IsInitialized()) { return NS_OK; } } } else { MOZ_ASSERT(entry->IsFresh()); - if (IsCollision(entry, aAppId, aAnonymous, aInIsolatedMozBrowser)) { + if (IsCollision(entry, aOriginAttrsHash, aAnonymous)) { index->mIndexNeedsUpdate = true; reinitEntry = true; } else { if (entry->IsInitialized()) { return NS_OK; } } @@ -801,20 +800,20 @@ CacheIndex::InitEntry(const SHA1Sum::Has updated->MarkFresh(); } else { entry->InitNew(); entry->MarkFresh(); } } if (updated) { - updated->Init(aAppId, aAnonymous, aInIsolatedMozBrowser, aPinned); + updated->Init(aOriginAttrsHash, aAnonymous, aPinned); updated->MarkDirty(); } else { - entry->Init(aAppId, aAnonymous, aInIsolatedMozBrowser, aPinned); + entry->Init(aOriginAttrsHash, aAnonymous, aPinned); entry->MarkDirty(); } } index->StartUpdatingIndexIfNeeded(); index->WriteIndexToDiskIfNeeded(); return NS_OK; @@ -1488,32 +1487,30 @@ CacheIndex::IsIndexUsable() } return true; } // static bool CacheIndex::IsCollision(CacheIndexEntry *aEntry, - uint32_t aAppId, - bool aAnonymous, - bool aInIsolatedMozBrowser) + OriginAttrsHash aOriginAttrsHash, + bool aAnonymous) { if (!aEntry->IsInitialized()) { return false; } - if (aEntry->AppId() != aAppId || aEntry->Anonymous() != aAnonymous || - aEntry->InIsolatedMozBrowser() != aInIsolatedMozBrowser) { + if (aEntry->Anonymous() != aAnonymous || + aEntry->OriginAttrsHash() != aOriginAttrsHash) { LOG(("CacheIndex::IsCollision() - Collision detected for entry hash=%08x" - 
"%08x%08x%08x%08x, expected values: appId=%u, anonymous=%d, " - "inIsolatedMozBrowser=%d; actual values: appId=%u, anonymous=%d, " - "inIsolatedMozBrowser=%d]", - LOGSHA1(aEntry->Hash()), aAppId, aAnonymous, aInIsolatedMozBrowser, - aEntry->AppId(), aEntry->Anonymous(), aEntry->InIsolatedMozBrowser())); + "%08x%08x%08x%08x, expected values: originAttrsHash=%llx, " + "anonymous=%d; actual values: originAttrsHash=%llx, anonymous=%d]", + LOGSHA1(aEntry->Hash()), aOriginAttrsHash, aAnonymous, + aEntry->OriginAttrsHash(), aEntry->Anonymous())); return true; } return false; } // static bool @@ -2652,20 +2649,18 @@ void CacheIndex::InitEntryFromDiskData(CacheIndexEntry *aEntry, CacheFileMetadata *aMetaData, int64_t aFileSize) { aEntry->InitNew(); aEntry->MarkDirty(); aEntry->MarkFresh(); - // Bug 1201042 - will pass OriginAttributes directly. - aEntry->Init(aMetaData->OriginAttributes().mAppId, + aEntry->Init(GetOriginAttrsHash(aMetaData->OriginAttributes()), aMetaData->IsAnonymous(), - aMetaData->OriginAttributes().mInIsolatedMozBrowser, aMetaData->Pinned()); uint32_t expirationTime; aMetaData->GetExpirationTime(&expirationTime); aEntry->SetExpirationTime(expirationTime); uint32_t frecency; aMetaData->GetFrecency(&frecency);
--- a/netwerk/cache2/CacheIndex.h +++ b/netwerk/cache2/CacheIndex.h @@ -52,37 +52,37 @@ typedef struct { // We set this flag as soon as possible after parsing index during startup // and clean it after we write journal to disk during shutdown. We ignore the // journal and start update process whenever this flag is set during index // parsing. uint32_t mIsDirty; } CacheIndexHeader; struct CacheIndexRecord { - SHA1Sum::Hash mHash; - uint32_t mFrecency; - uint32_t mExpirationTime; - uint32_t mAppId; + SHA1Sum::Hash mHash; + uint32_t mFrecency; + uint32_t mExpirationTime; + OriginAttrsHash mOriginAttrsHash; /* * 1000 0000 0000 0000 0000 0000 0000 0000 : initialized * 0100 0000 0000 0000 0000 0000 0000 0000 : anonymous - * 0010 0000 0000 0000 0000 0000 0000 0000 : inIsolatedMozBrowser - * 0001 0000 0000 0000 0000 0000 0000 0000 : removed - * 0000 1000 0000 0000 0000 0000 0000 0000 : dirty - * 0000 0100 0000 0000 0000 0000 0000 0000 : fresh + * 0010 0000 0000 0000 0000 0000 0000 0000 : removed + * 0001 0000 0000 0000 0000 0000 0000 0000 : dirty + * 0000 1000 0000 0000 0000 0000 0000 0000 : fresh + * 0000 0100 0000 0000 0000 0000 0000 0000 : pinned * 0000 0011 0000 0000 0000 0000 0000 0000 : reserved * 0000 0000 1111 1111 1111 1111 1111 1111 : file size (in kB) */ uint32_t mFlags; CacheIndexRecord() : mFrecency(0) , mExpirationTime(nsICacheEntry::NO_EXPIRATION_TIME) - , mAppId(nsILoadContextInfo::NO_APP_ID) + , mOriginAttrsHash(0) , mFlags(0) {} }; class CacheIndexEntry : public PLDHashEntryHdr { public: typedef const SHA1Sum::Hash& KeyType; @@ -131,60 +131,54 @@ public: } CacheIndexEntry& operator=(const CacheIndexEntry& aOther) { MOZ_ASSERT(memcmp(&mRec->mHash, &aOther.mRec->mHash, sizeof(SHA1Sum::Hash)) == 0); mRec->mFrecency = aOther.mRec->mFrecency; mRec->mExpirationTime = aOther.mRec->mExpirationTime; - mRec->mAppId = aOther.mRec->mAppId; + mRec->mOriginAttrsHash = aOther.mRec->mOriginAttrsHash; mRec->mFlags = aOther.mRec->mFlags; return *this; } void InitNew() { 
mRec->mFrecency = 0; mRec->mExpirationTime = nsICacheEntry::NO_EXPIRATION_TIME; - mRec->mAppId = nsILoadContextInfo::NO_APP_ID; + mRec->mOriginAttrsHash = 0; mRec->mFlags = 0; } - void Init(uint32_t aAppId, bool aAnonymous, bool aInIsolatedMozBrowser, bool aPinned) + void Init(OriginAttrsHash aOriginAttrsHash, bool aAnonymous, bool aPinned) { MOZ_ASSERT(mRec->mFrecency == 0); MOZ_ASSERT(mRec->mExpirationTime == nsICacheEntry::NO_EXPIRATION_TIME); - MOZ_ASSERT(mRec->mAppId == nsILoadContextInfo::NO_APP_ID); + MOZ_ASSERT(mRec->mOriginAttrsHash == 0); // When we init the entry it must be fresh and may be dirty MOZ_ASSERT((mRec->mFlags & ~kDirtyMask) == kFreshMask); - mRec->mAppId = aAppId; + mRec->mOriginAttrsHash = aOriginAttrsHash; mRec->mFlags |= kInitializedMask; if (aAnonymous) { mRec->mFlags |= kAnonymousMask; } - if (aInIsolatedMozBrowser) { - mRec->mFlags |= kInIsolatedMozBrowserMask; - } if (aPinned) { mRec->mFlags |= kPinnedMask; } } const SHA1Sum::Hash * Hash() const { return &mRec->mHash; } bool IsInitialized() const { return !!(mRec->mFlags & kInitializedMask); } - uint32_t AppId() const { return mRec->mAppId; } - bool Anonymous() const { return !!(mRec->mFlags & kAnonymousMask); } - bool InIsolatedMozBrowser() const - { - return !!(mRec->mFlags & kInIsolatedMozBrowserMask); - } + mozilla::net::OriginAttrsHash OriginAttrsHash() const { return mRec->mOriginAttrsHash; } + + bool Anonymous() const { return !!(mRec->mFlags & kAnonymousMask); } bool IsRemoved() const { return !!(mRec->mFlags & kRemovedMask); } void MarkRemoved() { mRec->mFlags |= kRemovedMask; } bool IsDirty() const { return !!(mRec->mFlags & kDirtyMask); } void MarkDirty() { mRec->mFlags |= kDirtyMask; } void ClearDirty() { mRec->mFlags &= ~kDirtyMask; } @@ -237,50 +231,48 @@ public: dst->mFlags &= ~kDirtyMask; dst->mFlags &= ~kFreshMask; #if defined(IS_LITTLE_ENDIAN) // Data in the buffer are in machine byte order and we want them in network // byte order. 
NetworkEndian::writeUint32(&dst->mFrecency, dst->mFrecency); NetworkEndian::writeUint32(&dst->mExpirationTime, dst->mExpirationTime); - NetworkEndian::writeUint32(&dst->mAppId, dst->mAppId); + NetworkEndian::writeUint64(&dst->mOriginAttrsHash, dst->mOriginAttrsHash); NetworkEndian::writeUint32(&dst->mFlags, dst->mFlags); #endif } void ReadFromBuf(void *aBuf) { CacheIndexRecord *src= reinterpret_cast<CacheIndexRecord *>(aBuf); MOZ_ASSERT(memcmp(&mRec->mHash, &src->mHash, sizeof(SHA1Sum::Hash)) == 0); mRec->mFrecency = NetworkEndian::readUint32(&src->mFrecency); mRec->mExpirationTime = NetworkEndian::readUint32(&src->mExpirationTime); - mRec->mAppId = NetworkEndian::readUint32(&src->mAppId); + mRec->mOriginAttrsHash = NetworkEndian::readUint64(&src->mOriginAttrsHash); mRec->mFlags = NetworkEndian::readUint32(&src->mFlags); } void Log() const { - LOG(("CacheIndexEntry::Log() [this=%p, hash=%08x%08x%08x%08x%08x, " - "fresh=%u, initialized=%u, removed=%u, dirty=%u, anonymous=%u, " - "inIsolatedMozBrowser=%u, appId=%u, frecency=%u, expirationTime=%u, " - "size=%u]", + LOG(("CacheIndexEntry::Log() [this=%p, hash=%08x%08x%08x%08x%08x, fresh=%u," + " initialized=%u, removed=%u, dirty=%u, anonymous=%u, " + "originAttrsHash=%llx, frecency=%u, expirationTime=%u, size=%u]", this, LOGSHA1(mRec->mHash), IsFresh(), IsInitialized(), IsRemoved(), - IsDirty(), Anonymous(), InIsolatedMozBrowser(), AppId(), GetFrecency(), + IsDirty(), Anonymous(), OriginAttrsHash(), GetFrecency(), GetExpirationTime(), GetFileSize())); } static bool RecordMatchesLoadContextInfo(CacheIndexRecord *aRec, nsILoadContextInfo *aInfo) { if (!aInfo->IsPrivate() && - aInfo->OriginAttributesPtr()->mAppId == aRec->mAppId && - aInfo->IsAnonymous() == !!(aRec->mFlags & kAnonymousMask) && - aInfo->OriginAttributesPtr()->mInIsolatedMozBrowser == !!(aRec->mFlags & kInIsolatedMozBrowserMask)) { + GetOriginAttrsHash(*aInfo->OriginAttributesPtr()) == aRec->mOriginAttrsHash && + aInfo->IsAnonymous() == !!(aRec->mFlags & 
kAnonymousMask)) { return true; } return false; } // Memory reporting size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const @@ -295,35 +287,34 @@ public: private: friend class CacheIndexEntryUpdate; friend class CacheIndex; friend class CacheIndexEntryAutoManage; static const uint32_t kInitializedMask = 0x80000000; static const uint32_t kAnonymousMask = 0x40000000; - static const uint32_t kInIsolatedMozBrowserMask = 0x20000000; // This flag is set when the entry was removed. We need to keep this // information in memory until we write the index file. - static const uint32_t kRemovedMask = 0x10000000; + static const uint32_t kRemovedMask = 0x20000000; // This flag is set when the information in memory is not in sync with the // information in index file on disk. - static const uint32_t kDirtyMask = 0x08000000; + static const uint32_t kDirtyMask = 0x10000000; // This flag is set when the information about the entry is fresh, i.e. // we've created or opened this entry during this session, or we've seen // this entry during update or build process. - static const uint32_t kFreshMask = 0x04000000; + static const uint32_t kFreshMask = 0x08000000; // Indicates a pinned entry. 
- static const uint32_t kPinnedMask = 0x02000000; + static const uint32_t kPinnedMask = 0x04000000; - static const uint32_t kReservedMask = 0x01000000; + static const uint32_t kReservedMask = 0x03000000; // FileSize in kilobytes static const uint32_t kFileSizeMask = 0x00FFFFFF; nsAutoPtr<CacheIndexRecord> mRec; }; class CacheIndexEntryUpdate : public CacheIndexEntry @@ -380,17 +371,17 @@ public: MOZ_ASSERT(memcmp(&mRec->mHash, &aDst->mRec->mHash, sizeof(SHA1Sum::Hash)) == 0); if (mUpdateFlags & kFrecencyUpdatedMask) { aDst->mRec->mFrecency = mRec->mFrecency; } if (mUpdateFlags & kExpirationUpdatedMask) { aDst->mRec->mExpirationTime = mRec->mExpirationTime; } - aDst->mRec->mAppId = mRec->mAppId; + aDst->mRec->mOriginAttrsHash = mRec->mOriginAttrsHash; if (mUpdateFlags & kFileSizeUpdatedMask) { aDst->mRec->mFlags = mRec->mFlags; } else { // Copy all flags except file size. aDst->mRec->mFlags &= kFileSizeMask; aDst->mRec->mFlags |= (mRec->mFlags & ~kFileSizeMask); } } @@ -620,19 +611,18 @@ public: // created. Like in case of AddEntry(), either InitEntry() or RemoveEntry() // must be called on the entry, since the entry is not initizlized if the // index is outdated. static nsresult EnsureEntryExists(const SHA1Sum::Hash *aHash); // Initialize the entry. It MUST be present in index. Call to AddEntry() or // EnsureEntryExists() must precede the call to this method. static nsresult InitEntry(const SHA1Sum::Hash *aHash, - uint32_t aAppId, + OriginAttrsHash aOriginAttrsHash, bool aAnonymous, - bool aInIsolatedMozBrowser, bool aPinned); // Remove entry from index. The entry should be present in index. static nsresult RemoveEntry(const SHA1Sum::Hash *aHash); // Update some information in entry. The entry MUST be present in index and // MUST be initialized. Call to AddEntry() or EnsureEntryExists() and to // InitEntry() must precede the call to this method. 
@@ -721,24 +711,23 @@ private: NS_IMETHOD OnFileRenamed(CacheFileHandle *aHandle, nsresult aResult) override; nsresult InitInternal(nsIFile *aCacheDirectory); void PreShutdownInternal(); // This method returns false when index is not initialized or is shut down. bool IsIndexUsable(); - // This method checks whether the entry has the same values of appId, - // isAnonymous and isInBrowser. We don't expect to find a collision since - // these values are part of the key that we hash and we use a strong hash - // function. + // This method checks whether the entry has the same values of + // originAttributes and isAnonymous. We don't expect to find a collision + // since these values are part of the key that we hash and we use a strong + // hash function. static bool IsCollision(CacheIndexEntry *aEntry, - uint32_t aAppId, - bool aAnonymous, - bool aInIsolatedMozBrowser); + OriginAttrsHash aOriginAttrsHash, + bool aAnonymous); // Checks whether any of the information about the entry has changed. static bool HasEntryChanged(CacheIndexEntry *aEntry, const uint32_t *aFrecency, const uint32_t *aExpirationTime, const uint32_t *aSize); // Merge all pending operations from mPendingUpdates into mIndex.
--- a/netwerk/cookie/CookieServiceParent.cpp +++ b/netwerk/cookie/CookieServiceParent.cpp @@ -147,23 +147,11 @@ CookieServiceParent::RecvSetCookieString // NB: dummyChannel could be null if something failed in CreateDummyChannel. nsDependentCString cookieString(aCookieString, 0); mCookieService->SetCookieStringInternal(hostURI, aIsForeign, cookieString, aServerTime, aFromHttp, aAttrs, isPrivate, dummyChannel); return true; } -mozilla::ipc::IProtocol* -CookieServiceParent::CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) -{ - NeckoParent* manager = aCtx->GetNeckoParent(); - nsAutoPtr<PCookieServiceParent> actor(manager->AllocPCookieServiceParent()); - if (!actor || !manager->RecvPCookieServiceConstructor(actor)) { - return nullptr; - } - return actor.forget(); -} - } // namespace net } // namespace mozilla
--- a/netwerk/cookie/CookieServiceParent.h +++ b/netwerk/cookie/CookieServiceParent.h @@ -31,20 +31,16 @@ protected: virtual bool RecvSetCookieString(const URIParams& aHost, const bool& aIsForeign, const nsCString& aCookieString, const nsCString& aServerTime, const bool& aFromHttp, const NeckoOriginAttributes& aAttrs) override; - virtual mozilla::ipc::IProtocol* - CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) override; - RefPtr<nsCookieService> mCookieService; }; } // namespace net } // namespace mozilla #endif // mozilla_net_CookieServiceParent_h
--- a/netwerk/cookie/nsCookieService.cpp +++ b/netwerk/cookie/nsCookieService.cpp @@ -108,21 +108,31 @@ static const int64_t kCookiePurgeAge = // default limits for the cookie list. these can be tuned by the // network.cookie.maxNumber and network.cookie.maxPerHost prefs respectively. static const uint32_t kMaxNumberOfCookies = 3000; static const uint32_t kMaxCookiesPerHost = 150; static const uint32_t kMaxBytesPerCookie = 4096; static const uint32_t kMaxBytesPerPath = 1024; // pref string constants -static const char kPrefCookieBehavior[] = "network.cookie.cookieBehavior"; -static const char kPrefMaxNumberOfCookies[] = "network.cookie.maxNumber"; -static const char kPrefMaxCookiesPerHost[] = "network.cookie.maxPerHost"; -static const char kPrefCookiePurgeAge[] = "network.cookie.purgeAge"; -static const char kPrefThirdPartySession[] = "network.cookie.thirdparty.sessionOnly"; +static const char kPrefCookieBehavior[] = "network.cookie.cookieBehavior"; +static const char kPrefMaxNumberOfCookies[] = "network.cookie.maxNumber"; +static const char kPrefMaxCookiesPerHost[] = "network.cookie.maxPerHost"; +static const char kPrefCookiePurgeAge[] = "network.cookie.purgeAge"; +static const char kPrefThirdPartySession[] = "network.cookie.thirdparty.sessionOnly"; +static const char kCookieLeaveSecurityAlone[] = "network.cookie.leave-secure-alone"; + +// For telemetry COOKIE_LEAVE_SECURE_ALONE +#define BLOCKED_SECURE_SET_FROM_HTTP 0 +#define BLOCKED_DOWNGRADE_SECURE 1 +#define DOWNGRADE_SECURE_FROM_SECURE 2 +#define EVICTED_NEWER_INSECURE 3 +#define EVICTED_OLDEST_COOKIE 4 +#define EVICTED_PREFERRED_COOKIE 5 +#define EVICTING_SECURE_BLOCKED 6 static void bindCookieParameters(mozIStorageBindingParamsArray *aParamsArray, const nsCookieKey &aKey, const nsCookie *aCookie); // struct for temporarily storing cookie attributes during header parsing struct nsCookieAttributes @@ -361,17 +371,17 @@ public: /****************************************************************************** * 
InsertCookieDBListener impl: * mozIStorageStatementCallback used to track asynchronous insertion operations. ******************************************************************************/ class InsertCookieDBListener final : public DBListenerErrorHandler { private: - virtual const char *GetOpType() override { return "INSERT"; } + const char *GetOpType() override { return "INSERT"; } ~InsertCookieDBListener() = default; public: NS_DECL_ISUPPORTS explicit InsertCookieDBListener(DBState* dbState) : DBListenerErrorHandler(dbState) { } NS_IMETHOD HandleResult(mozIStorageResultSet*) override @@ -397,17 +407,17 @@ NS_IMPL_ISUPPORTS(InsertCookieDBListener /****************************************************************************** * UpdateCookieDBListener impl: * mozIStorageStatementCallback used to track asynchronous update operations. ******************************************************************************/ class UpdateCookieDBListener final : public DBListenerErrorHandler { private: - virtual const char *GetOpType() override { return "UPDATE"; } + const char *GetOpType() override { return "UPDATE"; } ~UpdateCookieDBListener() = default; public: NS_DECL_ISUPPORTS explicit UpdateCookieDBListener(DBState* dbState) : DBListenerErrorHandler(dbState) { } NS_IMETHOD HandleResult(mozIStorageResultSet*) override @@ -425,17 +435,17 @@ NS_IMPL_ISUPPORTS(UpdateCookieDBListener /****************************************************************************** * RemoveCookieDBListener impl: * mozIStorageStatementCallback used to track asynchronous removal operations. 
******************************************************************************/ class RemoveCookieDBListener final : public DBListenerErrorHandler { private: - virtual const char *GetOpType() override { return "REMOVE"; } + const char *GetOpType() override { return "REMOVE"; } ~RemoveCookieDBListener() = default; public: NS_DECL_ISUPPORTS explicit RemoveCookieDBListener(DBState* dbState) : DBListenerErrorHandler(dbState) { } NS_IMETHOD HandleResult(mozIStorageResultSet*) override @@ -453,17 +463,17 @@ NS_IMPL_ISUPPORTS(RemoveCookieDBListener /****************************************************************************** * ReadCookieDBListener impl: * mozIStorageStatementCallback used to track asynchronous removal operations. ******************************************************************************/ class ReadCookieDBListener final : public DBListenerErrorHandler { private: - virtual const char *GetOpType() override { return "READ"; } + const char *GetOpType() override { return "READ"; } bool mCanceled; ~ReadCookieDBListener() = default; public: NS_DECL_ISUPPORTS explicit ReadCookieDBListener(DBState* dbState) @@ -697,16 +707,17 @@ NS_IMPL_ISUPPORTS(nsCookieService, nsIObserver, nsISupportsWeakReference, nsIMemoryReporter) nsCookieService::nsCookieService() : mDBState(nullptr) , mCookieBehavior(nsICookieService::BEHAVIOR_ACCEPT) , mThirdPartySession(false) + , mLeaveSecureAlone(true) , mMaxNumberOfCookies(kMaxNumberOfCookies) , mMaxCookiesPerHost(kMaxCookiesPerHost) , mCookiePurgeAge(kCookiePurgeAge) { } nsresult nsCookieService::Init() @@ -719,21 +730,22 @@ nsCookieService::Init() NS_ENSURE_SUCCESS(rv, rv); mThirdPartyUtil = do_GetService(THIRDPARTYUTIL_CONTRACTID); NS_ENSURE_SUCCESS(rv, rv); // init our pref and observer nsCOMPtr<nsIPrefBranch> prefBranch = do_GetService(NS_PREFSERVICE_CONTRACTID); if (prefBranch) { - prefBranch->AddObserver(kPrefCookieBehavior, this, true); - prefBranch->AddObserver(kPrefMaxNumberOfCookies, this, true); - 
prefBranch->AddObserver(kPrefMaxCookiesPerHost, this, true); - prefBranch->AddObserver(kPrefCookiePurgeAge, this, true); - prefBranch->AddObserver(kPrefThirdPartySession, this, true); + prefBranch->AddObserver(kPrefCookieBehavior, this, true); + prefBranch->AddObserver(kPrefMaxNumberOfCookies, this, true); + prefBranch->AddObserver(kPrefMaxCookiesPerHost, this, true); + prefBranch->AddObserver(kPrefCookiePurgeAge, this, true); + prefBranch->AddObserver(kPrefThirdPartySession, this, true); + prefBranch->AddObserver(kCookieLeaveSecurityAlone, this, true); PrefChanged(prefBranch); } mStorageService = do_GetService("@mozilla.org/storage/service;1", &rv); NS_ENSURE_SUCCESS(rv, rv); // Init our default, and possibly private DBStates. InitDBStates(); @@ -2189,16 +2201,19 @@ nsCookieService::PrefChanged(nsIPrefBran if (NS_SUCCEEDED(aPrefBranch->GetIntPref(kPrefCookiePurgeAge, &val))) { mCookiePurgeAge = int64_t(LIMIT(val, 0, INT32_MAX, INT32_MAX)) * PR_USEC_PER_SEC; } bool boolval; if (NS_SUCCEEDED(aPrefBranch->GetBoolPref(kPrefThirdPartySession, &boolval))) mThirdPartySession = boolval; + + if (NS_SUCCEEDED(aPrefBranch->GetBoolPref(kCookieLeaveSecurityAlone, &boolval))) + mLeaveSecureAlone = boolval; } /****************************************************************************** * nsICookieManager impl: * nsICookieManager ******************************************************************************/ NS_IMETHODIMP @@ -3437,40 +3452,84 @@ nsCookieService::AddInternal(const nsCoo // if the new cookie is httponly, make sure we're not coming from script if (!aFromHttp && aCookie->IsHttpOnly()) { COOKIE_LOGFAILURE(SET_COOKIE, aHostURI, aCookieHeader, "cookie is httponly; coming from script"); return; } - nsListIter matchIter; - bool foundCookie = FindCookie(aKey, aCookie->Host(), - aCookie->Name(), aCookie->Path(), matchIter); + bool isSecure = true; + if (aHostURI && NS_FAILED(aHostURI->SchemeIs("https", &isSecure))) { + isSecure = false; + } + + // If the new cookie is 
non-https and wants to set secure flag, + // browser have to ignore this new cookie. + // (draft-ietf-httpbis-cookie-alone section 3.1) + if (mLeaveSecureAlone && aCookie->IsSecure() && !isSecure) { + COOKIE_LOGFAILURE(SET_COOKIE, aHostURI, aCookieHeader, + "non-https cookie can't set secure flag"); + Telemetry::Accumulate(Telemetry::COOKIE_LEAVE_SECURE_ALONE, + BLOCKED_SECURE_SET_FROM_HTTP); + return; + } + nsListIter exactIter; + bool foundCookie = false; + if (mLeaveSecureAlone) { + // Step1, call FindSecureCookie(). FindSecureCookie() would + // find the existing cookie with the security flag and has + // the same name, host and path of the new cookie, if there is any. + // Step2, Confirm new cookie's security setting. If any targeted + // cookie had been found in Step1, then confirm whether the + // new cookie could modify it. If the new created cookie’s + // "secure-only-flag" is not set, and the "scheme" component + // of the "request-uri" does not denote a "secure" protocol, + // then ignore the new cookie. + // (draft-ietf-httpbis-cookie-alone section 3.2) + foundCookie = FindSecureCookie(aKey, aCookie); + if (foundCookie && !aCookie->IsSecure()) { + if (!isSecure) { + COOKIE_LOGFAILURE(SET_COOKIE, aHostURI, aCookieHeader, + "cookie can't save because older cookie is secure cookie but newer cookie is non-secure cookie"); + Telemetry::Accumulate(Telemetry::COOKIE_LEAVE_SECURE_ALONE, + BLOCKED_DOWNGRADE_SECURE); + return; + } else { + // A secure site is allowed to downgrade a secure cookie + // but we want to measure anyway + Telemetry::Accumulate(Telemetry::COOKIE_LEAVE_SECURE_ALONE, + DOWNGRADE_SECURE_FROM_SECURE); + } + } + } + + foundCookie = FindCookie(aKey, aCookie->Host(), + aCookie->Name(), aCookie->Path(), exactIter); RefPtr<nsCookie> oldCookie; nsCOMPtr<nsIArray> purgedList; if (foundCookie) { - oldCookie = matchIter.Cookie(); + oldCookie = exactIter.Cookie(); // Check if the old cookie is stale (i.e. has already expired). 
If so, we // need to be careful about the semantics of removing it and adding the new // cookie: we want the behavior wrt adding the new cookie to be the same as // if it didn't exist, but we still want to fire a removal notification. if (oldCookie->Expiry() <= currentTime) { if (aCookie->Expiry() <= currentTime) { // The new cookie has expired and the old one is stale. Nothing to do. COOKIE_LOGFAILURE(SET_COOKIE, aHostURI, aCookieHeader, "cookie has already expired"); return; } // Remove the stale cookie. We save notification for later, once all list // modifications are complete. - RemoveCookieFromList(matchIter); + RemoveCookieFromList(exactIter); COOKIE_LOGFAILURE(SET_COOKIE, aHostURI, aCookieHeader, "stale cookie was purged"); purgedList = CreatePurgeList(oldCookie); // We've done all we need to wrt removing and notifying the stale cookie. // From here on out, we pretend pretend it didn't exist, so that we // preserve expected notification semantics when adding the new cookie. foundCookie = false; @@ -3497,17 +3556,17 @@ nsCookieService::AddInternal(const nsCoo !oldCookie->IsStale()) { // Update the last access time on the old cookie. oldCookie->SetLastAccessed(aCookie->LastAccessed()); UpdateCookieOldestTime(mDBState, oldCookie); return; } // Remove the old cookie. - RemoveCookieFromList(matchIter); + RemoveCookieFromList(exactIter); // If the new cookie has expired -- i.e. the intent was simply to delete // the old cookie -- then we're done. if (aCookie->Expiry() <= currentTime) { COOKIE_LOGFAILURE(SET_COOKIE, aHostURI, aCookieHeader, "previously stored cookie was deleted"); NotifyChanged(oldCookie, u"deleted"); return; @@ -3524,24 +3583,44 @@ nsCookieService::AddInternal(const nsCoo "cookie has already expired"); return; } // check if we have to delete an old cookie. 
nsCookieEntry *entry = mDBState->hostTable.GetEntry(aKey); if (entry && entry->GetCookies().Length() >= mMaxCookiesPerHost) { nsListIter iter; - FindStaleCookie(entry, currentTime, aHostURI, iter); + // Prioritize evicting insecure cookies. + // (draft-ietf-httpbis-cookie-alone section 3.3) + mozilla::Maybe<bool> optionalSecurity = mLeaveSecureAlone ? Some(false) : Nothing(); + int64_t oldestCookieTime = FindStaleCookie(entry, currentTime, aHostURI, optionalSecurity, iter); + if (iter.entry == nullptr) { + if (aCookie->IsSecure()) { + // It's valid to evict a secure cookie for another secure cookie. + oldestCookieTime = FindStaleCookie(entry, currentTime, aHostURI, Some(true), iter); + } else { + Telemetry::Accumulate(Telemetry::COOKIE_LEAVE_SECURE_ALONE, + EVICTING_SECURE_BLOCKED); + COOKIE_LOGEVICTED(aCookie, + "Too many cookies for this domain and the new cookie is not a secure cookie"); + return; + } + } + + MOZ_ASSERT(iter.entry); + oldCookie = iter.Cookie(); + if (oldestCookieTime > 0 && mLeaveSecureAlone) { + TelemetryForEvictingStaleCookie(oldCookie, oldestCookieTime); + } // remove the oldest cookie from the domain RemoveCookieFromList(iter); COOKIE_LOGEVICTED(oldCookie, "Too many cookies for this domain"); purgedList = CreatePurgeList(oldCookie); - } else if (mDBState->cookieCount >= ADD_TEN_PERCENT(mMaxNumberOfCookies)) { int64_t maxAge = aCurrentTimeInUsec - mDBState->cookieOldestTime; int64_t purgeAge = ADD_TEN_PERCENT(mCookiePurgeAge); if (maxAge >= purgeAge) { // we're over both size and age limits by 10%; time to purge the table! // do this by: // 1) removing expired cookies; // 2) evicting the balance of old cookies until we reach the size limit. @@ -4425,22 +4504,24 @@ nsCookieService::CookieExistsNative(nsIC nsListIter iter; *aFoundCookie = FindCookie(nsCookieKey(baseDomain, *aOriginAttributes), host, name, path, iter); return NS_OK; } // For a given base domain, find either an expired cookie or the oldest cookie // by lastAccessed time. 
-void +int64_t nsCookieService::FindStaleCookie(nsCookieEntry *aEntry, int64_t aCurrentTime, nsIURI* aSource, + mozilla::Maybe<bool> aIsSecure, nsListIter &aIter) { + aIter.entry = nullptr; bool requireHostMatch = true; nsAutoCString baseDomain, sourceHost, sourcePath; if (aSource) { GetBaseDomain(aSource, baseDomain, requireHostMatch); aSource->GetAsciiHost(sourceHost); sourcePath = GetPathFromURI(aSource); } @@ -4457,40 +4538,56 @@ nsCookieService::FindStaleCookie(nsCooki int64_t oldestNonMatchingNonSessionCookieTime = 0; nsListIter oldestNonMatchingNonSessionCookie; oldestNonMatchingNonSessionCookie.entry = nullptr; int64_t oldestCookieTime = 0; nsListIter oldestCookie; oldestCookie.entry = nullptr; + int64_t actualOldestCookieTime = cookies.Length() ? cookies[0]->LastAccessed() : 0; for (nsCookieEntry::IndexType i = 0; i < cookies.Length(); ++i) { nsCookie *cookie = cookies[i]; // If we found an expired cookie, we're done. if (cookie->Expiry() <= aCurrentTime) { aIter.entry = aEntry; aIter.index = i; - return; + return -1; + } + + int64_t lastAccessed = cookie->LastAccessed(); + // Record the age of the oldest cookie that is stored for this host. + // oldestCookieTime is the age of the oldest cookie with a matching + // secure flag, which may be more recent than an older cookie with + // a non-matching secure flag. + if (actualOldestCookieTime > lastAccessed) { + actualOldestCookieTime = lastAccessed; + } + if (aIsSecure.isSome() && !aIsSecure.value()) { + // We want to look for the oldest non-secure cookie first time through, + // then find the oldest secure cookie the second time we are called. 
+ if (cookie->IsSecure()) { + continue; + } } // Update our various records of oldest cookies fitting several restrictions: // * session cookies // * non-session cookies // * cookies with paths and domains that don't match the cookie triggering this purge // This cookie is a candidate for eviction if we have no information about // the source request, or if it is not a path or domain match against the // source request. bool isPrimaryEvictionCandidate = true; if (aSource) { isPrimaryEvictionCandidate = !PathMatches(cookie, sourcePath) || !DomainMatches(cookie, sourceHost); } - int64_t lastAccessed = cookie->LastAccessed(); if (cookie->IsSession()) { if (!oldestSessionCookie.entry || oldestSessionCookieTime > lastAccessed) { oldestSessionCookieTime = lastAccessed; oldestSessionCookie.entry = aEntry; oldestSessionCookie.index = i; } if (isPrimaryEvictionCandidate && @@ -4524,16 +4621,37 @@ nsCookieService::FindStaleCookie(nsCooki aIter = oldestNonMatchingSessionCookie; } else if (oldestSessionCookie.entry) { aIter = oldestSessionCookie; } else if (oldestNonMatchingNonSessionCookie.entry) { aIter = oldestNonMatchingNonSessionCookie; } else { aIter = oldestCookie; } + + return actualOldestCookieTime; +} + +void +nsCookieService::TelemetryForEvictingStaleCookie(nsCookie *aEvicted, + int64_t oldestCookieTime) +{ + // We need to record the evicting cookie to telemetry. + if (!aEvicted->IsSecure()) { + if (aEvicted->LastAccessed() > oldestCookieTime) { + Telemetry::Accumulate(Telemetry::COOKIE_LEAVE_SECURE_ALONE, + EVICTED_NEWER_INSECURE); + } else { + Telemetry::Accumulate(Telemetry::COOKIE_LEAVE_SECURE_ALONE, + EVICTED_OLDEST_COOKIE); + } + } else { + Telemetry::Accumulate(Telemetry::COOKIE_LEAVE_SECURE_ALONE, + EVICTED_PREFERRED_COOKIE); + } } // count the number of cookies stored by a particular host. this is provided by the // nsICookieManager2 interface. 
NS_IMETHODIMP nsCookieService::CountCookiesFromHost(const nsACString &aHost, uint32_t *aCountFromHost) { @@ -4731,16 +4849,50 @@ nsCookieService::RemoveCookiesWithOrigin nsresult rv = Remove(host, entry->mOriginAttributes, name, path, false); NS_ENSURE_SUCCESS(rv, rv); } } return NS_OK; } +// find an secure cookie specified by host and name +bool +nsCookieService::FindSecureCookie(const nsCookieKey &aKey, + nsCookie *aCookie) +{ + EnsureReadDomain(aKey); + + nsCookieEntry *entry = mDBState->hostTable.GetEntry(aKey); + if (!entry) + return false; + + const nsCookieEntry::ArrayType &cookies = entry->GetCookies(); + for (nsCookieEntry::IndexType i = 0; i < cookies.Length(); ++i) { + nsCookie *cookie = cookies[i]; + // isn't a match if insecure or a different name + if (!cookie->IsSecure() || !aCookie->Name().Equals(cookie->Name())) + continue; + + // The host must "domain-match" an existing cookie or vice-versa + if (DomainMatches(cookie, aCookie->Host()) || + DomainMatches(aCookie, cookie->Host())) { + // If the path of new cookie and the path of existing cookie + // aren't "/", then this situation needs to compare paths to + // ensure only that a newly-created non-secure cookie does not + // overlay an existing secure cookie. + if (PathMatches(cookie, aCookie->Path())) { + return true; + } + } + } + + return false; +} + // find an exact cookie specified by host, name, and path that hasn't expired. bool nsCookieService::FindCookie(const nsCookieKey &aKey, const nsAFlatCString &aHost, const nsAFlatCString &aName, const nsAFlatCString &aPath, nsListIter &aIter) {
--- a/netwerk/cookie/nsCookieService.h +++ b/netwerk/cookie/nsCookieService.h @@ -24,18 +24,18 @@ #include "mozIStorageConnection.h" #include "mozIStorageRow.h" #include "mozIStorageCompletionCallback.h" #include "mozIStorageStatementCallback.h" #include "mozIStorageFunction.h" #include "nsIVariant.h" #include "nsIFile.h" #include "mozilla/BasePrincipal.h" - #include "mozilla/MemoryReporting.h" +#include "mozilla/Maybe.h" using mozilla::NeckoOriginAttributes; using mozilla::OriginAttributes; class nsICookiePermission; class nsIEffectiveTLDService; class nsIIDNService; class nsIPrefBranch; @@ -288,36 +288,38 @@ class nsCookieService final : public nsI void AsyncReadComplete(); void CancelAsyncRead(bool aPurgeReadSet); void EnsureReadDomain(const nsCookieKey &aKey); void EnsureReadComplete(); nsresult NormalizeHost(nsCString &aHost); nsresult GetBaseDomain(nsIURI *aHostURI, nsCString &aBaseDomain, bool &aRequireHostMatch); nsresult GetBaseDomainFromHost(const nsACString &aHost, nsCString &aBaseDomain); nsresult GetCookieStringCommon(nsIURI *aHostURI, nsIChannel *aChannel, bool aHttpBound, char** aCookie); - void GetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, bool aHttpBound, const NeckoOriginAttributes aOriginAttrs, bool aIsPrivate, nsCString &aCookie); + void GetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, bool aHttpBound, const NeckoOriginAttributes aOriginAttrs, bool aIsPrivate, nsCString &aCookie); nsresult SetCookieStringCommon(nsIURI *aHostURI, const char *aCookieHeader, const char *aServerTime, nsIChannel *aChannel, bool aFromHttp); - void SetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, nsDependentCString &aCookieHeader, const nsCString &aServerTime, bool aFromHttp, const NeckoOriginAttributes &aOriginAttrs, bool aIsPrivate, nsIChannel* aChannel); + void SetCookieStringInternal(nsIURI *aHostURI, bool aIsForeign, nsDependentCString &aCookieHeader, const nsCString &aServerTime, bool aFromHttp, const NeckoOriginAttributes 
&aOriginAttrs, bool aIsPrivate, nsIChannel* aChannel); bool SetCookieInternal(nsIURI *aHostURI, const nsCookieKey& aKey, bool aRequireHostMatch, CookieStatus aStatus, nsDependentCString &aCookieHeader, int64_t aServerTime, bool aFromHttp, nsIChannel* aChannel); void AddInternal(const nsCookieKey& aKey, nsCookie *aCookie, int64_t aCurrentTimeInUsec, nsIURI *aHostURI, const char *aCookieHeader, bool aFromHttp); void RemoveCookieFromList(const nsListIter &aIter, mozIStorageBindingParamsArray *aParamsArray = nullptr); void AddCookieToList(const nsCookieKey& aKey, nsCookie *aCookie, DBState *aDBState, mozIStorageBindingParamsArray *aParamsArray, bool aWriteToDB = true); void UpdateCookieInList(nsCookie *aCookie, int64_t aLastAccessed, mozIStorageBindingParamsArray *aParamsArray); static bool GetTokenValue(nsASingleFragmentCString::const_char_iterator &aIter, nsASingleFragmentCString::const_char_iterator &aEndIter, nsDependentCSubstring &aTokenString, nsDependentCSubstring &aTokenValue, bool &aEqualsFound); static bool ParseAttributes(nsDependentCString &aCookieHeader, nsCookieAttributes &aCookie); bool RequireThirdPartyCheck(); CookieStatus CheckPrefs(nsIURI *aHostURI, bool aIsForeign, const char *aCookieHeader); bool CheckDomain(nsCookieAttributes &aCookie, nsIURI *aHostURI, const nsCString &aBaseDomain, bool aRequireHostMatch); static bool CheckPath(nsCookieAttributes &aCookie, nsIURI *aHostURI); static bool CheckPrefixes(nsCookieAttributes &aCookie, bool aSecureRequest); static bool GetExpiry(nsCookieAttributes &aCookie, int64_t aServerTime, int64_t aCurrentTime); void RemoveAllFromMemory(); already_AddRefed<nsIArray> PurgeCookies(int64_t aCurrentTimeInUsec); bool FindCookie(const nsCookieKey& aKey, const nsAFlatCString &aHost, const nsAFlatCString &aName, const nsAFlatCString &aPath, nsListIter &aIter); - void FindStaleCookie(nsCookieEntry *aEntry, int64_t aCurrentTime, nsIURI* aSource, nsListIter &aIter); + bool FindSecureCookie(const nsCookieKey& aKey, nsCookie* 
aCookie); + int64_t FindStaleCookie(nsCookieEntry *aEntry, int64_t aCurrentTime, nsIURI* aSource, mozilla::Maybe<bool> aIsSecure, nsListIter &aIter); + void TelemetryForEvictingStaleCookie(nsCookie* aEvicted, int64_t oldestCookieTime); void NotifyRejected(nsIURI *aHostURI); void NotifyThirdParty(nsIURI *aHostURI, bool aAccepted, nsIChannel *aChannel); void NotifyChanged(nsISupports *aSubject, const char16_t *aData); void NotifyPurged(nsICookie2* aCookie); already_AddRefed<nsIArray> CreatePurgeList(nsICookie2* aCookie); void UpdateCookieOldestTime(DBState* aDBState, nsCookie* aCookie); nsresult GetCookiesWithOriginAttributes(const mozilla::OriginAttributesPattern& aPattern, const nsCString& aBaseDomain, nsISimpleEnumerator **aEnumerator); @@ -347,16 +349,17 @@ class nsCookieService final : public nsI // want to be dealing with the on-disk DB when in private browsing. DBState *mDBState; RefPtr<DBState> mDefaultDBState; RefPtr<DBState> mPrivateDBState; // cached prefs uint8_t mCookieBehavior; // BEHAVIOR_{ACCEPT, REJECTFOREIGN, REJECT, LIMITFOREIGN} bool mThirdPartySession; + bool mLeaveSecureAlone; uint16_t mMaxNumberOfCookies; uint16_t mMaxCookiesPerHost; int64_t mCookiePurgeAge; // friends! friend class DBListenerErrorHandler; friend class ReadCookieDBListener; friend class CloseCookieDBListener;
--- a/netwerk/dns/nsDNSService2.cpp +++ b/netwerk/dns/nsDNSService2.cpp @@ -393,19 +393,19 @@ class nsDNSSyncRequest : public nsResolv { public: explicit nsDNSSyncRequest(PRMonitor *mon) : mDone(false) , mStatus(NS_OK) , mMonitor(mon) {} virtual ~nsDNSSyncRequest() = default; - void OnLookupComplete(nsHostResolver *, nsHostRecord *, nsresult); - bool EqualsAsyncListener(nsIDNSListener *aListener); - size_t SizeOfIncludingThis(mozilla::MallocSizeOf) const; + void OnLookupComplete(nsHostResolver *, nsHostRecord *, nsresult) override; + bool EqualsAsyncListener(nsIDNSListener *aListener) override; + size_t SizeOfIncludingThis(mozilla::MallocSizeOf) const override; bool mDone; nsresult mStatus; RefPtr<nsHostRecord> mHostRecord; private: PRMonitor *mMonitor; };
--- a/netwerk/ipc/NeckoParent.cpp +++ b/netwerk/ipc/NeckoParent.cpp @@ -740,28 +740,16 @@ NeckoParent::AllocPTransportProviderPare bool NeckoParent::DeallocPTransportProviderParent(PTransportProviderParent* aActor) { RefPtr<TransportProviderParent> provider = dont_AddRef(static_cast<TransportProviderParent*>(aActor)); return true; } -mozilla::ipc::IProtocol* -NeckoParent::CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) -{ - ContentParent* contentParent = aCtx->GetContentParent(); - nsAutoPtr<PNeckoParent> actor(contentParent->AllocPNeckoParent()); - if (!actor || !contentParent->RecvPNeckoConstructor(actor)) { - return nullptr; - } - return actor.forget(); -} - namespace { std::map<uint64_t, nsCOMPtr<nsIAuthPromptCallback> >& CallbackMap() { MOZ_ASSERT(NS_IsMainThread()); static std::map<uint64_t, nsCOMPtr<nsIAuthPromptCallback> > sCallbackMap; return sCallbackMap; }
--- a/netwerk/ipc/NeckoParent.h +++ b/netwerk/ipc/NeckoParent.h @@ -162,20 +162,16 @@ protected: const uint16_t& flags) override; virtual bool RecvCancelHTMLDNSPrefetch(const nsString& hostname, const uint16_t& flags, const nsresult& reason) override; virtual PWebSocketEventListenerParent* AllocPWebSocketEventListenerParent(const uint64_t& aInnerWindowID) override; virtual bool DeallocPWebSocketEventListenerParent(PWebSocketEventListenerParent*) override; - virtual mozilla::ipc::IProtocol* - CloneProtocol(Channel* aChannel, - mozilla::ipc::ProtocolCloneContext* aCtx) override; - virtual PDataChannelParent* AllocPDataChannelParent(const uint32_t& channelId) override; virtual bool DeallocPDataChannelParent(PDataChannelParent* parent) override; virtual bool RecvPDataChannelConstructor(PDataChannelParent* aActor, const uint32_t& channelId) override; virtual PRtspControllerParent* AllocPRtspControllerParent() override;
--- a/netwerk/sctp/datachannel/DataChannel.cpp +++ b/netwerk/sctp/datachannel/DataChannel.cpp @@ -2339,17 +2339,17 @@ DataChannelConnection::SendBlob(uint16_t class DataChannelBlobSendRunnable : public Runnable { public: DataChannelBlobSendRunnable(already_AddRefed<DataChannelConnection>& aConnection, uint16_t aStream) : mConnection(aConnection) , mStream(aStream) {} - ~DataChannelBlobSendRunnable() + ~DataChannelBlobSendRunnable() override { if (!NS_IsMainThread() && mConnection) { MOZ_ASSERT(false); // explicitly leak the connection if destroyed off mainthread Unused << mConnection.forget().take(); } }
--- a/netwerk/streamconv/nsStreamConverterService.cpp +++ b/netwerk/streamconv/nsStreamConverterService.cpp @@ -188,17 +188,17 @@ nsStreamConverterService::ParseFromTo(co typedef nsClassHashtable<nsCStringHashKey, BFSTableData> BFSHashTable; // nsObjectHashtable enumerator functions. class CStreamConvDeallocator : public nsDequeFunctor { public: - virtual void* operator()(void* anObject) { + void* operator()(void* anObject) override { nsCString *string = (nsCString*)anObject; delete string; return 0; } }; // walks the graph using a breadth-first-search algorithm which generates a discovered // verticies tree. This tree is then walked up (from destination vertex, to origin vertex)
--- a/netwerk/test/TestCookie.cpp +++ b/netwerk/test/TestCookie.cpp @@ -25,16 +25,17 @@ static NS_DEFINE_CID(kCookieServiceCID, static NS_DEFINE_CID(kPrefServiceCID, NS_PREFSERVICE_CID); // various pref strings static const char kCookiesPermissions[] = "network.cookie.cookieBehavior"; static const char kCookiesLifetimeEnabled[] = "network.cookie.lifetime.enabled"; static const char kCookiesLifetimeDays[] = "network.cookie.lifetime.days"; static const char kCookiesLifetimeCurrentSession[] = "network.cookie.lifetime.behavior"; static const char kCookiesMaxPerHost[] = "network.cookie.maxPerHost"; +static const char kCookieLeaveSecurityAlone[] = "network.cookie.leave-secure-alone"; static char *sBuffer; #define OFFSET_ONE_WEEK int64_t(604800) * PR_USEC_PER_SEC #define OFFSET_ONE_DAY int64_t(86400) * PR_USEC_PER_SEC //Set server time or expiry time void @@ -208,16 +209,17 @@ InitPrefs(nsIPrefBranch *aPrefBranch) { // init some relevant prefs, so the tests don't go awry. // we use the most restrictive set of prefs we can; // however, we don't test third party blocking here. aPrefBranch->SetIntPref(kCookiesPermissions, 0); // accept all aPrefBranch->SetBoolPref(kCookiesLifetimeEnabled, true); aPrefBranch->SetIntPref(kCookiesLifetimeCurrentSession, 0); aPrefBranch->SetIntPref(kCookiesLifetimeDays, 1); + aPrefBranch->SetBoolPref(kCookieLeaveSecurityAlone, true); // Set the base domain limit to 50 so we have a known value. 
aPrefBranch->SetIntPref(kCookiesMaxPerHost, 50); } int main(int32_t argc, char *argv[]) { @@ -681,16 +683,59 @@ main(int32_t argc, char *argv[]) SetACookie(cookieService, "https://host.prefixed.test/some/path", nullptr, "__Host-e=test; secure", nullptr); SetACookie(cookieService, "https://host.prefixed.test/some/path", nullptr, "__Host-f=test; secure; path=/", nullptr); SetACookie(cookieService, "https://host.prefixed.test/some/path", nullptr, "__Host-g=test; secure; path=/some", nullptr); GetACookie(cookieService, "https://host.prefixed.test/", nullptr, getter_Copies(cookie)); rv[6] = CheckResult(cookie.get(), MUST_EQUAL, "__Host-f=test"); allTestsPassed = PrintResult(rv, 7) && allTestsPassed; + // *** leave-secure-alone tests + sBuffer = PR_sprintf_append(sBuffer, "*** Beginning leave-secure-alone tests...\n"); + + // testing items 0 & 1 for section 3.1 of the spec "Deprecate modification of 'secure' + // cookies from non-secure origins" + SetACookie(cookieService, "http://www.security.test/", nullptr, "test=non-security; secure", nullptr); + GetACookieNoHttp(cookieService, "https://www.security.test/", getter_Copies(cookie)); + rv[0] = CheckResult(cookie.get(), MUST_BE_NULL); + SetACookie(cookieService, "https://www.security.test/path/", nullptr, "test=security; secure; path=/path/", nullptr); + GetACookieNoHttp(cookieService, "https://www.security.test/path/", getter_Copies(cookie)); + rv[1] = CheckResult(cookie.get(), MUST_EQUAL, "test=security"); + // testing items 2 & 3 & 4 for section 3.2 of the spec "Deprecate modification of 'secure' + // cookies from non-secure origins" + // Secure site can modify cookie value + SetACookie(cookieService, "https://www.security.test/path/", nullptr, "test=security2; secure; path=/path/", nullptr); + GetACookieNoHttp(cookieService, "https://www.security.test/path/", getter_Copies(cookie)); + rv[2] = CheckResult(cookie.get(), MUST_EQUAL, "test=security2"); + // If new cookie contains same name, same host and partially matching path with + an 
existing security cookie on non-security site, it can't modify an existing + security cookie. + SetACookie(cookieService, "http://www.security.test/path/foo/", nullptr, "test=non-security; path=/path/foo", nullptr); + GetACookieNoHttp(cookieService, "https://www.security.test/path/foo/", getter_Copies(cookie)); + rv[3] = CheckResult(cookie.get(), MUST_EQUAL, "test=security2"); + // A non-secure cookie can be set with the same name, same host and a non-matching path. + SetACookie(cookieService, "http://www.security.test/bar/", nullptr, "test=non-security; path=/bar", nullptr); + GetACookieNoHttp(cookieService, "http://www.security.test/bar/", getter_Copies(cookie)); + rv[4] = CheckResult(cookie.get(), MUST_EQUAL, "test=non-security"); + // Modify value and downgrade secure level. + SetACookie(cookieService, "https://www.security.test/", nullptr, "test_modify_cookie=security-cookie; secure; domain=.security.test", nullptr); + GetACookieNoHttp(cookieService, "https://www.security.test/", getter_Copies(cookie)); + rv[5] = CheckResult(cookie.get(), MUST_EQUAL, "test_modify_cookie=security-cookie"); + SetACookie(cookieService, "https://www.security.test/", nullptr, "test_modify_cookie=non-security-cookie; domain=.security.test", nullptr); + GetACookieNoHttp(cookieService, "https://www.security.test/", getter_Copies(cookie)); + rv[6] = CheckResult(cookie.get(), MUST_EQUAL, "test_modify_cookie=non-security-cookie"); + // Test that a non-secure cookie can be set when its domain or path differs from the secure cookie of the same name. 
+ SetACookie(cookieService, "https://www.security.test/", nullptr, "test=security3", nullptr); + GetACookieNoHttp(cookieService, "http://www.security.test/", getter_Copies(cookie)); + rv[7] = CheckResult(cookie.get(), MUST_CONTAIN, "test=security3"); + SetACookie(cookieService, "http://www.security.test/", nullptr, "test=non-security2; domain=security.test", nullptr); + GetACookieNoHttp(cookieService, "http://www.security.test/", getter_Copies(cookie)); + rv[8] = CheckResult(cookie.get(), MUST_CONTAIN, "test=non-security2"); + + allTestsPassed = PrintResult(rv, 9) && allTestsPassed; // *** nsICookieManager{2} interface tests sBuffer = PR_sprintf_append(sBuffer, "*** Beginning nsICookieManager{2} interface tests...\n"); nsCOMPtr<nsICookieManager> cookieMgr = do_GetService(NS_COOKIEMANAGER_CONTRACTID, &rv0); if (NS_FAILED(rv0)) return -1; nsCOMPtr<nsICookieManager2> cookieMgr2 = do_QueryInterface(cookieMgr); if (!cookieMgr2) return -1; @@ -815,16 +860,40 @@ main(int32_t argc, char *argv[]) expected += NS_LITERAL_CSTRING("; "); } } GetACookie(cookieService, "http://creation.ordering.tests/", nullptr, getter_Copies(cookie)); rv[0] = CheckResult(cookie.get(), MUST_EQUAL, expected.get()); allTestsPassed = PrintResult(rv, 1) && allTestsPassed; + // *** eviction and creation ordering tests after enabling network.cookie.leave-secure-alone + sBuffer = PR_sprintf_append(sBuffer, "*** Beginning eviction and creation tests after enable nework.cookie.leave-secure-alone...\n"); + // reset cookies + cookieMgr->RemoveAll(); + + for (int32_t i = 0; i < 60; ++i) { + name = NS_LITERAL_CSTRING("test"); + name.AppendInt(i); + name += NS_LITERAL_CSTRING("=delete_non_security"); + + // Create 50 cookies that include the secure flag. + if (i < 50) { + name += NS_LITERAL_CSTRING("; secure"); + SetACookie(cookieService, "https://creation.ordering.tests/", nullptr, name.get(), nullptr); + } else { + // non-secure cookies will be evicted, including the most recently created one. 
+ SetACookie(cookieService, "http://creation.ordering.tests/", nullptr, name.get(), nullptr); + } + } + GetACookie(cookieService, "http://creation.ordering.tests/", nullptr, getter_Copies(cookie)); + rv[0] = CheckResult(cookie.get(), MUST_BE_NULL); + + allTestsPassed = PrintResult(rv, 1) && allTestsPassed; + // XXX the following are placeholders: add these tests please! // *** "noncompliant cookie" tests // *** IP address tests // *** speed tests sBuffer = PR_sprintf_append(sBuffer, "\n*** Result: %s!\n\n", allTestsPassed ? "all tests passed" : "TEST(S) FAILED");
--- a/python/gdbpp/gdbpp/smartptr.py +++ b/python/gdbpp/gdbpp/smartptr.py @@ -35,8 +35,21 @@ class smartptr_printer(object): def to_string(self): if not self.value: type_name = str(self.value.type) else: type_name = str(self.value.dereference().dynamic_type.pointer()) return '[(%s) %s]' % (type_name, str(self.value)) + +@GeckoPrettyPrinter('UniquePtr', '^mozilla::UniquePtr<.*>$') +class uniqueptr_printer(object): + def __init__(self, value): + self.value = value['mTuple']['mFirstA'] + + def to_string(self): + if not self.value: + type_name = str(self.value.type) + else: + type_name = str(self.value.dereference().dynamic_type.pointer()) + + return '[(%s) %s]' % (type_name, str(self.value))
--- a/security/manager/ssl/nsNSSComponent.cpp +++ b/security/manager/ssl/nsNSSComponent.cpp @@ -1277,26 +1277,36 @@ nsNSSComponent::ConfigureInternalPKCS11T nsresult nsNSSComponent::InitializePIPNSSBundle() { // Called during init only, no mutex required. nsresult rv; nsCOMPtr<nsIStringBundleService> bundleService(do_GetService(NS_STRINGBUNDLE_CONTRACTID, &rv)); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); + MOZ_RELEASE_ASSERT(bundleService); +#endif if (NS_FAILED(rv) || !bundleService) return NS_ERROR_FAILURE; bundleService->CreateBundle("chrome://pipnss/locale/pipnss.properties", getter_AddRefs(mPIPNSSBundle)); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(mPIPNSSBundle); +#endif if (!mPIPNSSBundle) rv = NS_ERROR_FAILURE; bundleService->CreateBundle("chrome://pipnss/locale/nsserrors.properties", getter_AddRefs(mNSSErrorsBundle)); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(mNSSErrorsBundle); +#endif if (!mNSSErrorsBundle) rv = NS_ERROR_FAILURE; return rv; } // Table of pref names and SSL cipher ID typedef struct { @@ -1458,16 +1468,19 @@ StaticRefPtr<CipherSuiteChangeObserver> // static nsresult CipherSuiteChangeObserver::StartObserve() { NS_ASSERTION(NS_IsMainThread(), "CipherSuiteChangeObserver::StartObserve() can only be accessed in main thread"); if (!sObserver) { RefPtr<CipherSuiteChangeObserver> observer = new CipherSuiteChangeObserver(); nsresult rv = Preferences::AddStrongObserver(observer.get(), "security."); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { sObserver = nullptr; return rv; } nsCOMPtr<nsIObserverService> observerService = mozilla::services::GetObserverService(); observerService->AddObserver(observer, NS_XPCOM_SHUTDOWN_OBSERVER_ID, @@ -1707,16 +1720,19 @@ GetNSSProfilePath(nsAutoCString& aProfil MOZ_LOG(gPIPNSSLog, LogLevel::Error, ("Could not get nsILocalFileWin for profile directory.\n")); return NS_ERROR_FAILURE; } rv = profileFileWin->GetNativeCanonicalPath(aProfilePath); #else rv = 
profileFile->GetNativePath(aProfilePath); #endif +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { MOZ_LOG(gPIPNSSLog, LogLevel::Error, ("Could not get native path for profile directory.\n")); return rv; } MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("NSS profile at '%s'\n", aProfilePath.get())); @@ -1734,16 +1750,19 @@ nsNSSComponent::InitializeNSS() static_assert(nsINSSErrorsService::NSS_SEC_ERROR_BASE == SEC_ERROR_BASE && nsINSSErrorsService::NSS_SEC_ERROR_LIMIT == SEC_ERROR_LIMIT && nsINSSErrorsService::NSS_SSL_ERROR_BASE == SSL_ERROR_BASE && nsINSSErrorsService::NSS_SSL_ERROR_LIMIT == SSL_ERROR_LIMIT, "You must update the values in nsINSSErrorsService.idl"); MutexAutoLock lock(mutex); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(!mNSSInitialized); +#endif if (mNSSInitialized) { // We should never try to initialize NSS more than once in a process. MOZ_ASSERT_UNREACHABLE("Trying to initialize NSS twice"); return NS_ERROR_FAILURE; } MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("NSS Initialization beginning\n")); @@ -1752,29 +1771,35 @@ nsNSSComponent::InitializeNSS() // If we could assume i18n will not change between profiles, one call per application // run were sufficient. As I can't predict what happens in the future, let's repeat // this call for every re-init of NSS. ConfigureInternalPKCS11Token(); nsAutoCString profileStr; nsresult rv = GetNSSProfilePath(profileStr); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { return NS_ERROR_NOT_AVAILABLE; } SECStatus init_rv = SECFailure; bool nocertdb = Preferences::GetBool("security.nocertdb", false); bool inSafeMode = true; nsCOMPtr<nsIXULRuntime> runtime(do_GetService("@mozilla.org/xre/runtime;1")); // There might not be an nsIXULRuntime in embedded situations. This will // default to assuming we are in safe mode (as a result, no external PKCS11 // modules will be loaded). 
if (runtime) { rv = runtime->GetInSafeMode(&inSafeMode); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { return rv; } } MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("inSafeMode: %u\n", inSafeMode)); if (!nocertdb && !profileStr.IsEmpty()) { // First try to initialize the NSS DB in read/write mode. @@ -1789,18 +1814,24 @@ nsNSSComponent::InitializeNSS() MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("could not init in r/o either\n")); } } // If we haven't succeeded in initializing the DB in our profile // directory or we don't have a profile at all, or the "security.nocertdb" // pref has been set to "true", attempt to initialize with no DB. if (nocertdb || init_rv != SECSuccess) { init_rv = NSS_NoDB_Init(nullptr); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(init_rv == SECSuccess); +#endif } if (init_rv != SECSuccess) { +#ifdef ANDROID + MOZ_RELEASE_ASSERT(false); +#endif MOZ_LOG(gPIPNSSLog, LogLevel::Error, ("could not initialize NSS - panicking\n")); return NS_ERROR_NOT_AVAILABLE; } // ensure we have an initial value for the content signer root mContentSigningRootHash = Preferences::GetString("security.content.signature.root_hash"); @@ -1812,24 +1843,30 @@ nsNSSComponent::InitializeNSS() // Register an observer so we can inform NSS when these prefs change Preferences::AddStrongObserver(this, "security."); SSL_OptionSetDefault(SSL_ENABLE_SSL2, false); SSL_OptionSetDefault(SSL_V2_COMPATIBLE_HELLO, false); rv = setEnabledTLSVersions(); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { return NS_ERROR_UNEXPECTED; } DisableMD5(); LoadLoadableRoots(); rv = LoadExtendedValidationInfo(); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { MOZ_LOG(gPIPNSSLog, LogLevel::Error, ("failed to load EV info")); return rv; } MaybeEnableFamilySafetyCompatibility(); MaybeImportEnterpriseRoots(); @@ -1859,57 +1896,72 @@ nsNSSComponent::InitializeNSS() Preferences::GetBool("security.ssl.enable_alpn", 
ALPN_ENABLED_DEFAULT)); SSL_OptionSetDefault(SSL_ENABLE_0RTT_DATA, Preferences::GetBool("security.tls.enable_0rtt_data", ENABLED_0RTT_DATA_DEFAULT)); if (NS_FAILED(InitializeCipherSuite())) { +#ifdef ANDROID + MOZ_RELEASE_ASSERT(false); +#endif MOZ_LOG(gPIPNSSLog, LogLevel::Error, ("Unable to initialize cipher suite settings\n")); return NS_ERROR_FAILURE; } // TLSServerSocket may be run with the session cache enabled. It is necessary // to call this once before that can happen. This specifies a maximum of 1000 // cache entries (the default number of cache entries is 10000, which seems a // little excessive as there probably won't be that many clients connecting to // any TLSServerSockets the browser runs.) // Note that this must occur before any calls to SSL_ClearSessionCache // (otherwise memory will leak). if (SSL_ConfigServerSessionIDCache(1000, 0, 0, nullptr) != SECSuccess) { +#ifdef ANDROID + MOZ_RELEASE_ASSERT(false); +#endif return NS_ERROR_FAILURE; } // ensure the CertBlocklist is initialised nsCOMPtr<nsICertBlocklist> certList = do_GetService(NS_CERTBLOCKLIST_CONTRACTID); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(certList); +#endif if (!certList) { return NS_ERROR_FAILURE; } // dynamic options from prefs setValidationOptions(true, lock); #ifndef MOZ_NO_SMART_CARDS LaunchSmartCardThreads(); #endif mozilla::pkix::RegisterErrorTable(); // Initialize the site security service nsCOMPtr<nsISiteSecurityService> sssService = do_GetService(NS_SSSERVICE_CONTRACTID); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(sssService); +#endif if (!sssService) { MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("Cannot initialize site security service\n")); return NS_ERROR_FAILURE; } // Initialize the cert override service nsCOMPtr<nsICertOverrideService> coService = do_GetService(NS_CERTOVERRIDE_CONTRACTID); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(coService); +#endif if (!coService) { MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("Cannot initialize cert override service\n")); return NS_ERROR_FAILURE; } if 
(PK11_IsFIPS()) { Telemetry::Accumulate(Telemetry::FIPS_ENABLED, true); } @@ -1971,16 +2023,19 @@ nsNSSComponent::Init() return NS_ERROR_NOT_SAME_THREAD; } nsresult rv = NS_OK; MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("Beginning NSS initialization\n")); rv = InitializePIPNSSBundle(); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { MOZ_LOG(gPIPNSSLog, LogLevel::Error, ("Unable to create pipnss bundle.\n")); return rv; } // Access our string bundles now, this prevents assertions from I/O // - nsStandardURL not thread-safe // - wrong thread: 'NS_IsMainThread()' in nsIOService.cpp @@ -1991,16 +2046,19 @@ nsNSSComponent::Init() mPIPNSSBundle->GetStringFromName(dummy_name.get(), getter_Copies(result)); mNSSErrorsBundle->GetStringFromName(dummy_name.get(), getter_Copies(result)); } rv = InitializeNSS(); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); +#endif if (NS_FAILED(rv)) { MOZ_LOG(gPIPNSSLog, LogLevel::Error, ("nsNSSComponent::InitializeNSS() failed\n")); return rv; } RememberCertErrorsTable::Init(); @@ -2152,16 +2210,19 @@ nsresult nsNSSComponent::LogoutAuthentic nsresult nsNSSComponent::RegisterObservers() { // Happens once during init only, no mutex protection. 
nsCOMPtr<nsIObserverService> observerService( do_GetService("@mozilla.org/observer-service;1")); +#ifdef ANDROID + MOZ_RELEASE_ASSERT(observerService); +#endif if (!observerService) { MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("nsNSSComponent: couldn't get observer service\n")); return NS_ERROR_FAILURE; } MOZ_LOG(gPIPNSSLog, LogLevel::Debug, ("nsNSSComponent: adding observers\n")); // Using false for the ownsweak parameter means the observer service will @@ -2373,16 +2434,19 @@ namespace mozilla { namespace psm { nsresult InitializeCipherSuite() { NS_ASSERTION(NS_IsMainThread(), "InitializeCipherSuite() can only be accessed in main thread"); if (NSS_SetDomesticPolicy() != SECSuccess) { +#ifdef ANDROID + MOZ_RELEASE_ASSERT(false); +#endif return NS_ERROR_FAILURE; } // Disable any ciphers that NSS might have enabled by default for (uint16_t i = 0; i < SSL_NumImplementedCiphers; ++i) { uint16_t cipher_id = SSL_ImplementedCiphers[i]; SSL_CipherPrefSetDefault(cipher_id, false); }
--- a/taskcluster/taskgraph/decision.py +++ b/taskcluster/taskgraph/decision.py @@ -41,16 +41,21 @@ PER_PROJECT_PARAMETERS = { 'optimize_target_tasks': True, }, 'ash': { 'target_tasks_method': 'ash_tasks', 'optimize_target_tasks': True, }, + 'cedar': { + 'target_tasks_method': 'cedar_tasks', + 'optimize_target_tasks': True, + }, + # the default parameters are used for projects that do not match above. 'default': { 'target_tasks_method': 'default', 'optimize_target_tasks': True, } }
--- a/taskcluster/taskgraph/target_tasks.py +++ b/taskcluster/taskgraph/target_tasks.py @@ -99,16 +99,32 @@ def target_tasks_ash(full_task_graph, pa return False # don't upload symbols if task.attributes['kind'] == 'upload-symbols': return False return True return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)] +@_target_task('cedar_tasks') +def target_tasks_cedar(full_task_graph, parameters): + """Target tasks that only run on the cedar branch.""" + def filter(task): + platform = task.attributes.get('build_platform') + # only select platforms + if platform not in ['linux64']: + return False + if task.attributes.get('unittest_suite'): + if not (task.attributes['unittest_suite'].startswith('mochitest') + or 'xpcshell' in task.attributes['unittest_suite']): + return False + return True + return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)] + + @_target_task('nightly_fennec') def target_tasks_nightly(full_task_graph, parameters): """Select the set of tasks required for a nightly build of fennec. The nightly build process involves a pipeline of builds, signing, and, eventually, uploading the tasks to balrog.""" def filter(task): return task.attributes.get('nightly', False) return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]
--- a/testing/cppunittest.ini +++ b/testing/cppunittest.ini @@ -1,16 +1,13 @@ [ShowSSEConfig] [TestAppShellSteadyState] [TestArray] [TestArrayUtils] [TestAtomics] -[TestAudioBuffers] -skip-if = os == 'b2g' # Bug 1062937 -[TestAudioMixer] [TestBinarySearch] [TestBind] [TestBloomFilter] [TestCOM] skip-if = os != 'win' [TestCasting] [TestCeilingFloor] [TestCertDB]
--- a/testing/web-platform/meta/MANIFEST.json +++ b/testing/web-platform/meta/MANIFEST.json @@ -14811,28 +14811,20 @@ "path": "content-security-policy/svg/svg-policy-with-resource.html", "url": "/content-security-policy/svg/svg-policy-with-resource.html" }, { "path": "cookies/secure/set-from-dom.https.sub.html", "url": "/cookies/secure/set-from-dom.https.sub.html" }, { - "path": "cookies/secure/set-from-dom.sub.html", - "url": "/cookies/secure/set-from-dom.sub.html" - }, - { "path": "cookies/secure/set-from-http.https.sub.html", "url": "/cookies/secure/set-from-http.https.sub.html" }, { - "path": "cookies/secure/set-from-http.sub.html", - "url": "/cookies/secure/set-from-http.sub.html" - }, - { "path": "cookies/secure/set-from-ws.https.sub.html", "url": "/cookies/secure/set-from-ws.https.sub.html" }, { "path": "cookies/secure/set-from-wss.https.sub.html", "url": "/cookies/secure/set-from-wss.https.sub.html" }, {
deleted file mode 100644 --- a/testing/web-platform/meta/cookies/secure/set-from-dom.sub.html.ini +++ /dev/null @@ -1,5 +0,0 @@ -[set-from-dom.sub.html] - type: testharness - ['secure' cookie not sent in HTTP request] - expected: FAIL -
deleted file mode 100644 --- a/testing/web-platform/meta/cookies/secure/set-from-http.sub.html.ini +++ /dev/null @@ -1,5 +0,0 @@ -[set-from-http.sub.html] - type: testharness - ['secure' cookie not sent in HTTP request] - expected: FAIL -
--- a/testing/web-platform/meta/fetch/api/request/request-init-003.sub.html.ini +++ b/testing/web-platform/meta/fetch/api/request/request-init-003.sub.html.ini @@ -1,8 +1,4 @@ [request-init-003.sub.html] type: testharness [Check request values when initialized from url string] expected: FAIL - - [Check request values when initialized from url and init values] - expected: FAIL -
deleted file mode 100644 --- a/testing/web-platform/tests/cookies/secure/set-from-dom.sub.html +++ /dev/null @@ -1,47 +0,0 @@ -<!doctype html> -<html> -<head> - <meta charset=utf-8> - <title>Set 'secure' cookie from `document.cookie` on a non-secure page</title> - <meta name=help href="https://tools.ietf.org/html/draft-west-leave-secure-cookies-alone"> - <script src="/resources/testharness.js"></script> - <script src="/resources/testharnessreport.js"></script> - <script src="/cookies/resources/testharness-helpers.js"></script> -</head> -<body> -<div id=log></div> -<script> - var tests = [ - [ - "'secure' cookie not set in `document.cookie`", - function () { - var originalCookie = document.cookie; - document.cookie = "secure_from_nonsecure_dom=1; secure; path=/"; - assert_equals(document.cookie, originalCookie); - this.done(); - } - ], - [ - "'secure' cookie not sent in HTTP request", - function () { - document.cookie = "secure_from_nonsecure_dom=1; secure; path=/"; - fetch("https://{{host}}:{{ports[https][0]}}/cookies/resources/echo-json.py", { "credentials": "include" }) - .then(this.step_func(function (r) { - return r.json(); - })) - .then(this.step_func_done(function (j) { - assert_equals(j["secure_from_nonsecure_dom"], undefined); - })); - } - ] - ]; - - function clearKnownCookie() { - document.cookie = "secure_from_nonsecure_dom=0; Secure; expires=Thu, 01 Jan 1970 00:00:01 GMT; path=/"; - } - - executeTestsSerially(tests, clearKnownCookie, clearKnownCookie); -</script> -</body> -</html> -
deleted file mode 100644 --- a/testing/web-platform/tests/cookies/secure/set-from-http.sub.html +++ /dev/null @@ -1,36 +0,0 @@ -<!doctype html> -<html> -<head> - <meta charset=utf-8> - <title>Set 'secure' cookie from `Set-Cookie` HTTP header on a non-secure page</title> - <meta name=help href="https://tools.ietf.org/html/draft-west-leave-secure-cookies-alone"> - <script src="/resources/testharness.js"></script> - <script src="/resources/testharnessreport.js"></script> - <script src="/cookies/resources/testharness-helpers.js"></script> -</head> -<body> -<div id=log></div> -<script> - function clearKnownCookie() { - document.cookie = "secure_from_nonsecure_http=0; Secure; expires=Thu, 01 Jan 1970 00:00:01 GMT; path=/"; - } - - test(function () { - assert_equals(document.cookie.match(/secure_from_nonsecure_http=1/), null); - }, "'secure' cookie not present in `document.cookie`"); - - promise_test(function (t) { - t.add_cleanup(clearKnownCookie); - return fetch("https://{{host}}:{{ports[https][0]}}/cookies/resources/echo-json.py", - { "credentials": "include" }) - .then(function (r) { - return r.json(); - }) - .then(function (j) { - assert_equals(j["secure_from_nonsecure_http"], undefined); - }); - }, "'secure' cookie not sent in HTTP request"); -</script> -</body> -</html> -
--- a/testing/web-platform/tests/service-workers/service-worker/fetch-event.https.html +++ b/testing/web-platform/tests/service-workers/service-worker/fetch-event.https.html @@ -414,24 +414,23 @@ async_test(function(t) { service_worker_unregister_and_register(t, worker, scope) .then(function(reg) { return wait_for_state(t, reg.installing, 'activated'); }) .then(function() { return with_iframe(scope + fragment); }) .then(function(frame) { assert_equals( frame.contentDocument.body.textContent, - 'Fragment Not Found', - 'Service worker should not expose URL fragments.'); + 'Fragment Found :' + fragment, + 'Service worker should expose URL fragments in request.'); frame.remove(); return service_worker_unregister_and_done(t, scope); }) .catch(unreached_rejection(t)); }, 'Service Worker must not expose FetchEvent URL fragments.'); - async_test(function(t) { var scope = 'resources/simple.html?cache'; var frame; var cacheTypes = [ undefined, 'default', 'no-store', 'reload', 'no-cache', 'force-cache', 'only-if-cached' ]; service_worker_unregister_and_register(t, worker, scope) .then(function(reg) {
--- a/testing/web-platform/tests/service-workers/service-worker/resources/fetch-event-test-worker.js +++ b/testing/web-platform/tests/service-workers/service-worker/resources/fetch-event-test-worker.js @@ -75,31 +75,29 @@ function handleUsedCheck(event) { lastResponseForUsedCheck = response; return response; })); } else { event.respondWith(new Response( 'bodyUsed: ' + lastResponseForUsedCheck.bodyUsed)); } } - function handleFragmentCheck(event) { var body; if (event.request.url.indexOf('#') === -1) { body = 'Fragment Not Found'; } else { - body = 'Fragment Found'; + body = 'Fragment Found :' + + event.request.url.substring(event.request.url.indexOf('#')); } event.respondWith(new Response(body)); } - function handleCache(event) { event.respondWith(new Response(event.request.cache)); } - function handleEventSource(event) { if (event.request.mode === 'navigate') { return; } var data = { mode: event.request.mode, cache: event.request.cache, credentials: event.request.credentials
--- a/toolkit/components/extensions/.eslintrc.js +++ b/toolkit/components/extensions/.eslintrc.js @@ -1,13 +1,17 @@ "use strict"; module.exports = { // eslint-disable-line no-undef "extends": "../../.eslintrc.js", + "parserOptions": { + "ecmaVersion": 8, + }, + "globals": { "Cc": true, "Ci": true, "Components": true, "Cr": true, "Cu": true, "dump": true, "TextDecoder": false, @@ -27,17 +31,17 @@ module.exports = { // eslint-disable-lin "TabManager": true, "WindowListManager": true, "XPCOMUtils": true, }, "rules": { // Rules from the mozilla plugin "mozilla/balanced-listeners": 2, - "mozilla/no-aArgs": 1, + "mozilla/no-aArgs": 2, "mozilla/no-cpows-in-tests": 1, "mozilla/var-only-at-top-level": 1, "valid-jsdoc": [2, { "prefer": { "return": "returns", }, "preferType": { @@ -75,17 +79,17 @@ module.exports = { // eslint-disable-lin // No space before always a space after a comma "comma-spacing": [2, {"before": false, "after": true}], // Commas at the end of the line not the start "comma-style": 2, // Don't require spaces around computed properties - "computed-property-spacing": [1, "never"], + "computed-property-spacing": [2, "never"], // Functions are not required to consistently return something or nothing "consistent-return": 0, // Require braces around blocks that start a new line "curly": [2, "all"], // Always require a trailing EOL
--- a/toolkit/components/extensions/ExtensionUtils.jsm +++ b/toolkit/components/extensions/ExtensionUtils.jsm @@ -537,44 +537,48 @@ let IconDetails = { let imageData = details.imageData; if (typeof imageData == "string") { imageData = {"19": imageData}; } for (let size of Object.keys(imageData)) { if (!INTEGER.test(size)) { - throw new Error(`Invalid icon size ${size}, must be an integer`); + throw new ExtensionError(`Invalid icon size ${size}, must be an integer`); } result[size] = imageData[size]; } } if (details.path) { let path = details.path; if (typeof path != "object") { path = {"19": path}; } let baseURI = context ? context.uri : extension.baseURI; for (let size of Object.keys(path)) { if (!INTEGER.test(size)) { - throw new Error(`Invalid icon size ${size}, must be an integer`); + throw new ExtensionError(`Invalid icon size ${size}, must be an integer`); } let url = baseURI.resolve(path[size]); // The Chrome documentation specifies these parameters as // relative paths. We currently accept absolute URLs as well, // which means we need to check that the extension is allowed // to load them. This will throw an error if it's not allowed. - Services.scriptSecurityManager.checkLoadURIStrWithPrincipal( - extension.principal, url, - Services.scriptSecurityManager.DISALLOW_SCRIPT); + try { + Services.scriptSecurityManager.checkLoadURIStrWithPrincipal( + extension.principal, url, + Services.scriptSecurityManager.DISALLOW_SCRIPT); + } catch (e) { + throw new ExtensionError(`Illegal URL ${url}`); + } result[size] = url; } } } catch (e) { // Function is called from extension code, delegate error. if (context) { throw e;
--- a/toolkit/components/extensions/Schemas.jsm +++ b/toolkit/components/extensions/Schemas.jsm @@ -1032,17 +1032,22 @@ class ObjectType extends Type { pattern, type: parseProperty(schema.patternProperties[propName]), }); } // Parse "additionalProperties" schema. let additionalProperties = null; if (schema.additionalProperties) { - additionalProperties = Schemas.parseSchema(schema.additionalProperties, path); + let type = schema.additionalProperties; + if (type === true) { + type = {"type": "any"}; + } + + additionalProperties = Schemas.parseSchema(type, path); } return new this(schema, properties, additionalProperties, patternProperties, schema.isInstanceOf || null); } constructor(schema, properties, additionalProperties, patternProperties, isInstanceOf) { super(schema); this.properties = properties; @@ -1381,17 +1386,17 @@ class FunctionType extends Type { static get EXTRA_PROPERTIES() { return ["parameters", "async", "returns", ...super.EXTRA_PROPERTIES]; } static parseSchema(schema, path, extraProperties = []) { this.checkSchemaProperties(schema, path, extraProperties); let isAsync = !!schema.async; - let isExpectingCallback = isAsync; + let isExpectingCallback = typeof schema.async === "string"; let parameters = null; if ("parameters" in schema) { parameters = []; for (let param of schema.parameters) { // Callbacks default to optional for now, because of promise // handling. 
let isCallback = isAsync && param.name == schema.async; if (isCallback) { @@ -1407,19 +1412,20 @@ class FunctionType extends Type { } } if (isExpectingCallback) { throw new Error(`Internal error: Expected a callback parameter with name ${schema.async}`); } let hasAsyncCallback = false; if (isAsync) { - if (parameters && parameters.length && parameters[parameters.length - 1].name == schema.async) { - hasAsyncCallback = true; - } + hasAsyncCallback = (parameters && + parameters.length && + parameters[parameters.length - 1].name == schema.async); + if (schema.returns) { throw new Error("Internal error: Async functions must not have return values."); } if (schema.allowAmbiguousOptionalArguments && !hasAsyncCallback) { throw new Error("Internal error: Async functions with ambiguous arguments must declare the callback as the last parameter"); } } @@ -1950,17 +1956,21 @@ this.Schemas = { parseSchemas() { Object.defineProperty(this, "namespaces", { enumerable: true, configurable: true, value: new Map(), }); for (let json of this.schemaJSON.values()) { - this.loadSchema(json); + try { + this.loadSchema(json); + } catch (e) { + Cu.reportError(e); + } } return this.namespaces; }, loadSchema(json) { for (let namespace of json) { let name = namespace.namespace;
--- a/toolkit/components/extensions/ext-c-test.js +++ b/toolkit/components/extensions/ext-c-test.js @@ -1,11 +1,85 @@ "use strict"; -function testApiFactory(context) { +/** + * Checks whether the given error matches the given expectations. + * + * @param {*} error + * The error to check. + * @param {string|RegExp|function|null} expectedError + * The expectation to check against. If this parameter is: + * + * - a string, the error message must exactly equal the string. + * - a regular expression, it must match the error message. + * - a function, it is called with the error object and its + * return value is returned. + * - null, the function always returns true. + * @param {BaseContext} context + * + * @returns {boolean} + * True if the error matches the expected error. + */ +function errorMatches(error, expectedError, context) { + if (expectedError === null) { + return true; + } + + if (typeof expectedError === "function") { + return context.runSafeWithoutClone(expectedError, error); + } + + if (typeof error !== "object" || error == null || + typeof error.message !== "string") { + return false; + } + + if (typeof expectedError === "string") { + return error.message === expectedError; + } + + try { + return expectedError.test(error.message); + } catch (e) { + Cu.reportError(e); + } + + return false; +} + +/** + * Calls .toSource() on the given value, but handles null, undefined, + * and errors. + * + * @param {*} value + * @returns {string} + */ +function toSource(value) { + if (value === null) { + return null; + } + if (value === undefined) { + return null; + } + if (typeof value === "string") { + return JSON.stringify(value); + } + + try { + return String(value.toSource()); + } catch (e) { + return "<unknown>"; + } +} + +function makeTestAPI(context) { + function assertTrue(...args) { + context.childManager.callParentFunctionNoReturn("test.assertTrue", args); + } + return { test: { // These functions accept arbitrary values. 
Convert the parameters to // make sure that the values can be cloned structurally for IPC. sendMessage(...args) { args = Cu.cloneInto(args, context.cloneScope); context.childManager.callParentFunctionNoReturn("test.sendMessage", args); @@ -35,14 +109,51 @@ function testApiFactory(context) { actual += " (different)"; } context.childManager.callParentFunctionNoReturn("test.assertEq", [ expected, actual, String(msg), ]); }, + + assertRejects(promise, expectedError, msg) { + // Wrap in a native promise for consistency. + promise = Promise.resolve(promise); + + if (msg) { + msg = `: ${msg}`; + } + + return promise.then(result => { + assertTrue(false, `Promise resolved, expected rejection${msg}`); + }, error => { + let errorMessage = toSource(error && error.message); + + assertTrue(errorMatches(error, expectedError, context), + `Promise rejected, expecting rejection to match ${toSource(expectedError)}, ` + + `got ${errorMessage}${msg}`); + }); + }, + + assertThrows(func, expectedError, msg) { + if (msg) { + msg = `: ${msg}`; + } + + try { + func(); + + assertTrue(false, `Function did not throw, expected error${msg}`); + } catch (error) { + let errorMessage = toSource(error && error.message); + + assertTrue(errorMatches(error, expectedError, context), + `Promise rejected, expecting rejection to match ${toSource(expectedError)}` + + `got ${errorMessage}${msg}`); + } + }, }, }; } -extensions.registerSchemaAPI("test", "addon_child", testApiFactory); -extensions.registerSchemaAPI("test", "content_child", testApiFactory); +extensions.registerSchemaAPI("test", "addon_child", makeTestAPI); +extensions.registerSchemaAPI("test", "content_child", makeTestAPI);
--- a/toolkit/components/extensions/ext-test.js +++ b/toolkit/components/extensions/ext-test.js @@ -20,17 +20,17 @@ extensions.on("shutdown", (type, extensi extensions.on("test-message", (type, extension, ...args) => { let handlers = messageHandlers.get(extension); for (let handler of handlers) { handler(...args); } }); /* eslint-enable mozilla/balanced-listeners */ -function testApiFactory(context) { +function makeTestAPI(context) { let {extension} = context; return { test: { sendMessage: function(...args) { extension.emit("test-message", ...args); }, notifyPass: function(msg) { @@ -77,10 +77,10 @@ function testApiFactory(context) { return () => { handlers.delete(fire); }; }).api(), }, }; } -extensions.registerSchemaAPI("test", "addon_parent", testApiFactory); -extensions.registerSchemaAPI("test", "content_parent", testApiFactory); +extensions.registerSchemaAPI("test", "addon_parent", makeTestAPI); +extensions.registerSchemaAPI("test", "content_parent", makeTestAPI);
--- a/toolkit/components/extensions/schemas/test.json +++ b/toolkit/components/extensions/schemas/test.json @@ -121,29 +121,81 @@ "name": "assertLastError", "type": "function", "unsupported": true, "parameters": [ {"type": "string", "name": "expectedError"} ] }, { + "name": "assertRejects", + "type": "function", + "async": true, + "parameters": [ + { + "name": "promise", + "$ref": "Promise" + }, + { + "name": "expectedError", + "$ref": "ExpectedError", + "optional": true + }, + { + "name": "message", + "type": "string", + "optional": true + } + ] + }, + { "name": "assertThrows", "type": "function", - "unsupported": true, "parameters": [ - {"type": "function", "name": "fn"}, + { + "name": "func", + "type": "function" + }, + { + "name": "expectedError", + "$ref": "ExpectedError", + "optional": true + }, + { + "name": "message", + "type": "string", + "optional": true + } + ] + } + ], + "types": [ + { + "id": "ExpectedError", + "choices": [ + {"type": "string"}, + {"type": "object", "isInstanceOf": "RegExp", "additionalProperties": true}, + {"type": "function"} + ] + }, + { + "id": "Promise", + "choices": [ { "type": "object", - "name": "self", - "additionalProperties": {"type": "any"}, - "optional": true + "properties": { + "then": {"type": "function"} + }, + "additionalProperties": true }, - {"type": "array", "items": {"type": "any"}, "name": "args", "optional": true}, - {"choices": [ {"type": "string"}, {"type": "object", "isInstanceOf": "RegExp"} ], "name": "message", "optional": true} + { + "type": "object", + "isInstanceOf": "Promise", + "additionalProperties": true + } ] } ], "events": [ { "name": "onMessage", "type": "function", "description": "Used to test sending messages to extensions.",
--- a/toolkit/components/extensions/test/mochitest/.eslintrc.js +++ b/toolkit/components/extensions/test/mochitest/.eslintrc.js @@ -15,9 +15,13 @@ module.exports = { // eslint-disable-lin "waitForLoad": true, "promiseConsoleOutput": true, "ExtensionTestUtils": false, "NetUtil": true, "webrequest_test": false, "XPCOMUtils": true, }, + + "rules": { + "no-shadow": 0, + }, };
--- a/toolkit/components/extensions/test/mochitest/test_chrome_ext_background_debug_global.html +++ b/toolkit/components/extensions/test/mochitest/test_chrome_ext_background_debug_global.html @@ -59,17 +59,17 @@ add_task(function* () { is("test!", context.testThing, "global context is the background script context"); resolve(); } }, }); }); let addon = yield new Promise((resolve, reject) => { - AddonManager.getAddonByID(ID, aAddon => aAddon ? resolve(aAddon) : reject()); + AddonManager.getAddonByID(ID, addon => addon ? resolve(addon) : reject()); }); ok(addon, `Got the addon wrapper for ${addon.id}`); function waitForDebugGlobalChanges(times, initialAddonInstanceID) { return new Promise((resolve) => { AddonManager.addAddonListener({ count: 0,
--- a/toolkit/components/extensions/test/mochitest/test_chrome_ext_contentscript_unrecognizedprop_warning.html +++ b/toolkit/components/extensions/test/mochitest/test_chrome_ext_contentscript_unrecognizedprop_warning.html @@ -13,26 +13,25 @@ <script type="text/javascript"> "use strict"; const BASE = "http://mochi.test:8888/chrome/toolkit/components/extensions/test/mochitest"; add_task(function* test_contentscript() { function background() { - browser.runtime.onMessage.addListener((msg) => { + browser.runtime.onMessage.addListener(async (msg) => { if (msg == "loaded") { - browser.tabs.query({active: true, currentWindow: true}).then((tabs) => { - // NOTE: we're removing the tab from here because doing a win.close() - // from the chrome test code is raising a "TypeError: can 't access - // dead object" exception. - browser.tabs.remove(tabs[0].id); + // NOTE: we're removing the tab from here because doing a win.close() + // from the chrome test code is raising a "TypeError: can't access + // dead object" exception. + let tabs = await browser.tabs.query({active: true, currentWindow: true}); + await browser.tabs.remove(tabs[0].id); - browser.test.notifyPass("content-script-loaded"); - }); + browser.test.notifyPass("content-script-loaded"); } }); } function contentScript() { chrome.runtime.sendMessage("loaded"); }
--- a/toolkit/components/extensions/test/mochitest/test_chrome_ext_downloads_saveAs.html +++ b/toolkit/components/extensions/test/mochitest/test_chrome_ext_downloads_saveAs.html @@ -10,25 +10,28 @@ <body> <script type="text/javascript"> "use strict"; add_task(function* test_downloads_saveAs() { function background() { const url = URL.createObjectURL(new Blob(["file content"])); - browser.test.onMessage.addListener(() => - browser.downloads.download({url, saveAs: true}) - .then(id => browser.downloads.onChanged.addListener(delta => { + browser.test.onMessage.addListener(async () => { + try { + let id = await browser.downloads.download({url, saveAs: true}); + browser.downloads.onChanged.addListener(delta => { if (delta.state.current === "complete") { browser.test.sendMessage("done", {ok: true, id}); } - })).catch(({message}) => { - browser.test.sendMessage("done", {ok: false, message}); - })); + }); + } catch ({message}) { + browser.test.sendMessage("done", {ok: false, message}); + } + }); browser.test.sendMessage("ready"); } const {MockFilePicker} = SpecialPowers; const manifest = {background, manifest: {permissions: ["downloads"]}}; const extension = ExtensionTestUtils.loadExtension(manifest); MockFilePicker.init(window);
--- a/toolkit/components/extensions/test/mochitest/test_chrome_ext_hybrid_addons.html +++ b/toolkit/components/extensions/test/mochitest/test_chrome_ext_hybrid_addons.html @@ -95,23 +95,22 @@ add_task(function* test_sdk_hybrid_addon function backgroundScript() { browser.runtime.sendMessage("background message", (reply) => { browser.test.assertEq("sdk received message: background message", reply, "Got the expected reply from the SDK context"); browser.test.notifyPass("sdk.webext-api.onmessage"); }); } - function sdkMainScript() { + async function sdkMainScript() { /* globals require */ const webext = require("sdk/webextension"); - webext.startup().then(({browser}) => { - browser.runtime.onMessage.addListener((msg, sender, sendReply) => { - sendReply(`sdk received message: ${msg}`); - }); + let {browser} = await webext.startup(); + browser.runtime.onMessage.addListener((msg, sender, sendReply) => { + sendReply(`sdk received message: ${msg}`); }); } let id = "fake@sdk.hybrid.addon"; let extension = ExtensionTestUtils.loadExtension({ useAddonManager: "temporary", files: generateClassicExtensionFiles({ id,
--- a/toolkit/components/extensions/test/mochitest/test_chrome_ext_webnavigation_resolved_urls.html +++ b/toolkit/components/extensions/test/mochitest/test_chrome_ext_webnavigation_resolved_urls.html @@ -13,23 +13,22 @@ <script type="text/javascript"> "use strict"; add_task(function* webnav_unresolved_uri_on_expected_URI_scheme() { function background() { let checkURLs; - browser.webNavigation.onCompleted.addListener((msg) => { + browser.webNavigation.onCompleted.addListener(async msg => { if (checkURLs.length > 0) { let expectedURL = checkURLs.shift(); browser.test.assertEq(expectedURL, msg.url, "Got the expected URL"); - browser.tabs.remove(msg.tabId).then(() => { - browser.test.sendMessage("next"); - }); + await browser.tabs.remove(msg.tabId); + browser.test.sendMessage("next"); } }); browser.test.onMessage.addListener((name, urls) => { if (name == "checkURLs") { checkURLs = urls; } });
--- a/toolkit/components/extensions/test/mochitest/test_ext_all_apis.js +++ b/toolkit/components/extensions/test/mochitest/test_ext_all_apis.js @@ -34,16 +34,18 @@ let expectedCommonApis = [ "runtime.id", "runtime.lastError", "runtime.onConnect", "runtime.onMessage", "runtime.sendMessage", // If you want to add a new powerful test API, please see bug 1287233. "test.assertEq", "test.assertFalse", + "test.assertRejects", + "test.assertThrows", "test.assertTrue", "test.fail", "test.log", "test.notifyFail", "test.notifyPass", "test.onMessage", "test.sendMessage", "test.succeed",
--- a/toolkit/components/extensions/test/mochitest/test_ext_cookies.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_cookies.html @@ -9,17 +9,17 @@ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/> </head> <body> <script type="text/javascript"> "use strict"; add_task(function* test_cookies() { - function background() { + async function background() { function assertExpected(expected, cookie) { for (let key of Object.keys(cookie)) { browser.test.assertTrue(key in expected, `found property ${key}`); browser.test.assertEq(expected[key], cookie[key], `property value for ${key} is correct`); } browser.test.assertEq(Object.keys(expected).length, Object.keys(cookie).length, "all expected properties found"); } @@ -40,193 +40,195 @@ add_task(function* test_cookies() { path: "/", secure: false, httpOnly: false, session: false, expirationDate: THE_FUTURE, storeId: STORE_ID, }; - browser.cookies.set({url: TEST_URL, name: "name1", value: "value1", expirationDate: THE_FUTURE}).then(cookie => { - assertExpected(expected, cookie); - return browser.cookies.get({url: TEST_URL, name: "name1"}); - }).then(cookie => { - assertExpected(expected, cookie); - return browser.cookies.getAll({name: "name1"}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 1, "one cookie found for matching name"); - assertExpected(expected, cookies[0]); - return browser.cookies.getAll({domain: "example.org"}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 1, "one cookie found for matching domain"); - assertExpected(expected, cookies[0]); - return browser.cookies.getAll({domain: "example.net"}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 0, "no cookies found for non-matching domain"); - return browser.cookies.getAll({secure: false}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 1, "one non-secure cookie found"); - assertExpected(expected, cookies[0]); - return browser.cookies.getAll({secure: true}); - 
}).then(cookies => { - browser.test.assertEq(cookies.length, 0, "no secure cookies found"); - return browser.cookies.getAll({storeId: STORE_ID}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 1, "one cookie found for valid storeId"); - assertExpected(expected, cookies[0]); - return browser.cookies.getAll({storeId: "invalid_id"}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 0, "no cookies found for invalid storeId"); - return browser.cookies.remove({url: TEST_URL, name: "name1"}); - }).then(details => { - assertExpected({url: TEST_URL, name: "name1", storeId: STORE_ID}, details); - return browser.cookies.get({url: TEST_URL, name: "name1"}); - }).then(cookie => { - browser.test.assertEq(null, cookie, "removed cookie not found"); - return browser.cookies.getAllCookieStores(); - }).then(stores => { - browser.test.assertEq(1, stores.length, "expected number of stores returned"); + let cookie = await browser.cookies.set({url: TEST_URL, name: "name1", value: "value1", expirationDate: THE_FUTURE}); + assertExpected(expected, cookie); + + cookie = await browser.cookies.get({url: TEST_URL, name: "name1"}); + assertExpected(expected, cookie); + + let cookies = await browser.cookies.getAll({name: "name1"}); + browser.test.assertEq(cookies.length, 1, "one cookie found for matching name"); + assertExpected(expected, cookies[0]); + + cookies = await browser.cookies.getAll({domain: "example.org"}); + browser.test.assertEq(cookies.length, 1, "one cookie found for matching domain"); + assertExpected(expected, cookies[0]); + + cookies = await browser.cookies.getAll({domain: "example.net"}); + browser.test.assertEq(cookies.length, 0, "no cookies found for non-matching domain"); + + cookies = await browser.cookies.getAll({secure: false}); + browser.test.assertEq(cookies.length, 1, "one non-secure cookie found"); + assertExpected(expected, cookies[0]); + + cookies = await browser.cookies.getAll({secure: true}); + browser.test.assertEq(cookies.length, 
0, "no secure cookies found"); + + cookies = await browser.cookies.getAll({storeId: STORE_ID}); + browser.test.assertEq(cookies.length, 1, "one cookie found for valid storeId"); + assertExpected(expected, cookies[0]); + + cookies = await browser.cookies.getAll({storeId: "invalid_id"}); + browser.test.assertEq(cookies.length, 0, "no cookies found for invalid storeId"); + + let details = await browser.cookies.remove({url: TEST_URL, name: "name1"}); + assertExpected({url: TEST_URL, name: "name1", storeId: STORE_ID}, details); + + cookie = await browser.cookies.get({url: TEST_URL, name: "name1"}); + browser.test.assertEq(null, cookie, "removed cookie not found"); + + let stores = await browser.cookies.getAllCookieStores(); + browser.test.assertEq(1, stores.length, "expected number of stores returned"); + browser.test.assertEq(STORE_ID, stores[0].id, "expected store id returned"); + browser.test.assertEq(1, stores[0].tabIds.length, "one tab returned for store"); + + { + let privateWindow = await browser.windows.create({incognito: true}); + let stores = await browser.cookies.getAllCookieStores(); + + browser.test.assertEq(2, stores.length, "expected number of stores returned"); browser.test.assertEq(STORE_ID, stores[0].id, "expected store id returned"); browser.test.assertEq(1, stores[0].tabIds.length, "one tab returned for store"); - return browser.windows.create({incognito: true}); - }).then(privateWindow => { - return browser.cookies.getAllCookieStores().then(stores => { - browser.test.assertEq(2, stores.length, "expected number of stores returned"); - browser.test.assertEq(STORE_ID, stores[0].id, "expected store id returned"); - browser.test.assertEq(1, stores[0].tabIds.length, "one tab returned for store"); - browser.test.assertEq(PRIVATE_STORE_ID, stores[1].id, "expected private store id returned"); - browser.test.assertEq(1, stores[0].tabIds.length, "one tab returned for private store"); - return browser.windows.remove(privateWindow.id); - }); - }).then(() => { - 
return browser.cookies.set({url: TEST_URL, name: "name2", domain: ".example.org", expirationDate: THE_FUTURE}); - }).then(cookie => { - browser.test.assertEq(false, cookie.hostOnly, "cookie is not a hostOnly cookie"); - return browser.cookies.remove({url: TEST_URL, name: "name2"}); - }).then(details => { - assertExpected({url: TEST_URL, name: "name2", storeId: STORE_ID}, details); - // Create a session cookie. - return browser.cookies.set({url: TEST_URL, name: "name1", value: "value1"}); - }).then(cookie => { - browser.test.assertEq(true, cookie.session, "session cookie set"); - return browser.cookies.get({url: TEST_URL, name: "name1"}); - }).then(cookie => { - browser.test.assertEq(true, cookie.session, "got session cookie"); - return browser.cookies.getAll({session: true}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 1, "one session cookie found"); - browser.test.assertEq(true, cookies[0].session, "found session cookie"); - return browser.cookies.getAll({session: false}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 0, "no non-session cookies found"); - return browser.cookies.remove({url: TEST_URL, name: "name1"}); - }).then(details => { - assertExpected({url: TEST_URL, name: "name1", storeId: STORE_ID}, details); - return browser.cookies.get({url: TEST_URL, name: "name1"}); - }).then(cookie => { - browser.test.assertEq(null, cookie, "removed cookie not found"); - return browser.cookies.set({url: TEST_SECURE_URL, name: "name1", value: "value1", secure: true}); - }).then(cookie => { - browser.test.assertEq(true, cookie.secure, "secure cookie set"); - return browser.cookies.get({url: TEST_SECURE_URL, name: "name1"}); - }).then(cookie => { - browser.test.assertEq(true, cookie.session, "got secure cookie"); - return browser.cookies.getAll({secure: true}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 1, "one secure cookie found"); - browser.test.assertEq(true, cookies[0].secure, "found secure cookie"); - return 
browser.cookies.getAll({secure: false}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 0, "no non-secure cookies found"); - return browser.cookies.remove({url: TEST_SECURE_URL, name: "name1"}); - }).then(details => { - assertExpected({url: TEST_SECURE_URL, name: "name1", storeId: STORE_ID}, details); - return browser.cookies.get({url: TEST_SECURE_URL, name: "name1"}); - }).then(cookie => { - browser.test.assertEq(null, cookie, "removed cookie not found"); - return browser.cookies.set({url: TEST_URL_WITH_PATH, path: TEST_COOKIE_PATH, name: "name1", value: "value1", expirationDate: THE_FUTURE}); - }).then(cookie => { - browser.test.assertEq(TEST_COOKIE_PATH, cookie.path, "created cookie with path"); - return browser.cookies.get({url: TEST_URL_WITH_PATH, name: "name1"}); - }).then(cookie => { - browser.test.assertEq(TEST_COOKIE_PATH, cookie.path, "got cookie with path"); - return browser.cookies.getAll({path: TEST_COOKIE_PATH}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 1, "one cookie with path found"); - browser.test.assertEq(TEST_COOKIE_PATH, cookies[0].path, "found cookie with path"); - return browser.cookies.get({url: TEST_URL + "invalid_path", name: "name1"}); - }).then(cookie => { - browser.test.assertEq(null, cookie, "get with invalid path returns null"); - return browser.cookies.getAll({path: "/invalid_path"}); - }).then(cookies => { - browser.test.assertEq(cookies.length, 0, "getAll with invalid path returns 0 cookies"); - return browser.cookies.remove({url: TEST_URL_WITH_PATH, name: "name1"}); - }).then(details => { - assertExpected({url: TEST_URL_WITH_PATH, name: "name1", storeId: STORE_ID}, details); - return browser.cookies.set({url: TEST_URL, name: "name1", value: "value1", httpOnly: true}); - }).then(cookie => { - browser.test.assertEq(true, cookie.httpOnly, "httpOnly cookie set"); - return browser.cookies.set({url: TEST_URL, name: "name1", value: "value1", httpOnly: false}); - }).then(cookie => { - 
browser.test.assertEq(false, cookie.httpOnly, "non-httpOnly cookie set"); - return browser.cookies.remove({url: TEST_URL, name: "name1"}); - }).then(details => { - assertExpected({url: TEST_URL, name: "name1", storeId: STORE_ID}, details); - return browser.cookies.set({url: TEST_URL}); - }).then(cookie => { - browser.test.assertEq("", cookie.name, "default name set"); - browser.test.assertEq("", cookie.value, "default value set"); - browser.test.assertEq(true, cookie.session, "no expiry date created session cookie"); - return browser.windows.create({incognito: true}); - }).then(privateWindow => { + browser.test.assertEq(PRIVATE_STORE_ID, stores[1].id, "expected private store id returned"); + browser.test.assertEq(1, stores[0].tabIds.length, "one tab returned for private store"); + + await browser.windows.remove(privateWindow.id); + } + + cookie = await browser.cookies.set({url: TEST_URL, name: "name2", domain: ".example.org", expirationDate: THE_FUTURE}); + browser.test.assertEq(false, cookie.hostOnly, "cookie is not a hostOnly cookie"); + + details = await browser.cookies.remove({url: TEST_URL, name: "name2"}); + assertExpected({url: TEST_URL, name: "name2", storeId: STORE_ID}, details); + + // Create a session cookie. 
+ cookie = await browser.cookies.set({url: TEST_URL, name: "name1", value: "value1"}); + browser.test.assertEq(true, cookie.session, "session cookie set"); + + cookie = await browser.cookies.get({url: TEST_URL, name: "name1"}); + browser.test.assertEq(true, cookie.session, "got session cookie"); + + cookies = await browser.cookies.getAll({session: true}); + browser.test.assertEq(cookies.length, 1, "one session cookie found"); + browser.test.assertEq(true, cookies[0].session, "found session cookie"); + + cookies = await browser.cookies.getAll({session: false}); + browser.test.assertEq(cookies.length, 0, "no non-session cookies found"); + + details = await browser.cookies.remove({url: TEST_URL, name: "name1"}); + assertExpected({url: TEST_URL, name: "name1", storeId: STORE_ID}, details); + + cookie = await browser.cookies.get({url: TEST_URL, name: "name1"}); + browser.test.assertEq(null, cookie, "removed cookie not found"); + + cookie = await browser.cookies.set({url: TEST_SECURE_URL, name: "name1", value: "value1", secure: true}); + browser.test.assertEq(true, cookie.secure, "secure cookie set"); + + cookie = await browser.cookies.get({url: TEST_SECURE_URL, name: "name1"}); + browser.test.assertEq(true, cookie.session, "got secure cookie"); + + cookies = await browser.cookies.getAll({secure: true}); + browser.test.assertEq(cookies.length, 1, "one secure cookie found"); + browser.test.assertEq(true, cookies[0].secure, "found secure cookie"); + + cookies = await browser.cookies.getAll({secure: false}); + browser.test.assertEq(cookies.length, 0, "no non-secure cookies found"); + + details = await browser.cookies.remove({url: TEST_SECURE_URL, name: "name1"}); + assertExpected({url: TEST_SECURE_URL, name: "name1", storeId: STORE_ID}, details); + + cookie = await browser.cookies.get({url: TEST_SECURE_URL, name: "name1"}); + browser.test.assertEq(null, cookie, "removed cookie not found"); + + cookie = await browser.cookies.set({url: TEST_URL_WITH_PATH, path: 
TEST_COOKIE_PATH, name: "name1", value: "value1", expirationDate: THE_FUTURE}); + browser.test.assertEq(TEST_COOKIE_PATH, cookie.path, "created cookie with path"); + + cookie = await browser.cookies.get({url: TEST_URL_WITH_PATH, name: "name1"}); + browser.test.assertEq(TEST_COOKIE_PATH, cookie.path, "got cookie with path"); + + cookies = await browser.cookies.getAll({path: TEST_COOKIE_PATH}); + browser.test.assertEq(cookies.length, 1, "one cookie with path found"); + browser.test.assertEq(TEST_COOKIE_PATH, cookies[0].path, "found cookie with path"); + + cookie = await browser.cookies.get({url: TEST_URL + "invalid_path", name: "name1"}); + browser.test.assertEq(null, cookie, "get with invalid path returns null"); + + cookies = await browser.cookies.getAll({path: "/invalid_path"}); + browser.test.assertEq(cookies.length, 0, "getAll with invalid path returns 0 cookies"); + + details = await browser.cookies.remove({url: TEST_URL_WITH_PATH, name: "name1"}); + assertExpected({url: TEST_URL_WITH_PATH, name: "name1", storeId: STORE_ID}, details); + + cookie = await browser.cookies.set({url: TEST_URL, name: "name1", value: "value1", httpOnly: true}); + browser.test.assertEq(true, cookie.httpOnly, "httpOnly cookie set"); + + cookie = await browser.cookies.set({url: TEST_URL, name: "name1", value: "value1", httpOnly: false}); + browser.test.assertEq(false, cookie.httpOnly, "non-httpOnly cookie set"); + + details = await browser.cookies.remove({url: TEST_URL, name: "name1"}); + assertExpected({url: TEST_URL, name: "name1", storeId: STORE_ID}, details); + + cookie = await browser.cookies.set({url: TEST_URL}); + browser.test.assertEq("", cookie.name, "default name set"); + browser.test.assertEq("", cookie.value, "default value set"); + browser.test.assertEq(true, cookie.session, "no expiry date created session cookie"); + + { + let privateWindow = await browser.windows.create({incognito: true}); + // Hacky work-around for bugzil.la/1309637 - return new Promise(resolve => 
setTimeout(resolve, 700, privateWindow)); - }).then(privateWindow => { - return browser.cookies.set({url: TEST_URL, name: "store", value: "private", expirationDate: THE_FUTURE, storeId: PRIVATE_STORE_ID}).then(cookie => { - browser.test.assertEq("private", cookie.value, "set the private cookie"); - return browser.cookies.set({url: TEST_URL, name: "store", value: "default", expirationDate: THE_FUTURE, storeId: STORE_ID}); - }).then(cookie => { - browser.test.assertEq("default", cookie.value, "set the default cookie"); - return browser.cookies.get({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}); - }).then(cookie => { - browser.test.assertEq("private", cookie.value, "get the private cookie"); - browser.test.assertEq(PRIVATE_STORE_ID, cookie.storeId, "get the private cookie storeId"); - return browser.cookies.get({url: TEST_URL, name: "store", storeId: STORE_ID}); - }).then(cookie => { - browser.test.assertEq("default", cookie.value, "get the default cookie"); - browser.test.assertEq(STORE_ID, cookie.storeId, "get the default cookie storeId"); - return browser.cookies.remove({url: TEST_URL, name: "store", storeId: STORE_ID}); - }).then(details => { - assertExpected({url: TEST_URL, name: "store", storeId: STORE_ID}, details); - return browser.cookies.get({url: TEST_URL, name: "store", storeId: STORE_ID}); - }).then(cookie => { - browser.test.assertEq(null, cookie, "deleted the default cookie"); - return browser.cookies.remove({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}); - }).then(details => { - assertExpected({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}, details); - return browser.cookies.get({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}); - }).then(cookie => { - browser.test.assertEq(null, cookie, "deleted the private cookie"); - return browser.windows.remove(privateWindow.id); - }); - }).then(() => { - browser.test.notifyPass("cookies"); - }); + await new Promise(resolve => setTimeout(resolve, 700)); + + let cookie = 
await browser.cookies.set({url: TEST_URL, name: "store", value: "private", expirationDate: THE_FUTURE, storeId: PRIVATE_STORE_ID}); + browser.test.assertEq("private", cookie.value, "set the private cookie"); + + cookie = await browser.cookies.set({url: TEST_URL, name: "store", value: "default", expirationDate: THE_FUTURE, storeId: STORE_ID}); + browser.test.assertEq("default", cookie.value, "set the default cookie"); + + cookie = await browser.cookies.get({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}); + browser.test.assertEq("private", cookie.value, "get the private cookie"); + browser.test.assertEq(PRIVATE_STORE_ID, cookie.storeId, "get the private cookie storeId"); + + cookie = await browser.cookies.get({url: TEST_URL, name: "store", storeId: STORE_ID}); + browser.test.assertEq("default", cookie.value, "get the default cookie"); + browser.test.assertEq(STORE_ID, cookie.storeId, "get the default cookie storeId"); + + let details = await browser.cookies.remove({url: TEST_URL, name: "store", storeId: STORE_ID}); + assertExpected({url: TEST_URL, name: "store", storeId: STORE_ID}, details); + + cookie = await browser.cookies.get({url: TEST_URL, name: "store", storeId: STORE_ID}); + browser.test.assertEq(null, cookie, "deleted the default cookie"); + + details = await browser.cookies.remove({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}); + assertExpected({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}, details); + + cookie = await browser.cookies.get({url: TEST_URL, name: "store", storeId: PRIVATE_STORE_ID}); + browser.test.assertEq(null, cookie, "deleted the private cookie"); + + await browser.windows.remove(privateWindow.id); + } + + browser.test.notifyPass("cookies"); } let extension = ExtensionTestUtils.loadExtension({ background, manifest: { permissions: ["cookies", "*://example.org/"], }, }); yield extension.startup(); - info("extension loaded"); yield extension.awaitFinish("cookies"); yield extension.unload(); - info("extension 
unloaded"); }); </script> </body> </html>
--- a/toolkit/components/extensions/test/mochitest/test_ext_cookies_containers.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_cookies_containers.html @@ -17,17 +17,17 @@ add_task(function* setup() { // make sure userContext is enabled. return SpecialPowers.pushPrefEnv({"set": [ ["privacy.userContext.enabled", true], ]}); }); add_task(function* test_cookie_containers() { - function background() { + async function background() { function assertExpected(expected, cookie) { for (let key of Object.keys(cookie)) { browser.test.assertTrue(key in expected, `found property ${key}`); browser.test.assertEq(expected[key], cookie[key], `property value for ${key} is correct`); } browser.test.assertEq(Object.keys(expected).length, Object.keys(cookie).length, "all expected properties found"); } @@ -42,49 +42,42 @@ add_task(function* test_cookie_container path: "/", secure: false, httpOnly: false, session: false, expirationDate: THE_FUTURE, storeId: "firefox-container-1", }; - browser.cookies.set({url: TEST_URL, name: "name1", value: "value1", - expirationDate: THE_FUTURE, storeId: "firefox-container-1"}) - .then(cookie => { - browser.test.assertEq("firefox-container-1", cookie.storeId, "the cookie has the correct storeId"); - return browser.cookies.get({url: TEST_URL, name: "name1"}); - }) - .then(cookie => { - browser.test.assertEq(null, cookie, "get() without storeId returns null"); - return browser.cookies.get({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}); - }) - .then(cookie => { - assertExpected(expected, cookie); - return browser.cookies.getAll({storeId: "firefox-default"}); - }) - .then(cookies => { - browser.test.assertEq(0, cookies.length, "getAll() with default storeId returns an empty array"); - return browser.cookies.getAll({storeId: "firefox-container-1"}); - }) - .then(cookies => { - browser.test.assertEq(1, cookies.length, "one cookie found for matching domain"); - assertExpected(expected, cookies[0]); - return 
browser.cookies.remove({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}); - }) - .then(details => { - assertExpected({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}, details); - return browser.cookies.get({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}); - }) - .then(cookie => { - browser.test.assertEq(null, cookie, "removed cookie not found"); - }) - .then(() => { - browser.test.notifyPass("cookies"); + let cookie = await browser.cookies.set({ + url: TEST_URL, name: "name1", value: "value1", + expirationDate: THE_FUTURE, storeId: "firefox-container-1", }); + browser.test.assertEq("firefox-container-1", cookie.storeId, "the cookie has the correct storeId"); + + cookie = await browser.cookies.get({url: TEST_URL, name: "name1"}); + browser.test.assertEq(null, cookie, "get() without storeId returns null"); + + cookie = await browser.cookies.get({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}); + assertExpected(expected, cookie); + + let cookies = await browser.cookies.getAll({storeId: "firefox-default"}); + browser.test.assertEq(0, cookies.length, "getAll() with default storeId returns an empty array"); + + cookies = await browser.cookies.getAll({storeId: "firefox-container-1"}); + browser.test.assertEq(1, cookies.length, "one cookie found for matching domain"); + assertExpected(expected, cookies[0]); + + let details = await browser.cookies.remove({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}); + assertExpected({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}, details); + + cookie = await browser.cookies.get({url: TEST_URL, name: "name1", storeId: "firefox-container-1"}); + browser.test.assertEq(null, cookie, "removed cookie not found"); + + browser.test.notifyPass("cookies"); } let extension = ExtensionTestUtils.loadExtension({ background, manifest: { permissions: ["cookies", "*://example.org/"], }, });
--- a/toolkit/components/extensions/test/mochitest/test_ext_cookies_permissions.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_cookies_permissions.html @@ -14,17 +14,17 @@ <script type="text/javascript"> "use strict"; function* testCookies(options) { // Changing the options object is a bit of a hack, but it allows us to easily // pass an expiration date to the background script. options.expiry = Date.now() / 1000 + 3600; - function background(backgroundOptions) { + async function background(backgroundOptions) { // Ask the parent scope to change some cookies we may or may not have // permission for. let awaitChanges = new Promise(resolve => { browser.test.onMessage.addListener(msg => { browser.test.assertEq("cookies-changed", msg, "browser.test.onMessage"); resolve(); }); }); @@ -39,35 +39,35 @@ function* testCookies(options) { // Try to access some cookies in various ways. let {url, domain, secure} = backgroundOptions; let failures = 0; let tallyFailure = error => { failures++; }; - awaitChanges.then(() => { - return browser.cookies.get({url, name: "foo"}); - }).then(cookie => { + try { + await awaitChanges; + + let cookie = await browser.cookies.get({url, name: "foo"}); browser.test.assertEq(backgroundOptions.shouldPass, cookie != null, "should pass == get cookie"); - return browser.cookies.getAll({domain}); - }).then(cookies => { + let cookies = await browser.cookies.getAll({domain}); if (backgroundOptions.shouldPass) { browser.test.assertEq(2, cookies.length, "expected number of cookies"); } else { browser.test.assertEq(0, cookies.length, "expected number of cookies"); } - return Promise.all([ + await Promise.all([ browser.cookies.set({url, domain, secure, name: "foo", "value": "baz", expirationDate: backgroundOptions.expiry}).catch(tallyFailure), browser.cookies.set({url, domain, secure, name: "bar", "value": "quux", expirationDate: backgroundOptions.expiry}).catch(tallyFailure), browser.cookies.remove({url, name: "deleted"}), ]); - }).then(() 
=> { + if (backgroundOptions.shouldPass) { // The order of eviction events isn't guaranteed, so just check that // it's there somewhere. let evicted = changed.indexOf("evicted:evicted"); if (evicted < 0) { browser.test.fail("got no eviction event"); } else { browser.test.succeed("got eviction event"); @@ -75,26 +75,27 @@ function* testCookies(options) { } browser.test.assertEq("x:explicit,x:overwrite,x:explicit,x:explicit,foo:overwrite,foo:explicit,bar:explicit,deleted:explicit", changed.join(","), "expected changes"); } else { browser.test.assertEq("", changed.join(","), "expected no changes"); } - browser.test.notifyPass("cookie-permissions"); - }).then(() => { if (!(backgroundOptions.shouldPass || backgroundOptions.shouldWrite)) { browser.test.assertEq(2, failures, "Expected failures"); } else { browser.test.assertEq(0, failures, "Expected no failures"); } - }).catch(error => { + + browser.test.notifyPass("cookie-permissions"); + } catch (error) { browser.test.fail(`Error: ${error} :: ${error.stack}`); - }); + browser.test.notifyFail("cookie-permissions"); + } } let extension = ExtensionTestUtils.loadExtension({ manifest: { "permissions": options.permissions, }, background: `(${background})(${JSON.stringify(options)})`,
--- a/toolkit/components/extensions/test/mochitest/test_ext_i18n.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_i18n.html @@ -178,27 +178,24 @@ add_task(function* test_get_accept_langu let tabId; browser.tabs.query({currentWindow: true, active: true}, tabs => { tabId = tabs[0].id; browser.test.sendMessage("ready"); }); - browser.test.onMessage.addListener(([msg, expected]) => { - Promise.all([ - new Promise( - resolve => browser.tabs.sendMessage(tabId, "get-results", resolve)), - browser.i18n.getAcceptLanguages(), - ]).then(([contentResults, backgroundResults]) => { - checkResults("contentScript", contentResults, expected); - checkResults("background", backgroundResults, expected); + browser.test.onMessage.addListener(async ([msg, expected]) => { + let contentResults = await browser.tabs.sendMessage(tabId, "get-results"); + let backgroundResults = await browser.i18n.getAcceptLanguages(); - browser.test.sendMessage("done"); - }); + checkResults("contentScript", contentResults, expected); + checkResults("background", backgroundResults, expected); + + browser.test.sendMessage("done"); }); } function content() { browser.runtime.onMessage.addListener((msg, sender, respond) => { browser.i18n.getAcceptLanguages(respond); return true; }); @@ -352,27 +349,24 @@ add_task(function* test_detect_language( let tabId; browser.tabs.query({currentWindow: true, active: true}, tabs => { tabId = tabs[0].id; browser.test.sendMessage("ready"); }); - browser.test.onMessage.addListener(([msg, expected]) => { - Promise.all([ - browser.i18n.detectLanguage(msg), - new Promise( - resolve => browser.tabs.sendMessage(tabId, msg, resolve)), - ]).then(([backgroundResults, contentResults]) => { - checkResult("background", backgroundResults, expected); - checkResult("contentScript", contentResults, expected); + browser.test.onMessage.addListener(async ([msg, expected]) => { + let backgroundResults = await browser.i18n.detectLanguage(msg); + let contentResults = await 
browser.tabs.sendMessage(tabId, msg); - browser.test.sendMessage("done"); - }); + checkResult("background", backgroundResults, expected); + checkResult("contentScript", contentResults, expected); + + browser.test.sendMessage("done"); }); } function content() { browser.runtime.onMessage.addListener((msg, sender, respond) => { browser.i18n.detectLanguage(msg, respond); return true; });
--- a/toolkit/components/extensions/test/mochitest/test_ext_notifications.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_notifications.html @@ -14,44 +14,44 @@ // A 1x1 PNG image. // Source: https://commons.wikimedia.org/wiki/File:1x1.png (Public Domain) let image = atob("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAA" + "ACnej3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5CYII="); const IMAGE_ARRAYBUFFER = Uint8Array.from(image, byte => byte.charCodeAt(0)).buffer; add_task(function* test_notification() { - function background() { + async function background() { let opts = { type: "basic", title: "Testing Notification", message: "Carry on", }; - browser.notifications.create(opts).then(id => { - browser.test.sendMessage("running", id); - browser.test.notifyPass("background test passed"); - }); + let id = await browser.notifications.create(opts); + + browser.test.sendMessage("running", id); + browser.test.notifyPass("background test passed"); } let extension = ExtensionTestUtils.loadExtension({ manifest: { permissions: ["notifications"], }, background, }); yield extension.startup(); let x = yield extension.awaitMessage("running"); is(x, "0", "got correct id from notifications.create"); yield extension.awaitFinish(); yield extension.unload(); }); add_task(function* test_notification_events() { - function background() { + async function background() { let opts = { type: "basic", title: "Testing Notification", message: "Carry on", }; // Test an ignored listener. browser.notifications.onButtonClicked.addListener(function() {}); @@ -61,21 +61,19 @@ add_task(function* test_notification_eve browser.notifications.onClicked.addListener(function() {}); // Test onClosed listener. 
browser.notifications.onClosed.addListener(id => { browser.test.sendMessage("closed", id); browser.test.notifyPass("background test passed"); }); - browser.notifications.create("5", opts).then(id => { - return browser.notifications.create("5", opts); - }).then(id => { - browser.test.sendMessage("running", id); - }); + await browser.notifications.create("5", opts); + let id = await browser.notifications.create("5", opts); + browser.test.sendMessage("running", id); } let extension = ExtensionTestUtils.loadExtension({ manifest: { permissions: ["notifications"], }, background, }); @@ -84,33 +82,33 @@ add_task(function* test_notification_eve is(x, "5", "got correct id from onClosed listener"); x = yield extension.awaitMessage("running"); is(x, "5", "got correct id from notifications.create"); yield extension.awaitFinish(); yield extension.unload(); }); add_task(function* test_notification_clear() { - function background() { + async function background() { let opts = { type: "basic", title: "Testing Notification", message: "Carry on", }; browser.notifications.onClosed.addListener(id => { browser.test.sendMessage("closed", id); }); - browser.notifications.create("99", opts).then(id => { - return browser.notifications.clear(id); - }).then(wasCleared => { - browser.test.sendMessage("cleared", wasCleared); - browser.test.notifyPass("background test passed"); - }); + let id = await browser.notifications.create("99", opts); + + let wasCleared = await browser.notifications.clear(id); + browser.test.sendMessage("cleared", wasCleared); + + browser.test.notifyPass("background test passed"); } let extension = ExtensionTestUtils.loadExtension({ manifest: { permissions: ["notifications"], }, background, }); @@ -119,62 +117,62 @@ add_task(function* test_notification_cle is(x, "99", "got correct id from onClosed listener"); x = yield extension.awaitMessage("cleared"); is(x, true, "got correct boolean from notifications.clear"); yield extension.awaitFinish(); yield extension.unload(); 
}); add_task(function* test_notifications_empty_getAll() { - function background() { - browser.notifications.getAll().then(notifications => { - browser.test.assertEq("object", typeof notifications, "getAll() returned an object"); - browser.test.assertEq(0, Object.keys(notifications).length, "the object has no properties"); - browser.test.notifyPass("getAll empty"); - }); + async function background() { + let notifications = await browser.notifications.getAll(); + + browser.test.assertEq("object", typeof notifications, "getAll() returned an object"); + browser.test.assertEq(0, Object.keys(notifications).length, "the object has no properties"); + browser.test.notifyPass("getAll empty"); } let extension = ExtensionTestUtils.loadExtension({ manifest: { permissions: ["notifications"], }, background, }); yield extension.startup(); yield extension.awaitFinish("getAll empty"); yield extension.unload(); }); add_task(function* test_notifications_populated_getAll() { - function background() { + async function background() { let opts = { type: "basic", iconUrl: "a.png", title: "Testing Notification", message: "Carry on", }; - browser.notifications.create("p1", opts).then(() => { - return browser.notifications.create("p2", opts); - }).then(() => { - return browser.notifications.getAll(); - }).then(notifications => { - browser.test.assertEq("object", typeof notifications, "getAll() returned an object"); - browser.test.assertEq(2, Object.keys(notifications).length, "the object has 2 properties"); - for (let notificationId of ["p1", "p2"]) { - for (let key of Object.keys(opts)) { - browser.test.assertEq( - opts[key], - notifications[notificationId][key], - `the notification has the expected value for option: ${key}` - ); - } + await browser.notifications.create("p1", opts); + await browser.notifications.create("p2", opts); + let notifications = await browser.notifications.getAll(); + + browser.test.assertEq("object", typeof notifications, "getAll() returned an object"); + 
browser.test.assertEq(2, Object.keys(notifications).length, "the object has 2 properties"); + + for (let notificationId of ["p1", "p2"]) { + for (let key of Object.keys(opts)) { + browser.test.assertEq( + opts[key], + notifications[notificationId][key], + `the notification has the expected value for option: ${key}` + ); } - browser.test.notifyPass("getAll populated"); - }); + } + + browser.test.notifyPass("getAll populated"); } let extension = ExtensionTestUtils.loadExtension({ manifest: { permissions: ["notifications"], }, background, files: {
--- a/toolkit/components/extensions/test/mochitest/test_ext_permission_xhr.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_permission_xhr.html @@ -11,17 +11,17 @@ <body> <script type="text/javascript"> "use strict"; /* eslint-disable mozilla/balanced-listeners */ add_task(function* test_simple() { - function runTests(cx) { + async function runTests(cx) { function xhr(XMLHttpRequest) { return (url) => { return new Promise((resolve, reject) => { let req = new XMLHttpRequest(); req.open("GET", url); req.addEventListener("load", resolve); req.addEventListener("error", reject); req.send(); @@ -42,69 +42,69 @@ add_task(function* test_simple() { if (shouldFail) { return fetch("http://example.org/example.txt").then(failListener, passListener); } else { return fetch("http://example.com/example.txt").then(passListener, failListener); } /* eslint-enable no-else-return */ } - return run(true, xhr(XMLHttpRequest)) - .then(() => run(false, xhr(XMLHttpRequest))) - .then(() => run(true, xhr(window.XMLHttpRequest))) - .then(() => run(false, xhr(window.XMLHttpRequest))) - .then(() => run(true, fetch)) - .then(() => run(false, fetch)) - .then(() => run(true, window.fetch)) - .then(() => run(false, window.fetch)) - .catch(err => { - browser.test.fail(`Error: ${err} :: ${err.stack}`); - browser.test.notifyFail("permission_xhr"); - }); + try { + await run(true, xhr(XMLHttpRequest)); + await run(false, xhr(XMLHttpRequest)); + await run(true, xhr(window.XMLHttpRequest)); + await run(false, xhr(window.XMLHttpRequest)); + await run(true, fetch); + await run(false, fetch); + await run(true, window.fetch); + await run(false, window.fetch); + } catch (err) { + browser.test.fail(`Error: ${err} :: ${err.stack}`); + browser.test.notifyFail("permission_xhr"); + } } - function background(runTestsFn) { - runTestsFn("bg").then(() => { - browser.test.notifyPass("permission_xhr"); - }); + async function background(runTestsFn) { + await runTestsFn("bg"); + 
browser.test.notifyPass("permission_xhr"); } let extensionData = { background: `(${background})(${runTests})`, manifest: { permissions: ["http://example.com/"], content_scripts: [{ "matches": ["http://mochi.test/*/file_permission_xhr.html"], "js": ["content.js"], }], }, files: { - "content.js": "new " + function(runTestsFn) { - runTestsFn("content").then(() => { - window.wrappedJSObject.privilegedFetch = fetch; - window.wrappedJSObject.privilegedXHR = XMLHttpRequest; + "content.js": `(${async runTestsFn => { + await runTestsFn("content"); - window.addEventListener("message", function rcv({data}) { - switch (data.msg) { - case "test": - break; + window.wrappedJSObject.privilegedFetch = fetch; + window.wrappedJSObject.privilegedXHR = XMLHttpRequest; + + window.addEventListener("message", function rcv({data}) { + switch (data.msg) { + case "test": + break; - case "assertTrue": - browser.test.assertTrue(data.condition, data.description); - break; + case "assertTrue": + browser.test.assertTrue(data.condition, data.description); + break; - case "finish": - window.removeEventListener("message", rcv, false); - browser.test.sendMessage("content-script-finished"); - break; - } - }, false); - window.postMessage("test", "*"); - }); - } + `(${runTests})`, + case "finish": + window.removeEventListener("message", rcv, false); + browser.test.sendMessage("content-script-finished"); + break; + } + }, false); + window.postMessage("test", "*"); + }})(${runTests})`, }, }; let extension = ExtensionTestUtils.loadExtension(extensionData); yield extension.startup(); let win = window.open("file_permission_xhr.html"); yield extension.awaitMessage("content-script-finished");
--- a/toolkit/components/extensions/test/mochitest/test_ext_sendmessage_no_receiver.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_sendmessage_no_receiver.html @@ -24,24 +24,22 @@ function loadContentScriptExtension(cont files: { "contentscript.js": contentScript, }, }; return ExtensionTestUtils.loadExtension(extensionData); } add_task(function* test_content_script_sendMessage_without_listener() { - function contentScript() { - browser.runtime.sendMessage("msg").then(reply => { - browser.test.assertEq(undefined, reply); - browser.test.notifyFail("Did not expect a reply to sendMessage"); - }, error => { - browser.test.assertEq("Could not establish connection. Receiving end does not exist.", error.message); - browser.test.notifyPass("sendMessage callback was invoked"); - }); + async function contentScript() { + await browser.test.assertRejects( + browser.runtime.sendMessage("msg"), + "Could not establish connection. Receiving end does not exist."); + + browser.test.notifyPass("sendMessage callback was invoked"); } let extension = loadContentScriptExtension(contentScript); yield extension.startup(); let win = window.open("file_sample.html"); yield extension.awaitFinish("sendMessage callback was invoked"); win.close();
--- a/toolkit/components/extensions/test/mochitest/test_ext_storage_content.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_storage_content.html @@ -11,38 +11,37 @@ <body> <script type="application/javascript"> "use strict"; // Copied from toolkit/components/extensions/test/xpcshell/test_ext_storage.js. // The storage API in content scripts should behave identical to the storage API // in background pages. -function contentScript() { +async function contentScript() { let storage = browser.storage.local; - function check(prop, value) { - return storage.get(null).then(data => { - browser.test.assertEq(value, data[prop], "null getter worked for " + prop); - return storage.get(prop); - }).then(data => { - browser.test.assertEq(value, data[prop], "string getter worked for " + prop); - return storage.get([prop]); - }).then(data => { - browser.test.assertEq(value, data[prop], "array getter worked for " + prop); - return storage.get({[prop]: undefined}); - }).then(data => { - browser.test.assertEq(value, data[prop], "object getter worked for " + prop); - }); + async function check(prop, value) { + let data = await storage.get(null); + browser.test.assertEq(value, data[prop], "null getter worked for " + prop); + + data = await storage.get(prop); + browser.test.assertEq(value, data[prop], "string getter worked for " + prop); + + data = await storage.get([prop]); + browser.test.assertEq(value, data[prop], "array getter worked for " + prop); + + data = await storage.get({[prop]: undefined}); + browser.test.assertEq(value, data[prop], "object getter worked for " + prop); } let globalChanges = {}; - browser.storage.onChanged.addListener((aChanges, aStorage) => { - browser.test.assertEq("local", aStorage, "storage is local"); - Object.assign(globalChanges, aChanges); + browser.storage.onChanged.addListener((changes, storage) => { + browser.test.assertEq("local", storage, "storage is local"); + Object.assign(globalChanges, changes); }); function 
checkChanges(changes) { function checkSub(obj1, obj2) { for (let prop in obj1) { browser.test.assertEq(obj1[prop].oldValue, obj2[prop].oldValue); browser.test.assertEq(obj1[prop].newValue, obj2[prop].newValue); } @@ -51,111 +50,102 @@ function contentScript() { checkSub(changes, globalChanges); checkSub(globalChanges, changes); globalChanges = {}; } /* eslint-disable dot-notation */ // Set some data and then test getters. - storage.set({"test-prop1": "value1", "test-prop2": "value2"}).then(() => { + try { + await storage.set({"test-prop1": "value1", "test-prop2": "value2"}); checkChanges({"test-prop1": {newValue: "value1"}, "test-prop2": {newValue: "value2"}}); - return check("test-prop1", "value1"); - }).then(() => { - return check("test-prop2", "value2"); - }).then(() => { - return storage.get({"test-prop1": undefined, "test-prop2": undefined, "other": "default"}); - }).then(data => { + + await check("test-prop1", "value1"); + await check("test-prop2", "value2"); + + let data = await storage.get({"test-prop1": undefined, "test-prop2": undefined, "other": "default"}); browser.test.assertEq("value1", data["test-prop1"], "prop1 correct"); browser.test.assertEq("value2", data["test-prop2"], "prop2 correct"); browser.test.assertEq("default", data["other"], "other correct"); - return storage.get(["test-prop1", "test-prop2", "other"]); - }).then(data => { + + data = await storage.get(["test-prop1", "test-prop2", "other"]); browser.test.assertEq("value1", data["test-prop1"], "prop1 correct"); browser.test.assertEq("value2", data["test-prop2"], "prop2 correct"); browser.test.assertFalse("other" in data, "other correct"); - // Remove data in various ways. - }).then(() => { - return storage.remove("test-prop1"); - }).then(() => { + // Remove data in various ways. 
+ await storage.remove("test-prop1"); checkChanges({"test-prop1": {oldValue: "value1"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertFalse("test-prop1" in data, "prop1 absent"); browser.test.assertTrue("test-prop2" in data, "prop2 present"); - return storage.set({"test-prop1": "value1"}); - }).then(() => { + await storage.set({"test-prop1": "value1"}); checkChanges({"test-prop1": {newValue: "value1"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertEq("value1", data["test-prop1"], "prop1 correct"); browser.test.assertEq("value2", data["test-prop2"], "prop2 correct"); - }).then(() => { - return storage.remove(["test-prop1", "test-prop2"]); - }).then(() => { + + await storage.remove(["test-prop1", "test-prop2"]); checkChanges({"test-prop1": {oldValue: "value1"}, "test-prop2": {oldValue: "value2"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertFalse("test-prop1" in data, "prop1 absent"); browser.test.assertFalse("test-prop2" in data, "prop2 absent"); - // test storage.clear - }).then(() => { - return storage.set({"test-prop1": "value1", "test-prop2": "value2"}); - }).then(() => { - return storage.clear(); - }).then(() => { + // test storage.clear + await storage.set({"test-prop1": "value1", "test-prop2": "value2"}); + + await storage.clear(); checkChanges({"test-prop1": {oldValue: "value1"}, "test-prop2": {oldValue: "value2"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertFalse("test-prop1" in data, "prop1 absent"); browser.test.assertFalse("test-prop2" in data, "prop2 absent"); - // Test cache invalidation. 
- }).then(() => { - return storage.set({"test-prop1": "value1", "test-prop2": "value2"}); - }).then(() => { + // Test cache invalidation. + await storage.set({"test-prop1": "value1", "test-prop2": "value2"}); + globalChanges = {}; // Schedule sendMessage after onMessage because the other end immediately // sends a message. Promise.resolve().then(() => { browser.test.sendMessage("invalidate"); }); - return new Promise(resolve => browser.test.onMessage.addListener(resolve)); - }).then(() => { - return check("test-prop1", "value1"); - }).then(() => { - return check("test-prop2", "value2"); + + await new Promise(resolve => browser.test.onMessage.addListener(resolve)); - // Make sure we can store complex JSON data. - }).then(() => { - return storage.set({ + await check("test-prop1", "value1"); + await check("test-prop2", "value2"); + + // Make sure we can store complex JSON data. + await storage.set({ "test-prop1": { str: "hello", bool: true, null: null, undef: undefined, obj: {}, arr: [1, 2], date: new Date(0), regexp: /regexp/, func: function func() {}, window, }, }); - }).then(() => { - return storage.set({"test-prop2": function func() {}}); - }).then(() => { + + await storage.set({"test-prop2": function func() {}}); browser.test.assertEq("value1", globalChanges["test-prop1"].oldValue, "oldValue correct"); browser.test.assertEq("object", typeof(globalChanges["test-prop1"].newValue), "newValue is obj"); globalChanges = {}; - return storage.get({"test-prop1": undefined, "test-prop2": undefined}); - }).then(data => { + + data = await storage.get({"test-prop1": undefined, "test-prop2": undefined}); let obj = data["test-prop1"]; browser.test.assertEq("hello", obj.str, "string part correct"); browser.test.assertEq(true, obj.bool, "bool part correct"); browser.test.assertEq(null, obj.null, "null part correct"); browser.test.assertEq(undefined, obj.undef, "undefined part correct"); browser.test.assertEq(undefined, obj.func, "function part correct"); 
browser.test.assertEq(undefined, obj.window, "window part correct"); @@ -166,22 +156,22 @@ function contentScript() { browser.test.assertEq(1, obj.arr[0], "arr[0] part correct"); browser.test.assertEq(2, obj.arr[1], "arr[1] part correct"); browser.test.assertEq(2, obj.arr.length, "arr.length part correct"); obj = data["test-prop2"]; browser.test.assertEq("[object Object]", {}.toString.call(obj), "function serialized as a plain object"); browser.test.assertEq(0, Object.keys(obj).length, "function serialized as an empty object"); - }).then(() => { + browser.test.notifyPass("storage"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFail("storage"); - }); + } } let extensionData = { manifest: { content_scripts: [{ "matches": ["http://mochi.test/*/file_sample.html"], "js": ["content_script.js"], "run_at": "document_idle",
--- a/toolkit/components/extensions/test/mochitest/test_ext_storage_tab.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_storage_tab.html @@ -9,17 +9,17 @@ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/> </head> <body> <script type="text/javascript"> "use strict"; add_task(function* test_multiple_pages() { - function background() { + async function background() { let tabReady = new Promise(resolve => { browser.runtime.onMessage.addListener(function listener(msg) { browser.test.log("onMessage " + msg); if (msg == "tab-ready") { browser.runtime.onMessage.removeListener(listener); resolve(); } }); @@ -32,51 +32,52 @@ add_task(function* test_multiple_pages() browser.tabs.onRemoved.removeListener(listener); // Delay long enough to be sure the inner window has been nuked. setTimeout(resolve, 0); } }); }); - let storage = browser.storage.local; + try { + let storage = browser.storage.local; - browser.test.log("create"); - browser.tabs.create({url: "tab.html"}).then(tabObj => { - tabId = tabObj.id; + browser.test.log("create"); + let tab = await browser.tabs.create({url: "tab.html"}); + tabId = tab.id; - return tabReady; - }).then(() => { - return storage.get("key"); - }).then(result => { + await tabReady; + + let result = await storage.get("key"); browser.test.assertEq(undefined, result.key, "Key should be undefined"); - return browser.runtime.sendMessage("tab-set-key"); - }).then(() => { - return storage.get("key"); - }).then(result => { + await browser.runtime.sendMessage("tab-set-key"); + + result = await storage.get("key"); browser.test.assertEq(JSON.stringify({foo: {bar: "baz"}}), JSON.stringify(result.key), "Key should be set to the value from the tab"); - }).then(() => { + browser.test.log("Remove tab"); - return Promise.all([browser.tabs.remove(tabId), - tabRemoved]); - }).then(() => { - return storage.get("key"); - }).then(result => { + + await Promise.all([ + browser.tabs.remove(tabId), + tabRemoved, + ]); + + result = 
await storage.get("key"); browser.test.assertEq(JSON.stringify({foo: {bar: "baz"}}), JSON.stringify(result.key), "Key should still be set to the value from the tab"); - }).then(() => { + browser.test.notifyPass("storage-multiple"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFail("storage-multiple"); - }); + } } function tab() { browser.test.log("tab"); browser.runtime.onMessage.addListener(msg => { if (msg == "tab-set-key") { return browser.storage.local.set({key: {foo: {bar: "baz"}}}); }
--- a/toolkit/components/extensions/test/mochitest/test_ext_subframes_privileges.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_subframes_privileges.html @@ -10,48 +10,49 @@ </head> <body> <script type="text/javascript"> "use strict"; add_task(function* test_webext_tab_subframe_privileges() { function background() { - browser.runtime.onMessage.addListener(({msg, success, tabId, error}) => { + browser.runtime.onMessage.addListener(async ({msg, success, tabId, error}) => { if (msg == "webext-tab-subframe-privileges") { if (success) { - browser.tabs.remove(tabId) - .then(() => browser.test.notifyPass(msg)); + await browser.tabs.remove(tabId); + + browser.test.notifyPass(msg); } else { browser.test.log(`Got an unexpected error: ${error}`); - browser.tabs.query({active: true}) - .then(tabs => browser.tabs.remove(tabs[0].id)) - .then(() => browser.test.notifyFail(msg)); + + let tabs = await browser.tabs.query({active: true}); + await browser.tabs.remove(tabs[0].id); + + browser.test.notifyFail(msg); } } }); browser.tabs.create({url: browser.runtime.getURL("/tab.html")}); } - function tabSubframeScript() { + async function tabSubframeScript() { browser.test.assertTrue(browser.tabs != undefined, "Subframe of a privileged page has access to privileged APIs"); if (browser.tabs) { - browser.tabs.getCurrent() - .then(tab => { - browser.runtime.sendMessage({ - msg: "webext-tab-subframe-privileges", - success: true, - tabId: tab.id, - }, () => { - // NOTE: this empty callback prevents the promise returned from runtime.sendmessage - // to be reported as resolved after context unloaded. 
- }); - }) - .catch(e => browser.runtime.sendMessage({msg: "webext-tab-subframe-privileges", success: false, error: `${e}`})); + try { + let tab = await browser.tabs.getCurrent(); + browser.runtime.sendMessage({ + msg: "webext-tab-subframe-privileges", + success: true, + tabId: tab.id, + }); + } catch (e) { + browser.runtime.sendMessage({msg: "webext-tab-subframe-privileges", success: false, error: `${e}`}); + } } else { browser.runtime.sendMessage({ msg: "webext-tab-subframe-privileges", success: false, error: `Privileged APIs missing in WebExtension tab sub-frame`, }); } }
--- a/toolkit/components/extensions/test/mochitest/test_ext_web_accessible_resources.html +++ b/toolkit/components/extensions/test/mochitest/test_ext_web_accessible_resources.html @@ -18,17 +18,17 @@ SimpleTest.registerCleanupFunction(() => { SpecialPowers.clearUserPref("security.mixed_content.block_display_content"); }); let image = atob("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAA" + "ACnej3aAAAAAXRSTlMAQObYZgAAAApJREFUCNdjYAAAAAIAAeIhvDMAAAAASUVORK5CYII="); const IMAGE_ARRAYBUFFER = Uint8Array.from(image, byte => byte.charCodeAt(0)).buffer; -function testImageLoading(src, expectedAction) { +async function testImageLoading(src, expectedAction) { let imageLoadingPromise = new Promise((resolve, reject) => { let cleanupListeners; let testImage = document.createElement("img"); testImage.setAttribute("src", src); let loadListener = () => { cleanupListeners(); resolve(expectedAction === "loaded"); @@ -45,19 +45,18 @@ function testImageLoading(src, expectedA }; testImage.addEventListener("load", loadListener); testImage.addEventListener("error", errorListener); document.body.appendChild(testImage); }); - imageLoadingPromise.then(success => { - browser.runtime.sendMessage({name: "image-loading", expectedAction, success}); - }); + let success = await imageLoadingPromise; + browser.runtime.sendMessage({name: "image-loading", expectedAction, success}); } add_task(function* test_web_accessible_resources() { function background() { let gotURL; let tabId; function loadFrame(url) { @@ -72,40 +71,36 @@ add_task(function* test_web_accessible_r [browser.extension.getURL("accessible.html"), true], [browser.extension.getURL("accessible.html") + "?foo=bar", true], [browser.extension.getURL("accessible.html") + "#!foo=bar", true], [browser.extension.getURL("forbidden.html"), false], [browser.extension.getURL("wild1.html"), true], [browser.extension.getURL("wild2.htm"), false], ]; - function runTest() { - if (!urls.length) { - 
browser.test.notifyPass("web-accessible-resources"); - return; - } + async function runTests() { + for (let [url, shouldLoad] of urls) { + let success = await loadFrame(url); - let [url, shouldLoad] = urls.shift(); - return loadFrame(url).then(success => { browser.test.assertEq(shouldLoad, success, "Load was successful"); if (shouldLoad) { browser.test.assertEq(url, gotURL, "Got expected url"); } else { browser.test.assertEq(undefined, gotURL, "Got no url"); } gotURL = undefined; + } - return runTest(); - }); + browser.test.notifyPass("web-accessible-resources"); } browser.runtime.onMessage.addListener(([msg, url], sender) => { if (msg == "content-script-ready") { tabId = sender.tab.id; - runTest(); + runTests(); } else if (msg == "page-script") { browser.test.assertEq(undefined, gotURL, "Should have gotten only one message"); browser.test.assertEq("string", typeof(url), "URL should be a string"); gotURL = url; } }); browser.test.sendMessage("ready");
--- a/toolkit/components/extensions/test/xpcshell/test_ext_alarms.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_alarms.js @@ -30,21 +30,20 @@ add_task(function* test_alarm_fires() { browser.alarms.onAlarm.addListener(alarm => { browser.test.assertEq(ALARM_NAME, alarm.name, "alarm has the correct name"); clearTimeout(timer); browser.test.notifyPass("alarm-fires"); }); browser.alarms.create(ALARM_NAME, {delayInMinutes: 0.02}); - timer = setTimeout(() => { + timer = setTimeout(async () => { browser.test.fail("alarm fired within expected time"); - browser.alarms.clear(ALARM_NAME).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarm was cleared"); - }); + let wasCleared = await browser.alarms.clear(ALARM_NAME); + browser.test.assertTrue(wasCleared, "alarm was cleared"); browser.test.notifyFail("alarm-fires"); }, 10000); } let extension = ExtensionTestUtils.loadExtension({ background: `(${backgroundScript})()`, manifest: { permissions: ["alarms"], @@ -65,21 +64,20 @@ add_task(function* test_alarm_fires_with browser.alarms.onAlarm.addListener(alarm => { browser.test.assertEq(ALARM_NAME, alarm.name, "alarm has the expected name"); clearTimeout(timer); browser.test.notifyPass("alarm-when"); }); browser.alarms.create(ALARM_NAME, {when: Date.now() + 1000}); - timer = setTimeout(() => { + timer = setTimeout(async () => { browser.test.fail("alarm fired within expected time"); - browser.alarms.clear(ALARM_NAME).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarm was cleared"); - }); + let wasCleared = await browser.alarms.clear(ALARM_NAME); + browser.test.assertTrue(wasCleared, "alarm was cleared"); browser.test.notifyFail("alarm-when"); }, 10000); } let extension = ExtensionTestUtils.loadExtension({ background: `(${backgroundScript})()`, manifest: { permissions: ["alarms"], @@ -88,121 +86,112 @@ add_task(function* test_alarm_fires_with yield extension.startup(); yield extension.awaitFinish("alarm-when"); yield extension.unload(); }); 
add_task(function* test_alarm_clear_non_matching_name() { - function backgroundScript() { + async function backgroundScript() { let ALARM_NAME = "test_ext_alarms"; browser.alarms.create(ALARM_NAME, {when: Date.now() + 2000}); - browser.alarms.clear(ALARM_NAME + "1").then(wasCleared => { - browser.test.assertFalse(wasCleared, "alarm was not cleared"); - return browser.alarms.getAll(); - }).then(alarms => { - browser.test.assertEq(1, alarms.length, "alarm was not removed"); - browser.test.notifyPass("alarm-clear"); - }); + let wasCleared = await browser.alarms.clear(ALARM_NAME + "1"); + browser.test.assertFalse(wasCleared, "alarm was not cleared"); + + let alarms = await browser.alarms.getAll(); + browser.test.assertEq(1, alarms.length, "alarm was not removed"); + browser.test.notifyPass("alarm-clear"); } let extension = ExtensionTestUtils.loadExtension({ background: `(${backgroundScript})()`, manifest: { permissions: ["alarms"], }, }); yield extension.startup(); yield extension.awaitFinish("alarm-clear"); yield extension.unload(); }); add_task(function* test_alarm_get_and_clear_single_argument() { - function backgroundScript() { + async function backgroundScript() { browser.alarms.create({when: Date.now() + 2000}); - browser.alarms.get().then(alarm => { - browser.test.assertEq("", alarm.name, "expected alarm returned"); - return browser.alarms.clear(); - }).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarm was cleared"); - return browser.alarms.getAll(); - }).then(alarms => { - browser.test.assertEq(0, alarms.length, "alarm was removed"); - browser.test.notifyPass("alarm-single-arg"); - }); + let alarm = await browser.alarms.get(); + browser.test.assertEq("", alarm.name, "expected alarm returned"); + + let wasCleared = await browser.alarms.clear(); + browser.test.assertTrue(wasCleared, "alarm was cleared"); + + let alarms = await browser.alarms.getAll(); + browser.test.assertEq(0, alarms.length, "alarm was removed"); + + 
browser.test.notifyPass("alarm-single-arg"); } let extension = ExtensionTestUtils.loadExtension({ background: `(${backgroundScript})()`, manifest: { permissions: ["alarms"], }, }); yield extension.startup(); yield extension.awaitFinish("alarm-single-arg"); yield extension.unload(); }); add_task(function* test_get_get_all_clear_all_alarms() { - function backgroundScript() { + async function backgroundScript() { const ALARM_NAME = "test_alarm"; let suffixes = [0, 1, 2]; for (let suffix of suffixes) { browser.alarms.create(ALARM_NAME + suffix, {when: Date.now() + (suffix + 1) * 10000}); } - browser.alarms.getAll().then(alarms => { - browser.test.assertEq(suffixes.length, alarms.length, "expected number of alarms were found"); - alarms.forEach((alarm, index) => { - browser.test.assertEq(ALARM_NAME + index, alarm.name, "alarm has the expected name"); - }); + let alarms = await browser.alarms.getAll(); + browser.test.assertEq(suffixes.length, alarms.length, "expected number of alarms were found"); + alarms.forEach((alarm, index) => { + browser.test.assertEq(ALARM_NAME + index, alarm.name, "alarm has the expected name"); + }); + - return Promise.all( - suffixes.map(suffix => { - return browser.alarms.get(ALARM_NAME + suffix).then(alarm => { - browser.test.assertEq(ALARM_NAME + suffix, alarm.name, "alarm has the expected name"); - browser.test.sendMessage(`get-${suffix}`); - }); - }) - ); - }).then(() => { - return browser.alarms.clear(ALARM_NAME + suffixes[0]); - }).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarm was cleared"); + for (let suffix of suffixes) { + let alarm = await browser.alarms.get(ALARM_NAME + suffix); + browser.test.assertEq(ALARM_NAME + suffix, alarm.name, "alarm has the expected name"); + browser.test.sendMessage(`get-${suffix}`); + } + + let wasCleared = await browser.alarms.clear(ALARM_NAME + suffixes[0]); + browser.test.assertTrue(wasCleared, "alarm was cleared"); - return browser.alarms.getAll(); - }).then(alarms => { - 
browser.test.assertEq(2, alarms.length, "alarm was removed"); + alarms = await browser.alarms.getAll(); + browser.test.assertEq(2, alarms.length, "alarm was removed"); - return browser.alarms.get(ALARM_NAME + suffixes[0]); - }).then(alarm => { - browser.test.assertEq(undefined, alarm, "non-existent alarm is undefined"); - browser.test.sendMessage(`get-invalid`); + let alarm = await browser.alarms.get(ALARM_NAME + suffixes[0]); + browser.test.assertEq(undefined, alarm, "non-existent alarm is undefined"); + browser.test.sendMessage(`get-invalid`); - return browser.alarms.clearAll(); - }).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarms were cleared"); + wasCleared = await browser.alarms.clearAll(); + browser.test.assertTrue(wasCleared, "alarms were cleared"); - return browser.alarms.getAll(); - }).then(alarms => { - browser.test.assertEq(0, alarms.length, "no alarms exist"); - browser.test.sendMessage("clearAll"); - browser.test.sendMessage("clear"); - browser.test.sendMessage("getAll"); - }); + alarms = await browser.alarms.getAll(); + browser.test.assertEq(0, alarms.length, "no alarms exist"); + browser.test.sendMessage("clearAll"); + browser.test.sendMessage("clear"); + browser.test.sendMessage("getAll"); } let extension = ExtensionTestUtils.loadExtension({ background: `(${backgroundScript})()`, manifest: { permissions: ["alarms"], }, });
--- a/toolkit/components/extensions/test/xpcshell/test_ext_alarms_does_not_fire.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_alarms_does_not_fire.js @@ -1,28 +1,28 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* test_cleared_alarm_does_not_fire() { - function backgroundScript() { + async function backgroundScript() { let ALARM_NAME = "test_ext_alarms"; browser.alarms.onAlarm.addListener(alarm => { browser.test.fail("cleared alarm does not fire"); browser.test.notifyFail("alarm-cleared"); }); browser.alarms.create(ALARM_NAME, {when: Date.now() + 1000}); - browser.alarms.clear(ALARM_NAME).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarm was cleared"); - setTimeout(() => { - browser.test.notifyPass("alarm-cleared"); - }, 2000); - }); + let wasCleared = await browser.alarms.clear(ALARM_NAME); + browser.test.assertTrue(wasCleared, "alarm was cleared"); + + await new Promise(resolve => setTimeout(resolve, 2000)); + + browser.test.notifyPass("alarm-cleared"); } let extension = ExtensionTestUtils.loadExtension({ background: `(${backgroundScript})()`, manifest: { permissions: ["alarms"], }, });
--- a/toolkit/components/extensions/test/xpcshell/test_ext_alarms_periodic.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_alarms_periodic.js @@ -3,34 +3,35 @@ "use strict"; add_task(function* test_periodic_alarm_fires() { function backgroundScript() { const ALARM_NAME = "test_ext_alarms"; let count = 0; let timer; - browser.alarms.onAlarm.addListener(alarm => { + browser.alarms.onAlarm.addListener(async alarm => { browser.test.assertEq(alarm.name, ALARM_NAME, "alarm has the expected name"); if (count++ === 3) { clearTimeout(timer); - browser.alarms.clear(ALARM_NAME).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarm was cleared"); - browser.test.notifyPass("alarm-periodic"); - }); + let wasCleared = await browser.alarms.clear(ALARM_NAME); + browser.test.assertTrue(wasCleared, "alarm was cleared"); + + browser.test.notifyPass("alarm-periodic"); } }); browser.alarms.create(ALARM_NAME, {periodInMinutes: 0.02}); - timer = setTimeout(() => { + timer = setTimeout(async () => { browser.test.fail("alarm fired expected number of times"); - browser.alarms.clear(ALARM_NAME).then(wasCleared => { - browser.test.assertTrue(wasCleared, "alarm was cleared"); - }); + + let wasCleared = await browser.alarms.clear(ALARM_NAME); + browser.test.assertTrue(wasCleared, "alarm was cleared"); + browser.test.notifyFail("alarm-periodic"); }, 30000); } let extension = ExtensionTestUtils.loadExtension({ background: `(${backgroundScript})()`, manifest: { permissions: ["alarms"],
--- a/toolkit/components/extensions/test/xpcshell/test_ext_alarms_replaces.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_alarms_replaces.js @@ -2,32 +2,31 @@ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* test_duplicate_alarm_name_replaces_alarm() { function backgroundScript() { let count = 0; - browser.alarms.onAlarm.addListener(alarm => { + browser.alarms.onAlarm.addListener(async alarm => { if (alarm.name === "master alarm") { browser.alarms.create("child alarm", {delayInMinutes: 0.05}); - browser.alarms.getAll().then(results => { - browser.test.assertEq(2, results.length, "exactly two alarms exist"); - browser.test.assertEq("master alarm", results[0].name, "first alarm has the expected name"); - browser.test.assertEq("child alarm", results[1].name, "second alarm has the expected name"); - }).then(() => { - if (count++ === 3) { - browser.alarms.clear("master alarm").then(wasCleared => { - return browser.alarms.clear("child alarm"); - }).then(wasCleared => { - browser.test.notifyPass("alarm-duplicate"); - }); - } - }); + let results = await browser.alarms.getAll(); + + browser.test.assertEq(2, results.length, "exactly two alarms exist"); + browser.test.assertEq("master alarm", results[0].name, "first alarm has the expected name"); + browser.test.assertEq("child alarm", results[1].name, "second alarm has the expected name"); + + if (count++ === 3) { + await browser.alarms.clear("master alarm"); + await browser.alarms.clear("child alarm"); + + browser.test.notifyPass("alarm-duplicate"); + } } else { browser.test.fail("duplicate named alarm replaced existing alarm"); browser.test.notifyFail("alarm-duplicate"); } }); browser.alarms.create("master alarm", {delayInMinutes: 0.025, periodInMinutes: 0.025}); }
--- a/toolkit/components/extensions/test/xpcshell/test_ext_downloads.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_downloads.js @@ -45,26 +45,23 @@ add_task(function* test_downloads_open_p let extension = ExtensionTestUtils.loadExtension(extensionData); yield extension.startup(); yield extension.awaitFinish("downloads tests"); yield extension.unload(); }); add_task(function* test_downloads_open() { - function backgroundScript() { - browser.downloads.open(10).then(() => { - browser.test.fail("Expected an error"); - browser.test.notifyFail("downloads tests"); - }, error => { - browser.test.assertEq(error.message, "Invalid download id 10", - "The error is informative."); + async function backgroundScript() { + await browser.test.assertRejects( + browser.downloads.open(10), + "Invalid download id 10", + "The error is informative."); - browser.test.notifyPass("downloads tests"); - }); + browser.test.notifyPass("downloads tests"); // TODO: Once downloads.{pause,cancel,resume} lands (bug 1245602) test that this gives a good // error when called with an incompleted download. } let extensionData = { background: backgroundScript, manifest: {
--- a/toolkit/components/extensions/test/xpcshell/test_ext_downloads_download.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_downloads_download.js @@ -40,55 +40,51 @@ function setup() { } downloadDir.remove(false); }); } function backgroundScript() { let blobUrl; - browser.test.onMessage.addListener((msg, ...args) => { + browser.test.onMessage.addListener(async (msg, ...args) => { if (msg == "download.request") { let options = args[0]; if (options.blobme) { let blob = new Blob(options.blobme); delete options.blobme; blobUrl = options.url = window.URL.createObjectURL(blob); } - // download() throws on bad arguments, we can remove the extra - // promise when bug 1250223 is fixed. - return Promise.resolve().then(() => browser.downloads.download(options)) - .then(id => { - browser.test.sendMessage("download.done", {status: "success", id}); - }) - .catch(error => { - browser.test.sendMessage("download.done", {status: "error", errmsg: error.message}); - }); + try { + let id = await browser.downloads.download(options); + browser.test.sendMessage("download.done", {status: "success", id}); + } catch (error) { + browser.test.sendMessage("download.done", {status: "error", errmsg: error.message}); + } } else if (msg == "killTheBlob") { window.URL.revokeObjectURL(blobUrl); blobUrl = null; } }); browser.test.sendMessage("ready"); } // This function is a bit of a sledgehammer, it looks at every download // the browser knows about and waits for all active downloads to complete. // But we only start one at a time and only do a handful in total, so // this lets us test download() without depending on anything else. 
-function waitForDownloads() { - return Downloads.getList(Downloads.ALL) - .then(list => list.getAll()) - .then(downloads => { - let inprogress = downloads.filter(dl => !dl.stopped); - return Promise.all(inprogress.map(dl => dl.whenSucceeded())); - }); +async function waitForDownloads() { + let list = await Downloads.getList(Downloads.ALL); + let downloads = await list.getAll(); + + let inprogress = downloads.filter(dl => !dl.stopped); + return Promise.all(inprogress.map(dl => dl.whenSucceeded())); } // Create a file in the downloads directory. function touch(filename) { let file = downloadDir.clone(); file.append(filename); file.create(Ci.nsIFile.NORMAL_FILE_TYPE, FileUtils.PERMS_FILE); } @@ -110,27 +106,28 @@ add_task(function* test_downloads() { }, }); function download(options) { extension.sendMessage("download.request", options); return extension.awaitMessage("download.done"); } - function testDownload(options, localFile, expectedSize, description) { - return download(options).then(msg => { - equal(msg.status, "success", `downloads.download() works with ${description}`); - return waitForDownloads(); - }).then(() => { - let localPath = downloadDir.clone(); - let parts = Array.isArray(localFile) ? localFile : [localFile]; - parts.map(p => localPath.append(p)); - equal(localPath.fileSize, expectedSize, "Downloaded file has expected size"); - localPath.remove(false); - }); + async function testDownload(options, localFile, expectedSize, description) { + let msg = await download(options); + equal(msg.status, "success", `downloads.download() works with ${description}`); + + await waitForDownloads(); + + let localPath = downloadDir.clone(); + let parts = Array.isArray(localFile) ? 
localFile : [localFile]; + + parts.map(p => localPath.append(p)); + equal(localPath.fileSize, expectedSize, "Downloaded file has expected size"); + localPath.remove(false); } yield extension.startup(); yield extension.awaitMessage("ready"); do_print("extension started"); // Call download() with just the url property. yield testDownload({url: FILE_URL}, FILE_NAME, FILE_LEN, "just source"); @@ -277,20 +274,22 @@ add_task(function* test_download_post() if (body) { const str = NetUtil.readInputStreamToString(received.bodyInputStream, received.bodyInputStream.available()); equal(str, body, "body is correct"); } } function background() { - browser.test.onMessage.addListener(options => { - Promise.resolve() - .then(() => browser.downloads.download(options)) - .catch(err => browser.test.sendMessage("done", {err: err.message})); + browser.test.onMessage.addListener(async options => { + try { + await browser.downloads.download(options); + } catch (err) { + browser.test.sendMessage("done", {err: err.message}); + } }); browser.downloads.onChanged.addListener(({state}) => { if (state && state.current === "complete") { browser.test.sendMessage("done", {ok: true}); } }); }
--- a/toolkit/components/extensions/test/xpcshell/test_ext_downloads_misc.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_downloads_misc.js @@ -161,81 +161,81 @@ function backgroundScript() { fail(`Mismatched event: expecting ${JSON.stringify(expected[0])} but got ${JSON.stringify(Array.from(remaining)[0])}`); } } eventWaiter = check; check(); }); } - browser.test.onMessage.addListener(function(msg, ...args) { + browser.test.onMessage.addListener(async (msg, ...args) => { let match = msg.match(/(\w+).request$/); if (!match) { return; } + let what = match[1]; if (what == "waitForEvents") { - waitForEvents(...args).then(() => { + try { + await waitForEvents(...args); browser.test.sendMessage("waitForEvents.done", {status: "success"}); - }).catch(error => { + } catch (error) { browser.test.sendMessage("waitForEvents.done", {status: "error", errmsg: error.message}); - }); + } } else if (what == "clearEvents") { events = new Set(); browser.test.sendMessage("clearEvents.done", {status: "success"}); } else { - // extension functions throw on bad arguments, we can remove the extra - // promise when bug 1250223 is fixed. 
- Promise.resolve().then(() => { - return browser.downloads[what](...args); - }).then(result => { + try { + let result = await browser.downloads[what](...args); browser.test.sendMessage(`${what}.done`, {status: "success", result}); - }).catch(error => { + } catch (error) { browser.test.sendMessage(`${what}.done`, {status: "error", errmsg: error.message}); - }); + } } }); browser.test.sendMessage("ready"); } let downloadDir; let extension; -function clearDownloads(callback) { - return Downloads.getList(Downloads.ALL).then(list => { - return list.getAll().then(downloads => { - return Promise.all(downloads.map(download => list.remove(download))) - .then(() => downloads); - }); - }); +async function clearDownloads(callback) { + let list = await Downloads.getList(Downloads.ALL); + let downloads = await list.getAll(); + + await Promise.all(downloads.map(download => list.remove(download))); + + return downloads; } function runInExtension(what, ...args) { extension.sendMessage(`${what}.request`, ...args); return extension.awaitMessage(`${what}.done`); } // This is pretty simplistic, it looks for a progress update for a // download of the given url in which the total bytes are exactly equal // to the given value. Unless you know exactly how data will arrive from // the server (eg see interruptible.sjs), it probably isn't very useful. 
-function waitForProgress(url, bytes) { - return Downloads.getList(Downloads.ALL) - .then(list => new Promise(resolve => { - const view = { - onDownloadChanged(download) { - if (download.source.url == url && download.currentBytes == bytes) { - list.removeView(view); - resolve(); - } - }, - }; - list.addView(view); - })); +async function waitForProgress(url, bytes) { + let list = await Downloads.getList(Downloads.ALL); + + return new Promise(resolve => { + const view = { + onDownloadChanged(download) { + if (download.source.url == url && download.currentBytes == bytes) { + list.removeView(view); + resolve(); + } + }, + }; + list.addView(view); + }); } add_task(function* setup() { const nsIFile = Ci.nsIFile; downloadDir = FileUtils.getDir("TmpD", ["downloads"]); downloadDir.createUnique(nsIFile.DIRECTORY_TYPE, FileUtils.PERMS_DIRECTORY); do_print(`downloadDir ${downloadDir.path}`);
--- a/toolkit/components/extensions/test/xpcshell/test_ext_downloads_search.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_downloads_search.js @@ -34,52 +34,47 @@ function backgroundScript() { browser.downloads.onChanged.addListener(change => { if (change.state && change.state.current == "complete") { // Make sure we have a promise. waitForComplete(change.id); complete.get(change.id).resolve(); } }); - browser.test.onMessage.addListener(function(msg) { - // extension functions throw on bad arguments, we can remove the extra - // promise when bug 1250223 is fixed. + browser.test.onMessage.addListener(async (msg, ...args) => { if (msg == "download.request") { - Promise.resolve().then(() => browser.downloads.download(arguments[1])) - .then(id => { - browser.test.sendMessage("download.done", {status: "success", id}); - }) - .catch(error => { - browser.test.sendMessage("download.done", {status: "error", errmsg: error.message}); - }); + try { + let id = await browser.downloads.download(args[0]); + browser.test.sendMessage("download.done", {status: "success", id}); + } catch (error) { + browser.test.sendMessage("download.done", {status: "error", errmsg: error.message}); + } } else if (msg == "search.request") { - Promise.resolve().then(() => browser.downloads.search(arguments[1])) - .then(downloads => { - browser.test.sendMessage("search.done", {status: "success", downloads}); - }) - .catch(error => { - browser.test.sendMessage("search.done", {status: "error", errmsg: error.message}); - }); + try { + let downloads = await browser.downloads.search(args[0]); + browser.test.sendMessage("search.done", {status: "success", downloads}); + } catch (error) { + browser.test.sendMessage("search.done", {status: "error", errmsg: error.message}); + } } else if (msg == "waitForComplete.request") { - waitForComplete(arguments[1]).then(() => { - browser.test.sendMessage("waitForComplete.done"); - }); + await waitForComplete(args[0]); + 
browser.test.sendMessage("waitForComplete.done"); } }); browser.test.sendMessage("ready"); } -function clearDownloads(callback) { - return Downloads.getList(Downloads.ALL).then(list => { - return list.getAll().then(downloads => { - return Promise.all(downloads.map(download => list.remove(download))) - .then(() => downloads); - }); - }); +async function clearDownloads(callback) { + let list = await Downloads.getList(Downloads.ALL); + let downloads = await list.getAll(); + + await Promise.all(downloads.map(download => list.remove(download))); + + return downloads; } add_task(function* test_search() { const nsIFile = Ci.nsIFile; let downloadDir = FileUtils.getDir("TmpD", ["downloads"]); downloadDir.createUnique(nsIFile.DIRECTORY_TYPE, FileUtils.PERMS_DIRECTORY); do_print(`downloadDir ${downloadDir.path}`); @@ -87,46 +82,46 @@ add_task(function* test_search() { let path = downloadDir.clone(); path.append(filename); return path.path; } Services.prefs.setIntPref("browser.download.folderList", 2); Services.prefs.setComplexValue("browser.download.dir", nsIFile, downloadDir); - do_register_cleanup(() => { + do_register_cleanup(async () => { Services.prefs.clearUserPref("browser.download.folderList"); Services.prefs.clearUserPref("browser.download.dir"); - return cleanupDir(downloadDir).then(clearDownloads); + await cleanupDir(downloadDir); + await clearDownloads(); }); yield clearDownloads().then(downloads => { do_print(`removed ${downloads.length} pre-existing downloads from history`); }); let extension = ExtensionTestUtils.loadExtension({ background: backgroundScript, manifest: { permissions: ["downloads"], }, }); - function download(options) { + async function download(options) { extension.sendMessage("download.request", options); - return extension.awaitMessage("download.done").then(result => { - let promise; - if (result.status == "success") { - do_print(`wait for onChanged event to indicate ${result.id} is complete`); - extension.sendMessage("waitForComplete.request", 
result.id); - promise = extension.awaitMessage("waitForComplete.done"); - } else { - promise = Promise.resolve(); - } - return promise.then(() => result); - }); + let result = await extension.awaitMessage("download.done"); + + if (result.status == "success") { + do_print(`wait for onChanged event to indicate ${result.id} is complete`); + extension.sendMessage("waitForComplete.request", result.id); + + await extension.awaitMessage("waitForComplete.done"); + } + + return result; } function search(query) { extension.sendMessage("search.request", query); return extension.awaitMessage("search.done"); } yield extension.startup();
--- a/toolkit/components/extensions/test/xpcshell/test_ext_extension.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_extension.js @@ -1,18 +1,18 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* test_is_allowed_incognito_access() { - function background() { - browser.extension.isAllowedIncognitoAccess().then(isAllowedIncognitoAccess => { - browser.test.assertEq(true, isAllowedIncognitoAccess, "isAllowedIncognitoAccess is true"); - browser.test.notifyPass("isAllowedIncognitoAccess"); - }); + async function background() { + let allowed = await browser.extension.isAllowedIncognitoAccess(); + + browser.test.assertEq(true, allowed, "isAllowedIncognitoAccess is true"); + browser.test.notifyPass("isAllowedIncognitoAccess"); } let extension = ExtensionTestUtils.loadExtension({ background, manifest: {}, }); yield extension.startup(); @@ -32,21 +32,21 @@ add_task(function* test_in_incognito_con }); yield extension.startup(); yield extension.awaitFinish("inIncognitoContext"); yield extension.unload(); }); add_task(function* test_is_allowed_file_scheme_access() { - function background() { - browser.extension.isAllowedFileSchemeAccess().then(isAllowedFileSchemeAccess => { - browser.test.assertEq(false, isAllowedFileSchemeAccess, "isAllowedFileSchemeAccess is false"); - browser.test.notifyPass("isAllowedFileSchemeAccess"); - }); + async function background() { + let allowed = await browser.extension.isAllowedFileSchemeAccess(); + + browser.test.assertEq(false, allowed, "isAllowedFileSchemeAccess is false"); + browser.test.notifyPass("isAllowedFileSchemeAccess"); } let extension = ExtensionTestUtils.loadExtension({ background, manifest: {}, }); yield extension.startup();
--- a/toolkit/components/extensions/test/xpcshell/test_ext_legacy_extension_context.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_legacy_extension_context.js @@ -26,23 +26,23 @@ add_task(function* test_legacy_extension // Extract the assigned uuid from the background page url. uuid: window.location.hostname, }; browser.test.sendMessage("webextension-ready", extensionInfo); let port; - browser.test.onMessage.addListener(msg => { + browser.test.onMessage.addListener(async msg => { if (msg == "do-send-message") { - browser.runtime.sendMessage("webextension -> legacy_extension message").then(reply => { - browser.test.assertEq("legacy_extension -> webextension reply", reply, - "Got the expected message from the LegacyExtensionContext"); - browser.test.sendMessage("got-reply-message"); - }); + let reply = await browser.runtime.sendMessage("webextension -> legacy_extension message"); + + browser.test.assertEq("legacy_extension -> webextension reply", reply, + "Got the expected message from the LegacyExtensionContext"); + browser.test.sendMessage("got-reply-message"); } else if (msg == "do-connect") { port = browser.runtime.connect(); port.onMessage.addListener(msg => { browser.test.assertEq("legacy_extension -> webextension port message", msg, "Got the expected message from the LegacyExtensionContext"); port.postMessage("webextension -> legacy_extension port message"); });
--- a/toolkit/components/extensions/test/xpcshell/test_ext_management_uninstall_self.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_management_uninstall_self.js @@ -102,25 +102,23 @@ add_task(function* test_management_unins equal(promptService._confirmExArgs[5], "Keep Installed"); Services.obs.notifyObservers(extension.extension.file, "flush-cache-entry", null); }); add_task(function* test_management_uninstall_prompt_keep() { promptService._response = 1; function background() { - browser.test.onMessage.addListener(msg => { - browser.management.uninstallSelf({showConfirmDialog: true}).then(() => { - browser.test.fail("uninstallSelf rejects when user declines uninstall"); - }, error => { - browser.test.assertEq("User cancelled uninstall of extension", - error.message, - "Expected rejection when user declines uninstall"); - browser.test.sendMessage("uninstall-rejected"); - }); + browser.test.onMessage.addListener(async msg => { + await browser.test.assertRejects( + browser.management.uninstallSelf({showConfirmDialog: true}), + "User cancelled uninstall of extension", + "Expected rejection when user declines uninstall"); + + browser.test.sendMessage("uninstall-rejected"); }); } let extension = ExtensionTestUtils.loadExtension({ manifest, background, useAddonManager: "temporary", });
--- a/toolkit/components/extensions/test/xpcshell/test_ext_native_messaging.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_native_messaging.js @@ -170,33 +170,33 @@ if (AppConstants.platform == "win") { let exitPromise = waitForSubprocessExit(); yield extension.unload(); yield exitPromise; }); } // Test sendNativeMessage() add_task(function* test_sendNativeMessage() { - function background() { + async function background() { let MSG = {test: "hello world"}; // Check error handling - browser.runtime.sendNativeMessage("nonexistent", MSG).then(() => { - browser.test.fail("sendNativeMessage() to a nonexistent app should have failed"); - }, err => { - browser.test.succeed("sendNativeMessage() to a nonexistent app failed"); - }).then(() => { - // Check regular message exchange - return browser.runtime.sendNativeMessage("echo", MSG); - }).then(reply => { - let expected = JSON.stringify(MSG); - let received = JSON.stringify(reply); - browser.test.assertEq(expected, received, "Received echoed native message"); - browser.test.sendMessage("finished"); - }); + await browser.test.assertRejects( + browser.runtime.sendNativeMessage("nonexistent", MSG), + /Attempt to postMessage on disconnected port/, + "sendNativeMessage() to a nonexistent app failed"); + + // Check regular message exchange + let reply = await browser.runtime.sendNativeMessage("echo", MSG); + + let expected = JSON.stringify(MSG); + let received = JSON.stringify(reply); + browser.test.assertEq(expected, received, "Received echoed native message"); + + browser.test.sendMessage("finished"); } let extension = ExtensionTestUtils.loadExtension({ background, manifest: { applications: {gecko: {id: ID}}, permissions: ["nativeMessaging"], },
--- a/toolkit/components/extensions/test/xpcshell/test_ext_runtime_getBrowserInfo.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_runtime_getBrowserInfo.js @@ -3,24 +3,24 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ "use strict"; add_task(function* setup() { ExtensionTestUtils.mockAppInfo(); }); add_task(function* test_getBrowserInfo() { - function background() { - browser.runtime.getBrowserInfo().then(info => { - browser.test.assertEq(info.name, "XPCShell", "name is valid"); - browser.test.assertEq(info.vendor, "Mozilla", "vendor is Mozilla"); - browser.test.assertEq(info.version, "48", "version is correct"); - browser.test.assertEq(info.buildID, "20160315", "buildID is correct"); + async function background() { + let info = await browser.runtime.getBrowserInfo(); - browser.test.notifyPass("runtime.getBrowserInfo"); - }); + browser.test.assertEq(info.name, "XPCShell", "name is valid"); + browser.test.assertEq(info.vendor, "Mozilla", "vendor is Mozilla"); + browser.test.assertEq(info.version, "48", "version is correct"); + browser.test.assertEq(info.buildID, "20160315", "buildID is correct"); + + browser.test.notifyPass("runtime.getBrowserInfo"); } const extension = ExtensionTestUtils.loadExtension({background}); yield extension.startup(); yield extension.awaitFinish("runtime.getBrowserInfo"); yield extension.unload(); });
--- a/toolkit/components/extensions/test/xpcshell/test_ext_runtime_sendMessage_errors.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_runtime_sendMessage_errors.js @@ -1,14 +1,14 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* test_sendMessage_error() { - function background() { + async function background() { let circ = {}; circ.circ = circ; let testCases = [ // [arguments, expected error string], [[], "runtime.sendMessage's message argument is missing"], [[null, null, null, null], "runtime.sendMessage's last argument is not a function"], [[null, null, 1], "runtime.sendMessage's options argument is invalid"], [[1, null, null], "runtime.sendMessage's extensionId argument is invalid"], @@ -30,31 +30,26 @@ add_task(function* test_sendMessage_erro ]; // Repeat all tests with the undefined value instead of null. for (let [args, expectedError] of testCases.slice()) { args = args.map(arg => arg === null ? undefined : arg); testCases.push([args, expectedError]); } - function next() { - if (!testCases.length) { - browser.test.notifyPass("sendMessage parameter validation"); - return; - } - let [args, expectedError] = testCases.shift(); + for (let [args, expectedError] of testCases) { let description = `runtime.sendMessage(${args.map(String).join(", ")})`; - return browser.runtime.sendMessage(...args) - .then(() => { - browser.test.fail(`Unexpectedly got no error for ${description}`); - }, err => { - browser.test.assertEq(expectedError, err.message, `expected error message for ${description}`); - }).then(next); + + await browser.test.assertRejects( + browser.runtime.sendMessage(...args), + expectedError, + `expected error message for ${description}`); } - next(); + + browser.test.notifyPass("sendMessage parameter validation"); } let extensionData = { background, }; let extension = ExtensionTestUtils.loadExtension(extensionData); yield extension.startup();
--- a/toolkit/components/extensions/test/xpcshell/test_ext_runtime_sendMessage_no_receiver.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_runtime_sendMessage_no_receiver.js @@ -1,21 +1,20 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* test_sendMessage_without_listener() { - function background() { - browser.runtime.sendMessage("msg").then(reply => { - browser.test.assertEq(undefined, reply); - browser.test.notifyFail("Did not expect a reply to sendMessage"); - }, error => { - browser.test.assertEq("Could not establish connection. Receiving end does not exist.", error.message); - browser.test.notifyPass("sendMessage callback was invoked"); - }); + async function background() { + await browser.test.assertRejects( + browser.runtime.sendMessage("msg"), + "Could not establish connection. Receiving end does not exist.", + "sendMessage callback was invoked"); + + browser.test.notifyPass("sendMessage callback was invoked"); } let extensionData = { background, }; let extension = ExtensionTestUtils.loadExtension(extensionData); yield extension.startup();
--- a/toolkit/components/extensions/test/xpcshell/test_ext_runtime_sendMessage_self.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_runtime_sendMessage_self.js @@ -1,35 +1,32 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; add_task(function* test_sendMessage_to_self_should_not_trigger_onMessage() { - function background() { + async function background() { browser.runtime.onMessage.addListener(msg => { browser.test.assertEq("msg from child", msg); browser.test.notifyPass("sendMessage did not call same-frame onMessage"); }); browser.test.onMessage.addListener(msg => { browser.test.assertEq("sendMessage with a listener in another frame", msg); browser.runtime.sendMessage("should only reach another frame"); }); - browser.runtime.sendMessage("should not trigger same-frame onMessage") - .then(reply => { - browser.test.fail(`Unexpected reply to sendMessage: ${reply}`); - }, err => { - browser.test.assertEq("Could not establish connection. Receiving end does not exist.", err.message); + await browser.test.assertRejects( + browser.runtime.sendMessage("should not trigger same-frame onMessage"), + "Could not establish connection. Receiving end does not exist."); - let anotherFrame = document.createElement("iframe"); - anotherFrame.src = browser.extension.getURL("extensionpage.html"); - document.body.appendChild(anotherFrame); - }); + let anotherFrame = document.createElement("iframe"); + anotherFrame.src = browser.extension.getURL("extensionpage.html"); + document.body.appendChild(anotherFrame); } function lastScript() { browser.runtime.onMessage.addListener(msg => { browser.test.assertEq("should only reach another frame", msg); browser.runtime.sendMessage("msg from child"); }); browser.test.sendMessage("sendMessage callback called");
--- a/toolkit/components/extensions/test/xpcshell/test_ext_storage.js +++ b/toolkit/components/extensions/test/xpcshell/test_ext_storage.js @@ -1,27 +1,26 @@ /* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set sts=2 sw=2 et tw=80: */ "use strict"; -function backgroundScript() { +async function backgroundScript() { let storage = browser.storage.local; - function check(prop, value) { - return storage.get(null).then(data => { - browser.test.assertEq(value, data[prop], "null getter worked for " + prop); - return storage.get(prop); - }).then(data => { - browser.test.assertEq(value, data[prop], "string getter worked for " + prop); - return storage.get([prop]); - }).then(data => { - browser.test.assertEq(value, data[prop], "array getter worked for " + prop); - return storage.get({[prop]: undefined}); - }).then(data => { - browser.test.assertEq(value, data[prop], "object getter worked for " + prop); - }); + async function check(prop, value) { + let data = await storage.get(null); + browser.test.assertEq(value, data[prop], "null getter worked for " + prop); + + data = await storage.get(prop); + browser.test.assertEq(value, data[prop], "string getter worked for " + prop); + + data = await storage.get([prop]); + browser.test.assertEq(value, data[prop], "array getter worked for " + prop); + + data = await storage.get({[prop]: undefined}); + browser.test.assertEq(value, data[prop], "object getter worked for " + prop); } let globalChanges = {}; browser.storage.onChanged.addListener((changes, storage) => { browser.test.assertEq("local", storage, "storage is local"); Object.assign(globalChanges, changes); }); @@ -37,111 +36,100 @@ function backgroundScript() { checkSub(changes, globalChanges); checkSub(globalChanges, changes); globalChanges = {}; } /* eslint-disable dot-notation */ // Set some data and then test getters. 
- storage.set({"test-prop1": "value1", "test-prop2": "value2"}).then(() => { + + try { + await storage.set({"test-prop1": "value1", "test-prop2": "value2"}); checkChanges({"test-prop1": {newValue: "value1"}, "test-prop2": {newValue: "value2"}}); - return check("test-prop1", "value1"); - }).then(() => { - return check("test-prop2", "value2"); - }).then(() => { - return storage.get({"test-prop1": undefined, "test-prop2": undefined, "other": "default"}); - }).then(data => { + + await check("test-prop1", "value1"); + await check("test-prop2", "value2"); + + let data = await storage.get({"test-prop1": undefined, "test-prop2": undefined, "other": "default"}); browser.test.assertEq("value1", data["test-prop1"], "prop1 correct"); browser.test.assertEq("value2", data["test-prop2"], "prop2 correct"); browser.test.assertEq("default", data["other"], "other correct"); - return storage.get(["test-prop1", "test-prop2", "other"]); - }).then(data => { + + data = await storage.get(["test-prop1", "test-prop2", "other"]); browser.test.assertEq("value1", data["test-prop1"], "prop1 correct"); browser.test.assertEq("value2", data["test-prop2"], "prop2 correct"); browser.test.assertFalse("other" in data, "other correct"); - // Remove data in various ways. - }).then(() => { - return storage.remove("test-prop1"); - }).then(() => { + // Remove data in various ways. 
+ await storage.remove("test-prop1"); checkChanges({"test-prop1": {oldValue: "value1"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertFalse("test-prop1" in data, "prop1 absent"); browser.test.assertTrue("test-prop2" in data, "prop2 present"); - return storage.set({"test-prop1": "value1"}); - }).then(() => { + + await storage.set({"test-prop1": "value1"}); checkChanges({"test-prop1": {newValue: "value1"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertEq("value1", data["test-prop1"], "prop1 correct"); browser.test.assertEq("value2", data["test-prop2"], "prop2 correct"); - }).then(() => { - return storage.remove(["test-prop1", "test-prop2"]); - }).then(() => { + + await storage.remove(["test-prop1", "test-prop2"]); checkChanges({"test-prop1": {oldValue: "value1"}, "test-prop2": {oldValue: "value2"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertFalse("test-prop1" in data, "prop1 absent"); browser.test.assertFalse("test-prop2" in data, "prop2 absent"); - // test storage.clear - }).then(() => { - return storage.set({"test-prop1": "value1", "test-prop2": "value2"}); - }).then(() => { - return storage.clear(); - }).then(() => { + // test storage.clear + await storage.set({"test-prop1": "value1", "test-prop2": "value2"}); + await storage.clear(); + checkChanges({"test-prop1": {oldValue: "value1"}, "test-prop2": {oldValue: "value2"}}); - return storage.get(["test-prop1", "test-prop2"]); - }).then(data => { + data = await storage.get(["test-prop1", "test-prop2"]); browser.test.assertFalse("test-prop1" in data, "prop1 absent"); browser.test.assertFalse("test-prop2" in data, "prop2 absent"); - // Test cache invalidation. 
- }).then(() => { - return storage.set({"test-prop1": "value1", "test-prop2": "value2"}); - }).then(() => { + // Test cache invalidation. + await storage.set({"test-prop1": "value1", "test-prop2": "value2"}); + globalChanges = {}; // Schedule sendMessage after onMessage because the other end immediately // sends a message. Promise.resolve().then(() => { browser.test.sendMessage("invalidate"); }); - return new Promise(resolve => browser.test.onMessage.addListener(resolve)); - }).then(() => { - return check("test-prop1", "value1"); - }).then(() => { - return check("test-prop2", "value2"); + await new Promise(resolve => browser.test.onMessage.addListener(resolve)); - // Make sure we can store complex JSON data. - }).then(() => { - return storage.set({ + await check("test-prop1", "value1"); + await check("test-prop2", "value2"); + + // Make sure we can store complex JSON data. + await storage.set({ "test-prop1": { str: "hello", bool: true, null: null, undef: undefined, obj: {}, arr: [1, 2], date: new Date(0), regexp: /regexp/, func: function func() {}, window, }, }); - }).then(() => { - return storage.set({"test-prop2": function func() {}}); - }).then(() => { + + await storage.set({"test-prop2": function func() {}}); browser.test.assertEq("value1", globalChanges["test-prop1"].oldValue, "oldValue correct"); browser.test.assertEq("object", typeof(globalChanges["test-prop1"].newValue), "newValue is obj"); globalChanges = {}; - return storage.get({"test-prop1": undefined, "test-prop2": undefined}); - }).then(data => { + + data = await storage.get({"test-prop1": undefined, "test-prop2": undefined}); let obj = data["test-prop1"]; browser.test.assertEq("hello", obj.str, "string part correct"); browser.test.assertEq(true, obj.bool, "bool part correct"); browser.test.assertEq(null, obj.null, "null part correct"); browser.test.assertEq(undefined, obj.undef, "undefined part correct"); browser.test.assertEq(undefined, obj.func, "function part correct"); 
browser.test.assertEq(undefined, obj.window, "window part correct"); @@ -152,22 +140,22 @@ function backgroundScript() { browser.test.assertEq(1, obj.arr[0], "arr[0] part correct"); browser.test.assertEq(2, obj.arr[1], "arr[1] part correct"); browser.test.assertEq(2, obj.arr.length, "arr.length part correct"); obj = data["test-prop2"]; browser.test.assertEq("[object Object]", {}.toString.call(obj), "function serialized as a plain object"); browser.test.assertEq(0, Object.keys(obj).length, "function serialized as an empty object"); - }).then(() => { + browser.test.notifyPass("storage"); - }).catch(e => { + } catch (e) { browser.test.fail(`Error: ${e} :: ${e.stack}`); browser.test.notifyFail("storage"); - }); + } } let extensionData = { background: backgroundScript, manifest: { permissions: ["storage"], }, };
--- a/toolkit/components/telemetry/Histograms.json +++ b/toolkit/components/telemetry/Histograms.json @@ -7893,16 +7893,24 @@ "COOKIE_SCHEME_SECURITY": { "alert_emails": ["seceng@mozilla.org"], "expires_in_version": "55", "kind": "enumerated", "n_values": 10, "releaseChannelCollection": "opt-out", "description": "How often are secure cookies set from non-secure origins, and vice-versa? 0=nonsecure/http, 1=nonsecure/https, 2=secure/http, 3=secure/https" }, + "COOKIE_LEAVE_SECURE_ALONE": { + "alert_emails": ["seceng@mozilla.org"], + "expires_in_version": "57", + "kind": "enumerated", + "n_values": 10, + "releaseChannelCollection": "opt-out", + "description": "Measuring the effects of draft-ietf-httpbis-cookie-alone blocking. 0=blocked http setting secure cookie; 1=blocked http downgrading secure cookie; 2=blocked evicting secure cookie; 3=evicting newer insecure cookie; 4=evicting the oldest insecure cookie; 5=evicting the preferred cookie; 6=evicting the secure blocked" + }, "NTLM_MODULE_USED_2": { "expires_in_version": "never", "kind": "enumerated", "n_values": 8, "description": "The module used for the NTLM protocol (Windows_API, Kerberos, Samba_auth or Generic) and whether or not the authentication was used to connect to a proxy server. This data is collected only once per session (at first NTLM authentification) ; fixed version." }, "FX_THUMBNAILS_BG_QUEUE_SIZE_ON_CAPTURE": { "expires_in_version": "default",
--- a/toolkit/components/telemetry/histogram-whitelists.json +++ b/toolkit/components/telemetry/histogram-whitelists.json @@ -853,16 +853,17 @@ "CHARSET_OVERRIDE_USED", "CHECK_ADDONS_MODIFIED_MS", "CHECK_JAVA_ENABLED", "COMPONENTS_SHIM_ACCESSED_BY_CONTENT", "COMPOSITE_FRAME_ROUNDTRIP_TIME", "COMPOSITE_TIME", "CONTENT_DOCUMENTS_DESTROYED", "COOKIE_SCHEME_SECURITY", + "COOKIE_LEAVE_SECURE_ALONE", "CRASH_STORE_COMPRESSED_BYTES", "CYCLE_COLLECTOR", "CYCLE_COLLECTOR_ASYNC_SNOW_WHITE_FREEING", "CYCLE_COLLECTOR_COLLECTED", "CYCLE_COLLECTOR_FINISH_IGC", "CYCLE_COLLECTOR_FULL", "CYCLE_COLLECTOR_MAX_PAUSE", "CYCLE_COLLECTOR_NEED_GC",
--- a/toolkit/crashreporter/nsExceptionHandler.cpp +++ b/toolkit/crashreporter/nsExceptionHandler.cpp @@ -553,23 +553,16 @@ Concat(XP_CHAR* str, const XP_CHAR* toAp memcpy(str, toAppend, appendLen * sizeof(XP_CHAR)); str += appendLen; *str = '\0'; *size -= appendLen; return str; } -static const char* gMozCrashReason = nullptr; - -void AnnotateMozCrashReason(const char* aReason) -{ - gMozCrashReason = aReason; -} - static size_t gOOMAllocationSize = 0; void AnnotateOOMAllocationSize(size_t size) { gOOMAllocationSize = size; } static size_t gTexturesSize = 0;
--- a/toolkit/crashreporter/nsExceptionHandler.h +++ b/toolkit/crashreporter/nsExceptionHandler.h @@ -1,16 +1,18 @@ /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef nsExceptionHandler_h__ #define nsExceptionHandler_h__ +#include "mozilla/Assertions.h" + #include <stddef.h> #include <stdint.h> #include "nsError.h" #include "nsStringGlue.h" #if defined(XP_WIN32) #ifdef WIN32_LEAN_AND_MEAN #undef WIN32_LEAN_AND_MEAN @@ -68,19 +70,16 @@ nsresult SetMinidumpPath(const nsAString // AnnotateCrashReport, RemoveCrashReportAnnotation and // AppendAppNotesToCrashReport may be called from any thread in a chrome // process, but may only be called from the main thread in a content process. nsresult AnnotateCrashReport(const nsACString& key, const nsACString& data); nsresult RemoveCrashReportAnnotation(const nsACString& key); nsresult AppendAppNotesToCrashReport(const nsACString& data); -// NOTE: If you change this definition, also change the definition in Assertions.h -// as it is intended to be defining this same function. -void AnnotateMozCrashReason(const char* aReason); void AnnotateOOMAllocationSize(size_t size); void AnnotateTexturesSize(size_t size); void AnnotatePendingIPC(size_t aNumOfPendingIPC, uint32_t aTopPendingIPCCount, const char* aTopPendingIPCName, uint32_t aTopPendingIPCType); nsresult SetGarbageCollecting(bool collecting); void SetEventloopNestingLevel(uint32_t level);
--- a/widget/nsBaseWidget.cpp +++ b/widget/nsBaseWidget.cpp @@ -330,24 +330,19 @@ nsBaseWidget::OnRenderingDeviceReset() // accelerated layers again. RefPtr<ClientLayerManager> clm = mLayerManager->AsClientLayerManager(); if (!ComputeShouldAccelerate() && clm->GetTextureFactoryIdentifier().mParentBackend != LayersBackend::LAYERS_BASIC) { return; } - RefPtr<CompositorBridgeParent> parent = mCompositorSession->GetInProcessBridge(); - if (!parent) { - return; - } - // Recreate the compositor. TextureFactoryIdentifier identifier; - if (!parent->ResetCompositor(backendHints, &identifier)) { + if (!mCompositorSession->Reset(backendHints, &identifier)) { // No action was taken, so we don't have to do anything. return; } // Invalidate all layers. FrameLayerBuilder::InvalidateAllLayers(mLayerManager); // Update the texture factory identifier.
--- a/widget/nsBaseWidget.h +++ b/widget/nsBaseWidget.h @@ -53,16 +53,17 @@ class BasicLayerManager; class CompositorBridgeChild; class CompositorBridgeParent; class IAPZCTreeManager; class GeckoContentController; class APZEventState; class CompositorSession; class ImageContainer; struct ScrollableLayerGuid; +class RemoteCompositorSession; } // namespace layers namespace widget { class CompositorWidgetDelegate; class InProcessCompositorWidget; class WidgetRenderingContext; } // namespace widget @@ -107,16 +108,17 @@ public: * class, but it gives them a head start.) */ class nsBaseWidget : public nsIWidget, public nsSupportsWeakReference { friend class nsAutoRollup; friend class DispatchWheelEventOnMainThread; friend class mozilla::widget::InProcessCompositorWidget; + friend class mozilla::layers::RemoteCompositorSession; protected: typedef base::Thread Thread; typedef mozilla::gfx::DrawTarget DrawTarget; typedef mozilla::gfx::SourceSurface SourceSurface; typedef mozilla::layers::BasicLayerManager BasicLayerManager; typedef mozilla::layers::BufferMode BufferMode; typedef mozilla::layers::CompositorBridgeChild CompositorBridgeChild;
--- a/xpcom/base/ErrorList.h +++ b/xpcom/base/ErrorList.h @@ -977,16 +977,17 @@ ERROR(NS_ERROR_DOM_MEDIA_METADATA_ERR, FAILURE(6)), ERROR(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR, FAILURE(7)), ERROR(NS_ERROR_DOM_MEDIA_END_OF_STREAM, FAILURE(8)), ERROR(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, FAILURE(9)), ERROR(NS_ERROR_DOM_MEDIA_CANCELED, FAILURE(10)), ERROR(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, FAILURE(11)), ERROR(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, FAILURE(12)), ERROR(NS_ERROR_DOM_MEDIA_CDM_ERR, FAILURE(13)), + ERROR(NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER, FAILURE(14)), /* Internal platform-related errors */ ERROR(NS_ERROR_DOM_MEDIA_CUBEB_INITIALIZATION_ERR, FAILURE(101)), #undef MODULE /* ======================================================================= */ /* 51: NS_ERROR_MODULE_GENERAL */ /* ======================================================================= */