author     | Wes Kocher <wkocher@mozilla.com>
date       | Mon, 22 May 2017 16:26:12 -0700
changeset  | 360017 | 5bc1c758ab57c1885dceab4e7837e58af27b998c
parent     | 359963 | d712c82c59ec5a277047a75d09bec48be4a64b87 (current diff)
parent     | 360016 | ec6edd4fec8f62db57c1f075d1d3b536c74af5f8 (diff)
child      | 360047 | 88875a0619d6916b54967ff79c8c999135099ff6
child      | 360110 | 8740b130efef4571b554aa588de13857a3915257
push id    | 31866
push user  | kwierso@gmail.com
push date  | Mon, 22 May 2017 23:26:23 +0000
treeherder | mozilla-central@5bc1c758ab57
reviewers  | merge
milestone  | 55.0a1
--- a/browser/base/content/test/tabs/browser_new_web_tab_in_file_process_pref.js
+++ b/browser/base/content/test/tabs/browser_new_web_tab_in_file_process_pref.js
@@ -14,28 +14,28 @@ function CheckBrowserInPid(browser, expe
 function CheckBrowserNotInPid(browser, unExpectedPid, message) {
   return ContentTask.spawn(browser, { unExpectedPid, message }, (arg) => {
     isnot(Services.appinfo.processID, arg.unExpectedPid, arg.message);
   });
 }
 
 // Test for bug 1343184.
 add_task(async function() {
+  // Set prefs to ensure file content process, to allow linked web content in
+  // file URI process and allow more than one file content process.
+  await SpecialPowers.pushPrefEnv(
+    {set: [["browser.tabs.remote.separateFileUriProcess", true],
+           ["browser.tabs.remote.allowLinkedWebInFileUriProcess", true],
+           ["dom.ipc.processCount.file", 2]]});
+
   // Open file:// page.
   let dir = getChromeDir(getResolvedURI(gTestPath));
   dir.append(TEST_FILE);
   const uriString = Services.io.newFileURI(dir).spec;
   await BrowserTestUtils.withNewTab(uriString, async function(fileBrowser) {
-    // Set prefs to ensure file content process, to allow linked web content
-    // in file URI process and allow more that one file content process.
-    await SpecialPowers.pushPrefEnv(
-      {set: [["browser.tabs.remote.separateFileUriProcess", true],
-             ["browser.tabs.remote.allowLinkedWebInFileUriProcess", true],
-             ["dom.ipc.processCount.file", 2]]});
-
     // Get the file:// URI pid for comparison later.
     let filePid = await ContentTask.spawn(fileBrowser, null, () => {
      return Services.appinfo.processID;
    });
 
    // Check that http tab opened from JS in file:// page is in same process.
    let promiseTabOpened = BrowserTestUtils.waitForNewTab(gBrowser, TEST_HTTP, true);
    await ContentTask.spawn(fileBrowser, TEST_HTTP, uri => {
--- a/devtools/client/netmonitor/src/components/toolbar.js
+++ b/devtools/client/netmonitor/src/components/toolbar.js
@@ -87,16 +87,21 @@ const Toolbar = createClass({
          "aria-pressed": checked,
          "data-key": type,
        },
        L10N.getStr(`netmonitor.toolbar.filter.${type}`)
      )
    );
  });
 
+    // Setup autocomplete list
+    let negativeAutocompleteList = FILTER_FLAGS.map((item) => `-${item}`);
+    let autocompleteList = [...FILTER_FLAGS, ...negativeAutocompleteList]
+      .map((item) => `${item}:`);
+
    return (
      span({ className: "devtools-toolbar devtools-toolbar-container" },
        span({ className: "devtools-toolbar-group" },
          button({
            className: "devtools-button devtools-clear-icon requests-list-clear-button",
            title: TOOLBAR_CLEAR,
            onClick: clearRequests,
          }),
@@ -104,17 +109,17 @@ const Toolbar = createClass({
        ),
        span({ className: "devtools-toolbar-group" },
          SearchBox({
            delay: FILTER_SEARCH_DELAY,
            keyShortcut: SEARCH_KEY_SHORTCUT,
            placeholder: SEARCH_PLACE_HOLDER,
            type: "filter",
            onChange: setRequestFilterText,
-           autocompleteList: FILTER_FLAGS.map((item) => `${item}:`),
+           autocompleteList,
          }),
          button({
            className: toggleButtonClassName.join(" "),
            title: networkDetailsOpen ? COLLPASE_DETAILS_PANE : EXPAND_DETAILS_PANE,
            disabled: networkDetailsToggleDisabled,
            tabIndex: "0",
            onClick: toggleNetworkDetails,
          }),
--- a/dom/base/nsContentUtils.cpp
+++ b/dom/base/nsContentUtils.cpp
@@ -296,16 +296,17 @@ bool nsContentUtils::sIsWebComponentsEna
 bool nsContentUtils::sIsCustomElementsEnabled = false;
 bool nsContentUtils::sSendPerformanceTimingNotifications = false;
 bool nsContentUtils::sUseActivityCursor = false;
 bool nsContentUtils::sAnimationsAPICoreEnabled = false;
 bool nsContentUtils::sAnimationsAPIElementAnimateEnabled = false;
 bool nsContentUtils::sGetBoxQuadsEnabled = false;
 bool nsContentUtils::sSkipCursorMoveForSameValueSet = false;
 bool nsContentUtils::sRequestIdleCallbackEnabled = false;
+bool nsContentUtils::sLowerNetworkPriority = false;
 
 int32_t nsContentUtils::sPrivacyMaxInnerWidth = 1000;
 int32_t nsContentUtils::sPrivacyMaxInnerHeight = 1000;
 
 nsContentUtils::UserInteractionObserver*
 nsContentUtils::sUserInteractionObserver = nullptr;
 
 uint32_t nsContentUtils::sHandlingInputTimeout = 1000;
@@ -710,16 +711,19 @@ nsContentUtils::Init()
   Preferences::AddBoolVarCache(&sSkipCursorMoveForSameValueSet,
                                "dom.input.skip_cursor_move_for_same_value_set",
                                true);
 
   Preferences::AddBoolVarCache(&sRequestIdleCallbackEnabled,
                                "dom.requestIdleCallback.enabled", false);
 
+  Preferences::AddBoolVarCache(&sLowerNetworkPriority,
+                               "privacy.trackingprotection.lower_network_priority", false);
+
   Element::InitCCCallbacks();
 
   Unused << nsRFPService::GetOrCreate();
 
   nsCOMPtr<nsIUUIDGenerator> uuidGenerator =
     do_GetService("@mozilla.org/uuid-generator;1", &rv);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
--- a/dom/base/nsContentUtils.h
+++ b/dom/base/nsContentUtils.h
@@ -2948,16 +2948,20 @@ public:
 
   /**
    * Determine whether or not the user is currently interacting with the web
    * browser. This method is safe to call from off of the main thread.
    */
   static bool
   GetUserIsInteracting();
 
+  // Check pref "privacy.trackingprotection.lower_network_priority" to see
+  // if we want to lower the priority of the channel.
+  static bool IsLowerNetworkPriority() { return sLowerNetworkPriority; }
+
 private:
   static bool InitializeEventTable();
 
   static nsresult EnsureStringBundle(PropertiesFile aFile);
 
   static bool CanCallerAccess(nsIPrincipal* aSubjectPrincipal,
                               nsIPrincipal* aPrincipal);
 
@@ -3074,16 +3078,17 @@ private:
   static bool sIsCustomElementsEnabled;
   static bool sSendPerformanceTimingNotifications;
   static bool sUseActivityCursor;
   static bool sAnimationsAPICoreEnabled;
   static bool sAnimationsAPIElementAnimateEnabled;
   static bool sGetBoxQuadsEnabled;
   static bool sSkipCursorMoveForSameValueSet;
   static bool sRequestIdleCallbackEnabled;
+  static bool sLowerNetworkPriority;
 
   static uint32_t sCookiesLifetimePolicy;
   static uint32_t sCookiesBehavior;
 
   static int32_t sPrivacyMaxInnerWidth;
   static int32_t sPrivacyMaxInnerHeight;
 
   class UserInteractionObserver;
   static UserInteractionObserver* sUserInteractionObserver;
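The two nsContentUtils hunks rely on Gecko's pref var-cache: the pref service mirrors the pref into a static bool once, so the hot path (IsLowerNetworkPriority) reads a plain flag instead of querying preferences. A minimal, self-contained sketch of that pattern; the PrefRegistry class below is a toy stand-in, not Gecko's Preferences API:

#include <iostream>
#include <map>
#include <string>
#include <vector>

class PrefRegistry {
  std::map<std::string, bool> values_;
  std::map<std::string, std::vector<bool*>> caches_;
public:
  // Mirror a pref into a caller-owned bool; later Set() calls keep it in sync.
  void AddBoolVarCache(bool* aCache, const std::string& aName, bool aDefault) {
    auto it = values_.find(aName);
    *aCache = (it != values_.end()) ? it->second : aDefault;
    caches_[aName].push_back(aCache);
  }
  void Set(const std::string& aName, bool aValue) {
    values_[aName] = aValue;
    for (bool* cache : caches_[aName]) {
      *cache = aValue;  // observer-style update: no lookup on the hot path
    }
  }
};

static bool sLowerNetworkPriority = false;

int main() {
  PrefRegistry prefs;
  prefs.AddBoolVarCache(&sLowerNetworkPriority,
                        "privacy.trackingprotection.lower_network_priority",
                        false);
  prefs.Set("privacy.trackingprotection.lower_network_priority", true);
  std::cout << sLowerNetworkPriority << "\n";  // 1: hot path reads a plain bool
}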
--- a/dom/canvas/WebGLTexture.cpp
+++ b/dom/canvas/WebGLTexture.cpp
@@ -309,16 +309,22 @@ WebGLTexture::IsCubeComplete() const
 }
 
 bool
 WebGLTexture::IsComplete(const char* funcName, uint32_t texUnit,
                         const char** const out_reason,
                         bool* const out_initFailed)
 {
    *out_initFailed = false;
 
+    const auto maxLevel = kMaxLevelCount - 1;
+    if (mBaseMipmapLevel > maxLevel) {
+        *out_reason = "`level_base` too high.";
+        return false;
+    }
+
    if (!EnsureLevelInitialized(funcName, mBaseMipmapLevel)) {
        *out_initFailed = true;
        return false;
    }
 
    // Texture completeness is established at GLES 3.0.4, p160-161.
    // "[A] texture is complete unless any of the following conditions hold true:"
--- a/dom/fetch/Fetch.cpp
+++ b/dom/fetch/Fetch.cpp
@@ -9,16 +9,17 @@
 #include "nsIDocument.h"
 #include "nsIGlobalObject.h"
 #include "nsIStreamLoader.h"
 #include "nsIThreadRetargetableRequest.h"
 #include "nsIUnicodeDecoder.h"
 
 #include "nsCharSeparatedTokenizer.h"
 #include "nsDOMString.h"
+#include "nsJSUtils.h"
 #include "nsNetUtil.h"
 #include "nsReadableUtils.h"
 #include "nsStreamUtils.h"
 #include "nsStringStream.h"
 
 #include "mozilla/ErrorResult.h"
 #include "mozilla/dom/BindingDeclarations.h"
 #include "mozilla/dom/BodyUtil.h"
@@ -274,17 +275,20 @@ public:
       NS_WARNING("Aborting Fetch because worker already shut down");
       return NS_OK;
     }
 
     nsCOMPtr<nsIPrincipal> principal = proxy->GetWorkerPrivate()->GetPrincipal();
     MOZ_ASSERT(principal);
     nsCOMPtr<nsILoadGroup> loadGroup = proxy->GetWorkerPrivate()->GetLoadGroup();
     MOZ_ASSERT(loadGroup);
-    fetch = new FetchDriver(mRequest, principal, loadGroup);
+
+    // We don't track if a worker is spawned from a tracking script for now,
+    // so pass false as the last argument to FetchDriver().
+    fetch = new FetchDriver(mRequest, principal, loadGroup, false);
     nsAutoCString spec;
     if (proxy->GetWorkerPrivate()->GetBaseURI()) {
       proxy->GetWorkerPrivate()->GetBaseURI()->GetAsciiSpec(spec);
     }
     fetch->SetWorkerScript(spec);
   }
 
   RefPtr<FetchSignal> signal = mResolver->GetFetchSignal();
@@ -339,24 +343,30 @@ FetchRequest(nsIGlobalObject* aGlobal, c
     aInit.mObserve.Value().HandleEvent(*observer);
   }
 
   if (NS_IsMainThread()) {
     nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal);
     nsCOMPtr<nsIDocument> doc;
     nsCOMPtr<nsILoadGroup> loadGroup;
     nsIPrincipal* principal;
+    bool isTrackingFetch = false;
     if (window) {
       doc = window->GetExtantDoc();
       if (!doc) {
         aRv.Throw(NS_ERROR_FAILURE);
         return nullptr;
       }
       principal = doc->NodePrincipal();
       loadGroup = doc->GetDocumentLoadGroup();
+
+      nsAutoCString fileNameString;
+      if (nsJSUtils::GetCallingLocation(cx, fileNameString)) {
+        isTrackingFetch = doc->IsScriptTracking(fileNameString);
+      }
     } else {
       principal = aGlobal->PrincipalOrNull();
       if (NS_WARN_IF(!principal)) {
         aRv.Throw(NS_ERROR_FAILURE);
         return nullptr;
       }
       nsresult rv = NS_NewLoadGroup(getter_AddRefs(loadGroup), principal);
       if (NS_WARN_IF(NS_FAILED(rv))) {
@@ -364,17 +374,18 @@ FetchRequest(nsIGlobalObject* aGlobal, c
         return nullptr;
       }
     }
 
     Telemetry::Accumulate(Telemetry::FETCH_IS_MAINTHREAD, 1);
 
     RefPtr<MainThreadFetchResolver> resolver = new MainThreadFetchResolver(p, observer);
-    RefPtr<FetchDriver> fetch = new FetchDriver(r, principal, loadGroup);
+    RefPtr<FetchDriver> fetch =
+      new FetchDriver(r, principal, loadGroup, isTrackingFetch);
     fetch->SetDocument(doc);
     resolver->SetLoadGroup(loadGroup);
     aRv = fetch->Fetch(signal, resolver);
     if (NS_WARN_IF(aRv.Failed())) {
       return nullptr;
     }
   } else {
     WorkerPrivate* worker = GetCurrentThreadWorkerPrivate();
--- a/dom/fetch/FetchDriver.cpp
+++ b/dom/fetch/FetchDriver.cpp
@@ -9,16 +9,17 @@
 #include "nsIAsyncVerifyRedirectCallback.h"
 #include "nsIDocument.h"
 #include "nsIInputStream.h"
 #include "nsIOutputStream.h"
 #include "nsIHttpChannel.h"
 #include "nsIHttpChannelInternal.h"
 #include "nsIScriptSecurityManager.h"
+#include "nsISupportsPriority.h"
 #include "nsIThreadRetargetableRequest.h"
 #include "nsIUploadChannel2.h"
 #include "nsIInterfaceRequestorUtils.h"
 #include "nsIPipe.h"
 
 #include "nsContentPolicyUtils.h"
 #include "nsDataHandler.h"
 #include "nsHostObjectProtocolHandler.h"
@@ -42,20 +43,21 @@ namespace mozilla {
 namespace dom {
 
 NS_IMPL_ISUPPORTS(FetchDriver,
                   nsIStreamListener, nsIChannelEventSink, nsIInterfaceRequestor,
                   nsIThreadRetargetableStreamListener)
 
 FetchDriver::FetchDriver(InternalRequest* aRequest, nsIPrincipal* aPrincipal,
-                         nsILoadGroup* aLoadGroup)
+                         nsILoadGroup* aLoadGroup, bool aIsTrackingFetch)
   : mPrincipal(aPrincipal)
   , mLoadGroup(aLoadGroup)
   , mRequest(aRequest)
+  , mIsTrackingFetch(aIsTrackingFetch)
 #ifdef DEBUG
   , mResponseAvailableCalled(false)
   , mFetchCalled(false)
 #endif
 {
   MOZ_ASSERT(aRequest);
   MOZ_ASSERT(aPrincipal);
 }
@@ -368,16 +370,23 @@ FetchDriver::HttpFetch()
     AutoTArray<nsCString, 5> unsafeHeaders;
     mRequest->Headers()->GetUnsafeHeaders(unsafeHeaders);
     nsCOMPtr<nsILoadInfo> loadInfo = chan->GetLoadInfo();
     if (loadInfo) {
       loadInfo->SetCorsPreflightInfo(unsafeHeaders, false);
     }
   }
 
+  if (mIsTrackingFetch && nsContentUtils::IsLowerNetworkPriority()) {
+    nsCOMPtr<nsISupportsPriority> p = do_QueryInterface(chan);
+    if (p) {
+      p->SetPriority(nsISupportsPriority::PRIORITY_LOWEST);
+    }
+  }
+
   rv = chan->AsyncOpen2(this);
   NS_ENSURE_SUCCESS(rv, rv);
 
   // Step 4 onwards of "HTTP Fetch" is handled internally by Necko.
 
   mChannel = chan;
   return NS_OK;
 }
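HttpFetch() only lowers the priority when the channel happens to implement nsISupportsPriority, which is why the QI result is null-checked and silently ignored otherwise. A standalone sketch of that shape, with dynamic_cast standing in for do_QueryInterface and hypothetical class names; PRIORITY_LOWEST's value matches nsISupportsPriority's convention (higher number = lower priority):

#include <iostream>

struct Channel { virtual ~Channel() = default; };

struct SupportsPriority {
  static constexpr int PRIORITY_LOWEST = 20;
  virtual void SetPriority(int aPriority) = 0;
  virtual ~SupportsPriority() = default;
};

struct HttpChannel : Channel, SupportsPriority {
  int priority = 0;
  void SetPriority(int aPriority) override { priority = aPriority; }
};

void MaybeDeprioritize(Channel* aChan, bool aIsTrackingFetch, bool aPrefEnabled) {
  if (!(aIsTrackingFetch && aPrefEnabled)) {
    return;
  }
  // Not every channel implements the priority interface, so the cast may
  // fail; in that case do nothing, exactly like the QI in HttpFetch().
  if (auto* p = dynamic_cast<SupportsPriority*>(aChan)) {
    p->SetPriority(SupportsPriority::PRIORITY_LOWEST);
  }
}

int main() {
  HttpChannel chan;
  MaybeDeprioritize(&chan, /* isTrackingFetch */ true, /* prefEnabled */ true);
  std::cout << chan.priority << "\n";  // 20
}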
--- a/dom/fetch/FetchDriver.h
+++ b/dom/fetch/FetchDriver.h
@@ -91,17 +91,18 @@ public:
   NS_DECL_NSIREQUESTOBSERVER
   NS_DECL_NSISTREAMLISTENER
   NS_DECL_NSICHANNELEVENTSINK
   NS_DECL_NSIINTERFACEREQUESTOR
   NS_DECL_NSITHREADRETARGETABLESTREAMLISTENER
 
   FetchDriver(InternalRequest* aRequest,
               nsIPrincipal* aPrincipal,
-              nsILoadGroup* aLoadGroup);
+              nsILoadGroup* aLoadGroup,
+              bool aIsTrackingFetch);
 
   nsresult Fetch(FetchSignal* aSignal, FetchDriverObserver* aObserver);
 
   void
   SetDocument(nsIDocument* aDocument);
 
   void
@@ -123,16 +124,17 @@ private:
   RefPtr<InternalResponse> mResponse;
   nsCOMPtr<nsIOutputStream> mPipeOutputStream;
   RefPtr<FetchDriverObserver> mObserver;
   nsCOMPtr<nsIDocument> mDocument;
   nsCOMPtr<nsIChannel> mChannel;
   nsAutoPtr<SRICheckDataVerifier> mSRIDataVerifier;
   SRIMetadata mSRIMetadata;
   nsCString mWorkerScript;
+  bool mIsTrackingFetch;
 
 #ifdef DEBUG
   bool mResponseAvailableCalled;
   bool mFetchCalled;
 #endif
 
   FetchDriver() = delete;
   FetchDriver(const FetchDriver&) = delete;
--- a/dom/file/ipc/IPCBlobInputStreamChild.cpp
+++ b/dom/file/ipc/IPCBlobInputStreamChild.cpp
@@ -1,24 +1,25 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "IPCBlobInputStreamChild.h"
+#include "WorkerHolder.h"
 
 namespace mozilla {
 namespace dom {
 
 namespace {
 
 // This runnable is used in case the last stream is forgotten on the 'wrong'
 // thread.
-class ShutdownRunnable final : public Runnable
+class ShutdownRunnable final : public CancelableRunnable
 {
 public:
   explicit ShutdownRunnable(IPCBlobInputStreamChild* aActor)
     : mActor(aActor)
   {}
 
   NS_IMETHOD
   Run() override
@@ -28,17 +29,17 @@ public:
   }
 
 private:
   RefPtr<IPCBlobInputStreamChild> mActor;
 };
 
 // This runnable is used in case StreamNeeded() has been called on a non-owning
 // thread.
-class StreamNeededRunnable final : public Runnable
+class StreamNeededRunnable final : public CancelableRunnable
 {
 public:
   explicit StreamNeededRunnable(IPCBlobInputStreamChild* aActor)
     : mActor(aActor)
   {}
 
   NS_IMETHOD
   Run() override
@@ -74,53 +75,88 @@ public:
     return NS_OK;
   }
 
 private:
   RefPtr<IPCBlobInputStream> mDestinationStream;
   nsCOMPtr<nsIInputStream> mCreatedStream;
 };
 
+class IPCBlobInputStreamWorkerHolder final : public WorkerHolder
+{
+public:
+  explicit IPCBlobInputStreamWorkerHolder(IPCBlobInputStreamChild* aActor)
+    : mActor(aActor)
+  {}
+
+  bool Notify(Status aStatus) override
+  {
+    if (aStatus > Running) {
+      mActor->Shutdown();
+      // After this the WorkerHolder is gone.
+    }
+    return true;
+  }
+
+private:
+  RefPtr<IPCBlobInputStreamChild> mActor;
+};
+
 } // anonymous
 
 IPCBlobInputStreamChild::IPCBlobInputStreamChild(const nsID& aID,
                                                  uint64_t aSize)
   : mMutex("IPCBlobInputStreamChild::mMutex")
   , mID(aID)
   , mSize(aSize)
   , mActorAlive(true)
   , mOwningThread(NS_GetCurrentThread())
-{}
+{
+  // If we are running in a worker, we need to send a Close() to the parent side
+  // before the thread is released.
+  if (!NS_IsMainThread()) {
+    WorkerPrivate* workerPrivate = GetCurrentThreadWorkerPrivate();
+    if (workerPrivate) {
+      UniquePtr<WorkerHolder> workerHolder(
+        new IPCBlobInputStreamWorkerHolder(this));
+      if (workerHolder->HoldWorker(workerPrivate, Canceling)) {
+        mWorkerHolder.swap(workerHolder);
+      }
+    }
+  }
+}
 
 IPCBlobInputStreamChild::~IPCBlobInputStreamChild()
 {}
 
 void
 IPCBlobInputStreamChild::Shutdown()
 {
   MutexAutoLock lock(mMutex);
 
   RefPtr<IPCBlobInputStreamChild> kungFuDeathGrip = this;
 
+  mWorkerHolder = nullptr;
   mPendingOperations.Clear();
 
   if (mActorAlive) {
     SendClose();
     mActorAlive = false;
   }
 }
 
 void
 IPCBlobInputStreamChild::ActorDestroy(IProtocol::ActorDestroyReason aReason)
 {
   {
     MutexAutoLock lock(mMutex);
     mActorAlive = false;
   }
 
+  // Let's cleanup the workerHolder and the pending operation queue.
   Shutdown();
 }
 
 bool
 IPCBlobInputStreamChild::IsAlive()
 {
   MutexAutoLock lock(mMutex);
   return mActorAlive;
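The new IPCBlobInputStreamWorkerHolder ties the actor's shutdown to the worker's lifecycle: once the worker's status passes Running (i.e. reaches Canceling), Notify() fires and the actor sends Close() to the parent before the thread goes away. A toy model of that holder/notify shape, with all names illustrative rather than the real WorkerHolder API:

#include <functional>
#include <iostream>
#include <vector>

enum class Status { Running, Canceling, Killing };

class WorkerLifecycle {
  std::vector<std::function<void(Status)>> holders_;
public:
  void HoldWorker(std::function<void(Status)> aHolder) {
    holders_.push_back(std::move(aHolder));
  }
  void Advance(Status aStatus) {
    for (auto& holder : holders_) {
      holder(aStatus);  // every registered holder gets the status change
    }
  }
};

class StreamActor {
  bool alive_ = true;
public:
  void Shutdown() {
    if (alive_) {
      alive_ = false;
      std::cout << "SendClose() to parent before the worker thread dies\n";
    }
  }
  void AttachTo(WorkerLifecycle& aWorker) {
    aWorker.HoldWorker([this](Status s) {
      if (s > Status::Running) {  // Canceling or later, like Notify() above
        Shutdown();
      }
    });
  }
};

int main() {
  WorkerLifecycle worker;
  StreamActor actor;
  actor.AttachTo(worker);
  worker.Advance(Status::Canceling);  // actor closes itself in time
}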
--- a/dom/file/ipc/IPCBlobInputStreamChild.h
+++ b/dom/file/ipc/IPCBlobInputStreamChild.h
@@ -4,22 +4,27 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_dom_ipc_IPCBlobInputStreamChild_h
 #define mozilla_dom_ipc_IPCBlobInputStreamChild_h
 
 #include "mozilla/ipc/PIPCBlobInputStreamChild.h"
 #include "mozilla/Mutex.h"
+#include "mozilla/UniquePtr.h"
 #include "nsIThread.h"
 #include "nsTArray.h"
 
 namespace mozilla {
 namespace dom {
 
+namespace workers {
+class WorkerHolder;
+}
+
 class IPCBlobInputStream;
 
 class IPCBlobInputStreamChild final
   : public mozilla::ipc::PIPCBlobInputStreamChild
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(IPCBlobInputStreamChild)
 
@@ -80,14 +85,16 @@ private:
   struct PendingOperation {
     RefPtr<IPCBlobInputStream> mStream;
     nsCOMPtr<nsIEventTarget> mEventTarget;
   };
   nsTArray<PendingOperation> mPendingOperations;
 
   nsCOMPtr<nsIThread> mOwningThread;
+
+  UniquePtr<workers::WorkerHolder> mWorkerHolder;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif // mozilla_dom_ipc_IPCBlobInputStreamChild_h
--- a/dom/file/ipc/tests/mochitest.ini
+++ b/dom/file/ipc/tests/mochitest.ini
@@ -1,5 +1,6 @@
 [DEFAULT]
 support-files = script_file.js
 
 [test_ipcBlob_fileReaderSync.html]
+[test_ipcBlob_workers.html]
new file mode 100644
--- /dev/null
+++ b/dom/file/ipc/tests/test_ipcBlob_workers.html
@@ -0,0 +1,42 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test IPCBlob and Workers</title>
+  <script type="text/javascript" src="/MochiKit/MochiKit.js"></script>
+  <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script type="text/javascript">
+
+function workerScript() {
+  onmessage = e => {
+    e.ports[0].onmessage = event => {
+      let reader = new FileReader();
+      reader.readAsText(event.data);
+      reader.onloadend = () => {
+        let status = reader.result == 'hello world';
+        postMessage(status);
+      }
+    }
+  }
+}
+
+let mc = new MessageChannel();
+mc.port1.postMessage(new Blob(['hello world']));
+
+let workerUrl = URL.createObjectURL(new Blob(["(", workerScript.toSource(), ")()"]));
+let worker = new Worker(workerUrl);
+
+worker.postMessage("", [mc.port2]);
+worker.onmessage = event => {
+  ok(event.data, "All is done!");
+  SimpleTest.finish();
+}
+
+SimpleTest.waitForExplicitFinish();
+
+</script>
+</pre>
+</body>
+</html>
--- a/dom/html/test/browser_form_post_from_file_to_http.js
+++ b/dom/html/test/browser_form_post_from_file_to_http.js
@@ -1,16 +1,22 @@
 /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
 /* vim: set ft=javascript ts=2 et sw=2 tw=80: */
 
 const TEST_HTTP_POST = "http://example.org/browser/dom/html/test/form_submit_server.sjs";
 
 // Test for bug 1351358.
 add_task(async function() {
+  // Set prefs to ensure file content process and to allow linked web content
+  // in file URI process.
+  await SpecialPowers.pushPrefEnv(
+    {set: [["browser.tabs.remote.separateFileUriProcess", true],
+           ["browser.tabs.remote.allowLinkedWebInFileUriProcess", true]]});
+
   // Create file URI and test data file paths.
   let testFile = getChromeDir(getResolvedURI(gTestPath));
   testFile.append("dummy_page.html");
   const fileUriString = Services.io.newFileURI(testFile).spec;
   let filePaths = [];
   testFile.leafName = "form_data_file.txt";
   filePaths.push(testFile.path);
   testFile.leafName = "form_data_file.bin";
--- a/dom/xhr/XMLHttpRequestMainThread.cpp
+++ b/dom/xhr/XMLHttpRequestMainThread.cpp
@@ -41,16 +41,17 @@
 #include "nsIURI.h"
 #include "nsILoadGroup.h"
 #include "nsNetUtil.h"
 #include "nsStringStream.h"
 #include "nsIAuthPrompt.h"
 #include "nsIAuthPrompt2.h"
 #include "nsIOutputStream.h"
 #include "nsISupportsPrimitives.h"
+#include "nsISupportsPriority.h"
 #include "nsIInterfaceRequestorUtils.h"
 #include "nsStreamUtils.h"
 #include "nsThreadUtils.h"
 
 #include "nsIUploadChannel.h"
 #include "nsIUploadChannel2.h"
 
 #include "nsIDOMSerializer.h"
 #include "nsXPCOM.h"
 #include "nsIDOMEventListener.h"
@@ -2545,16 +2546,47 @@ XMLHttpRequestMainThread::CreateChannel(
   if (loadInfo) {
     rv = loadInfo->SetPrincipalToInherit(resultingDocumentPrincipal);
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
   return NS_OK;
 }
 
+void
+XMLHttpRequestMainThread::MaybeLowerChannelPriority()
+{
+  nsCOMPtr<nsIDocument> doc = GetDocumentIfCurrent();
+  if (!doc) {
+    return;
+  }
+
+  AutoJSAPI jsapi;
+  if (!jsapi.Init(GetOwnerGlobal())) {
+    return;
+  }
+
+  JSContext* cx = jsapi.cx();
+  nsAutoCString fileNameString;
+  if (!nsJSUtils::GetCallingLocation(cx, fileNameString)) {
+    return;
+  }
+
+  if (!doc->IsScriptTracking(fileNameString)) {
+    return;
+  }
+
+  nsCOMPtr<nsISupportsPriority> p = do_QueryInterface(mChannel);
+  if (!p) {
+    return;
+  }
+
+  p->SetPriority(nsISupportsPriority::PRIORITY_LOWEST);
+}
+
 nsresult
 XMLHttpRequestMainThread::InitiateFetch(nsIInputStream* aUploadStream,
                                         int64_t aUploadLength,
                                         nsACString& aUploadContentType)
 {
   nsresult rv;
 
   // nsIRequest::LOAD_BACKGROUND prevents throbber from becoming active, which
@@ -2733,16 +2765,22 @@ XMLHttpRequestMainThread::InitiateFetch(
     internalHttpChannel->SetBlockAuthPrompt(ShouldBlockAuthPrompt());
   }
 
   // Because of bug 682305, we can't let listener be the XHR object itself
   // because JS wouldn't be able to use it. So create a listener around 'this'.
   // Make sure to hold a strong reference so that we don't leak the wrapper.
   nsCOMPtr<nsIStreamListener> listener = new net::nsStreamListenerWrapper(this);
 
+  // Check if this XHR is created from a tracking script.
+  // If yes, lower the channel's priority.
+  if (nsContentUtils::IsLowerNetworkPriority()) {
+    MaybeLowerChannelPriority();
+  }
+
   // Start reading from the channel
   rv = mChannel->AsyncOpen2(listener);
   listener = nullptr;
   if (NS_WARN_IF(NS_FAILED(rv))) {
     // Drop our ref to the channel to avoid cycles. Also drop channel's
     // ref to us to be extra safe.
     mChannel->SetNotificationCallbacks(mNotificationCallbacks);
     mChannel = nullptr;
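MaybeLowerChannelPriority() is a chain of early returns: find the script file currently on the JS stack, ask the document's tracking classifier about it, and only then touch the channel. A toy of that gate, with every name a stand-in for the Gecko originals (GetCallingLocation mimics nsJSUtils::GetCallingLocation, the set mimics the per-document tracking table):

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Document {
  std::set<std::string> trackingScripts;  // filled by the URL classifier
  bool IsScriptTracking(const std::string& aFile) const {
    return trackingScripts.count(aFile) != 0;
  }
};

// Stand-in for nsJSUtils::GetCallingLocation: top of a pretend JS stack.
static bool GetCallingLocation(const std::vector<std::string>& aJsStack,
                               std::string* aOut) {
  if (aJsStack.empty()) {
    return false;
  }
  *aOut = aJsStack.back();
  return true;
}

int main() {
  Document doc;
  doc.trackingScripts.insert("https://tracker.example/analytics.js");

  std::vector<std::string> jsStack = {"https://site.example/app.js",
                                      "https://tracker.example/analytics.js"};
  std::string caller;
  // Each early return in MaybeLowerChannelPriority corresponds to one of
  // these checks failing; only the full chain lowers the priority.
  if (GetCallingLocation(jsStack, &caller) && doc.IsScriptTracking(caller)) {
    std::cout << "lower channel priority for XHR from " << caller << "\n";
  }
}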
--- a/dom/xhr/XMLHttpRequestMainThread.h
+++ b/dom/xhr/XMLHttpRequestMainThread.h
@@ -309,16 +309,18 @@ private:
   // Check pref "dom.mapped_arraybuffer.enabled" to make sure ArrayBuffer is
   // supported.
   static bool IsMappedArrayBufferEnabled();
 
   // Check pref "dom.xhr.lowercase_header.enabled" to make sure lowercased
   // response header is supported.
   static bool IsLowercaseResponseHeader();
 
+  void MaybeLowerChannelPriority();
+
 public:
   virtual void
   Send(JSContext* /*aCx*/, ErrorResult& aRv) override
   {
     aRv = SendInternal(nullptr);
   }
 
   virtual void
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -1484,23 +1484,24 @@ Parser<ParseHandler, CharT>::noteDeclare
         if (!pc->functionScope().addDeclaredName(pc, p, name, kind, pos.begin))
             return false;
 
         break;
       }
 
       case DeclarationKind::LexicalFunction: {
         ParseContext::Scope* scope = pc->innermostScope();
-        if (AddDeclaredNamePtr p = scope->lookupDeclaredNameForAdd(name)) {
+        AddDeclaredNamePtr p = scope->lookupDeclaredNameForAdd(name);
+        if (p) {
             reportRedeclaration(name, p->value()->kind(), pos, p->value()->pos());
             return false;
-        } else {
-            if (!scope->addDeclaredName(pc, p, name, kind, pos.begin))
-                return false;
-        }
+        }
+
+        if (!scope->addDeclaredName(pc, p, name, kind, pos.begin))
+            return false;
 
         break;
       }
 
       case DeclarationKind::SloppyLexicalFunction: {
         // Functions in block have complex allowances in sloppy mode for being
         // labelled that other lexical declarations do not have. Those checks
         // are more complex than calling checkLexicalDeclarationDirectlyWithin-
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -761,17 +761,21 @@ UnmapPages(void* p, size_t size)
 
 bool
 MarkPagesUnused(void* p, size_t size)
 {
     if (!DecommitEnabled())
        return false;
 
    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+#if defined(XP_SOLARIS)
+    int result = posix_madvise(p, size, POSIX_MADV_DONTNEED);
+#else
    int result = madvise(p, size, MADV_DONTNEED);
+#endif
    return result != -1;
 }
 
 void
 MarkPagesInUse(void* p, size_t size)
 {
    if (!DecommitEnabled())
        return;
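posix_madvise is used on Solaris because Solaris declares madvise with a caddr_t first argument rather than the BSD/Linux void*. Note one subtlety the patch leaves in place: posix_madvise reports failure via a returned error number (0 on success), never -1, so the shared `result != -1` check always reads as success on Solaris. A standalone version of the same guard that checks each return value the way its API defines it, using the compiler-defined __sun macro in place of Mozilla's XP_SOLARIS (MAP_ANONYMOUS spelling may vary by platform; this sketch assumes a Linux-like system):

#include <sys/mman.h>
#include <cstdio>
#include <unistd.h>

static bool MarkPagesUnused(void* p, size_t size) {
#if defined(__sun)
  // posix_madvise returns an error number, not -1/errno.
  return posix_madvise(p, size, POSIX_MADV_DONTNEED) == 0;
#else
  return madvise(p, size, MADV_DONTNEED) != -1;
#endif
}

int main() {
  size_t pageSize = size_t(sysconf(_SC_PAGESIZE));
  void* p = mmap(nullptr, pageSize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    return 1;
  }
  std::printf("decommit ok: %d\n", MarkPagesUnused(p, pageSize));
  munmap(p, pageSize);
}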
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/array-push-frozen-array.js
@@ -0,0 +1,19 @@
+function maybeFreeze(arr, b) {
+    with(this) {}; // Don't inline this.
+    if (b) {
+        Object.freeze(arr);
+    }
+}
+function test() {
+    var arr = [];
+    for (var i = 0; i < 1800; i++) {
+        maybeFreeze(arr, i > 1500);
+        try {
+            arr.push(2);
+            assertEq(i <= 1500, true);
+        } catch(e) {
+            assertEq(e instanceof TypeError, true);
+        }
+    }
+}
+test();
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -1555,25 +1555,25 @@ Simulator::exclusiveMonitorClear()
 void
 Simulator::handleWasmInterrupt()
 {
     void* pc = (void*)get_pc();
     uint8_t* fp = (uint8_t*)get_register(r11);
 
     WasmActivation* activation = wasm::MaybeActiveActivation(cx_);
     const wasm::Code* code = activation->compartment()->wasm.lookupCode(pc);
-    if (!code || !code->segment().containsFunctionPC(pc))
+    if (!code || !code->segmentTier().containsFunctionPC(pc))
         return;
 
     // fp can be null during the prologue/epilogue of the entry function.
     if (!fp)
         return;
 
     activation->startInterrupt(pc, fp);
-    set_pc(int32_t(code->segment().interruptCode()));
+    set_pc(int32_t(code->segmentTier().interruptCode()));
 }
 
 // WebAssembly memories contain an extra region of guard pages (see
 // WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
 // using a signal handler that redirects PC to a stub that safely reports an
 // error. However, if the handler is hit by the simulator, the PC is in C++ code
 // and cannot be redirected. Therefore, we must avoid hitting the handler by
 // redirecting in the simulator before the real handler would have been hit.
@@ -1588,22 +1588,22 @@ Simulator::handleWasmFault(int32_t addr,
     uint8_t* fp = reinterpret_cast<uint8_t*>(get_register(r11));
 
     wasm::Instance* instance = wasm::LookupFaultingInstance(act, pc, fp);
     if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
         return false;
 
     const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
     if (!memoryAccess) {
         act->startInterrupt(pc, fp);
-        set_pc(int32_t(instance->codeSegment().outOfBoundsCode()));
+        set_pc(int32_t(instance->codeSegmentTier().outOfBoundsCode()));
         return true;
     }
 
     MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
-    set_pc(int32_t(memoryAccess->trapOutOfLineCode(instance->codeBase())));
+    set_pc(int32_t(memoryAccess->trapOutOfLineCode(instance->codeBaseTier())));
     return true;
 }
 
 uint64_t
 Simulator::readQ(int32_t addr, SimInstruction* instr, UnalignedPolicy f)
 {
     if (handleWasmFault(addr, 8))
         return -1;
--- a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -240,21 +240,21 @@ void Simulator::trigger_wasm_interrupt()
 // the current PC may have advanced once since the signal handler's guard. So we
 // re-check here.
 void Simulator::handle_wasm_interrupt()
 {
   void* pc = (void*)get_pc();
   uint8_t* fp = (uint8_t*)xreg(30);
 
   js::WasmActivation* activation = js::wasm::MaybeActiveActivation(cx_);
   const js::wasm::Code* code = activation->compartment()->wasm.lookupCode(pc);
-  if (!code || !code->segment().containsFunctionPC(pc))
+  if (!code || !code->segmentTier().containsFunctionPC(pc))
     return;
 
   activation->startInterrupt(pc, fp);
-  set_pc((Instruction*)code->segment().interruptCode());
+  set_pc((Instruction*)code->segmentTier().interruptCode());
 }
 
 int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
 
   // First eight arguments passed in registers.
--- a/js/src/jit/x86-shared/Disassembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Disassembler-x86-shared.cpp
@@ -235,33 +235,33 @@ js::jit::Disassembler::DisassembleHeapAc
             r = (c5 >> 7) & 0x1;
             l = (c5 >> 2) & 0x1;
             p = (c5 >> 0) & 0x3;
             break;
           }
           default:
             goto rex_done;
         }
+        if (l != 0) // 256-bit SIMD
+            MOZ_CRASH("Unable to disassemble instruction");
         type = VexOperandType(p);
         rex = MakeREXFlags(w, r, x, b);
         switch (m) {
           case 0x1:
             opcode = Pack2ByteOpcode(*ptr++);
             goto opcode_done;
           case 0x2:
             opcode = Pack3ByteOpcode(ESCAPE_38, *ptr++);
             goto opcode_done;
           case 0x3:
             opcode = Pack3ByteOpcode(ESCAPE_3A, *ptr++);
             goto opcode_done;
           default:
             MOZ_CRASH("Unable to disassemble instruction");
         }
-        if (l != 0) // 256-bit SIMD
-            MOZ_CRASH("Unable to disassemble instruction");
     }
 
   rex_done:;
     if (REX_W(rex))
         opsize = 8;
 
     // Opcode.
     opcode = *ptr++;
     switch (opcode) {
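The point of the move: every arm of the `switch (m)` ends in a goto or MOZ_CRASH, so the old placement of the L-bit check was unreachable dead code; hoisting it before the switch makes the 256-bit rejection actually fire. In a 2-byte VEX prefix (0xC5 xx), bit 2 of the second byte is the vector-length bit. A tiny standalone illustration of extracting those fields (the 0xFD example byte is hypothetical):

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t c5 = 0xFD;             // second byte of a made-up VEX.256 prefix
  unsigned r = (c5 >> 7) & 0x1;
  unsigned l = (c5 >> 2) & 0x1;  // 1 => 256-bit SIMD, unsupported here
  unsigned p = (c5 >> 0) & 0x3;  // implied legacy prefix (66/F3/F2)
  std::printf("r=%u l=%u p=%u\n", r, l, p);
  if (l != 0) {
    std::printf("would bail: 256-bit SIMD\n");
  }
}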
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -885,35 +885,18 @@ js::ArraySetLength(JSContext* cx, Handle
     // so that all element fields remain properly synchronized.
 
     // Trim the initialized length, if needed, to preserve the <= length
     // invariant.  (Capacity was already reduced during element deletion, if
     // necessary.)
     ObjectElements* header = arr->getElementsHeader();
     header->initializedLength = Min(header->initializedLength, newLen);
 
-    if (attrs & JSPROP_READONLY) {
-        if (header->numShiftedElements() > 0) {
-            arr->unshiftElements();
-            header = arr->getElementsHeader();
-        }
-
-        header->setNonwritableArrayLength();
-
-        // When an array's length becomes non-writable, writes to indexes
-        // greater than or equal to the length don't change the array. We
-        // handle this with a check for non-writable length in most places.
-        // But in JIT code every check counts -- so we piggyback the check on
-        // the already-required range check for |index < capacity| by making
-        // capacity of arrays with non-writable length never exceed the length.
-        if (arr->getDenseCapacity() > newLen) {
-            arr->shrinkElements(cx, newLen);
-            arr->getElementsHeader()->capacity = newLen;
-        }
-    }
+    if (attrs & JSPROP_READONLY)
+        arr->setNonWritableLength(cx);
 
     if (!succeeded)
         return result.fail(JSMSG_CANT_TRUNCATE_ARRAY);
 
     return result.succeed();
 }
 
 bool
@@ -2849,23 +2832,28 @@ array_splice_impl(JSContext* cx, unsigne
         uint64_t targetIndex = actualStart + itemCount;
 
         if (CanOptimizeForDenseStorage<ArrayAccess::Write>(obj, len, cx)) {
             MOZ_ASSERT(sourceIndex <= len && targetIndex <= len && len <= UINT32_MAX,
                        "sourceIndex and targetIndex are uint32 array indices");
             MOZ_ASSERT(finalLength < len, "finalLength is strictly less than len");
 
             /* Steps 15.a-b. */
-            DenseElementResult result =
-                MoveAnyBoxedOrUnboxedDenseElements(cx, obj, uint32_t(targetIndex),
-                                                   uint32_t(sourceIndex),
-                                                   uint32_t(len - sourceIndex));
-            MOZ_ASSERT(result != DenseElementResult::Incomplete);
-            if (result == DenseElementResult::Failure)
-                return false;
+            if (targetIndex != 0 ||
+                !obj->is<NativeObject>() ||
+                !obj->as<NativeObject>().tryShiftDenseElements(sourceIndex))
+            {
+                DenseElementResult result =
+                    MoveAnyBoxedOrUnboxedDenseElements(cx, obj, uint32_t(targetIndex),
+                                                       uint32_t(sourceIndex),
+                                                       uint32_t(len - sourceIndex));
+                MOZ_ASSERT(result != DenseElementResult::Incomplete);
+                if (result == DenseElementResult::Failure)
+                    return false;
+            }
 
             /* Steps 15.c-d. */
             SetAnyBoxedOrUnboxedInitializedLength(cx, obj, uint32_t(finalLength));
         } else {
             /*
              * This is all very slow if the length is very large.  We don't yet
              * have the ability to iterate in sorted order, so we just do the
              * pessimistic thing and let CheckForInterrupt handle the
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -529,27 +529,20 @@ js::SetIntegrityLevel(JSContext* cx, Han
             return false;
         }
 
         MOZ_ASSERT(nobj->lastProperty()->slotSpan() == last->slotSpan());
         JS_ALWAYS_TRUE(nobj->setLastProperty(cx, last));
 
         // Ordinarily ArraySetLength handles this, but we're going behind its back
         // right now, so we must do this manually.
-        //
-        // ArraySetLength also implements the capacity <= length invariant for
-        // arrays with non-writable length. We don't need to do anything special
-        // for that, because capacity was zeroed out by preventExtensions. (See
-        // the assertion about getDenseCapacity above.)
         if (level == IntegrityLevel::Frozen && obj->is<ArrayObject>()) {
             if (!obj->as<ArrayObject>().maybeCopyElementsForWrite(cx))
                 return false;
-            if (nobj->getElementsHeader()->numShiftedElements() > 0)
-                nobj->unshiftElements();
-            obj->as<ArrayObject>().getElementsHeader()->setNonwritableArrayLength();
+            obj->as<ArrayObject>().setNonWritableLength(cx);
         }
     } else {
         RootedId id(cx);
         Rooted<PropertyDescriptor> desc(cx);
 
         const unsigned AllowConfigure = JSPROP_IGNORE_ENUMERATE | JSPROP_IGNORE_READONLY |
                                         JSPROP_IGNORE_VALUE;
         const unsigned AllowConfigureAndWritable = AllowConfigure & ~JSPROP_IGNORE_READONLY;
--- a/js/src/vm/ArrayObject.h
+++ b/js/src/vm/ArrayObject.h
@@ -26,16 +26,36 @@ class ArrayObject : public NativeObject
 
     bool lengthIsWritable() const {
         return !getElementsHeader()->hasNonwritableArrayLength();
     }
 
     uint32_t length() const {
         return getElementsHeader()->length;
     }
 
+    void setNonWritableLength(JSContext* cx) {
+        if (getElementsHeader()->numShiftedElements() > 0)
+            unshiftElements();
+
+        // When an array's length becomes non-writable, writes to indexes
+        // greater than or equal to the length don't change the array. We
+        // handle this with a check for non-writable length in most places.
+        // But in JIT code every check counts -- so we piggyback the check on
+        // the already-required range check for |index < capacity| by making
+        // capacity of arrays with non-writable length never exceed the length.
+        ObjectElements* header = getElementsHeader();
+        uint32_t len = header->initializedLength;
+        if (header->capacity > len) {
+            shrinkElements(cx, len);
+            header = getElementsHeader();
+            header->capacity = len;
+        }
+        header->setNonwritableArrayLength();
+    }
+
     inline void setLength(JSContext* cx, uint32_t length);
 
     // Variant of setLength for use on arrays where the length cannot overflow int32_t.
     void setLengthInt32(uint32_t length) {
         MOZ_ASSERT(lengthIsWritable());
         MOZ_ASSERT(length <= INT32_MAX);
         getElementsHeader()->length = length;
     }
--- a/js/src/vm/NativeObject-inl.h
+++ b/js/src/vm/NativeObject-inl.h
@@ -167,27 +167,27 @@ NativeObject::initDenseElements(uint32_t
     memcpy(&elements_[dstStart], src, count * sizeof(HeapSlot));
     elementsRangeWriteBarrierPost(dstStart, count);
 }
 
 inline bool
 NativeObject::tryShiftDenseElements(uint32_t count)
 {
     ObjectElements* header = getElementsHeader();
-    if (header->isCopyOnWrite() ||
+    if (header->initializedLength == count ||
+        count > ObjectElements::MaxShiftedElements ||
+        header->isCopyOnWrite() ||
         header->isFrozen() ||
-        header->hasNonwritableArrayLength() ||
-        header->initializedLength == count)
+        header->hasNonwritableArrayLength())
     {
         return false;
     }
 
     MOZ_ASSERT(count > 0);
     MOZ_ASSERT(count < header->initializedLength);
-    MOZ_ASSERT(count <= ObjectElements::MaxShiftedElements);
 
     if (MOZ_UNLIKELY(header->numShiftedElements() + count > ObjectElements::MaxShiftedElements)) {
         unshiftElements();
         header = getElementsHeader();
     }
 
     prepareElementRangeForOverwrite(0, count);
     header->addShiftedElements(count);
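This hunk turns the old MOZ_ASSERT(count <= MaxShiftedElements) into a checked early-out, since array_splice_impl (above, in jsarray.cpp) can now call tryShiftDenseElements with arbitrary counts. The underlying trick is to delete from the front of a dense array by advancing a header offset instead of memmove'ing everything down. A toy version of the idea, with a plain std::vector standing in for the dense element storage:

#include <cassert>
#include <cstdint>
#include <vector>

class ShiftableArray {
  std::vector<int> storage_;
  uint32_t shifted_ = 0;                     // elements logically removed from the front
  static constexpr uint32_t MaxShifted = 8;  // stand-in for MaxShiftedElements
public:
  explicit ShiftableArray(std::vector<int> v) : storage_(std::move(v)) {}

  uint32_t length() const { return uint32_t(storage_.size()) - shifted_; }
  int& operator[](uint32_t i) { return storage_[shifted_ + i]; }

  // Returns false when the fast path doesn't apply (the caller falls back to
  // a memmove-style path), mirroring tryShiftDenseElements' early outs.
  bool tryShift(uint32_t count) {
    if (count == 0 || count > MaxShifted || count >= length())
      return false;
    if (shifted_ + count > MaxShifted) {
      // "unshift": compact storage so the offset can't grow without bound
      storage_.erase(storage_.begin(), storage_.begin() + shifted_);
      shifted_ = 0;
    }
    shifted_ += count;                       // O(1) front removal
    return true;
  }
};

int main() {
  ShiftableArray a({1, 2, 3, 4, 5});
  assert(a.tryShift(2));                     // drop 1 and 2 without moving 3..5
  assert(a.length() == 3 && a[0] == 3);
}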
--- a/js/src/vm/NativeObject.h
+++ b/js/src/vm/NativeObject.h
@@ -254,16 +254,19 @@ class ObjectElements
     void clearShouldConvertDoubleElements() {
         MOZ_ASSERT(!isCopyOnWrite());
         flags &= ~CONVERT_DOUBLE_ELEMENTS;
     }
     bool hasNonwritableArrayLength() const {
         return flags & NONWRITABLE_ARRAY_LENGTH;
     }
     void setNonwritableArrayLength() {
+        // See ArrayObject::setNonWritableLength.
+        MOZ_ASSERT(capacity == initializedLength);
+        MOZ_ASSERT(numShiftedElements() == 0);
         MOZ_ASSERT(!isCopyOnWrite());
         flags |= NONWRITABLE_ARRAY_LENGTH;
     }
     bool isCopyOnWrite() const {
         return flags & COPY_ON_WRITE;
     }
     void clearCopyOnWrite() {
         MOZ_ASSERT(isCopyOnWrite());
--- a/js/src/vm/Scope.cpp
+++ b/js/src/vm/Scope.cpp
@@ -560,17 +560,17 @@ LexicalScope::XDR(XDRState<mode>* xdr, S
     Rooted<Data*> data(cx);
     if (!XDRSizedBindingNames<LexicalScope>(xdr, scope.as<LexicalScope>(), &data))
         return false;
 
     {
         auto deleteOnLeave = MakeScopeExit([&data]() {
             if (mode == XDR_DECODE)
-                js_delete(data.get());
+                DeleteScopeData(data.get());
         });
 
         uint32_t firstFrameSlot;
         uint32_t nextFrameSlot;
         if (mode == XDR_ENCODE) {
             firstFrameSlot = scope->as<LexicalScope>().firstFrameSlot();
             nextFrameSlot = data->nextFrameSlot;
         }
@@ -872,17 +872,17 @@ VarScope::XDR(XDRState<mode>* xdr, Scope
     JSContext* cx = xdr->cx();
 
     Rooted<Data*> data(cx);
     if (!XDRSizedBindingNames<VarScope>(xdr, scope.as<VarScope>(), &data))
         return false;
 
     {
         auto deleteOnLeave = MakeScopeExit([&data]() {
             if (mode == XDR_DECODE)
-                js_delete(data.get());
+                DeleteScopeData(data.get());
         });
 
         uint8_t needsEnvironment;
         uint32_t firstFrameSlot;
         uint32_t nextFrameSlot;
         if (mode == XDR_ENCODE) {
             needsEnvironment = scope->hasEnvironment();
             firstFrameSlot = scope->as<VarScope>().firstFrameSlot();
--- a/js/src/wasm/AsmJS.cpp
+++ b/js/src/wasm/AsmJS.cpp
@@ -333,18 +333,18 @@ struct js::AsmJSMetadata : Metadata, Asm
     uint32_t srcEndBeforeCurly() const {
         return srcStart + srcLength;
     }
     uint32_t srcEndAfterCurly() const {
         return srcStart + srcLengthWithRightBrace;
     }
 
-    AsmJSMetadata()
-      : Metadata(ModuleKind::AsmJS),
+    explicit AsmJSMetadata(UniqueMetadataTier tier)
+      : Metadata(Move(tier), ModuleKind::AsmJS),
        cacheResult(CacheResult::Miss),
        srcStart(0),
        srcBodyStart(0),
        strict(false)
    {}
    ~AsmJSMetadata() override {}
 
    const AsmJSExport& lookupAsmJSExport(uint32_t funcIndex) const {
@@ -1770,17 +1770,21 @@ class MOZ_STACK_CLASS ModuleValidator
             }
         }
         va_end(args);
     }
 
   public:
     bool init() {
-        asmJSMetadata_ = cx_->new_<AsmJSMetadata>();
+        auto tierMetadata = js::MakeUnique<MetadataTier>(CompileMode::Ion);
+        if (!tierMetadata)
+            return false;
+
+        asmJSMetadata_ = cx_->new_<AsmJSMetadata>(Move(tierMetadata));
         if (!asmJSMetadata_)
             return false;
 
         asmJSMetadata_->toStringStart = moduleFunctionNode_->pn_funbox->toStringStart;
         asmJSMetadata_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
         asmJSMetadata_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
         asmJSMetadata_->strict = parser_.pc->sc()->strict() &&
                                  !parser_.pc->sc()->hasExplicitUseStrict();
@@ -8546,17 +8550,21 @@ LookupAsmJSModuleInCache(JSContext* cx,
     Assumptions assumptions;
     if (!assumptions.initBuildIdFromContext(cx))
         return false;
 
     if (!Module::assumptionsMatch(assumptions, cursor, remain))
         return true;
 
-    MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>();
+    auto tierMetadata = js::MakeUnique<MetadataTier>(CompileMode::Ion);
+    if (!tierMetadata)
+        return false;
+
+    MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>(Move(tierMetadata));
     if (!asmJSMetadata)
         return false;
 
     *module = Module::deserialize(/* bytecodeBegin = */ nullptr, /* bytecodeSize = */ 0,
                                   cursor, compiledSize, asmJSMetadata.get());
     if (!*module) {
         ReportOutOfMemory(cx);
         return false;
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -81,19 +81,19 @@ CodeSegment::FreeCode::operator()(uint8_
 #ifdef MOZ_VTUNE
     vtune::UnmarkBytes(bytes, codeLength);
 #endif
     DeallocateExecutableMemory(bytes, codeLength);
 }
 
 static bool
-StaticallyLink(const CodeSegment& cs, const LinkData& linkData)
+StaticallyLink(const CodeSegment& cs, const LinkDataTier& linkData)
 {
-    for (LinkData::InternalLink link : linkData.internalLinks) {
+    for (LinkDataTier::InternalLink link : linkData.internalLinks) {
         uint8_t* patchAt = cs.base() + link.patchAtOffset;
         void* target = cs.base() + link.targetOffset;
         if (link.isRawPointerPatch())
             *(void**)(patchAt) = target;
         else
             Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
     }
@@ -113,19 +113,19 @@ StaticallyLink(const CodeSegment& cs, co
                                                    PatchedImmPtr((void*)-1));
         }
     }
 
     return true;
 }
 
 static void
-StaticallyUnlink(uint8_t* base, const LinkData& linkData)
+StaticallyUnlink(uint8_t* base, const LinkDataTier& linkData)
 {
-    for (LinkData::InternalLink link : linkData.internalLinks) {
+    for (LinkDataTier::InternalLink link : linkData.internalLinks) {
         uint8_t* patchAt = base + link.patchAtOffset;
         void* target = 0;
         if (link.isRawPointerPatch())
             *(void**)(patchAt) = target;
         else
             Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
     }
@@ -152,17 +152,17 @@ SendCodeRangesToProfiler(const CodeSegme
     enabled |= PerfFuncEnabled();
 #endif
 #ifdef MOZ_VTUNE
     enabled |= vtune::IsProfilingActive();
 #endif
     if (!enabled)
         return;
 
-    for (const CodeRange& codeRange : metadata.codeRanges) {
+    for (const CodeRange& codeRange : metadata.tier().codeRanges) {
         if (!codeRange.isFunction())
             continue;
 
         uintptr_t start = uintptr_t(cs.base() + codeRange.begin());
         uintptr_t end = uintptr_t(cs.base() + codeRange.end());
         uintptr_t size = end - start;
 
         UTF8Bytes name;
@@ -188,19 +188,20 @@ SendCodeRangesToProfiler(const CodeSegme
             vtune::MarkWasm(vtune::GenerateUniqueMethodID(), name.begin(), (void*)start, size);
#endif
     }
 
     return;
 }
 
 /* static */ UniqueConstCodeSegment
-CodeSegment::create(MacroAssembler& masm,
+CodeSegment::create(CompileMode mode,
+                    MacroAssembler& masm,
                     const ShareableBytes& bytecode,
-                    const LinkData& linkData,
+                    const LinkDataTier& linkData,
                     const Metadata& metadata)
 {
     // Round up the code size to page size since this is eventually required by
     // the executable-code allocator and for setting memory protection.
     uint32_t bytesNeeded = masm.bytesNeeded();
     uint32_t padding = ComputeByteAlignment(bytesNeeded, gc::SystemPageSize());
     uint32_t codeLength = bytesNeeded + padding;
 
@@ -211,71 +212,75 @@ CodeSegment::create(MacroAssembler& masm
        return nullptr;
 
    // We'll flush the icache after static linking, in initialize().
    masm.executableCopy(codeBytes.get(), /* flushICache = */ false);
 
    // Zero the padding.
    memset(codeBytes.get() + bytesNeeded, 0, padding);
 
-    return create(Move(codeBytes), codeLength, bytecode, linkData, metadata);
+    return create(mode, Move(codeBytes), codeLength, bytecode, linkData, metadata);
 }
 
 /* static */ UniqueConstCodeSegment
-CodeSegment::create(const Bytes& unlinkedBytes,
+CodeSegment::create(CompileMode mode,
+                    const Bytes& unlinkedBytes,
                     const ShareableBytes& bytecode,
-                    const LinkData& linkData,
+                    const LinkDataTier& linkData,
                     const Metadata& metadata)
 {
    // The unlinked bytes are a snapshot of the MacroAssembler's contents so
    // round up just like in the MacroAssembler overload above.
    uint32_t padding = ComputeByteAlignment(unlinkedBytes.length(), gc::SystemPageSize());
    uint32_t codeLength = unlinkedBytes.length() + padding;
 
    UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
    if (!codeBytes)
        return nullptr;
 
    memcpy(codeBytes.get(), unlinkedBytes.begin(), unlinkedBytes.length());
    memset(codeBytes.get() + unlinkedBytes.length(), 0, padding);
 
-    return create(Move(codeBytes), codeLength, bytecode, linkData, metadata);
+    return create(mode, Move(codeBytes), codeLength, bytecode, linkData, metadata);
 }
 
 /* static */ UniqueConstCodeSegment
-CodeSegment::create(UniqueCodeBytes codeBytes,
+CodeSegment::create(CompileMode mode,
+                    UniqueCodeBytes codeBytes,
                     uint32_t codeLength,
                     const ShareableBytes& bytecode,
-                    const LinkData& linkData,
+                    const LinkDataTier& linkData,
                     const Metadata& metadata)
 {
    // These should always exist and should never be first in the code segment.
    MOZ_ASSERT(linkData.interruptOffset != 0);
    MOZ_ASSERT(linkData.outOfBoundsOffset != 0);
    MOZ_ASSERT(linkData.unalignedAccessOffset != 0);
 
    auto cs = js::MakeUnique<CodeSegment>();
    if (!cs)
        return nullptr;
 
-    if (!cs->initialize(Move(codeBytes), codeLength, bytecode, linkData, metadata))
+    if (!cs->initialize(mode, Move(codeBytes), codeLength, bytecode, linkData, metadata))
        return nullptr;
 
    return UniqueConstCodeSegment(cs.release());
 }
 
 bool
-CodeSegment::initialize(UniqueCodeBytes codeBytes,
+CodeSegment::initialize(CompileMode mode,
+                        UniqueCodeBytes codeBytes,
                        uint32_t codeLength,
                        const ShareableBytes& bytecode,
-                        const LinkData& linkData,
+                        const LinkDataTier& linkData,
                        const Metadata& metadata)
 {
    MOZ_ASSERT(bytes_ == nullptr);
 
+    mode_ = mode;
    bytes_ = Move(codeBytes);
    functionLength_ = linkData.functionCodeLength;
    length_ = codeLength;
    interruptCode_ = bytes_.get() + linkData.interruptOffset;
    outOfBoundsCode_ = bytes_.get() + linkData.outOfBoundsOffset;
    unalignedAccessCode_ = bytes_.get() + linkData.unalignedAccessOffset;
 
    if (!StaticallyLink(*this, linkData))
@@ -301,44 +306,46 @@ CodeSegment::serializedSize() const
 
 void
 CodeSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const
 {
    *data += mallocSizeOf(this);
    *code += RoundupCodeLength(length_);
 }
 
 uint8_t*
-CodeSegment::serialize(uint8_t* cursor, const LinkData& linkData) const
+CodeSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
 {
+    MOZ_ASSERT(mode() == CompileMode::Ion);
+
    cursor = WriteScalar<uint32_t>(cursor, length_);
    uint8_t* base = cursor;
    cursor = WriteBytes(cursor, bytes_.get(), length_);
    StaticallyUnlink(base, linkData);
    return cursor;
 }
 
 const uint8_t*
 CodeSegment::deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
-                         const LinkData& linkData, const Metadata& metadata)
+                         const LinkDataTier& linkData, const Metadata& metadata)
 {
    uint32_t length;
    cursor = ReadScalar<uint32_t>(cursor, &length);
    if (!cursor)
        return nullptr;
 
    MOZ_ASSERT(length_ % gc::SystemPageSize() == 0);
    UniqueCodeBytes bytes = AllocateCodeBytes(length);
    if (!bytes)
        return nullptr;
 
    cursor = ReadBytes(cursor, bytes.get(), length);
    if (!cursor)
        return nullptr;
 
-    if (!initialize(Move(bytes), length, bytecode, linkData, metadata))
+    if (!initialize(CompileMode::Ion, Move(bytes), length, bytecode, linkData, metadata))
        return nullptr;
 
    return cursor;
 }
 
 size_t
 FuncExport::serializedSize() const
 {
@@ -439,110 +446,136 @@ CacheableChars::deserialize(const uint8_
 
 size_t
 CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
    return mallocSizeOf(get());
 }
 
 size_t
+MetadataTier::serializedSize() const
+{
+    return SerializedPodVectorSize(memoryAccesses) +
+           SerializedPodVectorSize(codeRanges) +
+           SerializedPodVectorSize(callSites) +
+           SerializedVectorSize(funcImports) +
+           SerializedVectorSize(funcExports);
+}
+
+size_t
+MetadataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
+           codeRanges.sizeOfExcludingThis(mallocSizeOf) +
+           callSites.sizeOfExcludingThis(mallocSizeOf) +
+           SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
+           SizeOfVectorExcludingThis(funcExports, mallocSizeOf);
+}
+
+uint8_t*
+MetadataTier::serialize(uint8_t* cursor) const
+{
+    MOZ_ASSERT(debugTrapFarJumpOffsets.empty() && debugFuncToCodeRange.empty());
+    cursor = SerializePodVector(cursor, memoryAccesses);
+    cursor = SerializePodVector(cursor, codeRanges);
+    cursor = SerializePodVector(cursor, callSites);
+    cursor = SerializeVector(cursor, funcImports);
+    cursor = SerializeVector(cursor, funcExports);
+    return cursor;
+}
+
+/* static */ const uint8_t*
+MetadataTier::deserialize(const uint8_t* cursor)
+{
+    (cursor = DeserializePodVector(cursor, &memoryAccesses)) &&
+    (cursor = DeserializePodVector(cursor, &codeRanges)) &&
+    (cursor = DeserializePodVector(cursor, &callSites)) &&
+    (cursor = DeserializeVector(cursor, &funcImports)) &&
+    (cursor = DeserializeVector(cursor, &funcExports));
+    debugTrapFarJumpOffsets.clear();
+    debugFuncToCodeRange.clear();
+    return cursor;
+}
+
+size_t
 Metadata::serializedSize() const
 {
    return sizeof(pod()) +
-           SerializedVectorSize(funcImports) +
-           SerializedVectorSize(funcExports) +
+           tier().serializedSize() +
           SerializedVectorSize(sigIds) +
           SerializedPodVectorSize(globals) +
           SerializedPodVectorSize(tables) +
-           SerializedPodVectorSize(memoryAccesses) +
-           SerializedPodVectorSize(codeRanges) +
-           SerializedPodVectorSize(callSites) +
           SerializedPodVectorSize(funcNames) +
           SerializedPodVectorSize(customSections) +
           filename.serializedSize() +
           sizeof(hash);
 }
 
+size_t
+Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return tier().sizeOfExcludingThis(mallocSizeOf) +
+           SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
+           globals.sizeOfExcludingThis(mallocSizeOf) +
+           tables.sizeOfExcludingThis(mallocSizeOf) +
+           funcNames.sizeOfExcludingThis(mallocSizeOf) +
+           customSections.sizeOfExcludingThis(mallocSizeOf) +
+           filename.sizeOfExcludingThis(mallocSizeOf);
+}
+
 uint8_t*
 Metadata::serialize(uint8_t* cursor) const
 {
-    MOZ_ASSERT(!debugEnabled && debugTrapFarJumpOffsets.empty() &&
-               debugFuncArgTypes.empty() && debugFuncReturnTypes.empty() &&
-               debugFuncToCodeRange.empty());
+    MOZ_ASSERT(!debugEnabled && debugFuncArgTypes.empty() && debugFuncReturnTypes.empty());
    cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
-    cursor = SerializeVector(cursor, funcImports);
-    cursor = SerializeVector(cursor, funcExports);
+    cursor = tier().serialize(cursor);
    cursor = SerializeVector(cursor, sigIds);
    cursor = SerializePodVector(cursor, globals);
    cursor = SerializePodVector(cursor, tables);
-    cursor = SerializePodVector(cursor, memoryAccesses);
-    cursor = SerializePodVector(cursor, codeRanges);
-    cursor = SerializePodVector(cursor, callSites);
    cursor = SerializePodVector(cursor, funcNames);
    cursor = SerializePodVector(cursor, customSections);
    cursor = filename.serialize(cursor);
    cursor = WriteBytes(cursor, hash, sizeof(hash));
    return cursor;
 }
 
 /* static */ const uint8_t*
 Metadata::deserialize(const uint8_t* cursor)
 {
    (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
-    (cursor = DeserializeVector(cursor, &funcImports)) &&
-    (cursor = DeserializeVector(cursor, &funcExports)) &&
+    (cursor = tier().deserialize(cursor)) &&
    (cursor = DeserializeVector(cursor, &sigIds)) &&
    (cursor = DeserializePodVector(cursor, &globals)) &&
    (cursor = DeserializePodVector(cursor, &tables)) &&
-    (cursor = DeserializePodVector(cursor, &memoryAccesses)) &&
-    (cursor = DeserializePodVector(cursor, &codeRanges)) &&
-    (cursor = DeserializePodVector(cursor, &callSites)) &&
    (cursor = DeserializePodVector(cursor, &funcNames)) &&
    (cursor = DeserializePodVector(cursor, &customSections)) &&
    (cursor = filename.deserialize(cursor)) &&
    (cursor = ReadBytes(cursor, hash, sizeof(hash)));
    debugEnabled = false;
-    debugTrapFarJumpOffsets.clear();
-    debugFuncToCodeRange.clear();
    debugFuncArgTypes.clear();
    debugFuncReturnTypes.clear();
    return cursor;
 }
 
-size_t
-Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    return SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
-           SizeOfVectorExcludingThis(funcExports, mallocSizeOf) +
-           SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
-           globals.sizeOfExcludingThis(mallocSizeOf) +
-           tables.sizeOfExcludingThis(mallocSizeOf) +
-           memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
-           codeRanges.sizeOfExcludingThis(mallocSizeOf) +
-           callSites.sizeOfExcludingThis(mallocSizeOf) +
-           funcNames.sizeOfExcludingThis(mallocSizeOf) +
-           customSections.sizeOfExcludingThis(mallocSizeOf) +
-           filename.sizeOfExcludingThis(mallocSizeOf);
-}
-
 struct ProjectFuncIndex
 {
    const FuncExportVector& funcExports;
 
    explicit ProjectFuncIndex(const FuncExportVector& funcExports)
      : funcExports(funcExports)
    {}
 
    uint32_t operator[](size_t index) const {
        return funcExports[index].funcIndex();
    }
 };
 
 const FuncExport&
 Metadata::lookupFuncExport(uint32_t funcIndex) const
 {
+    const FuncExportVector& funcExports = tier().funcExports;
+
    size_t match;
    if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(), funcIndex, &match))
        MOZ_CRASH("missing function export");
 
    return funcExports[match];
 }
 
 bool
@@ -567,18 +600,18 @@ Metadata::getFuncName(const Bytes* maybe
    const char* funcIndexStr = NumberToCString(nullptr, &cbuf, funcIndex);
    MOZ_ASSERT(funcIndexStr);
 
    return name->append(beforeFuncIndex, strlen(beforeFuncIndex)) &&
           name->append(funcIndexStr, strlen(funcIndexStr)) &&
           name->append(afterFuncIndex, strlen(afterFuncIndex));
 }
 
-Code::Code(UniqueConstCodeSegment segment, const Metadata& metadata)
-  : segment_(Move(segment)),
+Code::Code(UniqueConstCodeSegment tier, const Metadata& metadata)
+  : tier_(Move(tier)),
    metadata_(&metadata),
    profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector())
 {
 }
 
 Code::Code()
  : profilingLabels_(mutexid::WasmCodeProfilingLabels, CacheableCharsVector())
 {
@@ -592,103 +625,109 @@ struct CallSiteRetAddrOffset
        return callSites[index].returnAddressOffset();
    }
 };
 
 size_t
 Code::serializedSize() const
 {
    return metadata().serializedSize() +
-           segment().serializedSize();
+           segmentTier().serializedSize();
 }
 
 uint8_t*
 Code::serialize(uint8_t* cursor, const LinkData& linkData) const
 {
    MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
+    MOZ_RELEASE_ASSERT(metadataTier().mode == CompileMode::Ion);
 
    cursor = metadata().serialize(cursor);
-    cursor = segment().serialize(cursor, linkData);
+    cursor = segmentTier().serialize(cursor, linkData.tier());
    return cursor;
 }
 
 const uint8_t*
 Code::deserialize(const uint8_t* cursor, const SharedBytes& bytecode, const LinkData& linkData,
                  Metadata* maybeMetadata)
 {
    MutableMetadata metadata;
    if (maybeMetadata) {
        metadata = maybeMetadata;
    } else {
-        metadata = js_new<Metadata>();
+        auto tier = js::MakeUnique<MetadataTier>(CompileMode::Ion);
+        if (!tier)
+            return nullptr;
+
+        metadata = js_new<Metadata>(Move(tier));
        if (!metadata)
            return nullptr;
    }
+
    cursor = metadata->deserialize(cursor);
    if (!cursor)
        return nullptr;
 
    UniqueCodeSegment codeSegment = js::MakeUnique<CodeSegment>();
    if (!codeSegment)
        return nullptr;
 
-    cursor = codeSegment->deserialize(cursor, *bytecode, linkData, *metadata);
+    cursor = codeSegment->deserialize(cursor, *bytecode, linkData.tier(), *metadata);
    if (!cursor)
        return nullptr;
 
-    segment_ = UniqueConstCodeSegment(codeSegment.release());
+    tier_ = UniqueConstCodeSegment(codeSegment.release());
    metadata_ = metadata;
 
    return cursor;
 }
 
 const CallSite*
 Code::lookupCallSite(void* returnAddress) const
 {
-    uint32_t target = ((uint8_t*)returnAddress) - segment_->base();
+    uint32_t target = ((uint8_t*)returnAddress) - segmentTier().base();
    size_t lowerBound = 0;
-    size_t upperBound = metadata().callSites.length();
+    size_t upperBound = metadataTier().callSites.length();
 
    size_t match;
-    if (!BinarySearch(CallSiteRetAddrOffset(metadata().callSites), lowerBound, upperBound, target, &match))
+    if (!BinarySearch(CallSiteRetAddrOffset(metadataTier().callSites), lowerBound, upperBound, target, &match))
        return nullptr;
 
-    return &metadata().callSites[match];
+    return &metadataTier().callSites[match];
 }
 
 const CodeRange*
 Code::lookupRange(void* pc) const
 {
-    CodeRange::OffsetInCode target((uint8_t*)pc - segment_->base());
-    return LookupInSorted(metadata().codeRanges, target);
+    CodeRange::OffsetInCode target((uint8_t*)pc - segmentTier().base());
+    return LookupInSorted(metadataTier().codeRanges, target);
 }
 
 struct MemoryAccessOffset
 {
    const MemoryAccessVector& accesses;
    explicit MemoryAccessOffset(const MemoryAccessVector& accesses) : accesses(accesses) {}
    uintptr_t operator[](size_t index) const {
        return accesses[index].insnOffset();
    }
 };
 
 const MemoryAccess*
 Code::lookupMemoryAccess(void* pc) const
 {
-    MOZ_ASSERT(segment_->containsFunctionPC(pc));
+    MOZ_ASSERT(segmentTier().containsFunctionPC(pc));
 
-    uint32_t target = ((uint8_t*)pc) - segment_->base();
+    uint32_t target = ((uint8_t*)pc) - segmentTier().base();
    size_t lowerBound = 0;
-    size_t upperBound = metadata().memoryAccesses.length();
+    size_t upperBound = metadataTier().memoryAccesses.length();
 
    size_t match;
-    if (!BinarySearch(MemoryAccessOffset(metadata().memoryAccesses), lowerBound, upperBound, target, &match))
+    if (!BinarySearch(MemoryAccessOffset(metadataTier().memoryAccesses), lowerBound, upperBound, target, &match))
        return nullptr;
 
-    return &metadata().memoryAccesses[match];
+    return &metadataTier().memoryAccesses[match];
 }
 
 // When enabled, generate profiling labels for every name in funcNames_ that is
 // the name of some Function CodeRange. This involves malloc() so do it now
 // since, once we start sampling, we'll be in a signal-handing context where we
 // cannot malloc.
 void
 Code::ensureProfilingLabels(const Bytes* maybeBytecode, bool profilingEnabled) const
@@ -698,26 +737,26 @@ Code::ensureProfilingLabels(const Bytes*
    if (!profilingEnabled) {
        labels->clear();
        return;
    }
 
    if (!labels->empty())
        return;
 
-    for (const CodeRange& codeRange : metadata().codeRanges) {
+    for (const CodeRange& codeRange : metadataTier().codeRanges) {
        if (!codeRange.isFunction())
            continue;
 
        ToCStringBuf cbuf;
        const char* bytecodeStr = NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
        MOZ_ASSERT(bytecodeStr);
 
        UTF8Bytes name;
-        if (!metadata_->getFuncName(maybeBytecode, codeRange.funcIndex(), &name))
+        if (!metadata().getFuncName(maybeBytecode, codeRange.funcIndex(), &name))
            return;
        if (!name.append(" (", 2))
            return;
 
        if (const char* filename = metadata().filename.get()) {
            if (!name.append(filename, strlen(filename)))
                return;
        } else {
@@ -767,10 +806,10 @@ Code::addSizeOfMiscIfNotSeen(MallocSizeO
        return;
    bool ok = seenCode->add(p, this);
    (void)ok;  // oh well
 
    *data += mallocSizeOf(this) +
            metadata().sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata) +
            profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf);
 
-    segment_->addSizeOfMisc(mallocSizeOf, code, data);
+    segmentTier().addSizeOfMisc(mallocSizeOf, code, data);
 }
--- a/js/src/wasm/WasmCode.h +++ b/js/src/wasm/WasmCode.h @@ -26,17 +26,19 @@ namespace js { struct AsmJSMetadata; class WasmInstanceObject; namespace wasm { struct LinkData; +struct LinkDataTier; struct Metadata; +struct MetadataTier; class FrameIterator; // ShareableBytes is a reference-counted Vector of bytes. struct ShareableBytes : ShareableBase<ShareableBytes> { // Vector is 'final', so instead make Vector a member and add boilerplate. Bytes bytes; @@ -63,62 +65,72 @@ class CodeSegment uint32_t codeLength; FreeCode() : codeLength(0) {} explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {} void operator()(uint8_t* codeBytes); }; typedef UniquePtr<uint8_t, FreeCode> UniqueCodeBytes; static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength); + // How this code was compiled. + CompileMode mode_; + // bytes_ points to a single allocation of executable machine code in // the range [0, length_). The range [0, functionLength_) is // the subrange of [0, length_) which contains function code. UniqueCodeBytes bytes_; uint32_t functionLength_; uint32_t length_; // These are pointers into code for stubs used for asynchronous // signal-handler control-flow transfer. uint8_t* interruptCode_; uint8_t* outOfBoundsCode_; uint8_t* unalignedAccessCode_; - bool initialize(UniqueCodeBytes bytes, + bool initialize(CompileMode mode, + UniqueCodeBytes bytes, uint32_t codeLength, const ShareableBytes& bytecode, - const LinkData& linkData, + const LinkDataTier& linkData, const Metadata& metadata); - static UniqueConstCodeSegment create(UniqueCodeBytes bytes, + static UniqueConstCodeSegment create(CompileMode mode, + UniqueCodeBytes bytes, uint32_t codeLength, const ShareableBytes& bytecode, - const LinkData& linkData, + const LinkDataTier& linkData, const Metadata& metadata); public: CodeSegment(const CodeSegment&) = delete; void operator=(const CodeSegment&) = delete; CodeSegment() - : functionLength_(0), + : mode_(CompileMode(-1)), + functionLength_(0), length_(0), interruptCode_(nullptr), outOfBoundsCode_(nullptr), unalignedAccessCode_(nullptr) {} - static UniqueConstCodeSegment create(jit::MacroAssembler& masm, + static UniqueConstCodeSegment create(CompileMode mode, + jit::MacroAssembler& masm, const ShareableBytes& bytecode, - const LinkData& linkData, + const LinkDataTier& linkData, const Metadata& metadata); - static UniqueConstCodeSegment create(const Bytes& unlinkedBytes, + static UniqueConstCodeSegment create(CompileMode mode, + const Bytes& unlinkedBytes, const ShareableBytes& bytecode, - const LinkData& linkData, + const LinkDataTier& linkData, const Metadata& metadata); + CompileMode mode() const { return mode_; } + uint8_t* base() const { return bytes_.get(); } uint32_t length() const { return length_; } uint8_t* interruptCode() const { return interruptCode_; } uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; } uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; } // The range [0, functionBytes) is a subrange of [0, codeBytes) that @@ -132,19 +144,19 @@ class CodeSegment } bool containsCodePC(const void* pc) const { return pc >= base() && pc < (base() + length_); } // Structured clone support: size_t serializedSize() const; - uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const; + uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const; const uint8_t* deserialize(const uint8_t* cursor, const ShareableBytes& bytecode, - const LinkData& linkData, const Metadata& metadata); + const LinkDataTier& linkData, const Metadata& 
metadata); void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const; }; // A FuncExport represents a single function definition inside a wasm Module // that has been exported one or more times. A FuncExport represents an // internal entry point that can be called via function definition index by // Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by @@ -152,17 +164,17 @@ class CodeSegment // function definition index. class FuncExport { Sig sig_; MOZ_INIT_OUTSIDE_CTOR struct CacheablePod { uint32_t funcIndex_; uint32_t codeRangeIndex_; - uint32_t entryOffset_; + uint32_t entryOffset_; // Machine code offset } pod; public: FuncExport() = default; explicit FuncExport(Sig&& sig, uint32_t funcIndex, uint32_t codeRangeIndex) : sig_(Move(sig)) @@ -201,18 +213,18 @@ typedef Vector<FuncExport, 0, SystemAllo // offsets of these stubs are stored so that function-import callsites can be // dynamically patched at runtime. class FuncImport { Sig sig_; struct CacheablePod { uint32_t tlsDataOffset_; - uint32_t interpExitCodeOffset_; - uint32_t jitExitCodeOffset_; + uint32_t interpExitCodeOffset_; // Machine code offset + uint32_t jitExitCodeOffset_; // Machine code offset } pod; public: FuncImport() { memset(&pod, 0, sizeof(CacheablePod)); } FuncImport(Sig&& sig, uint32_t tlsDataOffset) @@ -305,59 +317,90 @@ typedef Vector<ValTypeVector, 0, SystemA typedef Vector<ExprType, 0, SystemAllocPolicy> FuncReturnTypesVector; // Metadata holds all the data that is needed to describe compiled wasm code // at runtime (as opposed to data that is only used to statically link or // instantiate a module). // // Metadata is built incrementally by ModuleGenerator and then shared immutably // between modules. +// +// The Metadata structure is split into tier-invariant and tier-variant parts; +// the former points to instances of the latter. Additionally, the asm.js +// subsystem subclasses the Metadata, adding more tier-invariant data, some of +// which is serialized. See AsmJS.cpp. struct MetadataCacheablePod { ModuleKind kind; MemoryUsage memoryUsage; uint32_t minMemoryLength; + uint32_t globalDataLength; Maybe<uint32_t> maxMemoryLength; Maybe<uint32_t> startFuncIndex; explicit MetadataCacheablePod(ModuleKind kind) : kind(kind), memoryUsage(MemoryUsage::None), - minMemoryLength(0) + minMemoryLength(0), + globalDataLength(0) {} }; typedef uint8_t ModuleHash[8]; +struct MetadataTier +{ + explicit MetadataTier(CompileMode mode) : mode(mode) {} + + CompileMode mode; + + MemoryAccessVector memoryAccesses; + CodeRangeVector codeRanges; + CallSiteVector callSites; + FuncImportVector funcImports; + FuncExportVector funcExports; + + // Debug information, not serialized. + Uint32Vector debugTrapFarJumpOffsets; + Uint32Vector debugFuncToCodeRange; + + WASM_DECLARE_SERIALIZABLE(MetadataTier); +}; + +typedef UniquePtr<MetadataTier> UniqueMetadataTier; + struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod { - explicit Metadata(ModuleKind kind = ModuleKind::Wasm) : MetadataCacheablePod(kind) {} + // Both `tier_` and the means of accessing it will become more complicated + // when tiering is implemented. 
+ UniqueMetadataTier tier_; + + const MetadataTier& tier() const { return *tier_; } + MetadataTier& tier() { return *tier_; } + + explicit Metadata(UniqueMetadataTier tier, ModuleKind kind = ModuleKind::Wasm) + : MetadataCacheablePod(kind), + tier_(Move(tier)) + {} virtual ~Metadata() {} MetadataCacheablePod& pod() { return *this; } const MetadataCacheablePod& pod() const { return *this; } - FuncImportVector funcImports; - FuncExportVector funcExports; SigWithIdVector sigIds; GlobalDescVector globals; TableDescVector tables; - MemoryAccessVector memoryAccesses; - CodeRangeVector codeRanges; - CallSiteVector callSites; NameInBytecodeVector funcNames; CustomSectionVector customSections; CacheableChars filename; ModuleHash hash; // Debug-enabled code is not serialized. bool debugEnabled; - Uint32Vector debugTrapFarJumpOffsets; - Uint32Vector debugFuncToCodeRange; FuncArgTypesVector debugFuncArgTypes; FuncReturnTypesVector debugFuncReturnTypes; bool usesMemory() const { return UsesMemory(memoryUsage); } bool hasSharedMemory() const { return memoryUsage == MemoryUsage::Shared; } const FuncExport& lookupFuncExport(uint32_t funcIndex) const; @@ -385,35 +428,37 @@ struct Metadata : ShareableBase<Metadata virtual bool getFuncName(const Bytes* maybeBytecode, uint32_t funcIndex, UTF8Bytes* name) const; WASM_DECLARE_SERIALIZABLE_VIRTUAL(Metadata); }; typedef RefPtr<Metadata> MutableMetadata; typedef RefPtr<const Metadata> SharedMetadata; -// Code objects own executable code and the metadata that describes it. At the -// moment, Code objects are owned uniquely by instances since CodeSegments are -// not shareable. However, once this restriction is removed, a single Code -// object will be shared between a module and all its instances. +// Code objects own executable code and the metadata that describe it. A single +// Code object is normally shared between a module and all its instances. // // profilingLabels_ is lazily initialized, but behind a lock. class Code : public ShareableBase<Code> { - UniqueConstCodeSegment segment_; + // `tier_` and the means of accessing it will change as we implement + // tiering. + + UniqueConstCodeSegment tier_; SharedMetadata metadata_; ExclusiveData<CacheableCharsVector> profilingLabels_; public: Code(); - Code(UniqueConstCodeSegment segment, const Metadata& metadata); + Code(UniqueConstCodeSegment tier, const Metadata& metadata); - const CodeSegment& segment() const { return *segment_; } + const CodeSegment& segmentTier() const { return *tier_; } + const MetadataTier& metadataTier() const { return metadata_->tier(); } const Metadata& metadata() const { return *metadata_; } // Frame iterator support: const CallSite* lookupCallSite(void* returnAddress) const; const CodeRange* lookupRange(void* pc) const; const MemoryAccess* lookupMemoryAccess(void* pc) const;
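The comments above flag that both Metadata and Code currently hold exactly one tier. A compilable sketch of that ownership split, with placeholder names (MetadataSketch, MetadataTierSketch) that are not part of the tree:

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

enum class CompileMode { Baseline, Ion };

// Tier-variant part: everything whose contents depend on how the machine
// code was produced (code ranges, call sites, import/export offsets, ...).
struct MetadataTierSketch {
    explicit MetadataTierSketch(CompileMode mode) : mode(mode) {}
    CompileMode mode;
    std::vector<uint32_t> codeRangeOffsets; // placeholder for the real vectors
};

// Tier-invariant part: owns exactly one tier today, hence the simple accessor.
struct MetadataSketch {
    explicit MetadataSketch(std::unique_ptr<MetadataTierSketch> tier)
        : tier_(std::move(tier)) {}
    const MetadataTierSketch& tier() const { return *tier_; }
    MetadataTierSketch& tier() { return *tier_; }
  private:
    std::unique_ptr<MetadataTierSketch> tier_;
};

With real tiering, tier_ would presumably become a per-tier collection and tier() would take a selector; keeping access behind a method is what makes that later change local.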
--- a/js/src/wasm/WasmCompartment.cpp +++ b/js/src/wasm/WasmCompartment.cpp @@ -44,20 +44,20 @@ struct InstanceComparator int operator()(const Instance* instance) const { if (instance == &target) return 0; // Instances can share code, so the segments can be equal (though they // can't partially overlap). If the codeBases are equal, we sort by // Instance address. Thus a Code may map to many instances. - if (instance->codeBase() == target.codeBase()) + if (instance->codeBaseTier() == target.codeBaseTier()) return instance < &target ? -1 : 1; - return target.codeBase() < instance->codeBase() ? -1 : 1; + return target.codeBaseTier() < instance->codeBaseTier() ? -1 : 1; } }; bool Compartment::registerInstance(JSContext* cx, HandleWasmInstanceObject instanceObj) { Instance& instance = instanceObj->instance(); MOZ_ASSERT(this == &instance.compartment()->wasm); @@ -98,19 +98,19 @@ Compartment::unregisterInstance(Instance } struct PCComparator { const void* pc; explicit PCComparator(const void* pc) : pc(pc) {} int operator()(const Instance* instance) const { - if (instance->codeSegment().containsCodePC(pc)) + if (instance->codeSegmentTier().containsCodePC(pc)) return 0; - return pc < instance->codeBase() ? -1 : 1; + return pc < instance->codeBaseTier() ? -1 : 1; } }; const Code* Compartment::lookupCode(const void* pc) const { // lookupCode() can be called asynchronously from the interrupt signal // handler. In that case, the signal handler is just asking whether the pc
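InstanceComparator keeps the compartment's instance vector sorted by code base, with the instance address as a tiebreaker now that instances can share a Code. A sketch of the same strict ordering as a plain less-than predicate, usable with std::sort or std::lower_bound; InstanceSketch is a stand-in, not the real wasm::Instance:

#include <cstdint>

// Stand-in for wasm::Instance; only the code base matters for ordering.
struct InstanceSketch {
    const uint8_t* codeBase; // base of the (possibly shared) code segment
};

// Strict weak ordering matching InstanceComparator: instances sharing a Code
// compare equal on codeBase, so the object address breaks the tie and keeps
// the sorted vector deterministic.
inline bool
InstanceLess(const InstanceSketch* a, const InstanceSketch* b)
{
    if (a->codeBase != b->codeBase)
        return a->codeBase < b->codeBase;
    return a < b;
}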
--- a/js/src/wasm/WasmDebug.cpp +++ b/js/src/wasm/WasmDebug.cpp @@ -105,17 +105,17 @@ const char tooBigMessage[] = const char notGeneratedMessage[] = "WebAssembly text generation was disabled."; static const unsigned TooBig = 1000000; static const uint32_t DefaultBinarySourceColumnNumber = 1; static const CallSite* -SlowCallSiteSearchByOffset(const Metadata& metadata, uint32_t offset) +SlowCallSiteSearchByOffset(const MetadataTier& metadata, uint32_t offset) { for (const CallSite& callSite : metadata.callSites) { if (callSite.lineOrBytecode() == offset && callSite.kind() == CallSiteDesc::Breakpoint) return &callSite; } return nullptr; } @@ -184,17 +184,17 @@ struct LineComparator bool DebugState::getLineOffsets(JSContext* cx, size_t lineno, Vector<uint32_t>* offsets) { if (!debugEnabled()) return true; if (binarySource_) { - const CallSite* callsite = SlowCallSiteSearchByOffset(metadata(), lineno); + const CallSite* callsite = SlowCallSiteSearchByOffset(metadataTier(), lineno); if (callsite && !offsets->append(lineno)) return false; return true; } if (!ensureSourceMap(cx)) return false; @@ -223,17 +223,17 @@ DebugState::getLineOffsets(JSContext* cx bool DebugState::getAllColumnOffsets(JSContext* cx, Vector<ExprLoc>* offsets) { if (!metadata().debugEnabled) return true; if (binarySource_) { - for (const CallSite& callSite : metadata().callSites) { + for (const CallSite& callSite : metadataTier().callSites) { if (callSite.kind() != CallSite::Breakpoint) continue; uint32_t offset = callSite.lineOrBytecode(); if (!offsets->emplaceBack(offset, DefaultBinarySourceColumnNumber, offset)) return false; } return true; } @@ -250,17 +250,17 @@ DebugState::getAllColumnOffsets(JSContex bool DebugState::getOffsetLocation(JSContext* cx, uint32_t offset, bool* found, size_t* lineno, size_t* column) { *found = false; if (!debugEnabled()) return true; if (binarySource_) { - if (!SlowCallSiteSearchByOffset(metadata(), offset)) + if (!SlowCallSiteSearchByOffset(metadataTier(), offset)) return true; // offset was not found *found = true; *lineno = offset; *column = DefaultBinarySourceColumnNumber; return true; } if (!ensureSourceMap(cx)) @@ -325,17 +325,17 @@ DebugState::incrementStepModeCount(JSCon p->value()++; return true; } if (!stepModeCounters_.add(p, funcIndex, 1)) { ReportOutOfMemory(cx); return false; } - AutoWritableJitCode awjc(cx->runtime(), code_->segment().base() + codeRange.begin(), + AutoWritableJitCode awjc(cx->runtime(), code_->segmentTier().base() + codeRange.begin(), codeRange.end() - codeRange.begin()); AutoFlushICache afc("Code::incrementStepModeCount"); for (const CallSite& callSite : callSites()) { if (callSite.kind() != CallSite::Breakpoint) continue; uint32_t offset = callSite.returnAddressOffset(); if (codeRange.begin() <= offset && offset <= codeRange.end()) @@ -354,17 +354,17 @@ DebugState::decrementStepModeCount(JSCon MOZ_ASSERT(stepModeCounters_.initialized() && !stepModeCounters_.empty()); StepModeCounters::Ptr p = stepModeCounters_.lookup(funcIndex); MOZ_ASSERT(p); if (--p->value()) return true; stepModeCounters_.remove(p); - AutoWritableJitCode awjc(cx->runtime(), code_->segment().base() + codeRange.begin(), + AutoWritableJitCode awjc(cx->runtime(), code_->segmentTier().base() + codeRange.begin(), codeRange.end() - codeRange.begin()); AutoFlushICache afc("Code::decrementStepModeCount"); for (const CallSite& callSite : callSites()) { if (callSite.kind() != CallSite::Breakpoint) continue; uint32_t offset = callSite.returnAddressOffset(); if (codeRange.begin() <= offset && offset 
<= codeRange.end()) { @@ -375,37 +375,37 @@ DebugState::decrementStepModeCount(JSCon return true; } bool DebugState::hasBreakpointTrapAtOffset(uint32_t offset) { if (!debugEnabled()) return false; - return SlowCallSiteSearchByOffset(metadata(), offset); + return SlowCallSiteSearchByOffset(metadataTier(), offset); } void DebugState::toggleBreakpointTrap(JSRuntime* rt, uint32_t offset, bool enabled) { MOZ_ASSERT(debugEnabled()); - const CallSite* callSite = SlowCallSiteSearchByOffset(metadata(), offset); + const CallSite* callSite = SlowCallSiteSearchByOffset(metadataTier(), offset); if (!callSite) return; size_t debugTrapOffset = callSite->returnAddressOffset(); - const CodeRange* codeRange = code_->lookupRange(code_->segment().base() + debugTrapOffset); + const CodeRange* codeRange = code_->lookupRange(code_->segmentTier().base() + debugTrapOffset); MOZ_ASSERT(codeRange && codeRange->isFunction()); if (stepModeCounters_.initialized() && stepModeCounters_.lookup(codeRange->funcIndex())) return; // no need to toggle when step mode is enabled - AutoWritableJitCode awjc(rt, code_->segment().base(), code_->segment().length()); + AutoWritableJitCode awjc(rt, code_->segmentTier().base(), code_->segmentTier().length()); AutoFlushICache afc("Code::toggleBreakpointTrap"); - AutoFlushICache::setRange(uintptr_t(code_->segment().base()), code_->segment().length()); + AutoFlushICache::setRange(uintptr_t(code_->segmentTier().base()), code_->segmentTier().length()); toggleDebugTrap(debugTrapOffset, enabled); } WasmBreakpointSite* DebugState::getOrCreateBreakpointSite(JSContext* cx, uint32_t offset) { WasmBreakpointSite* site; if (!breakpointSites_.initialized() && !breakpointSites_.init()) { @@ -473,27 +473,27 @@ DebugState::clearBreakpointsIn(JSContext } return true; } void DebugState::toggleDebugTrap(uint32_t offset, bool enabled) { MOZ_ASSERT(offset); - uint8_t* trap = code_->segment().base() + offset; - const Uint32Vector& farJumpOffsets = metadata().debugTrapFarJumpOffsets; + uint8_t* trap = code_->segmentTier().base() + offset; + const Uint32Vector& farJumpOffsets = metadataTier().debugTrapFarJumpOffsets; if (enabled) { MOZ_ASSERT(farJumpOffsets.length() > 0); size_t i = 0; while (i < farJumpOffsets.length() && offset < farJumpOffsets[i]) i++; if (i >= farJumpOffsets.length() || (i > 0 && offset - farJumpOffsets[i - 1] < farJumpOffsets[i] - offset)) i--; - uint8_t* farJump = code_->segment().base() + farJumpOffsets[i]; + uint8_t* farJump = code_->segmentTier().base() + farJumpOffsets[i]; MacroAssembler::patchNopToCall(trap, farJump); } else { MacroAssembler::patchCallToNop(trap); } } void DebugState::adjustEnterAndLeaveFrameTrapsState(JSContext* cx, bool enabled) @@ -505,19 +505,19 @@ DebugState::adjustEnterAndLeaveFrameTrap if (enabled) ++enterAndLeaveFrameTrapsCounter_; else --enterAndLeaveFrameTrapsCounter_; bool stillEnabled = enterAndLeaveFrameTrapsCounter_ > 0; if (wasEnabled == stillEnabled) return; - AutoWritableJitCode awjc(cx->runtime(), code_->segment().base(), code_->segment().length()); + AutoWritableJitCode awjc(cx->runtime(), code_->segmentTier().base(), code_->segmentTier().length()); AutoFlushICache afc("Code::adjustEnterAndLeaveFrameTrapsState"); - AutoFlushICache::setRange(uintptr_t(code_->segment().base()), code_->segment().length()); + AutoFlushICache::setRange(uintptr_t(code_->segmentTier().base()), code_->segmentTier().length()); for (const CallSite& callSite : callSites()) { if (callSite.kind() != CallSite::EnterFrame && callSite.kind() != CallSite::LeaveFrame) continue; 
toggleDebugTrap(callSite.returnAddressOffset(), stillEnabled); } } bool
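toggleDebugTrap above patches a nop into a call whose target must be a far-jump stub within branch range, so it scans debugTrapFarJumpOffsets for the stub nearest the trap. The idea, restated as a self-contained nearest-element search; this uses std::lower_bound and restates the intent of the in-tree scan rather than reproducing it line for line:

#include <algorithm>
#include <cstdint>
#include <vector>

// Pick the far-jump stub nearest to a breakpoint offset so the patched call
// can reach it. Assumes `sorted` is ascending and non-empty.
inline uint32_t
NearestFarJump(const std::vector<uint32_t>& sorted, uint32_t offset)
{
    auto it = std::lower_bound(sorted.begin(), sorted.end(), offset);
    if (it == sorted.end())
        return sorted.back();  // every stub is below the offset
    if (it == sorted.begin())
        return sorted.front(); // every stub is above the offset
    uint32_t above = *it;
    uint32_t below = *(it - 1);
    return (offset - below <= above - offset) ? below : above;
}

The sortedness this relies on is asserted when ModuleGenerator::finish builds debugTrapFarJumpOffsets, further down in this changeset.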
--- a/js/src/wasm/WasmDebug.h +++ b/js/src/wasm/WasmDebug.h @@ -29,17 +29,18 @@ class Debugger; class WasmActivation; class WasmBreakpoint; class WasmBreakpointSite; class WasmInstanceObject; namespace wasm { struct LinkData; -struct Metadata; +struct LinkDataTier; +struct MetadataTier; class FrameIterator; // The generated source location for the AST node/expression. The offset field refers to // an offset in a binary format file. struct ExprLoc { uint32_t lineno; @@ -145,23 +146,24 @@ class DebugState // Debug URL helpers. JSString* debugDisplayURL(JSContext* cx) const; bool getSourceMappingURL(JSContext* cx, MutableHandleString result) const; // Accessors for commonly used elements of linked structures. + const MetadataTier& metadataTier() const { return code_->metadataTier(); } const Metadata& metadata() const { return code_->metadata(); } bool debugEnabled() const { return metadata().debugEnabled; } - const CodeRangeVector& codeRanges() const { return metadata().codeRanges; } - const CallSiteVector& callSites() const { return metadata().callSites; } + const CodeRangeVector& codeRanges() const { return metadataTier().codeRanges; } + const CallSiteVector& callSites() const { return metadataTier().callSites; } uint32_t debugFuncToCodeRange(uint32_t funcIndex) const { - return metadata().debugFuncToCodeRange[funcIndex]; + return metadataTier().debugFuncToCodeRange[funcIndex]; } // about:memory reporting: void addSizeOfMisc(MallocSizeOf mallocSizeOf, Metadata::SeenSet* seenMetadata, ShareableBytes::SeenSet* seenBytes, Code::SeenSet* seenCode,

--- a/js/src/wasm/WasmFrameIterator.cpp +++ b/js/src/wasm/WasmFrameIterator.cpp @@ -209,17 +209,17 @@ FrameIterator::unwoundAddressOfReturnAdd bool FrameIterator::debugEnabled() const { MOZ_ASSERT(!done()); // Only non-imported functions can have debug frames. return code_->metadata().debugEnabled && - codeRange_->funcIndex() >= code_->metadata().funcImports.length(); + codeRange_->funcIndex() >= code_->metadataTier().funcImports.length(); } DebugFrame* FrameIterator::debugFrame() const { MOZ_ASSERT(!done()); MOZ_ASSERT(debugEnabled()); return reinterpret_cast<DebugFrame*>((uint8_t*)fp_ - DebugFrame::offsetOfFrame()); @@ -603,17 +603,17 @@ ProfilingFrameIterator::ProfilingFrameIt // CodeRange is relative. If the pc is not in a wasm module or a builtin // thunk, then execution must be entering from or leaving to the C++ caller // that pushed the WasmActivation. const CodeRange* codeRange; uint8_t* codeBase; code_ = activation_->compartment()->wasm.lookupCode(pc); if (code_) { codeRange = code_->lookupRange(pc); - codeBase = code_->segment().base(); + codeBase = code_->segmentTier().base(); } else if (!LookupBuiltinThunk(pc, &codeRange, &codeBase)) { MOZ_ASSERT(done()); return; } // When the pc is inside the prologue/epilogue, the innermost call's Frame // is not complete and thus fp points to the second-to-innermost call's // Frame. Since fp can only tell you about its caller, naively unwinding @@ -955,17 +955,17 @@ wasm::LookupFaultingInstance(WasmActivat const Code* code = activation->compartment()->wasm.lookupCode(pc); if (!code) return nullptr; const CodeRange* codeRange = code->lookupRange(pc); if (!codeRange || !codeRange->isFunction()) return nullptr; - size_t offsetInModule = ((uint8_t*)pc) - code->segment().base(); + size_t offsetInModule = ((uint8_t*)pc) - code->segmentTier().base(); if (offsetInModule < codeRange->funcNormalEntry() + SetFP) return nullptr; if (offsetInModule >= codeRange->ret() - PoppedFP) return nullptr; Instance* instance = reinterpret_cast<Frame*>(fp)->tls->instance; MOZ_RELEASE_ASSERT(&instance->code() == code); return instance;
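LookupFaultingInstance only trusts fp once the faulting pc is past the point where the prologue has stored FP and before the epilogue has popped it. A sketch of that range check; kSetFP and kPoppedFP are illustrative constants here, not the real platform-specific values from WasmFrameIterator.cpp:

#include <cstdint>

// Illustrative prologue/epilogue sizes; the real SetFP/PoppedFP constants
// are platform-specific.
constexpr uint32_t kSetFP = 4;
constexpr uint32_t kPoppedFP = 4;

// A pc only has a complete, walkable Frame once the prologue has stored FP
// and before the epilogue has popped it; outside that window the fault must
// be handled without trusting fp, as in LookupFaultingInstance.
inline bool
PcHasCompleteFrame(const uint8_t* pc, const uint8_t* codeBase,
                   uint32_t funcNormalEntry, uint32_t ret)
{
    uint32_t offsetInModule = uint32_t(pc - codeBase);
    return offsetInModule >= funcNormalEntry + kSetFP &&
           offsetInModule < ret - kPoppedFP;
}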
--- a/js/src/wasm/WasmGenerator.cpp +++ b/js/src/wasm/WasmGenerator.cpp @@ -43,16 +43,18 @@ using mozilla::MakeEnumeratedRange; static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024; static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024; static const uint32_t BAD_CODE_RANGE = UINT32_MAX; ModuleGenerator::ModuleGenerator(UniqueChars* error) : compileMode_(CompileMode(-1)), error_(error), + linkDataTier_(nullptr), + metadataTier_(nullptr), numSigs_(0), numTables_(0), lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE), masmAlloc_(&lifo_), masm_(MacroAssembler::WasmToken(), masmAlloc_), lastPatchedCallsite_(0), startOfUnpatchedCallsites_(0), parallel_(false), @@ -104,16 +106,21 @@ ModuleGenerator::~ModuleGenerator() MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_); } bool ModuleGenerator::initAsmJS(Metadata* asmJSMetadata) { MOZ_ASSERT(env_->isAsmJS()); + if (!linkData_.initTier(CompileMode::Ion)) + return false; + linkDataTier_ = &linkData_.tier(); + + metadataTier_ = &asmJSMetadata->tier(); metadata_ = asmJSMetadata; MOZ_ASSERT(isAsmJS()); // Enabling debugging requires baseline and baseline is only enabled for // wasm (since the baseline does not currently support Atomics or SIMD). metadata_->debugEnabled = false; compileMode_ = CompileMode::Ion; @@ -129,36 +136,48 @@ ModuleGenerator::initAsmJS(Metadata* asm return true; } bool ModuleGenerator::initWasm(const CompileArgs& args) { MOZ_ASSERT(!env_->isAsmJS()); - metadata_ = js_new<Metadata>(); + bool canBaseline = BaselineCanCompile(); + bool debugEnabled = args.debugEnabled && canBaseline; + compileMode_ = ((args.alwaysBaseline || debugEnabled) && canBaseline) + ? CompileMode::Baseline + : CompileMode::Ion; + + if (!linkData_.initTier(compileMode_)) + return false; + linkDataTier_ = &linkData_.tier(); + + auto metadataTier = js::MakeUnique<MetadataTier>(compileMode_); + if (!metadataTier) + return false; + + metadata_ = js_new<Metadata>(Move(metadataTier)); if (!metadata_) return false; + metadataTier_ = &metadata_->tier(); + MOZ_ASSERT(!isAsmJS()); - bool canBaseline = BaselineCanCompile(); - metadata_->debugEnabled = args.debugEnabled && canBaseline; - compileMode_ = ((args.alwaysBaseline || metadata_->debugEnabled) && canBaseline) - ? CompileMode::Baseline - : CompileMode::Ion; + metadata_->debugEnabled = debugEnabled; // For wasm, the Vectors are correctly-sized and already initialized. 
numSigs_ = env_->sigs.length(); numTables_ = env_->tables.length(); for (size_t i = 0; i < env_->funcImportGlobalDataOffsets.length(); i++) { - env_->funcImportGlobalDataOffsets[i] = linkData_.globalDataLength; - linkData_.globalDataLength += sizeof(FuncImportTls); + env_->funcImportGlobalDataOffsets[i] = metadata_->globalDataLength; + metadata_->globalDataLength += sizeof(FuncImportTls); if (!addFuncImport(*env_->funcSigs[i], env_->funcImportGlobalDataOffsets[i])) return false; } for (TableDesc& table : env_->tables) { if (!allocateGlobalBytes(sizeof(TableTls), sizeof(void*), &table.globalDataOffset)) return false; } @@ -219,18 +238,16 @@ ModuleGenerator::initWasm(const CompileA } bool ModuleGenerator::init(UniqueModuleEnvironment env, const CompileArgs& args, Metadata* maybeAsmJSMetadata) { env_ = Move(env); - linkData_.globalDataLength = 0; - if (!funcToCodeRange_.appendN(BAD_CODE_RANGE, env_->funcSigs.length())) return false; if (!assumptions_.clone(args.assumptions)) return false; if (!exportedFuncs_.init()) return false; @@ -292,17 +309,17 @@ ModuleGenerator::funcIsCompiled(uint32_t { return funcToCodeRange_[funcIndex] != BAD_CODE_RANGE; } const CodeRange& ModuleGenerator::funcCodeRange(uint32_t funcIndex) const { MOZ_ASSERT(funcIsCompiled(funcIndex)); - const CodeRange& cr = metadata_->codeRanges[funcToCodeRange_[funcIndex]]; + const CodeRange& cr = metadataTier_->codeRanges[funcToCodeRange_[funcIndex]]; MOZ_ASSERT(cr.isFunction()); return cr; } static uint32_t JumpRange() { return Min(JitOptions.jumpThreshold, JumpImmediateRange); @@ -353,17 +370,17 @@ ModuleGenerator::patchCallSites() if (!p) { Offsets offsets; offsets.begin = masm_.currentOffset(); masm_.append(CallFarJump(cs.funcIndex(), masm_.farJumpWithPatch())); offsets.end = masm_.currentOffset(); if (masm_.oom()) return false; - if (!metadata_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets)) return false; if (!existingCallFarJumps.add(p, cs.funcIndex(), offsets.begin)) return false; } masm_.patchCall(callerOffset, p->value()); break; } @@ -374,42 +391,42 @@ ModuleGenerator::patchCallSites() Offsets offsets; offsets.begin = masm_.currentOffset(); masm_.loadPtr(Address(FramePointer, offsetof(Frame, tls)), WasmTlsReg); masm_.append(TrapFarJump(cs.trap(), masm_.farJumpWithPatch())); offsets.end = masm_.currentOffset(); if (masm_.oom()) return false; - if (!metadata_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets)) return false; existingTrapFarJumps[cs.trap()] = Some(offsets.begin); } masm_.patchCall(callerOffset, *existingTrapFarJumps[cs.trap()]); break; } case CallSiteDesc::Breakpoint: case CallSiteDesc::EnterFrame: case CallSiteDesc::LeaveFrame: { - Uint32Vector& jumps = metadata_->debugTrapFarJumpOffsets; + Uint32Vector& jumps = metadataTier_->debugTrapFarJumpOffsets; if (jumps.empty() || uint32_t(abs(int32_t(jumps.back()) - int32_t(callerOffset))) >= JumpRange()) { // See BaseCompiler::insertBreakablePoint for why we must // reload the TLS register on this path. 
Offsets offsets; offsets.begin = masm_.currentOffset(); masm_.loadPtr(Address(FramePointer, offsetof(Frame, tls)), WasmTlsReg); uint32_t jumpOffset = masm_.farJumpWithPatch().offset(); offsets.end = masm_.currentOffset(); if (masm_.oom()) return false; - if (!metadata_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets)) return false; if (!debugTrapFarJumps_.emplaceBack(jumpOffset)) return false; if (!jumps.emplaceBack(offsets.begin)) return false; } break; } @@ -453,18 +470,18 @@ ModuleGenerator::finishTask(CompileTask* const FuncBytes& func = unit.func(); // Offset the recorded FuncOffsets by the offset of the function in the // whole module's code segment. FuncOffsets offsets = unit.offsets(); offsets.offsetBy(offsetInWhole); // Add the CodeRange for this function. - uint32_t funcCodeRangeIndex = metadata_->codeRanges.length(); - if (!metadata_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), offsets)) + uint32_t funcCodeRangeIndex = metadataTier_->codeRanges.length(); + if (!metadataTier_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), offsets)) return false; MOZ_ASSERT(!funcIsCompiled(func.index())); funcToCodeRange_[func.index()] = funcCodeRangeIndex; } // Merge the compiled results into the whole-module masm. mozilla::DebugOnly<size_t> sizeBefore = masm_.size(); @@ -502,42 +519,42 @@ ModuleGenerator::finishFuncExports() if (!sorted.reserve(exportedFuncs_.count())) return false; for (Uint32Set::Range r = exportedFuncs_.all(); !r.empty(); r.popFront()) sorted.infallibleAppend(r.front()); std::sort(sorted.begin(), sorted.end()); - MOZ_ASSERT(metadata_->funcExports.empty()); - if (!metadata_->funcExports.reserve(sorted.length())) + MOZ_ASSERT(metadataTier_->funcExports.empty()); + if (!metadataTier_->funcExports.reserve(sorted.length())) return false; for (uint32_t funcIndex : sorted) { Sig sig; if (!sig.clone(funcSig(funcIndex))) return false; uint32_t codeRangeIndex = funcToCodeRange_[funcIndex]; - metadata_->funcExports.infallibleEmplaceBack(Move(sig), funcIndex, codeRangeIndex); + metadataTier_->funcExports.infallibleEmplaceBack(Move(sig), funcIndex, codeRangeIndex); } return true; } typedef Vector<Offsets, 0, SystemAllocPolicy> OffsetVector; typedef Vector<CallableOffsets, 0, SystemAllocPolicy> CallableOffsetVector; bool ModuleGenerator::finishCodegen() { masm_.haltingAlign(CodeAlignment); uint32_t offsetInWhole = masm_.size(); - uint32_t numFuncExports = metadata_->funcExports.length(); + uint32_t numFuncExports = metadataTier_->funcExports.length(); MOZ_ASSERT(numFuncExports == exportedFuncs_.count()); // Generate stubs in a separate MacroAssembler since, otherwise, for modules // larger than the JumpImmediateRange, even local uses of Label will fail // due to the large absolute offsets temporarily stored by Label::bind(). 
OffsetVector entries; CallableOffsetVector interpExits; @@ -552,25 +569,25 @@ ModuleGenerator::finishCodegen() { TempAllocator alloc(&lifo_); MacroAssembler masm(MacroAssembler::WasmToken(), alloc); Label throwLabel; if (!entries.resize(numFuncExports)) return false; for (uint32_t i = 0; i < numFuncExports; i++) - entries[i] = GenerateEntry(masm, metadata_->funcExports[i]); + entries[i] = GenerateEntry(masm, metadataTier_->funcExports[i]); if (!interpExits.resize(numFuncImports())) return false; if (!jitExits.resize(numFuncImports())) return false; for (uint32_t i = 0; i < numFuncImports(); i++) { - interpExits[i] = GenerateImportInterpExit(masm, metadata_->funcImports[i], i, &throwLabel); - jitExits[i] = GenerateImportJitExit(masm, metadata_->funcImports[i], &throwLabel); + interpExits[i] = GenerateImportInterpExit(masm, metadataTier_->funcImports[i], i, &throwLabel); + jitExits[i] = GenerateImportJitExit(masm, metadataTier_->funcImports[i], &throwLabel); } for (Trap trap : MakeEnumeratedRange(Trap::Limit)) trapExits[trap] = GenerateTrapExit(masm, trap, &throwLabel); outOfBoundsExit = GenerateOutOfBoundsExit(masm, &throwLabel); unalignedAccessExit = GenerateUnalignedExit(masm, &throwLabel); interruptExit = GenerateInterruptExit(masm, &throwLabel); @@ -581,64 +598,64 @@ ModuleGenerator::finishCodegen() return false; } // Adjust each of the resulting Offsets (to account for being merged into // masm_) and then create code ranges for all the stubs. for (uint32_t i = 0; i < numFuncExports; i++) { entries[i].offsetBy(offsetInWhole); - metadata_->funcExports[i].initEntryOffset(entries[i].begin); - if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i])) + metadataTier_->funcExports[i].initEntryOffset(entries[i].begin); + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::Entry, entries[i])) return false; } for (uint32_t i = 0; i < numFuncImports(); i++) { interpExits[i].offsetBy(offsetInWhole); - metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin); - if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i])) + metadataTier_->funcImports[i].initInterpExitOffset(interpExits[i].begin); + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i])) return false; jitExits[i].offsetBy(offsetInWhole); - metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin); - if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i])) + metadataTier_->funcImports[i].initJitExitOffset(jitExits[i].begin); + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i])) return false; } for (Trap trap : MakeEnumeratedRange(Trap::Limit)) { trapExits[trap].offsetBy(offsetInWhole); - if (!metadata_->codeRanges.emplaceBack(CodeRange::TrapExit, trapExits[trap])) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::TrapExit, trapExits[trap])) return false; } outOfBoundsExit.offsetBy(offsetInWhole); - if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, outOfBoundsExit)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::Inline, outOfBoundsExit)) return false; unalignedAccessExit.offsetBy(offsetInWhole); - if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, unalignedAccessExit)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::Inline, unalignedAccessExit)) return false; interruptExit.offsetBy(offsetInWhole); - if (!metadata_->codeRanges.emplaceBack(CodeRange::Interrupt, interruptExit)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::Interrupt, interruptExit)) 
return false; throwStub.offsetBy(offsetInWhole); - if (!metadata_->codeRanges.emplaceBack(CodeRange::Throw, throwStub)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::Throw, throwStub)) return false; debugTrapStub.offsetBy(offsetInWhole); - if (!metadata_->codeRanges.emplaceBack(CodeRange::DebugTrap, debugTrapStub)) + if (!metadataTier_->codeRanges.emplaceBack(CodeRange::DebugTrap, debugTrapStub)) return false; // Fill in LinkData with the offsets of these stubs. - linkData_.unalignedAccessOffset = unalignedAccessExit.begin; - linkData_.outOfBoundsOffset = outOfBoundsExit.begin; - linkData_.interruptOffset = interruptExit.begin; + linkDataTier_->unalignedAccessOffset = unalignedAccessExit.begin; + linkDataTier_->outOfBoundsOffset = outOfBoundsExit.begin; + linkDataTier_->interruptOffset = interruptExit.begin; // Now that all other code has been emitted, patch all remaining callsites // then far jumps. Patching callsites can generate far jumps so there is an // ordering dependency. if (!patchCallSites()) return false; @@ -651,70 +668,70 @@ ModuleGenerator::finishCodegen() return !masm_.oom(); } bool ModuleGenerator::finishLinkData() { // Inflate the global bytes up to page size so that the total bytes are a // page size (as required by the allocator functions). - linkData_.globalDataLength = AlignBytes(linkData_.globalDataLength, gc::SystemPageSize()); + metadata_->globalDataLength = AlignBytes(metadata_->globalDataLength, gc::SystemPageSize()); // Add links to absolute addresses identified symbolically. for (size_t i = 0; i < masm_.numSymbolicAccesses(); i++) { SymbolicAccess src = masm_.symbolicAccess(i); - if (!linkData_.symbolicLinks[src.target].append(src.patchAt.offset())) + if (!linkDataTier_->symbolicLinks[src.target].append(src.patchAt.offset())) return false; } // Relative link metadata: absolute addresses that refer to another point within // the asm.js module. // CodeLabels are used for switch cases and loads from floating-point / // SIMD values in the constant pool. 
for (size_t i = 0; i < masm_.numCodeLabels(); i++) { CodeLabel cl = masm_.codeLabel(i); - LinkData::InternalLink inLink(LinkData::InternalLink::CodeLabel); + LinkDataTier::InternalLink inLink(LinkDataTier::InternalLink::CodeLabel); inLink.patchAtOffset = masm_.labelToPatchOffset(*cl.patchAt()); inLink.targetOffset = cl.target()->offset(); - if (!linkData_.internalLinks.append(inLink)) + if (!linkDataTier_->internalLinks.append(inLink)) return false; } return true; } bool ModuleGenerator::addFuncImport(const Sig& sig, uint32_t globalDataOffset) { MOZ_ASSERT(!finishedFuncDefs_); Sig copy; if (!copy.clone(sig)) return false; - return metadata_->funcImports.emplaceBack(Move(copy), globalDataOffset); + return metadataTier_->funcImports.emplaceBack(Move(copy), globalDataOffset); } bool ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset) { - CheckedInt<uint32_t> newGlobalDataLength(linkData_.globalDataLength); + CheckedInt<uint32_t> newGlobalDataLength(metadata_->globalDataLength); newGlobalDataLength += ComputeByteAlignment(newGlobalDataLength.value(), align); if (!newGlobalDataLength.isValid()) return false; *globalDataOffset = newGlobalDataLength.value(); newGlobalDataLength += bytes; if (!newGlobalDataLength.isValid()) return false; - linkData_.globalDataLength = newGlobalDataLength.value(); + metadata_->globalDataLength = newGlobalDataLength.value(); return true; } bool ModuleGenerator::allocateGlobal(GlobalDesc* global) { MOZ_ASSERT(!startedFuncDefs_); unsigned width = 0; @@ -823,27 +840,27 @@ ModuleGenerator::initImport(uint32_t fun uint32_t globalDataOffset; if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*), &globalDataOffset)) return false; MOZ_ASSERT(!env_->funcImportGlobalDataOffsets[funcIndex]); env_->funcImportGlobalDataOffsets[funcIndex] = globalDataOffset; - MOZ_ASSERT(funcIndex == metadata_->funcImports.length()); + MOZ_ASSERT(funcIndex == metadataTier_->funcImports.length()); return addFuncImport(sig(sigIndex), globalDataOffset); } uint32_t ModuleGenerator::numFuncImports() const { // Until all functions have been validated, asm.js doesn't know the total // number of imports. MOZ_ASSERT_IF(isAsmJS(), finishedFuncDefs_); - return metadata_->funcImports.length(); + return metadataTier_->funcImports.length(); } const SigWithId& ModuleGenerator::funcSig(uint32_t funcIndex) const { MOZ_ASSERT(env_->funcSigs[funcIndex]); return *env_->funcSigs[funcIndex]; } @@ -995,34 +1012,34 @@ ModuleGenerator::finishFuncDefs() if (currentTask_ && !launchBatchCompile()) return false; while (outstanding_ > 0) { if (!finishOutstandingTask()) return false; } - linkData_.functionCodeLength = masm_.size(); + linkDataTier_->functionCodeLength = masm_.size(); finishedFuncDefs_ = true; // Generate wrapper functions for every import. These wrappers turn imports // into plain functions so they can be put into tables and re-exported. // asm.js cannot do either and so no wrappers are generated. 
if (!isAsmJS()) { for (size_t funcIndex = 0; funcIndex < numFuncImports(); funcIndex++) { - const FuncImport& funcImport = metadata_->funcImports[funcIndex]; + const FuncImport& funcImport = metadataTier_->funcImports[funcIndex]; const SigWithId& sig = funcSig(funcIndex); FuncOffsets offsets = GenerateImportFunction(masm_, funcImport, sig.id); if (masm_.oom()) return false; - uint32_t codeRangeIndex = metadata_->codeRanges.length(); - if (!metadata_->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0, offsets)) + uint32_t codeRangeIndex = metadataTier_->codeRanges.length(); + if (!metadataTier_->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0, offsets)) return false; MOZ_ASSERT(!funcIsCompiled(funcIndex)); funcToCodeRange_[funcIndex] = codeRangeIndex; } } // All function indices should have an associated code range at this point @@ -1121,74 +1138,78 @@ ModuleGenerator::finish(const ShareableB if (!finishFuncExports()) return nullptr; if (!finishCodegen()) return nullptr; // Convert the CallSiteAndTargetVector (needed during generation) to a // CallSiteVector (what is stored in the Module). - if (!metadata_->callSites.appendAll(masm_.callSites())) + if (!metadataTier_->callSites.appendAll(masm_.callSites())) return nullptr; // The MacroAssembler has accumulated all the memory accesses during codegen. - metadata_->memoryAccesses = masm_.extractMemoryAccesses(); + metadataTier_->memoryAccesses = masm_.extractMemoryAccesses(); // Copy over data from the ModuleEnvironment. metadata_->memoryUsage = env_->memoryUsage; metadata_->minMemoryLength = env_->minMemoryLength; metadata_->maxMemoryLength = env_->maxMemoryLength; metadata_->tables = Move(env_->tables); metadata_->globals = Move(env_->globals); metadata_->funcNames = Move(env_->funcNames); metadata_->customSections = Move(env_->customSections); // Additional debug information to copy. metadata_->debugFuncArgTypes = Move(debugFuncArgTypes_); metadata_->debugFuncReturnTypes = Move(debugFuncReturnTypes_); if (metadata_->debugEnabled) - metadata_->debugFuncToCodeRange = Move(funcToCodeRange_); + metadataTier_->debugFuncToCodeRange = Move(funcToCodeRange_); // These Vectors can get large and the excess capacity can be significant, // so realloc them down to size. - metadata_->memoryAccesses.podResizeToFit(); - metadata_->codeRanges.podResizeToFit(); - metadata_->callSites.podResizeToFit(); - metadata_->debugTrapFarJumpOffsets.podResizeToFit(); - metadata_->debugFuncToCodeRange.podResizeToFit(); + metadataTier_->memoryAccesses.podResizeToFit(); + metadataTier_->codeRanges.podResizeToFit(); + metadataTier_->callSites.podResizeToFit(); + metadataTier_->debugTrapFarJumpOffsets.podResizeToFit(); + metadataTier_->debugFuncToCodeRange.podResizeToFit(); // For asm.js, the tables vector is over-allocated (to avoid resize during // parallel compilation). Shrink it back down to fit. if (isAsmJS() && !metadata_->tables.resize(numTables_)) return nullptr; // Assert CodeRanges are sorted. #ifdef DEBUG uint32_t lastEnd = 0; - for (const CodeRange& codeRange : metadata_->codeRanges) { + for (const CodeRange& codeRange : metadataTier_->codeRanges) { MOZ_ASSERT(codeRange.begin() >= lastEnd); lastEnd = codeRange.end(); } #endif // Assert debugTrapFarJumpOffsets are sorted.
#ifdef DEBUG uint32_t lastOffset = 0; - for (uint32_t debugTrapFarJumpOffset : metadata_->debugTrapFarJumpOffsets) { + for (uint32_t debugTrapFarJumpOffset : metadataTier_->debugTrapFarJumpOffsets) { MOZ_ASSERT(debugTrapFarJumpOffset >= lastOffset); lastOffset = debugTrapFarJumpOffset; } #endif if (!finishLinkData()) return nullptr; generateBytecodeHash(bytecode); - UniqueConstCodeSegment codeSegment = CodeSegment::create(masm_, bytecode, linkData_, *metadata_); + UniqueConstCodeSegment codeSegment = CodeSegment::create(compileMode_, + masm_, + bytecode, + *linkDataTier_, + *metadata_); if (!codeSegment) return nullptr; UniqueConstBytes maybeDebuggingBytes; if (metadata_->debugEnabled) { Bytes bytes; if (!bytes.resize(masm_.bytesNeeded())) return nullptr;
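A notable semantic change in this hunk set is that globalDataLength moves from LinkData to Metadata, so global-data allocation is now metadata-driven. A sketch of the bump allocator behind allocateGlobalBytes, with std::optional standing in for the bool-plus-outparam style and explicit 64-bit overflow checks standing in for CheckedInt<uint32_t>:

#include <cstdint>
#include <optional>

// Bump-allocate `bytes` from the global data area at the given alignment,
// mirroring ModuleGenerator::allocateGlobalBytes. Returns the allocated
// offset, or nullopt if the 32-bit global area would overflow.
inline std::optional<uint32_t>
AllocateGlobalBytes(uint32_t& globalDataLength, uint32_t bytes, uint32_t align)
{
    uint64_t newLength = globalDataLength;
    newLength += (align - newLength % align) % align; // pad up to alignment
    if (newLength > UINT32_MAX)
        return std::nullopt;
    uint32_t offset = uint32_t(newLength);
    newLength += bytes;
    if (newLength > UINT32_MAX)
        return std::nullopt;
    globalDataLength = uint32_t(newLength);
    return offset;
}

finishLinkData then rounds globalDataLength up to the system page size, since the allocator functions require whole pages.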
--- a/js/src/wasm/WasmGenerator.h +++ b/js/src/wasm/WasmGenerator.h @@ -84,22 +84,16 @@ class FuncBytes const SigWithId& sig() const { return *sig_; } uint32_t lineOrBytecode() const { return lineOrBytecode_; } const Uint32Vector& callSiteLineNums() const { return callSiteLineNums_; } }; typedef UniquePtr<FuncBytes> UniqueFuncBytes; typedef Vector<UniqueFuncBytes, 8, SystemAllocPolicy> UniqueFuncBytesVector; -enum class CompileMode -{ - Baseline, - Ion -}; - // FuncCompileUnit contains all the data necessary to produce and store the // results of a single function's compilation. class FuncCompileUnit { UniqueFuncBytes func_; FuncOffsets offsets_; DebugOnly<bool> finished_; @@ -215,17 +209,19 @@ class MOZ_STACK_CLASS ModuleGenerator typedef EnumeratedArray<Trap, Trap::Limit, CallableOffsets> TrapExitOffsetArray; // Constant parameters CompileMode compileMode_; UniqueChars* error_; // Data that is moved into the result of finish() Assumptions assumptions_; + LinkDataTier* linkDataTier_; // Owned by linkData_ LinkData linkData_; + MetadataTier* metadataTier_; // Owned by metadata_ MutableMetadata metadata_; // Data scoped to the ModuleGenerator's lifetime UniqueModuleEnvironment env_; uint32_t numSigs_; uint32_t numTables_; LifoAlloc lifo_; jit::JitContext jcx_;
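linkDataTier_ and metadataTier_ above are deliberately raw pointers: as the trailing comments say, they alias data owned by linkData_ and metadata_ and are only cached to avoid repeated tier() calls. A sketch of that borrow pattern and its lifetime contract, using hypothetical names:

#include <memory>

struct TierPartSketch { int payload = 0; };

struct OwnerSketch {
    bool initTier() { tier_ = std::make_unique<TierPartSketch>(); return tier_ != nullptr; }
    TierPartSketch& tier() { return *tier_; }
  private:
    std::unique_ptr<TierPartSketch> tier_;
};

// Generator-style borrower: caches a raw pointer into data the owner holds.
// Safe only because the owner outlives the borrower and never resets the
// tier, which is exactly the contract the "Owned by ..." comments document.
struct BorrowerSketch {
    explicit BorrowerSketch(OwnerSketch& owner) : tierPart_(&owner.tier()) {}
    TierPartSketch* tierPart_; // non-owning; freed by OwnerSketch
};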
--- a/js/src/wasm/WasmInstance.cpp +++ b/js/src/wasm/WasmInstance.cpp @@ -122,17 +122,17 @@ Instance::tableTls(const TableDesc& td) { return *(TableTls*)(globalSegment().globalData() + td.globalDataOffset); } bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv, MutableHandleValue rval) { - const FuncImport& fi = metadata().funcImports[funcImportIndex]; + const FuncImport& fi = metadataTier().funcImports[funcImportIndex]; InvokeArgs args(cx); if (!args.init(cx, argc)) return false; bool hasI64Arg = false; MOZ_ASSERT(fi.sig().args().length() == argc); for (size_t i = 0; i < argc; i++) { @@ -184,17 +184,17 @@ Instance::callImport(JSContext* cx, uint // Don't try to optimize if the function has at least one i64 arg or if // it returns an int64. GenerateJitExit relies on this, as does the // type inference code below in this function. if (hasI64Arg || fi.sig().ret() == ExprType::I64) return true; // The import may already have become optimized. - void* jitExitCode = codeBase() + fi.jitExitCodeOffset(); + void* jitExitCode = codeBaseTier() + fi.jitExitCodeOffset(); if (import.code == jitExitCode) return true; // Test if the function is JIT compiled. if (!importFun->hasScript()) return true; JSScript* script = importFun->nonLazyScript(); @@ -330,48 +330,48 @@ Instance::Instance(JSContext* cx, object_(object), code_(code), debug_(Move(debug)), globals_(Move(globals)), memory_(memory), tables_(Move(tables)), enterFrameTrapsEnabled_(false) { - MOZ_ASSERT(funcImports.length() == metadata().funcImports.length()); + MOZ_ASSERT(funcImports.length() == metadataTier().funcImports.length()); MOZ_ASSERT(tables_.length() == metadata().tables.length()); tlsData()->cx = cx; tlsData()->instance = this; tlsData()->globalData = globals_->globalData(); tlsData()->memoryBase = memory ? memory->buffer().dataPointerEither().unwrap() : nullptr; #ifndef WASM_HUGE_MEMORY tlsData()->boundsCheckLimit = memory ? 
memory->buffer().wasmBoundsCheckLimit() : 0; #endif tlsData()->stackLimit = *(void**)cx->stackLimitAddressForJitCode(JS::StackForUntrustedScript); - for (size_t i = 0; i < metadata().funcImports.length(); i++) { + for (size_t i = 0; i < metadataTier().funcImports.length(); i++) { HandleFunction f = funcImports[i]; - const FuncImport& fi = metadata().funcImports[i]; + const FuncImport& fi = metadataTier().funcImports[i]; FuncImportTls& import = funcImportTls(fi); if (!isAsmJS() && IsExportedWasmFunction(f)) { WasmInstanceObject* calleeInstanceObj = ExportedFunctionToInstanceObject(f); const CodeRange& codeRange = calleeInstanceObj->getExportedFunctionCodeRange(f); Instance& calleeInstance = calleeInstanceObj->instance(); import.tls = calleeInstance.tlsData(); - import.code = calleeInstance.codeSegment().base() + codeRange.funcNormalEntry(); + import.code = calleeInstance.codeBaseTier() + codeRange.funcNormalEntry(); import.baselineScript = nullptr; import.obj = calleeInstanceObj; } else if (void* thunk = MaybeGetBuiltinThunk(f, fi.sig(), cx)) { import.tls = tlsData(); import.code = thunk; import.baselineScript = nullptr; import.obj = f; } else { import.tls = tlsData(); - import.code = codeBase() + fi.interpExitCodeOffset(); + import.code = codeBaseTier() + fi.interpExitCodeOffset(); import.baselineScript = nullptr; import.obj = f; } } for (size_t i = 0; i < tables_.length(); i++) { const TableDesc& td = metadata().tables[i]; TableTls& table = tableTls(td); @@ -442,18 +442,18 @@ Instance::init(JSContext* cx) return true; } Instance::~Instance() { compartment_->wasm.unregisterInstance(*this); - for (unsigned i = 0; i < metadata().funcImports.length(); i++) { - FuncImportTls& import = funcImportTls(metadata().funcImports[i]); + for (unsigned i = 0; i < metadataTier().funcImports.length(); i++) { + FuncImportTls& import = funcImportTls(metadataTier().funcImports[i]); if (import.baselineScript) import.baselineScript->removeDependentWasmImport(*this, i); } if (!metadata().sigIds.empty()) { ExclusiveData<SigIdSet>::Guard lockedSigIdSet = sigIdSet->lock(); for (const SigWithId& sig : metadata().sigIds) { @@ -490,17 +490,17 @@ Instance::tracePrivate(JSTracer* trc) { // This method is only called from WasmInstanceObject so the only reason why // TraceEdge is called is so that the pointer can be updated during a moving // GC. TraceWeakEdge may sound better, but it is less efficient given that // we know object_ is already marked. MOZ_ASSERT(!gc::IsAboutToBeFinalized(&object_)); TraceEdge(trc, &object_, "wasm instance object"); - for (const FuncImport& fi : metadata().funcImports) + for (const FuncImport& fi : metadataTier().funcImports) TraceNullableEdge(trc, &funcImportTls(fi).obj, "wasm import"); for (const SharedTable& table : tables_) table->trace(trc); TraceNullableEdge(trc, &memory_, "wasm buffer"); } @@ -655,17 +655,17 @@ Instance::callExport(JSContext* cx, uint // when running this module. Additionally, push a JitActivation so that // the optimized wasm-to-Ion FFI call path (which we want to be very // fast) can avoid doing so. The JitActivation is marked as inactive so // stack iteration will skip over it. WasmActivation activation(cx); JitActivation jitActivation(cx, /* active */ false); // Call the per-exported-function trampoline created by GenerateEntry. 
- auto funcPtr = JS_DATA_TO_FUNC_PTR(ExportFuncPtr, codeBase() + func.entryOffset()); + auto funcPtr = JS_DATA_TO_FUNC_PTR(ExportFuncPtr, codeBaseTier() + func.entryOffset()); if (!CALL_GENERATED_2(funcPtr, exportArgs.begin(), tlsData())) return false; } if (isAsmJS() && args.isConstructing()) { // By spec, when a JS function is called as a constructor and this // function returns a primary type, which is the case for all asm.js // exported functions, the returned value is discarded and an empty @@ -799,19 +799,19 @@ Instance::onMovingGrowTable() TableTls& table = tableTls(metadata().tables[0]); table.length = tables_[0]->length(); table.base = tables_[0]->base(); } void Instance::deoptimizeImportExit(uint32_t funcImportIndex) { - const FuncImport& fi = metadata().funcImports[funcImportIndex]; + const FuncImport& fi = metadataTier().funcImports[funcImportIndex]; FuncImportTls& import = funcImportTls(fi); - import.code = codeBase() + fi.interpExitCodeOffset(); + import.code = codeBaseTier() + fi.interpExitCodeOffset(); import.baselineScript = nullptr; } void Instance::ensureEnterFrameTrapsState(JSContext* cx, bool enabled) { if (enterFrameTrapsEnabled_ == enabled) return;
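callImport and deoptimizeImportExit flip each import between two exit stubs found at fixed offsets from the (per-tier) code base: the generic interpreter exit and the faster JIT exit. A sketch of the two transitions; ImportExitSketch and the offset names are illustrative stand-ins for FuncImportTls and FuncImport:

#include <cstdint>

// Stand-in for the exit-stub field of FuncImportTls.
struct ImportExitSketch {
    void* code; // currently-installed exit stub for this import
};

// Once the callee is known to be JIT compiled, route calls through the fast
// JIT exit stub found at a fixed offset from the tier's code base.
inline void
OptimizeImportExit(ImportExitSketch& exit, uint8_t* codeBase, uint32_t jitExitCodeOffset)
{
    exit.code = codeBase + jitExitCodeOffset;
}

// If that assumption is invalidated (e.g. the callee's script goes away),
// fall back to the generic interpreter exit, as deoptimizeImportExit does.
inline void
DeoptimizeImportExit(ImportExitSketch& exit, uint8_t* codeBase, uint32_t interpExitCodeOffset)
{
    exit.code = codeBase + interpExitCodeOffset;
}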
--- a/js/src/wasm/WasmInstance.h +++ b/js/src/wasm/WasmInstance.h @@ -104,19 +104,20 @@ class Instance bool init(JSContext* cx); void trace(JSTracer* trc); JSContext* cx() const { return tlsData()->cx; } JSCompartment* compartment() const { return compartment_; } const Code& code() const { return *code_; } DebugState& debug() { return *debug_; } const DebugState& debug() const { return *debug_; } - const CodeSegment& codeSegment() const { return code_->segment(); } + const CodeSegment& codeSegmentTier() const { return code_->segmentTier(); } const GlobalSegment& globalSegment() const { return *globals_; } - uint8_t* codeBase() const { return code_->segment().base(); } + uint8_t* codeBaseTier() const { return code_->segmentTier().base(); } + const MetadataTier& metadataTier() const { return code_->metadataTier(); } const Metadata& metadata() const { return code_->metadata(); } bool isAsmJS() const { return metadata().isAsmJS(); } const SharedTableVector& tables() const { return tables_; } SharedMem<uint8_t*> memoryBase() const; size_t memoryLength() const; size_t memoryMappedSize() const; bool memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const; TlsData* tlsData() const { return globals_->tlsData(); } @@ -156,17 +157,17 @@ class Instance // Called by Wasm(Memory|Table)Object when a moving resize occurs: void onMovingGrowMemory(uint8_t* prevMemoryBase); void onMovingGrowTable(); // Debug support: - bool debugEnabled() const { return code_->metadata().debugEnabled; } + bool debugEnabled() const { return metadata().debugEnabled; } bool enterFrameTrapsEnabled() const { return enterFrameTrapsEnabled_; } void ensureEnterFrameTrapsState(JSContext* cx, bool enabled); // about:memory reporting: void addSizeOfMisc(MallocSizeOf mallocSizeOf, Metadata::SeenSet* seenMetadata, ShareableBytes::SeenSet* seenBytes,
--- a/js/src/wasm/WasmJS.cpp +++ b/js/src/wasm/WasmJS.cpp @@ -670,17 +670,17 @@ WasmModuleObject::imports(JSContext* cx, props.infallibleAppend(IdValuePair(NameToId(cx->names().name), StringValue(nameStr))); JSString* kindStr = KindToString(cx, names, import.kind); if (!kindStr) return false; props.infallibleAppend(IdValuePair(NameToId(names.kind), StringValue(kindStr))); if (JitOptions.wasmTestMode && import.kind == DefinitionKind::Function) { - JSString* sigStr = SigToString(cx, module->metadata().funcImports[numFuncImport++].sig()); + JSString* sigStr = SigToString(cx, module->metadataTier().funcImports[numFuncImport++].sig()); if (!sigStr) return false; if (!props.append(IdValuePair(NameToId(names.signature), StringValue(sigStr)))) return false; } JSObject* obj = ObjectGroup::newPlainObject(cx, props.begin(), props.length(), GenericObject); if (!obj) @@ -726,17 +726,17 @@ WasmModuleObject::exports(JSContext* cx, props.infallibleAppend(IdValuePair(NameToId(cx->names().name), StringValue(nameStr))); JSString* kindStr = KindToString(cx, names, exp.kind()); if (!kindStr) return false; props.infallibleAppend(IdValuePair(NameToId(names.kind), StringValue(kindStr))); if (JitOptions.wasmTestMode && exp.kind() == DefinitionKind::Function) { - JSString* sigStr = SigToString(cx, module->metadata().funcExports[numFuncExport++].sig()); + JSString* sigStr = SigToString(cx, module->metadataTier().funcExports[numFuncExport++].sig()); if (!sigStr) return false; if (!props.append(IdValuePair(NameToId(names.signature), StringValue(sigStr)))) return false; } JSObject* obj = ObjectGroup::newPlainObject(cx, props.begin(), props.length(), GenericObject); if (!obj) @@ -810,17 +810,17 @@ WasmModuleObject::create(JSContext* cx, { AutoSetNewObjectMetadata metadata(cx); auto* obj = NewObjectWithGivenProto<WasmModuleObject>(cx, proto); if (!obj) return nullptr; obj->initReservedSlot(MODULE_SLOT, PrivateValue(&module)); module.AddRef(); - cx->zone()->updateJitCodeMallocBytes(module.codeLength()); + cx->zone()->updateJitCodeMallocBytes(module.codeLengthTier()); return obj; } static bool GetBufferSource(JSContext* cx, JSObject* obj, unsigned errorNumber, MutableBytes* bytecode) { *bytecode = cx->new_<ShareableBytes>(); if (!*bytecode) @@ -1161,18 +1161,18 @@ WasmInstanceObject::getExportedFunction( return true; } const CodeRange& WasmInstanceObject::getExportedFunctionCodeRange(HandleFunction fun) { uint32_t funcIndex = ExportedFunctionToFuncIndex(fun); MOZ_ASSERT(exports().lookup(funcIndex)->value() == fun); - const Metadata& metadata = instance().metadata(); - return metadata.codeRanges[metadata.lookupFuncExport(funcIndex).codeRangeIndex()]; + const FuncExport& funcExport = instance().metadata().lookupFuncExport(funcIndex); + return instance().metadataTier().codeRanges[funcExport.codeRangeIndex()]; } /* static */ WasmFunctionScope* WasmInstanceObject::getFunctionScope(JSContext* cx, HandleWasmInstanceObject instanceObj, uint32_t funcIndex) { if (ScopeMap::Ptr p = instanceObj->scopes().lookup(funcIndex)) return p->value(); @@ -1711,18 +1711,18 @@ WasmTableObject::setImpl(JSContext* cx, #ifdef DEBUG RootedFunction f(cx); MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcIndex, &f)); MOZ_ASSERT(value == f); #endif Instance& instance = instanceObj->instance(); const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex); - const CodeRange& codeRange = instance.metadata().codeRanges[funcExport.codeRangeIndex()]; - void* code = instance.codeSegment().base() + codeRange.funcTableEntry(); + 
const CodeRange& codeRange = instance.metadataTier().codeRanges[funcExport.codeRangeIndex()]; + void* code = instance.codeBaseTier() + codeRange.funcTableEntry(); table.set(index, code, instance); } else { table.setNull(index); } args.rval().setUndefined(); return true; }
--- a/js/src/wasm/WasmModule.cpp +++ b/js/src/wasm/WasmModule.cpp @@ -37,111 +37,147 @@ using namespace js::wasm; using mozilla::IsNaN; const char wasm::InstanceExportField[] = "exports"; #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) // On MIPS, CodeLabels are instruction immediates so InternalLinks only // patch instruction immediates. -LinkData::InternalLink::InternalLink(Kind kind) +LinkDataTier::InternalLink::InternalLink(Kind kind) { MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate); } bool -LinkData::InternalLink::isRawPointerPatch() +LinkDataTier::InternalLink::isRawPointerPatch() { return false; } #else // On the rest, CodeLabels are raw pointers so InternalLinks only patch // raw pointers. -LinkData::InternalLink::InternalLink(Kind kind) +LinkDataTier::InternalLink::InternalLink(Kind kind) { MOZ_ASSERT(kind == CodeLabel || kind == RawPointer); } bool -LinkData::InternalLink::isRawPointerPatch() +LinkDataTier::InternalLink::isRawPointerPatch() { return true; } #endif size_t -LinkData::SymbolicLinkArray::serializedSize() const +LinkDataTier::SymbolicLinkArray::serializedSize() const { size_t size = 0; for (const Uint32Vector& offsets : *this) size += SerializedPodVectorSize(offsets); return size; } uint8_t* -LinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const +LinkDataTier::SymbolicLinkArray::serialize(uint8_t* cursor) const { for (const Uint32Vector& offsets : *this) cursor = SerializePodVector(cursor, offsets); return cursor; } const uint8_t* -LinkData::SymbolicLinkArray::deserialize(const uint8_t* cursor) +LinkDataTier::SymbolicLinkArray::deserialize(const uint8_t* cursor) { for (Uint32Vector& offsets : *this) { cursor = DeserializePodVector(cursor, &offsets); if (!cursor) return nullptr; } return cursor; } size_t -LinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +LinkDataTier::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const { size_t size = 0; for (const Uint32Vector& offsets : *this) size += offsets.sizeOfExcludingThis(mallocSizeOf); return size; } size_t -LinkData::serializedSize() const +LinkDataTier::serializedSize() const { return sizeof(pod()) + SerializedPodVectorSize(internalLinks) + symbolicLinks.serializedSize(); } uint8_t* -LinkData::serialize(uint8_t* cursor) const +LinkDataTier::serialize(uint8_t* cursor) const { + MOZ_ASSERT(mode == CompileMode::Ion); + cursor = WriteBytes(cursor, &pod(), sizeof(pod())); cursor = SerializePodVector(cursor, internalLinks); cursor = symbolicLinks.serialize(cursor); return cursor; } const uint8_t* -LinkData::deserialize(const uint8_t* cursor) +LinkDataTier::deserialize(const uint8_t* cursor) { (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) && (cursor = DeserializePodVector(cursor, &internalLinks)) && (cursor = symbolicLinks.deserialize(cursor)); return cursor; } size_t -LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +LinkDataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const { return internalLinks.sizeOfExcludingThis(mallocSizeOf) + symbolicLinks.sizeOfExcludingThis(mallocSizeOf); } +bool +LinkData::initTier(CompileMode mode) +{ + MOZ_ASSERT(!tier_); + tier_ = js::MakeUnique<LinkDataTier>(mode); + return tier_ != nullptr; +} + +size_t +LinkData::serializedSize() const +{ + return tier_->serializedSize(); +} + +uint8_t* +LinkData::serialize(uint8_t* cursor) const +{ + cursor = tier_->serialize(cursor); + return cursor; +} + +const uint8_t* +LinkData::deserialize(const uint8_t* cursor) +{ + (cursor = 
tier_->deserialize(cursor)); + return cursor; +} + +size_t +LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const +{ + return tier_->sizeOfExcludingThis(mallocSizeOf); +} + /* virtual */ void Module::serializedSize(size_t* maybeBytecodeSize, size_t* maybeCompiledSize) const { if (maybeBytecodeSize) *maybeBytecodeSize = bytecode_->bytes.length(); // The compiled debug code must not be saved, set compiled size to 0, // so Module::assumptionsMatch will return false during assumptions @@ -220,16 +256,19 @@ Module::deserialize(const uint8_t* bytec memcpy(bytecode->bytes.begin(), bytecodeBegin, bytecodeSize); Assumptions assumptions; const uint8_t* cursor = assumptions.deserialize(compiledBegin, compiledSize); if (!cursor) return nullptr; LinkData linkData; + if (!linkData.initTier(CompileMode::Ion)) + return nullptr; + cursor = linkData.deserialize(cursor); if (!cursor) return nullptr; ImportVector imports; cursor = DeserializeVector(cursor, &imports); if (!cursor) return nullptr; @@ -387,31 +426,31 @@ Module::addSizeOfMisc(MallocSizeOf mallo // segment/function body. bool Module::extractCode(JSContext* cx, MutableHandleValue vp) const { RootedPlainObject result(cx, NewBuiltinClassInstance<PlainObject>(cx)); if (!result) return false; - RootedObject code(cx, JS_NewUint8Array(cx, code_->segment().length())); + RootedObject code(cx, JS_NewUint8Array(cx, code_->segmentTier().length())); if (!code) return false; - memcpy(code->as<TypedArrayObject>().viewDataUnshared(), code_->segment().base(), code_->segment().length()); + memcpy(code->as<TypedArrayObject>().viewDataUnshared(), code_->segmentTier().base(), code_->segmentTier().length()); RootedValue value(cx, ObjectValue(*code)); if (!JS_DefineProperty(cx, result, "code", value, JSPROP_ENUMERATE)) return false; RootedObject segments(cx, NewDenseEmptyArray(cx)); if (!segments) return false; - for (const CodeRange& p : metadata().codeRanges) { + for (const CodeRange& p : metadataTier().codeRanges) { RootedObject segment(cx, NewObjectWithGivenProto<PlainObject>(cx, nullptr)); if (!segment) return false; value.setNumber((uint32_t)p.begin()); if (!JS_DefineProperty(cx, segment, "begin", value, JSPROP_ENUMERATE)) return false; @@ -504,30 +543,30 @@ Module::initSegments(JSContext* cx, } // Now that initialization can't fail partway through, write data/elem // segments into memories/tables. 
for (const ElemSegment& seg : elemSegments_) { Table& table = *tables[seg.tableIndex]; uint32_t offset = EvaluateInitExpr(globalImports, seg.offset); - const CodeRangeVector& codeRanges = metadata().codeRanges; - uint8_t* codeBase = instance.codeBase(); + const CodeRangeVector& codeRanges = metadataTier().codeRanges; + uint8_t* codeBase = instance.codeBaseTier(); for (uint32_t i = 0; i < seg.elemCodeRangeIndices.length(); i++) { uint32_t funcIndex = seg.elemFuncIndices[i]; if (funcIndex < funcImports.length() && IsExportedWasmFunction(funcImports[funcIndex])) { MOZ_ASSERT(!metadata().isAsmJS()); MOZ_ASSERT(!table.isTypedFunction()); HandleFunction f = funcImports[funcIndex]; WasmInstanceObject* exportInstanceObj = ExportedFunctionToInstanceObject(f); const CodeRange& cr = exportInstanceObj->getExportedFunctionCodeRange(f); Instance& exportInstance = exportInstanceObj->instance(); - table.set(offset + i, exportInstance.codeBase() + cr.funcTableEntry(), exportInstance); + table.set(offset + i, exportInstance.codeBaseTier() + cr.funcTableEntry(), exportInstance); } else { const CodeRange& cr = codeRanges[seg.elemCodeRangeIndices[i]]; uint32_t entryOffset = table.isTypedFunction() ? cr.funcNormalEntry() : cr.funcTableEntry(); table.set(offset + i, codeBase + entryOffset, instance); } } @@ -558,31 +597,31 @@ FindImportForFuncImport(const ImportVect funcImportIndex--; } MOZ_CRASH("ran out of imports"); } bool Module::instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports) const { - MOZ_ASSERT(funcImports.length() == metadata().funcImports.length()); + MOZ_ASSERT(funcImports.length() == metadataTier().funcImports.length()); if (metadata().isAsmJS()) return true; - for (size_t i = 0; i < metadata().funcImports.length(); i++) { + for (size_t i = 0; i < metadataTier().funcImports.length(); i++) { HandleFunction f = funcImports[i]; if (!IsExportedFunction(f) || ExportedFunctionToInstance(f).isAsmJS()) continue; uint32_t funcIndex = ExportedFunctionToFuncIndex(f); Instance& instance = ExportedFunctionToInstance(f); const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex); - if (funcExport.sig() != metadata().funcImports[i].sig()) { + if (funcExport.sig() != metadataTier().funcImports[i].sig()) { const Import& import = FindImportForFuncImport(imports_, i); JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMPORT_SIG, import.module.get(), import.field.get()); return false; } } return true; @@ -864,30 +903,33 @@ Module::instantiate(JSContext* cx, if (!instantiateMemory(cx, &memory)) return false; RootedWasmTableObject table(cx, tableImport); SharedTableVector tables; if (!instantiateTable(cx, &table, &tables)) return false; - auto globalSegment = GlobalSegment::create(linkData_.globalDataLength); + auto globalSegment = GlobalSegment::create(metadata().globalDataLength); if (!globalSegment) return false; SharedCode code(code_); if (metadata().debugEnabled) { // The first time through, use the pre-linked code in the module but // mark it as busy. Subsequently, instantiate the copy of the code // bytes that we keep around for debugging instead, because the debugger // may patch the pre-linked code at any time. 
if (!codeIsBusy_.compareExchange(false, true)) {
-            auto codeSegment = CodeSegment::create(*unlinkedCodeForDebugging_, *bytecode_,
-                                                   linkData_, metadata());
+            auto codeSegment = CodeSegment::create(CompileMode::Baseline,
+                                                   *unlinkedCodeForDebugging_,
+                                                   *bytecode_,
+                                                   linkData_.tier(),
+                                                   metadata());
             if (!codeSegment)
                 return false;
             code = js_new<Code>(Move(codeSegment), metadata());
             if (!code)
                 return false;
         }
     }
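The debug-instantiation hunk above hinges on a one-shot compare-exchange claim of the pre-linked code. A minimal standalone sketch of just that pattern, with std::atomic standing in for mozilla::Atomic and all names invented for the demo:

#include <atomic>
#include <cstdio>

// One-shot claim on a shared resource: the first caller to flip the flag
// from false to true wins and may use the pre-built resource; every later
// caller must build a private copy instead.
static std::atomic<bool> busy{false};

static const char*
Claim()
{
    bool expected = false;
    if (busy.compare_exchange_strong(expected, true))
        return "use pre-linked code";
    return "instantiate a fresh copy";
}

int
main()
{
    printf("%s\n", Claim());  // first call: use pre-linked code
    printf("%s\n", Claim());  // second call: instantiate a fresh copy
    return 0;
}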
--- a/js/src/wasm/WasmModule.h
+++ b/js/src/wasm/WasmModule.h
@@ -28,31 +28,36 @@ namespace js {
 namespace wasm {
 
 // LinkData contains all the metadata necessary to patch all the locations
 // that depend on the absolute address of a CodeSegment.
 //
 // LinkData is built incrementally by ModuleGenerator and then stored immutably
 // in Module.
 
-struct LinkDataCacheablePod
+struct LinkDataTierCacheablePod
 {
     uint32_t functionCodeLength;
-    uint32_t globalDataLength;
     uint32_t interruptOffset;
     uint32_t outOfBoundsOffset;
     uint32_t unalignedAccessOffset;
 
-    LinkDataCacheablePod() { mozilla::PodZero(this); }
+    LinkDataTierCacheablePod() { mozilla::PodZero(this); }
 };
 
-struct LinkData : LinkDataCacheablePod
+struct LinkDataTier : LinkDataTierCacheablePod
 {
-    LinkDataCacheablePod& pod() { return *this; }
-    const LinkDataCacheablePod& pod() const { return *this; }
+    CompileMode mode;
+
+    explicit LinkDataTier(CompileMode mode) : mode(mode) {
+        MOZ_ASSERT(mode == CompileMode::Ion || mode == CompileMode::Baseline);
+    }
+
+    LinkDataTierCacheablePod& pod() { return *this; }
+    const LinkDataTierCacheablePod& pod() const { return *this; }
 
     struct InternalLink {
         enum Kind {
             RawPointer,
             CodeLabel,
             InstructionImmediate
         };
         MOZ_INIT_OUTSIDE_CTOR uint32_t patchAtOffset;
@@ -69,18 +74,34 @@ struct LinkData : LinkDataCacheablePod
     };
 
     InternalLinkVector internalLinks;
     SymbolicLinkArray symbolicLinks;
 
     WASM_DECLARE_SERIALIZABLE(LinkData)
 };
 
-typedef UniquePtr<LinkData> UniqueLinkData;
-typedef UniquePtr<const LinkData> UniqueConstLinkData;
+typedef UniquePtr<LinkDataTier> UniqueLinkDataTier;
+
+struct LinkData
+{
+    // `tier_` and the means of accessing it will become more complicated once
+    // tiering is implemented.
+    UniqueLinkDataTier tier_;
+
+    LinkData() : tier_(nullptr) {}
+
+    // Construct the tier_ object.
+    bool initTier(CompileMode mode);
+
+    const LinkDataTier& tier() const { MOZ_ASSERT(tier_); return *tier_; }
+    LinkDataTier& tier() { MOZ_ASSERT(tier_); return *tier_; }
+
+    WASM_DECLARE_SERIALIZABLE(LinkData)
+};
 
 // Module represents a compiled wasm module and primarily provides two
 // operations: instantiation and serialization. A Module can be instantiated any
 // number of times to produce new Instance objects. A Module can be serialized
 // any number of times such that the serialized bytes can be deserialized later
 // to produce a new, equivalent Module.
 //
 // Fully linked-and-instantiated code (represented by Code and its owned
@@ -140,21 +161,22 @@ class Module : public JS::WasmModule
         elemSegments_(Move(elemSegments)),
         bytecode_(&bytecode),
         codeIsBusy_(false)
     {
         MOZ_ASSERT_IF(metadata().debugEnabled, unlinkedCodeForDebugging_);
     }
     ~Module() override { /* Note: can be called on any thread */ }
 
+    const MetadataTier& metadataTier() const { return code_->metadataTier(); }
     const Metadata& metadata() const { return code_->metadata(); }
     const ImportVector& imports() const { return imports_; }
     const ExportVector& exports() const { return exports_; }
     const Bytes& bytecode() const { return bytecode_->bytes; }
-    uint32_t codeLength() const { return code_->segment().length(); }
+    uint32_t codeLengthTier() const { return code_->segmentTier().length(); }
 
     // Instantiate this module with the given imports:
 
     bool instantiate(JSContext* cx,
                      Handle<FunctionVector> funcImports,
                      HandleWasmTableObject tableImport,
                      HandleWasmMemoryObject memoryImport,
                      const ValVector& globalImports,
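For illustration, the new LinkData wrapper boils down to assert-checked ownership of a single heap-allocated tier. A compilable sketch of just that shape, using std::unique_ptr in place of js::UniquePtr; the *Sketch names are mine, not the patch's:

#include <cassert>
#include <memory>

enum class CompileMode { Baseline, Ion };

struct LinkDataTierSketch {
    CompileMode mode;
    explicit LinkDataTierSketch(CompileMode mode) : mode(mode) {}
};

// The tier is allocated on demand by initTier() and the accessor asserts
// that it exists, mirroring LinkData::tier() above.
struct LinkDataSketch {
    std::unique_ptr<LinkDataTierSketch> tier_;

    bool initTier(CompileMode mode) {
        assert(!tier_);
        tier_ = std::make_unique<LinkDataTierSketch>(mode);
        return tier_ != nullptr;
    }

    LinkDataTierSketch& tier() { assert(tier_); return *tier_; }
};

int main() {
    LinkDataSketch linkData;
    if (!linkData.initTier(CompileMode::Ion))
        return 1;
    return linkData.tier().mode == CompileMode::Ion ? 0 : 1;
}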
--- a/js/src/wasm/WasmSignalHandlers.cpp +++ b/js/src/wasm/WasmSignalHandlers.cpp @@ -658,50 +658,50 @@ ComputeAccessAddress(EMULATOR_CONTEXT* c return reinterpret_cast<uint8_t*>(result); } MOZ_COLD static void HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress, const Instance& instance, WasmActivation* activation, uint8_t** ppc) { - MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc)); + MOZ_RELEASE_ASSERT(instance.codeSegmentTier().containsFunctionPC(pc)); const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc); if (!memoryAccess) { // If there is no associated MemoryAccess for the faulting PC, this must be // experimental SIMD.js or Atomics. When these are converted to // non-experimental wasm features, this case, as well as outOfBoundsCode, // can be removed. activation->startInterrupt(pc, ContextToFP(context)); - *ppc = instance.codeSegment().outOfBoundsCode(); + *ppc = instance.codeSegmentTier().outOfBoundsCode(); return; } - MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBase())); + MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBaseTier())); // On WASM_HUGE_MEMORY platforms, asm.js code may fault. asm.js does not // trap on fault and so has no trap out-of-line path. Instead, stores are // silently ignored (by advancing the pc past the store and resuming) and // loads silently succeed with a JS-semantics-determined value. if (memoryAccess->hasTrapOutOfLineCode()) { - *ppc = memoryAccess->trapOutOfLineCode(instance.codeBase()); + *ppc = memoryAccess->trapOutOfLineCode(instance.codeBaseTier()); return; } MOZ_RELEASE_ASSERT(instance.isAsmJS()); // Disassemble the instruction which caused the trap so that we can extract // information about it and decide what to do. Disassembler::HeapAccess access; uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access); const Disassembler::ComplexAddress& address = access.address(); MOZ_RELEASE_ASSERT(end > pc); - MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(end)); + MOZ_RELEASE_ASSERT(instance.codeSegmentTier().containsFunctionPC(end)); // Check x64 asm.js heap access invariants. MOZ_RELEASE_ASSERT(address.disp() >= 0); MOZ_RELEASE_ASSERT(address.base() == HeapReg.code()); MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code()); MOZ_RELEASE_ASSERT(address.scale() == 0); if (address.hasBase()) { uintptr_t base; @@ -804,28 +804,28 @@ HandleMemoryAccess(EMULATOR_CONTEXT* con } #else // WASM_HUGE_MEMORY MOZ_COLD static void HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress, const Instance& instance, WasmActivation* activation, uint8_t** ppc) { - MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc)); + MOZ_RELEASE_ASSERT(instance.codeSegmentTier().containsFunctionPC(pc)); const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc); if (!memoryAccess) { // See explanation in the WASM_HUGE_MEMORY HandleMemoryAccess. 
activation->startInterrupt(pc, ContextToFP(context)); - *ppc = instance.codeSegment().outOfBoundsCode(); + *ppc = instance.codeSegmentTier().outOfBoundsCode(); return; } MOZ_RELEASE_ASSERT(memoryAccess->hasTrapOutOfLineCode()); - *ppc = memoryAccess->trapOutOfLineCode(instance.codeBase()); + *ppc = memoryAccess->trapOutOfLineCode(instance.codeBaseTier()); } #endif // WASM_HUGE_MEMORY MOZ_COLD static bool IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress) { size_t accessLimit = instance.memoryMappedSize(); @@ -861,29 +861,29 @@ HandleFault(PEXCEPTION_POINTERS exceptio WasmActivation* activation = MaybeActiveActivation(cx); if (!activation) return false; const Code* code = activation->compartment()->wasm.lookupCode(pc); if (!code) return false; - if (!code->segment().containsFunctionPC(pc)) { + if (!code->segmentTier().containsFunctionPC(pc)) { // On Windows, it is possible for InterruptRunningJitCode to execute // between a faulting heap access and the handling of the fault due // to InterruptRunningJitCode's use of SuspendThread. When this happens, // after ResumeThread, the exception handler is called with pc equal to // CodeSegment.interrupt, which is logically wrong. The Right Thing would // be for the OS to make fault-handling atomic (so that CONTEXT.pc was // always the logically-faulting pc). Fortunately, we can detect this // case and silence the exception ourselves (the exception will // retrigger after the interrupt jumps back to resumePC). - return pc == code->segment().interruptCode() && + return pc == code->segmentTier().interruptCode() && activation->interrupted() && - code->segment().containsFunctionPC(activation->resumePC()); + code->segmentTier().containsFunctionPC(activation->resumePC()); } const Instance* instance = LookupFaultingInstance(activation, pc, ContextToFP(context)); if (!instance) return false; uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(record->ExceptionInformation[1]); @@ -1016,17 +1016,17 @@ HandleMachException(JSContext* cx, const // normally only be accessed by the cx's active thread. AutoNoteSingleThreadedRegion anstr; WasmActivation* activation = MaybeActiveActivation(cx); if (!activation) return false; const Instance* instance = LookupFaultingInstance(activation, pc, ContextToFP(&context)); - if (!instance || !instance->codeSegment().containsFunctionPC(pc)) + if (!instance || !instance->codeSegmentTier().containsFunctionPC(pc)) return false; uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]); // This check isn't necessary, but, since we can, check anyway to make // sure we aren't covering up a real bug. if (!IsHeapAccessAddress(*instance, faultingAddress)) return false; @@ -1223,17 +1223,17 @@ HandleFault(int signum, siginfo_t* info, return false; AutoSetHandlingSegFault handling(cx); WasmActivation* activation = MaybeActiveActivation(cx); if (!activation) return false; const Instance* instance = LookupFaultingInstance(activation, pc, ContextToFP(context)); - if (!instance || !instance->codeSegment().containsFunctionPC(pc)) + if (!instance || !instance->codeSegmentTier().containsFunctionPC(pc)) return false; uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr); // Although it's not strictly necessary, to make sure we're not covering up // any real bugs, check that the faulting address is indeed in the // instance's memory. 
if (!faultingAddress) { @@ -1254,17 +1254,17 @@ HandleFault(int signum, siginfo_t* info, #ifdef JS_CODEGEN_ARM if (signal == Signal::BusError) { // TODO: We may see a bus error for something that is an unaligned access that // partly overlaps the end of the heap. In this case, it is an out-of-bounds // error and we should signal that properly, but to do so we must inspect // the operand of the failed access. activation->startInterrupt(pc, ContextToFP(context)); - *ppc = instance->codeSegment().unalignedAccessCode(); + *ppc = instance->codeSegmentTier().unalignedAccessCode(); return true; } #endif HandleMemoryAccess(context, pc, faultingAddress, *instance, activation, ppc); return true; } @@ -1344,17 +1344,17 @@ RedirectJitCodeToInterruptCheck(JSContex #endif // Only interrupt in function code so that the frame iterators have the // invariant that resumePC always has a function CodeRange and we can't // get into any weird interrupt-during-interrupt-stub cases. if (!cx->compartment()) return false; const Code* code = cx->compartment()->wasm.lookupCode(pc); - if (!code || !code->segment().containsFunctionPC(pc)) + if (!code || !code->segmentTier().containsFunctionPC(pc)) return false; // Only probe cx->activation() via MaybeActiveActivation after we know the // pc is in wasm code. This way we don't depend on signal-safe update of // cx->activation(). WasmActivation* activation = MaybeActiveActivation(cx); MOZ_ASSERT(activation); @@ -1370,17 +1370,17 @@ RedirectJitCodeToInterruptCheck(JSContex // The out-of-bounds/unaligned trap paths which call startInterrupt() go // through function code, so test if already interrupted. These paths are // temporary though, so this case can be removed later. if (activation->interrupted()) return false; activation->startInterrupt(pc, fp); - *ContextToPC(context) = code->segment().interruptCode(); + *ContextToPC(context) = code->segmentTier().interruptCode(); #endif return true; } #if !defined(XP_WIN) // For the interrupt signal, pick a signal number that: // - is not otherwise used by mozilla or standard libraries
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -1173,16 +1173,24 @@ struct Assumptions
 // A Module can either be asm.js or wasm.
 
 enum ModuleKind
 {
     Wasm,
     AsmJS
 };
 
+// Code can be compiled either with the Baseline compiler or the Ion compiler.
+
+enum class CompileMode
+{
+    Baseline,
+    Ion
+};
+
 // Represents the resizable limits of memories and tables.
 
 struct Limits
 {
     uint32_t initial;
     Maybe<uint32_t> maximum;
 };
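A toy example of threading the new enum through a tier decision, in the spirit of the CodeSegment::create() call site earlier in this changeset; InitialCompileMode is a hypothetical helper, not something this patch adds:

#include <cstdio>

// Mirrors wasm::CompileMode from WasmTypes.h.
enum class CompileMode { Baseline, Ion };

// Hypothetical policy helper: debugging requires baseline code so the
// debugger can patch it; otherwise compile with Ion.
static CompileMode
InitialCompileMode(bool debugEnabled)
{
    return debugEnabled ? CompileMode::Baseline : CompileMode::Ion;
}

int
main()
{
    CompileMode mode = InitialCompileMode(/* debugEnabled = */ true);
    printf("mode = %s\n", mode == CompileMode::Baseline ? "Baseline" : "Ion");
    return 0;
}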
--- a/layout/generic/nsIFrame.h
+++ b/layout/generic/nsIFrame.h
@@ -782,22 +782,23 @@ public:
    *
    * Callers outside of libxul should use nsIDOMWindow::GetComputedStyle()
    * instead of these accessors.
    *
    * Callers can use Style*WithOptionalParam if they're in a function that
    * accepts an *optional* pointer to the style struct.
    */
 #define STYLE_STRUCT(name_, checkdata_cb_) \
-  const nsStyle##name_ * Style##name_ () const { \
+  const nsStyle##name_ * Style##name_ () const MOZ_NONNULL_RETURN { \
     NS_ASSERTION(mStyleContext, "No style context found!"); \
     return mStyleContext->Style##name_ (); \
   } \
   const nsStyle##name_ * Style##name_##WithOptionalParam( \
-    const nsStyle##name_ * aStyleStruct) const { \
+    const nsStyle##name_ * aStyleStruct) const \
+    MOZ_NONNULL_RETURN { \
     if (aStyleStruct) { \
       MOZ_ASSERT(aStyleStruct == Style##name_()); \
       return aStyleStruct; \
     } \
     return Style##name_(); \
   }
 #include "nsStyleStructList.h"
 #undef STYLE_STRUCT
--- a/layout/style/nsStyleContext.h
+++ b/layout/style/nsStyleContext.h
@@ -340,28 +340,28 @@ public:
    * This function will NOT return null (even when out of memory) when
    * given a valid style struct ID, so the result does not need to be
    * null-checked.
    *
    * The typesafe functions below are preferred to the use of this
    * function, both because they're easier to read and because they're
    * faster.
    */
-  const void* NS_FASTCALL StyleData(nsStyleStructID aSID);
+  const void* NS_FASTCALL StyleData(nsStyleStructID aSID) MOZ_NONNULL_RETURN;
 
   /**
    * Define typesafe getter functions for each style struct by
    * preprocessing the list of style structs. These functions are the
    * preferred way to get style data. The macro creates functions like:
    *   const nsStyleBorder* StyleBorder();
    *   const nsStyleColor* StyleColor();
    */
-  #define STYLE_STRUCT(name_, checkdata_cb_) \
-    const nsStyle##name_ * Style##name_() { \
-      return DoGetStyle##name_<true>(); \
+  #define STYLE_STRUCT(name_, checkdata_cb_)                   \
+    const nsStyle##name_ * Style##name_() MOZ_NONNULL_RETURN { \
+      return DoGetStyle##name_<true>();                        \
     }
   #include "nsStyleStructList.h"
   #undef STYLE_STRUCT
 
   /**
    * Equivalent to StyleFoo(), except that we skip the cache write during the
    * servo traversal. This can cause incorrect behavior if used improperly,
    * since we won't record that layout potentially depends on the values in
--- a/layout/xul/nsImageBoxFrame.cpp +++ b/layout/xul/nsImageBoxFrame.cpp @@ -338,93 +338,102 @@ nsImageBoxFrame::BuildDisplayList(nsDisp aLists.Content()->AppendToTop(&list); } DrawResult nsImageBoxFrame::PaintImage(nsRenderingContext& aRenderingContext, const nsRect& aDirtyRect, nsPoint aPt, uint32_t aFlags) { - nsRect constraintRect; - GetXULClientRect(constraintRect); - - constraintRect += aPt; - if (!mImageRequest) { // This probably means we're drawn by a native theme. return DrawResult::SUCCESS; } - // don't draw if the image is not dirty - // XXX(seth): Can this actually happen anymore? - nsRect dirty; - if (!dirty.IntersectRect(aDirtyRect, constraintRect)) { - return DrawResult::TEMPORARY_ERROR; - } - // Don't draw if the image's size isn't available. uint32_t imgStatus; if (!NS_SUCCEEDED(mImageRequest->GetImageStatus(&imgStatus)) || !(imgStatus & imgIRequest::STATUS_SIZE_AVAILABLE)) { return DrawResult::NOT_READY; } nsCOMPtr<imgIContainer> imgCon; mImageRequest->GetImage(getter_AddRefs(imgCon)); if (!imgCon) { return DrawResult::NOT_READY; } + Maybe<nsPoint> anchorPoint; + nsRect dest = GetDestRect(aPt, anchorPoint); + + // don't draw if the image is not dirty + // XXX(seth): Can this actually happen anymore? + nsRect dirty; + if (!dirty.IntersectRect(aDirtyRect, dest)) { + return DrawResult::TEMPORARY_ERROR; + } + bool hasSubRect = !mUseSrcAttr && (mSubRect.width > 0 || mSubRect.height > 0); - Maybe<nsPoint> anchorPoint; + Maybe<SVGImageContext> svgContext; + SVGImageContext::MaybeStoreContextPaint(svgContext, this, imgCon); + return nsLayoutUtils::DrawSingleImage( + *aRenderingContext.ThebesContext(), + PresContext(), imgCon, + nsLayoutUtils::GetSamplingFilterForFrame(this), + dest, dirty, + svgContext, aFlags, + anchorPoint.ptrOr(nullptr), + hasSubRect ? &mSubRect : nullptr); +} + +nsRect +nsImageBoxFrame::GetDestRect(const nsPoint& aOffset, Maybe<nsPoint>& aAnchorPoint) +{ + nsCOMPtr<imgIContainer> imgCon; + mImageRequest->GetImage(getter_AddRefs(imgCon)); + MOZ_ASSERT(imgCon); + + nsRect clientRect; + GetXULClientRect(clientRect); + clientRect += aOffset; nsRect dest; if (!mUseSrcAttr) { // Our image (if we have one) is coming from the CSS property // 'list-style-image' (combined with '-moz-image-region'). For now, ignore // 'object-fit' & 'object-position' in this case, and just fill our rect. // XXXdholbert Should we even honor these properties in this case? They only // apply to replaced elements, and I'm not sure we count as a replaced // element when our image data is determined by CSS. - dest = constraintRect; + dest = clientRect; } else { // Determine dest rect based on intrinsic size & ratio, along with // 'object-fit' & 'object-position' properties: IntrinsicSize intrinsicSize; nsSize intrinsicRatio; if (mIntrinsicSize.width > 0 && mIntrinsicSize.height > 0) { // Image has a valid size; use it as intrinsic size & ratio. intrinsicSize.width.SetCoordValue(mIntrinsicSize.width); intrinsicSize.height.SetCoordValue(mIntrinsicSize.height); intrinsicRatio = mIntrinsicSize; } else { // Image doesn't have a (valid) intrinsic size. // Try to look up intrinsic ratio and use that at least. 
imgCon->GetIntrinsicRatio(&intrinsicRatio); } - anchorPoint.emplace(); - dest = nsLayoutUtils::ComputeObjectDestRect(constraintRect, + aAnchorPoint.emplace(); + dest = nsLayoutUtils::ComputeObjectDestRect(clientRect, intrinsicSize, intrinsicRatio, StylePosition(), - anchorPoint.ptr()); + aAnchorPoint.ptr()); } - Maybe<SVGImageContext> svgContext; - SVGImageContext::MaybeStoreContextPaint(svgContext, this, imgCon); - - return nsLayoutUtils::DrawSingleImage( - *aRenderingContext.ThebesContext(), - PresContext(), imgCon, - nsLayoutUtils::GetSamplingFilterForFrame(this), - dest, dirty, - svgContext, aFlags, - anchorPoint.ptrOr(nullptr), - hasSubRect ? &mSubRect : nullptr); + return dest; } void nsDisplayXULImage::Paint(nsDisplayListBuilder* aBuilder, nsRenderingContext* aCtx) { // Even though we call StartDecoding when we get a new image we pass // FLAG_SYNC_DECODE_IF_FAST here for the case where the size we draw at is not // the intrinsic size of the image and we aren't likely to implement predictive @@ -490,22 +499,18 @@ nsDisplayXULImage::GetImage() imageFrame->mImageRequest->GetImage(getter_AddRefs(imgCon)); return imgCon.forget(); } nsRect nsDisplayXULImage::GetDestRect() { - nsImageBoxFrame* imageFrame = static_cast<nsImageBoxFrame*>(mFrame); - - nsRect clientRect; - imageFrame->GetXULClientRect(clientRect); - - return clientRect + ToReferenceFrame(); + Maybe<nsPoint> anchorPoint; + return static_cast<nsImageBoxFrame*>(mFrame)->GetDestRect(ToReferenceFrame(), anchorPoint); } bool nsImageBoxFrame::CanOptimizeToImageLayer() { bool hasSubRect = !mUseSrcAttr && (mSubRect.width > 0 || mSubRect.height > 0); if (hasSubRect) { return false;
--- a/layout/xul/nsImageBoxFrame.h
+++ b/layout/xul/nsImageBoxFrame.h
@@ -92,16 +92,18 @@ public:
   virtual ~nsImageBoxFrame();
 
   DrawResult PaintImage(nsRenderingContext& aRenderingContext,
                         const nsRect& aDirtyRect,
                         nsPoint aPt, uint32_t aFlags);
 
   bool CanOptimizeToImageLayer();
 
+  nsRect GetDestRect(const nsPoint& aOffset, Maybe<nsPoint>& aAnchorPoint);
+
 protected:
   explicit nsImageBoxFrame(nsStyleContext* aContext);
 
   virtual void GetImageSize();
 
 private:
   nsresult OnSizeAvailable(imgIRequest* aRequest, imgIContainer* aImage);
   nsresult OnDecodeComplete(imgIRequest* aRequest);
--- a/mfbt/Attributes.h
+++ b/mfbt/Attributes.h
@@ -142,16 +142,31 @@
  *   MOZ_NONNULL(1, 2) int foo(char *p, char *q);
  */
 #if defined(__GNUC__) || defined(__clang__)
 #  define MOZ_NONNULL(...) __attribute__ ((nonnull(__VA_ARGS__)))
 #else
 #  define MOZ_NONNULL(...)
 #endif
 
+/**
+ * MOZ_NONNULL_RETURN tells the compiler that the function's return value is
+ * guaranteed to be a non-null pointer, which may enable the compiler to
+ * optimize better at call sites.
+ *
+ * Place this attribute at the end of a function declaration. For example,
+ *
+ *   char* foo(char *p, char *q) MOZ_NONNULL_RETURN;
+ */
+#if defined(__GNUC__) || defined(__clang__)
+#  define MOZ_NONNULL_RETURN __attribute__ ((returns_nonnull))
+#else
+#  define MOZ_NONNULL_RETURN
+#endif
+
 /*
  * MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS, specified at the end of a function
  * declaration, indicates that for the purposes of static analysis, this
  * function does not return. (The function definition does not need to be
  * annotated.)
  *
  *   MOZ_ReportCrash(const char* s, const char* file, int ln)
  *     MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS
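A self-contained example of the new attribute in use, showing the guarantee a caller gets; the function and globals here are invented for the demo, not part of the patch:

#include <cstdio>
#include <cstring>

#if defined(__GNUC__) || defined(__clang__)
#  define MOZ_NONNULL_RETURN __attribute__ ((returns_nonnull))
#else
#  define MOZ_NONNULL_RETURN
#endif

static char gEmpty[] = "";
static char gBuf[16] = "hello";

// The attribute goes on the declaration; with returns_nonnull the compiler
// may fold away null checks on the result at call sites.
char* GetBuffer(bool useEmpty) MOZ_NONNULL_RETURN;

char*
GetBuffer(bool useEmpty)
{
    return useEmpty ? gEmpty : gBuf;  // never null by construction
}

int
main()
{
    // No null check needed here; the attribute documents and enforces that.
    printf("%zu\n", strlen(GetBuffer(false)));
    return 0;
}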
--- a/mobile/android/base/java/org/mozilla/gecko/IntentHelper.java
+++ b/mobile/android/base/java/org/mozilla/gecko/IntentHelper.java
@@ -276,16 +276,22 @@ public final class IntentHelper implemen
         final Intent intent;
         try {
             intent = Intent.parseUri(targetURI, 0);
         } catch (final URISyntaxException e) {
             Log.e(LOGTAG, "Unable to parse URI - " + e);
             return null;
         }
 
+        final Uri data = intent.getData();
+        if (data != null && "file".equals(data.normalizeScheme().getScheme())) {
+            Log.w(LOGTAG, "Blocked intent with \"file://\" data scheme.");
+            return null;
+        }
+
         // Only open applications which can accept arbitrary data from a browser.
         intent.addCategory(Intent.CATEGORY_BROWSABLE);
 
         // Prevent site from explicitly opening our internal activities, which can leak data.
         intent.setComponent(null);
         nullIntentSelector(intent);
 
         return intent;
--- a/mobile/android/chrome/content/browser.js
+++ b/mobile/android/chrome/content/browser.js
@@ -4499,19 +4499,16 @@ Tab.prototype = {
         }
       }
 
       // Update the page actions URI for helper apps.
       if (BrowserApp.selectedTab == this) {
         ExternalApps.updatePageActionUri(fixedURI);
       }
 
-      // Strip reader mode URI and also make it exposable if needed
-      fixedURI = this._stripAboutReaderURL(fixedURI);
-
       let message = {
         type: "Content:LocationChange",
         tabID: this.id,
         uri: truncate(fixedURI.spec, MAX_URI_LENGTH),
         userRequested: this.userRequested || "",
         baseDomain: baseDomain,
         contentType: (contentType ? contentType : ""),
         sameDocument: sameDocument,
@@ -4528,20 +4525,16 @@ Tab.prototype = {
       // XXX This code assumes that this is the earliest hook we have at which
       // browser.contentDocument is changed to the new document we're loading
       this.contentDocumentIsDisplayed = false;
       this.hasTouchListener = false;
 
       Services.obs.notifyObservers(this.browser, "Session:NotifyLocationChange");
     }
   },
 
-  _stripAboutReaderURL: function (originalURI) {
-    return ReaderMode.getOriginalUrlObjectForDisplay(originalURI.spec) || originalURI;
-  },
-
   // Properties used to cache security state used to update the UI
   _state: null,
   _hostChanged: false, // onLocationChange will flip this bit
 
   onSecurityChange: function(aWebProgress, aRequest, aState) {
     // Don't need to do anything if the data we use to update the UI hasn't changed
     if (this._state == aState && !this._hostChanged)
       return;
--- a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/mozglue/SafeIntent.java
+++ b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/mozglue/SafeIntent.java
@@ -20,16 +20,17 @@ import java.util.ArrayList;
  * for more.
  */
 public class SafeIntent {
     private static final String LOGTAG = "Gecko" + SafeIntent.class.getSimpleName();
 
     private final Intent intent;
 
     public SafeIntent(final Intent intent) {
+        stripDataUri(intent);
         this.intent = intent;
     }
 
     public boolean hasExtra(String name) {
         try {
             return intent.hasExtra(name);
         } catch (OutOfMemoryError e) {
             Log.w(LOGTAG, "Couldn't determine if intent had an extra: OOM. Malformed?");
@@ -126,9 +127,24 @@ public class SafeIntent {
             Log.w(LOGTAG, "Couldn't get intent data.", e);
             return null;
         }
     }
 
     public Intent getUnsafe() {
         return intent;
     }
+
+    private static void stripDataUri(final Intent intent) {
+        // We should limit intent filters and check incoming intents against a
+        // whitelist, but for now we just strip 'about:reader?url='.
+        if (intent != null && intent.getData() != null) {
+            final String url = intent.getData().toString();
+            final String prefix = "about:reader?url=";
+            if (url != null && url.startsWith(prefix)) {
+                final String strippedUrl = url.replace(prefix, "");
+                if (strippedUrl != null) {
+                    intent.setData(Uri.parse(strippedUrl));
+                }
+            }
+        }
+    }
 }
--- a/netwerk/streamconv/converters/nsDirIndexParser.cpp +++ b/netwerk/streamconv/converters/nsDirIndexParser.cpp @@ -27,31 +27,30 @@ NS_IMPL_ISUPPORTS(nsDirIndexParser, nsDirIndexParser::nsDirIndexParser() { } nsresult nsDirIndexParser::Init() { mLineStart = 0; mHasDescription = false; - mFormat = nullptr; + mFormat[0] = -1; mozilla::dom::FallbackEncoding::FromLocale(mEncoding); nsresult rv; // XXX not threadsafe if (gRefCntParser++ == 0) rv = CallGetService(NS_ITEXTTOSUBURI_CONTRACTID, &gTextToSubURI); else rv = NS_OK; return rv; } nsDirIndexParser::~nsDirIndexParser() { - delete[] mFormat; // XXX not threadsafe if (--gRefCntParser == 0) { NS_IF_RELEASE(gTextToSubURI); } } NS_IMETHODIMP nsDirIndexParser::SetListener(nsIDirIndexListener* aListener) { @@ -117,51 +116,24 @@ nsDirIndexParser::gFieldTable[] = { { "File-Type", FIELD_FILETYPE }, { nullptr, FIELD_UNKNOWN } }; nsrefcnt nsDirIndexParser::gRefCntParser = 0; nsITextToSubURI *nsDirIndexParser::gTextToSubURI; nsresult -nsDirIndexParser::ParseFormat(const char* aFormatStr) { +nsDirIndexParser::ParseFormat(const char* aFormatStr) +{ // Parse a "200" format line, and remember the fields and their // ordering in mFormat. Multiple 200 lines stomp on each other. - - // Lets find out how many elements we have. - // easier to do this then realloc - const char* pos = aFormatStr; - unsigned int num = 0; - do { - while (*pos && nsCRT::IsAsciiSpace(char16_t(*pos))) - ++pos; - - ++num; - // There are a maximum of six allowed header fields (doubled plus - // terminator, just in case) -- Bug 443299 - if (num > (2 * ArrayLength(gFieldTable))) - return NS_ERROR_UNEXPECTED; + unsigned int formatNum = 0; + mFormat[0] = -1; - if (! *pos) - break; - - while (*pos && !nsCRT::IsAsciiSpace(char16_t(*pos))) - ++pos; - - } while (*pos); - - delete[] mFormat; - mFormat = new int[num+1]; - // Prevent nullptr Deref - Bug 443299 - if (mFormat == nullptr) - return NS_ERROR_OUT_OF_MEMORY; - int formatNum=0; do { - mFormat[formatNum] = -1; - while (*aFormatStr && nsCRT::IsAsciiSpace(char16_t(*aFormatStr))) ++aFormatStr; if (! *aFormatStr) break; nsAutoCString name; int32_t len = 0; @@ -176,33 +148,33 @@ nsDirIndexParser::ParseFormat(const char // All tokens are case-insensitive - http://www.mozilla.org/projects/netlib/dirindexformat.html if (name.LowerCaseEqualsLiteral("description")) mHasDescription = true; for (Field* i = gFieldTable; i->mName; ++i) { if (name.EqualsIgnoreCase(i->mName)) { mFormat[formatNum] = i->mType; - ++formatNum; + mFormat[++formatNum] = -1; break; } } - } while (*aFormatStr); + } while (*aFormatStr && (formatNum < (ArrayLength(mFormat)-1))); return NS_OK; } nsresult nsDirIndexParser::ParseData(nsIDirIndex *aIdx, char* aDataStr, int32_t aLineLen) { // Parse a "201" data line, using the field ordering specified in // mFormat. - if (!mFormat || (mFormat[0] == -1)) { + if(mFormat[0] == -1) { // Ignore if we haven't seen a format yet. return NS_OK; } nsresult rv = NS_OK; nsAutoCString filename; int32_t lineLen = aLineLen;
--- a/netwerk/streamconv/converters/nsDirIndexParser.h
+++ b/netwerk/streamconv/converters/nsDirIndexParser.h
@@ -42,17 +42,17 @@ public:
 protected:
     nsCOMPtr<nsIDirIndexListener> mListener;
 
     nsCString mEncoding;
     nsCString mComment;
     nsCString mBuf;
     int32_t mLineStart;
     bool mHasDescription;
-    int* mFormat;
+    int mFormat[8];
 
     nsresult ProcessData(nsIRequest *aRequest, nsISupports *aCtxt);
     nsresult ParseFormat(const char* buf);
     nsresult ParseData(nsIDirIndex* aIdx, char* aDataStr, int32_t lineLen);
 
     struct Field {
         const char *mName;
         fieldType mType;
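The fixed-size mFormat[8] with a -1 terminator is the heart of this fix: a hostile "200" format line can no longer drive an allocation sized by attacker-controlled input. A standalone sketch of the bounded parse loop, with a made-up field table standing in for gFieldTable:

#include <cctype>
#include <cstdio>
#include <cstring>

// Illustrative field names; the real parser matches against gFieldTable.
static const char* const kFields[] = { "filename", "content-length", "last-modified" };

int
main()
{
    const char* line = "filename content-length last-modified bogus bogus";
    int format[8];              // at most 7 fields plus the -1 terminator
    unsigned formatNum = 0;
    format[0] = -1;

    const char* p = line;
    while (*p && formatNum < (sizeof(format) / sizeof(format[0])) - 1) {
        while (*p && isspace((unsigned char)*p))   // skip leading whitespace
            ++p;
        if (!*p)
            break;
        const char* start = p;
        while (*p && !isspace((unsigned char)*p))  // scan one token
            ++p;
        size_t len = (size_t)(p - start);
        for (unsigned i = 0; i < sizeof(kFields) / sizeof(kFields[0]); ++i) {
            // Unknown tokens are skipped without consuming a format slot,
            // matching the patched ParseFormat behavior.
            if (len == strlen(kFields[i]) && strncmp(start, kFields[i], len) == 0) {
                format[formatNum] = (int)i;
                format[++formatNum] = -1;          // keep the terminator valid
                break;
            }
        }
    }

    for (unsigned i = 0; format[i] != -1; ++i)
        printf("field %u -> %d\n", i, format[i]);
    return 0;
}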
deleted file mode 100644
--- a/testing/web-platform/meta/XMLHttpRequest/open-url-worker-origin.htm.ini
+++ /dev/null
@@ -1,8 +0,0 @@
-[open-url-worker-origin.htm]
-  type: testharness
-  [Referer header]
-    expected: FAIL
-
-  [Origin header]
-    expected: FAIL
-
--- a/testing/web-platform/tests/XMLHttpRequest/open-url-worker-origin.htm
+++ b/testing/web-platform/tests/XMLHttpRequest/open-url-worker-origin.htm
@@ -7,18 +7,18 @@
 <script src="/resources/testharnessreport.js"></script>
 <link rel="help" href="https://xhr.spec.whatwg.org/#the-open()-method" data-tested-assertations="following::OL[1]/LI[3] following::OL[1]/LI[3]/ol[1]/li[1] following::OL[1]/LI[3]/ol[1]/li[2] following::OL[1]/LI[3]/ol[1]/li[3]" />
 </head>
 <body>
 <div id="log"></div>
 <script type="text/javascript">
 var test = async_test() // This "test" does not actually do any assertions. It's just there to have multiple, separate, asynchronous sub-tests.
 var expectations = {
-  'Referer header': 'referer: '+(location.href.replace(/[^/]*$/, ''))+"resources/workerxhr-origin-referrer.js\n",
-  'Origin header': 'origin: '+location.protocol+'//'+location.hostname+((location.port === "")?"":":"+location.port)+'\n',
+  'Referer header': 'Referer: '+(location.href.replace(/[^/]*$/, ''))+"resources/workerxhr-origin-referrer.js\n",
+  'Origin header': 'Origin: '+location.protocol+'//'+location.hostname+((location.port === "")?"":":"+location.port)+'\n',
   'Request URL test' : (location.href.replace(/[^/]*$/, ''))+'resources/requri.py?full'
 }
 // now start the worker
 var worker = new Worker("resources/workerxhr-origin-referrer.js")
 worker.onmessage = function (e) {
   var subtest = async_test(e.data.test)
   subtest.step(function(){
     var thisExpectation = expectations[e.data.test]
--- a/toolkit/components/url-classifier/tests/mochitest/chrome.ini
+++ b/toolkit/components/url-classifier/tests/mochitest/chrome.ini
@@ -1,14 +1,15 @@
 [DEFAULT]
 skip-if = os == 'android'
 support-files =
   allowlistAnnotatedFrame.html
   classifiedAnnotatedFrame.html
   classifiedAnnotatedPBFrame.html
+  trackingRequest.html
   bug_1281083.html
   report.sjs
   gethash.sjs
   classifierCommon.js
   classifierHelper.js
   head.js
 
 [test_lookup_system_principal.html]
@@ -22,8 +23,9 @@ tags = trackingprotection
 [test_trackingprotection_bug1157081.html]
 tags = trackingprotection
 [test_trackingprotection_whitelist.html]
 tags = trackingprotection
 [test_safebrowsing_bug1272239.html]
 [test_donottrack.html]
 [test_classifier_changetablepref.html]
 [test_reporturl.html]
+[test_trackingprotection_bug1312515.html]
--- a/toolkit/components/url-classifier/tests/mochitest/mochitest.ini
+++ b/toolkit/components/url-classifier/tests/mochitest/mochitest.ini
@@ -10,16 +10,19 @@ support-files =
   evil.css
   evil.css^headers^
   evil.js
   evil.js^headers^
   evilWorker.js
   import.css
   raptor.jpg
   track.html
+  trackingRequest.html
+  trackingRequest.js
+  trackingRequest.js^headers^
   unwantedWorker.js
   vp9.webm
   whitelistFrame.html
   workerFrame.html
   ping.sjs
   basic.vtt
   basic.vtt^headers^
   dnt.html
new file mode 100644 --- /dev/null +++ b/toolkit/components/url-classifier/tests/mochitest/test_trackingprotection_bug1312515.html @@ -0,0 +1,130 @@ +<!DOCTYPE HTML> +<!-- Any copyright is dedicated to the Public Domain. + http://creativecommons.org/publicdomain/zero/1.0/ --> +<html> +<head> + <title>Test Bug 1312515</title> + <script type="text/javascript" src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="chrome://mochikit/content/tests/SimpleTest/test.css"> +</head> + +<body> +<p id="display"></p> +<div id="content" style="display: none"> +</div> +<pre id="test"> + +<script class="testbody" type="text/javascript"> + +var Cc = SpecialPowers.Cc; +var Ci = SpecialPowers.Ci; + +var mainWindow = window.QueryInterface(Ci.nsIInterfaceRequestor) + .getInterface(Ci.nsIWebNavigation) + .QueryInterface(Ci.nsIDocShellTreeItem) + .rootTreeItem + .QueryInterface(Ci.nsIInterfaceRequestor) + .getInterface(Ci.nsIDOMWindow); +var contentPage = "http://www.itisatrap.org/tests/toolkit/components/url-classifier/tests/mochitest/trackingRequest.html"; + +Components.utils.import("resource://gre/modules/Services.jsm"); +Components.utils.import("resource://testing-common/UrlClassifierTestUtils.jsm"); + +function whenDelayedStartupFinished(aWindow, aCallback) { + Services.obs.addObserver(function observer(aSubject, aTopic) { + if (aWindow == aSubject) { + Services.obs.removeObserver(observer, aTopic); + setTimeout(aCallback, 0); + } + }, "browser-delayed-startup-finished"); +} + +function testOnWindow(aPrivate, aCallback) { + var win = mainWindow.OpenBrowserWindow({private: aPrivate}); + win.addEventListener("load", function() { + whenDelayedStartupFinished(win, function() { + win.addEventListener("DOMContentLoaded", function onInnerLoad() { + if (win.content.location.href != contentPage) { + win.gBrowser.loadURI(contentPage); + return; + } + win.removeEventListener("DOMContentLoaded", onInnerLoad, true); + + win.content.addEventListener('load', function innerLoad2() { + win.content.removeEventListener('load', innerLoad2); + SimpleTest.executeSoon(function() { aCallback(win); }); + }, false, true); + }, true); + SimpleTest.executeSoon(function() { win.gBrowser.loadURI(contentPage); }); + }); + }, {capture: true, once: true}); +} + +const topic = "http-on-opening-request"; +var testUrl; +var testWindow; +var resolve; + +function checkLowestPriority(aSubject) { + var channel = aSubject.QueryInterface(Ci.nsIChannel); + info("Channel's url=" + channel.name); + if (channel.name !== testUrl) { + return; + } + + var p = aSubject.QueryInterface(Ci.nsISupportsPriority); + is(p.priority, Ci.nsISupportsPriority.PRIORITY_LOWEST, "Priority should be lowest."); + SpecialPowers.removeObserver(checkLowestPriority, topic); + resolve(); +} + +function testXHR() { + return new Promise(function(aResolve, aReject) { + resolve = aResolve; + SpecialPowers.addObserver(checkLowestPriority, topic); + testUrl = "http://mochi.test:8888/"; + testWindow.content.postMessage({type: "doXHR", url: testUrl}, "*"); + }); +} + +function testFetch() { + return new Promise(function(aResolve, aReject) { + resolve = aResolve; + SpecialPowers.addObserver(checkLowestPriority, topic); + testUrl = "http://itisatracker.org/"; + testWindow.content.postMessage({type: "doFetch", url: testUrl}, "*"); + }); +} + +function endTest() { + testWindow.close(); + testWindow = null; + SimpleTest.finish(); +} + +SpecialPowers.pushPrefEnv( + {"set" : [["urlclassifier.trackingTable", "test-track-simple"], 
+ ["privacy.trackingprotection.annotate_channels", true], + ["privacy.trackingprotection.lower_network_priority", true], + ["channelclassifier.allowlist_example", true]]}, + test); + +function test() { + SimpleTest.registerCleanupFunction(UrlClassifierTestUtils.cleanupTestTrackers); + UrlClassifierTestUtils.addTestTrackers().then(() => { + testOnWindow(false, function(aWindow) { + testWindow = aWindow; + testXHR(). + then(testFetch). + then(endTest) + }); + }); +} + +SimpleTest.waitForExplicitFinish(); + +</script> + +</pre> +</body> +</html>
new file mode 100644
--- /dev/null
+++ b/toolkit/components/url-classifier/tests/mochitest/trackingRequest.html
@@ -0,0 +1,14 @@
+<!DOCTYPE HTML>
+<!-- Any copyright is dedicated to the Public Domain.
+     http://creativecommons.org/publicdomain/zero/1.0/ -->
+<html>
+<head>
+<title></title>
+
+</head>
+<body>
+
+<script id="badscript" data-touched="not sure" src="http://tracking.example.com/tests/toolkit/components/url-classifier/tests/mochitest/trackingRequest.js"></script>
+
+</body>
+</html>
new file mode 100644
--- /dev/null
+++ b/toolkit/components/url-classifier/tests/mochitest/trackingRequest.js
@@ -0,0 +1,11 @@
+
+window.addEventListener("message", function onMessage(evt) {
+  if (evt.data.type === "doXHR") {
+    var request = new XMLHttpRequest();
+    request.open("GET", evt.data.url, true);
+    request.send(null);
+  }
+  else if (evt.data.type === "doFetch") {
+    fetch(evt.data.url);
+  }
+});
new file mode 100644
--- /dev/null
+++ b/toolkit/components/url-classifier/tests/mochitest/trackingRequest.js^headers^
@@ -0,0 +1,2 @@
+Access-Control-Allow-Origin: *
+Cache-Control: no-store
--- a/xpcom/string/nsCharTraits.h +++ b/xpcom/string/nsCharTraits.h @@ -92,23 +92,16 @@ template <> struct nsCharTraits<char16_t> { typedef char16_t char_type; typedef uint16_t unsigned_char_type; typedef char incompatible_char_type; static char_type* const sEmptyBuffer; - static void - assign(char_type& aLhs, char_type aRhs) - { - aLhs = aRhs; - } - - // integer representation of characters: typedef int int_type; static char_type to_char_type(int_type aChar) { return char_type(aChar); } @@ -162,26 +155,16 @@ struct nsCharTraits<char16_t> { for (char_type* s = aStr1; aN--; ++s, ++aStr2) { NS_ASSERTION(!(*aStr2 & ~0x7F), "Unexpected non-ASCII character"); *s = static_cast<char_type>(*aStr2); } return aStr1; } - static char_type* - assign(char_type* aStr, size_t aN, char_type aChar) - { - char_type* result = aStr; - while (aN--) { - assign(*aStr++, aChar); - } - return result; - } - static int compare(const char_type* aStr1, const char_type* aStr2, size_t aN) { for (; aN--; ++aStr1, ++aStr2) { if (!eq(*aStr1, *aStr2)) { return to_int_type(*aStr1) - to_int_type(*aStr2); } } @@ -317,23 +300,16 @@ template <> struct nsCharTraits<char> { typedef char char_type; typedef unsigned char unsigned_char_type; typedef char16_t incompatible_char_type; static char_type* const sEmptyBuffer; - static void - assign(char_type& aLhs, char_type aRhs) - { - aLhs = aRhs; - } - - // integer representation of characters: typedef int int_type; static char_type to_char_type(int_type aChar) { return char_type(aChar); @@ -383,22 +359,16 @@ struct nsCharTraits<char> } static char_type* copyASCII(char_type* aStr1, const char* aStr2, size_t aN) { return copy(aStr1, aStr2, aN); } - static char_type* - assign(char_type* aStr, size_t aN, char_type aChar) - { - return static_cast<char_type*>(memset(aStr, to_int_type(aChar), aN)); - } - static int compare(const char_type* aStr1, const char_type* aStr2, size_t aN) { return memcmp(aStr1, aStr2, aN); } static int compareASCII(const char_type* aStr1, const char* aStr2, size_t aN)
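Presumably the removed assign helpers were dead code with no remaining callers; if fill-style assignment is ever needed again, the standard library already provides equivalent behavior. A sketch, not part of this patch:

#include <algorithm>
#include <cstdio>

int
main()
{
    // Equivalent of the removed nsCharTraits<char16_t>::assign(aStr, aN, aChar):
    char16_t buf[8];
    std::fill_n(buf, 8, u'x');

    // Equivalent of the removed nsCharTraits<char>::assign (a memset wrapper):
    char cbuf[8];
    std::fill_n(cbuf, 8, 'x');

    printf("%c\n", cbuf[0]);
    return 0;
}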