author | Brian Hackett <bhackett1024@gmail.com>
date | Mon, 13 Aug 2018 20:48:14 +0000
changeset | 431345:a3bd72355db187ec1f492127d117f6d5796ce802
parent | 431344:41d3f63a86da3ac2a57c46afc40a99ab411539df
child | 431346:cb8d7e42139e064c7c6caeef0001b5803fa971b5
push id | 34437
push user | ebalazs@mozilla.com
push date | Tue, 14 Aug 2018 09:31:09 +0000
treeherder | mozilla-central@914b3b370ad0
reviewers | froydnj
bugs | 1481009
milestone | 63.0a1
--- a/toolkit/recordreplay/MemorySnapshot.cpp
+++ b/toolkit/recordreplay/MemorySnapshot.cpp
@@ -299,17 +299,17 @@ public:
 // are in untracked memory.
 struct MemoryInfo {
   // Whether new dirty pages or allocated regions are allowed.
   bool mMemoryChangesAllowed;
 
   // Untracked memory regions allocated before the first checkpoint. This is only
   // accessed on the main thread, and is not a vector because of reentrancy
   // issues.
-  static const size_t MaxInitialUntrackedRegions = 256;
+  static const size_t MaxInitialUntrackedRegions = 512;
   AllocatedMemoryRegion mInitialUntrackedRegions[MaxInitialUntrackedRegions];
   SpinLock mInitialUntrackedRegionsLock;
 
   // All tracked memory in the process. This may be updated by any thread while
   // holding mTrackedRegionsLock.
   SplayTree<AllocatedMemoryRegion, AllocatedMemoryRegion::AddressSort,
             AllocPolicy<MemoryKind::TrackedRegions>, 4>
     mTrackedRegions;
@@ -723,23 +723,90 @@ RemoveInitialUntrackedRegion(uint8_t* aB
       region.mBase = nullptr;
       region.mSize = 0;
       return;
     }
   }
   MOZ_CRASH();
 }
 
+// Get information about the mapped region containing *aAddress, or the next
+// mapped region afterwards if aAddress is not mapped. aAddress is updated to
+// the start of that region, and aSize, aProtection, and aMaxProtection are
+// updated with the size and protection status of the region. Returns false if
+// there are no more mapped regions after *aAddress.
+static bool
+QueryRegion(uint8_t** aAddress, size_t* aSize,
+            int* aProtection = nullptr, int* aMaxProtection = nullptr)
+{
+  mach_vm_address_t addr = (mach_vm_address_t) *aAddress;
+  mach_vm_size_t nbytes;
+
+  vm_region_basic_info_64 info;
+  mach_msg_type_number_t info_count = sizeof(vm_region_basic_info_64);
+  mach_port_t some_port;
+  kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &nbytes, VM_REGION_BASIC_INFO,
+                                    (vm_region_info_t) &info, &info_count, &some_port);
+  if (rv == KERN_INVALID_ADDRESS) {
+    return false;
+  }
+  MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);
+
+  *aAddress = (uint8_t*) addr;
+  *aSize = nbytes;
+  if (aProtection) {
+    *aProtection = info.protection;
+  }
+  if (aMaxProtection) {
+    *aMaxProtection = info.max_protection;
+  }
+  return true;
+}
+
 static void
 MarkThreadStacksAsUntracked()
 {
+  AutoPassThroughThreadEvents pt;
+
   // Thread stacks are excluded from the tracked regions.
   for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
     Thread* thread = Thread::GetById(i);
+    if (!thread->StackBase()) {
+      continue;
+    }
+
     AddInitialUntrackedMemoryRegion(thread->StackBase(), thread->StackSize());
+
+    // Look for a mapped region with no access permissions immediately after
+    // the thread stack's allocated region, and include this in the untracked
+    // memory if found. This is done to avoid confusing breakpad, which will
+    // scan the allocated memory in this process and will not correctly
+    // determine stack boundaries if we track these trailing regions and end up
+    // marking them as readable.
+
+    // Find the mapped region containing the thread's stack.
+    uint8_t* base = thread->StackBase();
+    size_t size;
+    if (!QueryRegion(&base, &size)) {
+      MOZ_CRASH("Could not find memory region information for thread stack");
+    }
+
+    // Sanity check the region size. Note that we don't mark this entire region
+    // as untracked, since it may contain TLS data which should be tracked.
+    MOZ_RELEASE_ASSERT(base <= thread->StackBase());
+    MOZ_RELEASE_ASSERT(base + size >= thread->StackBase() + thread->StackSize());
+
+    uint8_t* trailing = base + size;
+    size_t trailingSize;
+    int protection;
+    if (QueryRegion(&trailing, &trailingSize, &protection)) {
+      if (trailing == base + size && protection == 0) {
+        AddInitialUntrackedMemoryRegion(trailing, trailingSize);
+      }
+    }
   }
 }
 
 // Given an address region [*aAddress, *aAddress + *aSize], return true if
 // there is any intersection with an excluded region
 // [aExclude, aExclude + aExcludeSize], set *aSize to contain the subregion
 // starting at aAddress which is not excluded, and *aRemaining and
 // *aRemainingSize to any additional subregion which is not excluded.
@@ -825,44 +892,40 @@ AddInitialTrackedMemoryRegions(uint8_t*
     aAddress = remaining;
     aSize = remainingSize;
   }
 }
 
 static void UpdateNumTrackedRegionsForSnapshot();
 
-// Handle all initial untracked memory regions in the process.
+// Fill in the set of tracked memory regions that are currently mapped within
+// this process.
 static void
 ProcessAllInitialMemoryRegions()
 {
   MOZ_ASSERT(!AreThreadEventsPassedThrough());
 
   {
     AutoPassThroughThreadEvents pt;
-    for (mach_vm_address_t addr = 0;;) {
-      mach_vm_size_t nbytes;
-
-      vm_region_basic_info_64 info;
-      mach_msg_type_number_t info_count = sizeof(vm_region_basic_info_64);
-      mach_port_t some_port;
-      kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &nbytes, VM_REGION_BASIC_INFO,
-                                        (vm_region_info_t) &info, &info_count, &some_port);
-      if (rv == KERN_INVALID_ADDRESS) {
+    for (uint8_t* addr = nullptr;;) {
+      size_t size;
+      int maxProtection;
+      if (!QueryRegion(&addr, &size, nullptr, &maxProtection)) {
         break;
       }
-      MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);
-      if (info.max_protection & VM_PROT_WRITE) {
-        MOZ_RELEASE_ASSERT(info.max_protection & VM_PROT_READ);
-        AddInitialTrackedMemoryRegions(reinterpret_cast<uint8_t*>(addr), nbytes,
-                                       info.max_protection & VM_PROT_EXECUTE);
+      // Consider all memory regions that can possibly be written to, even if
+      // they aren't currently writable.
+      if (maxProtection & VM_PROT_WRITE) {
+        MOZ_RELEASE_ASSERT(maxProtection & VM_PROT_READ);
+        AddInitialTrackedMemoryRegions(addr, size, maxProtection & VM_PROT_EXECUTE);
       }
-      addr += nbytes;
+      addr += size;
     }
   }
 
   UpdateNumTrackedRegionsForSnapshot();
 
   // Write protect all tracked memory.
   AutoDisallowMemoryChanges disallow;
   for (const AllocatedMemoryRegion& region : gMemoryInfo->mTrackedRegionsByAllocationOrder) {
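For context, QueryRegion is a thin wrapper around mach_vm_region, the Mach call the old loop in ProcessAllInitialMemoryRegions used directly. The standalone sketch below (not part of the patch) shows the same region-walking pattern: it scans every mapped region in the current task and prints its bounds along with both the current and maximum protection, flagging the no-access regions that MarkThreadStacksAsUntracked now looks for after thread stacks. It uses the canonical VM_REGION_BASIC_INFO_64 flavor paired with VM_REGION_BASIC_INFO_COUNT_64, whereas the patch itself passes VM_REGION_BASIC_INFO with a sizeof-based count; the file name and output format are invented for illustration.

// region_walk.cpp (hypothetical, illustration only): enumerate the mapped
// regions of the current task with mach_vm_region, the call QueryRegion wraps.
// Build on macOS, e.g.: clang++ region_walk.cpp -o region_walk
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <cstdio>

int main()
{
  mach_vm_address_t addr = 0;
  for (;;) {
    mach_vm_size_t size = 0;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t objectName = MACH_PORT_NULL;
    // If addr is not itself mapped, mach_vm_region advances it to the start
    // of the next mapped region, exactly the behavior QueryRegion relies on.
    kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &size,
                                      VM_REGION_BASIC_INFO_64,
                                      (vm_region_info_t) &info, &count,
                                      &objectName);
    if (rv == KERN_INVALID_ADDRESS) {
      break; // No mapped regions at or above addr; the scan is complete.
    }
    if (rv != KERN_SUCCESS) {
      fprintf(stderr, "mach_vm_region failed: %d\n", rv);
      return 1;
    }
    // protection is what the region allows right now; max_protection is the
    // most it can ever be granted. The patch keys tracking decisions off
    // max_protection, since a region that is read-only today may become
    // writable later. A current protection of 0 marks a no-access region,
    // e.g. the guard area that can trail a thread stack.
    printf("%016llx-%016llx cur=%c%c%c max=%c%c%c%s\n",
           (unsigned long long) addr, (unsigned long long) (addr + size),
           (info.protection & VM_PROT_READ) ? 'r' : '-',
           (info.protection & VM_PROT_WRITE) ? 'w' : '-',
           (info.protection & VM_PROT_EXECUTE) ? 'x' : '-',
           (info.max_protection & VM_PROT_READ) ? 'r' : '-',
           (info.max_protection & VM_PROT_WRITE) ? 'w' : '-',
           (info.max_protection & VM_PROT_EXECUTE) ? 'x' : '-',
           info.protection == VM_PROT_NONE ? " (no access)" : "");
    addr += size; // Resume the scan just past this region.
  }
  return 0;
}

The patch factors the old inline loop body into QueryRegion so that MarkThreadStacksAsUntracked can reuse it; the optional out-parameters let each caller ask only for the protection fields it needs (max_protection in ProcessAllInitialMemoryRegions, current protection for the trailing region check).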