Bug 1481009 Part 7 - Treat inaccessible memory regions after thread stacks as untracked, r=froydnj.
☠☠ backed out by e3cec7443adf ☠☠
author       Brian Hackett <bhackett1024@gmail.com>
date         Wed, 08 Aug 2018 16:50:15 +0000
changeset    431104   4a0c7dd5e1acf4e3f517eeee51b6483de04af949
parent       431103   4e6908123a97033d65fadd2cd04ce0118658c7e8
child        431105   11ee868c59038139231cfa32548139aa88da1648
push id      106352
push user    bhackett@mozilla.com
push date    Sat, 11 Aug 2018 14:55:04 +0000
treeherder   mozilla-inbound@11ee868c5903
reviewers    froydnj
bugs         1481009
milestone    63.0a1
toolkit/recordreplay/MemorySnapshot.cpp
--- a/toolkit/recordreplay/MemorySnapshot.cpp
+++ b/toolkit/recordreplay/MemorySnapshot.cpp
@@ -299,17 +299,17 @@ public:
 // are in untracked memory.
 struct MemoryInfo {
   // Whether new dirty pages or allocated regions are allowed.
   bool mMemoryChangesAllowed;
 
   // Untracked memory regions allocated before the first checkpoint. This is only
   // accessed on the main thread, and is not a vector because of reentrancy
   // issues.
-  static const size_t MaxInitialUntrackedRegions = 256;
+  static const size_t MaxInitialUntrackedRegions = 512;
   AllocatedMemoryRegion mInitialUntrackedRegions[MaxInitialUntrackedRegions];
   SpinLock mInitialUntrackedRegionsLock;
 
   // All tracked memory in the process. This may be updated by any thread while
   // holding mTrackedRegionsLock.
   SplayTree<AllocatedMemoryRegion, AllocatedMemoryRegion::AddressSort,
             AllocPolicy<MemoryKind::TrackedRegions>, 4>
     mTrackedRegions;
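
The fixed-size array guarded by a spin lock, rather than a growable vector, matters here: registering an untracked region can happen from inside the allocator itself, and growing a vector at that point would reenter the tracking machinery. Below is a minimal sketch of the pattern, using hypothetical stand-ins (Region, AddRegion) rather than the tree's AllocatedMemoryRegion and SpinLock types:

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for AllocatedMemoryRegion; illustration only.
struct Region { uint8_t* mBase = nullptr; size_t mSize = 0; };

class SpinLock {
  std::atomic_flag mFlag = ATOMIC_FLAG_INIT;
public:
  void Lock() { while (mFlag.test_and_set(std::memory_order_acquire)) {} }
  void Unlock() { mFlag.clear(std::memory_order_release); }
};

// Fixed capacity: adding a region never allocates, so this is safe to call
// from code running inside the allocator without reentering it.
static const size_t MaxRegions = 512;
static Region gRegions[MaxRegions];
static SpinLock gRegionsLock;

static void AddRegion(uint8_t* aBase, size_t aSize) {
  gRegionsLock.Lock();
  for (Region& r : gRegions) {
    if (!r.mBase) {
      r.mBase = aBase;
      r.mSize = aSize;
      gRegionsLock.Unlock();
      return;
    }
  }
  gRegionsLock.Unlock();
  assert(false && "MaxRegions exceeded");
}
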
@@ -723,23 +723,90 @@ RemoveInitialUntrackedRegion(uint8_t* aB
       region.mBase = nullptr;
       region.mSize = 0;
       return;
     }
   }
   MOZ_CRASH();
 }
 
+// Get information about the mapped region containing *aAddress, or the next
+// mapped region afterwards if *aAddress is not mapped. *aAddress is updated to
+// the start of that region, and aSize, aProtection, and aMaxProtection are
+// updated with the size and protection status of the region. Returns false if
+// there are no more mapped regions after *aAddress.
+static bool
+QueryRegion(uint8_t** aAddress, size_t* aSize,
+            int* aProtection = nullptr, int* aMaxProtection = nullptr)
+{
+  mach_vm_address_t addr = (mach_vm_address_t) *aAddress;
+  mach_vm_size_t nbytes;
+
+  vm_region_basic_info_64 info;
+  mach_msg_type_number_t info_count = sizeof(vm_region_basic_info_64);
+  mach_port_t some_port;
+  kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &nbytes, VM_REGION_BASIC_INFO,
+                                    (vm_region_info_t) &info, &info_count, &some_port);
+  if (rv == KERN_INVALID_ADDRESS) {
+    return false;
+  }
+  MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);
+
+  *aAddress = (uint8_t*) addr;
+  *aSize = nbytes;
+  if (aProtection) {
+    *aProtection = info.protection;
+  }
+  if (aMaxProtection) {
+    *aMaxProtection = info.max_protection;
+  }
+  return true;
+}
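
For readers unfamiliar with the Mach API, QueryRegion is a thin wrapper over mach_vm_region. The following standalone sketch (macOS only; error handling reduced to early returns, and using the 64-bit flavor constants directly) walks every mapped region in the current task the same way:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <cstdio>

int main() {
  mach_vm_address_t addr = 0;
  for (;;) {
    mach_vm_size_t size;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t objectName;  // unused, but required by the call
    kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &size,
                                      VM_REGION_BASIC_INFO_64,
                                      (vm_region_info_t) &info, &count,
                                      &objectName);
    if (rv == KERN_INVALID_ADDRESS) {
      break;  // no mapped regions at or after addr
    }
    if (rv != KERN_SUCCESS) {
      return 1;
    }
    // addr has been rounded up to the start of the containing/next region.
    printf("%llx-%llx prot=%d max=%d\n",
           (unsigned long long) addr, (unsigned long long) (addr + size),
           info.protection, info.max_protection);
    addr += size;
  }
  return 0;
}
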
+
 static void
 MarkThreadStacksAsUntracked()
 {
+  AutoPassThroughThreadEvents pt;
+
   // Thread stacks are excluded from the tracked regions.
   for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
     Thread* thread = Thread::GetById(i);
+    if (!thread->StackBase()) {
+      continue;
+    }
+
     AddInitialUntrackedMemoryRegion(thread->StackBase(), thread->StackSize());
+
+    // Look for a mapped region with no access permissions immediately after
+    // the thread stack's allocated region, and include this in the untracked
+    // memory if found. This is done to avoid confusing breakpad, which will
+    // scan the allocated memory in this process and will not correctly
+    // determine stack boundaries if we track these trailing regions and end up
+    // marking them as readable.
+
+    // Find the mapped region containing the thread's stack.
+    uint8_t* base = thread->StackBase();
+    size_t size;
+    if (!QueryRegion(&base, &size)) {
+      MOZ_CRASH("Could not find memory region information for thread stack");
+    }
+
+    // Sanity check the region size. Note that we don't mark this entire region
+    // as untracked, since it may contain TLS data which should be tracked.
+    MOZ_RELEASE_ASSERT(base <= thread->StackBase());
+    MOZ_RELEASE_ASSERT(base + size >= thread->StackBase() + thread->StackSize());
+
+    uint8_t* trailing = base + size;
+    size_t trailingSize;
+    int protection;
+    if (QueryRegion(&trailing, &trailingSize, &protection)) {
+      if (trailing == base + size && protection == 0) {
+        AddInitialUntrackedMemoryRegion(trailing, trailingSize);
+      }
+    }
   }
 }
 
 // Given an address region [*aAddress, *aAddress + *aSize], return true if
 // there is any intersection with an excluded region
 // [aExclude, aExclude + aExcludeSize], set *aSize to contain the subregion
 // starting at aAddress which is not excluded, and *aRemaining and
 // *aRemainingSize to any additional subregion which is not excluded.
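
The trailing-region check above looks for a mapping whose protection is 0 (no VM_PROT_* bits set) directly after the stack, i.e. a guard region. The sketch below shows how such a layout arises, using plain POSIX mmap/mprotect rather than the pthread internals that create real thread stacks:

#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstdint>

int main() {
  size_t page = (size_t) sysconf(_SC_PAGESIZE);
  size_t stackSize = 64 * page;

  // Reserve the stack plus one extra trailing page.
  void* mem = mmap(nullptr, stackSize + page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
  assert(mem != MAP_FAILED);
  uint8_t* base = (uint8_t*) mem;

  // Revoke all access on the trailing page. Queried via mach_vm_region, it
  // now reports protection == 0, which is what the patch tests for.
  int rv = mprotect(base + stackSize, page, PROT_NONE);
  assert(rv == 0);
  return 0;
}
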
@@ -825,44 +892,40 @@ AddInitialTrackedMemoryRegions(uint8_t* 
 
     aAddress = remaining;
     aSize = remainingSize;
   }
 }
 
 static void UpdateNumTrackedRegionsForSnapshot();
 
-// Handle all initial untracked memory regions in the process.
+// Fill in the set of tracked memory regions that are currently mapped within
+// this process.
 static void
 ProcessAllInitialMemoryRegions()
 {
   MOZ_ASSERT(!AreThreadEventsPassedThrough());
 
   {
     AutoPassThroughThreadEvents pt;
-    for (mach_vm_address_t addr = 0;;) {
-      mach_vm_size_t nbytes;
-
-      vm_region_basic_info_64 info;
-      mach_msg_type_number_t info_count = sizeof(vm_region_basic_info_64);
-      mach_port_t some_port;
-      kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &nbytes, VM_REGION_BASIC_INFO,
-                                        (vm_region_info_t) &info, &info_count, &some_port);
-      if (rv == KERN_INVALID_ADDRESS) {
+    for (uint8_t* addr = nullptr;;) {
+      size_t size;
+      int maxProtection;
+      if (!QueryRegion(&addr, &size, nullptr, &maxProtection)) {
         break;
       }
-      MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);
 
-      if (info.max_protection & VM_PROT_WRITE) {
-        MOZ_RELEASE_ASSERT(info.max_protection & VM_PROT_READ);
-        AddInitialTrackedMemoryRegions(reinterpret_cast<uint8_t*>(addr), nbytes,
-                                       info.max_protection & VM_PROT_EXECUTE);
+      // Consider all memory regions that can possibly be written to, even if
+      // they aren't currently writable.
+      if (maxProtection & VM_PROT_WRITE) {
+        MOZ_RELEASE_ASSERT(maxProtection & VM_PROT_READ);
+        AddInitialTrackedMemoryRegions(addr, size, maxProtection & VM_PROT_EXECUTE);
       }
 
-      addr += nbytes;
+      addr += size;
     }
   }
 
   UpdateNumTrackedRegionsForSnapshot();
 
   // Write protect all tracked memory.
   AutoDisallowMemoryChanges disallow;
   for (const AllocatedMemoryRegion& region : gMemoryInfo->mTrackedRegionsByAllocationOrder) {
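
The section ends as the tracked regions are write protected. A minimal sketch of that step, assuming a hypothetical WriteProtectRegion helper rather than the tree's actual code: dropping write access means the next store into the region faults, so a fault handler (not shown here) can record the dirty page before restoring access.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Hypothetical helper; aBase and aSize must be page aligned. After this call
// any write into the region faults, letting a handler note the dirty page.
static void WriteProtectRegion(void* aBase, size_t aSize, bool aExecutable) {
  int prot = PROT_READ | (aExecutable ? PROT_EXEC : 0);
  int rv = mprotect(aBase, aSize, prot);
  assert(rv == 0);
}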