Bug 1594598 - Use uint8_t* to avoid a bunch of casts. r=glandium
author: Nicholas Nethercote <nnethercote@mozilla.com>
date: Wed, 06 Nov 2019 23:56:48 +0000
changeset 501024: ec3308aa7246d40c57b9b1da9194ef461ee9c52e
parent 501023: ba5dde550477b6902efd0185b33bf9dd22bba06d
child 501025: 446bc0f67bebfaa27a5d27369cfa7a65eaba2d44
push id: 99937
push user: nnethercote@mozilla.com
push date: Thu, 07 Nov 2019 00:37:00 +0000
treeherder: autoland@ec3308aa7246
reviewers: glandium
bugs: 1594598
milestone: 72.0a1
Bug 1594598 - Use uint8_t* to avoid a bunch of casts. r=glandium

Currently the PHC code uses char* and uintptr_t in various address computations. This patch changes it to use uint8_t* instead, which is clearer than char* and avoids the need for various casts.

Differential Revision: https://phabricator.services.mozilla.com/D43840
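For illustration, here is a minimal standalone sketch of the pattern this patch adopts. The names kSlotSize, SlotPtr, and InRange are invented for this example; they are not part of PHC:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kSlotSize = 4096;

    // With char*/uintptr_t, every address computation needs a cast:
    //   auto base = reinterpret_cast<uintptr_t>(p);
    //   void* slot = reinterpret_cast<void*>(base + kSlotSize * i);
    // With uint8_t*, byte-granularity arithmetic works directly.
    uint8_t* SlotPtr(uint8_t* aBase, size_t aIndex) {
      return aBase + kSlotSize * aIndex;
    }

    // A const void* compares against uint8_t* without a round-trip
    // through uintptr_t (the uint8_t* converts implicitly).
    bool InRange(const void* aPtr, const uint8_t* aStart,
                 const uint8_t* aLimit) {
      return aStart <= aPtr && aPtr < aLimit;
    }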
memory/replace/phc/PHC.cpp
memory/replace/phc/test/gtest/TestPHC.cpp
toolkit/crashreporter/test/nsTestCrasher.cpp
--- a/memory/replace/phc/PHC.cpp
+++ b/memory/replace/phc/PHC.cpp
@@ -339,61 +339,60 @@ Atomic<Time, Relaxed, Behavior::DontPres
 Atomic<Delay, ReleaseAcquire, Behavior::DontPreserve> GAtomic::sAllocDelay;
 
 // Shared, immutable global state. Initialized by replace_init() and never
 // changed after that. replace_init() runs early enough that no synchronization
 // is needed.
 class GConst {
  private:
   // The bounds of the allocated pages.
-  const uintptr_t mPagesStart;
-  const uintptr_t mPagesLimit;
+  uint8_t* const mPagesStart;
+  uint8_t* const mPagesLimit;
 
-  uintptr_t AllocPages() {
+  uint8_t* AllocPages() {
     // Allocate the pages so that they are inaccessible. They are never freed,
     // because it would happen at process termination when it would be of little
     // use.
     void* pages =
 #ifdef XP_WIN
         VirtualAlloc(nullptr, kAllPagesSize, MEM_RESERVE, PAGE_NOACCESS);
 #else
         mmap(nullptr, kAllPagesSize, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1,
              0);
 #endif
     if (!pages) {
       MOZ_CRASH();
     }
 
-    return reinterpret_cast<uintptr_t>(pages);
+    return static_cast<uint8_t*>(pages);
   }
 
  public:
   GConst()
       : mPagesStart(AllocPages()), mPagesLimit(mPagesStart + kAllPagesSize) {
-    LOG("AllocPages at %p..%p\n", (void*)mPagesStart, (void*)mPagesLimit);
+    LOG("AllocPages at %p..%p\n", mPagesStart, mPagesLimit);
   }
 
   // Detect if a pointer is to a page allocation, and if so, which one. This
   // function must be fast because it is called for every call to free(),
   // realloc(), malloc_usable_size(), and jemalloc_ptr_info().
   Maybe<uintptr_t> PageIndex(const void* aPtr) {
-    auto ptr = reinterpret_cast<uintptr_t>(aPtr);
-    if (!(mPagesStart <= ptr && ptr < mPagesLimit)) {
+    if (!(mPagesStart <= aPtr && aPtr < mPagesLimit)) {
       return Nothing();
     }
 
-    size_t i = (ptr - mPagesStart) / kPageSize;
+    size_t i = (static_cast<const uint8_t*>(aPtr) - mPagesStart) / kPageSize;
     MOZ_ASSERT(i < kMaxPageAllocs);
     return Some(i);
   }
 
   // Get the address of a page referred to via an index.
-  void* PagePtr(size_t aIndex) {
+  uint8_t* PagePtr(size_t aIndex) {
     MOZ_ASSERT(aIndex < kMaxPageAllocs);
-    return reinterpret_cast<void*>(mPagesStart + kPageSize * aIndex);
+    return mPagesStart + kPageSize * aIndex;
   }
 };
 
 static GConst* gConst;
 
 // On MacOS, the first __thread/thread_local access calls malloc, which leads
 // to an infinite loop. So we use pthread-based TLS instead, which somehow
 // doesn't have this problem.
@@ -659,17 +658,17 @@ class GMut {
       LOG("EnsureInUse(%p), failure\n", aPtr);
       // An operation on a freed page? This is a particular kind of
       // use-after-free. Deliberately touch the page in question, in order to
       // cause a crash that triggers the usual PHC machinery. But unlock sMutex
       // first, because that self-same PHC machinery needs to re-lock it, and
       // the crash causes non-local control flow so sMutex won't be unlocked
       // the normal way in the caller.
       sMutex.Unlock();
-      *static_cast<char*>(aPtr) = 0;
+      *static_cast<uint8_t*>(aPtr) = 0;
       MOZ_CRASH("unreachable");
     }
   }
 
   void FillAddrInfo(GMutLock, uintptr_t aIndex, const void* aBaseAddr,
                     phc::AddrInfo& aOut) {
     const PageInfo& page = mPages[aIndex];
     switch (page.mState) {
@@ -683,45 +682,45 @@ class GMut {
 
       case PageState::Freed:
         aOut.mKind = phc::AddrInfo::Kind::FreedPage;
         break;
 
       default:
         MOZ_CRASH();
     }
-    aOut.mBaseAddr = const_cast<const void*>(gConst->PagePtr(aIndex));
+    aOut.mBaseAddr = gConst->PagePtr(aIndex);
     aOut.mUsableSize = page.mUsableSize;
     aOut.mAllocStack = page.mAllocStack;
     aOut.mFreeStack = page.mFreeStack;
   }
 
   void FillJemallocPtrInfo(GMutLock, const void* aPtr, uintptr_t aIndex,
                            jemalloc_ptr_info_t* aInfo) {
     const PageInfo& page = mPages[aIndex];
     switch (page.mState) {
       case PageState::NeverAllocated:
         break;
 
       case PageState::InUse: {
         // Only return TagLiveAlloc if the pointer is within the bounds of the
         // allocation's usable size.
-        char* pagePtr = static_cast<char*>(gConst->PagePtr(aIndex));
+        uint8_t* pagePtr = gConst->PagePtr(aIndex);
         if (aPtr < pagePtr + page.mUsableSize) {
           *aInfo = {TagLiveAlloc, pagePtr, page.mUsableSize,
                     page.mArenaId.valueOr(0)};
           return;
         }
         break;
       }
 
       case PageState::Freed: {
         // Only return TagFreedAlloc if the pointer is within the bounds of the
         // former allocation's usable size.
-        char* pagePtr = static_cast<char*>(gConst->PagePtr(aIndex));
+        uint8_t* pagePtr = gConst->PagePtr(aIndex);
         if (aPtr < pagePtr + page.mUsableSize) {
           *aInfo = {TagFreedAlloc, gConst->PagePtr(aIndex), page.mUsableSize,
                     page.mArenaId.valueOr(0)};
           return;
         }
         break;
       }
 
@@ -858,24 +857,24 @@ static void* MaybePageAlloc(const Maybe<
 
   MutexAutoLock lock(GMut::sMutex);
 
   Time now = GAtomic::Now();
   Delay newAllocDelay = Rnd64ToDelay<kAvgAllocDelay>(gMut->Random64(lock));
 
   // We start at a random page alloc and wrap around, to ensure pages get even
   // amounts of use.
-  void* ptr = nullptr;
+  uint8_t* ptr = nullptr;
   for (uintptr_t n = 0, i = size_t(gMut->Random64(lock)) % kMaxPageAllocs;
        n < kMaxPageAllocs; n++, i = (i + 1) % kMaxPageAllocs) {
     if (!gMut->IsPageAllocatable(lock, i, now)) {
       continue;
     }
 
-    void* pagePtr = gConst->PagePtr(i);
+    uint8_t* pagePtr = gConst->PagePtr(i);
     bool ok =
 #ifdef XP_WIN
         !!VirtualAlloc(pagePtr, kPageSize, MEM_COMMIT, PAGE_READWRITE);
 #else
         mprotect(pagePtr, kPageSize, PROT_READ | PROT_WRITE) == 0;
 #endif
     size_t usableSize = sMallocTable.malloc_good_size(aReqSize);
     if (ok) {
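As an aside, the bounds-check-and-index pattern that GConst::PageIndex() and GConst::PagePtr() implement above reduces to the following standalone sketch, with constants simplified and std::optional standing in for mozilla::Maybe; this is not the actual PHC code:

    #include <cstddef>
    #include <cstdint>
    #include <optional>

    constexpr size_t kPageSize = 4096;
    constexpr size_t kMaxPageAllocs = 64;

    struct PageRegion {
      uint8_t* mPagesStart;  // start of the reserved region
      uint8_t* mPagesLimit;  // mPagesStart + kPageSize * kMaxPageAllocs

      // Map a pointer back to its page index, or nullopt if it lies
      // outside the region: one range check, one subtraction.
      std::optional<size_t> PageIndex(const void* aPtr) const {
        if (!(mPagesStart <= aPtr && aPtr < mPagesLimit)) {
          return std::nullopt;
        }
        return (static_cast<const uint8_t*>(aPtr) - mPagesStart) / kPageSize;
      }

      // Map an index to its page address: plain byte arithmetic, no casts.
      uint8_t* PagePtr(size_t aIndex) const {
        return mPagesStart + kPageSize * aIndex;
      }
    };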
--- a/memory/replace/phc/test/gtest/TestPHC.cpp
+++ b/memory/replace/phc/test/gtest/TestPHC.cpp
@@ -26,20 +26,20 @@ bool JeInfoEq(jemalloc_ptr_info_t& aInfo
               size_t aSize, arena_id_t arenaId) {
   return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize
 #ifdef MOZ_DEBUG
          && aInfo.arenaId == arenaId
 #endif
       ;
 }
 
-char* GetPHCAllocation(size_t aSize) {
+uint8_t* GetPHCAllocation(size_t aSize) {
   // A crude but effective way to get a PHC allocation.
   for (int i = 0; i < 2000000; i++) {
-    char* p = (char*)malloc(aSize);
+    uint8_t* p = (uint8_t*)malloc(aSize);
     if (ReplaceMalloc::IsPHCAllocation(p, nullptr)) {
       return p;
     }
     free(p);
   }
   return nullptr;
 }
 
@@ -56,17 +56,17 @@ TEST(PHC, TestPHCBasics)
   // Test some non-PHC allocation addresses.
   ASSERT_FALSE(ReplaceMalloc::IsPHCAllocation(nullptr, &phcInfo));
   ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0,
                         false, false));
   ASSERT_FALSE(ReplaceMalloc::IsPHCAllocation(&stackVar, &phcInfo));
   ASSERT_TRUE(PHCInfoEq(phcInfo, phc::AddrInfo::Kind::Unknown, nullptr, 0,
                         false, false));
 
-  char* p = GetPHCAllocation(32);
+  uint8_t* p = GetPHCAllocation(32);
   if (!p) {
     MOZ_CRASH("failed to get a PHC allocation");
   }
 
   // Test an in-use PHC allocation, via its base address.
   ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p, &phcInfo));
   ASSERT_TRUE(
       PHCInfoEq(phcInfo, phc::AddrInfo::Kind::InUsePage, p, 32ul, true, false));
@@ -117,39 +117,39 @@ TEST(PHC, TestPHCBasics)
   // possible to reliably get ahold of such a page.
 
   // There are no tests for `mKind == GuardPage` because it's currently not
   // implemented.
 }
 
 TEST(PHC, TestPHCDisabling)
 {
-  char* p = GetPHCAllocation(32);
-  char* q = GetPHCAllocation(32);
+  uint8_t* p = GetPHCAllocation(32);
+  uint8_t* q = GetPHCAllocation(32);
   if (!p || !q) {
     MOZ_CRASH("failed to get a PHC allocation");
   }
 
   ASSERT_TRUE(ReplaceMalloc::IsPHCEnabledOnCurrentThread());
   ReplaceMalloc::DisablePHCOnCurrentThread();
   ASSERT_FALSE(ReplaceMalloc::IsPHCEnabledOnCurrentThread());
 
   // Test realloc() on a PHC allocation while PHC is disabled on the thread.
-  char* p2 = (char*)realloc(p, 128);
+  uint8_t* p2 = (uint8_t*)realloc(p, 128);
   // The small realloc is in-place.
   ASSERT_TRUE(p2 == p);
   ASSERT_TRUE(ReplaceMalloc::IsPHCAllocation(p2, nullptr));
-  char* p3 = (char*)realloc(p2, 8192);
+  uint8_t* p3 = (uint8_t*)realloc(p2, 8192);
   // The big realloc is not in-place, and the result is not a PHC allocation.
   ASSERT_TRUE(p3 != p2);
   ASSERT_FALSE(ReplaceMalloc::IsPHCAllocation(p3, nullptr));
   free(p3);
 
   // Test free() on a PHC allocation while PHC is disabled on the thread.
   free(q);
 
   // These must not be PHC allocations.
-  char* r = GetPHCAllocation(32);  // This will fail.
+  uint8_t* r = GetPHCAllocation(32);  // This will fail.
   ASSERT_FALSE(!!r);
 
   ReplaceMalloc::ReenablePHCOnCurrentThread();
   ASSERT_TRUE(ReplaceMalloc::IsPHCEnabledOnCurrentThread());
 }
--- a/toolkit/crashreporter/test/nsTestCrasher.cpp
+++ b/toolkit/crashreporter/test/nsTestCrasher.cpp
@@ -120,20 +120,20 @@ void MOZ_NEVER_INLINE ReserveStack() {
   FILETIME stackmem[elements];
   ::GetSystemTimeAsFileTime(&stackmem[0]);
   ::GetSystemTimeAsFileTime(&stackmem[elements - 1]);
 }
 
 #endif  // XP_WIN && HAVE_64BIT_BUILD
 
 #ifdef MOZ_PHC
-char* GetPHCAllocation(size_t aSize) {
+uint8_t* GetPHCAllocation(size_t aSize) {
   // A crude but effective way to get a PHC allocation.
   for (int i = 0; i < 2000000; i++) {
-    char* p = (char*)malloc(aSize);
+    uint8_t* p = (uint8_t*)malloc(aSize);
     if (ReplaceMalloc::IsPHCAllocation(p, nullptr)) {
       return p;
     }
     free(p);
   }
   // This failure doesn't seem to occur in practice...
   MOZ_CRASH("failed to get a PHC allocation");
 }
@@ -189,24 +189,24 @@ extern "C" NS_EXPORT void Crash(int16_t 
       ReserveStack();
       pfnLauncher(0, reinterpret_cast<void*>(pfnTest));
       break;
     }
 #endif  // XP_WIN && HAVE_64BIT_BUILD && !defined(__MINGW32__)
 #ifdef MOZ_PHC
     case CRASH_PHC_USE_AFTER_FREE: {
       // Do a UAF, triggering a crash.
-      char* p = GetPHCAllocation(32);
+      uint8_t* p = GetPHCAllocation(32);
       free(p);
       *p = 0;
       // not reached
     }
     case CRASH_PHC_DOUBLE_FREE: {
       // Do a double free, triggering a crash.
-      char* p = GetPHCAllocation(64);
+      uint8_t* p = GetPHCAllocation(64);
       free(p);
       free(p);
       // not reached
     }
 #endif
     default:
       break;
   }