Bug 1402283 - Enforce arena matching on moz_arena_{realloc,free}. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
Wed, 08 Nov 2017 17:43:47 +0900
changeset 444402 ee9d4052e9493b3e8bca00f3bead8f1ea22b933b
parent 444401 a82bf86d86fd3d83da8a3d6e929533da52ea036e
child 444403 7129e7f79826bebb6cf13e9c11b63b7317a24773
push id 1618
push user Callek@gmail.com
push date Thu, 11 Jan 2018 17:45:48 +0000
reviewers njn
bugs 1402283
milestone 58.0a1
This enforces the API contract as described in memory/build/malloc_decls.h.
memory/build/Utils.h
memory/build/malloc_decls.h
memory/build/mozjemalloc.cpp
memory/gtest/TestJemalloc.cpp
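
For context, here is a minimal caller-side sketch (not part of the patch) of the contract this change enforces, assuming mozmemory.h and a build where the enforcement is compiled in:

#include "mozmemory.h"

void IllustrateArenaContract()
{
  arena_id_t a = moz_create_arena();
  arena_id_t b = moz_create_arena();

  void* p = moz_arena_malloc(a, 64);   // p is owned by arena |a|
  // moz_arena_free(b, p);             // would now crash: |b| doesn't own p
  // moz_arena_realloc(b, p, 128);     // same: a mismatched arena crashes
  p = moz_arena_realloc(a, p, 128);    // matching arena is fine
  moz_arena_free(a, p);

  moz_dispose_arena(b);
  moz_dispose_arena(a);
  // moz_arena_malloc(a, 1);           // using a disposed arena also crashes
}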
--- a/memory/build/Utils.h
+++ b/memory/build/Utils.h
@@ -27,24 +27,34 @@ CompareAddr(T* aAddr1, T* aAddr2)
 {
   uintptr_t addr1 = reinterpret_cast<uintptr_t>(aAddr1);
   uintptr_t addr2 = reinterpret_cast<uintptr_t>(aAddr2);
 
   return (addr1 > addr2) - (addr1 < addr2);
 }
 
 // User-defined literals to make constants more legible
-constexpr unsigned long long int operator"" _KiB(unsigned long long int aNum)
+constexpr size_t operator"" _KiB(unsigned long long int aNum)
 {
-  return aNum * 1024;
+  return size_t(aNum) * 1024;
 }
 
-constexpr unsigned long long int operator"" _MiB(unsigned long long int aNum)
+constexpr size_t operator"" _KiB(long double aNum)
+{
+  return size_t(aNum * 1024);
+}
+
+constexpr size_t operator"" _MiB(unsigned long long int aNum)
 {
-  return aNum * 1024_KiB;
+  return size_t(aNum) * 1024_KiB;
+}
+
+constexpr size_t operator"" _MiB(long double aNum)
+{
+  return size_t(aNum * 1024_KiB);
 }
 
 constexpr long double operator""_percent(long double aPercent)
 {
   return aPercent / 100;
 }
 
 // Helper for (fast) comparison of fractions without involving divisions or
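
The literal operators above now return size_t instead of unsigned long long and gain long double overloads, so fractional constants such as 1.5_KiB (used by the new test below) can be written directly. An illustrative sketch of the resulting behavior, assuming Utils.h is included:

static_assert(1_KiB == size_t(1024), "integer KiB literal");
static_assert(4_MiB == size_t(4) * 1024 * 1024, "integer MiB literal");
// The new long double overloads truncate to size_t:
static_assert(1.5_KiB == size_t(1536), "fractional KiB literal");
static_assert(2.5_MiB == size_t(2621440), "fractional MiB literal");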
--- a/memory/build/malloc_decls.h
+++ b/memory/build/malloc_decls.h
@@ -100,26 +100,29 @@ MALLOC_DECL(jemalloc_ptr_info, void, con
 #endif
 
 #if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_BASE
 
 // Creates a separate arena, and returns its id, valid to use with moz_arena_*
 // functions.
 MALLOC_DECL(moz_create_arena, arena_id_t)
 
-// Dispose of the given arena. Subsequent uses of the arena will fail.
+// Dispose of the given arena. Subsequent uses of the arena will crash.
+// Passing an invalid id (inexistent or already disposed) to this function
+// will crash.
 MALLOC_DECL(moz_dispose_arena, void, arena_id_t)
 #endif
 
 #if MALLOC_FUNCS & MALLOC_FUNCS_ARENA_ALLOC
 // Same as the functions without the moz_arena_ prefix, but using arenas
 // created with moz_create_arena.
 // The contract, even if not enforced at runtime in some configurations,
-// is that moz_arena_realloc and moz_arena_free will crash if the wrong
-// arena id is given. All functions will crash if the arena id is invalid.
+// is that moz_arena_realloc and moz_arena_free will crash if the given
+// arena doesn't own the given pointer. All functions will crash if the
+// arena id is invalid.
 // Although discouraged, plain realloc and free can still be used on
 // pointers allocated with these functions. Realloc will properly keep
 // new pointers in the same arena as the original.
 MALLOC_DECL(moz_arena_malloc, void*, arena_id_t, size_t)
 MALLOC_DECL(moz_arena_calloc, void*, arena_id_t, size_t, size_t)
 MALLOC_DECL(moz_arena_realloc, void*, arena_id_t, void*, size_t)
 MALLOC_DECL(moz_arena_free, void, arena_id_t, void*)
 MALLOC_DECL(moz_arena_memalign, void*, arena_id_t, size_t, size_t)
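
Besides the crash-on-mismatch behavior, the comment above still allows plain realloc and free on arena pointers. A short illustrative sketch (not part of the patch; other_arena stands for some other live arena id):

arena_id_t arena = moz_create_arena();
void* p = moz_arena_malloc(arena, 100);
p = realloc(p, 200);                 // allowed: the new pointer stays in |arena|
// moz_arena_free(other_arena, p);   // would crash: other_arena doesn't own p
free(p);                             // allowed: plain free works on arena pointers
moz_dispose_arena(arena);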
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1252,17 +1252,17 @@ static void
 chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed);
 static void*
 huge_malloc(size_t size, bool zero, arena_t* aArena);
 static void*
 huge_palloc(size_t aSize, size_t aAlignment, bool aZero, arena_t* aArena);
 static void*
 huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena);
 static void
-huge_dalloc(void* aPtr);
+huge_dalloc(void* aPtr, arena_t* aArena);
 #ifdef XP_WIN
 extern "C"
 #else
 static
 #endif
   bool
   malloc_init_hard();
 
@@ -3579,52 +3579,53 @@ arena_t::DallocLarge(arena_chunk_t* aChu
 
   memset(aPtr, kAllocPoison, size);
   mStats.allocated_large -= size;
 
   DallocRun((arena_run_t*)aPtr, true);
 }
 
 static inline void
-arena_dalloc(void* aPtr, size_t aOffset)
+arena_dalloc(void* aPtr, size_t aOffset, arena_t* aArena)
 {
   MOZ_ASSERT(aPtr);
   MOZ_ASSERT(aOffset != 0);
   MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);
 
   auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
   auto arena = chunk->arena;
   MOZ_ASSERT(arena);
   MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
+  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);
 
   MutexAutoLock lock(arena->mLock);
   size_t pageind = aOffset >> gPageSize2Pow;
   arena_chunk_map_t* mapelm = &chunk->map[pageind];
   MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
   if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
     // Small allocation.
     arena->DallocSmall(chunk, aPtr, mapelm);
   } else {
     // Large allocation.
     arena->DallocLarge(chunk, aPtr);
   }
 }
 
 static inline void
-idalloc(void* ptr)
+idalloc(void* ptr, arena_t* aArena)
 {
   size_t offset;
 
   MOZ_ASSERT(ptr);
 
   offset = GetChunkOffsetForPtr(ptr);
   if (offset != 0) {
-    arena_dalloc(ptr, offset);
+    arena_dalloc(ptr, offset, aArena);
   } else {
-    huge_dalloc(ptr);
+    huge_dalloc(ptr, aArena);
   }
 }
 
 void
 arena_t::RallocShrinkLarge(arena_chunk_t* aChunk,
                            void* aPtr,
                            size_t aSize,
                            size_t aOldSize)
@@ -3749,30 +3750,31 @@ arena_ralloc(void* aPtr, size_t aSize, s
 #ifdef VM_COPY_MIN
   if (copysize >= VM_COPY_MIN) {
     pages_copy(ret, aPtr, copysize);
   } else
 #endif
   {
     memcpy(ret, aPtr, copysize);
   }
-  idalloc(aPtr);
+  idalloc(aPtr, aArena);
   return ret;
 }
 
 static inline void*
 iralloc(void* aPtr, size_t aSize, arena_t* aArena)
 {
   MOZ_ASSERT(aPtr);
   MOZ_ASSERT(aSize != 0);
 
   auto info = AllocInfo::Get(aPtr);
-  aArena = aArena ? aArena : info.Arena();
+  auto arena = info.Arena();
+  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);
+  aArena = aArena ? aArena : arena;
   size_t oldsize = info.Size();
-  MOZ_RELEASE_ASSERT(aArena);
   MOZ_DIAGNOSTIC_ASSERT(aArena->mMagic == ARENA_MAGIC);
 
   return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
                                    : huge_ralloc(aPtr, aSize, oldsize, aArena);
 }
 
 arena_t::arena_t()
 {
@@ -3961,16 +3963,17 @@ huge_ralloc(void* aPtr, size_t aSize, si
       pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);
 
       // Update recorded size.
       MutexAutoLock lock(huge_mtx);
       key.mAddr = const_cast<void*>(aPtr);
       extent_node_t* node = huge.Search(&key);
       MOZ_ASSERT(node);
       MOZ_ASSERT(node->mSize == aOldSize);
+      MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
       huge_allocated -= aOldSize - psize;
       // No need to change huge_mapped, because we didn't (un)map anything.
       node->mSize = psize;
     } else if (psize > aOldSize) {
       if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
                         psize - aOldSize)) {
         return nullptr;
       }
@@ -3985,16 +3988,17 @@ huge_ralloc(void* aPtr, size_t aSize, si
     if (psize > aOldSize) {
       // Update recorded size.
       extent_node_t key;
       MutexAutoLock lock(huge_mtx);
       key.mAddr = const_cast<void*>(aPtr);
       extent_node_t* node = huge.Search(&key);
       MOZ_ASSERT(node);
       MOZ_ASSERT(node->mSize == aOldSize);
+      MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
       huge_allocated += psize - aOldSize;
       // No need to change huge_mapped, because we didn't
       // (un)map anything.
       node->mSize = psize;
     }
 
     if (opt_zero && aSize > aOldSize) {
       memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
@@ -4014,33 +4018,34 @@ huge_ralloc(void* aPtr, size_t aSize, si
 #ifdef VM_COPY_MIN
   if (copysize >= VM_COPY_MIN) {
     pages_copy(ret, aPtr, copysize);
   } else
 #endif
   {
     memcpy(ret, aPtr, copysize);
   }
-  idalloc(aPtr);
+  idalloc(aPtr, aArena);
   return ret;
 }
 
 static void
-huge_dalloc(void* aPtr)
+huge_dalloc(void* aPtr, arena_t* aArena)
 {
   extent_node_t* node;
   {
     extent_node_t key;
     MutexAutoLock lock(huge_mtx);
 
     // Extract from tree of huge allocations.
     key.mAddr = aPtr;
     node = huge.Search(&key);
     MOZ_ASSERT(node);
     MOZ_ASSERT(node->mAddr == aPtr);
+    MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
     huge.Remove(node);
 
     huge_allocated -= node->mSize;
     huge_mapped -= CHUNK_CEILING(node->mSize);
   }
 
   // Unmap chunk.
   chunk_dealloc(node->mAddr, CHUNK_CEILING(node->mSize), HUGE_CHUNK);
@@ -4368,20 +4373,20 @@ inline void
 BaseAllocator::free(void* aPtr)
 {
   size_t offset;
 
   // A version of idalloc that checks for nullptr pointer.
   offset = GetChunkOffsetForPtr(aPtr);
   if (offset != 0) {
     MOZ_RELEASE_ASSERT(malloc_initialized);
-    arena_dalloc(aPtr, offset);
+    arena_dalloc(aPtr, offset, mArena);
   } else if (aPtr) {
     MOZ_RELEASE_ASSERT(malloc_initialized);
-    huge_dalloc(aPtr);
+    huge_dalloc(aPtr, mArena);
   }
 }
 
 template<void* (*memalign)(size_t, size_t)>
 struct AlignedAllocator
 {
   static inline int posix_memalign(void** aMemPtr,
                                    size_t aAlignment,
@@ -4702,19 +4707,18 @@ MozJemalloc::moz_create_arena()
   return 0;
 }
 
 template<>
 inline void
 MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
 {
   arena_t* arena = gArenas.GetById(aArenaId, /* IsPrivate = */ true);
-  if (arena) {
-    gArenas.DisposeArena(arena);
-  }
+  MOZ_RELEASE_ASSERT(arena);
+  gArenas.DisposeArena(arena);
 }
 
 #define MALLOC_DECL(name, return_type, ...)                                    \
   template<>                                                                   \
   inline return_type MozJemalloc::moz_arena_##name(                            \
     arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__))               \
   {                                                                            \
     BaseAllocator allocator(                                                   \
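
The mozjemalloc changes above all follow the same pattern: the deallocation and reallocation helpers now receive the arena the caller claims owns the pointer (nullptr when reached through the plain malloc/free entry points, meaning "no expectation"), and release-assert that it matches the arena actually recorded for the allocation. A simplified stand-alone model of that pattern, with stand-in types rather than mozjemalloc's:

#include "mozilla/Assertions.h"

// |Arena| and |ArenaForPtr| are stand-ins, not mozjemalloc types.
struct Arena { int mId; };

static Arena* ArenaForPtr(void* aPtr)
{
  (void)aPtr;
  return nullptr;  // stub: mozjemalloc derives this from chunk or huge metadata
}

static void dalloc(void* aPtr, Arena* aExpected)
{
  Arena* owner = ArenaForPtr(aPtr);
  // aExpected == nullptr means the caller expressed no expectation (plain
  // free/realloc); otherwise the recorded owner must match, or we abort.
  MOZ_RELEASE_ASSERT(!aExpected || owner == aExpected);
  // ... actual deallocation would happen here ...
}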
--- a/memory/gtest/TestJemalloc.cpp
+++ b/memory/gtest/TestJemalloc.cpp
@@ -7,16 +7,49 @@
 #include "mozilla/mozalloc.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/Vector.h"
 #include "mozmemory.h"
 #include "Utils.h"
 
 #include "gtest/gtest.h"
 
+#ifdef MOZ_CRASHREPORTER
+#include "nsCOMPtr.h"
+#include "nsICrashReporter.h"
+#include "nsServiceManagerUtils.h"
+#endif
+
+#if defined(DEBUG) && !defined(XP_WIN) && !defined(ANDROID)
+#define HAS_GDB_SLEEP_DURATION 1
+extern unsigned int _gdb_sleep_duration;
+#endif
+
+// Death tests are too slow on OSX because of the system crash reporter.
+#ifndef XP_DARWIN
+static void DisableCrashReporter()
+{
+#ifdef MOZ_CRASHREPORTER
+  nsCOMPtr<nsICrashReporter> crashreporter =
+    do_GetService("@mozilla.org/toolkit/crash-reporter;1");
+  if (crashreporter) {
+    crashreporter->SetEnabled(false);
+  }
+#endif
+}
+
+// Wrap ASSERT_DEATH_IF_SUPPORTED to disable the crash reporter
+// when entering the subprocess, so that the expected crashes don't
+// create a minidump that the gtest harness will interpret as an error.
+#define ASSERT_DEATH_WRAP(a, b) \
+  ASSERT_DEATH_IF_SUPPORTED({ DisableCrashReporter(); a; }, b)
+#else
+#define ASSERT_DEATH_WRAP(a, b)
+#endif
+
 using namespace mozilla;
 
 static inline void
 TestOne(size_t size)
 {
   size_t req = size;
   size_t adv = malloc_good_size(req);
   char* p = (char*)malloc(req);
@@ -218,8 +251,63 @@ TEST(Jemalloc, PtrInfo)
   // Entire chunk. It's impossible to check what is put into |info| for all of
   // these addresses; this is more about checking that we don't crash.
   for (size_t i = 0; i < stats.chunksize; i += 256) {
     jemalloc_ptr_info(&chunk[i], &info);
   }
 
   jemalloc_thread_local_arena(false);
 }
+
+#ifdef NIGHTLY_BUILD
+TEST(Jemalloc, Arenas)
+{
+  arena_id_t arena = moz_create_arena();
+  ASSERT_TRUE(arena != 0);
+  void* ptr = moz_arena_malloc(arena, 42);
+  ASSERT_TRUE(ptr != nullptr);
+  ptr = moz_arena_realloc(arena, ptr, 64);
+  ASSERT_TRUE(ptr != nullptr);
+  moz_arena_free(arena, ptr);
+  ptr = moz_arena_calloc(arena, 24, 2);
+  // For convenience, free can be used to free arena pointers.
+  free(ptr);
+  moz_dispose_arena(arena);
+
+#ifdef HAS_GDB_SLEEP_DURATION
+  // Avoid death tests adding some unnecessary (long) delays.
+  unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
+  _gdb_sleep_duration = 0;
+#endif
+
+  // Can't use an arena after it's disposed.
+  ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 80), "");
+
+  // Arena id 0 can't be used to somehow get to the main arena.
+  ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");
+
+  arena = moz_create_arena();
+  arena_id_t arena2 = moz_create_arena();
+
+  // For convenience, realloc can also be used to reallocate arena pointers.
+  // The result should be in the same arena. Test various size class transitions.
+  size_t sizes[] = { 1, 42, 80, 1_KiB, 1.5_KiB, 72_KiB, 129_KiB, 2.5_MiB, 5.1_MiB };
+  for (size_t from_size : sizes) {
+    for (size_t to_size : sizes) {
+      ptr = moz_arena_malloc(arena, from_size);
+      ptr = realloc(ptr, to_size);
+      // Freeing with the wrong arena should crash.
+      ASSERT_DEATH_WRAP(moz_arena_free(arena2, ptr), "");
+      // Likewise for moz_arena_realloc.
+      ASSERT_DEATH_WRAP(moz_arena_realloc(arena2, ptr, from_size), "");
+      // The following will crash if it's not in the right arena.
+      moz_arena_free(arena, ptr);
+    }
+  }
+
+  moz_dispose_arena(arena2);
+  moz_dispose_arena(arena);
+
+#ifdef HAS_GDB_SLEEP_DURATION
+  _gdb_sleep_duration = old_gdb_sleep_duration;
+#endif
+}
+#endif
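
Assuming a local Firefox build, these death tests can typically be run through the gtest harness with something like:

./mach gtest 'Jemalloc.*'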