Bug 1052579 - Modify GTest for jemalloc_ptr_info() to check arenaId r=glandium
authorChris Martin <cmartin@mozilla.com>
Tue, 07 May 2019 00:34:42 +0000
changeset 472803 5268e93ef3483469953bf60e346af77b16687935
parent 472802 516cc2c504f5650083dddd563fa27c6a9cec615a
child 472804 9ba2aa4a61437e0dd44dc8c5b48d3961b7b8ace1
push id: 35978
push user: shindli@mozilla.com
push date: Tue, 07 May 2019 09:44:39 +0000
treeherder: mozilla-central@7aee5a30dd15 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: glandium
bugs: 1052579
milestone: 68.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1052579 - Modify GTest for jemalloc_ptr_info() to check arenaId. r=glandium

In D25711, I added an arenaId member to `jemalloc_ptr_info_t` when `MOZ_DEBUG` is defined. This modifies the GTest for `jemalloc_ptr_info()` to ensure that the new member returns the correct value.

Differential Revision: https://phabricator.services.mozilla.com/D30087
memory/gtest/TestJemalloc.cpp
--- a/memory/gtest/TestJemalloc.cpp
+++ b/memory/gtest/TestJemalloc.cpp
@@ -80,74 +80,80 @@ TEST(Jemalloc, UsableSizeInAdvance)
 
   for (size_t n = 1_MiB; n < 8_MiB; n += 128_KiB)
     ASSERT_NO_FATAL_FAILURE(TestThree(n));
 }
 
 static int gStaticVar;
 
 bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
-            size_t aSize) {
-  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize;
+            size_t aSize, arena_id_t arenaId) {
+  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize
+#ifdef MOZ_DEBUG
+         && aInfo.arenaId == arenaId
+#endif
+      ;
 }
 
-bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr,
-                     size_t aPageSize) {
+bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr, size_t aPageSize,
+                     arena_id_t arenaId) {
   size_t pageSizeMask = aPageSize - 1;
 
   return jemalloc_ptr_is_freed_page(&aInfo) &&
          aInfo.addr == (void*)(uintptr_t(aAddr) & ~pageSizeMask) &&
-         aInfo.size == aPageSize;
+         aInfo.size == aPageSize
+#ifdef MOZ_DEBUG
+         && aInfo.arenaId == arenaId
+#endif
+      ;
 }
 
 TEST(Jemalloc, PtrInfo)
 {
-  // Some things might be running in other threads, so ensure our assumptions
-  // (e.g. about isFreedSmall and isFreedPage ratios below) are not altered by
-  // other threads.
-  jemalloc_thread_local_arena(true);
+  arena_id_t arenaId = moz_create_arena();
+  ASSERT_TRUE(arenaId != 0);
 
   jemalloc_stats_t stats;
   jemalloc_stats(&stats);
 
   jemalloc_ptr_info_t info;
   Vector<char*> small, large, huge;
 
   // For small (<= 2KiB) allocations, test every position within many possible
   // sizes.
   size_t small_max = stats.page_size / 2;
   for (size_t n = 0; n <= small_max; n += 8) {
-    auto p = (char*)malloc(n);
+    auto p = (char*)moz_arena_malloc(arenaId, n);
     size_t usable = moz_malloc_size_of(p);
     ASSERT_TRUE(small.append(p));
     for (size_t j = 0; j < usable; j++) {
       jemalloc_ptr_info(&p[j], &info);
-      ASSERT_TRUE(InfoEq(info, TagLiveSmall, p, usable));
+      ASSERT_TRUE(InfoEq(info, TagLiveSmall, p, usable, arenaId));
     }
   }
 
   // Similar for large (2KiB + 1 KiB .. 1MiB - 8KiB) allocations.
   for (size_t n = small_max + 1_KiB; n <= stats.large_max; n += 1_KiB) {
-    auto p = (char*)malloc(n);
+    auto p = (char*)moz_arena_malloc(arenaId, n);
     size_t usable = moz_malloc_size_of(p);
     ASSERT_TRUE(large.append(p));
     for (size_t j = 0; j < usable; j += 347) {
       jemalloc_ptr_info(&p[j], &info);
-      ASSERT_TRUE(InfoEq(info, TagLiveLarge, p, usable));
+      ASSERT_TRUE(InfoEq(info, TagLiveLarge, p, usable, arenaId));
     }
   }
 
   // Similar for huge (> 1MiB - 8KiB) allocations.
   for (size_t n = stats.chunksize; n <= 10_MiB; n += 512_KiB) {
-    auto p = (char*)malloc(n);
+    auto p = (char*)moz_arena_malloc(arenaId, n);
     size_t usable = moz_malloc_size_of(p);
     ASSERT_TRUE(huge.append(p));
     for (size_t j = 0; j < usable; j += 567) {
       jemalloc_ptr_info(&p[j], &info);
-      ASSERT_TRUE(InfoEq(info, TagLiveHuge, p, usable));
+      ASSERT_TRUE(InfoEq(info, TagLiveHuge, p, usable, arenaId));
     }
   }
 
   // The following loops check freed allocations. We step through the vectors
   // using prime-sized steps, which gives full coverage of the arrays while
   // avoiding deallocating in the same order we allocated.
   size_t len;
 
@@ -156,19 +162,19 @@ TEST(Jemalloc, PtrInfo)
   len = small.length();
   for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
     char* p = small[j];
     size_t usable = moz_malloc_size_of(p);
     free(p);
     for (size_t k = 0; k < usable; k++) {
       jemalloc_ptr_info(&p[k], &info);
       // There are two valid outcomes here.
-      if (InfoEq(info, TagFreedSmall, p, usable)) {
+      if (InfoEq(info, TagFreedSmall, p, usable, arenaId)) {
         isFreedSmall++;
-      } else if (InfoEqFreedPage(info, &p[k], stats.page_size)) {
+      } else if (InfoEqFreedPage(info, &p[k], stats.page_size, arenaId)) {
         isFreedPage++;
       } else {
         ASSERT_TRUE(false);
       }
     }
   }
   // There should be both FreedSmall and FreedPage results, but a lot more of
   // the former.
@@ -179,82 +185,83 @@ TEST(Jemalloc, PtrInfo)
   // Free the large allocations and recheck them.
   len = large.length();
   for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) {
     char* p = large[j];
     size_t usable = moz_malloc_size_of(p);
     free(p);
     for (size_t k = 0; k < usable; k += 357) {
       jemalloc_ptr_info(&p[k], &info);
-      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size));
+      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size, arenaId));
     }
   }
 
   // Free the huge allocations and recheck them.
   len = huge.length();
   for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) {
     char* p = huge[j];
     size_t usable = moz_malloc_size_of(p);
     free(p);
     for (size_t k = 0; k < usable; k += 587) {
       jemalloc_ptr_info(&p[k], &info);
-      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
     }
   }
 
   // Null ptr.
   jemalloc_ptr_info(nullptr, &info);
-  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
 
   // Near-null ptr.
   jemalloc_ptr_info((void*)0x123, &info);
-  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
 
   // Maximum address.
   jemalloc_ptr_info((void*)uintptr_t(-1), &info);
-  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
 
   // Stack memory.
   int stackVar;
   jemalloc_ptr_info(&stackVar, &info);
-  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
 
   // Code memory.
   jemalloc_ptr_info((const void*)&jemalloc_ptr_info, &info);
-  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
 
   // Static memory.
   jemalloc_ptr_info(&gStaticVar, &info);
-  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
 
   // Chunk header.
   UniquePtr<int> p = MakeUnique<int>();
   size_t chunksizeMask = stats.chunksize - 1;
   char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
   size_t chunkHeaderSize = stats.chunksize - stats.large_max - stats.page_size;
   for (size_t i = 0; i < chunkHeaderSize; i += 64) {
     jemalloc_ptr_info(&chunk[i], &info);
-    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
   }
 
   // Run header.
   size_t page_sizeMask = stats.page_size - 1;
   char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask);
   for (size_t i = 0; i < 4 * sizeof(void*); i++) {
     jemalloc_ptr_info(&run[i], &info);
-    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
   }
 
   // Entire chunk. It's impossible to check what is put into |info| for all of
   // these addresses; this is more about checking that we don't crash.
   for (size_t i = 0; i < stats.chunksize; i += 256) {
     jemalloc_ptr_info(&chunk[i], &info);
   }
 
-  jemalloc_thread_local_arena(false);
+  // Until Bug 1364359 is fixed it is unsafe to call moz_dispose_arena.
+  // moz_dispose_arena(arenaId);
 }
 
 size_t sSizes[] = {1,      42,      79,      918,     1.5_KiB,
                    73_KiB, 129_KiB, 1.1_MiB, 2.6_MiB, 5.1_MiB};
 
 TEST(Jemalloc, Arenas)
 {
   arena_id_t arena = moz_create_arena();
@@ -617,17 +624,18 @@ TEST(Jemalloc, JunkPoison)
   // Until Bug 1364359 is fixed it is unsafe to call moz_dispose_arena.
   // moz_dispose_arena(buf_arena);
 
 #  ifdef HAS_GDB_SLEEP_DURATION
   _gdb_sleep_duration = old_gdb_sleep_duration;
 #  endif
 }
 
-TEST(Jemalloc, GuardRegion) {
+TEST(Jemalloc, GuardRegion)
+{
   jemalloc_stats_t stats;
   jemalloc_stats(&stats);
 
 #  ifdef HAS_GDB_SLEEP_DURATION
   // Avoid death tests adding some unnecessary (long) delays.
   unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
   _gdb_sleep_duration = 0;
 #  endif
@@ -643,17 +651,17 @@ TEST(Jemalloc, GuardRegion) {
     void* ptr = moz_arena_malloc(arena, stats.page_size);
     ASSERT_TRUE(ptr != nullptr);
     ASSERT_TRUE(ptr_list.append(ptr));
   }
 
   void* last_ptr_in_chunk = ptr_list[ptr_list.length() - 1];
   void* extra_ptr = moz_arena_malloc(arena, stats.page_size);
   void* guard_page = (void*)ALIGNMENT_CEILING(
-    (uintptr_t)last_ptr_in_chunk + stats.page_size, stats.page_size);
+      (uintptr_t)last_ptr_in_chunk + stats.page_size, stats.page_size);
   jemalloc_ptr_info_t info;
   jemalloc_ptr_info(guard_page, &info);
   ASSERT_TRUE(jemalloc_ptr_is_freed_page(&info));
   ASSERT_TRUE(info.tag == TagFreedPageDecommitted);
 
   ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");
 
   for (void* ptr : ptr_list) {