Bug 1389305 - Add jemalloc_ptr_info() and moz_malloc_enclosing_size_of(). r=glandium.
☠☠ backed out by 584579605f5f ☠☠
author: Nicholas Nethercote <nnethercote@mozilla.com>
date: Thu, 24 Aug 2017 19:37:27 +1000
changeset: 427448 f232b5b1a0c74b84c5d7f4ecb131d25a92601015
parent: 427431 a2723b65046096e587b968a3b6b1cb056c914b78
child: 427449 5b30f38210e155ef046b55ef22e19d771c13540e
push id: 7761
push user: jlund@mozilla.com
push date: Fri, 15 Sep 2017 00:19:52 +0000
reviewers: glandium
bugs: 1389305
milestone: 57.0a1
Bug 1389305 - Add jemalloc_ptr_info() and moz_malloc_enclosing_size_of(). r=glandium.

jemalloc_ptr_info() gives info about any pointer, such as whether it's within a live or free allocation, and if so, info about that allocation. It's useful for debugging.

moz_malloc_enclosing_size_of() uses jemalloc_ptr_info() to measure the size of an allocation from an interior pointer. It's useful for memory reporting, especially for Rust code.
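As a point of reference, here is a minimal sketch of how a caller might exercise the two new entry points. The DescribePtr() helper is hypothetical and not part of this patch; it assumes a MOZ_MEMORY build where mozmemory.h and mozilla/mozalloc.h are available.

#include <cstdio>

#include "mozilla/mozalloc.h"   // moz_malloc_enclosing_size_of()
#include "mozmemory.h"          // jemalloc_ptr_info(), jemalloc_ptr_info_t

// Hypothetical debugging helper: classify an arbitrary address.
static void
DescribePtr(const void* aPtr)
{
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(aPtr, &info);

  if (jemalloc_ptr_is_live(&info)) {
    // aPtr is inside a live small/large/huge allocation; info.addr is the
    // allocation's base address and info.size its full size.
    printf("live allocation: base=%p size=%zu\n", info.addr, info.size);
  } else if (jemalloc_ptr_is_freed(&info)) {
    // A freed small allocation or a freed page, depending on info.tag.
    printf("freed: base=%p size=%zu\n", info.addr, info.size);
  } else {
    // TagUnknown: stack, static, code, or otherwise not a heap pointer.
    printf("not a heap pointer\n");
  }

  // For memory reporting, the enclosing size alone is usually enough; it is
  // 0 for anything that is not inside a live heap allocation.
  printf("enclosing size: %zu\n", moz_malloc_enclosing_size_of(aPtr));
}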
memory/build/malloc_decls.h
memory/build/mozmemory.h
memory/build/mozmemory_wrap.c
memory/build/mozmemory_wrap.h
memory/gtest/TestJemalloc.cpp
memory/mozalloc/mozalloc.cpp
memory/mozalloc/mozalloc.h
memory/mozjemalloc/mozjemalloc.cpp
memory/mozjemalloc/mozjemalloc_types.h
memory/replace/replace/ReplaceMalloc.cpp
mozglue/build/mozglue.def.in
mozglue/build/replace_malloc.mk
--- a/memory/build/malloc_decls.h
+++ b/memory/build/malloc_decls.h
@@ -57,15 +57,16 @@ MALLOC_DECL(valloc, void *, size_t)
 MALLOC_DECL(malloc_usable_size, size_t, usable_ptr_t)
 MALLOC_DECL(malloc_good_size, size_t, size_t)
 #  endif
 #  if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC
 MALLOC_DECL_VOID(jemalloc_stats, jemalloc_stats_t *)
 MALLOC_DECL_VOID(jemalloc_purge_freed_pages)
 MALLOC_DECL_VOID(jemalloc_free_dirty_pages)
 MALLOC_DECL_VOID(jemalloc_thread_local_arena, bool)
+MALLOC_DECL_VOID(jemalloc_ptr_info, const void*, jemalloc_ptr_info_t*)
 #  endif
 
 #  undef MALLOC_DECL_VOID
 #endif /* MALLOC_DECL */
 
 #undef MALLOC_DECL
 #undef MALLOC_FUNCS
--- a/memory/build/mozmemory.h
+++ b/memory/build/mozmemory.h
@@ -8,16 +8,17 @@
 /*
  * This header is meant to be used when the following functions are
  * necessary:
  *   - malloc_good_size (used to be called je_malloc_usable_in_advance)
  *   - jemalloc_stats
  *   - jemalloc_purge_freed_pages
  *   - jemalloc_free_dirty_pages
  *   - jemalloc_thread_local_arena
+ *   - jemalloc_ptr_info
  */
 
 #ifndef MOZ_MEMORY
 #  error Should not include mozmemory.h when MOZ_MEMORY is not set
 #endif
 
 #include "mozmemory_wrap.h"
 #include "mozilla/Attributes.h"
@@ -82,9 +83,15 @@ MOZ_JEMALLOC_API void jemalloc_purge_fre
  * down subsequent allocations so it is recommended to use it only when
  * memory needs to be reclaimed at all costs (see bug 805855). This function
  * provides functionality similar to mallctl("arenas.purge") in jemalloc 3.
  */
 MOZ_JEMALLOC_API void jemalloc_free_dirty_pages();
 
 MOZ_JEMALLOC_API void jemalloc_thread_local_arena(bool enabled);
 
+/*
+ * Provide information about any allocation enclosing the given address.
+ */
+MOZ_JEMALLOC_API void jemalloc_ptr_info(const void* ptr,
+                                        jemalloc_ptr_info_t* info);
+
 #endif /* mozmemory_h */
--- a/memory/build/mozmemory_wrap.c
+++ b/memory/build/mozmemory_wrap.c
@@ -5,16 +5,17 @@
 #include <string.h>
 #include "mozmemory_wrap.h"
 #include "mozilla/Types.h"
 
 /* Declare malloc implementation functions with the right return and
  * argument types. */
 #define MALLOC_DECL(name, return_type, ...) \
   MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
+#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
 #include "malloc_decls.h"
 
 #ifdef MOZ_WRAP_NEW_DELETE
 /* operator new(unsigned int) */
 MOZ_MEMORY_API void *
 mozmem_malloc_impl(_Znwj)(unsigned int size)
 {
   return malloc_impl(size);
--- a/memory/build/mozmemory_wrap.h
+++ b/memory/build/mozmemory_wrap.h
@@ -32,16 +32,17 @@
  *   - strdup
  *   - wcsdup (Windows only)
  *
  * - jemalloc specific functions:
  *   - jemalloc_stats
  *   - jemalloc_purge_freed_pages
  *   - jemalloc_free_dirty_pages
  *   - jemalloc_thread_local_arena
+ *   - jemalloc_ptr_info
  *   (these functions are native to mozjemalloc)
  *
  * These functions are all exported as part of libmozglue (see
  * $(topsrcdir)/mozglue/build/Makefile.in), with a few implementation
  * peculiarities:
  *
  * - On Windows, the malloc implementation functions are all prefixed with
  *   "je_", the duplication functions are prefixed with "wrap_", and jemalloc
@@ -202,10 +203,12 @@
 #endif
 
 /* Jemalloc specific function */
 #define jemalloc_stats_impl              mozmem_jemalloc_impl(jemalloc_stats)
 #define jemalloc_purge_freed_pages_impl  mozmem_jemalloc_impl(jemalloc_purge_freed_pages)
 #define jemalloc_free_dirty_pages_impl   mozmem_jemalloc_impl(jemalloc_free_dirty_pages)
 #define jemalloc_thread_local_arena_impl \
           mozmem_jemalloc_impl(jemalloc_thread_local_arena)
+#define jemalloc_ptr_info_impl \
+          mozmem_jemalloc_impl(jemalloc_ptr_info)
 
 #endif /* mozmemory_wrap_h */
--- a/memory/gtest/TestJemalloc.cpp
+++ b/memory/gtest/TestJemalloc.cpp
@@ -1,51 +1,220 @@
-/* -*-  Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/mozalloc.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Vector.h"
 #include "mozmemory.h"
 
 #include "gtest/gtest.h"
 
+using namespace mozilla;
+
 static inline void
 TestOne(size_t size)
 {
-    size_t req = size;
-    size_t adv = malloc_good_size(req);
-    char* p = (char*)malloc(req);
-    size_t usable = moz_malloc_usable_size(p);
-    // NB: Using EXPECT here so that we still free the memory on failure.
-    EXPECT_EQ(adv, usable) <<
-           "malloc_good_size(" << req << ") --> " << adv << "; "
-           "malloc_usable_size(" << req << ") --> " << usable;
-    free(p);
+  size_t req = size;
+  size_t adv = malloc_good_size(req);
+  char* p = (char*)malloc(req);
+  size_t usable = moz_malloc_usable_size(p);
+  // NB: Using EXPECT here so that we still free the memory on failure.
+  EXPECT_EQ(adv, usable) <<
+         "malloc_good_size(" << req << ") --> " << adv << "; "
+         "malloc_usable_size(" << req << ") --> " << usable;
+  free(p);
 }
 
 static inline void
 TestThree(size_t size)
 {
-    ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
-    ASSERT_NO_FATAL_FAILURE(TestOne(size));
-    ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
+  ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
+  ASSERT_NO_FATAL_FAILURE(TestOne(size));
+  ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
 }
 
+#define K   * 1024
+#define M   * 1024 * 1024
+
 TEST(Jemalloc, UsableSizeInAdvance)
 {
-  #define K   * 1024
-  #define M   * 1024 * 1024
-
   /*
   * Test every size up to a certain point, then (N-1, N, N+1) triplets for
   * various sizes beyond that.
    */
 
   for (size_t n = 0; n < 16 K; n++)
     ASSERT_NO_FATAL_FAILURE(TestOne(n));
 
   for (size_t n = 16 K; n < 1 M; n += 4 K)
     ASSERT_NO_FATAL_FAILURE(TestThree(n));
 
   for (size_t n = 1 M; n < 8 M; n += 128 K)
     ASSERT_NO_FATAL_FAILURE(TestThree(n));
 }
+
+static int gStaticVar;
+
+bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
+            size_t aSize)
+{
+  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize;
+}
+
+bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr, size_t aPageSize)
+{
+  size_t pageSizeMask = aPageSize - 1;
+
+  return jemalloc_ptr_is_freed_page(&aInfo) &&
+         aInfo.addr == (void*)(uintptr_t(aAddr) & ~pageSizeMask) &&
+         aInfo.size == aPageSize;
+}
+
+TEST(Jemalloc, PtrInfo)
+{
+  jemalloc_stats_t stats;
+  jemalloc_stats(&stats);
+
+  jemalloc_ptr_info_t info;
+  Vector<char*> small, large, huge;
+
+  // For small (<= 2KiB) allocations, test every position within many possible
+  // sizes.
+  size_t small_max = stats.page_size / 2;
+  for (size_t n = 0; n <= small_max; n += 8) {
+    auto p = (char*)malloc(n);
+    size_t usable = moz_malloc_size_of(p);
+    ASSERT_TRUE(small.append(p));
+    for (size_t j = 0; j < usable; j++) {
+      jemalloc_ptr_info(&p[j], &info);
+      ASSERT_TRUE(InfoEq(info, TagLiveSmall, p, usable));
+    }
+  }
+
+  // Similar for large (2KiB + 1 KiB .. 1MiB - 8KiB) allocations.
+  for (size_t n = small_max + 1 K; n <= stats.large_max; n += 1 K) {
+    auto p = (char*)malloc(n);
+    size_t usable = moz_malloc_size_of(p);
+    ASSERT_TRUE(large.append(p));
+    for (size_t j = 0; j < usable; j += 347) {
+      jemalloc_ptr_info(&p[j], &info);
+      ASSERT_TRUE(InfoEq(info, TagLiveLarge, p, usable));
+    }
+  }
+
+  // Similar for huge (> 1MiB - 8KiB) allocations.
+  for (size_t n = stats.chunksize; n <= 10 M; n += 512 K) {
+    auto p = (char*)malloc(n);
+    size_t usable = moz_malloc_size_of(p);
+    ASSERT_TRUE(huge.append(p));
+    for (size_t j = 0; j < usable; j += 567) {
+      jemalloc_ptr_info(&p[j], &info);
+      ASSERT_TRUE(InfoEq(info, TagLiveHuge, p, usable));
+    }
+  }
+
+  // The following loops check freed allocations. We step through the vectors
+  // using prime-sized steps, which gives full coverage of the arrays while
+  // avoiding deallocating in the same order we allocated.
+  size_t len;
+
+  // Free the small allocations and recheck them.
+  int isFreedSmall = 0, isFreedPage = 0;
+  len = small.length();
+  for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
+    char* p = small[j];
+    size_t usable = moz_malloc_size_of(p);
+    free(p);
+    for (size_t k = 0; k < usable; k++) {
+      jemalloc_ptr_info(&p[k], &info);
+      // There are two valid outcomes here.
+      if (InfoEq(info, TagFreedSmall, p, usable)) {
+        isFreedSmall++;
+      } else if (InfoEqFreedPage(info, &p[k], stats.page_size)) {
+        isFreedPage++;
+      } else {
+        ASSERT_TRUE(false);
+      }
+    }
+  }
+  // There should be a lot more FreedSmall results than FreedPage results.
+  ASSERT_TRUE(isFreedSmall / isFreedPage > 10);
+
+  // Free the large allocations and recheck them.
+  len = large.length();
+  for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) {
+    char* p = large[j];
+    size_t usable = moz_malloc_size_of(p);
+    free(p);
+    for (size_t k = 0; k < usable; k += 357) {
+      jemalloc_ptr_info(&p[k], &info);
+      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size));
+    }
+  }
+
+  // Free the huge allocations and recheck them.
+  len = huge.length();
+  for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) {
+    char* p = huge[j];
+    size_t usable = moz_malloc_size_of(p);
+    free(p);
+    for (size_t k = 0; k < usable; k += 587) {
+      jemalloc_ptr_info(&p[k], &info);
+      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+    }
+  }
+
+  // Null ptr.
+  jemalloc_ptr_info(nullptr, &info);
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+
+  // Near-null ptr.
+  jemalloc_ptr_info((void*)0x123, &info);
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+
+  // Maximum address.
+  jemalloc_ptr_info((void*)uintptr_t(-1), &info);
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+
+  // Stack memory.
+  int stackVar;
+  jemalloc_ptr_info(&stackVar, &info);
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+
+  // Code memory.
+  jemalloc_ptr_info((const void*)&jemalloc_ptr_info, &info);
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+
+  // Static memory.
+  jemalloc_ptr_info(&gStaticVar, &info);
+  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+
+  // Chunk header.
+  UniquePtr<int> p = MakeUnique<int>();
+  size_t chunksizeMask = stats.chunksize - 1;
+  char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
+  size_t chunkHeaderSize = stats.chunksize - stats.large_max;
+  for (size_t i = 0; i < chunkHeaderSize; i += 64) {
+    jemalloc_ptr_info(&chunk[i], &info);
+    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  }
+
+  // Run header.
+  size_t page_sizeMask = stats.page_size - 1;
+  char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask);
+  for (size_t i = 0; i < 4 * sizeof(void*); i++) {
+    jemalloc_ptr_info(&run[i], &info);
+    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U));
+  }
+
+  // Entire chunk. It's impossible to check what is put into |info| for all of
+  // these addresses; this is more about checking that we don't crash.
+  for (size_t i = 0; i < stats.chunksize; i += 256) {
+    jemalloc_ptr_info(&chunk[i], &info);
+  }
+}
+
+#undef K
+#undef M
--- a/memory/mozalloc/mozalloc.cpp
+++ b/memory/mozalloc/mozalloc.cpp
@@ -61,16 +61,17 @@ MOZ_MEMORY_API char *strndup_impl(const 
 #endif
 
 #include <errno.h>
 #include <new>                  // for std::bad_alloc
 #include <string.h>
 
 #include <sys/types.h>
 
+#include "mozilla/Assertions.h"
 #include "mozilla/mozalloc.h"
 #include "mozilla/mozalloc_oom.h"  // for mozalloc_handle_oom
 
 #ifdef __GNUC__
 #define LIKELY(x)    (__builtin_expect(!!(x), 1))
 #define UNLIKELY(x)  (__builtin_expect(!!(x), 0))
 #else
 #define LIKELY(x)    (x)
@@ -209,13 +210,35 @@ moz_malloc_usable_size(void *ptr)
     return malloc_usable_size_impl(ptr);
 #elif defined(XP_WIN)
     return _msize(ptr);
 #else
     return 0;
 #endif
 }
 
-size_t moz_malloc_size_of(const void *ptr)
+size_t
+moz_malloc_size_of(const void *ptr)
 {
     return moz_malloc_usable_size((void *)ptr);
 }
+
+#if defined(MOZ_MEMORY)
+#include "mozjemalloc_types.h"
+// mozmemory.h declares jemalloc_ptr_info(), but including that header in this
+// file is complicated. So we just redeclare it here instead, and include
+// mozjemalloc_types.h for jemalloc_ptr_info_t.
+MOZ_JEMALLOC_API void jemalloc_ptr_info(const void* ptr,
+                                        jemalloc_ptr_info_t* info);
 #endif
+
+size_t
+moz_malloc_enclosing_size_of(const void *ptr)
+{
+#if defined(MOZ_MEMORY)
+    jemalloc_ptr_info_t info;
+    jemalloc_ptr_info(ptr, &info);
+    return jemalloc_ptr_is_live(&info) ? info.size : 0;
+#else
+    return 0;
+#endif
+}
+#endif
--- a/memory/mozalloc/mozalloc.h
+++ b/memory/mozalloc/mozalloc.h
@@ -93,16 +93,22 @@ MFBT_API void* moz_xrealloc(void* ptr, s
 
 MFBT_API char* moz_xstrdup(const char* str)
     MOZ_ALLOCATOR;
 
 MFBT_API size_t moz_malloc_usable_size(void *ptr);
 
 MFBT_API size_t moz_malloc_size_of(const void *ptr);
 
+/*
+ * Like moz_malloc_size_of(), but works reliably with interior pointers, i.e.
+ * pointers into the middle of a live allocation.
+ */
+MFBT_API size_t moz_malloc_enclosing_size_of(const void *ptr);
+
 #if defined(HAVE_STRNDUP)
 MFBT_API char* moz_xstrndup(const char* str, size_t strsize)
     MOZ_ALLOCATOR;
 #endif /* if defined(HAVE_STRNDUP) */
 
 
 #if defined(HAVE_POSIX_MEMALIGN)
 MFBT_API MOZ_MUST_USE
--- a/memory/mozjemalloc/mozjemalloc.cpp
+++ b/memory/mozjemalloc/mozjemalloc.cpp
@@ -1455,16 +1455,41 @@ extent_ad_comp(extent_node_t *a, extent_
 
 	return ((a_addr > b_addr) - (a_addr < b_addr));
 }
 
 /* Wrap red-black tree macros in functions. */
 rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
     extent_ad_comp)
 
+static inline int
+extent_bounds_comp(extent_node_t* aKey, extent_node_t* aNode)
+{
+  uintptr_t key_addr = (uintptr_t)aKey->addr;
+  uintptr_t node_addr = (uintptr_t)aNode->addr;
+  size_t node_size = aNode->size;
+
+  // Is aKey within aNode?
+  if (node_addr <= key_addr && key_addr < node_addr + node_size) {
+    return 0;
+  }
+
+  return ((key_addr > node_addr) - (key_addr < node_addr));
+}
+
+/*
+ * This is an expansion of just the search function from the rb_wrap macro.
+ */
+static extent_node_t *
+extent_tree_bounds_search(extent_tree_t *tree, extent_node_t *key) {
+    extent_node_t *ret;
+    rb_search(extent_node_t, link_ad, extent_bounds_comp, tree, key, ret);
+    return ret;
+}
+
 /*
  * End extent tree code.
  */
 /******************************************************************************/
 /*
  * Begin chunk management functions.
  */
 
@@ -3539,16 +3564,144 @@ isalloc(const void *ptr)
 		ret = node->size;
 
 		malloc_mutex_unlock(&huge_mtx);
 	}
 
 	return (ret);
 }
 
+MOZ_JEMALLOC_API void
+jemalloc_ptr_info_impl(const void* aPtr, jemalloc_ptr_info_t* aInfo)
+{
+  arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aPtr);
+
+  // Is the pointer null, or within one chunk's size of null?
+  if (!chunk) {
+    *aInfo = { TagUnknown, nullptr, 0 };
+    return;
+  }
+
+  // Look for huge allocations before looking for |chunk| in chunk_rtree.
+  // This is necessary because |chunk| won't be in chunk_rtree if it's
+  // the second or subsequent chunk in a huge allocation.
+  extent_node_t* node;
+  extent_node_t key;
+  malloc_mutex_lock(&huge_mtx);
+  key.addr = const_cast<void*>(aPtr);
+  node = extent_tree_bounds_search(&huge, &key);
+  if (node) {
+    *aInfo = { TagLiveHuge, node->addr, node->size };
+  }
+  malloc_mutex_unlock(&huge_mtx);
+  if (node) {
+    return;
+  }
+
+  // It's not a huge allocation. Check if we have a known chunk.
+  if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
+    *aInfo = { TagUnknown, nullptr, 0 };
+    return;
+  }
+
+  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+
+  // Get the page number within the chunk.
+  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
+  if (pageind < arena_chunk_header_npages) {
+    // Within the chunk header.
+    *aInfo = { TagUnknown, nullptr, 0 };
+    return;
+  }
+
+  size_t mapbits = chunk->map[pageind].bits;
+
+  if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
+    PtrInfoTag tag = TagFreedPageDirty;
+    if (mapbits & CHUNK_MAP_DIRTY)
+      tag = TagFreedPageDirty;
+    else if (mapbits & CHUNK_MAP_DECOMMITTED)
+      tag = TagFreedPageDecommitted;
+    else if (mapbits & CHUNK_MAP_MADVISED)
+      tag = TagFreedPageMadvised;
+    else if (mapbits & CHUNK_MAP_ZEROED)
+      tag = TagFreedPageZeroed;
+    else
+      MOZ_CRASH();
+
+    void* pageaddr = (void*)(uintptr_t(aPtr) & ~pagesize_mask);
+    *aInfo = { tag, pageaddr, pagesize };
+    return;
+  }
+
+  if (mapbits & CHUNK_MAP_LARGE) {
+    // It's a large allocation. Only the first page of a large
+    // allocation contains its size, so if the address is not in
+    // the first page, scan back to find the allocation size.
+    size_t size;
+    while (true) {
+      size = mapbits & ~pagesize_mask;
+      if (size != 0) {
+        break;
+      }
+
+      // The following two return paths shouldn't occur in
+      // practice unless there is heap corruption.
+
+      pageind--;
+      MOZ_DIAGNOSTIC_ASSERT(pageind >= arena_chunk_header_npages);
+      if (pageind < arena_chunk_header_npages) {
+        *aInfo = { TagUnknown, nullptr, 0 };
+        return;
+      }
+
+      mapbits = chunk->map[pageind].bits;
+      MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
+      if (!(mapbits & CHUNK_MAP_LARGE)) {
+        *aInfo = { TagUnknown, nullptr, 0 };
+        return;
+      }
+    }
+
+    void* addr = ((char*)chunk) + (pageind << pagesize_2pow);
+    *aInfo = { TagLiveLarge, addr, size };
+    return;
+  }
+
+  // It must be a small allocation.
+
+  auto run = (arena_run_t *)(mapbits & ~pagesize_mask);
+  MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
+
+  // The allocation size is stored in the run metadata.
+  size_t size = run->bin->reg_size;
+
+  // Address of the first possible pointer in the run after its headers.
+  uintptr_t reg0_addr = (uintptr_t)run + run->bin->reg0_offset;
+  if (aPtr < (void*)reg0_addr) {
+    // In the run header.
+    *aInfo = { TagUnknown, nullptr, 0 };
+    return;
+  }
+
+  // Position in the run.
+  unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;
+
+  // Pointer to the allocation's base address.
+  void* addr = (void*)(reg0_addr + regind * size);
+
+  // Check if the allocation has been freed.
+  unsigned elm = regind >> (SIZEOF_INT_2POW + 3);
+  unsigned bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+  PtrInfoTag tag = ((run->regs_mask[elm] & (1U << bit)))
+                 ? TagFreedSmall : TagLiveSmall;
+
+  *aInfo = { tag, addr, size};
+}
+
 static inline void
 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm)
 {
 	arena_run_t *run;
 	arena_bin_t *bin;
 	size_t size;
 
@@ -4767,16 +4920,17 @@ jemalloc_stats_impl(jemalloc_stats_t *st
 	 */
 	stats->opt_junk = opt_junk;
 	stats->opt_zero = opt_zero;
 	stats->narenas = narenas;
 	stats->quantum = quantum;
 	stats->small_max = small_max;
 	stats->large_max = arena_maxclass;
 	stats->chunksize = chunksize;
+	stats->page_size = pagesize;
 	stats->dirty_max = opt_dirty_max;
 
 	/*
 	 * Gather current memory usage statistics.
 	 */
 	stats->mapped = 0;
 	stats->allocated = 0;
         stats->waste = 0;
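As background (not part of the patch), the extent_bounds_comp()/extent_tree_bounds_search() pair added above works because the comparator reports equality whenever the key address falls anywhere inside a node's [addr, addr + size) range, so an ordinary ordered search comes back with the enclosing extent. A minimal standalone sketch of the same idea over a sorted array (the real code uses the red-black tree macros; the names below are illustrative only):

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Extent { uintptr_t addr; size_t size; };

// Three-way comparator: 0 means "aKey lies within aNode".
static int
BoundsComp(uintptr_t aKey, const Extent& aNode)
{
  if (aNode.addr <= aKey && aKey < aNode.addr + aNode.size) {
    return 0;
  }
  return (aKey > aNode.addr) - (aKey < aNode.addr);
}

// Binary search over non-overlapping extents sorted by address; a balanced
// tree search proceeds the same way, one comparison per level.
static const Extent*
FindEnclosing(const Extent* aExtents, size_t aLen, uintptr_t aKey)
{
  size_t lo = 0, hi = aLen;
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    int c = BoundsComp(aKey, aExtents[mid]);
    if (c == 0) {
      return &aExtents[mid];
    }
    if (c < 0) {
      hi = mid;
    } else {
      lo = mid + 1;
    }
  }
  return nullptr;
}

int main()
{
  Extent extents[] = { { 0x1000, 0x1000 }, { 0x4000, 0x2000 } };
  const Extent* e = FindEnclosing(extents, 2, 0x4800);
  // Prints the base address of the enclosing extent (0x4000 here).
  printf("%p\n", e ? (void*)e->addr : nullptr);
  return 0;
}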
--- a/memory/mozjemalloc/mozjemalloc_types.h
+++ b/memory/mozjemalloc/mozjemalloc_types.h
@@ -55,16 +55,17 @@ typedef struct {
 	 */
 	bool	opt_junk;	/* Fill allocated memory with kAllocJunk? */
 	bool	opt_zero;	/* Fill allocated memory with 0x0? */
 	size_t	narenas;	/* Number of arenas. */
 	size_t	quantum;	/* Allocation quantum. */
 	size_t	small_max;	/* Max quantum-spaced allocation size. */
 	size_t	large_max;	/* Max sub-chunksize allocation size. */
 	size_t	chunksize;	/* Size of each virtual memory mapping. */
+	size_t  page_size;	/* Size of pages. */
 	size_t	dirty_max;	/* Max dirty pages per arena. */
 
 	/*
 	 * Current memory usage statistics.
 	 */
 	size_t	mapped;		/* Bytes mapped (not necessarily committed). */
 	size_t	allocated;	/* Bytes allocated (committed, in use by application). */
         size_t  waste;          /* Bytes committed, not in use by the
@@ -72,13 +73,77 @@ typedef struct {
                                    unused (i.e., not dirty). */
         size_t	page_cache;	/* Committed, unused pages kept around as a
                                    cache.  (jemalloc calls these "dirty".) */
         size_t  bookkeeping;    /* Committed bytes used internally by the
                                    allocator. */
 	size_t bin_unused; /* Bytes committed to a bin but currently unused. */
 } jemalloc_stats_t;
 
+enum PtrInfoTag {
+  // The pointer is not currently known to the allocator.
+  // 'addr' and 'size' are always 0.
+  TagUnknown,
+
+  // The pointer is within a live allocation.
+  // 'addr' and 'size' describe the allocation.
+  TagLiveSmall,
+  TagLiveLarge,
+  TagLiveHuge,
+
+  // The pointer is within a small freed allocation.
+  // 'addr' and 'size' describe the allocation.
+  TagFreedSmall,
+
+  // The pointer is within a freed page. Details about the original
+  // allocation, including its size, are not available.
+  // 'addr' and 'size' describe the page.
+  TagFreedPageDirty,
+  TagFreedPageDecommitted,
+  TagFreedPageMadvised,
+  TagFreedPageZeroed,
+};
+
+/*
+ * The information in jemalloc_ptr_info_t could be represented in a variety of
+ * ways. The chosen representation has the following properties.
+ * - The number of fields is minimized.
+ * - The 'tag' field unambiguously defines the meaning of the subsequent fields.
+ * Helper functions are used to group together related categories of tags.
+ */
+typedef struct {
+  enum PtrInfoTag tag;
+  void* addr;     // meaning depends on tag; see above
+  size_t size;    // meaning depends on tag; see above
+} jemalloc_ptr_info_t;
+
+static inline jemalloc_bool
+jemalloc_ptr_is_live(jemalloc_ptr_info_t* info)
+{
+  return info->tag == TagLiveSmall ||
+         info->tag == TagLiveLarge ||
+         info->tag == TagLiveHuge;
+}
+
+static inline jemalloc_bool
+jemalloc_ptr_is_freed(jemalloc_ptr_info_t* info)
+{
+  return info->tag == TagFreedSmall ||
+         info->tag == TagFreedPageDirty ||
+         info->tag == TagFreedPageDecommitted ||
+         info->tag == TagFreedPageMadvised ||
+         info->tag == TagFreedPageZeroed;
+}
+
+static inline jemalloc_bool
+jemalloc_ptr_is_freed_page(jemalloc_ptr_info_t* info)
+{
+  return info->tag == TagFreedPageDirty ||
+         info->tag == TagFreedPageDecommitted ||
+         info->tag == TagFreedPageMadvised ||
+         info->tag == TagFreedPageZeroed;
+}
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
 
 #endif /* _JEMALLOC_TYPES_H_ */
--- a/memory/replace/replace/ReplaceMalloc.cpp
+++ b/memory/replace/replace/ReplaceMalloc.cpp
@@ -256,8 +256,19 @@ void
 replace_jemalloc_thread_local_arena(bool aEnabled)
 {
   gFuncs->jemalloc_thread_local_arena(aEnabled);
   const malloc_hook_table_t* hook_table = gHookTable;
   if (hook_table && hook_table->jemalloc_thread_local_arena_hook) {
     hook_table->jemalloc_thread_local_arena_hook(aEnabled);
   }
 }
+
+void
+replace_jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
+{
+  gFuncs->jemalloc_ptr_info(aPtr, aInfo);
+  const malloc_hook_table_t* hook_table = gHookTable;
+  if (hook_table && hook_table->jemalloc_ptr_info_hook) {
+    hook_table->jemalloc_ptr_info_hook(aPtr, aInfo);
+  }
+}
+
--- a/mozglue/build/mozglue.def.in
+++ b/mozglue/build/mozglue.def.in
@@ -28,13 +28,15 @@ EXPORTS
 #endif
   _aligned_malloc
   strndup=wrap_strndup
   strdup=wrap_strdup
   _strdup=wrap_strdup
   wcsdup=wrap_wcsdup
   _wcsdup=wrap_wcsdup
   jemalloc_stats
+  jemalloc_purge_freed_pages
   jemalloc_free_dirty_pages
   jemalloc_thread_local_arena
+  jemalloc_ptr_info
   ; A hack to work around the CRT (see giant comment in Makefile.in)
   frex=dumb_free_thunk
 #endif
--- a/mozglue/build/replace_malloc.mk
+++ b/mozglue/build/replace_malloc.mk
@@ -15,12 +15,13 @@ OS_LDFLAGS += \
   -Wl,-U,_replace_memalign \
   -Wl,-U,_replace_valloc \
   -Wl,-U,_replace_malloc_usable_size \
   -Wl,-U,_replace_malloc_good_size \
   -Wl,-U,_replace_jemalloc_stats \
   -Wl,-U,_replace_jemalloc_purge_freed_pages \
   -Wl,-U,_replace_jemalloc_free_dirty_pages \
   -Wl,-U,_replace_jemalloc_thread_local_arena \
+  -Wl,-U,_replace_jemalloc_ptr_info \
   $(NULL)
 
 EXTRA_DEPS += $(topsrcdir)/mozglue/build/replace_malloc.mk
 endif