Bug 1294732 - Back out all of bug 1271165 as it has served its purpose. r=glandium
author: Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
date: Tue, 23 Aug 2016 08:45:00 -0400
changeset 311107 ab461807ca14d4ad55d9f83ec975d87065a24419
parent 311106 aebc13fe16b3dfe2f8a3b5cc8e88856fb3b9589d
child 311108 1442a6ab4296039c44f3e80a7a0b1aea6d74f970
push id: 81042
push user: ryanvm@gmail.com
push date: Thu, 25 Aug 2016 14:13:22 +0000
treeherder: mozilla-inbound@ab461807ca14 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: glandium
bugs: 1294732, 1271165
milestone: 51.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1294732 - Back out all of bug 1271165 as it has served its purpose. r=glandium
js/public/Utility.h
js/src/jit/JitAllocPolicy.h
js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
memory/build/malloc_decls.h
memory/build/mozjemalloc_compat.c
memory/build/mozmemory_wrap.c
memory/build/mozmemory_wrap.h
memory/build/replace_malloc.c
memory/mozalloc/mozalloc.cpp
memory/mozjemalloc/jemalloc.c
memory/replace/replace/ReplaceMalloc.cpp
mozglue/build/mozglue.def.in
mozglue/build/replace_malloc.mk
--- a/js/public/Utility.h
+++ b/js/public/Utility.h
@@ -43,22 +43,16 @@ JS_Assert(const char* s, const char* fil
 
 /*
  * Custom allocator support for SpiderMonkey
  */
 #if defined JS_USE_CUSTOM_ALLOCATOR
 # include "jscustomallocator.h"
 #else
 
-#include "mozilla/Types.h"
-MOZ_BEGIN_EXTERN_C
-MFBT_API void malloc_protect(void* ptr, uint32_t* id);
-MFBT_API void malloc_unprotect(void* ptr, uint32_t* id);
-MOZ_END_EXTERN_C
-
 namespace js {
 namespace oom {
 
 /*
  * To make testing OOM in certain helper threads more effective,
  * allow restricting the OOM testing to a certain helper thread
  * type. This allows us to fail e.g. in off-thread script parsing
  * without causing an OOM in the main thread first.
@@ -258,73 +252,16 @@ static inline void* js_realloc(void* p, 
     return realloc(p, bytes);
 }
 
 static inline void js_free(void* p)
 {
     free(p);
 }
 
-/*
- * js_malloc_protect marks the region referenced by |ptr| as protected in
- * jemalloc by a unique ID. As a result, the region cannot be modified through
- * calls to allocation functions (realloc and free). Note that this only
- * protects against access through jemalloc - the memory can still be written
- * to by anyone.
- */
-static inline void js_malloc_protect(void* ptr, uint32_t* id)
-{
-    malloc_protect(ptr, id);
-}
-
-/*
- * js_malloc_unprotect must be called with the correct ID to release a
- * protected region before anyone can modify it. The |_protected| allocation
- * functions below automate this process of protecting and unprotecting memory.
- */
-static inline void js_malloc_unprotect(void* ptr, uint32_t* id)
-{
-    malloc_unprotect(ptr, id);
-}
-
-static inline void* js_malloc_protected(size_t bytes, uint32_t* id)
-{
-    void* ret = js_malloc(bytes);
-    js_malloc_protect(ret, id);
-    return ret;
-}
-
-static inline void* js_calloc_protected(size_t bytes, uint32_t* id)
-{
-    void* ret = js_calloc(bytes);
-    js_malloc_protect(ret, id);
-    return ret;
-}
-
-static inline void* js_calloc_protected(size_t nmemb, size_t size, uint32_t* id)
-{
-    void* ret = js_calloc(nmemb, size);
-    js_malloc_protect(ret, id);
-    return ret;
-}
-
-static inline void* js_realloc_protected(void* p, size_t bytes, uint32_t* id)
-{
-    js_malloc_unprotect(p, id);
-    void* ret = js_realloc(p, bytes);
-    js_malloc_protect(ret ? ret : p, id);
-    return ret;
-}
-
-static inline void js_free_protected(void* p, uint32_t* id)
-{
-    js_malloc_unprotect(p, id);
-    js_free(p);
-}
-
 static inline char* js_strdup(const char* s)
 {
     JS_OOM_POSSIBLY_FAIL();
     return strdup(s);
 }
 #endif/* JS_USE_CUSTOM_ALLOCATOR */
 
 #include <new>
@@ -499,44 +436,16 @@ js_pod_realloc(T* prior, size_t oldSize,
 {
     MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
     size_t bytes;
     if (MOZ_UNLIKELY(!js::CalculateAllocSize<T>(newSize, &bytes)))
         return nullptr;
     return static_cast<T*>(js_realloc(prior, bytes));
 }
 
-template <class T>
-static MOZ_ALWAYS_INLINE T*
-js_pod_malloc_protected(size_t numElems, uint32_t* id)
-{
-    T* ret = js_pod_malloc<T>(numElems);
-    js_malloc_protect(ret, id);
-    return ret;
-}
-
-template <class T>
-static MOZ_ALWAYS_INLINE T*
-js_pod_calloc_protected(size_t numElems, uint32_t* id)
-{
-    T* ret = js_pod_calloc<T>(numElems);
-    js_malloc_protect(ret, id);
-    return ret;
-}
-
-template <class T>
-static MOZ_ALWAYS_INLINE T*
-js_pod_realloc_protected(T* prior, size_t oldSize, size_t newSize, uint32_t* id)
-{
-    js_malloc_unprotect(prior, id);
-    T* ret = js_pod_realloc<T>(prior, oldSize, newSize);
-    js_malloc_protect(ret ? ret : prior, id);
-    return ret;
-}
-
 namespace js {
 
 template<typename T>
 struct ScopedFreePtrTraits
 {
     typedef T* type;
     static T* empty() { return nullptr; }
     static void release(T* ptr) { js_free(ptr); }
--- a/js/src/jit/JitAllocPolicy.h
+++ b/js/src/jit/JitAllocPolicy.h
@@ -9,21 +9,19 @@
 
 #include "mozilla/Attributes.h"
 #include "mozilla/GuardObjects.h"
 #include "mozilla/OperatorNewExtensions.h"
 #include "mozilla/TypeTraits.h"
 
 #include "jscntxt.h"
 
-#include "ds/InlineTable.h"
 #include "ds/LifoAlloc.h"
 #include "jit/InlineList.h"
 #include "jit/Ion.h"
-#include "js/Utility.h"
 
 namespace js {
 namespace jit {
 
 class TempAllocator
 {
     LifoAllocScope lifoScope_;
 
@@ -129,99 +127,16 @@ class JitAllocPolicy
     }
     void reportAllocOverflow() const {
     }
     MOZ_MUST_USE bool checkSimulatedOOM() const {
         return !js::oom::ShouldFailWithOOM();
     }
 };
 
-/*
- * A policy for using system memory functions that protects against
- * realloc-after-free and free-after-free from unrelated locations.
- */
-class ProtectedSystemAllocPolicy
-{
-    InlineMap<void*, uint32_t, 2, DefaultHasher<void*>, SystemAllocPolicy> allocIDs;
-
-  public:
-    ProtectedSystemAllocPolicy() {}
-
-    /*
-     * While possible, copying protected allocations would defeat the purpose
-     * of this policy, so we only allow copy-constructing from vanilla policies.
-     */
-    ProtectedSystemAllocPolicy(const ProtectedSystemAllocPolicy& that) {
-        MOZ_RELEASE_ASSERT(that.allocIDs.empty());
-    }
-
-    ~ProtectedSystemAllocPolicy() { MOZ_RELEASE_ASSERT(allocIDs.empty()); }
-
-    template <typename T> T* maybe_pod_malloc(size_t numElems) {
-        uint32_t allocID;
-        T* ret = js_pod_malloc_protected<T>(numElems, &allocID);
-        if (ret) {
-            AutoEnterOOMUnsafeRegion oomUnsafe;
-            if (!allocIDs.put(ret, allocID))
-                oomUnsafe.crash("Failed to store allocation ID.");
-        }
-        return ret;
-    }
-
-    template <typename T> T* maybe_pod_calloc(size_t numElems) {
-        uint32_t allocID;
-        T* ret = js_pod_calloc_protected<T>(numElems, &allocID);
-        if (ret) {
-            AutoEnterOOMUnsafeRegion oomUnsafe;
-            if (!allocIDs.put(ret, allocID))
-                oomUnsafe.crash("Failed to store allocation ID.");
-        }
-        return ret;
-    }
-
-    template <typename T> T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
-        uint32_t allocID = 0;
-        if (p) {
-            auto entry = allocIDs.lookup(p);
-            MOZ_RELEASE_ASSERT(entry.found());
-            allocID = entry->value();
-            allocIDs.remove(entry);
-        }
-        T* ret = js_pod_realloc_protected<T>(p, oldSize, newSize, &allocID);
-        if (ret || p) {
-            AutoEnterOOMUnsafeRegion oomUnsafe;
-            if (!allocIDs.put(ret ? ret : p, allocID))
-                oomUnsafe.crash("Failed to store allocation ID.");
-        }
-        return ret;
-    }
-
-    template <typename T> T* pod_malloc(size_t numElems) { return maybe_pod_malloc<T>(numElems); }
-    template <typename T> T* pod_calloc(size_t numElems) { return maybe_pod_calloc<T>(numElems); }
-    template <typename T> T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
-        return maybe_pod_realloc<T>(p, oldSize, newSize);
-    }
-
-    void free_(void* p) {
-        uint32_t allocID = 0;
-        if (p) {
-            auto entry = allocIDs.lookup(p);
-            MOZ_RELEASE_ASSERT(entry.found());
-            allocID = entry->value();
-            allocIDs.remove(entry);
-        }
-        js_free_protected(p, &allocID);
-    }
-
-    void reportAllocOverflow() const {}
-    MOZ_MUST_USE bool checkSimulatedOOM() const {
-        return !js::oom::ShouldFailWithOOM();
-    }
-};
-
 class AutoJitContextAlloc
 {
     TempAllocator tempAlloc_;
     JitContext* jcx_;
     TempAllocator* prevAlloc_;
 
   public:
     explicit AutoJitContextAlloc(JSContext* cx)
--- a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
@@ -30,17 +30,16 @@
 #ifndef jit_x86_shared_AssemblerBuffer_x86_shared_h
 #define jit_x86_shared_AssemblerBuffer_x86_shared_h
 
 #include <stdarg.h>
 #include <string.h>
 
 #include "ds/PageProtectingVector.h"
 #include "jit/ExecutableAllocator.h"
-#include "jit/JitAllocPolicy.h"
 #include "jit/JitSpewer.h"
 
 // Spew formatting helpers.
 #define PRETTYHEX(x)                       (((x)<0)?"-":""),(((x)<0)?-(x):(x))
 
 #define MEM_o     "%s0x%x"
 #define MEM_os    MEM_o   "(,%s,%d)"
 #define MEM_ob    MEM_o   "(%s)"
@@ -165,17 +164,17 @@ namespace jit {
          *
          * See also the |buffer| method.
          */
         void oomDetected() {
             m_oom = true;
             m_buffer.clear();
         }
 
-        PageProtectingVector<unsigned char, 256, ProtectedSystemAllocPolicy> m_buffer;
+        PageProtectingVector<unsigned char, 256, SystemAllocPolicy> m_buffer;
         bool m_oom;
     };
 
     class GenericAssembler
     {
         Sprinter* printer;
 
       public:
--- a/memory/build/malloc_decls.h
+++ b/memory/build/malloc_decls.h
@@ -20,26 +20,23 @@
 #endif
 
 typedef MALLOC_USABLE_SIZE_CONST_PTR void * usable_ptr_t;
 
 #  define MALLOC_FUNCS_MALLOC 1
 #  define MALLOC_FUNCS_JEMALLOC 2
 #  define MALLOC_FUNCS_INIT 4
 #  define MALLOC_FUNCS_BRIDGE 8
-#  define MALLOC_FUNCS_EXTRA 16
 #  define MALLOC_FUNCS_ALL (MALLOC_FUNCS_INIT | MALLOC_FUNCS_BRIDGE | \
-                            MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | \
-                            MALLOC_FUNCS_EXTRA)
+                            MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC)
 
 #endif /* malloc_decls_h */
 
 #ifndef MALLOC_FUNCS
-#  define MALLOC_FUNCS (MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC | \
-                        MALLOC_FUNCS_EXTRA)
+#  define MALLOC_FUNCS (MALLOC_FUNCS_MALLOC | MALLOC_FUNCS_JEMALLOC)
 #endif
 
 #ifdef MALLOC_DECL
 #  ifndef MALLOC_DECL_VOID
 #    define MALLOC_DECL_VOID(func, ...) MALLOC_DECL(func, void, __VA_ARGS__)
 #  endif
 
 #  if MALLOC_FUNCS & MALLOC_FUNCS_INIT
@@ -55,20 +52,16 @@ MALLOC_DECL(aligned_alloc, void *, size_
 MALLOC_DECL(calloc, void *, size_t, size_t)
 MALLOC_DECL(realloc, void *, void *, size_t)
 MALLOC_DECL_VOID(free, void *)
 MALLOC_DECL(memalign, void *, size_t, size_t)
 MALLOC_DECL(valloc, void *, size_t)
 MALLOC_DECL(malloc_usable_size, size_t, usable_ptr_t)
 MALLOC_DECL(malloc_good_size, size_t, size_t)
 #  endif
-#  if MALLOC_FUNCS & MALLOC_FUNCS_EXTRA
-MALLOC_DECL_VOID(malloc_protect, void *, uint32_t *)
-MALLOC_DECL_VOID(malloc_unprotect, void *, uint32_t *)
-#  endif
 #  if MALLOC_FUNCS & MALLOC_FUNCS_JEMALLOC
 MALLOC_DECL_VOID(jemalloc_stats, jemalloc_stats_t *)
 MALLOC_DECL_VOID(jemalloc_purge_freed_pages, void)
 MALLOC_DECL_VOID(jemalloc_free_dirty_pages, void)
 #  endif
 
 #  undef MALLOC_DECL_VOID
 #endif /* MALLOC_DECL */
--- a/memory/build/mozjemalloc_compat.c
+++ b/memory/build/mozjemalloc_compat.c
@@ -66,35 +66,17 @@
 #    endif
 #  endif
 #  define VARIABLE_ARRAY(type, name, count) \
 	type *name = alloca(sizeof(type) * (count))
 #else
 #  define VARIABLE_ARRAY(type, name, count) type name[(count)]
 #endif
 
-MFBT_API void
-malloc_protect_impl(void *ptr, uint32_t *id)
-{
-  if (ptr)
-    *id = 1;
-}
-
-MFBT_API void
-malloc_unprotect_impl(void *ptr, uint32_t *id)
-{
-  *id = 0;
-}
-
-#if defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_REPLACE_MALLOC)
-static inline
-#else
-MOZ_MEMORY_API
-#endif
-size_t
+MOZ_MEMORY_API size_t
 malloc_good_size_impl(size_t size)
 {
   /* je_nallocx crashes when given a size of 0. As
    * malloc_usable_size(malloc(0)) and malloc_usable_size(malloc(1))
    * return the same value, use a size of 1. */
   if (size == 0)
     size = 1;
   return je_(nallocx)(size, 0);
--- a/memory/build/mozmemory_wrap.c
+++ b/memory/build/mozmemory_wrap.c
@@ -7,30 +7,16 @@
 #include "mozilla/Types.h"
 
 /* Declare malloc implementation functions with the right return and
  * argument types. */
 #define MALLOC_DECL(name, return_type, ...) \
   MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
 #include "malloc_decls.h"
 
-#ifdef XP_DARWIN
-MFBT_API void
-malloc_protect(void* ptr, uint32_t* id)
-{
-  malloc_protect_impl(ptr, id);
-}
-
-MFBT_API void
-malloc_unprotect(void* ptr, uint32_t* id)
-{
-  malloc_unprotect_impl(ptr, id);
-}
-#endif
-
 #ifdef MOZ_WRAP_NEW_DELETE
 /* operator new(unsigned int) */
 MOZ_MEMORY_API void *
 mozmem_malloc_impl(_Znwj)(unsigned int size)
 {
   return malloc_impl(size);
 }
 /* operator new[](unsigned int) */
--- a/memory/build/mozmemory_wrap.h
+++ b/memory/build/mozmemory_wrap.h
@@ -16,18 +16,16 @@
  *   - malloc
  *   - posix_memalign
  *   - aligned_alloc
  *   - calloc
  *   - realloc
  *   - free
  *   - memalign
  *   - valloc
- *   - malloc_protect
- *   - malloc_unprotect
  *   - malloc_usable_size
  *   - malloc_good_size
  *   Some of these functions are specific to some systems, but for
  *   convenience, they are treated as being cross-platform, and available
  *   as such.
  *
  * - duplication functions:
  *   - strndup
@@ -189,30 +187,19 @@
 #define malloc_impl              mozmem_malloc_impl(malloc)
 #define posix_memalign_impl      mozmem_malloc_impl(posix_memalign)
 #define aligned_alloc_impl       mozmem_malloc_impl(aligned_alloc)
 #define calloc_impl              mozmem_malloc_impl(calloc)
 #define realloc_impl             mozmem_malloc_impl(realloc)
 #define free_impl                mozmem_malloc_impl(free)
 #define memalign_impl            mozmem_malloc_impl(memalign)
 #define valloc_impl              mozmem_malloc_impl(valloc)
-#define malloc_protect_impl      mozmem_malloc_impl(malloc_protect)
-#define malloc_unprotect_impl    mozmem_malloc_impl(malloc_unprotect)
 #define malloc_usable_size_impl  mozmem_malloc_impl(malloc_usable_size)
 #define malloc_good_size_impl    mozmem_malloc_impl(malloc_good_size)
 
-#ifdef XP_DARWIN
-MOZ_BEGIN_EXTERN_C
-
-MFBT_API void malloc_protect(void* ptr, uint32_t* id);
-MFBT_API void malloc_unprotect(void* ptr, uint32_t* id);
-
-MOZ_END_EXTERN_C
-#endif
-
 /* Duplication functions */
 #define strndup_impl   mozmem_dup_impl(strndup)
 #define strdup_impl    mozmem_dup_impl(strdup)
 #ifdef XP_WIN
 #  define wcsdup_impl  mozmem_dup_impl(wcsdup)
 #endif
 
 /* String functions */
--- a/memory/build/replace_malloc.c
+++ b/memory/build/replace_malloc.c
@@ -109,21 +109,16 @@ replace_malloc_init_funcs()
  * specific functions MOZ_JEMALLOC_API; see mozmemory_wrap.h
  */
 #define MALLOC_DECL(name, return_type, ...) \
   MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
 #define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
 #include "malloc_decls.h"
 
 #define MALLOC_DECL(name, return_type, ...) \
-  MFBT_API return_type name ## _impl(__VA_ARGS__);
-#define MALLOC_FUNCS MALLOC_FUNCS_EXTRA
-#include "malloc_decls.h"
-
-#define MALLOC_DECL(name, return_type, ...) \
   MOZ_JEMALLOC_API return_type name ## _impl(__VA_ARGS__);
 #define MALLOC_FUNCS MALLOC_FUNCS_JEMALLOC
 #include "malloc_decls.h"
 
 static int replace_malloc_initialized = 0;
 static void
 init()
 {
@@ -223,38 +218,16 @@ valloc_impl(size_t size)
 {
   if (MOZ_UNLIKELY(!replace_malloc_initialized))
     init();
   if (MOZ_LIKELY(!replace_valloc))
     return je_valloc(size);
   return replace_valloc(size);
 }
 
-void
-malloc_protect_impl(void* ptr, uint32_t* id)
-{
-  if (MOZ_UNLIKELY(!replace_malloc_initialized))
-    init();
-  if (MOZ_LIKELY(!replace_malloc_protect))
-    je_malloc_protect(ptr, id);
-  else
-    replace_malloc_protect(ptr, id);
-}
-
-void
-malloc_unprotect_impl(void* ptr, uint32_t* id)
-{
-  if (MOZ_UNLIKELY(!replace_malloc_initialized))
-    init();
-  if (MOZ_LIKELY(!replace_malloc_unprotect))
-    je_malloc_unprotect(ptr, id);
-  else
-    replace_malloc_unprotect(ptr, id);
-}
-
 size_t
 malloc_usable_size_impl(usable_ptr_t ptr)
 {
   if (MOZ_UNLIKELY(!replace_malloc_initialized))
     init();
   if (MOZ_LIKELY(!replace_malloc_usable_size))
     return je_malloc_usable_size(ptr);
   return replace_malloc_usable_size(ptr);
--- a/memory/mozalloc/mozalloc.cpp
+++ b/memory/mozalloc/mozalloc.cpp
@@ -1,17 +1,16 @@
 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: sw=4 ts=4 et :
  */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <stddef.h>             // for size_t
-#include <stdint.h>             // for uint32_t
 
 // Building with USE_STATIC_LIBS = True sets -MT instead of -MD. -MT sets _MT,
 // while -MD sets _MT and _DLL.
 #if defined(_MT) && !defined(_DLL)
 #define MOZ_STATIC_RUNTIME
 #endif
 
 #if defined(MOZ_MEMORY) && !defined(MOZ_STATIC_RUNTIME)
@@ -28,21 +27,16 @@
 // it needs to use _impl suffixes. However, with libmozglue growing, this is
 // becoming cumbersome, so we will likely use a malloc.h wrapper of some sort
 // and allow the use of the functions without a _impl suffix.
 #define MALLOC_DECL(name, return_type, ...) \
   extern "C" MOZ_MEMORY_API return_type name ## _impl(__VA_ARGS__);
 #define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
 #include "malloc_decls.h"
 
-#define MALLOC_DECL(name, return_type, ...) \
-  extern "C" MFBT_API return_type name ## _impl(__VA_ARGS__);
-#define MALLOC_FUNCS MALLOC_FUNCS_EXTRA
-#include "malloc_decls.h"
-
 extern "C" MOZ_MEMORY_API char *strdup_impl(const char *);
 extern "C" MOZ_MEMORY_API char *strndup_impl(const char *, size_t);
 
 #else
 // When jemalloc is disabled, or when building the static runtime variant,
 // we need not to use the suffixes.
 
 #if defined(MALLOC_H)
@@ -55,18 +49,16 @@ extern "C" MOZ_MEMORY_API char *strndup_
 
 #define malloc_impl malloc
 #define posix_memalign_impl posix_memalign
 #define calloc_impl calloc
 #define realloc_impl realloc
 #define free_impl free
 #define memalign_impl memalign
 #define valloc_impl valloc
-#define malloc_protect_impl malloc_protect
-#define malloc_unprotect_impl malloc_unprotect
 #define malloc_usable_size_impl malloc_usable_size
 #define strdup_impl strdup
 #define strndup_impl strndup
 
 #endif
 
 #include <errno.h>
 #include <new>                  // for std::bad_alloc
@@ -80,35 +72,16 @@ extern "C" MOZ_MEMORY_API char *strndup_
 #ifdef __GNUC__
 #define LIKELY(x)    (__builtin_expect(!!(x), 1))
 #define UNLIKELY(x)  (__builtin_expect(!!(x), 0))
 #else
 #define LIKELY(x)    (x)
 #define UNLIKELY(x)  (x)
 #endif
 
-#ifndef MOZ_MEMORY
-MOZ_BEGIN_EXTERN_C
-
-MFBT_API void
-malloc_protect(void* ptr, uint32_t* id)
-{
-    if (ptr)
-        *id = 1;
-}
-
-MFBT_API void
-malloc_unprotect(void* ptr, uint32_t* id)
-{
-    *id = 0;
-}
-
-MOZ_END_EXTERN_C
-#endif
-
 void*
 moz_xmalloc(size_t size)
 {
     void* ptr = malloc_impl(size);
     if (UNLIKELY(!ptr && size)) {
         mozalloc_handle_oom(size);
         return moz_xmalloc(size);
     }
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -189,25 +189,16 @@
 /*
  * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
  * validation.  There are many possible errors that validation does not even
  * attempt to detect.
  */
 #define MALLOC_VALIDATE
 
 /*
- * MALLOC_PROTECTED_REGIONS enables the allocation of 'protected' regions,
- * which can only be reallocated or deallocated using a unique ID. This
- * may help trace a class of use-after-free bugs where a thread attempts
- * to reallocate a region of memory it previously freed, which has since
- * been allocated for use by another thread.
- */
-#define MALLOC_PROTECTED_REGIONS
-
-/*
  * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
  * re-balances arena load if exponentially averaged contention exceeds a
  * certain threshold.
  */
 /* #define	MALLOC_BALANCE */
 
 #if defined(MOZ_MEMORY_LINUX) && !defined(MOZ_MEMORY_ANDROID)
 #define	_GNU_SOURCE /* For mremap(2). */
@@ -778,38 +769,16 @@ struct extent_node_s {
 	/* Total region size. */
 	size_t	size;
 
 	/* True if zero-filled; used by chunk recycling code. */
 	bool	zeroed;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;
 
-#ifdef MALLOC_PROTECTED_REGIONS
-/* Tree of protected memory regions. */
-typedef struct protected_node_s protected_node_t;
-struct protected_node_s {
-	/* Linkage for the address-ordered tree. */
-	rb_node(protected_node_t) link_ad;
-
-	/* Linkage for the ID-ordered tree. */
-	rb_node(protected_node_t) link_id;
-
-	/* The starting address of the region. */
-	uintptr_t	addr;
-
-	/* The size of the region. */
-	size_t		size;
-
-	/* The ID used to access the region. */
-	uint32_t	id;
-};
-typedef rb_tree(protected_node_t) protected_tree_t;
-#endif /* MALLOC_PROTECTED_REGIONS */
-
 /******************************************************************************/
 /*
  * Radix tree data structures.
  */
 
 #ifdef MALLOC_VALIDATE
    /*
     * Size of each radix tree node (must be a power of 2).  This impacts tree
@@ -1252,32 +1221,16 @@ static malloc_mutex_t	chunks_mtx;
  * Trees of chunks that were previously allocated (trees differ only in node
  * ordering).  These are used when allocating chunks, in an attempt to re-use
  * address space.  Depending on function, different tree orderings are needed,
  * which is why there are two trees with the same contents.
  */
 static extent_tree_t	chunks_szad_mmap;
 static extent_tree_t	chunks_ad_mmap;
 
-#ifdef MALLOC_PROTECTED_REGIONS
-/* Stores unused protected nodes. */
-static protected_node_t	*protected_nodes;
-
-/* Protects access to the protected region trees. */
-static malloc_mutex_t	protected_tree_mtx;
-
-/*
- * Trees of regions that were allocated using the protected allocation API.
- * As with the extent trees, different tree orderings are needed depending on
- * function, so there are two trees with the same contents.
- */
-static protected_tree_t	protected_tree_ad;
-static protected_tree_t	protected_tree_id;
-#endif /* MALLOC_PROTECTED_REGIONS */
-
 /* Protects huge allocation-related data structures. */
 static malloc_mutex_t	huge_mtx;
 
 /* Tree of chunks that are stand-alone huge allocations. */
 static extent_tree_t	huge;
 
 #ifdef MALLOC_STATS
 /* Huge allocation statistics. */
@@ -1946,17 +1899,17 @@ pow2_ceil(size_t x)
 	x |= x >> 16;
 #if (SIZEOF_PTR == 8)
 	x |= x >> 32;
 #endif
 	x++;
 	return (x);
 }
 
-#if defined(MALLOC_BALANCE) || defined(MALLOC_PROTECTED_REGIONS)
+#ifdef MALLOC_BALANCE
 /*
  * Use a simple linear congruential pseudo-random number generator:
  *
  *   prn(y) = (a*x + c) % m
  *
  * where the following constants ensure maximal period:
  *
  *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
@@ -1996,22 +1949,16 @@ prn_##suffix(uint32_t lg_range)						\
 #endif
 
 #ifdef MALLOC_BALANCE
 /* Define the PRNG used for arena assignment. */
 static __thread uint32_t balance_x;
 PRN_DEFINE(balance, balance_x, 1297, 1301)
 #endif
 
-#ifdef MALLOC_PROTECTED_REGIONS
-/* Define the PRNG used for protected region ID assignment. */
-static uint32_t protected_id_x;
-PRN_DEFINE(protected_id, protected_id_x, 1297, 1301)
-#endif /* MALLOC_PROTECTED_REGIONS */
-
 #ifdef MALLOC_UTRACE
 static int
 utrace(const void *addr, size_t len)
 {
 	malloc_utrace_t *ut = (malloc_utrace_t *)addr;
 	char buf_a[UMAX2S_BUFSIZE];
 	char buf_b[UMAX2S_BUFSIZE];
 
@@ -2409,46 +2356,16 @@ extent_ad_comp(extent_node_t *a, extent_
 rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
     extent_ad_comp)
 
 /*
  * End extent tree code.
  */
 /******************************************************************************/
 /*
- * Begin protected region tree code.
- */
-
-#ifdef MALLOC_PROTECTED_REGIONS
-static inline int
-protected_ad_comp(protected_node_t *a, protected_node_t *b)
-{
-	return ((a->addr > b->addr) - (a->addr < b->addr));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(static, protected_tree_ad_, protected_tree_t, protected_node_t,
-	link_ad, protected_ad_comp)
-
-static inline int
-protected_id_comp(protected_node_t *a, protected_node_t *b)
-{
-	return ((a->id > b->id) - (a->id < b->id));
-}
-
-/* Wrap red-black tree macros in functions. */
-rb_wrap(static, protected_tree_id_, protected_tree_t, protected_node_t,
-	link_id, protected_id_comp)
-#endif /* MALLOC_PROTECTED_REGIONS */
-
-/*
- * End protected region tree code.
- */
-/******************************************************************************/
-/*
  * Begin chunk management functions.
  */
 
 #ifdef MOZ_MEMORY_WINDOWS
 
 static void *
 pages_map(void *addr, size_t size)
 {
@@ -4669,47 +4586,16 @@ isalloc(const void *ptr)
 		ret = node->size;
 
 		malloc_mutex_unlock(&huge_mtx);
 	}
 
 	return (ret);
 }
 
-#ifdef MALLOC_PROTECTED_REGIONS
-static void
-assert_unprotected(void *p)
-{
-	uintptr_t addr;
-	protected_node_t key;
-	protected_node_t *node;
-
-	if (!p)
-		return;
-
-	addr = (uintptr_t)p;
-	key.addr = addr;
-
-	malloc_mutex_lock(&protected_tree_mtx);
-
-	node = protected_tree_ad_psearch(&protected_tree_ad, &key);
-	if (!node)
-		goto RETURN;
-
-	/* Crash if |addr| is within the region protected by |node|. */
-	if (addr >= node->addr && addr < node->addr + node->size)
-		jemalloc_crash();
-
-RETURN:
-	malloc_mutex_unlock(&protected_tree_mtx);
-}
-#else /* !MALLOC_PROTECTED_REGIONS */
-static inline void assert_unprotected(void *p) { }
-#endif /* MALLOC_PROTECTED_REGIONS */
-
 static inline void
 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm)
 {
 	arena_run_t *run;
 	arena_bin_t *bin;
 	size_t size;
 
@@ -4791,18 +4677,16 @@ arena_dalloc_small(arena_t *arena, arena
 	arena->stats.allocated_small -= size;
 	arena->stats.ndalloc_small++;
 #endif
 }
 
 static void
 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
-	assert_unprotected(ptr);
-
 	/* Large allocation. */
 	malloc_spin_lock(&arena->lock);
 
 #ifdef MALLOC_FILL
 #ifndef MALLOC_STATS
 	if (opt_poison)
 #endif
 #endif
@@ -5053,19 +4937,16 @@ iralloc(void *ptr, size_t size)
 {
 	size_t oldsize;
 
 	assert(ptr != NULL);
 	assert(size != 0);
 
 	oldsize = isalloc(ptr);
 
-	if (oldsize > bin_maxclass)
-		assert_unprotected(ptr);
-
 	if (size <= arena_maxclass)
 		return (arena_ralloc(ptr, size, oldsize));
 	else
 		return (huge_ralloc(ptr, size, oldsize));
 }
 
 static bool
 arena_new(arena_t *arena)
@@ -5375,18 +5256,16 @@ huge_ralloc(void *ptr, size_t size, size
 	return (ret);
 }
 
 static void
 huge_dalloc(void *ptr)
 {
 	extent_node_t *node, key;
 
-	assert_unprotected(ptr);
-
 	malloc_mutex_lock(&huge_mtx);
 
 	/* Extract from tree of huge allocations. */
 	key.addr = ptr;
 	node = extent_tree_ad_search(&huge, &key);
 	assert(node != NULL);
 	assert(node->addr == ptr);
 	extent_tree_ad_remove(&huge, node);
@@ -6032,26 +5911,16 @@ MALLOC_OUT:
 	assert(chunksize >= pagesize);
 	assert(quantum * 4 <= chunksize);
 
 	/* Initialize chunks data. */
 	malloc_mutex_init(&chunks_mtx);
 	extent_tree_szad_new(&chunks_szad_mmap);
 	extent_tree_ad_new(&chunks_ad_mmap);
 
-#ifdef MALLOC_PROTECTED_REGIONS
-	/* Initialize protected region data. */
-	protected_nodes = NULL;
-	malloc_mutex_init(&protected_tree_mtx);
-	protected_tree_ad_new(&protected_tree_ad);
-	protected_tree_id_new(&protected_tree_id);
-	/* The seed doesn't really matter, so long as it's valid. */
-	SPRN(protected_id, 42);
-#endif /* MALLOC_PROTECTED_REGIONS */
-
 	/* Initialize huge allocation data. */
 	malloc_mutex_init(&huge_mtx);
 	extent_tree_ad_new(&huge);
 #ifdef MALLOC_STATS
 	huge_nmalloc = 0;
 	huge_ndalloc = 0;
 	huge_allocated = 0;
 	huge_mapped = 0;
@@ -6255,98 +6124,16 @@ MALLOC_OUT:
 void
 malloc_shutdown()
 {
 
 	malloc_print_stats();
 }
 #endif
 
-#ifdef MALLOC_PROTECTED_REGIONS
-static protected_node_t *
-protected_node_alloc()
-{
-	protected_node_t *ret;
-
-	if (protected_nodes) {
-		ret = protected_nodes;
-		protected_nodes = *(protected_node_t **)ret;
-	} else {
-		ret = (protected_node_t *)base_alloc(sizeof(protected_node_t));
-	}
-
-	return (ret);
-}
-
-/*
- * Analogous to base_node_dealloc, we don't actually
- * release tree nodes. Instead, we save them for later.
- */
-static void
-protected_node_dealloc(protected_node_t *node)
-{
-	*(protected_node_t **)node = protected_nodes;
-	protected_nodes = node;
-}
-
-static void
-create_protected_region(void *p, uint32_t *id)
-{
-	protected_node_t *key;
-	protected_node_t *node;
-	size_t size = isalloc(p);
-	uintptr_t addr = (uintptr_t)p;
-
-	malloc_mutex_lock(&protected_tree_mtx);
-
-	key = protected_node_alloc();
-	key->addr = addr;
-	key->size = size;
-	key->id = PRN(protected_id, 32);
-
-	/* Ensure the current address isn't already protected. */
-	node = protected_tree_ad_search(&protected_tree_ad, key);
-	if (node)
-		jemalloc_crash();
-
-	/* Generate a unique, valid key. Reserve 1 for dummy implementations. */
-	while (key->id < 2 || protected_tree_id_search(&protected_tree_id, key))
-		key->id = PRN(protected_id, 32);
-
-	*id = key->id;
-	protected_tree_ad_insert(&protected_tree_ad, key);
-	protected_tree_id_insert(&protected_tree_id, key);
-
-	malloc_mutex_unlock(&protected_tree_mtx);
-}
-
-static void
-remove_protected_region(void *p, uint32_t *id)
-{
-	protected_node_t key;
-	protected_node_t *node;
-	uintptr_t addr = (uintptr_t)p;
-	key.addr = addr;
-
-	malloc_mutex_lock(&protected_tree_mtx);
-
-	node = protected_tree_ad_search(&protected_tree_ad, &key);
-
-	/* Ensure the node exists, and the right ID was passed in. */
-	if (!node || node->id != *id)
-		jemalloc_crash();
-
-	protected_tree_ad_remove(&protected_tree_ad, node);
-	protected_tree_id_remove(&protected_tree_id, node);
-	protected_node_dealloc(node);
-
-	malloc_mutex_unlock(&protected_tree_mtx);
-}
-#endif /* MALLOC_PROTECTED_REGIONS */
-
 /*
  * End general internal functions.
  */
 /******************************************************************************/
 /*
  * Begin malloc(3)-compatible functions.
  */
 
@@ -6697,46 +6484,16 @@ free_impl(void *ptr)
 /*
  * End malloc(3)-compatible functions.
  */
 /******************************************************************************/
 /*
  * Begin non-standard functions.
  */
 
-#ifdef MALLOC_PROTECTED_REGIONS
-MFBT_API void
-malloc_protect_impl(void *ptr, uint32_t *id)
-{
-	if (ptr)
-		create_protected_region(ptr, id);
-}
-
-MFBT_API void
-malloc_unprotect_impl(void *ptr, uint32_t *id)
-{
-	if (ptr)
-		remove_protected_region(ptr, id);
-	*id = 0;
-}
-#else /* !MALLOC_PROTECTED_REGIONS */
-MFBT_API void
-malloc_protect_impl(void *ptr, uint32_t *id)
-{
-	if (ptr)
-		*id = 1;
-}
-
-MFBT_API void
-malloc_unprotect_impl(void *ptr, uint32_t *id)
-{
-	*id = 0;
-}
-#endif /* MALLOC_PROTECTED_REGIONS */
-
 /* This was added by Mozilla for use by SQLite. */
 #if defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_REPLACE_MALLOC)
 static
 #else
 MOZ_MEMORY_API
 #endif
 size_t
 malloc_good_size_impl(size_t size)
--- a/memory/replace/replace/ReplaceMalloc.cpp
+++ b/memory/replace/replace/ReplaceMalloc.cpp
@@ -195,36 +195,16 @@ replace_valloc(size_t aSize)
     if (hook_table->valloc_hook) {
       return hook_table->valloc_hook(ptr, aSize);
     }
     return hook_table->malloc_hook(ptr, aSize);
   }
   return ptr;
 }
 
-void
-replace_malloc_protect(void* aPtr, uint32_t* aID)
-{
-  gFuncs->malloc_protect(aPtr, aID);
-  const malloc_hook_table_t* hook_table = gHookTable;
-  if (hook_table && hook_table->malloc_protect_hook) {
-    hook_table->malloc_protect_hook(aPtr, aID);
-  }
-}
-
-void
-replace_malloc_unprotect(void* aPtr, uint32_t* aID)
-{
-  gFuncs->malloc_unprotect(aPtr, aID);
-  const malloc_hook_table_t* hook_table = gHookTable;
-  if (hook_table && hook_table->malloc_unprotect_hook) {
-    hook_table->malloc_unprotect_hook(aPtr, aID);
-  }
-}
-
 size_t
 replace_malloc_usable_size(usable_ptr_t aPtr)
 {
   size_t ret = gFuncs->malloc_usable_size(aPtr);
   const malloc_hook_table_t* hook_table = gHookTable;
   if (hook_table && hook_table->malloc_usable_size_hook) {
     return hook_table->malloc_usable_size_hook(ret, aPtr);
   }
--- a/mozglue/build/mozglue.def.in
+++ b/mozglue/build/mozglue.def.in
@@ -8,39 +8,32 @@ EXPORTS
 #ifdef MOZ_MEMORY
   ; symbols that are actually useful
 #ifdef MOZ_REPLACE_MALLOC
   malloc=malloc_impl
   calloc=calloc_impl
   realloc=realloc_impl
   free=free_impl
   posix_memalign=posix_memalign_impl
-  malloc_protect=malloc_protect_impl
-  malloc_unprotect=malloc_unprotect_impl
   malloc_usable_size=malloc_usable_size_impl
   malloc_good_size=malloc_good_size_impl
   _aligned_free=free_impl
 #else
   malloc=je_malloc
   calloc=je_calloc
   realloc=je_realloc
   free=je_free
   posix_memalign=je_posix_memalign
-  malloc_protect=je_malloc_protect
-  malloc_unprotect=je_malloc_unprotect
   malloc_usable_size=je_malloc_usable_size
   malloc_good_size=je_malloc_good_size
   _aligned_free=je_free
 #endif
   _aligned_malloc
   strndup=wrap_strndup
   strdup=wrap_strdup
   _strdup=wrap_strdup
   wcsdup=wrap_wcsdup
   _wcsdup=wrap_wcsdup
   jemalloc_stats
   jemalloc_free_dirty_pages
   ; A hack to work around the CRT (see giant comment in Makefile.in)
   frex=dumb_free_thunk
-#else
-  malloc_protect
-  malloc_unprotect
 #endif
--- a/mozglue/build/replace_malloc.mk
+++ b/mozglue/build/replace_malloc.mk
@@ -9,18 +9,16 @@ OS_LDFLAGS += \
   -Wl,-U,_replace_malloc \
   -Wl,-U,_replace_posix_memalign \
   -Wl,-U,_replace_aligned_alloc \
   -Wl,-U,_replace_calloc \
   -Wl,-U,_replace_realloc \
   -Wl,-U,_replace_free \
   -Wl,-U,_replace_memalign \
   -Wl,-U,_replace_valloc \
-  -Wl,-U,_replace_malloc_protect \
-  -Wl,-U,_replace_malloc_unprotect \
   -Wl,-U,_replace_malloc_usable_size \
   -Wl,-U,_replace_malloc_good_size \
   -Wl,-U,_replace_jemalloc_stats \
   -Wl,-U,_replace_jemalloc_purge_freed_pages \
   -Wl,-U,_replace_jemalloc_free_dirty_pages \
   $(NULL)
 
 ifneq ($(MOZ_REPLACE_MALLOC_LINKAGE),compiler support)