Bug 1123237 - Part 11. Don't use STL in memory-profiler. r=BenWa,cervantes
☠☠ backed out by 3c46b6ef0ded ☠ ☠
authorKan-Ru Chen <kanru@kanru.info>
Wed, 26 Aug 2015 18:55:55 +0800
changeset 293693 9c26b3b787f8a6666a742591f4ff6c6e8b95834c
parent 293692 1fcec0dc93d5bb4a8dad4f50e0421c304c396c8c
child 293694 f5047a4f41b4144c2fbf9e78a2bd1cb7edccf60f
push id5245
push userraliiev@mozilla.com
push dateThu, 29 Oct 2015 11:30:51 +0000
treeherdermozilla-beta@dac831dc1bd0 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersBenWa, cervantes
bugs1123237
milestone43.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1123237 - Part 11. Don't use STL in memory-profiler. r=BenWa,cervantes
tools/memory-profiler/CompactTraceTable.h
tools/memory-profiler/GCHeapProfilerImpl.cpp
tools/memory-profiler/GCHeapProfilerImpl.h
tools/memory-profiler/MemoryProfiler.cpp
tools/memory-profiler/MemoryProfiler.h
tools/memory-profiler/NativeProfilerImpl.cpp
tools/memory-profiler/NativeProfilerImpl.h
tools/memory-profiler/UncensoredAllocator.cpp
tools/memory-profiler/UncensoredAllocator.h
--- a/tools/memory-profiler/CompactTraceTable.h
+++ b/tools/memory-profiler/CompactTraceTable.h
@@ -2,135 +2,115 @@
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef memory_profiler_CompactTraceTable_h
 #define memory_profiler_CompactTraceTable_h
 
-#include "UncensoredAllocator.h"
+#include "mozilla/HashFunctions.h"
 
-#include <functional>
-#include <utility>
-
-#include "mozilla/HashFunctions.h"
+#include "nsDataHashtable.h"
+#include "nsTArray.h"
 
 namespace mozilla {
 
 struct TrieNode final
 {
   uint32_t parentIdx;
   uint32_t nameIdx;
   bool operator==(const TrieNode t) const
   {
     return parentIdx == t.parentIdx && nameIdx == t.nameIdx;
   }
-};
-
-} // namespace mozilla
-
-namespace std {
-template<>
-struct hash<mozilla::TrieNode>
-{
-  size_t operator()(const mozilla::TrieNode& v) const
+  uint32_t Hash() const
   {
-    uint64_t k = static_cast<uint64_t>(v.parentIdx) << 32 | v.nameIdx;
-    return std::hash<uint64_t>()(k);
+    return HashGeneric(parentIdx, nameIdx);
   }
 };
-#ifdef MOZ_REPLACE_MALLOC
-template<>
-struct hash<mozilla::u_string>
-{
-  size_t operator()(const mozilla::u_string& v) const
-  {
-    return mozilla::HashString(v.c_str());
-  }
-};
-#endif
-} // namespace std
-
-namespace mozilla {
 
 // This class maps a Node of type T to its parent's index in the
 // map. When serializing, the map is traversed and put into an ordered
-// vector of Nodes.
-template<typename T>
+// array of Nodes.
+template<typename KeyClass, typename T>
 class NodeIndexMap final
 {
 public:
   uint32_t Insert(const T& e)
   {
-    auto i = mMap.insert(std::make_pair(e, mMap.size()));
-    return i.first->second;
+    uint32_t index = mMap.Count();
+    if (!mMap.Get(e, &index)) {
+      mMap.Put(e, index);
+    }
+    return index;
   }
 
-  u_vector<T> Serialize() const
+  nsTArray<T> Serialize() const
   {
-    u_vector<T> v(mMap.size());
-    for (auto i: mMap) {
-      v[i.second] = i.first;
+    nsTArray<T> v;
+    v.SetLength(mMap.Count());
+    for (auto iter = mMap.ConstIter(); !iter.Done(); iter.Next()) {
+      v[iter.Data()] = iter.Key();
     }
     return v;
   }
 
   uint32_t Size() const
   {
-    return mMap.size();
+    return mMap.Count();
   }
 
   void Clear()
   {
-    mMap.clear();
+    mMap.Clear();
   }
 private:
-  u_unordered_map<T, uint32_t> mMap;
+  nsDataHashtable<KeyClass, uint32_t> mMap;
 };
 
 // Backtraces are stored in a trie to save spaces.
 // Function names are stored in an unique table and TrieNodes contain indexes
 // into that table.
 // The trie is implemented with a hash table; children are stored in
 // traces[TrieNode{parent node index, branch/function name index}].
 class CompactTraceTable final
 {
 public:
   CompactTraceTable()
   {
-    mNames.Insert("(unknown)");
+    mNames.Insert(nsAutoCString("(unknown)"));
     mTraces.Insert(TrieNode{0, 0});
   }
 
-  u_vector<u_string> GetNames() const
+  nsTArray<nsCString> GetNames() const
   {
     return mNames.Serialize();
   }
 
-  u_vector<TrieNode> GetTraces() const
+  nsTArray<TrieNode> GetTraces() const
   {
     return mTraces.Serialize();
   }
 
   // Returns an ID to a stacktrace.
-  uint32_t Insert(const u_vector<u_string>& aRawStacktrace)
+  uint32_t Insert(const nsTArray<nsCString>& aRawStacktrace)
   {
     uint32_t parent = 0;
     for (auto& frame: aRawStacktrace) {
       parent = mTraces.Insert(TrieNode{parent, mNames.Insert(frame)});
     }
     return parent;
   }
 
   void Reset()
   {
     mNames.Clear();
     mTraces.Clear();
   }
 private:
-  NodeIndexMap<u_string> mNames;
-  NodeIndexMap<TrieNode> mTraces;
+  NodeIndexMap<nsCStringHashKey, nsCString> mNames;
+  NodeIndexMap<nsGenericHashKey<TrieNode>, TrieNode> mTraces;
 };
 
 } // namespace mozilla
 
 #endif // memory_profiler_CompactTraceTable_h
--- a/tools/memory-profiler/GCHeapProfilerImpl.cpp
+++ b/tools/memory-profiler/GCHeapProfilerImpl.cpp
@@ -1,61 +1,59 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "GCHeapProfilerImpl.h"
 
-#include "mozilla/TimeStamp.h"
-
-#include "prlock.h"
+#include "UncensoredAllocator.h"
 
 namespace mozilla {
 
 GCHeapProfilerImpl::GCHeapProfilerImpl()
 {
   mLock = PR_NewLock();
   mMarking = false;
 }
 
 GCHeapProfilerImpl::~GCHeapProfilerImpl()
 {
   if (mLock) {
     PR_DestroyLock(mLock);
   }
 }
 
-u_vector<u_string>
+nsTArray<nsCString>
 GCHeapProfilerImpl::GetNames() const
 {
   return mTraceTable.GetNames();
 }
 
-u_vector<TrieNode>
+nsTArray<TrieNode>
 GCHeapProfilerImpl::GetTraces() const
 {
   return mTraceTable.GetTraces();
 }
 
-const u_vector<AllocEvent>&
+const nsTArray<AllocEvent>&
 GCHeapProfilerImpl::GetEvents() const
 {
   return mAllocEvents;
 }
 
 void
 GCHeapProfilerImpl::reset()
 {
   mTraceTable.Reset();
-  mAllocEvents.clear();
-  mNurseryEntries.clear();
-  mTenuredEntriesFG.clear();
-  mTenuredEntriesBG.clear();
+  mAllocEvents.Clear();
+  mNurseryEntries.Clear();
+  mTenuredEntriesFG.Clear();
+  mTenuredEntriesBG.Clear();
 }
 
 void
 GCHeapProfilerImpl::sampleTenured(void* addr, uint32_t size)
 {
   SampleInternal(addr, size, mTenuredEntriesFG);
 }
 
@@ -63,101 +61,108 @@ void
 GCHeapProfilerImpl::sampleNursery(void* addr, uint32_t size)
 {
   SampleInternal(addr, size, mNurseryEntries);
 }
 
 void
 GCHeapProfilerImpl::markTenuredStart()
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
   if (!mMarking) {
     mMarking = true;
-    Swap(mTenuredEntriesFG, mTenuredEntriesBG);
-    MOZ_ASSERT(mTenuredEntriesFG.empty());
+    mTenuredEntriesFG.SwapElements(mTenuredEntriesBG);
+    MOZ_ASSERT(mTenuredEntriesFG.Count() == 0);
   }
 }
 
 void
 GCHeapProfilerImpl::markTenured(void* addr)
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
   if (mMarking) {
-    auto res = mTenuredEntriesBG.find(addr);
-    if (res != mTenuredEntriesBG.end()) {
-      res->second.mMarked = true;
+    AllocEntry entry;
+    if (mTenuredEntriesBG.Get(addr, &entry)) {
+      entry.mMarked = true;
+      mTenuredEntriesBG.Put(addr, entry);
     }
   }
 }
 
 void
 GCHeapProfilerImpl::sweepTenured()
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
   if (mMarking) {
     mMarking = false;
-    for (auto& entry: mTenuredEntriesBG) {
-      if (entry.second.mMarked) {
-        entry.second.mMarked = false;
-        mTenuredEntriesFG.insert(entry);
+    for (auto iter = mTenuredEntriesBG.Iter(); !iter.Done(); iter.Next()) {
+      if (iter.Data().mMarked) {
+        iter.Data().mMarked = false;
+        mTenuredEntriesFG.Put(iter.Key(), iter.Data());
       } else {
-        AllocEvent& oldEvent = mAllocEvents[entry.second.mEventIdx];
+        AllocEvent& oldEvent = mAllocEvents[iter.Data().mEventIdx];
         AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
-        mAllocEvents.push_back(newEvent);
+        mAllocEvents.AppendElement(newEvent);
       }
     }
-    mTenuredEntriesBG.clear();
+    mTenuredEntriesBG.Clear();
   }
 }
 
 void
 GCHeapProfilerImpl::sweepNursery()
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
-  for (auto& entry: mNurseryEntries) {
-    AllocEvent& oldEvent = mAllocEvents[entry.second.mEventIdx];
+  for (auto iter = mNurseryEntries.Iter(); !iter.Done(); iter.Next()) {
+    AllocEvent& oldEvent = mAllocEvents[iter.Data().mEventIdx];
     AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
-    mAllocEvents.push_back(newEvent);
+    mAllocEvents.AppendElement(newEvent);
   }
-  mNurseryEntries.clear();
+  mNurseryEntries.Clear();
 }
 
 void
 GCHeapProfilerImpl::moveNurseryToTenured(void* addrOld, void* addrNew)
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
-  auto iterOld = mNurseryEntries.find(addrOld);
-  if (iterOld == mNurseryEntries.end()) {
+  AllocEntry entryOld;
+  if (!mNurseryEntries.Get(addrOld, &entryOld)) {
     return;
   }
 
   // Because the tenured heap is sampled, the address might already be there.
   // If not, the address is inserted with the old event.
-  auto res = mTenuredEntriesFG.insert(
-    std::make_pair(addrNew, AllocEntry(iterOld->second.mEventIdx)));
-  auto iterNew = res.first;
-
-  // If it is already inserted, the insertion above will fail and the
-  // iterator of the already-inserted element is returned.
-  // We choose to ignore the the new event by setting its size zero and point
-  // the newly allocated address to the old event.
-  // An event of size zero will be skipped when reporting.
-  if (!res.second) {
-    mAllocEvents[iterNew->second.mEventIdx].mSize = 0;
-    iterNew->second.mEventIdx = iterOld->second.mEventIdx;
+  AllocEntry tenuredEntryOld;
+  if (!mTenuredEntriesFG.Get(addrNew, &tenuredEntryOld)) {
+    mTenuredEntriesFG.Put(addrNew, AllocEntry(entryOld.mEventIdx));
+  } else {
+    // The address is already present: because the tenured heap is
+    // sampled, a sampled allocation may already exist at addrNew.
+    // We choose to ignore the new event by setting its size to zero and
+    // pointing the newly allocated address to the old event.
+    // An event of size zero will be skipped when reporting.
+    mAllocEvents[entryOld.mEventIdx].mSize = 0;
+    tenuredEntryOld.mEventIdx = entryOld.mEventIdx;
+    mTenuredEntriesFG.Put(addrNew, tenuredEntryOld);
   }
-  mNurseryEntries.erase(iterOld);
+  mNurseryEntries.Remove(addrOld);
 }
 
 void
 GCHeapProfilerImpl::SampleInternal(void* aAddr, uint32_t aSize, AllocMap& aTable)
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
   size_t nSamples = AddBytesSampled(aSize);
   if (nSamples > 0) {
-    u_vector<u_string> trace = GetStacktrace();
+    nsTArray<nsCString> trace = GetStacktrace();
     AllocEvent ai(mTraceTable.Insert(trace), nSamples * mSampleSize, TimeStamp::Now());
-    aTable.insert(std::make_pair(aAddr, AllocEntry(mAllocEvents.size())));
-    mAllocEvents.push_back(ai);
+    aTable.Put(aAddr, AllocEntry(mAllocEvents.Length()));
+    mAllocEvents.AppendElement(ai);
   }
 }
 
 } // namespace mozilla
--- a/tools/memory-profiler/GCHeapProfilerImpl.h
+++ b/tools/memory-profiler/GCHeapProfilerImpl.h
@@ -16,19 +16,19 @@ namespace mozilla {
 
 class GCHeapProfilerImpl final : public GCHeapProfiler
                                , public ProfilerImpl
 {
 public:
   GCHeapProfilerImpl();
   ~GCHeapProfilerImpl() override;
 
-  u_vector<u_string> GetNames() const override;
-  u_vector<TrieNode> GetTraces() const override;
-  const u_vector<AllocEvent>& GetEvents() const override;
+  nsTArray<nsCString> GetNames() const override;
+  nsTArray<TrieNode> GetTraces() const override;
+  const nsTArray<AllocEvent>& GetEvents() const override;
 
   void reset() override;
   void sampleTenured(void* addr, uint32_t size) override;
   void sampleNursery(void* addr, uint32_t size) override;
   void markTenuredStart() override;
   void markTenured(void* addr) override;
   void sweepTenured() override;
   void sweepNursery() override;
@@ -39,15 +39,15 @@ private:
 
   PRLock* mLock;
   bool mMarking;
 
   AllocMap mNurseryEntries;
   AllocMap mTenuredEntriesFG;
   AllocMap mTenuredEntriesBG;
 
-  u_vector<AllocEvent> mAllocEvents;
+  nsTArray<AllocEvent> mAllocEvents;
   CompactTraceTable mTraceTable;
 };
 
 } // namespace mozilla
 
 #endif // memory_profiler_GCHeapProfilerImpl_h
--- a/tools/memory-profiler/MemoryProfiler.cpp
+++ b/tools/memory-profiler/MemoryProfiler.cpp
@@ -4,71 +4,56 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MemoryProfiler.h"
 
 #include <cmath>
 #include <cstdlib>
 
-#include "mozilla/Compiler.h"
+#include "mozilla/ClearOnShutdown.h"
 #include "mozilla/Move.h"
 #include "mozilla/TimeStamp.h"
-#include "mozilla/unused.h"
 
 #include "GCHeapProfilerImpl.h"
 #include "GeckoProfiler.h"
 #include "NativeProfilerImpl.h"
 #include "UncensoredAllocator.h"
 #include "js/TypeDecls.h"
 #include "jsfriendapi.h"
 #include "nsIDOMClassInfo.h"
 #include "nsIGlobalObject.h"
 #include "prtime.h"
 #include "xpcprivate.h"
 
 struct JSRuntime;
 
-#if MOZ_USING_STLPORT
-namespace std {
-template<class T>
-struct hash<T*>
-{
-  size_t operator()(T* v) const
-  {
-    return hash<void*>()(static_cast<void*>(v));
-  }
-};
-} // namespace std
-#endif
-
 namespace mozilla {
 
 #define MEMORY_PROFILER_SAMPLE_SIZE 65536
 #define BACKTRACE_BUFFER_SIZE 16384
 
 ProfilerImpl::ProfilerImpl()
   : mSampleSize(MEMORY_PROFILER_SAMPLE_SIZE)
 {
   mLog1minusP = std::log(1.0 - 1.0 / mSampleSize);
   mRemainingBytes = std::floor(std::log(1.0 - DRandom()) / mLog1minusP);
 }
 
-u_vector<u_string>
+nsTArray<nsCString>
 ProfilerImpl::GetStacktrace()
 {
-  u_vector<u_string> trace;
-  char* output = (char*)u_malloc(BACKTRACE_BUFFER_SIZE);
+  nsTArray<nsCString> trace;
+  nsAutoArrayPtr<char> output(new char[BACKTRACE_BUFFER_SIZE]);
 
   profiler_get_backtrace_noalloc(output, BACKTRACE_BUFFER_SIZE);
   for (const char* p = output; *p; p += strlen(p) + 1) {
-    trace.push_back(p);
+    trace.AppendElement(nsDependentCString(p));
   }
 
-  u_free(output);
   return trace;
 }
 
 // Generate a random number in [0, 1).
 double
 ProfilerImpl::DRandom()
 {
   return double(((uint64_t(std::rand()) & ((1 << 26) - 1)) << 27) +
@@ -87,219 +72,239 @@ ProfilerImpl::AddBytesSampled(uint32_t a
   mRemainingBytes -= aBytes;
   return nSamples;
 }
 
 NS_IMPL_ISUPPORTS(MemoryProfiler, nsIMemoryProfiler)
 
 PRLock* MemoryProfiler::sLock;
 uint32_t MemoryProfiler::sProfileRuntimeCount;
-NativeProfilerImpl* MemoryProfiler::sNativeProfiler;
-JSRuntimeProfilerMap* MemoryProfiler::sJSRuntimeProfilerMap;
+StaticAutoPtr<NativeProfilerImpl> MemoryProfiler::sNativeProfiler;
+StaticAutoPtr<JSRuntimeProfilerMap> MemoryProfiler::sJSRuntimeProfilerMap;
 TimeStamp MemoryProfiler::sStartTime;
 
 void
 MemoryProfiler::InitOnce()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   static bool initialized = false;
 
   if (!initialized) {
-    InitializeMallocHook();
+    MallocHook::Initialize();
     sLock = PR_NewLock();
     sProfileRuntimeCount = 0;
     sJSRuntimeProfilerMap = new JSRuntimeProfilerMap();
+    ClearOnShutdown(&sJSRuntimeProfilerMap);
+    ClearOnShutdown(&sNativeProfiler);
     std::srand(PR_Now());
     bool ignored;
     sStartTime = TimeStamp::ProcessCreation(ignored);
     initialized = true;
   }
 }
 
 NS_IMETHODIMP
 MemoryProfiler::StartProfiler()
 {
   InitOnce();
-  JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(sLock);
-  if (!(*sJSRuntimeProfilerMap)[runtime].mEnabled) {
-    (*sJSRuntimeProfilerMap)[runtime].mEnabled = true;
+  JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
+  ProfilerForJSRuntime profiler;
+  if (!sJSRuntimeProfilerMap->Get(runtime, &profiler) ||
+      !profiler.mEnabled) {
     if (sProfileRuntimeCount == 0) {
       js::EnableRuntimeProfilingStack(runtime, true);
       if (!sNativeProfiler) {
         sNativeProfiler = new NativeProfilerImpl();
       }
       MemProfiler::SetNativeProfiler(sNativeProfiler);
     }
     GCHeapProfilerImpl* gp = new GCHeapProfilerImpl();
-    (*sJSRuntimeProfilerMap)[runtime].mProfiler = gp;
+    profiler.mEnabled = true;
+    profiler.mProfiler = gp;
+    sJSRuntimeProfilerMap->Put(runtime, profiler);
     MemProfiler::GetMemProfiler(runtime)->start(gp);
     if (sProfileRuntimeCount == 0) {
-      EnableMallocHook(sNativeProfiler);
+      MallocHook::Enable(sNativeProfiler);
     }
     sProfileRuntimeCount++;
   }
   return NS_OK;
 }
 
 NS_IMETHODIMP
 MemoryProfiler::StopProfiler()
 {
   InitOnce();
-  JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(sLock);
-  if ((*sJSRuntimeProfilerMap)[runtime].mEnabled) {
+  JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
+  ProfilerForJSRuntime profiler;
+  if (sJSRuntimeProfilerMap->Get(runtime, &profiler) &&
+      profiler.mEnabled) {
     MemProfiler::GetMemProfiler(runtime)->stop();
     if (--sProfileRuntimeCount == 0) {
-      DisableMallocHook();
+      MallocHook::Disable();
       MemProfiler::SetNativeProfiler(nullptr);
       js::EnableRuntimeProfilingStack(runtime, false);
     }
-    (*sJSRuntimeProfilerMap)[runtime].mEnabled = false;
+    profiler.mEnabled = false;
+    sJSRuntimeProfilerMap->Put(runtime, profiler);
   }
   return NS_OK;
 }
 
 NS_IMETHODIMP
 MemoryProfiler::ResetProfiler()
 {
   InitOnce();
-  JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(sLock);
-  if (!(*sJSRuntimeProfilerMap)[runtime].mEnabled) {
-    delete (*sJSRuntimeProfilerMap)[runtime].mProfiler;
-    (*sJSRuntimeProfilerMap)[runtime].mProfiler = nullptr;
+  JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
+  ProfilerForJSRuntime profiler;
+  if (!sJSRuntimeProfilerMap->Get(runtime, &profiler) ||
+      !profiler.mEnabled) {
+    delete profiler.mProfiler;
+    profiler.mProfiler = nullptr;
+    sJSRuntimeProfilerMap->Put(runtime, profiler);
   }
   if (sProfileRuntimeCount == 0) {
-    delete sNativeProfiler;
     sNativeProfiler = nullptr;
   }
   return NS_OK;
 }
 
 struct MergedTraces
 {
-  u_vector<u_string> mNames;
-  u_vector<TrieNode> mTraces;
-  u_vector<AllocEvent> mEvents;
+  nsTArray<nsCString> mNames;
+  nsTArray<TrieNode> mTraces;
+  nsTArray<AllocEvent> mEvents;
 };
 
 // Merge events and corresponding traces and names.
 static MergedTraces
-MergeResults(u_vector<u_string> names0, u_vector<TrieNode> traces0, u_vector<AllocEvent> events0,
-             u_vector<u_string> names1, u_vector<TrieNode> traces1, u_vector<AllocEvent> events1)
+MergeResults(const nsTArray<nsCString>& names0,
+             const nsTArray<TrieNode>& traces0,
+             const nsTArray<AllocEvent>& events0,
+             const nsTArray<nsCString>& names1,
+             const nsTArray<TrieNode>& traces1,
+             const nsTArray<AllocEvent>& events1)
 {
-  NodeIndexMap<u_string> names;
-  NodeIndexMap<TrieNode> traces;
-  u_vector<AllocEvent> events;
+  NodeIndexMap<nsCStringHashKey, nsCString> names;
+  NodeIndexMap<nsGenericHashKey<TrieNode>, TrieNode> traces;
+  nsTArray<AllocEvent> events;
 
-  u_vector<size_t> names1Tonames0;
-  u_vector<size_t> traces1Totraces0(1, 0);
+  nsTArray<size_t> names1Tonames0(names1.Length());
+  nsTArray<size_t> traces1Totraces0(traces1.Length());
 
   // Merge names.
   for (auto& i: names0) {
     names.Insert(i);
   }
   for (auto& i: names1) {
-    names1Tonames0.push_back(names.Insert(i));
+    names1Tonames0.AppendElement(names.Insert(i));
   }
 
   // Merge traces. Note that traces1[i].parentIdx < i for all i > 0.
   for (auto& i: traces0) {
     traces.Insert(i);
   }
-  for (size_t i = 1; i < traces1.size(); i++) {
+  traces1Totraces0.AppendElement(0);
+  for (size_t i = 1; i < traces1.Length(); i++) {
     TrieNode node = traces1[i];
     node.parentIdx = traces1Totraces0[node.parentIdx];
     node.nameIdx = names1Tonames0[node.nameIdx];
-    traces1Totraces0.push_back(traces.Insert(node));
-  }
-
-  // Update events1
-  for (auto& i: events1) {
-    i.mTraceIdx = traces1Totraces0[i.mTraceIdx];
+    traces1Totraces0.AppendElement(traces.Insert(node));
   }
 
   // Merge the events according to timestamps.
   auto p0 = events0.begin();
   auto p1 = events1.begin();
 
   while (p0 != events0.end() && p1 != events1.end()) {
     if (p0->mTimestamp < p1->mTimestamp) {
-      events.push_back(*p0++);
+      events.AppendElement(*p0++);
     } else {
-      events.push_back(*p1++);
+      events.AppendElement(*p1++);
+      events.LastElement().mTraceIdx =
+        traces1Totraces0[events.LastElement().mTraceIdx];
     }
   }
 
   while (p0 != events0.end()) {
-    events.push_back(*p0++);
+    events.AppendElement(*p0++);
   }
 
   while (p1 != events1.end()) {
-    events.push_back(*p1++);
+    events.AppendElement(*p1++);
+    events.LastElement().mTraceIdx =
+      traces1Totraces0[events.LastElement().mTraceIdx];
   }
 
   return MergedTraces{names.Serialize(), traces.Serialize(), Move(events)};
 }
 
 NS_IMETHODIMP
 MemoryProfiler::GetResults(JSContext* cx, JS::MutableHandle<JS::Value> aResult)
 {
   InitOnce();
+  AutoUseUncensoredAllocator ua;
+  AutoMPLock lock(sLock);
   JSRuntime* runtime = XPCJSRuntime::Get()->Runtime();
-  AutoMPLock lock(sLock);
   // Getting results when the profiler is running is not allowed.
   if (sProfileRuntimeCount > 0) {
     return NS_OK;
   }
   // Return immediately when native profiler does not exist.
   if (!sNativeProfiler) {
     return NS_OK;
   }
   // Return immediately when there's no result in current runtime.
-  if (!(*sJSRuntimeProfilerMap)[runtime].mProfiler) {
+  ProfilerForJSRuntime profiler;
+  if (!sJSRuntimeProfilerMap->Get(runtime, &profiler) ||
+      !profiler.mProfiler) {
     return NS_OK;
   }
-  GCHeapProfilerImpl* gp = (*sJSRuntimeProfilerMap)[runtime].mProfiler;
+  GCHeapProfilerImpl* gp = profiler.mProfiler;
 
   auto results = MergeResults(gp->GetNames(), gp->GetTraces(), gp->GetEvents(),
                               sNativeProfiler->GetNames(),
                               sNativeProfiler->GetTraces(),
                               sNativeProfiler->GetEvents());
-  u_vector<u_string> names = Move(results.mNames);
-  u_vector<TrieNode> traces = Move(results.mTraces);
-  u_vector<AllocEvent> events = Move(results.mEvents);
+  const nsTArray<nsCString>& names = results.mNames;
+  const nsTArray<TrieNode>& traces = results.mTraces;
+  const nsTArray<AllocEvent>& events = results.mEvents;
 
-  JS::RootedObject jsnames(cx, JS_NewArrayObject(cx, names.size()));
-  JS::RootedObject jstraces(cx, JS_NewArrayObject(cx, traces.size()));
-  JS::RootedObject jsevents(cx, JS_NewArrayObject(cx, events.size()));
+  JS::RootedObject jsnames(cx, JS_NewArrayObject(cx, names.Length()));
+  JS::RootedObject jstraces(cx, JS_NewArrayObject(cx, traces.Length()));
+  JS::RootedObject jsevents(cx, JS_NewArrayObject(cx, events.Length()));
 
-  for (size_t i = 0; i < names.size(); i++) {
-    JS::RootedString name(cx, JS_NewStringCopyZ(cx, names[i].c_str()));
+  for (size_t i = 0; i < names.Length(); i++) {
+    JS::RootedString name(cx, JS_NewStringCopyZ(cx, names[i].get()));
     JS_SetElement(cx, jsnames, i, name);
   }
 
-  for (size_t i = 0; i < traces.size(); i++) {
+  for (size_t i = 0; i < traces.Length(); i++) {
     JS::RootedObject tn(cx, JS_NewPlainObject(cx));
     JS::RootedValue nameIdx(cx, JS_NumberValue(traces[i].nameIdx));
     JS::RootedValue parentIdx(cx, JS_NumberValue(traces[i].parentIdx));
     JS_SetProperty(cx, tn, "nameIdx", nameIdx);
     JS_SetProperty(cx, tn, "parentIdx", parentIdx);
     JS_SetElement(cx, jstraces, i, tn);
   }
 
   int i = 0;
   for (auto ent: events) {
     if (ent.mSize == 0) {
       continue;
     }
     MOZ_ASSERT(!sStartTime.IsNull());
-    double time = (sStartTime - ent.mTimestamp).ToMilliseconds();
+    double time = (ent.mTimestamp - sStartTime).ToMilliseconds();
     JS::RootedObject tn(cx, JS_NewPlainObject(cx));
     JS::RootedValue size(cx, JS_NumberValue(ent.mSize));
     JS::RootedValue traceIdx(cx, JS_NumberValue(ent.mTraceIdx));
     JS::RootedValue timestamp(cx, JS_NumberValue(time));
     JS_SetProperty(cx, tn, "size", size);
     JS_SetProperty(cx, tn, "traceIdx", traceIdx);
     JS_SetProperty(cx, tn, "timestamp", timestamp);
     JS_SetElement(cx, jsevents, i++, tn);
--- a/tools/memory-profiler/MemoryProfiler.h
+++ b/tools/memory-profiler/MemoryProfiler.h
@@ -4,64 +4,66 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef tools_profiler_MemoryProfiler_h
 #define tools_profiler_MemoryProfiler_h
 
 #include "nsIMemoryProfiler.h"
 
+#include "mozilla/StaticPtr.h"
 #include "mozilla/TimeStamp.h"
 
 #include "CompactTraceTable.h"
-#include "UncensoredAllocator.h"
-
+#include "nsTArray.h"
 #include "prlock.h"
 
 #define MEMORY_PROFILER_CID                                     \
   { 0xf976eaa2, 0xcc1f, 0x47ee,                                 \
     { 0x81, 0x29, 0xb8, 0x26, 0x2a, 0x3d, 0xb6, 0xb2 } }
 
 #define MEMORY_PROFILER_CONTRACT_ID "@mozilla.org/tools/memory-profiler;1"
 
 struct JSRuntime;
+struct PRLock;
 
 namespace mozilla {
 
 class NativeProfilerImpl;
 class GCHeapProfilerImpl;
 
 struct ProfilerForJSRuntime
 {
   ProfilerForJSRuntime()
     : mProfiler(nullptr)
     , mEnabled(false)
   {}
   GCHeapProfilerImpl* mProfiler;
   bool mEnabled;
 };
-using JSRuntimeProfilerMap = u_unordered_map<JSRuntime*, ProfilerForJSRuntime>;
+using JSRuntimeProfilerMap =
+  nsDataHashtable<nsClearingPtrHashKey<JSRuntime>, ProfilerForJSRuntime>;
 
 class MemoryProfiler final : public nsIMemoryProfiler
 {
 public:
   NS_DECL_ISUPPORTS
   NS_DECL_NSIMEMORYPROFILER
 
 private:
   static void InitOnce();
   ~MemoryProfiler() {}
 
   // The accesses to other static member are guarded by sLock and
   // sProfileRuntimeCount.
   static PRLock* sLock;
   static uint32_t sProfileRuntimeCount;
 
-  static NativeProfilerImpl* sNativeProfiler;
-  static JSRuntimeProfilerMap* sJSRuntimeProfilerMap;
+  static StaticAutoPtr<NativeProfilerImpl> sNativeProfiler;
+  static StaticAutoPtr<JSRuntimeProfilerMap> sJSRuntimeProfilerMap;
   static TimeStamp sStartTime;
 };
 
 // Allocation events to be reported.
 struct AllocEvent {
   TimeStamp mTimestamp;
   // index to a stacktrace singleton.
   uint32_t mTraceIdx;
@@ -75,34 +77,40 @@ struct AllocEvent {
   {}
 };
 
 // Index to allocation events but also a mark bit to be GC-able.
 struct AllocEntry {
   uint32_t mEventIdx : 31;
   bool mMarked : 1;
 
-  AllocEntry(int aEventIdx)
+  // Default constructor for uninitialized stack value required by
+  // getter methods.
+  AllocEntry()
+    : mEventIdx(0)
+    , mMarked(false)
+  {}
+  explicit AllocEntry(int aEventIdx)
     : mEventIdx(aEventIdx)
     , mMarked(false)
   {}
 };
 
-using AllocMap = u_unordered_map<void*, AllocEntry>;
+using AllocMap = nsDataHashtable<nsClearingVoidPtrHashKey, AllocEntry>;
 
 class ProfilerImpl
 {
 public:
-  static u_vector<u_string> GetStacktrace();
+  static nsTArray<nsCString> GetStacktrace();
   static double DRandom();
 
   ProfilerImpl();
-  virtual u_vector<u_string> GetNames() const = 0;
-  virtual u_vector<TrieNode> GetTraces() const = 0;
-  virtual const u_vector<AllocEvent>& GetEvents() const = 0;
+  virtual nsTArray<nsCString> GetNames() const = 0;
+  virtual nsTArray<TrieNode> GetTraces() const = 0;
+  virtual const nsTArray<AllocEvent>& GetEvents() const = 0;
 
 protected:
   /**
    * The sampler generates a random variable which conforms to a geometric
    * distribution of probability p = 1 / mSampleSize to calculate the
    * next-to-be-sampled byte directly; It avoids rolling a dice on each byte.
    *
    * Let Bn denote a Bernoulli process with first success on n-th trial, the
--- a/tools/memory-profiler/NativeProfilerImpl.cpp
+++ b/tools/memory-profiler/NativeProfilerImpl.cpp
@@ -1,82 +1,82 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "NativeProfilerImpl.h"
 
-#include "mozilla/TimeStamp.h"
-
-#include "prlock.h"
+#include "UncensoredAllocator.h"
 
 namespace mozilla {
 
 NativeProfilerImpl::NativeProfilerImpl()
 {
   mLock = PR_NewLock();
 }
 
 NativeProfilerImpl::~NativeProfilerImpl()
 {
   if (mLock) {
     PR_DestroyLock(mLock);
   }
 }
 
-u_vector<u_string>
+nsTArray<nsCString>
 NativeProfilerImpl::GetNames() const
 {
   return mTraceTable.GetNames();
 }
 
-u_vector<TrieNode>
+nsTArray<TrieNode>
 NativeProfilerImpl::GetTraces() const
 {
   return mTraceTable.GetTraces();
 }
 
-const u_vector<AllocEvent>&
+const nsTArray<AllocEvent>&
 NativeProfilerImpl::GetEvents() const
 {
   return mAllocEvents;
 }
 
 void
 NativeProfilerImpl::reset()
 {
   mTraceTable.Reset();
-  mAllocEvents.clear();
-  mNativeEntries.clear();
+  mAllocEvents.Clear();
+  mNativeEntries.Clear();
 }
 
 void
 NativeProfilerImpl::sampleNative(void* addr, uint32_t size)
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
   size_t nSamples = AddBytesSampled(size);
   if (nSamples > 0) {
-    u_vector<u_string> trace = GetStacktrace();
+    nsTArray<nsCString> trace = GetStacktrace();
     AllocEvent ai(mTraceTable.Insert(trace), nSamples * mSampleSize, TimeStamp::Now());
-    mNativeEntries.insert(std::make_pair(addr, AllocEntry(mAllocEvents.size())));
-    mAllocEvents.push_back(ai);
+    mNativeEntries.Put(addr, AllocEntry(mAllocEvents.Length()));
+    mAllocEvents.AppendElement(ai);
   }
 }
 
 void
 NativeProfilerImpl::removeNative(void* addr)
 {
+  AutoUseUncensoredAllocator ua;
   AutoMPLock lock(mLock);
 
-  auto res = mNativeEntries.find(addr);
-  if (res == mNativeEntries.end()) {
+  AllocEntry entry;
+  if (!mNativeEntries.Get(addr, &entry)) {
     return;
   }
 
-  AllocEvent& oldEvent = mAllocEvents[res->second.mEventIdx];
+  AllocEvent& oldEvent = mAllocEvents[entry.mEventIdx];
   AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
-  mAllocEvents.push_back(newEvent);
-  mNativeEntries.erase(res);
+  mAllocEvents.AppendElement(newEvent);
+  mNativeEntries.Remove(addr);
 }
 
 } // namespace mozilla
--- a/tools/memory-profiler/NativeProfilerImpl.h
+++ b/tools/memory-profiler/NativeProfilerImpl.h
@@ -18,26 +18,26 @@ namespace mozilla {
 
 class NativeProfilerImpl final : public NativeProfiler
                                , public ProfilerImpl
 {
 public:
   NativeProfilerImpl();
   ~NativeProfilerImpl() override;
 
-  u_vector<u_string> GetNames() const override;
-  u_vector<TrieNode> GetTraces() const override;
-  const u_vector<AllocEvent>& GetEvents() const override;
+  nsTArray<nsCString> GetNames() const override;
+  nsTArray<TrieNode> GetTraces() const override;
+  const nsTArray<AllocEvent>& GetEvents() const override;
 
   void reset() override;
   void sampleNative(void* addr, uint32_t size) override;
   void removeNative(void* addr) override;
 
 private:
   PRLock* mLock;
   AllocMap mNativeEntries;
-  u_vector<AllocEvent> mAllocEvents;
+  nsTArray<AllocEvent> mAllocEvents;
   CompactTraceTable mTraceTable;
 };
 
 } // namespace mozilla
 
 #endif // memory_profiler_NativeProfilerImpl_h
--- a/tools/memory-profiler/UncensoredAllocator.cpp
+++ b/tools/memory-profiler/UncensoredAllocator.cpp
@@ -1,109 +1,126 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "UncensoredAllocator.h"
 
+#include "mozilla/Assertions.h"
 #include "mozilla/unused.h"
 
+#include "MainThreadUtils.h"
 #include "jsfriendapi.h"
+#include "nsDebug.h"
+#include "prlock.h"
 #ifdef MOZ_REPLACE_MALLOC
 #include "replace_malloc_bridge.h"
 #endif
 
 namespace mozilla {
 
-static void* (*uncensored_malloc)(size_t size);
-static void (*uncensored_free)(void* ptr);
-
 #ifdef MOZ_REPLACE_MALLOC
+ThreadLocal<bool> MallocHook::mEnabledTLS;
+NativeProfiler* MallocHook::mNativeProfiler;
+malloc_hook_table_t MallocHook::mMallocHook;
+#endif
 
-static bool sMemoryHookEnabled = false;
-static NativeProfiler* sNativeProfiler;
-static malloc_hook_table_t sMallocHook;
+AutoUseUncensoredAllocator::AutoUseUncensoredAllocator()
+{
+#ifdef MOZ_REPLACE_MALLOC
+  MallocHook::mEnabledTLS.set(false);
+#endif
+}
 
-static void*
-SampleNative(void* addr, size_t size)
+AutoUseUncensoredAllocator::~AutoUseUncensoredAllocator()
 {
-  if (sMemoryHookEnabled) {
-    sNativeProfiler->sampleNative(addr, size);
-  }
-  return addr;
+#ifdef MOZ_REPLACE_MALLOC
+  MallocHook::mEnabledTLS.set(true);
+#endif
 }
 
-static void
-RemoveNative(void* addr)
+bool
+MallocHook::Enabled()
 {
-  if (sMemoryHookEnabled) {
-    sNativeProfiler->removeNative(addr);
-  }
+#ifdef MOZ_REPLACE_MALLOC
+  return mEnabledTLS.get() && mNativeProfiler;
+#else
+  return false;
+#endif
 }
-#endif
 
 void*
-u_malloc(size_t size)
+MallocHook::SampleNative(void* aAddr, size_t aSize)
 {
-  if (uncensored_malloc) {
-    return uncensored_malloc(size);
-  } else {
-    return malloc(size);
+#ifdef MOZ_REPLACE_MALLOC
+  if (MallocHook::Enabled()) {
+    mNativeProfiler->sampleNative(aAddr, aSize);
   }
+#endif
+  return aAddr;
 }
 
 void
-u_free(void* ptr)
+MallocHook::RemoveNative(void* aAddr)
 {
-  if (uncensored_free) {
-    uncensored_free(ptr);
-  } else {
-    free(ptr);
+#ifdef MOZ_REPLACE_MALLOC
+  if (MallocHook::Enabled()) {
+    mNativeProfiler->removeNative(aAddr);
   }
+#endif
 }
 
-void InitializeMallocHook()
+void
+MallocHook::Initialize()
 {
 #ifdef MOZ_REPLACE_MALLOC
-  sMallocHook.free_hook = RemoveNative;
-  sMallocHook.malloc_hook = SampleNative;
+  MOZ_ASSERT(NS_IsMainThread());
+  mMallocHook.free_hook = RemoveNative;
+  mMallocHook.malloc_hook = SampleNative;
   ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
   if (bridge) {
     mozilla::unused << bridge->RegisterHook("memory-profiler", nullptr, nullptr);
   }
+  if (!mEnabledTLS.initialized()) {
+    bool success = mEnabledTLS.init();
+    if (NS_WARN_IF(!success)) {
+      return;
+    }
+    mEnabledTLS.set(false);
+  }
 #endif
-  if (!uncensored_malloc && !uncensored_free) {
-    uncensored_malloc = malloc;
-    uncensored_free = free;
-  }
 }
 
-void EnableMallocHook(NativeProfiler* aNativeProfiler)
+void
+MallocHook::Enable(NativeProfiler* aNativeProfiler)
 {
 #ifdef MOZ_REPLACE_MALLOC
+  MOZ_ASSERT(NS_IsMainThread());
+  if (NS_WARN_IF(!mEnabledTLS.initialized())) {
+    return;
+  }
   ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
   if (bridge) {
     const malloc_table_t* alloc_funcs =
-      bridge->RegisterHook("memory-profiler", nullptr, &sMallocHook);
+      bridge->RegisterHook("memory-profiler", nullptr, &mMallocHook);
     if (alloc_funcs) {
-      uncensored_malloc = alloc_funcs->malloc;
-      uncensored_free = alloc_funcs->free;
-      sNativeProfiler = aNativeProfiler;
-      sMemoryHookEnabled = true;
+      mNativeProfiler = aNativeProfiler;
     }
   }
 #endif
 }
 
-void DisableMallocHook()
+void
+MallocHook::Disable()
 {
 #ifdef MOZ_REPLACE_MALLOC
+  MOZ_ASSERT(NS_IsMainThread());
   ReplaceMallocBridge* bridge = ReplaceMallocBridge::Get(3);
   if (bridge) {
     bridge->RegisterHook("memory-profiler", nullptr, nullptr);
-    sMemoryHookEnabled = false;
+    mNativeProfiler = nullptr;
   }
 #endif
 }
 
 } // namespace mozilla
--- a/tools/memory-profiler/UncensoredAllocator.h
+++ b/tools/memory-profiler/UncensoredAllocator.h
@@ -2,105 +2,46 @@
 /* vim: set ts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef memory_profiler_UncensoredAllocator_h
 #define memory_profiler_UncensoredAllocator_h
 
-#include "mozilla/Compiler.h"
+#include "mozilla/ThreadLocal.h"
 
-#include <string>
-#include <unordered_map>
-#include <vector>
+#ifdef MOZ_REPLACE_MALLOC
+#include "replace_malloc_bridge.h"
+#endif
 
 class NativeProfiler;
 
-#if MOZ_USING_STLPORT
-namespace std {
-using tr1::unordered_map;
-} // namespace std
-#endif
-
 namespace mozilla {
 
-void InitializeMallocHook();
-void EnableMallocHook(NativeProfiler* aNativeProfiler);
-void DisableMallocHook();
-void* u_malloc(size_t size);
-void u_free(void* ptr);
-
-#ifdef MOZ_REPLACE_MALLOC
-template<class Tp>
-struct UncensoredAllocator
+class MallocHook final
 {
-  typedef size_t size_type;
-  typedef ptrdiff_t difference_type;
-  typedef Tp* pointer;
-  typedef const Tp* const_pointer;
-  typedef Tp& reference;
-  typedef const Tp& const_reference;
-  typedef Tp value_type;
-
-  UncensoredAllocator() {}
-
-  template<class T>
-  UncensoredAllocator(const UncensoredAllocator<T>&) {}
-
-  template<class Other>
-  struct rebind
-  {
-    typedef UncensoredAllocator<Other> other;
-  };
-  Tp* allocate(size_t n)
-  {
-    return reinterpret_cast<Tp*>(u_malloc(n * sizeof(Tp)));
-  }
-  void deallocate(Tp* p, size_t n)
-  {
-    u_free(reinterpret_cast<void*>(p));
-  }
-  void construct(Tp* p, const Tp& val)
-  {
-    new ((void*)p) Tp(val);
-  }
-  void destroy(Tp* p)
-  {
-    p->Tp::~Tp();
-  }
-  bool operator==(const UncensoredAllocator& rhs) const
-  {
-    return true;
-  }
-  bool operator!=(const UncensoredAllocator& rhs) const
-  {
-    return false;
-  }
-  size_type max_size() const
-  {
-    return static_cast<size_type>(-1) / sizeof(Tp);
-  }
+public:
+  static void Initialize();
+  static void Enable(NativeProfiler* aNativeProfiler);
+  static void Disable();
+  static bool Enabled();
+private:
+  static void* SampleNative(void* aAddr, size_t aSize);
+  static void RemoveNative(void* aAddr);
+#ifdef MOZ_REPLACE_MALLOC
+  static ThreadLocal<bool> mEnabledTLS;
+  static NativeProfiler* mNativeProfiler;
+  static malloc_hook_table_t mMallocHook;
+#endif
+  friend class AutoUseUncensoredAllocator;
 };
 
-using u_string =
-  std::basic_string<char, std::char_traits<char>, UncensoredAllocator<char>>;
-
-template<typename T>
-using u_vector = std::vector<T, UncensoredAllocator<T>>;
-
-template<typename K, typename V, typename H = std::hash<K>>
-using u_unordered_map =
-  std::unordered_map<K, V, H, std::equal_to<K>, UncensoredAllocator<std::pair<K, V>>>;
+class AutoUseUncensoredAllocator final
+{
+public:
+  AutoUseUncensoredAllocator();
+  ~AutoUseUncensoredAllocator();
+};
 
-#else
-
-using u_string = std::string;
-template<typename T>
-using u_vector = std::vector<T>;
-template<typename K, typename V, typename H = std::hash<K>>
-using u_unordered_map =
-  std::unordered_map<K, V, H>;
-
-#endif
 } // namespace mozilla
 
 #endif // memory_profiler_UncensoredAllocator_h