Bug 1413096 - Add "using namespace mozilla" to mozjemalloc.cpp. r?njn draft
authorMike Hommey <mh+mozilla@glandium.org>
Mon, 30 Oct 2017 17:19:44 +0900
changeset 689160 fe612f0bec8c1a88a4822708410167a982ac1309
parent 689159 298018147adae93ecbfd3ea14cd362dfb9b118ef
child 689161 6171f26df9dff5ad7c45019f77d9fe1137346fdf
push id: 86942
push user: bmo:mh+mozilla@glandium.org
push date: Tue, 31 Oct 2017 06:24:30 +0000
reviewers: njn
bugs: 1413096
milestone: 58.0a1
Bug 1413096 - Add "using namespace mozilla" to mozjemalloc.cpp. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -145,16 +145,18 @@
 #include <errno.h>
 #include <stdlib.h>
 #include <limits.h>
 #include <stdarg.h>
 #include <stdio.h>
 #include <string.h>
 #include <algorithm>
 
+using namespace mozilla;
+
 #ifdef XP_WIN
 
 // Some defines from the CRT internal headers that we need here.
 #define _CRT_SPINCOUNT 5000
 #include <io.h>
 #include <windows.h>
 #include <intrin.h>
 
@@ -458,17 +460,17 @@ static size_t arena_maxclass; // Max siz
 
 // Recycle at most 128 chunks. With 1 MiB chunks, this means we retain at most
 // 6.25% of the process address space on a 32-bit OS for later use.
 #define CHUNK_RECYCLE_LIMIT 128
 
 static const size_t gRecycleLimit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
 
 // The current amount of recycled bytes, updated atomically.
-static mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gRecycledSize;
+static Atomic<size_t, ReleaseAcquire> gRecycledSize;
 
 // ***************************************************************************
 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
 static void*
@@ -506,17 +508,17 @@ struct MOZ_RAII MutexAutoLock
   ~MutexAutoLock() { mMutex.Unlock(); }
 
 private:
   MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
   Mutex& mMutex;
 };
 
 // Set to true once the allocator has been initialized.
-static mozilla::Atomic<bool> malloc_initialized(false);
+static Atomic<bool> malloc_initialized(false);
 
 #if defined(XP_WIN)
 // No init lock for Windows.
 #elif defined(XP_DARWIN)
 static Mutex gInitLock = { OS_SPINLOCK_INIT };
 #elif defined(XP_LINUX) && !defined(ANDROID)
 static Mutex gInitLock = { PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP };
 #else
@@ -805,17 +807,17 @@ struct arena_chunk_t
 
 #ifdef MALLOC_DOUBLE_PURGE
   // If we're double-purging, we maintain a linked list of chunks which
   // have pages which have been madvise(MADV_FREE)'d but not explicitly
   // purged.
   //
   // We're currently lazy and don't remove a chunk from this list when
   // all its madvised pages are recommitted.
-  mozilla::DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
+  DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
 #endif
 
   // Number of dirty pages.
   size_t ndirty;
 
   // Map of pages within chunk that keeps track of free/large/small.
   arena_chunk_map_t map[1]; // Dynamically sized.
 };
@@ -919,17 +921,17 @@ struct arena_t
 
 private:
   // Tree of dirty-page-containing chunks this arena manages.
   RedBlackTree<arena_chunk_t, ArenaDirtyChunkTrait> mChunksDirty;
 
 #ifdef MALLOC_DOUBLE_PURGE
   // Head of a linked list of MADV_FREE'd-page-containing chunks this
   // arena manages.
-  mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
+  DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
 #endif
 
   // In order to avoid rapid chunk allocation/deallocation when an arena
   // oscillates right on the cusp of needing a new chunk, cache the most
   // recently freed chunk.  The spare is left in the arena's chunk trees
   // until it is deleted.
   //
   // There is one spare chunk per arena, rather than one spare total, in
@@ -1193,18 +1195,17 @@ static size_t base_committed;
 
 // The arena associated with the current thread (per jemalloc_thread_local_arena)
 // On OSX, __thread/thread_local circles back calling malloc to allocate storage
 // on first access on each thread, which leads to an infinite loop, but
 // pthread-based TLS somehow doesn't have this problem.
 #if !defined(XP_DARWIN)
 static MOZ_THREAD_LOCAL(arena_t*) thread_arena;
 #else
-static mozilla::detail::ThreadLocal<arena_t*,
-                                    mozilla::detail::ThreadLocalKeyStorage>
+static detail::ThreadLocal<arena_t*, detail::ThreadLocalKeyStorage>
   thread_arena;
 #endif
 
 // *****************************
 // Runtime configuration options.
 
 const uint8_t kAllocJunk = 0xe4;
 const uint8_t kAllocPoison = 0xe5;
@@ -1592,17 +1593,17 @@ base_node_dealloc(extent_node_t* aNode)
   base_nodes = aNode;
 }
 
 struct BaseNodeFreePolicy
 {
   void operator()(extent_node_t* aPtr) { base_node_dealloc(aPtr); }
 };
 
-using UniqueBaseNode = mozilla::UniquePtr<extent_node_t, BaseNodeFreePolicy>;
+using UniqueBaseNode = UniquePtr<extent_node_t, BaseNodeFreePolicy>;
 
 // End Utility functions/macros.
 // ***************************************************************************
 // Begin chunk management functions.
 
 #ifdef XP_WIN
 
 static void*
@@ -2596,18 +2597,17 @@ arena_t::InitChunk(arena_chunk_t* aChunk
   pages_decommit(run, arena_maxclass);
 #endif
   mStats.committed += arena_chunk_header_npages;
 
   // Insert the run into the tree of available runs.
   mRunsAvail.Insert(&aChunk->map[arena_chunk_header_npages]);
 
 #ifdef MALLOC_DOUBLE_PURGE
-  new (&aChunk->chunks_madvised_elem)
-    mozilla::DoublyLinkedListElement<arena_chunk_t>();
+  new (&aChunk->chunks_madvised_elem) DoublyLinkedListElement<arena_chunk_t>();
 #endif
 }
 
 void
 arena_t::DeallocChunk(arena_chunk_t* aChunk)
 {
   if (mSpare) {
     if (mSpare->ndirty > 0) {
@@ -3855,17 +3855,17 @@ arena_t::arena_t()
   MOZ_RELEASE_ASSERT(mLock.Init());
 
   memset(&mLink, 0, sizeof(mLink));
   memset(&mStats, 0, sizeof(arena_stats_t));
 
   // Initialize chunks.
   mChunksDirty.Init();
 #ifdef MALLOC_DOUBLE_PURGE
-  new (&mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
+  new (&mChunksMAdvised) DoublyLinkedList<arena_chunk_t>();
 #endif
   mSpare = nullptr;
 
   mNumDirty = 0;
   // Reduce the maximum amount of dirty pages we allow to be kept on
   // thread local arenas. TODO: make this more flexible.
   mMaxDirty = opt_dirty_max >> 3;
 
@@ -4752,18 +4752,18 @@ hard_purge_chunk(arena_chunk_t* aChunk)
       aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
     }
 
     // We could use mincore to find out which pages are actually
     // present, but it's not clear that's better.
     if (npages > 0) {
       pages_decommit(((char*)aChunk) + (i << pagesize_2pow),
                      npages << pagesize_2pow);
-      mozilla::Unused << pages_commit(((char*)aChunk) + (i << pagesize_2pow),
-                                      npages << pagesize_2pow);
+      Unused << pages_commit(((char*)aChunk) + (i << pagesize_2pow),
+                             npages << pagesize_2pow);
     }
     i += npages;
   }
 }
 
 // Explicitly remove all of this arena's MADV_FREE'd pages from memory.
 void
 arena_t::HardPurge()