Bug 1402283 - Rename extent_node_t fields. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
Wed, 08 Nov 2017 17:20:20 +0900
changeset 391070 e2df7f0bf18245e0644b170a7b0173928da76afc
parent 391069 257e3b6e6cf823fa6a5b38dc4e68696f94acd0f6
child 391071 3f44c449e280d5b1327028ccfdc0505070336209
push id 97190
push user mh@glandium.org
push date Fri, 10 Nov 2017 07:05:41 +0000
treeherder mozilla-inbound@ee9d4052e949
reviewers njn
bugs 1402283
milestone 58.0a1
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -613,65 +613,65 @@ enum ChunkType
   HUGE_CHUNK,     // used to back huge allocations (e.g. huge_malloc).
   RECYCLED_CHUNK, // chunk has been stored for future use by chunk_recycle.
 };
 
 // Tree of extents.
 struct extent_node_t
 {
   // Linkage for the size/address-ordered tree.
-  RedBlackTreeNode<extent_node_t> link_szad;
+  RedBlackTreeNode<extent_node_t> mLinkBySize;
 
   // Linkage for the address-ordered tree.
-  RedBlackTreeNode<extent_node_t> link_ad;
+  RedBlackTreeNode<extent_node_t> mLinkByAddr;
 
   // Pointer to the extent that this tree node is responsible for.
-  void* addr;
+  void* mAddr;
 
   // Total region size.
-  size_t size;
+  size_t mSize;
 
   // What type of chunk is there; used by chunk recycling code.
-  ChunkType chunk_type;
+  ChunkType mChunkType;
 };
 
 struct ExtentTreeSzTrait
 {
   static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
   {
-    return aThis->link_szad;
+    return aThis->mLinkBySize;
   }
 
   static inline int Compare(extent_node_t* aNode, extent_node_t* aOther)
   {
-    int ret = (aNode->size > aOther->size) - (aNode->size < aOther->size);
-    return ret ? ret : CompareAddr(aNode->addr, aOther->addr);
+    int ret = (aNode->mSize > aOther->mSize) - (aNode->mSize < aOther->mSize);
+    return ret ? ret : CompareAddr(aNode->mAddr, aOther->mAddr);
   }
 };
 
 struct ExtentTreeTrait
 {
   static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
   {
-    return aThis->link_ad;
+    return aThis->mLinkByAddr;
   }
 
   static inline int Compare(extent_node_t* aNode, extent_node_t* aOther)
   {
-    return CompareAddr(aNode->addr, aOther->addr);
+    return CompareAddr(aNode->mAddr, aOther->mAddr);
   }
 };
 
 struct ExtentTreeBoundsTrait : public ExtentTreeTrait
 {
   static inline int Compare(extent_node_t* aKey, extent_node_t* aNode)
   {
-    uintptr_t key_addr = reinterpret_cast<uintptr_t>(aKey->addr);
-    uintptr_t node_addr = reinterpret_cast<uintptr_t>(aNode->addr);
-    size_t node_size = aNode->size;
+    uintptr_t key_addr = reinterpret_cast<uintptr_t>(aKey->mAddr);
+    uintptr_t node_addr = reinterpret_cast<uintptr_t>(aNode->mAddr);
+    size_t node_size = aNode->mSize;
 
     // Is aKey within aNode?
     if (node_addr <= key_addr && key_addr < node_addr + node_size) {
       return 0;
     }
 
     return (key_addr > node_addr) - (key_addr < node_addr);
   }
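
The bounds trait above makes an address-ordered tree answer containment queries: Compare returns 0 whenever aKey falls inside [mAddr, mAddr + mSize), so an ordinary tree search lands on the extent that contains the pointer. A minimal standalone sketch of the same three-way comparison, using illustration-only names (Extent, BoundsCompare) rather than mozjemalloc's types:

    #include <cstddef>
    #include <cstdint>

    struct Extent
    {
      void* addr;
      size_t size;
    };

    // Three-way bounds comparison: any key inside [addr, addr + size)
    // compares equal to the extent, so searching for a pointer finds
    // the extent that contains it.
    static int
    BoundsCompare(const void* aKey, const Extent& aNode)
    {
      uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
      uintptr_t base = reinterpret_cast<uintptr_t>(aNode.addr);
      if (base <= key && key < base + aNode.size) {
        return 0;
      }
      return (key > base) - (key < base);
    }
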
@@ -1962,39 +1962,39 @@ chunk_recycle(size_t aSize, size_t aAlig
 {
   extent_node_t key;
 
   size_t alloc_size = aSize + aAlignment - kChunkSize;
   // Beware size_t wrap-around.
   if (alloc_size < aSize) {
     return nullptr;
   }
-  key.addr = nullptr;
-  key.size = alloc_size;
+  key.mAddr = nullptr;
+  key.mSize = alloc_size;
   chunks_mtx.Lock();
   extent_node_t* node = gChunksBySize.SearchOrNext(&key);
   if (!node) {
     chunks_mtx.Unlock();
     return nullptr;
   }
-  size_t leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, aAlignment) -
-                    (uintptr_t)node->addr;
-  MOZ_ASSERT(node->size >= leadsize + aSize);
-  size_t trailsize = node->size - leadsize - aSize;
-  void* ret = (void*)((uintptr_t)node->addr + leadsize);
-  ChunkType chunk_type = node->chunk_type;
+  size_t leadsize = ALIGNMENT_CEILING((uintptr_t)node->mAddr, aAlignment) -
+                    (uintptr_t)node->mAddr;
+  MOZ_ASSERT(node->mSize >= leadsize + aSize);
+  size_t trailsize = node->mSize - leadsize - aSize;
+  void* ret = (void*)((uintptr_t)node->mAddr + leadsize);
+  ChunkType chunk_type = node->mChunkType;
   if (aZeroed) {
     *aZeroed = (chunk_type == ZEROED_CHUNK);
   }
   // Remove node from the tree.
   gChunksBySize.Remove(node);
   gChunksByAddress.Remove(node);
   if (leadsize != 0) {
     // Insert the leading space as a smaller chunk.
-    node->size = leadsize;
+    node->mSize = leadsize;
     gChunksBySize.Insert(node);
     gChunksByAddress.Insert(node);
     node = nullptr;
   }
   if (trailsize != 0) {
     // Insert the trailing space as a smaller chunk.
     if (!node) {
       // An additional node is required, but
@@ -2005,19 +2005,19 @@ chunk_recycle(size_t aSize, size_t aAlig
       chunks_mtx.Unlock();
       node = base_node_alloc();
       if (!node) {
         chunk_dealloc(ret, aSize, chunk_type);
         return nullptr;
       }
       chunks_mtx.Lock();
     }
-    node->addr = (void*)((uintptr_t)(ret) + aSize);
-    node->size = trailsize;
-    node->chunk_type = chunk_type;
+    node->mAddr = (void*)((uintptr_t)(ret) + aSize);
+    node->mSize = trailsize;
+    node->mChunkType = chunk_type;
     gChunksBySize.Insert(node);
     gChunksByAddress.Insert(node);
     node = nullptr;
   }
 
   gRecycledSize -= aSize;
 
   chunks_mtx.Unlock();
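
chunk_recycle carves an aligned region out of a recycled range: it searches for a range of at least aSize + aAlignment - kChunkSize bytes (recycled ranges are chunk-aligned, so a range that big is guaranteed to contain an aAlignment-aligned block of aSize), then hands the leading and trailing slack back to the trees as smaller chunks. A worked sketch of the carve arithmetic, with AlignmentCeiling reimplemented here for illustration and made-up addresses:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Round aAddr up to a multiple of aAlign (a power of two), mirroring
    // mozjemalloc's ALIGNMENT_CEILING macro.
    static uintptr_t
    AlignmentCeiling(uintptr_t aAddr, uintptr_t aAlign)
    {
      return (aAddr + (aAlign - 1)) & ~(aAlign - 1);
    }

    int
    main()
    {
      uintptr_t nodeAddr = 0x100000; // start of a recycled range (assumed)
      size_t nodeSize = 0x400000;    // its size
      size_t size = 0x100000;        // requested size
      size_t align = 0x200000;       // requested alignment

      size_t leadsize = AlignmentCeiling(nodeAddr, align) - nodeAddr;
      size_t trailsize = nodeSize - leadsize - size;
      uintptr_t ret = nodeAddr + leadsize;
      // lead [nodeAddr, ret) and trail [ret + size, nodeAddr + nodeSize)
      // are reinserted into gChunksBySize/gChunksByAddress as smaller chunks.
      printf("ret=%#zx lead=%#zx trail=%#zx\n", (size_t)ret, leadsize, trailsize);
    }
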
@@ -2120,61 +2120,61 @@ chunk_record(void* aChunk, size_t aSize,
   // held.
   UniqueBaseNode xnode(base_node_alloc());
   // Use xprev to implement conditional deferred deallocation of prev.
   UniqueBaseNode xprev;
 
   // RAII deallocates xnode and xprev defined above after unlocking
   // in order to avoid potential dead-locks
   MutexAutoLock lock(chunks_mtx);
-  key.addr = (void*)((uintptr_t)aChunk + aSize);
+  key.mAddr = (void*)((uintptr_t)aChunk + aSize);
   extent_node_t* node = gChunksByAddress.SearchOrNext(&key);
   // Try to coalesce forward.
-  if (node && node->addr == key.addr) {
+  if (node && node->mAddr == key.mAddr) {
     // Coalesce chunk with the following address range.  This does
     // not change the position within gChunksByAddress, so only
     // remove/insert from/into gChunksBySize.
     gChunksBySize.Remove(node);
-    node->addr = aChunk;
-    node->size += aSize;
-    if (node->chunk_type != aType) {
-      node->chunk_type = RECYCLED_CHUNK;
+    node->mAddr = aChunk;
+    node->mSize += aSize;
+    if (node->mChunkType != aType) {
+      node->mChunkType = RECYCLED_CHUNK;
     }
     gChunksBySize.Insert(node);
   } else {
     // Coalescing forward failed, so insert a new node.
     if (!xnode) {
       // base_node_alloc() failed, which is an exceedingly
       // unlikely failure.  Leak chunk; its pages have
       // already been purged, so this is only a virtual
       // memory leak.
       return;
     }
     node = xnode.release();
-    node->addr = aChunk;
-    node->size = aSize;
-    node->chunk_type = aType;
+    node->mAddr = aChunk;
+    node->mSize = aSize;
+    node->mChunkType = aType;
     gChunksByAddress.Insert(node);
     gChunksBySize.Insert(node);
   }
 
   // Try to coalesce backward.
   extent_node_t* prev = gChunksByAddress.Prev(node);
-  if (prev && (void*)((uintptr_t)prev->addr + prev->size) == aChunk) {
+  if (prev && (void*)((uintptr_t)prev->mAddr + prev->mSize) == aChunk) {
     // Coalesce chunk with the previous address range.  This does
     // not change the position within gChunksByAddress, so only
     // remove/insert node from/into gChunksBySize.
     gChunksBySize.Remove(prev);
     gChunksByAddress.Remove(prev);
 
     gChunksBySize.Remove(node);
-    node->addr = prev->addr;
-    node->size += prev->size;
-    if (node->chunk_type != prev->chunk_type) {
-      node->chunk_type = RECYCLED_CHUNK;
+    node->mAddr = prev->mAddr;
+    node->mSize += prev->mSize;
+    if (node->mChunkType != prev->mChunkType) {
+      node->mChunkType = RECYCLED_CHUNK;
     }
     gChunksBySize.Insert(node);
 
     xprev.reset(prev);
   }
 
   gRecycledSize += aSize;
 }
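
chunk_record coalesces a freed chunk with the recycled ranges that abut it on either side, and downgrades mChunkType to RECYCLED_CHUNK whenever ranges of different provenance merge, since a mixed range can no longer be trusted as zeroed. A small sketch of the forward-merge rule on a plain struct, with hypothetical names (Range, TryMergeForward):

    #include <cstddef>
    #include <cstdint>

    enum ChunkType
    {
      ZEROED_CHUNK,
      HUGE_CHUNK,
      RECYCLED_CHUNK,
    };

    struct Range
    {
      uintptr_t addr;
      size_t size;
      ChunkType type;
    };

    // Merge a freed range [aAddr, aAddr + aSize) of type aType into the
    // range that immediately follows it, if the two are adjacent.
    static bool
    TryMergeForward(Range& aNext, uintptr_t aAddr, size_t aSize, ChunkType aType)
    {
      if (aNext.addr != aAddr + aSize) {
        return false; // not adjacent; the caller inserts a new node instead
      }
      aNext.addr = aAddr;
      aNext.size += aSize;
      if (aNext.type != aType) {
        aNext.type = RECYCLED_CHUNK; // mixed provenance: not known-zeroed
      }
      return true;
    }
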
@@ -3289,21 +3289,21 @@ isalloc_validate(const void* aPtr)
   if (chunk != aPtr) {
     MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
     return arena_salloc(aPtr);
   }
 
   extent_node_t key;
 
   // Chunk.
-  key.addr = (void*)chunk;
+  key.mAddr = (void*)chunk;
   MutexAutoLock lock(huge_mtx);
   extent_node_t* node = huge.Search(&key);
   if (node) {
-    return node->size;
+    return node->mSize;
   }
   return 0;
 }
 
 static inline size_t
 isalloc(const void* aPtr)
 {
   MOZ_ASSERT(aPtr);
@@ -3317,21 +3317,21 @@ isalloc(const void* aPtr)
   }
 
   extent_node_t key;
 
   // Chunk (huge allocation).
   MutexAutoLock lock(huge_mtx);
 
   // Extract from tree of huge allocations.
-  key.addr = const_cast<void*>(aPtr);
+  key.mAddr = const_cast<void*>(aPtr);
   extent_node_t* node = huge.Search(&key);
   MOZ_DIAGNOSTIC_ASSERT(node);
 
-  return node->size;
+  return node->mSize;
 }
 
 template<>
 inline void
 MozJemalloc::jemalloc_ptr_info(const void* aPtr, jemalloc_ptr_info_t* aInfo)
 {
   arena_chunk_t* chunk = GetChunkForPtr(aPtr);
 
@@ -3345,23 +3345,23 @@ MozJemalloc::jemalloc_ptr_info(const voi
 
   // Look for huge allocations before looking for |chunk| in gChunkRTree.
   // This is necessary because |chunk| won't be in gChunkRTree if it's
   // the second or subsequent chunk in a huge allocation.
   extent_node_t* node;
   extent_node_t key;
   {
     MutexAutoLock lock(huge_mtx);
-    key.addr = const_cast<void*>(aPtr);
+    key.mAddr = const_cast<void*>(aPtr);
     node =
       reinterpret_cast<RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(
         &huge)
         ->Search(&key);
     if (node) {
-      *aInfo = { TagLiveHuge, node->addr, node->size };
+      *aInfo = { TagLiveHuge, node->mAddr, node->mSize };
       return;
     }
   }
 
   // It's not a huge allocation. Check if we have a known chunk.
   if (!gChunkRTree.Get(chunk)) {
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
@@ -3858,19 +3858,19 @@ huge_palloc(size_t aSize, size_t aAlignm
     base_node_dealloc(node);
     return nullptr;
   }
   if (aZero) {
     chunk_ensure_zero(ret, csize, zeroed);
   }
 
   // Insert node into huge.
-  node->addr = ret;
+  node->mAddr = ret;
   psize = PAGE_CEILING(aSize);
-  node->size = psize;
+  node->mSize = psize;
 
   {
     MutexAutoLock lock(huge_mtx);
     huge.Insert(node);
 
     // Although we allocated space for csize bytes, we indicate that we've
     // allocated only psize bytes.
     //
@@ -3879,17 +3879,17 @@ huge_palloc(size_t aSize, size_t aAlignm
     //
     // If DECOMMIT is not defined, then we're relying on the OS to be lazy
     // about how it allocates physical pages to mappings.  If we never
     // touch the pages in excess of psize, the OS won't allocate a physical
     // page, and we won't use more than psize bytes of physical memory.
     //
     // A correct program will only touch memory in excess of how much it
     // requested if it first calls malloc_usable_size and finds out how
-    // much space it has to play with.  But because we set node->size =
+    // much space it has to play with.  But because we set node->mSize =
     // psize above, malloc_usable_size will return psize, not csize, and
     // the program will (hopefully) never touch bytes in excess of psize.
     // Thus those bytes won't take up space in physical memory, and we can
     // reasonably claim we never "allocated" them in the first place.
     huge_allocated += psize;
     huge_mapped += csize;
   }
 
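The comment block above hinges on the difference between the page-rounded request (psize, what huge_allocated and malloc_usable_size report) and the chunk-rounded mapping (csize, what huge_mapped reports). A toy illustration of the two roundings, assuming 4 KiB pages and 1 MiB chunks; the real constants come from elsewhere in mozjemalloc.cpp:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kPageSizeEx = 4096;     // assumed page size
    constexpr size_t kChunkSizeEx = 1 << 20; // assumed chunk size

    constexpr size_t
    PageCeiling(size_t aSize)
    {
      return (aSize + kPageSizeEx - 1) & ~(kPageSizeEx - 1);
    }

    constexpr size_t
    ChunkCeiling(size_t aSize)
    {
      return (aSize + kChunkSizeEx - 1) & ~(kChunkSizeEx - 1);
    }

    int
    main()
    {
      size_t request = (1 << 20) + 123; // a huge request just over one chunk
      // psize = 1052672 (257 pages): growth of huge_allocated, and what
      // malloc_usable_size will report.  csize = 2097152 (2 chunks):
      // growth of huge_mapped.
      printf("psize=%zu csize=%zu\n", PageCeiling(request), ChunkCeiling(request));
    }
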
@@ -3934,23 +3934,23 @@ huge_ralloc(void* aPtr, size_t aSize, si
 #ifdef MALLOC_DECOMMIT
     if (psize < aOldSize) {
       extent_node_t key;
 
       pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);
 
       // Update recorded size.
       MutexAutoLock lock(huge_mtx);
-      key.addr = const_cast<void*>(aPtr);
+      key.mAddr = const_cast<void*>(aPtr);
       extent_node_t* node = huge.Search(&key);
       MOZ_ASSERT(node);
-      MOZ_ASSERT(node->size == aOldSize);
+      MOZ_ASSERT(node->mSize == aOldSize);
       huge_allocated -= aOldSize - psize;
       // No need to change huge_mapped, because we didn't (un)map anything.
-      node->size = psize;
+      node->mSize = psize;
     } else if (psize > aOldSize) {
       if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
                         psize - aOldSize)) {
         return nullptr;
       }
     }
 #endif
 
@@ -3958,24 +3958,24 @@ huge_ralloc(void* aPtr, size_t aSize, si
     // DECOMMIT is not defined and the size class didn't change, we
     // do need to update the recorded size if the size increased,
     // so malloc_usable_size doesn't return a value smaller than
     // what was requested via realloc().
     if (psize > aOldSize) {
       // Update recorded size.
       extent_node_t key;
       MutexAutoLock lock(huge_mtx);
-      key.addr = const_cast<void*>(aPtr);
+      key.mAddr = const_cast<void*>(aPtr);
       extent_node_t* node = huge.Search(&key);
       MOZ_ASSERT(node);
-      MOZ_ASSERT(node->size == aOldSize);
+      MOZ_ASSERT(node->mSize == aOldSize);
       huge_allocated += psize - aOldSize;
       // No need to change huge_mapped, because we didn't
       // (un)map anything.
-      node->size = psize;
+      node->mSize = psize;
     }
 
     if (opt_zero && aSize > aOldSize) {
       memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
     }
     return aPtr;
   }
 
@@ -4004,28 +4004,28 @@ static void
 huge_dalloc(void* aPtr)
 {
   extent_node_t* node;
   {
     extent_node_t key;
     MutexAutoLock lock(huge_mtx);
 
     // Extract from tree of huge allocations.
-    key.addr = aPtr;
+    key.mAddr = aPtr;
     node = huge.Search(&key);
     MOZ_ASSERT(node);
-    MOZ_ASSERT(node->addr == aPtr);
+    MOZ_ASSERT(node->mAddr == aPtr);
     huge.Remove(node);
 
-    huge_allocated -= node->size;
-    huge_mapped -= CHUNK_CEILING(node->size);
+    huge_allocated -= node->mSize;
+    huge_mapped -= CHUNK_CEILING(node->mSize);
   }
 
   // Unmap chunk.
-  chunk_dealloc(node->addr, CHUNK_CEILING(node->size), HUGE_CHUNK);
+  chunk_dealloc(node->mAddr, CHUNK_CEILING(node->mSize), HUGE_CHUNK);
 
   base_node_dealloc(node);
 }
 
 static size_t
 GetKernelPageSize()
 {
   static size_t kernel_page_size = ([]() {