Bug 1414155 - Rename page size related constants. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 03 Nov 2017 12:13:17 +0900
changeset 443495 9d00f52783833b3b4e1af733200dea4bc4f7151a
parent 443494 578f15a2e0f63e785065576f67f803a1fe8f8a87
child 443496 a826adc9b1c76827939c31a2344edbb4e62175a7
push id 1618
push user Callek@gmail.com
push date Thu, 11 Jan 2018 17:45:48 +0000
treeherder mozilla-release@882ca853e05a
reviewers njn
bugs 1414155
milestone 58.0a1
first release with: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
last release without: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
Bug 1414155 - Rename page size related constants. r=njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -417,25 +417,25 @@ static const size_t kChunkSize = 1_MiB;
 static const size_t kChunkSizeMask = kChunkSize - 1;
 
 #ifdef MALLOC_STATIC_PAGESIZE
 // VM page size. It must divide the runtime CPU page size or the code
 // will abort.
 // Platform specific page size conditions copied from js/public/HeapAPI.h
 #if (defined(SOLARIS) || defined(__FreeBSD__)) &&                              \
   (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
-static const size_t pagesize = 8_KiB;
+static const size_t gPageSize = 8_KiB;
 #elif defined(__powerpc64__)
-static const size_t pagesize = 64_KiB;
+static const size_t gPageSize = 64_KiB;
 #else
-static const size_t pagesize = 4_KiB;
+static const size_t gPageSize = 4_KiB;
 #endif
 
 #else
-static size_t pagesize;
+static size_t gPageSize;
 #endif
 
 #ifdef MALLOC_STATIC_PAGESIZE
 #define DECLARE_GLOBAL(type, name)
 #define DEFINE_GLOBALS
 #define END_GLOBALS
 #define DEFINE_GLOBAL(type) static const type
 #define GLOBAL_LOG2 LOG2
@@ -453,57 +453,57 @@ static size_t pagesize;
 #define END_GLOBALS }
 #define DEFINE_GLOBAL(type)
 #define GLOBAL_LOG2 FloorLog2
 #define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
 #endif
 
 DECLARE_GLOBAL(size_t, gMaxSubPageClass)
 DECLARE_GLOBAL(uint8_t, nsbins)
-DECLARE_GLOBAL(uint8_t, pagesize_2pow)
-DECLARE_GLOBAL(size_t, pagesize_mask)
+DECLARE_GLOBAL(uint8_t, gPageSize2Pow)
+DECLARE_GLOBAL(size_t, gPageSizeMask)
 DECLARE_GLOBAL(size_t, chunk_npages)
 DECLARE_GLOBAL(size_t, arena_chunk_header_npages)
 DECLARE_GLOBAL(size_t, gMaxLargeClass)
 
 DEFINE_GLOBALS
 // Largest sub-page size class.
-DEFINE_GLOBAL(size_t) gMaxSubPageClass = pagesize / 2;
+DEFINE_GLOBAL(size_t) gMaxSubPageClass = gPageSize / 2;
 
 // Max size class for bins.
 #define gMaxBinClass gMaxSubPageClass
 
 // Number of (2^n)-spaced sub-page bins.
 DEFINE_GLOBAL(uint8_t)
 nsbins = GLOBAL_LOG2(gMaxSubPageClass) - LOG2(kMaxQuantumClass);
 
-DEFINE_GLOBAL(uint8_t) pagesize_2pow = GLOBAL_LOG2(pagesize);
-DEFINE_GLOBAL(size_t) pagesize_mask = pagesize - 1;
+DEFINE_GLOBAL(uint8_t) gPageSize2Pow = GLOBAL_LOG2(gPageSize);
+DEFINE_GLOBAL(size_t) gPageSizeMask = gPageSize - 1;
 
 // Number of pages in a chunk.
-DEFINE_GLOBAL(size_t) chunk_npages = kChunkSize >> pagesize_2pow;
+DEFINE_GLOBAL(size_t) chunk_npages = kChunkSize >> gPageSize2Pow;
 
 // Number of pages necessary for a chunk header.
 DEFINE_GLOBAL(size_t)
 arena_chunk_header_npages =
   ((sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1) +
-    pagesize_mask) &
-   ~pagesize_mask) >>
-  pagesize_2pow;
+    gPageSizeMask) &
+   ~gPageSizeMask) >>
+  gPageSize2Pow;
 
 // Max size class for arenas.
 DEFINE_GLOBAL(size_t)
-gMaxLargeClass = kChunkSize - (arena_chunk_header_npages << pagesize_2pow);
+gMaxLargeClass = kChunkSize - (arena_chunk_header_npages << gPageSize2Pow);
 
 // Various sanity checks that regard configuration.
-GLOBAL_ASSERT(1ULL << pagesize_2pow == pagesize,
+GLOBAL_ASSERT(1ULL << gPageSize2Pow == gPageSize,
               "Page size is not a power of two");
 GLOBAL_ASSERT(kQuantum >= sizeof(void*));
-GLOBAL_ASSERT(kQuantum <= pagesize);
-GLOBAL_ASSERT(kChunkSize >= pagesize);
+GLOBAL_ASSERT(kQuantum <= gPageSize);
+GLOBAL_ASSERT(kChunkSize >= gPageSize);
 GLOBAL_ASSERT(kQuantum * 4 <= kChunkSize);
 END_GLOBALS
 
 // Recycle at most 128 MiB of chunks. This means we retain at most
 // 6.25% of the process address space on a 32-bit OS for later use.
 static const size_t gRecycleLimit = 128_MiB;
 
 // The current amount of recycled bytes, updated atomically.
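
The three globals above are tied together: gPageSizeMask is gPageSize - 1, and gPageSize2Pow is log2(gPageSize), which only holds because the page size is a power of two (the GLOBAL_ASSERT below enforces this). A minimal standalone sketch of the same relationships, using hypothetical names rather than the allocator's globals:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-ins for gPageSize, gPageSizeMask and gPageSize2Pow.
    static const size_t kPage = 4096;
    static const size_t kPageMask = kPage - 1;
    static const uint8_t kPage2Pow = 12;

    static_assert((kPage & (kPage - 1)) == 0, "page size must be a power of two");
    static_assert((size_t(1) << kPage2Pow) == kPage, "2pow matches the page size");
    static_assert(kPageMask == kPage - 1, "mask is the page size minus one");
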
@@ -539,17 +539,17 @@ static size_t opt_dirty_max = DIRTY_MAX_
 // Return the smallest cacheline multiple that is >= s.
 #define CACHELINE_CEILING(s)                                                   \
   (((s) + (kCacheLineSize - 1)) & ~(kCacheLineSize - 1))
 
 // Return the smallest quantum multiple that is >= a.
 #define QUANTUM_CEILING(a) (((a) + (kQuantumMask)) & ~(kQuantumMask))
 
 // Return the smallest pagesize multiple that is >= s.
-#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
+#define PAGE_CEILING(s) (((s) + gPageSizeMask) & ~gPageSizeMask)
 
 // ***************************************************************************
 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
 static void*
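
PAGE_CEILING rounds a size up to the next multiple of the page size by adding the mask and then clearing the low bits; this only works for power-of-two page sizes. A small, self-contained illustration of the same idiom (hypothetical names, assuming 4 KiB pages):

    #include <cassert>
    #include <cstddef>

    static const size_t kPage = 4096;
    static const size_t kPageMask = kPage - 1;

    // Same idiom as PAGE_CEILING: smallest page multiple that is >= s.
    static size_t PageCeiling(size_t s) { return (s + kPageMask) & ~kPageMask; }

    int main()
    {
      assert(PageCeiling(1) == 4096);
      assert(PageCeiling(4096) == 4096); // already aligned, unchanged
      assert(PageCeiling(4097) == 8192);
      return 0;
    }
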
@@ -810,18 +810,18 @@ struct ArenaRunTreeTrait : public ArenaC
     return CompareAddr(aNode, aOther);
   }
 };
 
 struct ArenaAvailTreeTrait : public ArenaChunkMapLink
 {
   static inline int Compare(arena_chunk_map_t* aNode, arena_chunk_map_t* aOther)
   {
-    size_t size1 = aNode->bits & ~pagesize_mask;
-    size_t size2 = aOther->bits & ~pagesize_mask;
+    size_t size1 = aNode->bits & ~gPageSizeMask;
+    size_t size2 = aOther->bits & ~gPageSizeMask;
     int ret = (size1 > size2) - (size1 < size2);
     return ret ? ret
                : CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode,
                              aOther);
   }
 };
 
 struct ArenaDirtyChunkTrait
@@ -1692,24 +1692,24 @@ pages_map(void* aAddr, size_t aSize)
 #else
   MOZ_ASSERT(!ret || (!aAddr && ret != aAddr) || (aAddr && ret == aAddr));
 #endif
   return ret;
 }
 #endif
 
 #ifdef XP_DARWIN
-#define VM_COPY_MIN (pagesize << 5)
+#define VM_COPY_MIN (gPageSize * 32)
 static inline void
 pages_copy(void* dest, const void* src, size_t n)
 {
 
-  MOZ_ASSERT((void*)((uintptr_t)dest & ~pagesize_mask) == dest);
+  MOZ_ASSERT((void*)((uintptr_t)dest & ~gPageSizeMask) == dest);
   MOZ_ASSERT(n >= VM_COPY_MIN);
-  MOZ_ASSERT((void*)((uintptr_t)src & ~pagesize_mask) == src);
+  MOZ_ASSERT((void*)((uintptr_t)src & ~gPageSizeMask) == src);
 
   vm_copy(
     mach_task_self(), (vm_address_t)src, (vm_size_t)n, (vm_address_t)dest);
 }
 #endif
 
 template<size_t Bits>
 bool
@@ -1848,17 +1848,17 @@ pages_trim(void* addr, size_t alloc_size
 }
 
 static void*
 chunk_alloc_mmap_slow(size_t size, size_t alignment)
 {
   void *ret, *pages;
   size_t alloc_size, leadsize;
 
-  alloc_size = size + alignment - pagesize;
+  alloc_size = size + alignment - gPageSize;
   // Beware size_t wrap-around.
   if (alloc_size < size) {
     return nullptr;
   }
   do {
     pages = pages_map(nullptr, alloc_size);
     if (!pages) {
       return nullptr;
@@ -2404,19 +2404,19 @@ arena_run_reg_dalloc(arena_run_t* run, a
 bool
 arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
 {
   arena_chunk_t* chunk;
   size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
 
   chunk = GetChunkForPtr(aRun);
   old_ndirty = chunk->ndirty;
-  run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
-  total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >> pagesize_2pow;
-  need_pages = (aSize >> pagesize_2pow);
+  run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
+  total_pages = (chunk->map[run_ind].bits & ~gPageSizeMask) >> gPageSize2Pow;
+  need_pages = (aSize >> gPageSize2Pow);
   MOZ_ASSERT(need_pages > 0);
   MOZ_ASSERT(need_pages <= total_pages);
   rem_pages = total_pages - need_pages;
 
   for (i = 0; i < need_pages; i++) {
     // Commit decommitted pages if necessary.  If a decommitted
     // page is encountered, commit all needed adjacent decommitted
     // pages in one operation, in order to reduce system call
@@ -2434,18 +2434,18 @@ arena_t::SplitRun(arena_run_t* aRun, siz
         MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
                      chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
 
         chunk->map[run_ind + i + j].bits &= ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
       }
 
 #ifdef MALLOC_DECOMMIT
       bool committed = pages_commit(
-        (void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
-        j << pagesize_2pow);
+        (void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
+        j << gPageSize2Pow);
       // pages_commit zeroes pages, so mark them as such if it succeeded.
       // That's checked further below to avoid manually zeroing the pages.
       for (size_t k = 0; k < j; k++) {
         chunk->map[run_ind + i + k].bits |=
           committed ? CHUNK_MAP_ZEROED : CHUNK_MAP_DECOMMITTED;
       }
       if (!committed) {
         return false;
@@ -2456,31 +2456,31 @@ arena_t::SplitRun(arena_run_t* aRun, siz
     }
   }
 
   mRunsAvail.Remove(&chunk->map[run_ind]);
 
   // Keep track of trailing unused pages for later use.
   if (rem_pages > 0) {
     chunk->map[run_ind + need_pages].bits =
-      (rem_pages << pagesize_2pow) |
-      (chunk->map[run_ind + need_pages].bits & pagesize_mask);
+      (rem_pages << gPageSize2Pow) |
+      (chunk->map[run_ind + need_pages].bits & gPageSizeMask);
     chunk->map[run_ind + total_pages - 1].bits =
-      (rem_pages << pagesize_2pow) |
-      (chunk->map[run_ind + total_pages - 1].bits & pagesize_mask);
+      (rem_pages << gPageSize2Pow) |
+      (chunk->map[run_ind + total_pages - 1].bits & gPageSizeMask);
     mRunsAvail.Insert(&chunk->map[run_ind + need_pages]);
   }
 
   for (i = 0; i < need_pages; i++) {
     // Zero if necessary.
     if (aZero) {
       if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
-        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
+        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
                0,
-               pagesize);
+               gPageSize);
         // CHUNK_MAP_ZEROED is cleared below.
       }
     }
 
     // Update dirty page accounting.
     if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
       chunk->ndirty--;
       mNumDirty--;
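
SplitRun relies on the layout of chunk->map[i].bits: run sizes are page multiples, so their low log2(page size) bits are always zero, which leaves the size in the high bits and the per-page flags (dirty, zeroed, decommitted, ...) in the low, masked bits. A hedged sketch of that packing, with hypothetical flag values:

    #include <cassert>
    #include <cstddef>

    static const size_t kPage = 4096;
    static const size_t kPageMask = kPage - 1;

    // Hypothetical flag bits living in the low (masked) part of a map entry.
    static const size_t kFlagAllocated = 0x1;
    static const size_t kFlagDirty = 0x8;

    int main()
    {
      size_t runSize = 3 * kPage; // page multiple, so low bits are free for flags
      size_t bits = runSize | kFlagDirty | kFlagAllocated;

      assert((bits & ~kPageMask) == runSize);                      // recover the size
      assert((bits & kPageMask) == (kFlagDirty | kFlagAllocated)); // recover the flags
      return 0;
    }
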
@@ -2532,17 +2532,17 @@ arena_t::InitChunk(arena_chunk_t* aChunk
 
   // Claim that no pages are in use, since the header is merely overhead.
   aChunk->ndirty = 0;
 
   // Initialize the map to contain one maximal free untouched run.
 #ifdef MALLOC_DECOMMIT
   arena_run_t* run =
     (arena_run_t*)(uintptr_t(aChunk) +
-                   (arena_chunk_header_npages << pagesize_2pow));
+                   (arena_chunk_header_npages << gPageSize2Pow));
 #endif
 
   for (i = 0; i < arena_chunk_header_npages; i++) {
     aChunk->map[i].bits = 0;
   }
   aChunk->map[i].bits = gMaxLargeClass | flags;
   for (i++; i < chunk_npages - 1; i++) {
     aChunk->map[i].bits = flags;
@@ -2596,48 +2596,48 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
 arena_run_t*
 arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
 {
   arena_run_t* run;
   arena_chunk_map_t* mapelm;
   arena_chunk_map_t key;
 
   MOZ_ASSERT(aSize <= gMaxLargeClass);
-  MOZ_ASSERT((aSize & pagesize_mask) == 0);
+  MOZ_ASSERT((aSize & gPageSizeMask) == 0);
 
   // Search the arena's chunks for the lowest best fit.
   key.bits = aSize | CHUNK_MAP_KEY;
   mapelm = mRunsAvail.SearchOrNext(&key);
   if (mapelm) {
     arena_chunk_t* chunk = GetChunkForPtr(mapelm);
     size_t pageind =
       (uintptr_t(mapelm) - uintptr_t(chunk->map)) / sizeof(arena_chunk_map_t);
 
-    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
+    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << gPageSize2Pow));
   } else if (mSpare) {
     // Use the spare.
     arena_chunk_t* chunk = mSpare;
     mSpare = nullptr;
     run = (arena_run_t*)(uintptr_t(chunk) +
-                         (arena_chunk_header_npages << pagesize_2pow));
+                         (arena_chunk_header_npages << gPageSize2Pow));
     // Insert the run into the tree of available runs.
     mRunsAvail.Insert(&chunk->map[arena_chunk_header_npages]);
   } else {
     // No usable runs.  Create a new chunk from which to allocate
     // the run.
     bool zeroed;
     arena_chunk_t* chunk =
       (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false, &zeroed);
     if (!chunk) {
       return nullptr;
     }
 
     InitChunk(chunk, zeroed);
     run = (arena_run_t*)(uintptr_t(chunk) +
-                         (arena_chunk_header_npages << pagesize_2pow));
+                         (arena_chunk_header_npages << gPageSize2Pow));
   }
   // Update page map.
   return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
 }
 
 void
 arena_t::Purge(bool aAll)
 {
@@ -2685,24 +2685,24 @@ arena_t::Purge(bool aAll)
           MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                      0);
           chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
         }
         chunk->ndirty -= npages;
         mNumDirty -= npages;
 
 #ifdef MALLOC_DECOMMIT
-        pages_decommit((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
-                       (npages << pagesize_2pow));
+        pages_decommit((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
+                       (npages << gPageSize2Pow));
 #endif
         mStats.committed -= npages;
 
 #ifndef MALLOC_DECOMMIT
-        madvise((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
-                (npages << pagesize_2pow),
+        madvise((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
+                (npages << gPageSize2Pow),
                 MADV_FREE);
 #ifdef MALLOC_DOUBLE_PURGE
         madvised = true;
 #endif
 #endif
         if (mNumDirty <= (dirty_max >> 1)) {
           break;
         }
@@ -2727,25 +2727,25 @@ arena_t::Purge(bool aAll)
 
 void
 arena_t::DallocRun(arena_run_t* aRun, bool aDirty)
 {
   arena_chunk_t* chunk;
   size_t size, run_ind, run_pages;
 
   chunk = GetChunkForPtr(aRun);
-  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
+  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
   MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
   MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
   if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
-    size = chunk->map[run_ind].bits & ~pagesize_mask;
+    size = chunk->map[run_ind].bits & ~gPageSizeMask;
   } else {
     size = aRun->bin->mRunSize;
   }
-  run_pages = (size >> pagesize_2pow);
+  run_pages = (size >> gPageSize2Pow);
 
   // Mark pages as unallocated in the chunk map.
   if (aDirty) {
     size_t i;
 
     for (i = 0; i < run_pages; i++) {
       MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) ==
                             0);
@@ -2759,85 +2759,85 @@ arena_t::DallocRun(arena_run_t* aRun, bo
     mNumDirty += run_pages;
   } else {
     size_t i;
 
     for (i = 0; i < run_pages; i++) {
       chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
     }
   }
-  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & pagesize_mask);
+  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & gPageSizeMask);
   chunk->map[run_ind + run_pages - 1].bits =
-    size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
+    size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
 
   // Try to coalesce forward.
   if (run_ind + run_pages < chunk_npages &&
       (chunk->map[run_ind + run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
-    size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~pagesize_mask;
+    size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~gPageSizeMask;
 
     // Remove successor from tree of available runs; the coalesced run is
     // inserted later.
     mRunsAvail.Remove(&chunk->map[run_ind + run_pages]);
 
     size += nrun_size;
-    run_pages = size >> pagesize_2pow;
+    run_pages = size >> gPageSize2Pow;
 
     MOZ_DIAGNOSTIC_ASSERT(
-      (chunk->map[run_ind + run_pages - 1].bits & ~pagesize_mask) == nrun_size);
+      (chunk->map[run_ind + run_pages - 1].bits & ~gPageSizeMask) == nrun_size);
     chunk->map[run_ind].bits =
-      size | (chunk->map[run_ind].bits & pagesize_mask);
+      size | (chunk->map[run_ind].bits & gPageSizeMask);
     chunk->map[run_ind + run_pages - 1].bits =
-      size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
+      size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
   }
 
   // Try to coalesce backward.
   if (run_ind > arena_chunk_header_npages &&
       (chunk->map[run_ind - 1].bits & CHUNK_MAP_ALLOCATED) == 0) {
-    size_t prun_size = chunk->map[run_ind - 1].bits & ~pagesize_mask;
-
-    run_ind -= prun_size >> pagesize_2pow;
+    size_t prun_size = chunk->map[run_ind - 1].bits & ~gPageSizeMask;
+
+    run_ind -= prun_size >> gPageSize2Pow;
 
     // Remove predecessor from tree of available runs; the coalesced run is
     // inserted later.
     mRunsAvail.Remove(&chunk->map[run_ind]);
 
     size += prun_size;
-    run_pages = size >> pagesize_2pow;
-
-    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
+    run_pages = size >> gPageSize2Pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~gPageSizeMask) ==
                           prun_size);
     chunk->map[run_ind].bits =
-      size | (chunk->map[run_ind].bits & pagesize_mask);
+      size | (chunk->map[run_ind].bits & gPageSizeMask);
     chunk->map[run_ind + run_pages - 1].bits =
-      size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
+      size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
   }
 
   // Insert into tree of available runs, now that coalescing is complete.
   mRunsAvail.Insert(&chunk->map[run_ind]);
 
   // Deallocate chunk if it is now completely unused.
   if ((chunk->map[arena_chunk_header_npages].bits &
-       (~pagesize_mask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass) {
+       (~gPageSizeMask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass) {
     DeallocChunk(chunk);
   }
 
   // Enforce mMaxDirty.
   if (mNumDirty > mMaxDirty) {
     Purge(false);
   }
 }
 
 void
 arena_t::TrimRunHead(arena_chunk_t* aChunk,
                      arena_run_t* aRun,
                      size_t aOldSize,
                      size_t aNewSize)
 {
-  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t head_npages = (aOldSize - aNewSize) >> pagesize_2pow;
+  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t head_npages = (aOldSize - aNewSize) >> gPageSize2Pow;
 
   MOZ_ASSERT(aOldSize > aNewSize);
 
   // Update the chunk map so that arena_t::RunDalloc() can treat the
   // leading run as separately allocated.
   aChunk->map[pageind].bits =
     (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   aChunk->map[pageind + head_npages].bits =
@@ -2848,18 +2848,18 @@ arena_t::TrimRunHead(arena_chunk_t* aChu
 
 void
 arena_t::TrimRunTail(arena_chunk_t* aChunk,
                      arena_run_t* aRun,
                      size_t aOldSize,
                      size_t aNewSize,
                      bool aDirty)
 {
-  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t npages = aNewSize >> pagesize_2pow;
+  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t npages = aNewSize >> gPageSize2Pow;
 
   MOZ_ASSERT(aOldSize > aNewSize);
 
   // Update the chunk map so that arena_t::RunDalloc() can treat the
   // trailing run as separately allocated.
   aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   aChunk->map[pageind + npages].bits =
     (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
@@ -2874,17 +2874,17 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
   arena_run_t* run;
   unsigned i, remainder;
 
   // Look for a usable run.
   mapelm = aBin->mNonFullRuns.First();
   if (mapelm) {
     // run is guaranteed to have available space.
     aBin->mNonFullRuns.Remove(mapelm);
-    run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
+    run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
     return run;
   }
   // No existing runs have any space available.
 
   // Allocate a new run.
   run = AllocRun(aBin, aBin->mRunSize, false, false);
   if (!run) {
     return nullptr;
@@ -2962,17 +2962,17 @@ arena_t::MallocBinHard(arena_bin_t* aBin
 // also calculated here, since these settings are all interdependent.
 static size_t
 arena_bin_run_size_calc(arena_bin_t* bin, size_t min_run_size)
 {
   size_t try_run_size, good_run_size;
   unsigned good_nregs, good_mask_nelms, good_reg0_offset;
   unsigned try_nregs, try_mask_nelms, try_reg0_offset;
 
-  MOZ_ASSERT(min_run_size >= pagesize);
+  MOZ_ASSERT(min_run_size >= gPageSize);
   MOZ_ASSERT(min_run_size <= gMaxLargeClass);
 
   // Calculate known-valid settings before entering the mRunSize
   // expansion loop, so that the first part of the loop always copies
   // valid settings.
   //
   // The do..while loop iteratively reduces the number of regions until
   // the run header and the regions no longer overlap.  A closed formula
@@ -2994,17 +2994,17 @@ arena_bin_run_size_calc(arena_bin_t* bin
   do {
     // Copy valid settings before trying more aggressive settings.
     good_run_size = try_run_size;
     good_nregs = try_nregs;
     good_mask_nelms = try_mask_nelms;
     good_reg0_offset = try_reg0_offset;
 
     // Try more aggressive settings.
-    try_run_size += pagesize;
+    try_run_size += gPageSize;
     try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->mSizeClass) +
                 1; // Counter-act try_nregs-- in loop.
     do {
       try_nregs--;
       try_mask_nelms =
         (try_nregs >> (LOG2(sizeof(int)) + 3)) +
         ((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
       try_reg0_offset = try_run_size - (try_nregs * bin->mSizeClass);
@@ -3134,30 +3134,30 @@ imalloc(size_t aSize, bool aZero, arena_
 // Only handles large allocations that require more than page alignment.
 void*
 arena_t::Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize)
 {
   void* ret;
   size_t offset;
   arena_chunk_t* chunk;
 
-  MOZ_ASSERT((aSize & pagesize_mask) == 0);
-  MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
+  MOZ_ASSERT((aSize & gPageSizeMask) == 0);
+  MOZ_ASSERT((aAlignment & gPageSizeMask) == 0);
 
   {
     MutexAutoLock lock(mLock);
     ret = AllocRun(nullptr, aAllocSize, true, false);
     if (!ret) {
       return nullptr;
     }
 
     chunk = GetChunkForPtr(ret);
 
     offset = uintptr_t(ret) & (aAlignment - 1);
-    MOZ_ASSERT((offset & pagesize_mask) == 0);
+    MOZ_ASSERT((offset & gPageSizeMask) == 0);
     MOZ_ASSERT(offset < aAllocSize);
     if (offset == 0) {
       TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
     } else {
       size_t leadsize, trailsize;
 
       leadsize = aAlignment - offset;
       if (leadsize > 0) {
@@ -3211,18 +3211,18 @@ ipalloc(size_t aAlignment, size_t aSize,
 
   // (ceil_size < aSize) protects against the combination of maximal
   // alignment and size greater than maximal alignment.
   if (ceil_size < aSize) {
     // size_t overflow.
     return nullptr;
   }
 
-  if (ceil_size <= pagesize ||
-      (aAlignment <= pagesize && ceil_size <= gMaxLargeClass)) {
+  if (ceil_size <= gPageSize ||
+      (aAlignment <= gPageSize && ceil_size <= gMaxLargeClass)) {
     aArena = aArena ? aArena : choose_arena(aSize);
     ret = aArena->Malloc(ceil_size, false);
   } else {
     size_t run_size;
 
     // We can't achieve sub-page alignment, so round up alignment
     // permanently; it makes later calculations simpler.
     aAlignment = PAGE_CEILING(aAlignment);
@@ -3241,26 +3241,26 @@ ipalloc(size_t aAlignment, size_t aSize,
     if (ceil_size < aSize || ceil_size + aAlignment < ceil_size) {
       // size_t overflow.
       return nullptr;
     }
 
     // Calculate the size of the over-size run that arena_palloc()
     // would need to allocate in order to guarantee the alignment.
     if (ceil_size >= aAlignment) {
-      run_size = ceil_size + aAlignment - pagesize;
+      run_size = ceil_size + aAlignment - gPageSize;
     } else {
       // It is possible that (aAlignment << 1) will cause
       // overflow, but it doesn't matter because we also
       // subtract pagesize, which in the case of overflow
       // leaves us with a very large run_size.  That causes
       // the first conditional below to fail, which means
       // that the bogus run_size value never gets used for
       // anything important.
-      run_size = (aAlignment << 1) - pagesize;
+      run_size = (aAlignment << 1) - gPageSize;
     }
 
     if (run_size <= gMaxLargeClass) {
       aArena = aArena ? aArena : choose_arena(aSize);
       ret = aArena->Palloc(aAlignment, ceil_size, run_size);
     } else if (aAlignment <= kChunkSize) {
       ret = huge_malloc(ceil_size, false);
     } else {
@@ -3279,25 +3279,25 @@ arena_salloc(const void* ptr)
   size_t ret;
   arena_chunk_t* chunk;
   size_t pageind, mapbits;
 
   MOZ_ASSERT(ptr);
   MOZ_ASSERT(GetChunkOffsetForPtr(ptr) != 0);
 
   chunk = GetChunkForPtr(ptr);
-  pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+  pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> gPageSize2Pow);
   mapbits = chunk->map[pageind].bits;
   MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
   if ((mapbits & CHUNK_MAP_LARGE) == 0) {
-    arena_run_t* run = (arena_run_t*)(mapbits & ~pagesize_mask);
+    arena_run_t* run = (arena_run_t*)(mapbits & ~gPageSizeMask);
     MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
     ret = run->bin->mSizeClass;
   } else {
-    ret = mapbits & ~pagesize_mask;
+    ret = mapbits & ~gPageSizeMask;
     MOZ_DIAGNOSTIC_ASSERT(ret != 0);
   }
 
   return ret;
 }
 
 // Validate ptr before assuming that it points to an allocation.  Currently,
 // the following validation is performed:
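
arena_salloc turns a pointer into a page index by subtracting the chunk base and shifting right by gPageSize2Pow; for small allocations the run base is then recovered by masking the map bits with ~gPageSizeMask. A minimal sketch of those two address computations, with made-up values:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const size_t kPage = 4096;
    static const size_t kPageMask = kPage - 1;
    static const unsigned kPage2Pow = 12;

    int main()
    {
      uintptr_t chunk = 0x100000;              // hypothetical page-aligned chunk base
      uintptr_t ptr = chunk + 5 * kPage + 123; // pointer somewhere in page 5

      size_t pageind = (ptr - chunk) >> kPage2Pow;
      assert(pageind == 5);

      // Clearing the low bits yields the page-aligned base of the page/run.
      uintptr_t base = ptr & ~uintptr_t(kPageMask);
      assert(base == chunk + 5 * kPage);
      return 0;
    }
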
@@ -3401,17 +3401,17 @@ MozJemalloc::jemalloc_ptr_info(const voi
   if (!gChunkRTree.Get(chunk)) {
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
   // Get the page number within the chunk.
-  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
+  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
   if (pageind < arena_chunk_header_npages) {
     // Within the chunk header.
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   size_t mapbits = chunk->map[pageind].bits;
 
@@ -3424,28 +3424,28 @@ MozJemalloc::jemalloc_ptr_info(const voi
     } else if (mapbits & CHUNK_MAP_MADVISED) {
       tag = TagFreedPageMadvised;
     } else if (mapbits & CHUNK_MAP_ZEROED) {
       tag = TagFreedPageZeroed;
     } else {
       MOZ_CRASH();
     }
 
-    void* pageaddr = (void*)(uintptr_t(aPtr) & ~pagesize_mask);
-    *aInfo = { tag, pageaddr, pagesize };
+    void* pageaddr = (void*)(uintptr_t(aPtr) & ~gPageSizeMask);
+    *aInfo = { tag, pageaddr, gPageSize };
     return;
   }
 
   if (mapbits & CHUNK_MAP_LARGE) {
     // It's a large allocation. Only the first page of a large
     // allocation contains its size, so if the address is not in
     // the first page, scan back to find the allocation size.
     size_t size;
     while (true) {
-      size = mapbits & ~pagesize_mask;
+      size = mapbits & ~gPageSizeMask;
       if (size != 0) {
         break;
       }
 
       // The following two return paths shouldn't occur in
       // practice unless there is heap corruption.
       pageind--;
       MOZ_DIAGNOSTIC_ASSERT(pageind >= arena_chunk_header_npages);
@@ -3457,23 +3457,23 @@ MozJemalloc::jemalloc_ptr_info(const voi
       mapbits = chunk->map[pageind].bits;
       MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
       if (!(mapbits & CHUNK_MAP_LARGE)) {
         *aInfo = { TagUnknown, nullptr, 0 };
         return;
       }
     }
 
-    void* addr = ((char*)chunk) + (pageind << pagesize_2pow);
+    void* addr = ((char*)chunk) + (pageind << gPageSize2Pow);
     *aInfo = { TagLiveLarge, addr, size };
     return;
   }
 
   // It must be a small allocation.
-  auto run = (arena_run_t*)(mapbits & ~pagesize_mask);
+  auto run = (arena_run_t*)(mapbits & ~gPageSizeMask);
   MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
 
   // The allocation size is stored in the run metadata.
   size_t size = run->bin->mSizeClass;
 
   // Address of the first possible pointer in the run after its headers.
   uintptr_t reg0_addr = (uintptr_t)run + run->bin->mRunFirstRegionOffset;
   if (aPtr < (void*)reg0_addr) {
@@ -3512,17 +3512,17 @@ void
 arena_t::DallocSmall(arena_chunk_t* aChunk,
                      void* aPtr,
                      arena_chunk_map_t* aMapElm)
 {
   arena_run_t* run;
   arena_bin_t* bin;
   size_t size;
 
-  run = (arena_run_t*)(aMapElm->bits & ~pagesize_mask);
+  run = (arena_run_t*)(aMapElm->bits & ~gPageSizeMask);
   MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
   bin = run->bin;
   size = bin->mSizeClass;
   MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >=
                         uintptr_t(run) + bin->mRunFirstRegionOffset);
   MOZ_DIAGNOSTIC_ASSERT(
     (uintptr_t(aPtr) - (uintptr_t(run) + bin->mRunFirstRegionOffset)) % size ==
     0);
@@ -3533,17 +3533,17 @@ arena_t::DallocSmall(arena_chunk_t* aChu
   run->nfree++;
 
   if (run->nfree == bin->mRunNumRegions) {
     // Deallocate run.
     if (run == bin->mCurrentRun) {
       bin->mCurrentRun = nullptr;
     } else if (bin->mRunNumRegions != 1) {
       size_t run_pageind =
-        (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+        (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
       arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
 
       // This block's conditional is necessary because if the
       // run only contains one region, then it never gets
       // inserted into the non-full runs tree.
       MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == run_mapelm);
       bin->mNonFullRuns.Remove(run_mapelm);
     }
@@ -3558,42 +3558,42 @@ arena_t::DallocSmall(arena_chunk_t* aChu
     if (!bin->mCurrentRun) {
       bin->mCurrentRun = run;
     } else if (uintptr_t(run) < uintptr_t(bin->mCurrentRun)) {
       // Switch mCurrentRun.
       if (bin->mCurrentRun->nfree > 0) {
         arena_chunk_t* runcur_chunk = GetChunkForPtr(bin->mCurrentRun);
         size_t runcur_pageind =
           (uintptr_t(bin->mCurrentRun) - uintptr_t(runcur_chunk)) >>
-          pagesize_2pow;
+          gPageSize2Pow;
         arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];
 
         // Insert runcur.
         MOZ_DIAGNOSTIC_ASSERT(!bin->mNonFullRuns.Search(runcur_mapelm));
         bin->mNonFullRuns.Insert(runcur_mapelm);
       }
       bin->mCurrentRun = run;
     } else {
       size_t run_pageind =
-        (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+        (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
       arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
 
       MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == nullptr);
       bin->mNonFullRuns.Insert(run_mapelm);
     }
   }
   mStats.allocated_small -= size;
 }
 
 void
 arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr)
 {
-  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & pagesize_mask) == 0);
-  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t size = aChunk->map[pageind].bits & ~pagesize_mask;
+  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & gPageSizeMask) == 0);
+  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t size = aChunk->map[pageind].bits & ~gPageSizeMask;
 
   memset(aPtr, kAllocPoison, size);
   mStats.allocated_large -= size;
 
   DallocRun((arena_run_t*)aPtr, true);
 }
 
 static inline void
@@ -3604,17 +3604,17 @@ arena_dalloc(void* aPtr, size_t aOffset)
   MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);
 
   auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
   auto arena = chunk->arena;
   MOZ_ASSERT(arena);
   MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
   MutexAutoLock lock(arena->mLock);
-  size_t pageind = aOffset >> pagesize_2pow;
+  size_t pageind = aOffset >> gPageSize2Pow;
   arena_chunk_map_t* mapelm = &chunk->map[pageind];
   MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
   if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
     // Small allocation.
     arena->DallocSmall(chunk, aPtr, mapelm);
   } else {
     // Large allocation.
     arena->DallocLarge(chunk, aPtr);
@@ -3653,34 +3653,34 @@ arena_t::RallocShrinkLarge(arena_chunk_t
 
 // Returns whether reallocation was successful.
 bool
 arena_t::RallocGrowLarge(arena_chunk_t* aChunk,
                          void* aPtr,
                          size_t aSize,
                          size_t aOldSize)
 {
-  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t npages = aOldSize >> pagesize_2pow;
+  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t npages = aOldSize >> gPageSize2Pow;
 
   MutexAutoLock lock(mLock);
   MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
-                        (aChunk->map[pageind].bits & ~pagesize_mask));
+                        (aChunk->map[pageind].bits & ~gPageSizeMask));
 
   // Try to extend the run.
   MOZ_ASSERT(aSize > aOldSize);
   if (pageind + npages < chunk_npages &&
       (aChunk->map[pageind + npages].bits & CHUNK_MAP_ALLOCATED) == 0 &&
-      (aChunk->map[pageind + npages].bits & ~pagesize_mask) >=
+      (aChunk->map[pageind + npages].bits & ~gPageSizeMask) >=
         aSize - aOldSize) {
     // The next run is available and sufficiently large.  Split the
     // following run, then merge the first part with the existing
     // allocation.
     if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
-                                 ((pageind + npages) << pagesize_2pow)),
+                                 ((pageind + npages) << gPageSize2Pow)),
                   aSize - aOldSize,
                   true,
                   false)) {
       return false;
     }
 
     aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
     aChunk->map[pageind + npages].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
@@ -3809,17 +3809,17 @@ arena_t::arena_t()
   mNumDirty = 0;
   // Reduce the maximum amount of dirty pages we allow to be kept on
   // thread local arenas. TODO: make this more flexible.
   mMaxDirty = opt_dirty_max >> 3;
 
   mRunsAvail.Init();
 
   // Initialize bins.
-  prev_run_size = pagesize;
+  prev_run_size = gPageSize;
   SizeClass sizeClass(1);
 
   for (i = 0;; i++) {
     bin = &mBins[i];
     bin->mCurrentRun = nullptr;
     bin->mNonFullRuns.Init();
 
     bin->mSizeClass = sizeClass.Size();
@@ -4112,24 +4112,24 @@ static
     return true;
   }
 
   // Get page size and number of CPUs
   result = GetKernelPageSize();
   // We assume that the page size is a power of 2.
   MOZ_ASSERT(((result - 1) & result) == 0);
 #ifdef MALLOC_STATIC_PAGESIZE
-  if (pagesize % (size_t)result) {
+  if (gPageSize % (size_t)result) {
     _malloc_message(
       _getprogname(),
       "Compile-time page size does not divide the runtime one.\n");
     MOZ_CRASH();
   }
 #else
-  pagesize = (size_t)result;
+  gPageSize = (size_t)result;
   DefineGlobals();
 #endif
 
   // Get runtime configuration.
   if ((opts = getenv("MALLOC_OPTIONS"))) {
     for (i = 0; opts[i] != '\0'; i++) {
       unsigned j, nreps;
       bool nseen;
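
With MALLOC_STATIC_PAGESIZE, the compile-time gPageSize is checked against the page size the kernel reports at runtime and the process aborts on a mismatch; without it, gPageSize is filled in at startup and the derived globals are computed by DefineGlobals(). A hedged, POSIX-only sketch of the same runtime check using sysconf (the patch itself uses Mozilla's GetKernelPageSize helper; the 4096 constant is a placeholder):

    #include <unistd.h>

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    static const size_t kCompiledPageSize = 4096; // stand-in for gPageSize

    int main()
    {
      long result = sysconf(_SC_PAGESIZE); // runtime kernel page size
      if (result <= 0) {
        return 1;
      }
      if (kCompiledPageSize % (size_t)result) {
        fprintf(stderr, "Compile-time page size does not divide the runtime one.\n");
        abort();
      }
      printf("runtime page size: %ld\n", result);
      return 0;
    }
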
@@ -4510,17 +4510,17 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 
   // Gather runtime settings.
   aStats->opt_junk = opt_junk;
   aStats->opt_zero = opt_zero;
   aStats->quantum = kQuantum;
   aStats->small_max = kMaxQuantumClass;
   aStats->large_max = gMaxLargeClass;
   aStats->chunksize = kChunkSize;
-  aStats->page_size = pagesize;
+  aStats->page_size = gPageSize;
   aStats->dirty_max = opt_dirty_max;
 
   // Gather current memory usage statistics.
   aStats->narenas = 0;
   aStats->mapped = 0;
   aStats->allocated = 0;
   aStats->waste = 0;
   aStats->page_cache = 0;
@@ -4556,29 +4556,29 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
     arena_unused = 0;
 
     {
       MutexAutoLock lock(arena->mLock);
 
       arena_mapped = arena->mStats.mapped;
 
       // "committed" counts dirty and allocated memory.
-      arena_committed = arena->mStats.committed << pagesize_2pow;
+      arena_committed = arena->mStats.committed << gPageSize2Pow;
 
       arena_allocated =
         arena->mStats.allocated_small + arena->mStats.allocated_large;
 
-      arena_dirty = arena->mNumDirty << pagesize_2pow;
+      arena_dirty = arena->mNumDirty << gPageSize2Pow;
 
       for (j = 0; j < ntbins + nqbins + nsbins; j++) {
         arena_bin_t* bin = &arena->mBins[j];
         size_t bin_unused = 0;
 
         for (auto mapelm : bin->mNonFullRuns.iter()) {
-          run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
+          run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
           bin_unused += run->nfree * bin->mSizeClass;
         }
 
         if (bin->mCurrentRun) {
           bin_unused += bin->mCurrentRun->nfree * bin->mSizeClass;
         }
 
         arena_unused += bin_unused;
@@ -4600,17 +4600,17 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
     aStats->bookkeeping += arena_headers;
     aStats->narenas++;
   }
   gArenas.mLock.Unlock();
 
   // Account for arena chunk headers in bookkeeping rather than waste.
   chunk_header_size =
     ((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages)
-    << pagesize_2pow;
+    << gPageSize2Pow;
 
   aStats->mapped += non_arena_mapped;
   aStats->bookkeeping += chunk_header_size;
   aStats->waste -= chunk_header_size;
 
   MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
                                  aStats->page_cache + aStats->bookkeeping);
 }
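
jemalloc_stats keeps several counters in pages (e.g. mStats.committed and mNumDirty) and converts them to bytes by shifting left by gPageSize2Pow, which for a power-of-two page size is the same as multiplying by gPageSize. A trivial standalone check of that equivalence (hypothetical names):

    #include <cassert>
    #include <cstddef>

    static const size_t kPage = 4096;
    static const unsigned kPage2Pow = 12; // log2(kPage)

    int main()
    {
      size_t committedPages = 37;
      assert((committedPages << kPage2Pow) == committedPages * kPage);
      return 0;
    }
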
@@ -4633,20 +4633,20 @@ hard_purge_chunk(arena_chunk_t* aChunk)
       MOZ_DIAGNOSTIC_ASSERT(
         !(aChunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
       aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
     }
 
     // We could use mincore to find out which pages are actually
     // present, but it's not clear that's better.
     if (npages > 0) {
-      pages_decommit(((char*)aChunk) + (i << pagesize_2pow),
-                     npages << pagesize_2pow);
-      Unused << pages_commit(((char*)aChunk) + (i << pagesize_2pow),
-                             npages << pagesize_2pow);
+      pages_decommit(((char*)aChunk) + (i << gPageSize2Pow),
+                     npages << gPageSize2Pow);
+      Unused << pages_commit(((char*)aChunk) + (i << gPageSize2Pow),
+                             npages << gPageSize2Pow);
     }
     i += npages;
   }
 }
 
 // Explicitly remove all of this arena's MADV_FREE'd pages from memory.
 void
 arena_t::HardPurge()