merge mozilla-central to mozilla-inbound. r=merge a=merge
author Sebastian Hengst <archaeopteryx@coole-files.de>
Sun, 05 Nov 2017 12:39:18 +0200
changeset 443499 c73851d5851b51f8d3cb262242e363a0962ba892
parent 443483 ebc611dc8c85055ac62bc945e9871cdc317cf6f9 (current diff)
parent 443498 849017ffe2976f7721c20c3625d8fafeb3cce8dd (diff)
child 443500 73a48b92351a1c615108c36324c4900cb8ad41a6
push id 1618
push user Callek@gmail.com
push date Thu, 11 Jan 2018 17:45:48 +0000
treeherder mozilla-release@882ca853e05a
reviewers merge, merge
milestone 58.0a1
merge mozilla-central to mozilla-inbound. r=merge a=merge
--- a/memory/build/Utils.h
+++ b/memory/build/Utils.h
@@ -13,9 +13,32 @@
 template<size_t N>
 struct Log2 : mozilla::tl::CeilingLog2<N>
 {
   using mozilla::tl::CeilingLog2<N>::value;
   static_assert(1ULL << value == N, "Number is not a power of 2");
 };
 #define LOG2(N) Log2<N>::value
 
+// Compare two addresses. Returns -1 if the first address is smaller than the
+// second, 0 if they are equal, and 1 if it is greater.
+template<typename T>
+int
+CompareAddr(T* aAddr1, T* aAddr2)
+{
+  uintptr_t addr1 = reinterpret_cast<uintptr_t>(aAddr1);
+  uintptr_t addr2 = reinterpret_cast<uintptr_t>(aAddr2);
+
+  return (addr1 > addr2) - (addr1 < addr2);
+}
+
+// User-defined literals to make constants more legible
+constexpr unsigned long long int operator"" _KiB(unsigned long long int aNum)
+{
+  return aNum * 1024;
+}
+
+constexpr unsigned long long int operator"" _MiB(unsigned long long int aNum)
+{
+  return aNum * 1024_KiB;
+}
+
 #endif
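As a quick illustration of the two helpers added to Utils.h above (a sketch only, assuming the definitions are visible to the compiler):

// Sketch: the literals are plain compile-time multipliers.
static_assert(4_KiB == 4096, "4_KiB is 4096 bytes");
static_assert(1_MiB == 1024 * 1024, "1_MiB is 1048576 bytes");
// CompareAddr() yields a total order usable as a red-black tree comparator:
// CompareAddr(p, p) == 0, and CompareAddr(p, q) == -CompareAddr(q, p) for p != q.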
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -54,18 +54,17 @@
 // of the original request size is maintained.  Allocations are broken into
 // categories according to size class.  Assuming runtime defaults, 4 kB pages
 // and a 16 byte quantum on a 32-bit system, the size classes in each category
 // are as follows:
 //
 //   |=====================================|
 //   | Category | Subcategory    |    Size |
 //   |=====================================|
-//   | Small    | Tiny           |       2 |
-//   |          |                |       4 |
+//   | Small    | Tiny           |       4 |
 //   |          |                |       8 |
 //   |          |----------------+---------|
 //   |          | Quantum-spaced |      16 |
 //   |          |                |      32 |
 //   |          |                |      48 |
 //   |          |                |     ... |
 //   |          |                |     480 |
 //   |          |                |     496 |
@@ -169,16 +168,28 @@ using namespace mozilla;
 #ifdef XP_DARWIN
 #define MALLOC_DOUBLE_PURGE
 #endif
 
 #ifdef XP_WIN
 #define MALLOC_DECOMMIT
 #endif
 
+// When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
+// compile-time for better performance, as opposed to determined at
+// runtime. Some platforms can have different page sizes at runtime
+// depending on kernel configuration, so they are opted out by default.
+// Debug builds are opted out too, for test coverage.
+#ifndef MOZ_DEBUG
+#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) &&         \
+  !defined(__aarch64__)
+#define MALLOC_STATIC_PAGESIZE 1
+#endif
+#endif
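On the platforms (and debug builds) that are opted out, gPageSize is instead filled in at runtime. A minimal sketch of what such a runtime query typically looks like; QueryPageSize is a hypothetical helper for illustration, not a function from this patch:

#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <cstddef>

// Hypothetical helper: query the VM page size once at startup when it is not
// a compile-time constant.
static size_t QueryPageSize()
{
#ifdef _WIN32
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  return size_t(info.dwPageSize);
#else
  return size_t(sysconf(_SC_PAGESIZE));
#endif
}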
+
 #ifdef XP_WIN
 #define STDERR_FILENO 2
 
 // Implement getenv without using malloc.
 static char mozillaMallocOptionsBuf[64];
 
 #define getenv xgetenv
 static char*
@@ -243,47 +254,270 @@ static inline void*
 #endif
 #endif
 }
 #define mmap _mmap
 #define munmap(a, l) syscall(SYS_munmap, a, l)
 #endif
 #endif
 
-// Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes.
-#define QUANTUM_2POW_MIN 4
-
-// Size and alignment of memory chunks that are allocated by the OS's virtual
-// memory system.
-#define CHUNK_2POW_DEFAULT 20
-// Maximum number of dirty pages per arena.
-#define DIRTY_MAX_DEFAULT (1U << 8)
-
-static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
+// ***************************************************************************
+// Structures for chunk headers for chunks used for non-huge allocations.
+
+struct arena_t;
+
+// Each element of the chunk map corresponds to one page within the chunk.
+struct arena_chunk_map_t
+{
+  // Linkage for run trees.  There are two disjoint uses:
+  //
+  // 1) arena_t's tree of available runs.
+  // 2) arena_run_t conceptually uses this linkage for in-use non-full
+  //    runs, rather than directly embedding linkage.
+  RedBlackTreeNode<arena_chunk_map_t> link;
+
+  // Run address (or size) and various flags are stored together.  The bit
+  // layout looks like (assuming 32-bit system):
+  //
+  //   ???????? ???????? ????---- -mckdzla
+  //
+  // ? : Unallocated: Run address for first/last pages, unset for internal
+  //                  pages.
+  //     Small: Run address.
+  //     Large: Run size for first page, unset for trailing pages.
+  // - : Unused.
+  // m : MADV_FREE/MADV_DONTNEED'ed?
+  // c : decommitted?
+  // k : key?
+  // d : dirty?
+  // z : zeroed?
+  // l : large?
+  // a : allocated?
+  //
+  // Following are example bit patterns for the three types of runs.
+  //
+  // r : run address
+  // s : run size
+  // x : don't care
+  // - : 0
+  // [cdzla] : bit set
+  //
+  //   Unallocated:
+  //     ssssssss ssssssss ssss---- --c-----
+  //     xxxxxxxx xxxxxxxx xxxx---- ----d---
+  //     ssssssss ssssssss ssss---- -----z--
+  //
+  //   Small:
+  //     rrrrrrrr rrrrrrrr rrrr---- -------a
+  //     rrrrrrrr rrrrrrrr rrrr---- -------a
+  //     rrrrrrrr rrrrrrrr rrrr---- -------a
+  //
+  //   Large:
+  //     ssssssss ssssssss ssss---- ------la
+  //     -------- -------- -------- ------la
+  //     -------- -------- -------- ------la
+  size_t bits;
+
+// Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
+// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
+//
+// If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
+// re-committed with pages_commit() before it may be touched.  If
+// MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
+//
+// If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
+// are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
+// CHUNK_MAP_MADVISED.
+//
+// Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
+// defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
+// When it's finally freed with jemalloc_purge_freed_pages, the page is marked
+// as CHUNK_MAP_DECOMMITTED.
+#define CHUNK_MAP_MADVISED ((size_t)0x40U)
+#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
+#define CHUNK_MAP_MADVISED_OR_DECOMMITTED                                      \
+  (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
+#define CHUNK_MAP_KEY ((size_t)0x10U)
+#define CHUNK_MAP_DIRTY ((size_t)0x08U)
+#define CHUNK_MAP_ZEROED ((size_t)0x04U)
+#define CHUNK_MAP_LARGE ((size_t)0x02U)
+#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
+};
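For readers decoding the bit layout documented above, a small illustrative sketch (not from the patch; it assumes the CHUNK_MAP_* flags and the gPageSizeMask global defined later in the file are visible):

// Sketch: interpret a chunk-map word according to the layout above.
static inline bool MapIsAllocated(size_t aBits) { return (aBits & CHUNK_MAP_ALLOCATED) != 0; }
static inline bool MapIsLarge(size_t aBits) { return (aBits & CHUNK_MAP_LARGE) != 0; }
static inline bool MapIsDirty(size_t aBits) { return (aBits & CHUNK_MAP_DIRTY) != 0; }
// High bits: the run size on the first page of a large run, the run address
// for the pages of a small run, per the diagrams above.
static inline size_t MapRunBits(size_t aBits) { return aBits & ~gPageSizeMask; }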
+
+// Arena chunk header.
+struct arena_chunk_t
+{
+  // Arena that owns the chunk.
+  arena_t* arena;
+
+  // Linkage for the arena's tree of dirty chunks.
+  RedBlackTreeNode<arena_chunk_t> link_dirty;
+
+#ifdef MALLOC_DOUBLE_PURGE
+  // If we're double-purging, we maintain a linked list of chunks which
+  // have pages which have been madvise(MADV_FREE)'d but not explicitly
+  // purged.
+  //
+  // We're currently lazy and don't remove a chunk from this list when
+  // all its madvised pages are recommitted.
+  DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
+#endif
+
+  // Number of dirty pages.
+  size_t ndirty;
+
+  // Map of pages within chunk that keeps track of free/large/small.
+  arena_chunk_map_t map[1]; // Dynamically sized.
+};
+
+// ***************************************************************************
+// Constants defining allocator size classes and behavior.
 
 // Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
 // so over-estimates are okay (up to a point), but under-estimates will
 // negatively affect performance.
-#define CACHELINE_2POW 6
-#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
+static const size_t kCacheLineSize = 64;
 
 // Smallest size class to support.  On Windows the smallest allocation size
 // must be 8 bytes on 32-bit, 16 bytes on 64-bit.  On Linux and Mac, even
 // malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
 #ifdef XP_WIN
-#define TINY_MIN_2POW (sizeof(void*) == 8 ? 4 : 3)
+static const size_t kMinTinyClass = sizeof(void*) * 2;
 #else
-#define TINY_MIN_2POW (sizeof(void*) == 8 ? 3 : 2)
+static const size_t kMinTinyClass = sizeof(void*);
+#endif
+
+// Maximum tiny size class.
+static const size_t kMaxTinyClass = 8;
+
+// Amount (quantum) separating quantum-spaced size classes.
+static const size_t kQuantum = 16;
+static const size_t kQuantumMask = kQuantum - 1;
+
+// Smallest quantum-spaced size class. It could actually also be labelled a
+// tiny allocation, and is spaced as such from the largest tiny size class.
+// Tiny classes being powers of 2, this is twice as large as the largest of
+// them.
+static const size_t kMinQuantumClass = kMaxTinyClass * 2;
+
+// Largest quantum-spaced size class.
+static const size_t kMaxQuantumClass = 512;
+
+static_assert(kMaxQuantumClass % kQuantum == 0,
+              "kMaxQuantumClass is not a multiple of kQuantum");
+
+// Number of (2^n)-spaced tiny classes.
+static const size_t kNumTinyClasses =
+  LOG2(kMinQuantumClass) - LOG2(kMinTinyClass);
+
+// Number of quantum-spaced classes.
+static const size_t kNumQuantumClasses = kMaxQuantumClass / kQuantum;
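Plugging in the defaults above gives the following class counts (a worked sketch; the per-platform kMinTinyClass values come from the #ifdef further up):

// 64-bit Linux/Mac: kMinTinyClass = 8  -> kNumTinyClasses = LOG2(16) - LOG2(8)  = 1 ({8})
// 32-bit Linux/Mac: kMinTinyClass = 4  -> kNumTinyClasses = LOG2(16) - LOG2(4)  = 2 ({4, 8})
// 64-bit Windows:   kMinTinyClass = 16 -> kNumTinyClasses = LOG2(16) - LOG2(16) = 0
//                   (the smallest class is then the 16-byte quantum class)
// All platforms:    kNumQuantumClasses = 512 / 16 = 32 ({16, 32, ..., 512})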
+
+// Size and alignment of memory chunks that are allocated by the OS's virtual
+// memory system.
+static const size_t kChunkSize = 1_MiB;
+static const size_t kChunkSizeMask = kChunkSize - 1;
+
+#ifdef MALLOC_STATIC_PAGESIZE
+// VM page size. It must divide the runtime CPU page size or the code
+// will abort.
+// Platform specific page size conditions copied from js/public/HeapAPI.h
+#if (defined(SOLARIS) || defined(__FreeBSD__)) &&                              \
+  (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
+static const size_t gPageSize = 8_KiB;
+#elif defined(__powerpc64__)
+static const size_t gPageSize = 64_KiB;
+#else
+static const size_t gPageSize = 4_KiB;
+#endif
+
+#else
+static size_t gPageSize;
 #endif
 
-// Maximum size class that is a multiple of the quantum, but not (necessarily)
-// a power of 2.  Above this size, allocations are rounded up to the nearest
-// power of 2.
-#define SMALL_MAX_2POW_DEFAULT 9
-#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
+#ifdef MALLOC_STATIC_PAGESIZE
+#define DECLARE_GLOBAL(type, name)
+#define DEFINE_GLOBALS
+#define END_GLOBALS
+#define DEFINE_GLOBAL(type) static const type
+#define GLOBAL_LOG2 LOG2
+#define GLOBAL_ASSERT_HELPER1(x) static_assert(x, #x)
+#define GLOBAL_ASSERT_HELPER2(x, y) static_assert(x, y)
+#define GLOBAL_ASSERT(...)                                                     \
+  MACRO_CALL(                                                                  \
+    MOZ_PASTE_PREFIX_AND_ARG_COUNT(GLOBAL_ASSERT_HELPER, __VA_ARGS__),         \
+    (__VA_ARGS__))
+#else
+#define DECLARE_GLOBAL(type, name) static type name;
+#define DEFINE_GLOBALS                                                         \
+  static void DefineGlobals()                                                  \
+  {
+#define END_GLOBALS }
+#define DEFINE_GLOBAL(type)
+#define GLOBAL_LOG2 FloorLog2
+#define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
+#endif
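Roughly, the two configurations expand as follows (an illustrative summary, not literal preprocessor output):

// With MALLOC_STATIC_PAGESIZE (compile-time page size):
//   DECLARE_GLOBAL(size_t, gPageSizeMask)      -> nothing
//   DEFINE_GLOBAL(size_t) gPageSizeMask = ...; -> static const size_t gPageSizeMask = ...;
//   GLOBAL_ASSERT(cond)                        -> static_assert(cond, "cond")
//
// Without it (runtime page size):
//   DECLARE_GLOBAL(size_t, gPageSizeMask)      -> static size_t gPageSizeMask;
//   DEFINE_GLOBALS ... END_GLOBALS             -> static void DefineGlobals() { ... },
//                                                 with the DEFINE_GLOBAL() lines becoming
//                                                 plain assignments inside it
//   GLOBAL_ASSERT(cond)                        -> MOZ_RELEASE_ASSERT(cond)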
+
+DECLARE_GLOBAL(size_t, gMaxSubPageClass)
+DECLARE_GLOBAL(uint8_t, gNumSubPageClasses)
+DECLARE_GLOBAL(uint8_t, gPageSize2Pow)
+DECLARE_GLOBAL(size_t, gPageSizeMask)
+DECLARE_GLOBAL(size_t, gChunkNumPages)
+DECLARE_GLOBAL(size_t, gChunkHeaderNumPages)
+DECLARE_GLOBAL(size_t, gMaxLargeClass)
+
+DEFINE_GLOBALS
+// Largest sub-page size class.
+DEFINE_GLOBAL(size_t) gMaxSubPageClass = gPageSize / 2;
+
+// Max size class for bins.
+#define gMaxBinClass gMaxSubPageClass
+
+// Number of (2^n)-spaced sub-page bins.
+DEFINE_GLOBAL(uint8_t)
+gNumSubPageClasses = GLOBAL_LOG2(gMaxSubPageClass) - LOG2(kMaxQuantumClass);
+
+DEFINE_GLOBAL(uint8_t) gPageSize2Pow = GLOBAL_LOG2(gPageSize);
+DEFINE_GLOBAL(size_t) gPageSizeMask = gPageSize - 1;
+
+// Number of pages in a chunk.
+DEFINE_GLOBAL(size_t) gChunkNumPages = kChunkSize >> gPageSize2Pow;
+
+// Number of pages necessary for a chunk header.
+DEFINE_GLOBAL(size_t)
+gChunkHeaderNumPages =
+  ((sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (gChunkNumPages - 1) +
+    gPageSizeMask) &
+   ~gPageSizeMask) >>
+  gPageSize2Pow;
+
+// Max size class for arenas.
+DEFINE_GLOBAL(size_t)
+gMaxLargeClass = kChunkSize - (gChunkHeaderNumPages << gPageSize2Pow);
+
+// Various sanity checks that regard configuration.
+GLOBAL_ASSERT(1ULL << gPageSize2Pow == gPageSize,
+              "Page size is not a power of two");
+GLOBAL_ASSERT(kQuantum >= sizeof(void*));
+GLOBAL_ASSERT(kQuantum <= gPageSize);
+GLOBAL_ASSERT(kChunkSize >= gPageSize);
+GLOBAL_ASSERT(kQuantum * 4 <= kChunkSize);
+END_GLOBALS
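A worked example of the derived values, assuming 4 KiB pages (struct sizes are indicative only):

//   gChunkNumPages       = 1 MiB >> 12 = 256 pages per chunk
//   header bytes         = sizeof(arena_chunk_t) + 255 * sizeof(arena_chunk_map_t),
//                          rounded up to a whole number of pages
//   gChunkHeaderNumPages = header bytes >> 12 (a few pages in practice)
//   gMaxLargeClass       = 1 MiB - (gChunkHeaderNumPages << 12), i.e. slightly under
//                          1 MiB; larger requests are handled as huge allocations.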
+
+// Recycle at most 128 MiB of chunks. This means we retain at most
+// 6.25% of the process address space on a 32-bit OS for later use.
+static const size_t gRecycleLimit = 128_MiB;
+
+// The current amount of recycled bytes, updated atomically.
+static Atomic<size_t, ReleaseAcquire> gRecycledSize;
+
+// Maximum number of dirty pages per arena.
+#define DIRTY_MAX_DEFAULT (1U << 8)
+
+static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
 
 // RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 // as small as possible such that this setting is still honored, without
 // violating other constraints.  The goal is to make runs as small as possible
 // without exceeding a per run external fragmentation threshold.
 //
 // We use binary fixed point math for overhead computations, where the binary
 // point is implicitly RUN_BFP bits to the left.
@@ -294,117 +528,28 @@ static size_t opt_dirty_max = DIRTY_MAX_
 // that are so small that the per-region overhead is greater than:
 //
 //   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
 #define RUN_BFP 12
 //                                    \/   Implicit binary fixed point.
 #define RUN_MAX_OVRHD 0x0000003dU
 #define RUN_MAX_OVRHD_RELAX 0x00001800U
 
-// When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
-// compile-time for better performance, as opposed to determined at
-// runtime. Some platforms can have different page sizes at runtime
-// depending on kernel configuration, so they are opted out by default.
-// Debug builds are opted out too, for test coverage.
-#ifndef MOZ_DEBUG
-#if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) &&         \
-  !defined(__aarch64__)
-#define MALLOC_STATIC_PAGESIZE 1
-#endif
-#endif
-
-// Various quantum-related settings.
-#define QUANTUM_DEFAULT (size_t(1) << QUANTUM_2POW_MIN)
-static const size_t quantum = QUANTUM_DEFAULT;
-static const size_t quantum_mask = QUANTUM_DEFAULT - 1;
-
-// Various bin-related settings.
-static const size_t small_min = (QUANTUM_DEFAULT >> 1) + 1;
-static const size_t small_max = size_t(SMALL_MAX_DEFAULT);
-
-// Number of (2^n)-spaced tiny bins.
-static const unsigned ntbins = unsigned(QUANTUM_2POW_MIN - TINY_MIN_2POW);
-
-// Number of quantum-spaced bins.
-static const unsigned nqbins = unsigned(SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN);
-
-#ifdef MALLOC_STATIC_PAGESIZE
-
-// VM page size. It must divide the runtime CPU page size or the code
-// will abort.
-// Platform specific page size conditions copied from js/public/HeapAPI.h
-#if (defined(SOLARIS) || defined(__FreeBSD__)) &&                              \
-  (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
-#define pagesize_2pow (size_t(13))
-#elif defined(__powerpc64__)
-#define pagesize_2pow (size_t(16))
-#else
-#define pagesize_2pow (size_t(12))
-#endif
-#define pagesize (size_t(1) << pagesize_2pow)
-#define pagesize_mask (pagesize - 1)
-
-// Max size class for bins.
-static const size_t bin_maxclass = pagesize >> 1;
-
-// Number of (2^n)-spaced sub-page bins.
-static const unsigned nsbins =
-  unsigned(pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1);
-
-#else // !MALLOC_STATIC_PAGESIZE
-
-// VM page size.
-static size_t pagesize;
-static size_t pagesize_mask;
-static size_t pagesize_2pow;
-
-// Various bin-related settings.
-static size_t bin_maxclass; // Max size class for bins.
-static unsigned nsbins;     // Number of (2^n)-spaced sub-page bins.
-#endif
-
-// Various chunk-related settings.
-
-// Compute the header size such that it is large enough to contain the page map
-// and enough nodes for the worst case: one node per non-header page plus one
-// extra for situations where we briefly have one more node allocated than we
-// will need.
-#define calculate_arena_header_size()                                          \
-  (sizeof(arena_chunk_t) + sizeof(arena_chunk_map_t) * (chunk_npages - 1))
-
-#define calculate_arena_header_pages()                                         \
-  ((calculate_arena_header_size() >> pagesize_2pow) +                          \
-   ((calculate_arena_header_size() & pagesize_mask) ? 1 : 0))
-
-// Max size class for arenas.
-#define calculate_arena_maxclass()                                             \
-  (chunksize - (arena_chunk_header_npages << pagesize_2pow))
-
-#define CHUNKSIZE_DEFAULT ((size_t)1 << CHUNK_2POW_DEFAULT)
-static const size_t chunksize = CHUNKSIZE_DEFAULT;
-static const size_t chunksize_mask = CHUNKSIZE_DEFAULT - 1;
-
-#ifdef MALLOC_STATIC_PAGESIZE
-static const size_t chunk_npages = CHUNKSIZE_DEFAULT >> pagesize_2pow;
-#define arena_chunk_header_npages calculate_arena_header_pages()
-#define arena_maxclass calculate_arena_maxclass()
-#else
-static size_t chunk_npages;
-static size_t arena_chunk_header_npages;
-static size_t arena_maxclass; // Max size class for arenas.
-#endif
-
-// Recycle at most 128 chunks. With 1 MiB chunks, this means we retain at most
-// 6.25% of the process address space on a 32-bit OS for later use.
-#define CHUNK_RECYCLE_LIMIT 128
-
-static const size_t gRecycleLimit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
-
-// The current amount of recycled bytes, updated atomically.
-static Atomic<size_t, ReleaseAcquire> gRecycledSize;
+// Return the smallest chunk multiple that is >= s.
+#define CHUNK_CEILING(s) (((s) + kChunkSizeMask) & ~kChunkSizeMask)
+
+// Return the smallest cacheline multiple that is >= s.
+#define CACHELINE_CEILING(s)                                                   \
+  (((s) + (kCacheLineSize - 1)) & ~(kCacheLineSize - 1))
+
+// Return the smallest quantum multiple that is >= a.
+#define QUANTUM_CEILING(a) (((a) + (kQuantumMask)) & ~(kQuantumMask))
+
+// Return the smallest pagesize multiple that is >= s.
+#define PAGE_CEILING(s) (((s) + gPageSizeMask) & ~gPageSizeMask)
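Worked examples for the ceiling macros, assuming 4 KiB pages and the constants above:

//   QUANTUM_CEILING(20)    = (20 + 15) & ~15          = 32
//   CACHELINE_CEILING(100) = (100 + 63) & ~63         = 128
//   PAGE_CEILING(5000)     = (5000 + 4095) & ~4095    = 8192
//   CHUNK_CEILING(1)       = (1 + 0xFFFFF) & ~0xFFFFF = 1 MiB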
 
 // ***************************************************************************
 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
 static void*
@@ -502,26 +647,16 @@ struct extent_node_t
 
   // Total region size.
   size_t size;
 
   // What type of chunk is there; used by chunk recycling code.
   ChunkType chunk_type;
 };
 
-template<typename T>
-int
-CompareAddr(T* aAddr1, T* aAddr2)
-{
-  uintptr_t addr1 = reinterpret_cast<uintptr_t>(aAddr1);
-  uintptr_t addr2 = reinterpret_cast<uintptr_t>(aAddr2);
-
-  return (addr1 > addr2) - (addr1 < addr2);
-}
-
 struct ExtentTreeSzTrait
 {
   static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis)
   {
     return aThis->link_szad;
   }
 
   static inline int Compare(extent_node_t* aNode, extent_node_t* aOther)
@@ -556,16 +691,60 @@ struct ExtentTreeBoundsTrait : public Ex
     if (node_addr <= key_addr && key_addr < node_addr + node_size) {
       return 0;
     }
 
     return (key_addr > node_addr) - (key_addr < node_addr);
   }
 };
 
+// Describe the size classes to which allocations are rounded up.
+// TODO: add large and huge types when the arena allocation code
+// changes in a way that allows it to be beneficial.
+class SizeClass
+{
+public:
+  enum ClassType
+  {
+    Tiny,
+    Quantum,
+    SubPage,
+  };
+
+  explicit inline SizeClass(size_t aSize)
+  {
+    if (aSize <= kMaxTinyClass) {
+      mType = Tiny;
+      mSize = std::max(RoundUpPow2(aSize), kMinTinyClass);
+    } else if (aSize <= kMaxQuantumClass) {
+      mType = Quantum;
+      mSize = QUANTUM_CEILING(aSize);
+    } else if (aSize <= gMaxSubPageClass) {
+      mType = SubPage;
+      mSize = RoundUpPow2(aSize);
+    } else {
+      MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
+    }
+  }
+
+  SizeClass& operator=(const SizeClass& aOther) = default;
+
+  bool operator==(const SizeClass& aOther) { return aOther.mSize == mSize; }
+
+  size_t Size() { return mSize; }
+
+  ClassType Type() { return mType; }
+
+  SizeClass Next() { return SizeClass(mSize + 1); }
+
+private:
+  ClassType mType;
+  size_t mSize;
+};
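A usage sketch for SizeClass (illustrative only; assumes 4 KiB pages, so gMaxSubPageClass == 2048):

// Sketch: how requests map to classes with the default constants.
static inline void SizeClassExamples()
{
  MOZ_ASSERT(SizeClass(1).Size() == kMinTinyClass); // Tiny: at least the minimum class
  MOZ_ASSERT(SizeClass(20).Size() == 32);           // Quantum: next multiple of 16
  MOZ_ASSERT(SizeClass(600).Size() == 1024);        // SubPage: next power of 2
  MOZ_ASSERT(SizeClass(512).Next().Type() == SizeClass::SubPage);
}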
+
 // ***************************************************************************
 // Radix tree data structures.
 //
 // The number of bits passed to the template is the number of significant bits
 // in an address to do a radix lookup with.
 //
 // An address is looked up by splitting it in kBitsPerLevel bit chunks, except
 // the most significant bits, where the bit chunk is kBitsAtLevel1 which can be
@@ -575,21 +754,21 @@ struct ExtentTreeBoundsTrait : public Ex
 // like the following:
 // 0x12345678 -> mRoot[0x12][0x34]
 template<size_t Bits>
 class AddressRadixTree
 {
 // Size of each radix tree node (as a power of 2).
 // This impacts tree depth.
 #ifdef HAVE_64BIT_BUILD
-  static const size_t kNodeSize2Pow = CACHELINE_2POW;
+  static const size_t kNodeSize = kCacheLineSize;
 #else
-  static const size_t kNodeSize2Pow = 14;
+  static const size_t kNodeSize = 16_KiB;
 #endif
-  static const size_t kBitsPerLevel = kNodeSize2Pow - LOG2(sizeof(void*));
+  static const size_t kBitsPerLevel = LOG2(kNodeSize) - LOG2(sizeof(void*));
   static const size_t kBitsAtLevel1 =
     (Bits % kBitsPerLevel) ? Bits % kBitsPerLevel : kBitsPerLevel;
   static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
   static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
                 "AddressRadixTree parameters don't work out");
 
   Mutex mLock;
   void** mRoot;
@@ -606,97 +785,18 @@ public:
 
 private:
   inline void** GetSlot(void* aAddr, bool aCreate = false);
 };
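Concretely, the tree parameters for the instantiation used further down (gChunkRTree) work out as follows (a sketch):

//   64-bit: Bits = 64 - LOG2(1 MiB) = 44, kNodeSize = 64 bytes
//           -> kBitsPerLevel = LOG2(64) - LOG2(8) = 3, kBitsAtLevel1 = 2, kHeight = 15
//   32-bit: Bits = 32 - 20 = 12, kNodeSize = 16 KiB
//           -> kBitsPerLevel = 14 - 2 = 12, kHeight = 1 (a single flat table)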
 
 // ***************************************************************************
 // Arena data structures.
 
-struct arena_t;
 struct arena_bin_t;
 
-// Each element of the chunk map corresponds to one page within the chunk.
-struct arena_chunk_map_t
-{
-  // Linkage for run trees.  There are two disjoint uses:
-  //
-  // 1) arena_t's tree or available runs.
-  // 2) arena_run_t conceptually uses this linkage for in-use non-full
-  //    runs, rather than directly embedding linkage.
-  RedBlackTreeNode<arena_chunk_map_t> link;
-
-  // Run address (or size) and various flags are stored together.  The bit
-  // layout looks like (assuming 32-bit system):
-  //
-  //   ???????? ???????? ????---- -mckdzla
-  //
-  // ? : Unallocated: Run address for first/last pages, unset for internal
-  //                  pages.
-  //     Small: Run address.
-  //     Large: Run size for first page, unset for trailing pages.
-  // - : Unused.
-  // m : MADV_FREE/MADV_DONTNEED'ed?
-  // c : decommitted?
-  // k : key?
-  // d : dirty?
-  // z : zeroed?
-  // l : large?
-  // a : allocated?
-  //
-  // Following are example bit patterns for the three types of runs.
-  //
-  // r : run address
-  // s : run size
-  // x : don't care
-  // - : 0
-  // [cdzla] : bit set
-  //
-  //   Unallocated:
-  //     ssssssss ssssssss ssss---- --c-----
-  //     xxxxxxxx xxxxxxxx xxxx---- ----d---
-  //     ssssssss ssssssss ssss---- -----z--
-  //
-  //   Small:
-  //     rrrrrrrr rrrrrrrr rrrr---- -------a
-  //     rrrrrrrr rrrrrrrr rrrr---- -------a
-  //     rrrrrrrr rrrrrrrr rrrr---- -------a
-  //
-  //   Large:
-  //     ssssssss ssssssss ssss---- ------la
-  //     -------- -------- -------- ------la
-  //     -------- -------- -------- ------la
-  size_t bits;
-
-// Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
-// MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
-//
-// If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
-// re-committed with pages_commit() before it may be touched.  If
-// MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
-//
-// If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
-// are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
-// CHUNK_MAP_MADVISED.
-//
-// Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
-// defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
-// When it's finally freed with jemalloc_purge_freed_pages, the page is marked
-// as CHUNK_MAP_DECOMMITTED.
-#define CHUNK_MAP_MADVISED ((size_t)0x40U)
-#define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
-#define CHUNK_MAP_MADVISED_OR_DECOMMITTED                                      \
-  (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
-#define CHUNK_MAP_KEY ((size_t)0x10U)
-#define CHUNK_MAP_DIRTY ((size_t)0x08U)
-#define CHUNK_MAP_ZEROED ((size_t)0x04U)
-#define CHUNK_MAP_LARGE ((size_t)0x02U)
-#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
-};
-
 struct ArenaChunkMapLink
 {
   static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(
     arena_chunk_map_t* aThis)
   {
     return aThis->link;
   }
 };
@@ -710,51 +810,25 @@ struct ArenaRunTreeTrait : public ArenaC
     return CompareAddr(aNode, aOther);
   }
 };
 
 struct ArenaAvailTreeTrait : public ArenaChunkMapLink
 {
   static inline int Compare(arena_chunk_map_t* aNode, arena_chunk_map_t* aOther)
   {
-    size_t size1 = aNode->bits & ~pagesize_mask;
-    size_t size2 = aOther->bits & ~pagesize_mask;
+    size_t size1 = aNode->bits & ~gPageSizeMask;
+    size_t size2 = aOther->bits & ~gPageSizeMask;
     int ret = (size1 > size2) - (size1 < size2);
     return ret ? ret
                : CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode,
                              aOther);
   }
 };
 
-// Arena chunk header.
-struct arena_chunk_t
-{
-  // Arena that owns the chunk.
-  arena_t* arena;
-
-  // Linkage for the arena's tree of dirty chunks.
-  RedBlackTreeNode<arena_chunk_t> link_dirty;
-
-#ifdef MALLOC_DOUBLE_PURGE
-  // If we're double-purging, we maintain a linked list of chunks which
-  // have pages which have been madvise(MADV_FREE)'d but not explicitly
-  // purged.
-  //
-  // We're currently lazy and don't remove a chunk from this list when
-  // all its madvised pages are recommitted.
-  DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
-#endif
-
-  // Number of dirty pages.
-  size_t ndirty;
-
-  // Map of pages within chunk that keeps track of free/large/small.
-  arena_chunk_map_t map[1]; // Dynamically sized.
-};
-
 struct ArenaDirtyChunkTrait
 {
   static RedBlackTreeNode<arena_chunk_t>& GetTreeNode(arena_chunk_t* aThis)
   {
     return aThis->link_dirty;
   }
 
   static inline int Compare(arena_chunk_t* aNode, arena_chunk_t* aOther)
@@ -975,18 +1049,19 @@ public:
 
   void* operator new(size_t aCount, const fallible_t&)
 #if !defined(_MSC_VER) || defined(_CPPUNWIND)
     noexcept
 #endif
   {
     MOZ_ASSERT(aCount == sizeof(arena_t));
     // Allocate enough space for trailing bins.
-    return base_alloc(aCount +
-                      (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+    return base_alloc(
+      aCount + (sizeof(arena_bin_t) * (kNumTinyClasses + kNumQuantumClasses +
+                                       gNumSubPageClasses - 1)));
   }
 
   void operator delete(void*) = delete;
 };
 
 struct ArenaTreeTrait
 {
   static RedBlackTreeNode<arena_t>& GetTreeNode(arena_t* aThis)
@@ -1080,17 +1155,17 @@ private:
   Tree mArenas;
   Tree mPrivateArenas;
 };
 
 static ArenaCollection gArenas;
 
 // ******
 // Chunks.
-static AddressRadixTree<(sizeof(void*) << 3) - CHUNK_2POW_DEFAULT> gChunkRTree;
+static AddressRadixTree<(sizeof(void*) << 3) - LOG2(kChunkSize)> gChunkRTree;
 
 // Protects chunk-related data structures.
 static Mutex chunks_mtx;
 
 // Trees of chunks that were previously allocated (trees differ only in node
 // ordering).  These are used when allocating chunks, in an attempt to re-use
 // address space.  Depending on function, different tree orderings are needed,
 // which is why there are two trees with the same contents.
@@ -1302,38 +1377,26 @@ Mutex::Unlock()
 // End mutex.
 // ***************************************************************************
 // Begin Utility functions/macros.
 
 // Return the chunk address for allocation address a.
 static inline arena_chunk_t*
 GetChunkForPtr(const void* aPtr)
 {
-  return (arena_chunk_t*)(uintptr_t(aPtr) & ~chunksize_mask);
+  return (arena_chunk_t*)(uintptr_t(aPtr) & ~kChunkSizeMask);
 }
 
 // Return the chunk offset of address a.
 static inline size_t
 GetChunkOffsetForPtr(const void* aPtr)
 {
-  return (size_t)(uintptr_t(aPtr) & chunksize_mask);
+  return (size_t)(uintptr_t(aPtr) & kChunkSizeMask);
 }
 
-// Return the smallest chunk multiple that is >= s.
-#define CHUNK_CEILING(s) (((s) + chunksize_mask) & ~chunksize_mask)
-
-// Return the smallest cacheline multiple that is >= s.
-#define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
-
-// Return the smallest quantum multiple that is >= a.
-#define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
-
-// Return the smallest pagesize multiple that is >= s.
-#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
-
 static inline const char*
 _getprogname(void)
 {
 
   return "<jemalloc>";
 }
 
 // ***************************************************************************
@@ -1341,24 +1404,24 @@ static inline const char*
 static inline void
 pages_decommit(void* aAddr, size_t aSize)
 {
 #ifdef XP_WIN
   // The region starting at addr may have been allocated in multiple calls
   // to VirtualAlloc and recycled, so decommitting the entire region in one
   // go may not be valid. However, since we allocate at least a chunk at a
   // time, we may touch any region in chunksized increments.
-  size_t pages_size = std::min(aSize, chunksize - GetChunkOffsetForPtr(aAddr));
+  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
   while (aSize > 0) {
     if (!VirtualFree(aAddr, pages_size, MEM_DECOMMIT)) {
       MOZ_CRASH();
     }
     aAddr = (void*)((uintptr_t)aAddr + pages_size);
     aSize -= pages_size;
-    pages_size = std::min(aSize, chunksize);
+    pages_size = std::min(aSize, kChunkSize);
   }
 #else
   if (mmap(
         aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
       MAP_FAILED) {
     MOZ_CRASH();
   }
   MozTagAnonymousMemory(aAddr, aSize, "jemalloc-decommitted");
@@ -1369,24 +1432,24 @@ pages_decommit(void* aAddr, size_t aSize
 MOZ_MUST_USE static inline bool
 pages_commit(void* aAddr, size_t aSize)
 {
 #ifdef XP_WIN
   // The region starting at addr may have been allocated in multiple calls
   // to VirtualAlloc and recycled, so committing the entire region in one
   // go may not be valid. However, since we allocate at least a chunk at a
   // time, we may touch any region in chunksized increments.
-  size_t pages_size = std::min(aSize, chunksize - GetChunkOffsetForPtr(aAddr));
+  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
   while (aSize > 0) {
     if (!VirtualAlloc(aAddr, pages_size, MEM_COMMIT, PAGE_READWRITE)) {
       return false;
     }
     aAddr = (void*)((uintptr_t)aAddr + pages_size);
     aSize -= pages_size;
-    pages_size = std::min(aSize, chunksize);
+    pages_size = std::min(aSize, kChunkSize);
   }
 #else
   if (mmap(aAddr,
            aSize,
            PROT_READ | PROT_WRITE,
            MAP_FIXED | MAP_PRIVATE | MAP_ANON,
            -1,
            0) == MAP_FAILED) {
@@ -1400,17 +1463,17 @@ pages_commit(void* aAddr, size_t aSize)
 static bool
 base_pages_alloc(size_t minsize)
 {
   size_t csize;
   size_t pminsize;
 
   MOZ_ASSERT(minsize != 0);
   csize = CHUNK_CEILING(minsize);
-  base_pages = chunk_alloc(csize, chunksize, true);
+  base_pages = chunk_alloc(csize, kChunkSize, true);
   if (!base_pages) {
     return true;
   }
   base_next_addr = base_pages;
   base_past_addr = (void*)((uintptr_t)base_pages + csize);
   // Leave enough pages for minsize committed, since otherwise they would
   // have to be immediately recommitted.
   pminsize = PAGE_CEILING(minsize);
@@ -1570,17 +1633,17 @@ pages_map(void* aAddr, size_t aSize)
 #if defined(__sparc__) && defined(__arch64__) && defined(__linux__)
   const uintptr_t start = 0x0000070000000000ULL;
   const uintptr_t end = 0x0000800000000000ULL;
 
   // Copied from js/src/gc/Memory.cpp and adapted for this source
   uintptr_t hint;
   void* region = MAP_FAILED;
   for (hint = start; region == MAP_FAILED && hint + aSize <= end;
-       hint += chunksize) {
+       hint += kChunkSize) {
     region = mmap((void*)hint,
                   aSize,
                   PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON,
                   -1,
                   0);
     if (region != MAP_FAILED) {
       if (((size_t)region + (aSize - 1)) & 0xffff800000000000) {
@@ -1630,24 +1693,24 @@ pages_map(void* aAddr, size_t aSize)
 #else
   MOZ_ASSERT(!ret || (!aAddr && ret != aAddr) || (aAddr && ret == aAddr));
 #endif
   return ret;
 }
 #endif
 
 #ifdef XP_DARWIN
-#define VM_COPY_MIN (pagesize << 5)
+#define VM_COPY_MIN (gPageSize * 32)
 static inline void
 pages_copy(void* dest, const void* src, size_t n)
 {
 
-  MOZ_ASSERT((void*)((uintptr_t)dest & ~pagesize_mask) == dest);
+  MOZ_ASSERT((void*)((uintptr_t)dest & ~gPageSizeMask) == dest);
   MOZ_ASSERT(n >= VM_COPY_MIN);
-  MOZ_ASSERT((void*)((uintptr_t)src & ~pagesize_mask) == src);
+  MOZ_ASSERT((void*)((uintptr_t)src & ~gPageSizeMask) == src);
 
   vm_copy(
     mach_task_self(), (vm_address_t)src, (vm_size_t)n, (vm_address_t)dest);
 }
 #endif
 
 template<size_t Bits>
 bool
@@ -1786,17 +1849,17 @@ pages_trim(void* addr, size_t alloc_size
 }
 
 static void*
 chunk_alloc_mmap_slow(size_t size, size_t alignment)
 {
   void *ret, *pages;
   size_t alloc_size, leadsize;
 
-  alloc_size = size + alignment - pagesize;
+  alloc_size = size + alignment - gPageSize;
   // Beware size_t wrap-around.
   if (alloc_size < size) {
     return nullptr;
   }
   do {
     pages = pages_map(nullptr, alloc_size);
     if (!pages) {
       return nullptr;
@@ -1858,22 +1921,22 @@ pages_purge(void* addr, size_t length, b
     memset(addr, 0, length);
   }
 #endif
 #ifdef XP_WIN
   // The region starting at addr may have been allocated in multiple calls
   // to VirtualAlloc and recycled, so resetting the entire region in one
   // go may not be valid. However, since we allocate at least a chunk at a
   // time, we may touch any region in chunksized increments.
-  size_t pages_size = std::min(length, chunksize - GetChunkOffsetForPtr(addr));
+  size_t pages_size = std::min(length, kChunkSize - GetChunkOffsetForPtr(addr));
   while (length > 0) {
     VirtualAlloc(addr, pages_size, MEM_RESET, PAGE_READWRITE);
     addr = (void*)((uintptr_t)addr + pages_size);
     length -= pages_size;
-    pages_size = std::min(length, chunksize);
+    pages_size = std::min(length, kChunkSize);
   }
   return force_zero;
 #else
 #ifdef XP_LINUX
 #define JEMALLOC_MADV_PURGE MADV_DONTNEED
 #define JEMALLOC_MADV_ZEROS true
 #else // FreeBSD and Darwin.
 #define JEMALLOC_MADV_PURGE MADV_FREE
@@ -1887,17 +1950,17 @@ pages_purge(void* addr, size_t length, b
 #endif
 }
 
 static void*
 chunk_recycle(size_t aSize, size_t aAlignment, bool* aZeroed)
 {
   extent_node_t key;
 
-  size_t alloc_size = aSize + aAlignment - chunksize;
+  size_t alloc_size = aSize + aAlignment - kChunkSize;
   // Beware size_t wrap-around.
   if (alloc_size < aSize) {
     return nullptr;
   }
   key.addr = nullptr;
   key.size = alloc_size;
   chunks_mtx.Lock();
   extent_node_t* node = gChunksBySize.SearchOrNext(&key);
@@ -1967,36 +2030,36 @@ chunk_recycle(size_t aSize, size_t aAlig
   return ret;
 }
 
 #ifdef XP_WIN
 // On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
 // awkward to recycle allocations of varying sizes. Therefore we only allow
 // recycling when the size equals the chunksize, unless deallocation is entirely
 // disabled.
-#define CAN_RECYCLE(size) (size == chunksize)
+#define CAN_RECYCLE(size) (size == kChunkSize)
 #else
 #define CAN_RECYCLE(size) true
 #endif
 
 // Allocates `size` bytes of system memory aligned for `alignment`.
 // `base` indicates whether the memory will be used for the base allocator
 // (e.g. base_alloc).
 // `zeroed` is an outvalue that returns whether the allocated memory is
 // guaranteed to be full of zeroes. It can be omitted when the caller doesn't
 // care about the result.
 static void*
 chunk_alloc(size_t aSize, size_t aAlignment, bool aBase, bool* aZeroed)
 {
   void* ret = nullptr;
 
   MOZ_ASSERT(aSize != 0);
-  MOZ_ASSERT((aSize & chunksize_mask) == 0);
+  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);
   MOZ_ASSERT(aAlignment != 0);
-  MOZ_ASSERT((aAlignment & chunksize_mask) == 0);
+  MOZ_ASSERT((aAlignment & kChunkSizeMask) == 0);
 
   // Base allocations can't be fulfilled by recycling because of
   // possible deadlock or infinite recursion.
   if (CAN_RECYCLE(aSize) && !aBase) {
     ret = chunk_recycle(aSize, aAlignment, aZeroed);
   }
   if (!ret) {
     ret = chunk_alloc_mmap(aSize, aAlignment);
@@ -2110,17 +2173,17 @@ chunk_record(void* aChunk, size_t aSize,
 }
 
 static void
 chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType)
 {
   MOZ_ASSERT(aChunk);
   MOZ_ASSERT(GetChunkOffsetForPtr(aChunk) == 0);
   MOZ_ASSERT(aSize != 0);
-  MOZ_ASSERT((aSize & chunksize_mask) == 0);
+  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);
 
   gChunkRTree.Unset(aChunk);
 
   if (CAN_RECYCLE(aSize)) {
     size_t recycled_so_far = gRecycledSize;
     // In case some race condition put us above the limit.
     if (recycled_so_far < gRecycleLimit) {
       size_t recycle_remaining = gRecycleLimit - recycled_so_far;
@@ -2178,18 +2241,18 @@ static inline arena_t*
 choose_arena(size_t size)
 {
   arena_t* ret = nullptr;
 
   // We can only use TLS if this is a PIC library, since for the static
   // library version, libc's malloc is used by TLS allocation, which
   // introduces a bootstrapping issue.
 
-  // Only use a thread local arena for small sizes.
-  if (size <= small_max) {
+  // Only use a thread local arena for quantum and tiny sizes.
+  if (size <= kMaxQuantumClass) {
     ret = thread_arena.get();
   }
 
   if (!ret) {
     ret = thread_local_arena(false);
   }
   MOZ_DIAGNOSTIC_ASSERT(ret);
   return ret;
@@ -2257,48 +2320,38 @@ arena_run_reg_dalloc(arena_run_t* run, a
 {
 // To divide by a number D that is not a power of two we multiply
 // by (2^21 / D) and then right shift by 21 positions.
 //
 //   X / D
 //
 // becomes
 //
-//   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
+//   (X * size_invs[(D / kQuantum) - 3]) >> SIZE_INV_SHIFT
 
 #define SIZE_INV_SHIFT 21
-#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
+#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s * kQuantum)) + 1)
   // clang-format off
   static const unsigned size_invs[] = {
     SIZE_INV(3),
     SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
     SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
     SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
     SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
     SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
     SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
     SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
-#if (QUANTUM_2POW_MIN < 4)
-    ,
-    SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
-    SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
-    SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
-    SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
-    SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
-    SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
-    SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
-    SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
-#endif
   };
   // clang-format on
   unsigned diff, regind, elm, bit;
 
   MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
-  MOZ_ASSERT(((sizeof(size_invs)) / sizeof(unsigned)) + 3 >=
-             (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
+  static_assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3 >=
+                  kNumQuantumClasses,
+                "size_invs doesn't have enough values");
 
   // Avoid doing division with a variable divisor if possible.  Using
   // actual division here can reduce allocator throughput by over 20%!
   diff =
     (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->mRunFirstRegionOffset);
   if ((size & (size - 1)) == 0) {
     // log2_table allows fast division of a power of two in the
     // [1..128] range.
@@ -2321,19 +2374,18 @@ arena_run_reg_dalloc(arena_run_t* run, a
       regind = (diff >> log2_table[size - 1]);
     } else if (size <= 32768) {
       regind = diff >> (8 + log2_table[(size >> 8) - 1]);
     } else {
       // The run size is too large for us to use the lookup
       // table.  Use real division.
       regind = diff / size;
     }
-  } else if (size <=
-             ((sizeof(size_invs) / sizeof(unsigned)) << QUANTUM_2POW_MIN) + 2) {
-    regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
+  } else if (size <= ((sizeof(size_invs) / sizeof(unsigned)) * kQuantum) + 2) {
+    regind = size_invs[(size / kQuantum) - 3] * diff;
     regind >>= SIZE_INV_SHIFT;
   } else {
     // size_invs isn't large enough to handle this size class, so
     // calculate regind using actual division.  This only happens
     // if the user increases small_max via the 'S' runtime
     // configuration option.
     regind = diff / size;
   };
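A worked instance of the reciprocal-multiply division above (a sketch, with kQuantum == 16 and SIZE_INV_SHIFT == 21):

//   For the 48-byte class, D = 48, so size_invs[(48 / 16) - 3] = SIZE_INV(3)
//                                 = (2^21 / 48) + 1 = 43691.
//   A region at byte offset diff = 96 then gets
//     regind = (96 * 43691) >> 21 = 4194336 >> 21 = 2   // == 96 / 48
//   with no hardware division.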
@@ -2354,19 +2406,19 @@ arena_run_reg_dalloc(arena_run_t* run, a
 bool
 arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
 {
   arena_chunk_t* chunk;
   size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
 
   chunk = GetChunkForPtr(aRun);
   old_ndirty = chunk->ndirty;
-  run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
-  total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >> pagesize_2pow;
-  need_pages = (aSize >> pagesize_2pow);
+  run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
+  total_pages = (chunk->map[run_ind].bits & ~gPageSizeMask) >> gPageSize2Pow;
+  need_pages = (aSize >> gPageSize2Pow);
   MOZ_ASSERT(need_pages > 0);
   MOZ_ASSERT(need_pages <= total_pages);
   rem_pages = total_pages - need_pages;
 
   for (i = 0; i < need_pages; i++) {
     // Commit decommitted pages if necessary.  If a decommitted
     // page is encountered, commit all needed adjacent decommitted
     // pages in one operation, in order to reduce system call
@@ -2384,18 +2436,18 @@ arena_t::SplitRun(arena_run_t* aRun, siz
         MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
                      chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
 
         chunk->map[run_ind + i + j].bits &= ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
       }
 
 #ifdef MALLOC_DECOMMIT
       bool committed = pages_commit(
-        (void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
-        j << pagesize_2pow);
+        (void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
+        j << gPageSize2Pow);
       // pages_commit zeroes pages, so mark them as such if it succeeded.
       // That's checked further below to avoid manually zeroing the pages.
       for (size_t k = 0; k < j; k++) {
         chunk->map[run_ind + i + k].bits |=
           committed ? CHUNK_MAP_ZEROED : CHUNK_MAP_DECOMMITTED;
       }
       if (!committed) {
         return false;
@@ -2406,31 +2458,31 @@ arena_t::SplitRun(arena_run_t* aRun, siz
     }
   }
 
   mRunsAvail.Remove(&chunk->map[run_ind]);
 
   // Keep track of trailing unused pages for later use.
   if (rem_pages > 0) {
     chunk->map[run_ind + need_pages].bits =
-      (rem_pages << pagesize_2pow) |
-      (chunk->map[run_ind + need_pages].bits & pagesize_mask);
+      (rem_pages << gPageSize2Pow) |
+      (chunk->map[run_ind + need_pages].bits & gPageSizeMask);
     chunk->map[run_ind + total_pages - 1].bits =
-      (rem_pages << pagesize_2pow) |
-      (chunk->map[run_ind + total_pages - 1].bits & pagesize_mask);
+      (rem_pages << gPageSize2Pow) |
+      (chunk->map[run_ind + total_pages - 1].bits & gPageSizeMask);
     mRunsAvail.Insert(&chunk->map[run_ind + need_pages]);
   }
 
   for (i = 0; i < need_pages; i++) {
     // Zero if necessary.
     if (aZero) {
       if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
-        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
+        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
                0,
-               pagesize);
+               gPageSize);
         // CHUNK_MAP_ZEROED is cleared below.
       }
     }
 
     // Update dirty page accounting.
     if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
       chunk->ndirty--;
       mNumDirty--;
@@ -2471,48 +2523,47 @@ arena_t::InitChunk(arena_chunk_t* aChunk
   // all it can contain is an arena chunk header (which we're overwriting),
   // and zeroed or poisoned memory (because a recycled arena chunk will
   // have been emptied before being recycled). In that case, we can get
   // away with reusing the chunk as-is, marking all runs as madvised.
 
   size_t flags =
     aZeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED : CHUNK_MAP_MADVISED;
 
-  mStats.mapped += chunksize;
+  mStats.mapped += kChunkSize;
 
   aChunk->arena = this;
 
   // Claim that no pages are in use, since the header is merely overhead.
   aChunk->ndirty = 0;
 
   // Initialize the map to contain one maximal free untouched run.
 #ifdef MALLOC_DECOMMIT
   arena_run_t* run =
-    (arena_run_t*)(uintptr_t(aChunk) +
-                   (arena_chunk_header_npages << pagesize_2pow));
+    (arena_run_t*)(uintptr_t(aChunk) + (gChunkHeaderNumPages << gPageSize2Pow));
 #endif
 
-  for (i = 0; i < arena_chunk_header_npages; i++) {
+  for (i = 0; i < gChunkHeaderNumPages; i++) {
     aChunk->map[i].bits = 0;
   }
-  aChunk->map[i].bits = arena_maxclass | flags;
-  for (i++; i < chunk_npages - 1; i++) {
+  aChunk->map[i].bits = gMaxLargeClass | flags;
+  for (i++; i < gChunkNumPages - 1; i++) {
     aChunk->map[i].bits = flags;
   }
-  aChunk->map[chunk_npages - 1].bits = arena_maxclass | flags;
+  aChunk->map[gChunkNumPages - 1].bits = gMaxLargeClass | flags;
 
 #ifdef MALLOC_DECOMMIT
   // Start out decommitted, in order to force a closer correspondence
   // between dirty pages and committed untouched pages.
-  pages_decommit(run, arena_maxclass);
+  pages_decommit(run, gMaxLargeClass);
 #endif
-  mStats.committed += arena_chunk_header_npages;
+  mStats.committed += gChunkHeaderNumPages;
 
   // Insert the run into the tree of available runs.
-  mRunsAvail.Insert(&aChunk->map[arena_chunk_header_npages]);
+  mRunsAvail.Insert(&aChunk->map[gChunkHeaderNumPages]);
 
 #ifdef MALLOC_DOUBLE_PURGE
   new (&aChunk->chunks_madvised_elem) DoublyLinkedListElement<arena_chunk_t>();
 #endif
 }
 
 void
 arena_t::DeallocChunk(arena_chunk_t* aChunk)
@@ -2525,69 +2576,69 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
     }
 
 #ifdef MALLOC_DOUBLE_PURGE
     if (mChunksMAdvised.ElementProbablyInList(mSpare)) {
       mChunksMAdvised.remove(mSpare);
     }
 #endif
 
-    chunk_dealloc((void*)mSpare, chunksize, ARENA_CHUNK);
-    mStats.mapped -= chunksize;
-    mStats.committed -= arena_chunk_header_npages;
+    chunk_dealloc((void*)mSpare, kChunkSize, ARENA_CHUNK);
+    mStats.mapped -= kChunkSize;
+    mStats.committed -= gChunkHeaderNumPages;
   }
 
   // Remove run from the tree of available runs, so that the arena does not use it.
   // Dirty page flushing only uses the tree of dirty chunks, so leaving this
   // chunk in the chunks_* trees is sufficient for that purpose.
-  mRunsAvail.Remove(&aChunk->map[arena_chunk_header_npages]);
+  mRunsAvail.Remove(&aChunk->map[gChunkHeaderNumPages]);
 
   mSpare = aChunk;
 }
 
 arena_run_t*
 arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
 {
   arena_run_t* run;
   arena_chunk_map_t* mapelm;
   arena_chunk_map_t key;
 
-  MOZ_ASSERT(aSize <= arena_maxclass);
-  MOZ_ASSERT((aSize & pagesize_mask) == 0);
+  MOZ_ASSERT(aSize <= gMaxLargeClass);
+  MOZ_ASSERT((aSize & gPageSizeMask) == 0);
 
   // Search the arena's chunks for the lowest best fit.
   key.bits = aSize | CHUNK_MAP_KEY;
   mapelm = mRunsAvail.SearchOrNext(&key);
   if (mapelm) {
     arena_chunk_t* chunk = GetChunkForPtr(mapelm);
     size_t pageind =
       (uintptr_t(mapelm) - uintptr_t(chunk->map)) / sizeof(arena_chunk_map_t);
 
-    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
+    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << gPageSize2Pow));
   } else if (mSpare) {
     // Use the spare.
     arena_chunk_t* chunk = mSpare;
     mSpare = nullptr;
     run = (arena_run_t*)(uintptr_t(chunk) +
-                         (arena_chunk_header_npages << pagesize_2pow));
+                         (gChunkHeaderNumPages << gPageSize2Pow));
     // Insert the run into the tree of available runs.
-    mRunsAvail.Insert(&chunk->map[arena_chunk_header_npages]);
+    mRunsAvail.Insert(&chunk->map[gChunkHeaderNumPages]);
   } else {
     // No usable runs.  Create a new chunk from which to allocate
     // the run.
     bool zeroed;
     arena_chunk_t* chunk =
-      (arena_chunk_t*)chunk_alloc(chunksize, chunksize, false, &zeroed);
+      (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false, &zeroed);
     if (!chunk) {
       return nullptr;
     }
 
     InitChunk(chunk, zeroed);
     run = (arena_run_t*)(uintptr_t(chunk) +
-                         (arena_chunk_header_npages << pagesize_2pow));
+                         (gChunkHeaderNumPages << gPageSize2Pow));
   }
   // Update page map.
   return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
 }
 
 void
 arena_t::Purge(bool aAll)
 {
@@ -2610,49 +2661,49 @@ arena_t::Purge(bool aAll)
   // purged.
   while (mNumDirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
     bool madvised = false;
 #endif
     chunk = mChunksDirty.Last();
     MOZ_DIAGNOSTIC_ASSERT(chunk);
 
-    for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
-      MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
+    for (i = gChunkNumPages - 1; chunk->ndirty > 0; i--) {
+      MOZ_DIAGNOSTIC_ASSERT(i >= gChunkHeaderNumPages);
 
       if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
         const size_t free_operation = CHUNK_MAP_DECOMMITTED;
 #else
         const size_t free_operation = CHUNK_MAP_MADVISED;
 #endif
         MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                    0);
         chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
         // Find adjacent dirty run(s).
-        for (npages = 1; i > arena_chunk_header_npages &&
+        for (npages = 1; i > gChunkHeaderNumPages &&
                          (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
              npages++) {
           i--;
           MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                      0);
           chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
         }
         chunk->ndirty -= npages;
         mNumDirty -= npages;
 
 #ifdef MALLOC_DECOMMIT
-        pages_decommit((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
-                       (npages << pagesize_2pow));
+        pages_decommit((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
+                       (npages << gPageSize2Pow));
 #endif
         mStats.committed -= npages;
 
 #ifndef MALLOC_DECOMMIT
-        madvise((void*)(uintptr_t(chunk) + (i << pagesize_2pow)),
-                (npages << pagesize_2pow),
+        madvise((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
+                (npages << gPageSize2Pow),
                 MADV_FREE);
 #ifdef MALLOC_DOUBLE_PURGE
         madvised = true;
 #endif
 #endif
         if (mNumDirty <= (dirty_max >> 1)) {
           break;
         }
@@ -2677,25 +2728,25 @@ arena_t::Purge(bool aAll)
 
 void
 arena_t::DallocRun(arena_run_t* aRun, bool aDirty)
 {
   arena_chunk_t* chunk;
   size_t size, run_ind, run_pages;
 
   chunk = GetChunkForPtr(aRun);
-  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
-  MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
-  MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
+  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind >= gChunkHeaderNumPages);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind < gChunkNumPages);
   if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
-    size = chunk->map[run_ind].bits & ~pagesize_mask;
+    size = chunk->map[run_ind].bits & ~gPageSizeMask;
   } else {
     size = aRun->bin->mRunSize;
   }
-  run_pages = (size >> pagesize_2pow);
+  run_pages = (size >> gPageSize2Pow);
 
   // Mark pages as unallocated in the chunk map.
   if (aDirty) {
     size_t i;
 
     for (i = 0; i < run_pages; i++) {
       MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) ==
                             0);
@@ -2709,85 +2760,85 @@ arena_t::DallocRun(arena_run_t* aRun, bo
     mNumDirty += run_pages;
   } else {
     size_t i;
 
     for (i = 0; i < run_pages; i++) {
       chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
     }
   }
-  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & pagesize_mask);
+  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & gPageSizeMask);
   chunk->map[run_ind + run_pages - 1].bits =
-    size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
+    size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
 
   // Try to coalesce forward.
-  if (run_ind + run_pages < chunk_npages &&
+  if (run_ind + run_pages < gChunkNumPages &&
       (chunk->map[run_ind + run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
-    size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~pagesize_mask;
+    size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~gPageSizeMask;
 
     // Remove successor from tree of available runs; the coalesced run is
     // inserted later.
     mRunsAvail.Remove(&chunk->map[run_ind + run_pages]);
 
     size += nrun_size;
-    run_pages = size >> pagesize_2pow;
+    run_pages = size >> gPageSize2Pow;
 
     MOZ_DIAGNOSTIC_ASSERT(
-      (chunk->map[run_ind + run_pages - 1].bits & ~pagesize_mask) == nrun_size);
+      (chunk->map[run_ind + run_pages - 1].bits & ~gPageSizeMask) == nrun_size);
     chunk->map[run_ind].bits =
-      size | (chunk->map[run_ind].bits & pagesize_mask);
+      size | (chunk->map[run_ind].bits & gPageSizeMask);
     chunk->map[run_ind + run_pages - 1].bits =
-      size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
+      size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
   }
 
   // Try to coalesce backward.
-  if (run_ind > arena_chunk_header_npages &&
+  if (run_ind > gChunkHeaderNumPages &&
       (chunk->map[run_ind - 1].bits & CHUNK_MAP_ALLOCATED) == 0) {
-    size_t prun_size = chunk->map[run_ind - 1].bits & ~pagesize_mask;
-
-    run_ind -= prun_size >> pagesize_2pow;
+    size_t prun_size = chunk->map[run_ind - 1].bits & ~gPageSizeMask;
+
+    run_ind -= prun_size >> gPageSize2Pow;
 
     // Remove predecessor from tree of available runs; the coalesced run is
     // inserted later.
     mRunsAvail.Remove(&chunk->map[run_ind]);
 
     size += prun_size;
-    run_pages = size >> pagesize_2pow;
-
-    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
+    run_pages = size >> gPageSize2Pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~gPageSizeMask) ==
                           prun_size);
     chunk->map[run_ind].bits =
-      size | (chunk->map[run_ind].bits & pagesize_mask);
+      size | (chunk->map[run_ind].bits & gPageSizeMask);
     chunk->map[run_ind + run_pages - 1].bits =
-      size | (chunk->map[run_ind + run_pages - 1].bits & pagesize_mask);
+      size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
   }
 
   // Insert into tree of available runs, now that coalescing is complete.
   mRunsAvail.Insert(&chunk->map[run_ind]);
 
   // Deallocate chunk if it is now completely unused.
-  if ((chunk->map[arena_chunk_header_npages].bits &
-       (~pagesize_mask | CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
+  if ((chunk->map[gChunkHeaderNumPages].bits &
+       (~gPageSizeMask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass) {
     DeallocChunk(chunk);
   }
 
   // Enforce mMaxDirty.
   if (mNumDirty > mMaxDirty) {
     Purge(false);
   }
 }
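// DallocRun() above repeatedly writes expressions of the form
// `size | (bits & gPageSizeMask)`: the run size occupies the bits above the
// page mask and the per-page flags the bits below it, so the first and last
// map entries of a (possibly coalesced) run can be rewritten independently.
// A minimal sketch of that packing, using stand-in constants rather than the
// real mozjemalloc ones:
#include <cstddef>

namespace mapbits_sketch {
constexpr size_t kSketchPageSize = 4096; // assumed 4 KiB page
constexpr size_t kSketchPageSizeMask = kSketchPageSize - 1;
constexpr size_t kSketchMapAllocated = 0x01; // stand-in for CHUNK_MAP_ALLOCATED
constexpr size_t kSketchMapLarge = 0x02;     // stand-in for CHUNK_MAP_LARGE

// Pack a page-aligned run size together with flag bits.
constexpr size_t
Pack(size_t aRunSize, size_t aFlags)
{
  return (aRunSize & ~kSketchPageSizeMask) | (aFlags & kSketchPageSizeMask);
}

static_assert((Pack(3 * kSketchPageSize, kSketchMapLarge | kSketchMapAllocated) &
               ~kSketchPageSizeMask) == 3 * kSketchPageSize,
              "the run size survives packing");
static_assert((Pack(3 * kSketchPageSize, kSketchMapLarge) &
               kSketchPageSizeMask) == kSketchMapLarge,
              "the flag bits survive packing");
} // namespace mapbits_sketch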
 
 void
 arena_t::TrimRunHead(arena_chunk_t* aChunk,
                      arena_run_t* aRun,
                      size_t aOldSize,
                      size_t aNewSize)
 {
-  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t head_npages = (aOldSize - aNewSize) >> pagesize_2pow;
+  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t head_npages = (aOldSize - aNewSize) >> gPageSize2Pow;
 
   MOZ_ASSERT(aOldSize > aNewSize);
 
   // Update the chunk map so that arena_t::DallocRun() can treat the
   // leading run as separately allocated.
   aChunk->map[pageind].bits =
     (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   aChunk->map[pageind + head_npages].bits =
@@ -2798,18 +2849,18 @@ arena_t::TrimRunHead(arena_chunk_t* aChu
 
 void
 arena_t::TrimRunTail(arena_chunk_t* aChunk,
                      arena_run_t* aRun,
                      size_t aOldSize,
                      size_t aNewSize,
                      bool aDirty)
 {
-  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t npages = aNewSize >> pagesize_2pow;
+  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t npages = aNewSize >> gPageSize2Pow;
 
   MOZ_ASSERT(aOldSize > aNewSize);
 
   // Update the chunk map so that arena_t::DallocRun() can treat the
   // trailing run as separately allocated.
   aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
   aChunk->map[pageind + npages].bits =
     (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
@@ -2824,17 +2875,17 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
   arena_run_t* run;
   unsigned i, remainder;
 
   // Look for a usable run.
   mapelm = aBin->mNonFullRuns.First();
   if (mapelm) {
     // run is guaranteed to have available space.
     aBin->mNonFullRuns.Remove(mapelm);
-    run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
+    run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
     return run;
   }
   // No existing runs have any space available.
 
   // Allocate a new run.
   run = AllocRun(aBin, aBin->mRunSize, false, false);
   if (!run) {
     return nullptr;
@@ -2899,31 +2950,31 @@ arena_t::MallocBinHard(arena_bin_t* aBin
   MOZ_DIAGNOSTIC_ASSERT(aBin->mCurrentRun->nfree > 0);
 
   return MallocBinEasy(aBin, aBin->mCurrentRun);
 }
 
 // Calculate bin->mRunSize such that it meets the following constraints:
 //
 //   *) bin->mRunSize >= min_run_size
-//   *) bin->mRunSize <= arena_maxclass
-//   *) bin->mRunSize <= RUN_MAX_SMALL
+//   *) bin->mRunSize <= gMaxLargeClass
+//   *) bin->mRunSize <= gMaxBinClass
 //   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 //
 // bin->mRunNumRegions, bin->mRunNumRegionsMask, and bin->mRunFirstRegionOffset are
 // also calculated here, since these settings are all interdependent.
 static size_t
 arena_bin_run_size_calc(arena_bin_t* bin, size_t min_run_size)
 {
   size_t try_run_size, good_run_size;
   unsigned good_nregs, good_mask_nelms, good_reg0_offset;
   unsigned try_nregs, try_mask_nelms, try_reg0_offset;
 
-  MOZ_ASSERT(min_run_size >= pagesize);
-  MOZ_ASSERT(min_run_size <= arena_maxclass);
+  MOZ_ASSERT(min_run_size >= gPageSize);
+  MOZ_ASSERT(min_run_size <= gMaxLargeClass);
 
   // Calculate known-valid settings before entering the mRunSize
   // expansion loop, so that the first part of the loop always copies
   // valid settings.
   //
   // The do..while loop iteratively reduces the number of regions until
   // the run header and the regions no longer overlap.  A closed formula
   // would be quite messy, since there is an interdependency between the
@@ -2944,28 +2995,28 @@ arena_bin_run_size_calc(arena_bin_t* bin
   do {
     // Copy valid settings before trying more aggressive settings.
     good_run_size = try_run_size;
     good_nregs = try_nregs;
     good_mask_nelms = try_mask_nelms;
     good_reg0_offset = try_reg0_offset;
 
     // Try more aggressive settings.
-    try_run_size += pagesize;
+    try_run_size += gPageSize;
     try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->mSizeClass) +
                 1; // Counter-act try_nregs-- in loop.
     do {
       try_nregs--;
       try_mask_nelms =
         (try_nregs >> (LOG2(sizeof(int)) + 3)) +
         ((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
       try_reg0_offset = try_run_size - (try_nregs * bin->mSizeClass);
     } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) >
              try_reg0_offset);
-  } while (try_run_size <= arena_maxclass &&
+  } while (try_run_size <= gMaxLargeClass &&
            RUN_MAX_OVRHD * (bin->mSizeClass << 3) > RUN_MAX_OVRHD_RELAX &&
            (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
 
   MOZ_ASSERT(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1)) <=
              good_reg0_offset);
   MOZ_ASSERT((good_mask_nelms << (LOG2(sizeof(int)) + 3)) >= good_nregs);
 
   // Copy final settings.
@@ -2978,33 +3029,32 @@ arena_bin_run_size_calc(arena_bin_t* bin
 }
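// The inner do..while above starts from an over-estimate of the region count
// and decrements it until the fixed run header plus one mask word per 32
// regions no longer overlaps the first region. A standalone sketch of the
// same fixed point; kSketchHeaderSize stands in for sizeof(arena_run_t) and
// is not the real value:
#include <cstddef>

namespace runsize_sketch {
constexpr size_t kSketchHeaderSize = 48; // stand-in for sizeof(arena_run_t)
constexpr size_t kBitsPerMaskWord = 32;  // assumes a 4-byte unsigned

struct RunLayout
{
  size_t nregs;      // regions that fit after the header
  size_t maskWords;  // words in the occupancy bitmask
  size_t reg0Offset; // offset of the first region within the run
};

RunLayout
ComputeLayout(size_t aRunSize, size_t aRegSize)
{
  size_t nregs = (aRunSize - kSketchHeaderSize) / aRegSize + 1;
  size_t maskWords, reg0Offset;
  do {
    nregs--;
    maskWords = (nregs + kBitsPerMaskWord - 1) / kBitsPerMaskWord;
    reg0Offset = aRunSize - nregs * aRegSize;
  } while (kSketchHeaderSize + sizeof(unsigned) * (maskWords - 1) > reg0Offset);
  return { nregs, maskWords, reg0Offset };
}

// E.g. ComputeLayout(4096, 16) settles on 251 regions, leaving the first
// 80 bytes of the run for the header and bitmask.
} // namespace runsize_sketch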
 
 void*
 arena_t::MallocSmall(size_t aSize, bool aZero)
 {
   void* ret;
   arena_bin_t* bin;
   arena_run_t* run;
-
-  if (aSize < small_min) {
-    // Tiny.
-    aSize = RoundUpPow2(aSize);
-    if (aSize < (1U << TINY_MIN_2POW)) {
-      aSize = 1U << TINY_MIN_2POW;
-    }
-    bin = &mBins[FloorLog2(aSize >> TINY_MIN_2POW)];
-  } else if (aSize <= small_max) {
-    // Quantum-spaced.
-    aSize = QUANTUM_CEILING(aSize);
-    bin = &mBins[ntbins + (aSize >> QUANTUM_2POW_MIN) - 1];
-  } else {
-    // Sub-page.
-    aSize = RoundUpPow2(aSize);
-    bin = &mBins[ntbins + nqbins +
-                 (FloorLog2(aSize >> SMALL_MAX_2POW_DEFAULT) - 1)];
+  SizeClass sizeClass(aSize);
+  aSize = sizeClass.Size();
+
+  switch (sizeClass.Type()) {
+    case SizeClass::Tiny:
+      bin = &mBins[FloorLog2(aSize / kMinTinyClass)];
+      break;
+    case SizeClass::Quantum:
+      bin = &mBins[kNumTinyClasses + (aSize / kQuantum) - 1];
+      break;
+    case SizeClass::SubPage:
+      bin = &mBins[kNumTinyClasses + kNumQuantumClasses +
+                   (FloorLog2(aSize / kMaxQuantumClass) - 1)];
+      break;
+    default:
+      MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
   }
   MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);
 
   {
     MutexAutoLock lock(mLock);
     if ((run = bin->mCurrentRun) && run->nfree > 0) {
       ret = MallocBinEasy(bin, run);
     } else {
@@ -3059,56 +3109,56 @@ arena_t::MallocLarge(size_t aSize, bool 
   return ret;
 }
 
 void*
 arena_t::Malloc(size_t aSize, bool aZero)
 {
   MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
   MOZ_ASSERT(aSize != 0);
-  MOZ_ASSERT(QUANTUM_CEILING(aSize) <= arena_maxclass);
-
-  return (aSize <= bin_maxclass) ? MallocSmall(aSize, aZero)
+  MOZ_ASSERT(QUANTUM_CEILING(aSize) <= gMaxLargeClass);
+
+  return (aSize <= gMaxBinClass) ? MallocSmall(aSize, aZero)
                                  : MallocLarge(aSize, aZero);
 }
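// Together with imalloc() below, this gives a three-way dispatch: requests up
// to the largest bin class go through MallocSmall(), larger requests that
// still fit in an in-chunk run go through MallocLarge(), and anything bigger
// falls through to huge_malloc(). A sketch with illustrative thresholds (the
// real gMaxBinClass and gMaxLargeClass are derived from the page and chunk
// sizes):
#include <cstddef>

namespace tier_sketch {
enum class Tier { Small, Large, Huge };

constexpr size_t kSketchMaxBinClass = 2048;          // stand-in for gMaxBinClass
constexpr size_t kSketchMaxLargeClass = 1024 * 1024; // stand-in for gMaxLargeClass

constexpr Tier
Classify(size_t aSize)
{
  return aSize <= kSketchMaxBinClass
           ? Tier::Small
           : aSize <= kSketchMaxLargeClass ? Tier::Large : Tier::Huge;
}

static_assert(Classify(32) == Tier::Small, "fits in a bin");
static_assert(Classify(16 * 1024) == Tier::Large, "a run of whole pages");
static_assert(Classify(4 * 1024 * 1024) == Tier::Huge, "gets its own chunk(s)");
} // namespace tier_sketch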
 
 static inline void*
 imalloc(size_t aSize, bool aZero, arena_t* aArena)
 {
   MOZ_ASSERT(aSize != 0);
 
-  if (aSize <= arena_maxclass) {
+  if (aSize <= gMaxLargeClass) {
     aArena = aArena ? aArena : choose_arena(aSize);
     return aArena->Malloc(aSize, aZero);
   }
   return huge_malloc(aSize, aZero);
 }
 
 // Only handles large allocations that require more than page alignment.
 void*
 arena_t::Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize)
 {
   void* ret;
   size_t offset;
   arena_chunk_t* chunk;
 
-  MOZ_ASSERT((aSize & pagesize_mask) == 0);
-  MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
+  MOZ_ASSERT((aSize & gPageSizeMask) == 0);
+  MOZ_ASSERT((aAlignment & gPageSizeMask) == 0);
 
   {
     MutexAutoLock lock(mLock);
     ret = AllocRun(nullptr, aAllocSize, true, false);
     if (!ret) {
       return nullptr;
     }
 
     chunk = GetChunkForPtr(ret);
 
     offset = uintptr_t(ret) & (aAlignment - 1);
-    MOZ_ASSERT((offset & pagesize_mask) == 0);
+    MOZ_ASSERT((offset & gPageSizeMask) == 0);
     MOZ_ASSERT(offset < aAllocSize);
     if (offset == 0) {
       TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
     } else {
       size_t leadsize, trailsize;
 
       leadsize = aAlignment - offset;
       if (leadsize > 0) {
@@ -3162,18 +3212,18 @@ ipalloc(size_t aAlignment, size_t aSize,
 
   // (ceil_size < aSize) protects against the combination of maximal
   // alignment and size greater than maximal alignment.
   if (ceil_size < aSize) {
     // size_t overflow.
     return nullptr;
   }
 
-  if (ceil_size <= pagesize ||
-      (aAlignment <= pagesize && ceil_size <= arena_maxclass)) {
+  if (ceil_size <= gPageSize ||
+      (aAlignment <= gPageSize && ceil_size <= gMaxLargeClass)) {
     aArena = aArena ? aArena : choose_arena(aSize);
     ret = aArena->Malloc(ceil_size, false);
   } else {
     size_t run_size;
 
     // We can't achieve sub-page alignment, so round up alignment
     // permanently; it makes later calculations simpler.
     aAlignment = PAGE_CEILING(aAlignment);
@@ -3192,32 +3242,32 @@ ipalloc(size_t aAlignment, size_t aSize,
     if (ceil_size < aSize || ceil_size + aAlignment < ceil_size) {
       // size_t overflow.
       return nullptr;
     }
 
     // Calculate the size of the over-size run that arena_palloc()
     // would need to allocate in order to guarantee the alignment.
     if (ceil_size >= aAlignment) {
-      run_size = ceil_size + aAlignment - pagesize;
+      run_size = ceil_size + aAlignment - gPageSize;
     } else {
       // It is possible that (aAlignment << 1) will cause
       // overflow, but it doesn't matter because we also
       // subtract pagesize, which in the case of overflow
       // leaves us with a very large run_size.  That causes
       // the first conditional below to fail, which means
       // that the bogus run_size value never gets used for
       // anything important.
-      run_size = (aAlignment << 1) - pagesize;
+      run_size = (aAlignment << 1) - gPageSize;
     }
 
-    if (run_size <= arena_maxclass) {
+    if (run_size <= gMaxLargeClass) {
       aArena = aArena ? aArena : choose_arena(aSize);
       ret = aArena->Palloc(aAlignment, ceil_size, run_size);
-    } else if (aAlignment <= chunksize) {
+    } else if (aAlignment <= kChunkSize) {
       ret = huge_malloc(ceil_size, false);
     } else {
       ret = huge_palloc(ceil_size, aAlignment, false);
     }
   }
 
   MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
   return ret;
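// The over-allocation above is what makes the alignment achievable: with a
// page-aligned run of `ceil_size + aAlignment - pagesize` bytes, some address
// in the run is aAlignment-aligned and still has ceil_size bytes before the
// run ends, so the head and tail can simply be trimmed. A small sketch of
// that arithmetic (the page size and the run start are illustrative
// assumptions):
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace align_sketch {
constexpr size_t kSketchPageSize = 4096; // assumed 4 KiB page

// First aAlignment-aligned address at or after aRunStart.
constexpr uintptr_t
AlignUp(uintptr_t aRunStart, size_t aAlignment)
{
  return (aRunStart + aAlignment - 1) & ~(uintptr_t(aAlignment) - 1);
}

void
CheckOverAllocation(uintptr_t aRunStart, size_t aSize, size_t aAlignment)
{
  assert(aRunStart % kSketchPageSize == 0); // runs are page-aligned
  assert(aAlignment >= kSketchPageSize && aAlignment % kSketchPageSize == 0);

  // Mirrors `run_size = ceil_size + aAlignment - gPageSize` above.
  size_t runSize = aSize + aAlignment - kSketchPageSize;
  uintptr_t aligned = AlignUp(aRunStart, aAlignment);

  // The aligned block of aSize bytes fits inside the over-sized run.
  assert(aligned + aSize <= aRunStart + runSize);
}

// E.g. a 16 KiB request with 8 KiB alignment needs at most a 20 KiB run.
} // namespace align_sketch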
@@ -3230,25 +3280,25 @@ arena_salloc(const void* ptr)
   size_t ret;
   arena_chunk_t* chunk;
   size_t pageind, mapbits;
 
   MOZ_ASSERT(ptr);
   MOZ_ASSERT(GetChunkOffsetForPtr(ptr) != 0);
 
   chunk = GetChunkForPtr(ptr);
-  pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+  pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> gPageSize2Pow);
   mapbits = chunk->map[pageind].bits;
   MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
   if ((mapbits & CHUNK_MAP_LARGE) == 0) {
-    arena_run_t* run = (arena_run_t*)(mapbits & ~pagesize_mask);
+    arena_run_t* run = (arena_run_t*)(mapbits & ~gPageSizeMask);
     MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
     ret = run->bin->mSizeClass;
   } else {
-    ret = mapbits & ~pagesize_mask;
+    ret = mapbits & ~gPageSizeMask;
     MOZ_DIAGNOSTIC_ASSERT(ret != 0);
   }
 
   return ret;
 }
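// arena_salloc() needs only two facts to recover a size: which page of the
// chunk the pointer lands in, and whether that page's map entry has
// CHUNK_MAP_LARGE set. Because chunks are aligned to their own size, the page
// index is just the low bits of the address shifted by the page size. A
// sketch with assumed chunk and page sizes:
#include <cstddef>
#include <cstdint>

namespace salloc_sketch {
constexpr size_t kSketchPageSize2Pow = 12;      // assumed 4 KiB pages
constexpr uintptr_t kSketchChunkSize = 1 << 20; // assumed 1 MiB chunks

constexpr size_t
PageIndexInChunk(uintptr_t aPtr)
{
  return (aPtr & (kSketchChunkSize - 1)) >> kSketchPageSize2Pow;
}

static_assert(PageIndexInChunk(0x100000) == 0, "the chunk base is page 0");
static_assert(PageIndexInChunk(0x100000 + 5 * 4096 + 123) == 5,
              "interior pointers map to their page");
} // namespace salloc_sketch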
 
 // Validate ptr before assuming that it points to an allocation.  Currently,
 // the following validation is performed:
@@ -3352,18 +3402,18 @@ MozJemalloc::jemalloc_ptr_info(const voi
   if (!gChunkRTree.Get(chunk)) {
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
   // Get the page number within the chunk.
-  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
-  if (pageind < arena_chunk_header_npages) {
+  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
+  if (pageind < gChunkHeaderNumPages) {
     // Within the chunk header.
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   size_t mapbits = chunk->map[pageind].bits;
 
   if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
@@ -3375,56 +3425,56 @@ MozJemalloc::jemalloc_ptr_info(const voi
     } else if (mapbits & CHUNK_MAP_MADVISED) {
       tag = TagFreedPageMadvised;
     } else if (mapbits & CHUNK_MAP_ZEROED) {
       tag = TagFreedPageZeroed;
     } else {
       MOZ_CRASH();
     }
 
-    void* pageaddr = (void*)(uintptr_t(aPtr) & ~pagesize_mask);
-    *aInfo = { tag, pageaddr, pagesize };
+    void* pageaddr = (void*)(uintptr_t(aPtr) & ~gPageSizeMask);
+    *aInfo = { tag, pageaddr, gPageSize };
     return;
   }
 
   if (mapbits & CHUNK_MAP_LARGE) {
     // It's a large allocation. Only the first page of a large
     // allocation contains its size, so if the address is not in
     // the first page, scan back to find the allocation size.
     size_t size;
     while (true) {
-      size = mapbits & ~pagesize_mask;
+      size = mapbits & ~gPageSizeMask;
       if (size != 0) {
         break;
       }
 
       // The following two return paths shouldn't occur in
       // practice unless there is heap corruption.
       pageind--;
-      MOZ_DIAGNOSTIC_ASSERT(pageind >= arena_chunk_header_npages);
-      if (pageind < arena_chunk_header_npages) {
+      MOZ_DIAGNOSTIC_ASSERT(pageind >= gChunkHeaderNumPages);
+      if (pageind < gChunkHeaderNumPages) {
         *aInfo = { TagUnknown, nullptr, 0 };
         return;
       }
 
       mapbits = chunk->map[pageind].bits;
       MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
       if (!(mapbits & CHUNK_MAP_LARGE)) {
         *aInfo = { TagUnknown, nullptr, 0 };
         return;
       }
     }
 
-    void* addr = ((char*)chunk) + (pageind << pagesize_2pow);
+    void* addr = ((char*)chunk) + (pageind << gPageSize2Pow);
     *aInfo = { TagLiveLarge, addr, size };
     return;
   }
 
   // It must be a small allocation.
-  auto run = (arena_run_t*)(mapbits & ~pagesize_mask);
+  auto run = (arena_run_t*)(mapbits & ~gPageSizeMask);
   MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
 
   // The allocation size is stored in the run metadata.
   size_t size = run->bin->mSizeClass;
 
   // Address of the first possible pointer in the run after its headers.
   uintptr_t reg0_addr = (uintptr_t)run + run->bin->mRunFirstRegionOffset;
   if (aPtr < (void*)reg0_addr) {
@@ -3463,17 +3513,17 @@ void
 arena_t::DallocSmall(arena_chunk_t* aChunk,
                      void* aPtr,
                      arena_chunk_map_t* aMapElm)
 {
   arena_run_t* run;
   arena_bin_t* bin;
   size_t size;
 
-  run = (arena_run_t*)(aMapElm->bits & ~pagesize_mask);
+  run = (arena_run_t*)(aMapElm->bits & ~gPageSizeMask);
   MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
   bin = run->bin;
   size = bin->mSizeClass;
   MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >=
                         uintptr_t(run) + bin->mRunFirstRegionOffset);
   MOZ_DIAGNOSTIC_ASSERT(
     (uintptr_t(aPtr) - (uintptr_t(run) + bin->mRunFirstRegionOffset)) % size ==
     0);
@@ -3484,17 +3534,17 @@ arena_t::DallocSmall(arena_chunk_t* aChu
   run->nfree++;
 
   if (run->nfree == bin->mRunNumRegions) {
     // Deallocate run.
     if (run == bin->mCurrentRun) {
       bin->mCurrentRun = nullptr;
     } else if (bin->mRunNumRegions != 1) {
       size_t run_pageind =
-        (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+        (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
       arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
 
       // This block's conditional is necessary because if the
       // run only contains one region, then it never gets
       // inserted into the non-full runs tree.
       MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == run_mapelm);
       bin->mNonFullRuns.Remove(run_mapelm);
     }
@@ -3509,42 +3559,42 @@ arena_t::DallocSmall(arena_chunk_t* aChu
     if (!bin->mCurrentRun) {
       bin->mCurrentRun = run;
     } else if (uintptr_t(run) < uintptr_t(bin->mCurrentRun)) {
       // Switch mCurrentRun.
       if (bin->mCurrentRun->nfree > 0) {
         arena_chunk_t* runcur_chunk = GetChunkForPtr(bin->mCurrentRun);
         size_t runcur_pageind =
           (uintptr_t(bin->mCurrentRun) - uintptr_t(runcur_chunk)) >>
-          pagesize_2pow;
+          gPageSize2Pow;
         arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];
 
         // Insert runcur.
         MOZ_DIAGNOSTIC_ASSERT(!bin->mNonFullRuns.Search(runcur_mapelm));
         bin->mNonFullRuns.Insert(runcur_mapelm);
       }
       bin->mCurrentRun = run;
     } else {
       size_t run_pageind =
-        (uintptr_t(run) - uintptr_t(aChunk)) >> pagesize_2pow;
+        (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
       arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
 
       MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == nullptr);
       bin->mNonFullRuns.Insert(run_mapelm);
     }
   }
   mStats.allocated_small -= size;
 }
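// The pointer checks at the top of DallocSmall() encode the run layout:
// region i of a run lives at run + mRunFirstRegionOffset + i * mSizeClass, so
// a freed pointer must land on one of those boundaries. A sketch of the
// region-index arithmetic with made-up layout values:
#include <cstddef>
#include <cstdint>

namespace region_sketch {
constexpr size_t kSketchReg0Offset = 64; // stand-in for mRunFirstRegionOffset
constexpr size_t kSketchSizeClass = 32;  // stand-in for mSizeClass

constexpr size_t
RegionIndex(uintptr_t aRun, uintptr_t aPtr)
{
  return (aPtr - (aRun + kSketchReg0Offset)) / kSketchSizeClass;
}

static_assert(RegionIndex(0x10000, 0x10000 + 64) == 0, "the first region");
static_assert(RegionIndex(0x10000, 0x10000 + 64 + 7 * 32) == 7,
              "regions are evenly spaced");
} // namespace region_sketch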
 
 void
 arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr)
 {
-  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & pagesize_mask) == 0);
-  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t size = aChunk->map[pageind].bits & ~pagesize_mask;
+  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & gPageSizeMask) == 0);
+  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t size = aChunk->map[pageind].bits & ~gPageSizeMask;
 
   memset(aPtr, kAllocPoison, size);
   mStats.allocated_large -= size;
 
   DallocRun((arena_run_t*)aPtr, true);
 }
 
 static inline void
@@ -3555,17 +3605,17 @@ arena_dalloc(void* aPtr, size_t aOffset)
   MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);
 
   auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
   auto arena = chunk->arena;
   MOZ_ASSERT(arena);
   MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
   MutexAutoLock lock(arena->mLock);
-  size_t pageind = aOffset >> pagesize_2pow;
+  size_t pageind = aOffset >> gPageSize2Pow;
   arena_chunk_map_t* mapelm = &chunk->map[pageind];
   MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
   if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
     // Small allocation.
     arena->DallocSmall(chunk, aPtr, mapelm);
   } else {
     // Large allocation.
     arena->DallocLarge(chunk, aPtr);
@@ -3604,34 +3654,34 @@ arena_t::RallocShrinkLarge(arena_chunk_t
 
 // Returns whether reallocation was successful.
 bool
 arena_t::RallocGrowLarge(arena_chunk_t* aChunk,
                          void* aPtr,
                          size_t aSize,
                          size_t aOldSize)
 {
-  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
-  size_t npages = aOldSize >> pagesize_2pow;
+  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
+  size_t npages = aOldSize >> gPageSize2Pow;
 
   MutexAutoLock lock(mLock);
   MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
-                        (aChunk->map[pageind].bits & ~pagesize_mask));
+                        (aChunk->map[pageind].bits & ~gPageSizeMask));
 
   // Try to extend the run.
   MOZ_ASSERT(aSize > aOldSize);
-  if (pageind + npages < chunk_npages &&
+  if (pageind + npages < gChunkNumPages &&
       (aChunk->map[pageind + npages].bits & CHUNK_MAP_ALLOCATED) == 0 &&
-      (aChunk->map[pageind + npages].bits & ~pagesize_mask) >=
+      (aChunk->map[pageind + npages].bits & ~gPageSizeMask) >=
         aSize - aOldSize) {
     // The next run is available and sufficiently large.  Split the
     // following run, then merge the first part with the existing
     // allocation.
     if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
-                                 ((pageind + npages) << pagesize_2pow)),
+                                 ((pageind + npages) << gPageSize2Pow)),
                   aSize - aOldSize,
                   true,
                   false)) {
       return false;
     }
 
     aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
     aChunk->map[pageind + npages].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
@@ -3680,35 +3730,28 @@ arena_ralloc_large(void* aPtr, size_t aS
 
 static void*
 arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
 {
   void* ret;
   size_t copysize;
 
   // Try to avoid moving the allocation.
-  if (aSize < small_min) {
-    if (aOldSize < small_min &&
-        (RoundUpPow2(aSize) >> (TINY_MIN_2POW + 1) ==
-         RoundUpPow2(aOldSize) >> (TINY_MIN_2POW + 1))) {
-      goto IN_PLACE; // Same size class.
+  if (aSize <= gMaxBinClass) {
+    if (aOldSize <= gMaxBinClass && SizeClass(aSize) == SizeClass(aOldSize)) {
+      if (aSize < aOldSize) {
+        memset(
+          (void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
+      } else if (opt_zero && aSize > aOldSize) {
+        memset((void*)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
+      }
+      return aPtr;
     }
-  } else if (aSize <= small_max) {
-    if (aOldSize >= small_min && aOldSize <= small_max &&
-        (QUANTUM_CEILING(aSize) >> QUANTUM_2POW_MIN) ==
-          (QUANTUM_CEILING(aOldSize) >> QUANTUM_2POW_MIN)) {
-      goto IN_PLACE; // Same size class.
-    }
-  } else if (aSize <= bin_maxclass) {
-    if (aOldSize > small_max && aOldSize <= bin_maxclass &&
-        RoundUpPow2(aSize) == RoundUpPow2(aOldSize)) {
-      goto IN_PLACE; // Same size class.
-    }
-  } else if (aOldSize > bin_maxclass && aOldSize <= arena_maxclass) {
-    MOZ_ASSERT(aSize > bin_maxclass);
+  } else if (aOldSize > gMaxBinClass && aOldSize <= gMaxLargeClass) {
+    MOZ_ASSERT(aSize > gMaxBinClass);
     if (arena_ralloc_large(aPtr, aSize, aOldSize)) {
       return aPtr;
     }
   }
 
   // If we get here, then aSize and aOldSize are different enough that we
   // need to move the object.  In that case, fall back to allocating new
   // space and copying.
@@ -3725,36 +3768,29 @@ arena_ralloc(void* aPtr, size_t aSize, s
     pages_copy(ret, aPtr, copysize);
   } else
 #endif
   {
     memcpy(ret, aPtr, copysize);
   }
   idalloc(aPtr);
   return ret;
-IN_PLACE:
-  if (aSize < aOldSize) {
-    memset((void*)(uintptr_t(aPtr) + aSize), kAllocPoison, aOldSize - aSize);
-  } else if (opt_zero && aSize > aOldSize) {
-    memset((void*)(uintptr_t(aPtr) + aOldSize), 0, aSize - aOldSize);
-  }
-  return aPtr;
 }
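// With SizeClass comparisons, the in-place path reduces to "same class, adjust
// the tail": shrinking poisons the bytes being given up, growing (when
// opt_zero is set) zeroes the newly exposed ones, and the original pointer is
// returned. A sketch of that tail adjustment on a plain buffer; kSketchPoison
// stands in for kAllocPoison and is not the real value:
#include <cstddef>
#include <cstring>

namespace ralloc_sketch {
constexpr unsigned char kSketchPoison = 0xe5; // stand-in poison byte

// Adjust the tail of an allocation that keeps its size class.
inline void
AdjustTailInPlace(void* aPtr, size_t aNewSize, size_t aOldSize, bool aZero)
{
  char* base = static_cast<char*>(aPtr);
  if (aNewSize < aOldSize) {
    memset(base + aNewSize, kSketchPoison, aOldSize - aNewSize);
  } else if (aZero && aNewSize > aOldSize) {
    memset(base + aOldSize, 0, aNewSize - aOldSize);
  }
}
} // namespace ralloc_sketch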
 
 static inline void*
 iralloc(void* aPtr, size_t aSize, arena_t* aArena)
 {
   size_t oldsize;
 
   MOZ_ASSERT(aPtr);
   MOZ_ASSERT(aSize != 0);
 
   oldsize = isalloc(aPtr);
 
-  return (aSize <= arena_maxclass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
+  return (aSize <= gMaxLargeClass) ? arena_ralloc(aPtr, aSize, oldsize, aArena)
                                    : huge_ralloc(aPtr, aSize, oldsize);
 }
 
 arena_t::arena_t()
 {
   unsigned i;
   arena_bin_t* bin;
   size_t prev_run_size;
@@ -3774,56 +3810,38 @@ arena_t::arena_t()
   mNumDirty = 0;
   // Reduce the maximum number of dirty pages we allow to be kept on
   // thread-local arenas. TODO: make this more flexible.
   mMaxDirty = opt_dirty_max >> 3;
 
   mRunsAvail.Init();
 
   // Initialize bins.
-  prev_run_size = pagesize;
-
-  // (2^n)-spaced tiny bins.
-  for (i = 0; i < ntbins; i++) {
+  prev_run_size = gPageSize;
+  SizeClass sizeClass(1);
+
+  for (i = 0;; i++) {
     bin = &mBins[i];
     bin->mCurrentRun = nullptr;
     bin->mNonFullRuns.Init();
 
-    bin->mSizeClass = (1ULL << (TINY_MIN_2POW + i));
+    bin->mSizeClass = sizeClass.Size();
 
     prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
     bin->mNumRuns = 0;
-  }
-
-  // Quantum-spaced bins.
-  for (; i < ntbins + nqbins; i++) {
-    bin = &mBins[i];
-    bin->mCurrentRun = nullptr;
-    bin->mNonFullRuns.Init();
-
-    bin->mSizeClass = quantum * (i - ntbins + 1);
-
-    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-    bin->mNumRuns = 0;
-  }
-
-  // (2^n)-spaced sub-page bins.
-  for (; i < ntbins + nqbins + nsbins; i++) {
-    bin = &mBins[i];
-    bin->mCurrentRun = nullptr;
-    bin->mNonFullRuns.Init();
-
-    bin->mSizeClass = (small_max << (i - (ntbins + nqbins) + 1));
-
-    prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-    bin->mNumRuns = 0;
-  }
+
+    // SizeClass doesn't support sizes larger than gMaxSubPageClass for now.
+    if (sizeClass.Size() == gMaxSubPageClass) {
+      break;
+    }
+    sizeClass = sizeClass.Next();
+  }
+  MOZ_ASSERT(i ==
+             kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses - 1);
 
 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
   mMagic = ARENA_MAGIC;
 #endif
 }
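// The constructor now derives the bins by repeatedly asking SizeClass for the
// next class instead of running three hand-written loops. A sketch of the
// assumed progression (doubling through the tiny classes, quantum-sized steps
// up to the largest quantum class, then doubling again up to the largest
// sub-page class); the limits below are stand-ins, not the real constants:
#include <cstddef>

namespace sizeclass_sketch {
constexpr size_t kSketchQuantum = 16;
constexpr size_t kSketchMaxQuantum = 512;

constexpr size_t
NextClass(size_t aClass)
{
  return aClass < kSketchQuantum
           ? aClass * 2
           : aClass < kSketchMaxQuantum ? aClass + kSketchQuantum : aClass * 2;
}

static_assert(NextClass(4) == 8, "tiny classes double");
static_assert(NextClass(16) == 32, "quantum classes step by the quantum");
static_assert(NextClass(496) == 512, "up to the largest quantum class");
static_assert(NextClass(1024) == 2048, "sub-page classes double");
} // namespace sizeclass_sketch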
 
 arena_t*
 ArenaCollection::CreateArena(bool aIsPrivate)
@@ -3852,17 +3870,17 @@ ArenaCollection::CreateArena(bool aIsPri
 
 // End arena.
 // ***************************************************************************
 // Begin general internal functions.
 
 static void*
 huge_malloc(size_t size, bool zero)
 {
-  return huge_palloc(size, chunksize, zero);
+  return huge_palloc(size, kChunkSize, zero);
 }
 
 static void*
 huge_palloc(size_t aSize, size_t aAlignment, bool aZero)
 {
   void* ret;
   size_t csize;
   size_t psize;
@@ -3949,17 +3967,17 @@ huge_palloc(size_t aSize, size_t aAlignm
 
 static void*
 huge_ralloc(void* aPtr, size_t aSize, size_t aOldSize)
 {
   void* ret;
   size_t copysize;
 
   // Avoid moving the allocation if the size class would not change.
-  if (aOldSize > arena_maxclass &&
+  if (aOldSize > gMaxLargeClass &&
       CHUNK_CEILING(aSize) == CHUNK_CEILING(aOldSize)) {
     size_t psize = PAGE_CEILING(aSize);
     if (aSize < aOldSize) {
       memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
     }
 #ifdef MALLOC_DECOMMIT
     if (psize < aOldSize) {
       extent_node_t key;
@@ -4096,28 +4114,25 @@ static
     return true;
   }
 
   // Get page size and number of CPUs
   result = GetKernelPageSize();
   // We assume that the page size is a power of 2.
   MOZ_ASSERT(((result - 1) & result) == 0);
 #ifdef MALLOC_STATIC_PAGESIZE
-  if (pagesize % (size_t)result) {
+  if (gPageSize % (size_t)result) {
     _malloc_message(
       _getprogname(),
       "Compile-time page size does not divide the runtime one.\n");
     MOZ_CRASH();
   }
 #else
-  pagesize = (size_t)result;
-  pagesize_mask = (size_t)result - 1;
-  pagesize_2pow = FloorLog2(result);
-  MOZ_RELEASE_ASSERT(1ULL << pagesize_2pow == pagesize,
-                     "Page size is not a power of two");
+  gPageSize = (size_t)result;
+  DefineGlobals();
 #endif
 
   // Get runtime configuration.
   if ((opts = getenv("MALLOC_OPTIONS"))) {
     for (i = 0; opts[i] != '\0'; i++) {
       unsigned j, nreps;
       bool nseen;
 
@@ -4185,35 +4200,18 @@ static
                             cbuf,
                             "'\n");
           }
         }
       }
     }
   }
 
-#ifndef MALLOC_STATIC_PAGESIZE
-  // Set bin-related variables.
-  bin_maxclass = (pagesize >> 1);
-  nsbins = pagesize_2pow - SMALL_MAX_2POW_DEFAULT - 1;
-
-  chunk_npages = (chunksize >> pagesize_2pow);
-
-  arena_chunk_header_npages = calculate_arena_header_pages();
-  arena_maxclass = calculate_arena_maxclass();
-#endif
-
   gRecycledSize = 0;
 
-  // Various sanity checks that regard configuration.
-  MOZ_ASSERT(quantum >= sizeof(void*));
-  MOZ_ASSERT(quantum <= pagesize);
-  MOZ_ASSERT(chunksize >= pagesize);
-  MOZ_ASSERT(quantum * 4 <= chunksize);
-
   // Initialize chunks data.
   chunks_mtx.Init();
   gChunksBySize.Init();
   gChunksByAddress.Init();
 
   // Initialize huge allocation data.
   huge_mtx.Init();
   huge.Init();
@@ -4470,35 +4468,20 @@ MozJemalloc::valloc(size_t aSize)
 // ***************************************************************************
 // Begin non-standard functions.
 
 // This was added by Mozilla for use by SQLite.
 template<>
 inline size_t
 MozJemalloc::malloc_good_size(size_t aSize)
 {
-  // This duplicates the logic in imalloc(), arena_malloc() and
-  // arena_t::MallocSmall().
-  if (aSize < small_min) {
-    // Small (tiny).
-    aSize = RoundUpPow2(aSize);
-
-    // We omit the #ifdefs from arena_t::MallocSmall() --
-    // it can be inaccurate with its size in some cases, but this
-    // function must be accurate.
-    if (aSize < (1U << TINY_MIN_2POW)) {
-      aSize = (1U << TINY_MIN_2POW);
-    }
-  } else if (aSize <= small_max) {
-    // Small (quantum-spaced).
-    aSize = QUANTUM_CEILING(aSize);
-  } else if (aSize <= bin_maxclass) {
-    // Small (sub-page).
-    aSize = RoundUpPow2(aSize);
-  } else if (aSize <= arena_maxclass) {
+  if (aSize <= gMaxSubPageClass) {
+    // Small.
+    aSize = SizeClass(aSize).Size();
+  } else if (aSize <= gMaxLargeClass) {
     // Large.
     aSize = PAGE_CEILING(aSize);
   } else {
     // Huge.  We use PAGE_CEILING to get psize, instead of using
     // CHUNK_CEILING to get csize.  This ensures that this
     // malloc_usable_size(malloc(n)) always matches
     // malloc_good_size(n).
     aSize = PAGE_CEILING(aSize);
@@ -4525,21 +4508,21 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
   if (!malloc_initialized) {
     memset(aStats, 0, sizeof(*aStats));
     return;
   }
 
   // Gather runtime settings.
   aStats->opt_junk = opt_junk;
   aStats->opt_zero = opt_zero;
-  aStats->quantum = quantum;
-  aStats->small_max = small_max;
-  aStats->large_max = arena_maxclass;
-  aStats->chunksize = chunksize;
-  aStats->page_size = pagesize;
+  aStats->quantum = kQuantum;
+  aStats->small_max = kMaxQuantumClass;
+  aStats->large_max = gMaxLargeClass;
+  aStats->chunksize = kChunkSize;
+  aStats->page_size = gPageSize;
   aStats->dirty_max = opt_dirty_max;
 
   // Gather current memory usage statistics.
   aStats->narenas = 0;
   aStats->mapped = 0;
   aStats->allocated = 0;
   aStats->waste = 0;
   aStats->page_cache = 0;
@@ -4575,29 +4558,30 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
     arena_unused = 0;
 
     {
       MutexAutoLock lock(arena->mLock);
 
       arena_mapped = arena->mStats.mapped;
 
       // "committed" counts dirty and allocated memory.
-      arena_committed = arena->mStats.committed << pagesize_2pow;
+      arena_committed = arena->mStats.committed << gPageSize2Pow;
 
       arena_allocated =
         arena->mStats.allocated_small + arena->mStats.allocated_large;
 
-      arena_dirty = arena->mNumDirty << pagesize_2pow;
-
-      for (j = 0; j < ntbins + nqbins + nsbins; j++) {
+      arena_dirty = arena->mNumDirty << gPageSize2Pow;
+
+      for (j = 0; j < kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses;
+           j++) {
         arena_bin_t* bin = &arena->mBins[j];
         size_t bin_unused = 0;
 
         for (auto mapelm : bin->mNonFullRuns.iter()) {
-          run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
+          run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
           bin_unused += run->nfree * bin->mSizeClass;
         }
 
         if (bin->mCurrentRun) {
           bin_unused += bin->mCurrentRun->nfree * bin->mSizeClass;
         }
 
         arena_unused += bin_unused;
@@ -4618,54 +4602,54 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
     aStats->bin_unused += arena_unused;
     aStats->bookkeeping += arena_headers;
     aStats->narenas++;
   }
   gArenas.mLock.Unlock();
 
   // Account for arena chunk headers in bookkeeping rather than waste.
   chunk_header_size =
-    ((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages)
-    << pagesize_2pow;
+    ((aStats->mapped / aStats->chunksize) * gChunkHeaderNumPages)
+    << gPageSize2Pow;
 
   aStats->mapped += non_arena_mapped;
   aStats->bookkeeping += chunk_header_size;
   aStats->waste -= chunk_header_size;
 
   MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
                                  aStats->page_cache + aStats->bookkeeping);
 }
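// The header adjustment at the end is simple arithmetic: every mapped arena
// chunk spends gChunkHeaderNumPages pages on its own map, and that memory is
// counted as bookkeeping rather than waste. A sketch with illustrative
// numbers (none of these constants are the real ones):
#include <cstddef>

namespace stats_sketch {
constexpr size_t kSketchChunkSize = 1024 * 1024; // assumed 1 MiB chunks
constexpr size_t kSketchPageSize = 4096;
constexpr size_t kSketchHeaderPages = 4; // stand-in for gChunkHeaderNumPages

constexpr size_t
ChunkHeaderBytes(size_t aMappedBytes)
{
  return (aMappedBytes / kSketchChunkSize) * kSketchHeaderPages * kSketchPageSize;
}

// 64 MiB of mapped arena chunks -> 64 chunks -> 1 MiB of chunk headers.
static_assert(ChunkHeaderBytes(64 * 1024 * 1024) == 1024 * 1024,
              "headers scale with the number of chunks");
} // namespace stats_sketch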
 
 #ifdef MALLOC_DOUBLE_PURGE
 
 // Explicitly remove all of this chunk's MADV_FREE'd pages from memory.
 static void
 hard_purge_chunk(arena_chunk_t* aChunk)
 {
   // See similar logic in arena_t::Purge().
-  for (size_t i = arena_chunk_header_npages; i < chunk_npages; i++) {
+  for (size_t i = gChunkHeaderNumPages; i < gChunkNumPages; i++) {
     // Find all adjacent pages with CHUNK_MAP_MADVISED set.
     size_t npages;
-    for (npages = 0; aChunk->map[i + npages].bits & CHUNK_MAP_MADVISED &&
-                     i + npages < chunk_npages;
+    for (npages = 0; i + npages < gChunkNumPages &&
+                     aChunk->map[i + npages].bits & CHUNK_MAP_MADVISED;
          npages++) {
       // Turn off the chunk's MADV_FREED bit and turn on its
       // DECOMMITTED bit.
       MOZ_DIAGNOSTIC_ASSERT(
         !(aChunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
       aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
     }
 
     // We could use mincore to find out which pages are actually
     // present, but it's not clear that's better.
     if (npages > 0) {
-      pages_decommit(((char*)aChunk) + (i << pagesize_2pow),
-                     npages << pagesize_2pow);
-      Unused << pages_commit(((char*)aChunk) + (i << pagesize_2pow),
-                             npages << pagesize_2pow);
+      pages_decommit(((char*)aChunk) + (i << gPageSize2Pow),
+                     npages << gPageSize2Pow);
+      Unused << pages_commit(((char*)aChunk) + (i << gPageSize2Pow),
+                             npages << gPageSize2Pow);
     }
     i += npages;
   }
 }
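// The decommit/commit round trip is what makes the purge "hard": dropping the
// mapping guarantees the MADV_FREE'd pages are no longer resident, and
// re-committing hands back demand-zeroed pages on the next touch. A rough
// POSIX sketch of the same idea (error handling omitted; mozjemalloc's own
// pages_decommit()/pages_commit() wrap the platform-specific equivalents):
#include <sys/mman.h>
#include <cstddef>

namespace purge_sketch {
// Force a page range out of physical memory while keeping the address range
// reserved.
inline void
HardPurgeRange(void* aAddr, size_t aLength)
{
  // Decommit: replace the range with an inaccessible mapping, discarding its
  // contents.
  (void)mmap(aAddr, aLength, PROT_NONE,
             MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
  // Commit: make it usable again; pages are zero-filled on first touch.
  (void)mmap(aAddr, aLength, PROT_READ | PROT_WRITE,
             MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
}
} // namespace purge_sketch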
 
 // Explicitly remove all of this arena's MADV_FREE'd pages from memory.
 void
 arena_t::HardPurge()
--- a/memory/gtest/TestJemalloc.cpp
+++ b/memory/gtest/TestJemalloc.cpp
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/mozalloc.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/Vector.h"
 #include "mozmemory.h"
+#include "Utils.h"
 
 #include "gtest/gtest.h"
 
 using namespace mozilla;
 
 static inline void
 TestOne(size_t size)
 {
@@ -30,33 +31,30 @@ TestOne(size_t size)
 static inline void
 TestThree(size_t size)
 {
   ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
   ASSERT_NO_FATAL_FAILURE(TestOne(size));
   ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
 }
 
-#define K   * 1024
-#define M   * 1024 * 1024
-
 TEST(Jemalloc, UsableSizeInAdvance)
 {
   /*
    * Test every size up to a certain point, then (N-1, N, N+1) triplets for
    * various sizes beyond that.
    */
 
-  for (size_t n = 0; n < 16 K; n++)
+  for (size_t n = 0; n < 16_KiB; n++)
     ASSERT_NO_FATAL_FAILURE(TestOne(n));
 
-  for (size_t n = 16 K; n < 1 M; n += 4 K)
+  for (size_t n = 16_KiB; n < 1_MiB; n += 4_KiB)
     ASSERT_NO_FATAL_FAILURE(TestThree(n));
 
-  for (size_t n = 1 M; n < 8 M; n += 128 K)
+  for (size_t n = 1_MiB; n < 8_MiB; n += 128_KiB)
     ASSERT_NO_FATAL_FAILURE(TestThree(n));
 }
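// A quick compile-time sanity check of the user-defined literals used in the
// loops above:
static_assert(16_KiB == 16 * 1024, "16_KiB is 16384 bytes");
static_assert(1_MiB == 1024_KiB, "1_MiB is 1024 KiB");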
 
 static int gStaticVar;
 
 bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
             size_t aSize)
 {
@@ -94,28 +92,28 @@ TEST(Jemalloc, PtrInfo)
     ASSERT_TRUE(small.append(p));
     for (size_t j = 0; j < usable; j++) {
       jemalloc_ptr_info(&p[j], &info);
       ASSERT_TRUE(InfoEq(info, TagLiveSmall, p, usable));
     }
   }
 
   // Similar for large (2KiB + 1 KiB .. 1MiB - 8KiB) allocations.
-  for (size_t n = small_max + 1 K; n <= stats.large_max; n += 1 K) {
+  for (size_t n = small_max + 1_KiB; n <= stats.large_max; n += 1_KiB) {
     auto p = (char*)malloc(n);
     size_t usable = moz_malloc_size_of(p);
     ASSERT_TRUE(large.append(p));
     for (size_t j = 0; j < usable; j += 347) {
       jemalloc_ptr_info(&p[j], &info);
       ASSERT_TRUE(InfoEq(info, TagLiveLarge, p, usable));
     }
   }
 
   // Similar for huge (> 1MiB - 8KiB) allocations.
-  for (size_t n = stats.chunksize; n <= 10 M; n += 512 K) {
+  for (size_t n = stats.chunksize; n <= 10_MiB; n += 512_KiB) {
     auto p = (char*)malloc(n);
     size_t usable = moz_malloc_size_of(p);
     ASSERT_TRUE(huge.append(p));
     for (size_t j = 0; j < usable; j += 567) {
       jemalloc_ptr_info(&p[j], &info);
       ASSERT_TRUE(InfoEq(info, TagLiveHuge, p, usable));
     }
   }
@@ -220,11 +218,8 @@ TEST(Jemalloc, PtrInfo)
   // Entire chunk. It's impossible to check what is put into |info| for all of
   // these addresses; this is more about checking that we don't crash.
   for (size_t i = 0; i < stats.chunksize; i += 256) {
     jemalloc_ptr_info(&chunk[i], &info);
   }
 
   jemalloc_thread_local_arena(false);
 }
-
-#undef K
-#undef M
--- a/memory/gtest/moz.build
+++ b/memory/gtest/moz.build
@@ -4,8 +4,12 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 UNIFIED_SOURCES += [
     'TestJemalloc.cpp',
 ]
 
 FINAL_LIBRARY = 'xul-gtest'
+
+LOCAL_INCLUDES += [
+    '../build',
+]
--- a/servo/docs/COMMAND_LINE_ARGS.md
+++ b/servo/docs/COMMAND_LINE_ARGS.md
@@ -13,17 +13,17 @@ Only arguments that need more explanatio
 ## Enable Experimental Features
 Use `--pref` to enable experimental features such as experimental DOM APIs, JavaScript APIs, and CSS properties.

 e.g. To enable the `flex` and `flex-direction` CSS properties:
 ```
 ./mach run -d -- --pref layout.flex.enabled --pref layout.flex-direction.enabled ...
 ```
 
-You can find all the available preferences at [resources/prefs.json](http://mxr.mozilla.org/servo/source/resources/prefs.json).
+You can find all the available preferences at [resources/prefs.json](https://dxr.mozilla.org/servo/source/resources/prefs.json).
 
 # Debugging
 ## Remote Debugging
 Use `--devtools 6000` to start the devtools server on port 6000.
 
 e.g.
 ```
 ./mach run -d --devtools 6000 https://servo.org