Bug 1003230 - Refactor page based memory allocation functions r=sfink
author Jon Coppeard <jcoppeard@mozilla.com>
Tue, 15 Jul 2014 09:42:47 +0100
changeset 216064 e7e6db764418d757a19e3843ab08f09d7e74cf2a
parent 216000 ebf6185fafce3c71e087441dc3abe1d4bbb2b24d
child 216065 5340b5943f31fd1c24ea5c7d81493978400705f5
push id 515
push user raliiev@mozilla.com
push date Mon, 06 Oct 2014 12:51:51 +0000
treeherder mozilla-release@267c7a481bef
reviewers sfink
bugs 1003230
milestone 33.0a1
Bug 1003230 - Refactor page based memory allocation functions r=sfink
js/src/gc/GCRuntime.h
js/src/gc/Memory.cpp
js/src/gc/Memory.h
js/src/gc/Nursery.cpp
js/src/gc/Statistics.cpp
js/src/jsapi-tests/testGCAllocator.cpp
js/src/jscntxt.h
js/src/jsgc.cpp
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ThreadPool.cpp
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -414,18 +414,16 @@ class GCRuntime
     JSRuntime             *rt;
 
     /* Embedders can use this zone however they wish. */
     JS::Zone              *systemZone;
 
     /* List of compartments and zones (protected by the GC lock). */
     js::gc::ZoneVector    zones;
 
-    js::gc::SystemPageAllocator pageAllocator;
-
 #ifdef JSGC_GENERATIONAL
     js::Nursery           nursery;
     js::gc::StoreBuffer   storeBuffer;
 #endif
 
     js::gcstats::Statistics stats;
 
     js::GCMarker          marker;
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -6,63 +6,123 @@
 
 #include "gc/Memory.h"
 
 #include "mozilla/TaggedAnonymousMemory.h"
 
 #include "js/HeapAPI.h"
 #include "vm/Runtime.h"
 
-using namespace js;
-using namespace js::gc;
+#if defined(XP_WIN)
+
+#include "jswin.h"
+#include <psapi.h>
+
+#elif defined(SOLARIS)
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#elif defined(XP_UNIX)
+
+#include <algorithm>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#endif
+
+namespace js {
+namespace gc {
+
+// The GC can only safely decommit memory when the page size of the
+// running process matches the compiled arena size.
+static size_t pageSize = 0;
 
-bool
-SystemPageAllocator::decommitEnabled()
+// The OS allocation granularity may not match the page size.
+static size_t allocGranularity = 0;
+
+#if defined(XP_UNIX)
+// The addresses handed out by mmap may grow up or down.
+static int growthDirection = 0;
+#endif
+
+// The maximum number of unalignable chunks to temporarily keep alive in
+// the last ditch allocation pass. OOM crash reports generally show <= 7
+// unaligned chunks available (bug 1005844 comment #16).
+static const int MaxLastDitchAttempts = 8;
+
+static void GetNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
+                        size_t alignment);
+static bool GetNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
+                             size_t size, size_t alignment, bool addrsGrowDown);
+static void *MapAlignedPagesSlow(size_t size, size_t alignment);
+static void *MapAlignedPagesLastDitch(size_t size, size_t alignment);
+
+size_t
+SystemPageSize()
+{
+    return pageSize;
+}
+
+static bool
+DecommitEnabled()
 {
     return pageSize == ArenaSize;
 }
 
 /*
  * This returns the offset of address p from the nearest aligned address at
  * or below p - or alternatively, the number of unaligned bytes at the end of
  * the region starting at p (as we assert that allocation size is an integer
  * multiple of the alignment).
  */
 static inline size_t
 OffsetFromAligned(void *p, size_t alignment)
 {
     return uintptr_t(p) % alignment;
 }
 
+void *
+TestMapAlignedPagesLastDitch(size_t size, size_t alignment)
+{
+    return MapAlignedPagesLastDitch(size, alignment);
+}
+
+
 #if defined(XP_WIN)
-#include "jswin.h"
-#include <psapi.h>
 
-SystemPageAllocator::SystemPageAllocator()
+void
+InitMemorySubsystem()
 {
-    SYSTEM_INFO sysinfo;
-    GetSystemInfo(&sysinfo);
-    pageSize = sysinfo.dwPageSize;
-    allocGranularity = sysinfo.dwAllocationGranularity;
+    if (pageSize == 0) {
+        SYSTEM_INFO sysinfo;
+        GetSystemInfo(&sysinfo);
+        pageSize = sysinfo.dwPageSize;
+        allocGranularity = sysinfo.dwAllocationGranularity;
+    }
 }
 
 static inline void *
 MapMemoryAt(void *desired, size_t length, int flags, int prot = PAGE_READWRITE)
 {
     return VirtualAlloc(desired, length, flags, prot);
 }
 
 static inline void *
 MapMemory(size_t length, int flags, int prot = PAGE_READWRITE)
 {
     return VirtualAlloc(nullptr, length, flags, prot);
 }
 
 void *
-SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
+MapAlignedPages(size_t size, size_t alignment)
 {
     MOZ_ASSERT(size >= alignment);
     MOZ_ASSERT(size % alignment == 0);
     MOZ_ASSERT(size % pageSize == 0);
     MOZ_ASSERT(alignment % allocGranularity == 0);
 
     void *p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
 
@@ -70,35 +130,35 @@ SystemPageAllocator::mapAlignedPages(siz
     if (alignment == allocGranularity)
         return p;
 
     if (OffsetFromAligned(p, alignment) == 0)
         return p;
 
     void *retainedAddr;
     size_t retainedSize;
-    getNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
+    GetNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
     if (retainedAddr)
-        unmapPages(retainedAddr, retainedSize);
+        UnmapPages(retainedAddr, retainedSize);
     if (p) {
         if (OffsetFromAligned(p, alignment) == 0)
             return p;
-        unmapPages(p, size);
+        UnmapPages(p, size);
     }
 
-    p = mapAlignedPagesSlow(size, alignment);
+    p = MapAlignedPagesSlow(size, alignment);
     if (!p)
-        return mapAlignedPagesLastDitch(size, alignment);
+        return MapAlignedPagesLastDitch(size, alignment);
 
     MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
     return p;
 }
 
-void *
-SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
+static void *
+MapAlignedPagesSlow(size_t size, size_t alignment)
 {
     /*
      * Windows requires that there be a 1:1 mapping between VM allocation
      * and deallocation operations.  Therefore, take care here to acquire the
      * final result via one mapping operation.  This means unmapping any
      * preliminary result that is not correctly aligned.
      */
     void *p;
@@ -128,32 +188,32 @@ SystemPageAllocator::mapAlignedPagesSlow
 /*
  * Even though there aren't any |size + alignment - pageSize| byte chunks left,
  * the allocator may still be able to give us |size| byte chunks that are
  * either already aligned, or *can* be aligned by allocating in the nearest
  * aligned location. Since we can't tell the allocator to give us a different
  * address each time, we temporarily hold onto the unaligned part of each chunk
  * until the allocator gives us a chunk that either is, or can be aligned.
  */
-void *
-SystemPageAllocator::mapAlignedPagesLastDitch(size_t size, size_t alignment)
+static void *
+MapAlignedPagesLastDitch(size_t size, size_t alignment)
 {
     void *p = nullptr;
     void *tempMaps[MaxLastDitchAttempts];
     int attempt = 0;
     for (; attempt < MaxLastDitchAttempts; ++attempt) {
         size_t retainedSize;
-        getNewChunk(&p, tempMaps + attempt, &retainedSize, size, alignment);
+        GetNewChunk(&p, tempMaps + attempt, &retainedSize, size, alignment);
         if (OffsetFromAligned(p, alignment) == 0) {
             if (tempMaps[attempt])
-                unmapPages(tempMaps[attempt], retainedSize);
+                UnmapPages(tempMaps[attempt], retainedSize);
             break;
         }
         if (!tempMaps[attempt]) {
-            /* getNewChunk failed, but we can still try the simpler method. */
+            /* GetNewChunk failed, but we can still try the simpler method. */
             tempMaps[attempt] = p;
             p = nullptr;
         }
     }
     if (OffsetFromAligned(p, alignment)) {
-        unmapPages(p, size);
+        UnmapPages(p, size);
         p = nullptr;
     }
@@ -162,19 +222,19 @@ SystemPageAllocator::mapAlignedPagesLast
     return p;
 }
 
 /*
  * On Windows, map and unmap calls must be matched, so we deallocate the
  * unaligned chunk, then reallocate the unaligned part to block off the
  * old address and force the allocator to give us a new one.
  */
-void
-SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
-                                 size_t size, size_t alignment)
+static void
+GetNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
+            size_t alignment)
 {
     void *address = *aAddress;
     void *retainedAddr = nullptr;
     size_t retainedSize = 0;
     do {
         if (!address)
             address = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
         size_t offset = OffsetFromAligned(address, alignment);
@@ -187,147 +247,141 @@ SystemPageAllocator::getNewChunk(void **
         /* If retainedAddr is null here, we raced with another thread. */
     } while (!retainedAddr);
     *aAddress = address;
     *aRetainedAddr = retainedAddr;
     *aRetainedSize = retainedSize;
 }
 
 void
-SystemPageAllocator::unmapPages(void *p, size_t size)
+UnmapPages(void *p, size_t size)
 {
     MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
 }
 
 bool
-SystemPageAllocator::markPagesUnused(void *p, size_t size)
+MarkPagesUnused(void *p, size_t size)
 {
-    if (!decommitEnabled())
+    if (!DecommitEnabled())
         return true;
 
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     LPVOID p2 = MapMemoryAt(p, size, MEM_RESET);
     return p2 == p;
 }
 
 bool
-SystemPageAllocator::markPagesInUse(void *p, size_t size)
+MarkPagesInUse(void *p, size_t size)
 {
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     return true;
 }
 
 size_t
-SystemPageAllocator::GetPageFaultCount()
+GetPageFaultCount()
 {
     PROCESS_MEMORY_COUNTERS pmc;
     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
         return 0;
     return pmc.PageFaultCount;
 }
 
 void *
-SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
     // TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
     return nullptr;
 }
 
 // Deallocate mapped memory for object.
 void
-SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
+DeallocateMappedContent(void *p, size_t length)
 {
     // TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
 }
 
 #elif defined(SOLARIS)
 
-#include <sys/mman.h>
-#include <unistd.h>
-
 #ifndef MAP_NOSYNC
 # define MAP_NOSYNC 0
 #endif
 
-SystemPageAllocator::SystemPageAllocator()
+void
+InitMemorySubsystem()
 {
-    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
+    if (pageSize == 0)
+        pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
 void *
-SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
+MapAlignedPages(size_t size, size_t alignment)
 {
     MOZ_ASSERT(size >= alignment);
     MOZ_ASSERT(size % alignment == 0);
     MOZ_ASSERT(size % pageSize == 0);
     MOZ_ASSERT(alignment % allocGranularity == 0);
 
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
 
     void *p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
     if (p == MAP_FAILED)
         return nullptr;
     return p;
 }
 
 void
-SystemPageAllocator::unmapPages(void *p, size_t size)
+UnmapPages(void *p, size_t size)
 {
     MOZ_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
 }
 
 bool
-SystemPageAllocator::markPagesUnused(void *p, size_t size)
+MarkPagesUnused(void *p, size_t size)
 {
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     return true;
 }
 
 bool
-SystemPageAllocator::markPagesInUse(void *p, size_t size)
+MarkPagesInUse(void *p, size_t size)
 {
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     return true;
 }
 
 size_t
-SystemPageAllocator::GetPageFaultCount()
+GetPageFaultCount()
 {
     return 0;
 }
 
 void *
-SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
     // Not implemented.
     return nullptr;
 }
 
 // Deallocate mapped memory for object.
 void
-SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
+DeallocateMappedContent(void *p, size_t length)
 {
     // Not implemented.
 }
 
 #elif defined(XP_UNIX)
 
-#include <algorithm>
-#include <errno.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-SystemPageAllocator::SystemPageAllocator()
+void
+InitMemorySubsystem()
 {
-    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
-    growthDirection = 0;
+    if (pageSize == 0) {
+        pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
+        growthDirection = 0;
+    }
 }
 
 static inline void *
 MapMemoryAt(void *desired, size_t length, int prot = PROT_READ | PROT_WRITE,
             int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
 {
 #if defined(__ia64__)
     MOZ_ASSERT(0xffff800000000000ULL & (uintptr_t(desired) + length - 1) == 0);
@@ -383,17 +437,17 @@ MapMemory(size_t length, int prot = PROT
     void *region = MozTaggedAnonymousMmap(nullptr, length, prot, flags, fd, offset, "js-gc-heap");
     if (region == MAP_FAILED)
         return nullptr;
     return region;
 #endif
 }
 
 void *
-SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
+MapAlignedPages(size_t size, size_t alignment)
 {
     MOZ_ASSERT(size >= alignment);
     MOZ_ASSERT(size % alignment == 0);
     MOZ_ASSERT(size % pageSize == 0);
     MOZ_ASSERT(alignment % allocGranularity == 0);
 
     void *p = MapMemory(size);
 
@@ -401,35 +455,35 @@ SystemPageAllocator::mapAlignedPages(siz
     if (alignment == allocGranularity)
         return p;
 
     if (OffsetFromAligned(p, alignment) == 0)
         return p;
 
     void *retainedAddr;
     size_t retainedSize;
-    getNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
+    GetNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
     if (retainedAddr)
-        unmapPages(retainedAddr, retainedSize);
+        UnmapPages(retainedAddr, retainedSize);
     if (p) {
         if (OffsetFromAligned(p, alignment) == 0)
             return p;
-        unmapPages(p, size);
+        UnmapPages(p, size);
     }
 
-    p = mapAlignedPagesSlow(size, alignment);
+    p = MapAlignedPagesSlow(size, alignment);
     if (!p)
-        return mapAlignedPagesLastDitch(size, alignment);
+        return MapAlignedPagesLastDitch(size, alignment);
 
     MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
     return p;
 }
 
-void *
-SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
+static void *
+MapAlignedPagesSlow(size_t size, size_t alignment)
 {
     /* Overallocate and unmap the region's edges. */
     size_t reqSize = size + alignment - pageSize;
     void *region = MapMemory(reqSize);
     if (!region)
         return nullptr;
 
     void *regionEnd = (void *)(uintptr_t(region) + reqSize);
@@ -441,102 +495,102 @@ SystemPageAllocator::mapAlignedPagesSlow
         front = (void *)(uintptr_t(end) - size);
     } else {
         size_t offset = OffsetFromAligned(region, alignment);
         front = (void *)(uintptr_t(region) + (offset ? alignment - offset : 0));
         end = (void *)(uintptr_t(front) + size);
     }
 
     if (front != region)
-        unmapPages(region, uintptr_t(front) - uintptr_t(region));
+        UnmapPages(region, uintptr_t(front) - uintptr_t(region));
     if (end != regionEnd)
-        unmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
+        UnmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
 
     return front;
 }
 
 /*
  * Even though there aren't any |size + alignment - pageSize| byte chunks left,
  * the allocator may still be able to give us |size| byte chunks that are
  * either already aligned, or *can* be aligned by allocating in the nearest
  * aligned location. Since we can't tell the allocator to give us a different
  * address each time, we temporarily hold onto the unaligned part of each chunk
  * until the allocator gives us a chunk that either is, or can be aligned.
  */
-void *
-SystemPageAllocator::mapAlignedPagesLastDitch(size_t size, size_t alignment)
+static void *
+MapAlignedPagesLastDitch(size_t size, size_t alignment)
 {
     void *p = nullptr;
     void *tempMaps[MaxLastDitchAttempts];
     size_t tempSizes[MaxLastDitchAttempts];
     int attempt = 0;
     for (; attempt < MaxLastDitchAttempts; ++attempt) {
-        getNewChunk(&p, tempMaps + attempt, tempSizes + attempt, size, alignment);
+        GetNewChunk(&p, tempMaps + attempt, tempSizes + attempt, size, alignment);
         if (OffsetFromAligned(p, alignment) == 0) {
             if (tempMaps[attempt])
-                unmapPages(tempMaps[attempt], tempSizes[attempt]);
+                UnmapPages(tempMaps[attempt], tempSizes[attempt]);
             break;
         }
         if (!tempMaps[attempt]) {
-            /* getNewChunk failed, but we can still try the simpler method. */
+            /* GetNewChunk failed, but we can still try the simpler method. */
             tempMaps[attempt] = p;
             tempSizes[attempt] = size;
             p = nullptr;
         }
     }
     if (OffsetFromAligned(p, alignment)) {
-        unmapPages(p, size);
+        UnmapPages(p, size);
         p = nullptr;
     }
     while (--attempt >= 0)
-        unmapPages(tempMaps[attempt], tempSizes[attempt]);
+        UnmapPages(tempMaps[attempt], tempSizes[attempt]);
     return p;
 }
 
 /*
  * mmap calls don't have to be matched with calls to munmap, so we can unmap
  * just the pages we don't need. However, as we don't know a priori if addresses
  * are handed out in increasing or decreasing order, we have to try both
  * directions (depending on the environment, one will always fail).
  */
-void
-SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
-                                 size_t size, size_t alignment)
+static void
+GetNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
+            size_t alignment)
 {
     void *address = *aAddress;
     void *retainedAddr = nullptr;
     size_t retainedSize = 0;
     do {
         bool addrsGrowDown = growthDirection <= 0;
         /* Try the direction indicated by growthDirection. */
-        if (getNewChunkInner(&address, &retainedAddr, &retainedSize, size,
+        if (GetNewChunkInner(&address, &retainedAddr, &retainedSize, size,
                              alignment, addrsGrowDown)) {
             break;
         }
         /* If that failed, try the opposite direction. */
-        if (getNewChunkInner(&address, &retainedAddr, &retainedSize, size,
+        if (GetNewChunkInner(&address, &retainedAddr, &retainedSize, size,
                              alignment, !addrsGrowDown)) {
             break;
         }
         /* If retainedAddr is non-null here, we raced with another thread. */
     } while (retainedAddr);
     *aAddress = address;
     *aRetainedAddr = retainedAddr;
     *aRetainedSize = retainedSize;
 }
 
 #define SET_OUT_PARAMS_AND_RETURN(address_, retainedAddr_, retainedSize_, toReturn_)\
     do {                                                                            \
         *aAddress = address_; *aRetainedAddr = retainedAddr_;                       \
         *aRetainedSize = retainedSize_; return toReturn_;                           \
     } while(false)
 
-bool
-SystemPageAllocator::getNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
-                                      size_t size, size_t alignment, bool addrsGrowDown)
+static bool
+GetNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
+                 size_t alignment, bool addrsGrowDown)
 {
     void *initial = *aAddress;
     if (!initial)
         initial = MapMemory(size);
     if (OffsetFromAligned(initial, alignment) == 0)
         SET_OUT_PARAMS_AND_RETURN(initial, nullptr, 0, true);
     /* Set the parameters based on whether addresses grow up or down. */
     size_t offset;
@@ -550,84 +604,84 @@ SystemPageAllocator::getNewChunkInner(vo
         delta = -1;
     } else {
         offset = alignment - OffsetFromAligned(initial, alignment);
         discardedAddr = (void*)(uintptr_t(initial) + offset);
         retainedAddr = initial;
         delta = 1;
     }
     /* Keep only the |offset| unaligned bytes. */
-    unmapPages(discardedAddr, size - offset);
+    UnmapPages(discardedAddr, size - offset);
     void *address = MapMemory(size);
     if (!address) {
         /* Map the rest of the original chunk again in case we can recover. */
         address = MapMemoryAt(initial, size - offset);
         if (!address)
-            unmapPages(retainedAddr, offset);
+            UnmapPages(retainedAddr, offset);
         SET_OUT_PARAMS_AND_RETURN(address, nullptr, 0, false);
     }
     if ((addrsGrowDown && address < retainedAddr) || (!addrsGrowDown && address > retainedAddr)) {
         growthDirection += delta;
         SET_OUT_PARAMS_AND_RETURN(address, retainedAddr, offset, true);
     }
     /* If we didn't choose the right direction, reduce its score. */
     growthDirection -= delta;
     /* Accept an aligned address if growthDirection didn't just flip. */
     if (OffsetFromAligned(address, alignment) == 0 && growthDirection + delta != 0)
         SET_OUT_PARAMS_AND_RETURN(address, retainedAddr, offset, true);
-    unmapPages(address, size);
+    UnmapPages(address, size);
     /* Map the original chunk again since we chose the wrong direction. */
     address = MapMemoryAt(initial, size - offset);
     if (!address) {
         /* Return non-null retainedAddr to indicate thread-related failure. */
-        unmapPages(retainedAddr, offset);
+        UnmapPages(retainedAddr, offset);
         SET_OUT_PARAMS_AND_RETURN(nullptr, retainedAddr, 0, false);
     }
     SET_OUT_PARAMS_AND_RETURN(address, nullptr, 0, false);
 }
 
 #undef SET_OUT_PARAMS_AND_RETURN
 
 void
-SystemPageAllocator::unmapPages(void *p, size_t size)
+UnmapPages(void *p, size_t size)
 {
     if (munmap(p, size))
         MOZ_ASSERT(errno == ENOMEM);
 }
 
 bool
-SystemPageAllocator::markPagesUnused(void *p, size_t size)
+MarkPagesUnused(void *p, size_t size)
 {
-    if (!decommitEnabled())
+    if (!DecommitEnabled())
         return false;
 
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     int result = madvise(p, size, MADV_DONTNEED);
     return result != -1;
 }
 
 bool
-SystemPageAllocator::markPagesInUse(void *p, size_t size)
+MarkPagesInUse(void *p, size_t size)
 {
     MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
     return true;
 }
 
 size_t
-SystemPageAllocator::GetPageFaultCount()
+GetPageFaultCount()
 {
     struct rusage usage;
     int err = getrusage(RUSAGE_SELF, &usage);
     if (err)
         return 0;
     return usage.ru_majflt;
 }
 
 void *
-SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
 #define NEED_PAGE_ALIGNED 0
     size_t pa_start; // Page aligned starting
     size_t pa_end; // Page aligned ending
     size_t pa_size; // Total page aligned size
     size_t page_size = sysconf(_SC_PAGESIZE); // Page size
     struct stat st;
     uint8_t *buf;
@@ -666,23 +720,26 @@ SystemPageAllocator::AllocateMappedConte
 
     // Reset the data after target file, which we don't need to see.
     memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length));
 
     return buf + (offset - pa_start);
 }
 
 void
-SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
+DeallocateMappedContent(void *p, size_t length)
 {
     void *pa_start; // Page aligned starting
     size_t page_size = sysconf(_SC_PAGESIZE); // Page size
     size_t total_size; // Total allocated size
 
     pa_start = (void *)(uintptr_t(p) & ~(page_size - 1));
     total_size = ((uintptr_t(p) + length) & ~(page_size - 1)) + page_size - uintptr_t(pa_start);
     if (munmap(pa_start, total_size))
         MOZ_ASSERT(errno == ENOMEM);
 }
 
 #else
 #error "Memory mapping functions are not defined for your OS."
 #endif
+
+} // namespace gc
+} // namespace js
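
Note: the Unix MapAlignedPagesSlow above relies on munmap calls not having to match mmap calls 1:1, so it can over-allocate by |alignment - pageSize| bytes and unmap the unaligned edges; the Windows slow path cannot do this and instead re-acquires the final result with a single VirtualAlloc. The standalone program below is a minimal sketch of the over-allocate-and-trim idea only, assuming a POSIX system and a power-of-two alignment no smaller than a page; MapAlignedSlowSketch is a hypothetical name, and the real function additionally chooses which edge to keep based on growthDirection.

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical sketch: over-allocate, then unmap the unaligned edges.
    static void *MapAlignedSlowSketch(size_t size, size_t alignment)
    {
        size_t pageSize = size_t(sysconf(_SC_PAGESIZE));
        size_t reqSize = size + alignment - pageSize;
        void *region = mmap(nullptr, reqSize, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (region == MAP_FAILED)
            return nullptr;
        uintptr_t start = uintptr_t(region);
        uintptr_t mask = uintptr_t(alignment) - 1;
        uintptr_t front = (start + mask) & ~mask;        // round up to the alignment
        uintptr_t end = front + size;
        if (front != start)
            munmap(region, front - start);               // trim the unaligned leading edge
        if (end != start + reqSize)
            munmap((void *)end, start + reqSize - end);  // trim the trailing slack
        return (void *)front;
    }

    int main()
    {
        void *p = MapAlignedSlowSketch(1 << 20, 1 << 20);   // 1 MiB, 1 MiB-aligned
        printf("aligned mapping at %p\n", p);
        if (p)
            munmap(p, 1 << 20);
        return 0;
    }
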
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -9,77 +9,43 @@
 
 #include <stddef.h>
 
 struct JSRuntime;
 
 namespace js {
 namespace gc {
 
-class SystemPageAllocator
-{
-  public:
-    // Sanity check that our compiled configuration matches the currently
-    // running instance and initialize any runtime data needed for allocation.
-    SystemPageAllocator();
+// Sanity check that our compiled configuration matches the currently
+// running instance and initialize any runtime data needed for allocation.
+void InitMemorySubsystem();
 
-    size_t systemPageSize() { return pageSize; }
-    size_t systemAllocGranularity() { return allocGranularity; }
-
-    // Allocate or deallocate pages from the system with the given alignment.
-    void *mapAlignedPages(size_t size, size_t alignment);
-    void unmapPages(void *p, size_t size);
+size_t SystemPageSize();
 
-    // Tell the OS that the given pages are not in use, so they should not be
-    // written to a paging file. This may be a no-op on some platforms.
-    bool markPagesUnused(void *p, size_t size);
+// Allocate or deallocate pages from the system with the given alignment.
+void *MapAlignedPages(size_t size, size_t alignment);
+void UnmapPages(void *p, size_t size);
 
-    // Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
-    // and should be paged in and out normally. This may be a no-op on some
-    // platforms.
-    bool markPagesInUse(void *p, size_t size);
-
-    // Returns #(hard faults) + #(soft faults)
-    static size_t GetPageFaultCount();
-
-    // Allocate memory mapped content.
-    // The offset must be aligned according to alignment requirement.
-    static void *AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
+// Tell the OS that the given pages are not in use, so they should not be
+// written to a paging file. This may be a no-op on some platforms.
+bool MarkPagesUnused(void *p, size_t size);
 
-    // Deallocate memory mapped content.
-    static void DeallocateMappedContent(void *p, size_t length);
+// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
+// and should be paged in and out normally. This may be a no-op on some
+// platforms.
+bool MarkPagesInUse(void *p, size_t size);
 
-  private:
-    bool decommitEnabled();
-    void *mapAlignedPagesSlow(size_t size, size_t alignment);
-    void *mapAlignedPagesLastDitch(size_t size, size_t alignment);
-    void getNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
-                     size_t size, size_t alignment);
-    bool getNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
-                          size_t size, size_t alignment, bool addrsGrowDown);
-
-    // The GC can only safely decommit memory when the page size of the
-    // running process matches the compiled arena size.
-    size_t              pageSize;
+// Returns #(hard faults) + #(soft faults)
+size_t GetPageFaultCount();
 
-    // The OS allocation granularity may not match the page size.
-    size_t              allocGranularity;
-
-#if defined(XP_UNIX)
-    // The addresses handed out by mmap may grow up or down.
-    int                 growthDirection;
-#endif
+// Allocate memory mapped content.
+// The offset must be aligned according to alignment requirement.
+void *AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
 
-    // The maximum number of unalignable chunks to temporarily keep alive in
-    // the last ditch allocation pass. OOM crash reports generally show <= 7
-    // unaligned chunks available (bug 1005844 comment #16).
-    static const int    MaxLastDitchAttempts = 8;
+// Deallocate memory mapped content.
+void DeallocateMappedContent(void *p, size_t length);
 
-public:
-    void *testMapAlignedPagesLastDitch(size_t size, size_t alignment) {
-        return mapAlignedPagesLastDitch(size, alignment);
-    }
-};
+void *TestMapAlignedPagesLastDitch(size_t size, size_t alignment);
 
 } // namespace gc
 } // namespace js
 
 #endif /* gc_Memory_h */
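
Usage note: the declarations above replace the SystemPageAllocator class with free functions in js::gc. The sketch below is an illustrative caller, not code from the patch; it assumes a 1 MiB chunk size and relies on the |if (pageSize == 0)| guard that makes InitMemorySubsystem safe to call more than once.

    #include "gc/Memory.h"

    // Illustrative caller of the refactored js::gc page allocation API (sketch).
    void ExampleCaller()
    {
        js::gc::InitMemorySubsystem();          // idempotent: only the first call probes the OS

        const size_t ChunkSize = 1024 * 1024;   // assumed chunk size, aligned to its own size
        void *chunk = js::gc::MapAlignedPages(ChunkSize, ChunkSize);
        if (!chunk)
            return;

        // ... use the chunk ...

        js::gc::UnmapPages(chunk, ChunkSize);
    }
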
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -56,17 +56,17 @@ js::Nursery::init(uint32_t maxNurseryByt
 
     /* If no chunks are specified then the nursery is permanently disabled. */
     if (numNurseryChunks_ == 0)
         return true;
 
     if (!hugeSlots.init())
         return false;
 
-    void *heap = runtime()->gc.pageAllocator.mapAlignedPages(nurserySize(), Alignment);
+    void *heap = MapAlignedPages(nurserySize(), Alignment);
     if (!heap)
         return false;
 
     heapStart_ = uintptr_t(heap);
     heapEnd_ = heapStart_ + nurserySize();
     currentStart_ = start();
     numActiveChunks_ = 1;
     JS_POISON(heap, JS_FRESH_NURSERY_PATTERN, nurserySize());
@@ -81,32 +81,32 @@ js::Nursery::init(uint32_t maxNurseryByt
 
     JS_ASSERT(isEnabled());
     return true;
 }
 
 js::Nursery::~Nursery()
 {
     if (start())
-        runtime()->gc.pageAllocator.unmapPages((void *)start(), nurserySize());
+        UnmapPages((void *)start(), nurserySize());
 }
 
 void
 js::Nursery::updateDecommittedRegion()
 {
 #ifndef JS_GC_ZEAL
     if (numActiveChunks_ < numNurseryChunks_) {
         // Bug 994054: madvise on MacOS is too slow to make this
         //             optimization worthwhile.
 # ifndef XP_MACOSX
         uintptr_t decommitStart = chunk(numActiveChunks_).start();
         uintptr_t decommitSize = heapEnd() - decommitStart;
         JS_ASSERT(decommitStart == AlignBytes(decommitStart, Alignment));
         JS_ASSERT(decommitSize == AlignBytes(decommitStart, Alignment));
-        runtime()->gc.pageAllocator.markPagesUnused((void *)decommitStart, decommitSize);
+        MarkPagesUnused((void *)decommitStart, decommitSize);
 # endif
     }
 #endif
 }
 
 void
 js::Nursery::enable()
 {
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -576,17 +576,17 @@ Statistics::beginSlice(int collectedCoun
     this->collectedCount = collectedCount;
     this->zoneCount = zoneCount;
     this->compartmentCount = compartmentCount;
 
     bool first = runtime->gc.state() == gc::NO_INCREMENTAL;
     if (first)
         beginGC();
 
-    SliceData data(reason, PRMJ_Now(), SystemPageAllocator::GetPageFaultCount());
+    SliceData data(reason, PRMJ_Now(), GetPageFaultCount());
     (void) slices.append(data); /* Ignore any OOMs here. */
 
     if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback)
         (*cb)(JS_TELEMETRY_GC_REASON, reason);
 
     // Slice callbacks should only fire for the outermost level
     if (++gcDepth == 1) {
         bool wasFullGC = collectedCount == zoneCount;
@@ -595,17 +595,17 @@ Statistics::beginSlice(int collectedCoun
                              JS::GCDescription(!wasFullGC));
     }
 }
 
 void
 Statistics::endSlice()
 {
     slices.back().end = PRMJ_Now();
-    slices.back().endFaults = SystemPageAllocator::GetPageFaultCount();
+    slices.back().endFaults = GetPageFaultCount();
 
     if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback) {
         (*cb)(JS_TELEMETRY_GC_SLICE_MS, t(slices.back().end - slices.back().start));
         (*cb)(JS_TELEMETRY_GC_RESET, !!slices.back().resetReason);
     }
 
     bool last = runtime->gc.state() == gc::NO_INCREMENTAL;
     if (last)
--- a/js/src/jsapi-tests/testGCAllocator.cpp
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -87,30 +87,28 @@ testGCAllocatorUp(const size_t PageSize)
     }
     mapMemoryAt(stagingArea, StagingSize);
     // Make sure there are no available chunks below the staging area.
     int tempChunks;
     if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, false))
         return false;
     // Unmap the staging area so we can set it up for testing.
     unmapPages(stagingArea, StagingSize);
-    // Reuse the same allocator so it learns the address growth direction.
-    js::gc::SystemPageAllocator GCAlloc;
     // Check that the first chunk is used if it is aligned.
-    CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool, tempChunks));
     // Check that the first chunk is used if it can be aligned.
-    CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool, tempChunks));
     // Check that an aligned chunk after a single unalignable chunk is used.
-    CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool, tempChunks));
     // Check that we fall back to the slow path after two unalignable chunks.
-    CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool, tempChunks));
     // Check that we also fall back after an unalignable and an alignable chunk.
-    CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool, tempChunks));
     // Check that the last ditch allocator works as expected.
-    CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool, tempChunks, GCAlloc,
+    CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool, tempChunks,
                             UseLastDitchAllocator));
 
     // Clean up.
     while (--tempChunks >= 0)
         unmapPages(chunkPool[tempChunks], 2 * Chunk);
     return true;
 }
 
@@ -133,30 +131,28 @@ testGCAllocatorDown(const size_t PageSiz
     }
     mapMemoryAt(stagingArea, StagingSize);
     // Make sure there are no available chunks above the staging area.
     int tempChunks;
     if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true))
         return false;
     // Unmap the staging area so we can set it up for testing.
     unmapPages(stagingArea, StagingSize);
-    // Reuse the same allocator so it learns the address growth direction.
-    js::gc::SystemPageAllocator GCAlloc;
     // Check that the first chunk is used if it is aligned.
-    CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool, tempChunks));
     // Check that the first chunk is used if it can be aligned.
-    CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool, tempChunks));
     // Check that an aligned chunk after a single unalignable chunk is used.
-    CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool, tempChunks));
     // Check that we fall back to the slow path after two unalignable chunks.
-    CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool, tempChunks));
     // Check that we also fall back after an unalignable and an alignable chunk.
-    CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool, tempChunks));
     // Check that the last ditch allocator works as expected.
-    CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool, tempChunks, GCAlloc,
+    CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool, tempChunks,
                             UseLastDitchAllocator));
 
     // Clean up.
     while (--tempChunks >= 0)
         unmapPages(chunkPool[tempChunks], 2 * Chunk);
     return true;
 }
 
@@ -189,17 +185,17 @@ fillSpaceBeforeStagingArea(int &tempChun
         unmapPages(stagingArea, StagingSize);
         return false;
     }
     return true;
 }
 
 bool
 positionIsCorrect(const char *str, void *base, void **chunkPool, int tempChunks,
-                  js::gc::SystemPageAllocator& GCAlloc, AllocType allocator = UseNormalAllocator)
+                  AllocType allocator = UseNormalAllocator)
 {
     // str represents a region of memory, with each character representing a
     // region of Chunk bytes. str should contain only x, o and -, where
     // x = mapped by the test to set up the initial conditions,
     // o = mapped by the GC allocator, and
     // - = unmapped.
     // base should point to a region of contiguous free memory
     // large enough to hold strlen(str) chunks of Chunk bytes.
@@ -211,30 +207,30 @@ positionIsCorrect(const char *str, void 
     // Map the regions indicated by str.
     for (i = 0; i < len; ++i) {
         if (str[i] == 'x')
             mapMemoryAt((void *)(uintptr_t(base) +  i * Chunk), Chunk);
     }
     // Allocate using the GC's allocator.
     void *result;
     if (allocator == UseNormalAllocator)
-        result = GCAlloc.mapAlignedPages(2 * Chunk, Alignment);
+        result = js::gc::MapAlignedPages(2 * Chunk, Alignment);
     else
-        result = GCAlloc.testMapAlignedPagesLastDitch(2 * Chunk, Alignment);
+        result = js::gc::TestMapAlignedPagesLastDitch(2 * Chunk, Alignment);
     // Clean up the mapped regions.
     if (result)
-        GCAlloc.unmapPages(result, 2 * Chunk);
+        js::gc::UnmapPages(result, 2 * Chunk);
     for (--i; i >= 0; --i) {
         if (str[i] == 'x')
-            unmapPages((void *)(uintptr_t(base) +  i * Chunk), Chunk);
+            js::gc::UnmapPages((void *)(uintptr_t(base) +  i * Chunk), Chunk);
     }
     // CHECK returns, so clean up on failure.
     if (result != desired) {
         while (--tempChunks >= 0)
-            unmapPages(chunkPool[tempChunks], 2 * Chunk);
+            js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
     }
     return result == desired;
 }
 
 #if defined(XP_WIN)
 
 void *
 mapMemoryAt(void *desired, size_t length)
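
Note: the positionIsCorrect comment above describes layout strings in which 'x' marks chunks pre-mapped by the test, 'o' the region the GC allocator is expected to map, and '-' unmapped space. As an illustration of that convention only (this helper is not part of the test), the expected address follows from the position of the first 'o':

    #include <cstring>
    #include <cstdint>
    #include <cstddef>

    // Hypothetical helper: expected allocation address for a layout string such
    // as "x--xooxxx-------", given the staging-area base and the chunk size.
    static void *ExpectedAddress(const char *str, void *base, size_t chunkBytes)
    {
        const char *o = strchr(str, 'o');    // first chunk the GC should map
        if (!o)
            return nullptr;
        return (void *)(uintptr_t(base) + size_t(o - str) * chunkBytes);
    }
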
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -271,17 +271,17 @@ struct ThreadSafeContext : ContextFriend
     StaticStrings &staticStrings() { return *runtime_->staticStrings; }
     AtomSet &permanentAtoms() { return *runtime_->permanentAtoms; }
     const JS::AsmJSCacheOps &asmJSCacheOps() { return runtime_->asmJSCacheOps; }
     PropertyName *emptyString() { return runtime_->emptyString; }
     FreeOp *defaultFreeOp() { return runtime_->defaultFreeOp(); }
     void *runtimeAddressForJit() { return runtime_; }
     void *stackLimitAddress(StackKind kind) { return &runtime_->mainThread.nativeStackLimit[kind]; }
     void *stackLimitAddressForJitCode(StackKind kind);
-    size_t gcSystemPageSize() { return runtime_->gc.pageAllocator.systemPageSize(); }
+    size_t gcSystemPageSize() { return gc::SystemPageSize(); }
     bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
     bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
 
     // Thread local data that may be accessed freely.
     DtoaState *dtoaState() {
         return perThreadData->dtoaState;
     }
 };
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -622,23 +622,23 @@ FinalizeArenas(FreeOp *fop,
       default:
         MOZ_CRASH("Invalid alloc kind");
     }
 }
 
 static inline Chunk *
 AllocChunk(JSRuntime *rt)
 {
-    return static_cast<Chunk *>(rt->gc.pageAllocator.mapAlignedPages(ChunkSize, ChunkSize));
+    return static_cast<Chunk *>(MapAlignedPages(ChunkSize, ChunkSize));
 }
 
 static inline void
 FreeChunk(JSRuntime *rt, Chunk *p)
 {
-    rt->gc.pageAllocator.unmapPages(static_cast<void *>(p), ChunkSize);
+    UnmapPages(static_cast<void *>(p), ChunkSize);
 }
 
 /* Must be called with the GC lock taken. */
 inline Chunk *
 ChunkPool::get(JSRuntime *rt)
 {
     Chunk *chunk = emptyChunkListHead;
     if (!chunk) {
@@ -770,17 +770,17 @@ GCRuntime::prepareToFreeChunk(ChunkInfo 
      */
     info.numArenasFreeCommitted = 0;
 #endif
 }
 
 void Chunk::decommitAllArenas(JSRuntime *rt)
 {
     decommittedArenas.clear(true);
-    rt->gc.pageAllocator.markPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
+    MarkPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
 
     info.freeArenasHead = nullptr;
     info.lastDecommittedArenaOffset = 0;
     info.numArenasFree = ArenasPerChunk;
     info.numArenasFreeCommitted = 0;
 }
 
 void
@@ -879,17 +879,17 @@ Chunk::fetchNextDecommittedArena()
     JS_ASSERT(info.numArenasFree > 0);
 
     unsigned offset = findDecommittedArenaOffset();
     info.lastDecommittedArenaOffset = offset + 1;
     --info.numArenasFree;
     decommittedArenas.unset(offset);
 
     Arena *arena = &arenas[offset];
-    info.trailer.runtime->gc.pageAllocator.markPagesInUse(arena, ArenaSize);
+    MarkPagesInUse(arena, ArenaSize);
     arena->aheader.setAsNotAllocated();
 
     return &arena->aheader;
 }
 
 inline void
 GCRuntime::updateOnFreeArenaAlloc(const ChunkInfo &info)
 {
@@ -1271,16 +1271,18 @@ GCRuntime::initZeal()
 #endif
 
 /* Lifetime for type sets attached to scripts containing observed types. */
 static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
 
 bool
 GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
 {
+    InitMemorySubsystem();
+
 #ifdef JS_THREADSAFE
     lock = PR_NewLock();
     if (!lock)
         return false;
 #endif
 
     if (!chunkSet.init(INITIAL_CHUNK_CAPACITY))
         return false;
@@ -2501,17 +2503,17 @@ GCRuntime::decommitArenasFromAvailableLi
                 /*
                  * If the main thread waits for the decommit to finish, skip
                  * potentially expensive unlock/lock pair on the contested
                  * lock.
                  */
                 Maybe<AutoUnlockGC> maybeUnlock;
                 if (!isHeapBusy())
                     maybeUnlock.construct(rt);
-                ok = pageAllocator.markPagesUnused(aheader->getArena(), ArenaSize);
+                ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
             }
 
             if (ok) {
                 ++chunk->info.numArenasFree;
                 chunk->decommittedArenas.set(arenaIndex);
             } else {
                 chunk->addArenaToFreeList(rt, aheader);
             }
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -531,26 +531,26 @@ ArrayBufferObject::canNeuterAsmJSArrayBu
 #else
     return true;
 #endif
 }
 
 void *
 ArrayBufferObject::createMappedContents(int fd, size_t offset, size_t length)
 {
-    return SystemPageAllocator::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
+    return AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
 }
 
 void
 ArrayBufferObject::releaseMappedArray()
 {
     if(!isMappedArrayBuffer() || isNeutered())
         return;
 
-    SystemPageAllocator::DeallocateMappedContent(dataPointer(), byteLength());
+    DeallocateMappedContent(dataPointer(), byteLength());
 }
 
 void
 ArrayBufferObject::addView(ArrayBufferViewObject *view)
 {
     // Note that pre-barriers are not needed here because either the list was
     // previously empty, in which case no pointer is being overwritten, or the
     // list was nonempty and will be made weak during this call (and weak
@@ -1142,17 +1142,17 @@ JS_PUBLIC_API(void *)
 JS_CreateMappedArrayBufferContents(int fd, size_t offset, size_t length)
 {
     return ArrayBufferObject::createMappedContents(fd, offset, length);
 }
 
 JS_PUBLIC_API(void)
 JS_ReleaseMappedArrayBufferContents(void *contents, size_t length)
 {
-    SystemPageAllocator::DeallocateMappedContent(contents, length);
+    DeallocateMappedContent(contents, length);
 }
 
 JS_FRIEND_API(bool)
 JS_IsMappedArrayBufferObject(JSObject *obj)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return false;
--- a/js/src/vm/ThreadPool.cpp
+++ b/js/src/vm/ThreadPool.cpp
@@ -497,20 +497,20 @@ ThreadPool::abortJob()
 }
 
 // We are not using the markPagesUnused() / markPagesInUse() APIs here
 // for two reasons.  One, the free list is threaded through the
 // chunks, so some pages are actually in use.  Two, the expectation is
 // that a small number of chunks will be used intensively for a short
 // while and then be abandoned at the next GC.
 //
-// It's an open question whether it's best to go directly to the
-// pageAllocator, as now, or go via the GC's chunk pool.  Either way
-// there's a need to manage a predictable chunk cache here as we don't
-// want chunks to be deallocated during a parallel section.
+// It's an open question whether it's best to map the chunk directly,
+// as now, or go via the GC's chunk pool.  Either way there's a need
+// to manage a predictable chunk cache here as we don't want chunks to
+// be deallocated during a parallel section.
 
 gc::ForkJoinNurseryChunk *
 ThreadPool::getChunk()
 {
 #ifdef JSGC_FJGENERATIONAL
     PR_Lock(chunkLock_);
     timeOfLastAllocation_ = PRMJ_Now()/1000000;
     ChunkFreeList *p = freeChunks_;
@@ -519,17 +519,17 @@ ThreadPool::getChunk()
     PR_Unlock(chunkLock_);
 
     if (p) {
         // Already poisoned.
         return reinterpret_cast<gc::ForkJoinNurseryChunk *>(p);
     }
     gc::ForkJoinNurseryChunk *c =
         reinterpret_cast<gc::ForkJoinNurseryChunk *>(
-            runtime_->gc.pageAllocator.mapAlignedPages(gc::ChunkSize, gc::ChunkSize));
+            gc::MapAlignedPages(gc::ChunkSize, gc::ChunkSize));
     if (!c)
         return c;
     poisonChunk(c);
     return c;
 #else
     return nullptr;
 #endif
 }
@@ -575,12 +575,12 @@ ThreadPool::clearChunkCache()
     PR_Lock(chunkLock_);
     ChunkFreeList *p = freeChunks_;
     freeChunks_ = nullptr;
     PR_Unlock(chunkLock_);
 
     while (p) {
         ChunkFreeList *victim = p;
         p = p->next;
-        runtime_->gc.pageAllocator.unmapPages(victim, gc::ChunkSize);
+        gc::UnmapPages(victim, gc::ChunkSize);
     }
 #endif
 }
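
Note: the ThreadPool comment above explains that the pool keeps its own chunk cache, with the free list threaded through the chunks themselves, and now maps chunks via gc::MapAlignedPages directly. The sketch below illustrates that free-list idea; the locking and poisoning of the real code are omitted and all names are hypothetical.

    #include "gc/Memory.h"

    // Free-list node stored in the first bytes of a cached chunk (sketch).
    struct ChunkFreeListSketch
    {
        ChunkFreeListSketch *next;
    };

    static ChunkFreeListSketch *freeChunksSketch = nullptr;
    static const size_t SketchChunkSize = 1024 * 1024;   // assumed chunk size

    static void *GetChunkSketch()
    {
        if (ChunkFreeListSketch *p = freeChunksSketch) {
            freeChunksSketch = p->next;                   // reuse a cached chunk
            return p;
        }
        return js::gc::MapAlignedPages(SketchChunkSize, SketchChunkSize);
    }

    static void PutChunkSketch(void *chunk)
    {
        // Thread the free list through the chunk itself rather than unmapping it.
        ChunkFreeListSketch *p = static_cast<ChunkFreeListSketch *>(chunk);
        p->next = freeChunksSketch;
        freeChunksSketch = p;
    }
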