Bug 988486 - Split out page allocator state into new class r=terrence
authorJon Coppeard <jcoppeard@mozilla.com>
Wed, 30 Apr 2014 12:13:55 +0100
changeset 181553 b0beb424c86ee6ac269e7e5fdedfe128ea61bb52
parent 181552 de07501e90b580503b0af6e67c0daca62a7c4dc2
child 181554 fd9b3cd32b47b018adc22680e9965ae291da3694
push id: 272
push user: pvanderbeken@mozilla.com
push date: Mon, 05 May 2014 16:31:18 +0000
reviewers: terrence
bugs: 988486
milestone: 32.0a1
Bug 988486 - Split out page allocator state into new class r=terrence
js/src/gc/Heap.h
js/src/gc/Memory.cpp
js/src/gc/Memory.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/gc/Statistics.cpp
js/src/jscntxt.h
js/src/jsgc.cpp
js/src/vm/ArrayBufferObject.cpp
js/src/vm/Runtime.h
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -816,25 +816,17 @@ struct Chunk
 
     ArenaHeader *allocateArena(JS::Zone *zone, AllocKind kind);
 
     void releaseArena(ArenaHeader *aheader);
     void recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind);
 
     static Chunk *allocate(JSRuntime *rt);
 
-    void decommitAllArenas(JSRuntime *rt) {
-        decommittedArenas.clear(true);
-        MarkPagesUnused(rt, &arenas[0], ArenasPerChunk * ArenaSize);
-
-        info.freeArenasHead = nullptr;
-        info.lastDecommittedArenaOffset = 0;
-        info.numArenasFree = ArenasPerChunk;
-        info.numArenasFreeCommitted = 0;
-    }
+    void decommitAllArenas(JSRuntime *rt);
 
     /* Must be called with the GC lock taken. */
     static inline void release(JSRuntime *rt, Chunk *chunk);
     static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
 
     /* Must be called with the GC lock taken. */
     inline void prepareToBeFreed(JSRuntime *rt);
 
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -7,45 +7,44 @@
 #include "gc/Memory.h"
 
 #include "js/HeapAPI.h"
 #include "vm/Runtime.h"
 
 using namespace js;
 using namespace js::gc;
 
-static bool
-DecommitEnabled(JSRuntime *rt)
+bool
+SystemPageAllocator::decommitEnabled()
 {
-    return rt->gcSystemPageSize == ArenaSize;
+    return pageSize == ArenaSize;
 }
 
 #if defined(XP_WIN)
 #include "jswin.h"
 #include <psapi.h>
 
-void
-gc::InitMemorySubsystem(JSRuntime *rt)
+SystemPageAllocator::SystemPageAllocator()
 {
     SYSTEM_INFO sysinfo;
     GetSystemInfo(&sysinfo);
-    rt->gcSystemPageSize = sysinfo.dwPageSize;
-    rt->gcSystemAllocGranularity = sysinfo.dwAllocationGranularity;
+    pageSize = sysinfo.dwPageSize;
+    allocGranularity = sysinfo.dwAllocationGranularity;
 }
 
 void *
-gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % rt->gcSystemPageSize == 0);
-    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+    JS_ASSERT(size % pageSize == 0);
+    JS_ASSERT(alignment % allocGranularity == 0);
 
     /* Special case: If we want allocation alignment, no further work is needed. */
-    if (alignment == rt->gcSystemAllocGranularity) {
+    if (alignment == allocGranularity) {
         return VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
     }
 
     /*
      * Windows requires that there be a 1:1 mapping between VM allocation
      * and deallocation operations.  Therefore, take care here to acquire the
      * final result via one mapping operation.  This means unmapping any
      * preliminary result that is not correctly aligned.
@@ -55,163 +54,161 @@ gc::MapAlignedPages(JSRuntime *rt, size_
         /*
          * Over-allocate in order to map a memory region that is definitely
          * large enough, then deallocate and allocate again the correct size,
          * within the over-sized mapping.
          *
          * Since we're going to unmap the whole thing anyway, the first
          * mapping doesn't have to commit pages.
          */
-        size_t reserveSize = size + alignment - rt->gcSystemPageSize;
+        size_t reserveSize = size + alignment - pageSize;
         p = VirtualAlloc(nullptr, reserveSize, MEM_RESERVE, PAGE_READWRITE);
         if (!p)
             return nullptr;
         void *chunkStart = (void *)AlignBytes(uintptr_t(p), alignment);
-        UnmapPages(rt, p, reserveSize);
+        unmapPages(p, reserveSize);
         p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
 
         /* Failure here indicates a race with another thread, so try again. */
     }
 
     JS_ASSERT(uintptr_t(p) % alignment == 0);
     return p;
 }
 
 void
-gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::unmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
 }
 
 bool
-gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::markPagesUnused(void *p, size_t size)
 {
-    if (!DecommitEnabled(rt))
+    if (!decommitEnabled())
         return true;
 
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % pageSize == 0);
     LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE);
     return p2 == p;
 }
 
 bool
-gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::markPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 size_t
-gc::GetPageFaultCount()
+SystemPageAllocator::GetPageFaultCount()
 {
     PROCESS_MEMORY_COUNTERS pmc;
     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
         return 0;
     return pmc.PageFaultCount;
 }
 
 void *
-gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
     // TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
     return nullptr;
 }
 
 // Deallocate mapped memory for object.
 void
-gc::DeallocateMappedContent(void *p, size_t length)
+SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
 {
     // TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
 }
 
 #elif defined(SOLARIS)
 
 #include <sys/mman.h>
 #include <unistd.h>
 
 #ifndef MAP_NOSYNC
 # define MAP_NOSYNC 0
 #endif
 
-void
-gc::InitMemorySubsystem(JSRuntime *rt)
+SystemPageAllocator::SystemPageAllocator()
 {
-    rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
+    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
 void *
-gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % rt->gcSystemPageSize == 0);
-    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+    JS_ASSERT(size % pageSize == 0);
+    JS_ASSERT(alignment % allocGranularity == 0);
 
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
 
     void *p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
     if (p == MAP_FAILED)
         return nullptr;
     return p;
 }
 
 void
-gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::unmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
 }
 
 bool
-gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::markPagesUnused(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 bool
-gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::markPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 size_t
-gc::GetPageFaultCount()
+SystemPageAllocator::GetPageFaultCount()
 {
     return 0;
 }
 
 void *
-gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
     // Not implemented.
     return nullptr;
 }
 
 // Deallocate mapped memory for object.
 void
-gc::DeallocateMappedContent(void *p, size_t length)
+SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
 {
     // Not implemented.
 }
 
 #elif defined(XP_UNIX)
 
 #include <algorithm>
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
 
-void
-gc::InitMemorySubsystem(JSRuntime *rt)
+SystemPageAllocator::SystemPageAllocator()
 {
-    rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
+    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
 static inline void *
 MapMemory(size_t length, int prot, int flags, int fd, off_t offset)
 {
 #if defined(__ia64__)
     /*
      * The JS engine assumes that all allocated pointers have their high 17 bits clear,
@@ -239,28 +236,28 @@ MapMemory(size_t length, int prot, int f
     }
     return region;
 #else
     return mmap(nullptr, length, prot, flags, fd, offset);
 #endif
 }
 
 void *
-gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % rt->gcSystemPageSize == 0);
-    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+    JS_ASSERT(size % pageSize == 0);
+    JS_ASSERT(alignment % allocGranularity == 0);
 
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANON;
 
     /* Special case: If we want page alignment, no further work is needed. */
-    if (alignment == rt->gcSystemAllocGranularity) {
+    if (alignment == allocGranularity) {
         void *region = MapMemory(size, prot, flags, -1, 0);
         if (region == MAP_FAILED)
             return nullptr;
         return region;
     }
 
     /* Overallocate and unmap the region's edges. */
     size_t reqSize = Min(size + 2 * alignment, 2 * size);
@@ -279,51 +276,51 @@ gc::MapAlignedPages(JSRuntime *rt, size_
     if (uintptr_t(end) != regionEnd)
         JS_ALWAYS_TRUE(0 == munmap(end, regionEnd - uintptr_t(end)));
 
     JS_ASSERT(uintptr_t(front) % alignment == 0);
     return front;
 }
 
 void
-gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::unmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(0 == munmap(p, size));
 }
 
 bool
-gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::markPagesUnused(void *p, size_t size)
 {
-    if (!DecommitEnabled(rt))
+    if (!decommitEnabled())
         return false;
 
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % pageSize == 0);
     int result = madvise(p, size, MADV_DONTNEED);
     return result != -1;
 }
 
 bool
-gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+SystemPageAllocator::markPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 size_t
-gc::GetPageFaultCount()
+SystemPageAllocator::GetPageFaultCount()
 {
     struct rusage usage;
     int err = getrusage(RUSAGE_SELF, &usage);
     if (err)
         return 0;
     return usage.ru_majflt;
 }
 
 void *
-gc::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
 #define NEED_PAGE_ALIGNED 0
     size_t pa_start; // Page aligned starting
     size_t pa_end; // Page aligned ending
     size_t pa_size; // Total page aligned size
     size_t page_size = sysconf(_SC_PAGESIZE); // Page size
     struct stat st;
     uint8_t *buf;
@@ -362,17 +359,17 @@ gc::AllocateMappedContent(int fd, size_t
 
     // Reset the data after target file, which we don't need to see.
     memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length));
 
     return buf + (offset - pa_start);
 }
 
 void
-gc::DeallocateMappedContent(void *p, size_t length)
+SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
 {
     void *pa_start; // Page aligned starting
     size_t page_size = sysconf(_SC_PAGESIZE); // Page size
     size_t total_size; // Total allocated size
 
     pa_start = (void *)(uintptr_t(p) & ~(page_size - 1));
     total_size = ((uintptr_t(p) + length) & ~(page_size - 1)) + page_size - uintptr_t(pa_start);
     munmap(pa_start, total_size);
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -9,48 +9,56 @@
 
 #include <stddef.h>
 
 struct JSRuntime;
 
 namespace js {
 namespace gc {
 
-// Sanity check that our compiled configuration matches the currently running
-// instance and initialize any runtime data needed for allocation.
-void
-InitMemorySubsystem(JSRuntime *rt);
+class SystemPageAllocator
+{
+  public:
+    // Sanity check that our compiled configuration matches the currently
+    // running instance and initialize any runtime data needed for allocation.
+    SystemPageAllocator();
 
-// Allocate or deallocate pages from the system with the given alignment.
-void *
-MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment);
+    size_t systemPageSize() { return pageSize; }
+    size_t systemAllocGranularity() { return allocGranularity; }
 
-void
-UnmapPages(JSRuntime *rt, void *p, size_t size);
+    // Allocate or deallocate pages from the system with the given alignment.
+    void *mapAlignedPages(size_t size, size_t alignment);
+    void unmapPages(void *p, size_t size);
 
-// Tell the OS that the given pages are not in use, so they should not
-// be written to a paging file. This may be a no-op on some platforms.
-bool
-MarkPagesUnused(JSRuntime *rt, void *p, size_t size);
+    // Tell the OS that the given pages are not in use, so they should not be
+    // written to a paging file. This may be a no-op on some platforms.
+    bool markPagesUnused(void *p, size_t size);
 
-// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
-// and should be paged in and out normally. This may be a no-op on some
-// platforms.
-bool
-MarkPagesInUse(JSRuntime *rt, void *p, size_t size);
+    // Undo |markPagesUnused|: tell the OS that the given pages are of interest
+    // and should be paged in and out normally. This may be a no-op on some
+    // platforms.
+    bool markPagesInUse(void *p, size_t size);
+
+    // Returns #(hard faults) + #(soft faults)
+    static size_t GetPageFaultCount();
+
+    // Allocate memory mapped content.
+    // The offset must be aligned according to alignment requirement.
+    static void *AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
 
-// Returns #(hard faults) + #(soft faults)
-size_t
-GetPageFaultCount();
+    // Deallocate memory mapped content.
+    static void DeallocateMappedContent(void *p, size_t length);
+
+  private:
+    bool decommitEnabled();
 
-// Allocate memory mapped content.
-// The offset must be aligned according to alignment requirement.
-void *
-AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
+    // The GC can only safely decommit memory when the page size of the
+    // running process matches the compiled arena size.
+    size_t              pageSize;
 
-// Deallocate memory mapped content.
-void
-DeallocateMappedContent(void *p, size_t length);
+    // The OS allocation granularity may not match the page size.
+    size_t              allocGranularity;
+};
 
 } // namespace gc
 } // namespace js
 
 #endif /* gc_Memory_h */
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -48,17 +48,17 @@ static int64_t GCReportThreshold = INT64
 bool
 js::Nursery::init()
 {
     JS_ASSERT(start() == 0);
 
     if (!hugeSlots.init())
         return false;
 
-    void *heap = MapAlignedPages(runtime(), NurserySize, Alignment);
+    void *heap = runtime()->pageAllocator.mapAlignedPages(NurserySize, Alignment);
     if (!heap)
         return false;
 
     JSRuntime *rt = runtime();
     rt->gcNurseryStart_ = uintptr_t(heap);
     currentStart_ = start();
     rt->gcNurseryEnd_ = chunk(LastNurseryChunk).end();
     numActiveChunks_ = 1;
@@ -74,17 +74,33 @@ js::Nursery::init()
 
     JS_ASSERT(isEnabled());
     return true;
 }
 
 js::Nursery::~Nursery()
 {
     if (start())
-        UnmapPages(runtime(), (void *)start(), NurserySize);
+        runtime()->pageAllocator.unmapPages((void *)start(), NurserySize);
+}
+
+void
+js::Nursery::updateDecommittedRegion()
+{
+#ifndef JS_GC_ZEAL
+    if (numActiveChunks_ < NumNurseryChunks) {
+        // Bug 994054: madvise on MacOS is too slow to make this
+        //             optimization worthwhile.
+# ifndef XP_MACOSX
+        uintptr_t decommitStart = chunk(numActiveChunks_).start();
+        JS_ASSERT(decommitStart == AlignBytes(decommitStart, 1 << 20));
+        runtime()->pageAllocator.markPagesUnused((void *)decommitStart, heapEnd() - decommitStart);
+# endif
+    }
+#endif
 }
 
 void
 js::Nursery::enable()
 {
     JS_ASSERT(isEmpty());
     if (isEnabled())
         return;
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -211,29 +211,17 @@ class Nursery
         JS_ASSERT(chunkno < NumNurseryChunks);
         JS_ASSERT(chunkno < numActiveChunks_);
         currentChunk_ = chunkno;
         position_ = chunk(chunkno).start();
         currentEnd_ = chunk(chunkno).end();
         initChunk(chunkno);
     }
 
-    void updateDecommittedRegion() {
-#ifndef JS_GC_ZEAL
-        if (numActiveChunks_ < NumNurseryChunks) {
-            // Bug 994054: madvise on MacOS is too slow to make this
-            //             optimization worthwhile.
-# ifndef XP_MACOSX
-            uintptr_t decommitStart = chunk(numActiveChunks_).start();
-            JS_ASSERT(decommitStart == AlignBytes(decommitStart, 1 << 20));
-            gc::MarkPagesUnused(runtime(), (void *)decommitStart, heapEnd() - decommitStart);
-# endif
-        }
-#endif
-    }
+    void updateDecommittedRegion();
 
     MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const {
         JS_ASSERT(numActiveChunks_ > 0);
         return chunk(numActiveChunks_ - 1).end();
     }
 
     MOZ_ALWAYS_INLINE bool isFullyGrown() const {
         return numActiveChunks_ == NumNurseryChunks;
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -16,16 +16,17 @@
 #include "jsprf.h"
 #include "jsutil.h"
 #include "prmjtime.h"
 
 #include "gc/Memory.h"
 #include "vm/Runtime.h"
 
 using namespace js;
+using namespace js::gc;
 using namespace js::gcstats;
 
 using mozilla::PodArrayZero;
 
 /* Except for the first and last, slices of less than 42ms are not reported. */
 static const int64_t SLICE_MIN_REPORT_TIME = 42 * PRMJ_USEC_PER_MSEC;
 
 class gcstats::StatisticsSerializer
@@ -566,17 +567,17 @@ Statistics::beginSlice(int collectedCoun
     this->collectedCount = collectedCount;
     this->zoneCount = zoneCount;
     this->compartmentCount = compartmentCount;
 
     bool first = runtime->gcIncrementalState == gc::NO_INCREMENTAL;
     if (first)
         beginGC();
 
-    SliceData data(reason, PRMJ_Now(), gc::GetPageFaultCount());
+    SliceData data(reason, PRMJ_Now(), SystemPageAllocator::GetPageFaultCount());
     (void) slices.append(data); /* Ignore any OOMs here. */
 
     if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback)
         (*cb)(JS_TELEMETRY_GC_REASON, reason);
 
     // Slice callbacks should only fire for the outermost level
     if (++gcDepth == 1) {
         bool wasFullGC = collectedCount == zoneCount;
@@ -585,17 +586,17 @@ Statistics::beginSlice(int collectedCoun
                   JS::GCDescription(!wasFullGC));
     }
 }
 
 void
 Statistics::endSlice()
 {
     slices.back().end = PRMJ_Now();
-    slices.back().endFaults = gc::GetPageFaultCount();
+    slices.back().endFaults = SystemPageAllocator::GetPageFaultCount();
 
     if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback) {
         (*cb)(JS_TELEMETRY_GC_SLICE_MS, t(slices.back().end - slices.back().start));
         (*cb)(JS_TELEMETRY_GC_RESET, !!slices.back().resetReason);
     }
 
     bool last = runtime->gcIncrementalState == gc::NO_INCREMENTAL;
     if (last)
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -285,17 +285,17 @@ struct ThreadSafeContext : ContextFriend
     AtomSet &permanentAtoms() { return *runtime_->permanentAtoms; }
     const JS::AsmJSCacheOps &asmJSCacheOps() { return runtime_->asmJSCacheOps; }
     PropertyName *emptyString() { return runtime_->emptyString; }
     FreeOp *defaultFreeOp() { return runtime_->defaultFreeOp(); }
     bool useHelperThreads() { return runtime_->useHelperThreads(); }
     void *runtimeAddressForJit() { return runtime_; }
     void *stackLimitAddress(StackKind kind) { return &runtime_->mainThread.nativeStackLimit[kind]; }
     void *stackLimitAddressForJitCode(StackKind kind);
-    size_t gcSystemPageSize() { return runtime_->gcSystemPageSize; }
+    size_t gcSystemPageSize() { return runtime_->pageAllocator.systemPageSize(); }
     bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
     bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
 
     // Thread local data that may be accessed freely.
     DtoaState *dtoaState() {
         return perThreadData->dtoaState;
     }
 };
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -605,23 +605,23 @@ FinalizeArenas(FreeOp *fop,
       default:
         MOZ_ASSUME_UNREACHABLE("Invalid alloc kind");
     }
 }
 
 static inline Chunk *
 AllocChunk(JSRuntime *rt)
 {
-    return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
+    return static_cast<Chunk *>(rt->pageAllocator.mapAlignedPages(ChunkSize, ChunkSize));
 }
 
 static inline void
 FreeChunk(JSRuntime *rt, Chunk *p)
 {
-    UnmapPages(rt, static_cast<void *>(p), ChunkSize);
+    rt->pageAllocator.unmapPages(static_cast<void *>(p), ChunkSize);
 }
 
 inline bool
 ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
 {
     /*
      * To minimize memory waste we do not want to run the background chunk
      * allocation if we have empty chunks or when the runtime needs just few
@@ -754,16 +754,27 @@ Chunk::prepareToBeFreed(JSRuntime *rt)
     /*
      * Let FreeChunkList detect a missing prepareToBeFreed call before it
      * frees chunk.
      */
     info.numArenasFreeCommitted = 0;
 #endif
 }
 
+void Chunk::decommitAllArenas(JSRuntime *rt)
+{
+    decommittedArenas.clear(true);
+    rt->pageAllocator.markPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
+
+    info.freeArenasHead = nullptr;
+    info.lastDecommittedArenaOffset = 0;
+    info.numArenasFree = ArenasPerChunk;
+    info.numArenasFreeCommitted = 0;
+}
+
 void
 Chunk::init(JSRuntime *rt)
 {
     JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);
 
     /*
      * We clear the bitmap to guard against xpc_IsGrayGCThing being called on
      * uninitialized data, which would happen before the first GC cycle.
@@ -854,17 +865,17 @@ Chunk::fetchNextDecommittedArena()
     JS_ASSERT(info.numArenasFree > 0);
 
     unsigned offset = findDecommittedArenaOffset();
     info.lastDecommittedArenaOffset = offset + 1;
     --info.numArenasFree;
     decommittedArenas.unset(offset);
 
     Arena *arena = &arenas[offset];
-    MarkPagesInUse(info.trailer.runtime, arena, ArenaSize);
+    info.trailer.runtime->pageAllocator.markPagesInUse(arena, ArenaSize);
     arena->aheader.setAsNotAllocated();
 
     return &arena->aheader;
 }
 
 inline ArenaHeader *
 Chunk::fetchNextFreeArena(JSRuntime *rt)
 {
@@ -1066,18 +1077,16 @@ InitGCZeal(JSRuntime *rt)
 #endif
 
 /* Lifetime for type sets attached to scripts containing observed types. */
 static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
 
 bool
 js_InitGC(JSRuntime *rt, uint32_t maxbytes)
 {
-    InitMemorySubsystem(rt);
-
     if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
         return false;
 
     if (!rt->gcRootsHash.init(256))
         return false;
 
     if (!rt->gcHelperThread.init())
         return false;
@@ -2033,17 +2042,17 @@ DecommitArenasFromAvailableList(JSRuntim
                 /*
                  * If the main thread waits for the decommit to finish, skip
                  * potentially expensive unlock/lock pair on the contested
                  * lock.
                  */
                 Maybe<AutoUnlockGC> maybeUnlock;
                 if (!rt->isHeapBusy())
                     maybeUnlock.construct(rt);
-                ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize);
+                ok = rt->pageAllocator.markPagesUnused(aheader->getArena(), ArenaSize);
             }
 
             if (ok) {
                 ++chunk->info.numArenasFree;
                 chunk->decommittedArenas.set(arenaIndex);
             } else {
                 chunk->addArenaToFreeList(rt, aheader);
             }
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -530,26 +530,26 @@ ArrayBufferObject::canNeuterAsmJSArrayBu
 #else
     return true;
 #endif
 }
 
 void *
 ArrayBufferObject::createMappedContents(int fd, size_t offset, size_t length)
 {
-    return AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
+    return SystemPageAllocator::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
 }
 
 void
 ArrayBufferObject::releaseMappedArray()
 {
     if(!isMappedArrayBuffer() || isNeutered())
         return;
 
-    DeallocateMappedContent(dataPointer(), byteLength());
+    SystemPageAllocator::DeallocateMappedContent(dataPointer(), byteLength());
 }
 
 void
 ArrayBufferObject::addView(ArrayBufferViewObject *view)
 {
     // Note that pre-barriers are not needed here because either the list was
     // previously empty, in which case no pointer is being overwritten, or the
     // list was nonempty and will be made weak during this call (and weak
@@ -1102,17 +1102,17 @@ JS_PUBLIC_API(void *)
 JS_CreateMappedArrayBufferContents(int fd, size_t offset, size_t length)
 {
     return ArrayBufferObject::createMappedContents(fd, offset, length);
 }
 
 JS_PUBLIC_API(void)
 JS_ReleaseMappedArrayBufferContents(void *contents, size_t length)
 {
-    DeallocateMappedContent(contents, length);
+    SystemPageAllocator::DeallocateMappedContent(contents, length);
 }
 
 JS_FRIEND_API(bool)
 JS_IsMappedArrayBufferObject(JSObject *obj)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return false;
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -1292,24 +1292,17 @@ struct JSRuntime : public JS::shadow::Ru
      * tracing through black roots and the other is for tracing through gray
      * roots. The black/gray distinction is only relevant to the cycle
      * collector.
      */
     typedef js::Vector<ExtraTracer, 4, js::SystemAllocPolicy> ExtraTracerVector;
     ExtraTracerVector   gcBlackRootTracers;
     ExtraTracer         gcGrayRootTracer;
 
-    /*
-     * The GC can only safely decommit memory when the page size of the
-     * running process matches the compiled arena size.
-     */
-    size_t              gcSystemPageSize;
-
-    /* The OS allocation granularity may not match the page size. */
-    size_t              gcSystemAllocGranularity;
+    js::gc::SystemPageAllocator pageAllocator;
 
     /* Strong references on scripts held for PCCount profiling API. */
     js::ScriptAndCountsVector *scriptAndCountsVector;
 
     /* Well-known numbers held for use by this runtime's contexts. */
     const js::Value     NaNValue;
     const js::Value     negativeInfinityValue;
     const js::Value     positiveInfinityValue;