Back out 1b81a9c88872 (bug 840242) for committing infanticide in Nursery.o
author Phil Ringnalda <philringnalda@gmail.com>
Fri, 21 Jun 2013 18:53:24 -0700
changeset 147548 37392ce15efaa280552a4fdc68f761aa2af11634
parent 147547 1b81a9c888729c14728976b8e3e25f0d29b44938
child 147549 768a6a62fbc664ef88feb62c7897d774e9ab5b32
push id 2697
push user bbajaj@mozilla.com
push date Mon, 05 Aug 2013 18:49:53 +0000
treeherder mozilla-beta@dfec938c7b63
bugs 840242
milestone 24.0a1
backs out 1b81a9c888729c14728976b8e3e25f0d29b44938
js/public/HeapAPI.h
js/src/gc/Memory.cpp
js/src/gc/Memory.h
js/src/ion/AsmJS.cpp
js/src/ion/AsmJS.h
js/src/ion/AsmJSModule.h
js/src/jsapi.cpp
js/src/jscntxt.h
js/src/jsgc.cpp
js/src/jstypedarray.cpp
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -8,17 +8,34 @@
 #define js_HeapAPI_h
 
 #include "jspubtd.h"
 
 /* These values are private to the JS engine. */
 namespace js {
 namespace gc {
 
+/*
+ * Page size must be static to support our arena pointer optimizations, so we
+ * are forced to support each platform with non-4096 pages as a special case.
+ * Note: The freelist supports a maximum arena shift of 15.
+ * Note: Do not use JS_CPU_SPARC here, this header is used outside JS.
+ */
+#if (defined(SOLARIS) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
+    (defined(__sparc) || defined(__sparcv9) || defined(__ia64))
+const size_t PageShift = 13;
+const size_t ArenaShift = PageShift;
+#elif defined(__powerpc64__)
+const size_t PageShift = 16;
 const size_t ArenaShift = 12;
+#else
+const size_t PageShift = 12;
+const size_t ArenaShift = PageShift;
+#endif
+const size_t PageSize = size_t(1) << PageShift;
 const size_t ArenaSize = size_t(1) << ArenaShift;
 const size_t ArenaMask = ArenaSize - 1;
 
 const size_t ChunkShift = 20;
 const size_t ChunkSize = size_t(1) << ChunkShift;
 const size_t ChunkMask = ChunkSize - 1;
 
 const size_t CellShift = 3;
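The restored HeapAPI.h block above hard-codes the page size per platform. A standalone sketch (not tree code; assumes a C++11 compiler for static_assert) of the invariants the comment relies on, for the default 4 KiB-page configuration:

/* Standalone sketch only: restates the invariants behind the restored constants. */
#include <stddef.h>

namespace sketch {
const size_t PageShift = 12;
const size_t PageSize = size_t(1) << PageShift;
const size_t ArenaShift = PageShift;
const size_t ArenaSize = size_t(1) << ArenaShift;
const size_t ChunkShift = 20;

/* The freelist supports a maximum arena shift of 15 (per the comment above). */
static_assert(ArenaShift <= 15, "ArenaShift exceeds the freelist limit");
/* A chunk must hold a whole number of arenas. */
static_assert((size_t(1) << ChunkShift) % ArenaSize == 0,
              "chunk size must be a multiple of the arena size");
}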
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -1,52 +1,56 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "gc/Memory.h"
 
-#include "jscntxt.h"
-
 #include "js/HeapAPI.h"
 
 using namespace js;
 using namespace js::gc;
 
+/* Decommitting unused memory requires that the arena size match the page size. */
 static bool
-DecommitEnabled(JSRuntime *rt)
+DecommitEnabled()
 {
-    return rt->gcSystemPageSize == ArenaSize;
+    return PageSize == ArenaSize;
 }
 
 #if defined(XP_WIN)
 #include "jswin.h"
 #include <psapi.h>
 
+static size_t AllocationGranularity = 0;
+
 void
-gc::InitMemorySubsystem(JSRuntime *rt)
+gc::InitMemorySubsystem()
 {
     SYSTEM_INFO sysinfo;
     GetSystemInfo(&sysinfo);
-    rt->gcSystemPageSize = sysinfo.dwPageSize;
-    rt->gcSystemAllocGranularity = sysinfo.dwAllocationGranularity;
+    if (sysinfo.dwPageSize != PageSize) {
+        fprintf(stderr, "SpiderMonkey compiled with incorrect page size; please update js/public/HeapAPI.h.\n");
+        MOZ_CRASH();
+    }
+    AllocationGranularity = sysinfo.dwAllocationGranularity;
 }
 
 void *
-gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+gc::MapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % rt->gcSystemPageSize == 0);
-    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+    JS_ASSERT(size % PageSize == 0);
+    JS_ASSERT(alignment % AllocationGranularity == 0);
 
     /* Special case: If we want allocation alignment, no further work is needed. */
-    if (alignment == rt->gcSystemAllocGranularity) {
+    if (alignment == AllocationGranularity) {
         return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
     }
 
     /*
      * Windows requires that there be a 1:1 mapping between VM allocation
      * and deallocation operations.  Therefore, take care here to acquire the
      * final result via one mapping operation.  This means unmapping any
      * preliminary result that is not correctly aligned.
@@ -60,47 +64,47 @@ gc::MapAlignedPages(JSRuntime *rt, size_
          *
          * Since we're going to unmap the whole thing anyway, the first
          * mapping doesn't have to commit pages.
          */
         p = VirtualAlloc(NULL, size * 2, MEM_RESERVE, PAGE_READWRITE);
         if (!p)
             return NULL;
         void *chunkStart = (void *)(uintptr_t(p) + (alignment - (uintptr_t(p) % alignment)));
-        UnmapPages(rt, p, size * 2);
+        UnmapPages(p, size * 2);
         p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
 
         /* Failure here indicates a race with another thread, so try again. */
     }
 
     JS_ASSERT(uintptr_t(p) % alignment == 0);
     return p;
 }
 
 void
-gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
+gc::UnmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
 }
 
 bool
-gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesUnused(void *p, size_t size)
 {
-    if (!DecommitEnabled(rt))
-        return true;
+    if (!DecommitEnabled())
+        return false;
 
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE);
     return p2 == p;
 }
 
 bool
-gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     return true;
 }
 
 size_t
 gc::GetPageFaultCount()
 {
     PROCESS_MEMORY_COUNTERS pmc;
     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
@@ -112,23 +116,22 @@ gc::GetPageFaultCount()
 
 #define INCL_DOSMEMMGR
 #include <os2.h>
 
 #define JS_GC_HAS_MAP_ALIGN 1
 #define OS2_MAX_RECURSIONS  16
 
 void
-gc::InitMemorySubsystem(JSRuntime *rt)
+gc::InitMemorySubsystem()
 {
-    rt->gcSystemPageSize = rt->gcSystemAllocGranularity = ArenaSize;
 }
 
 void
-gc::UnmapPages(JSRuntime *rt, void *addr, size_t size)
+gc::UnmapPages(void *addr, size_t size)
 {
     if (!DosFreeMem(addr))
         return;
 
     /*
      * If DosFreeMem() failed, 'addr' is probably part of an "expensive"
      * allocation, so calculate the base address and try again.
      */
@@ -139,17 +142,17 @@ gc::UnmapPages(JSRuntime *rt, void *addr
 
     uintptr_t base = reinterpret_cast<uintptr_t>(addr) - ((2 * size) - cb);
     DosFreeMem(reinterpret_cast<void*>(base));
 
     return;
 }
 
 static void *
-gc::MapAlignedPagesRecursively(JSRuntime *rt, size_t size, size_t alignment, int& recursions)
+gc::MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
 {
     if (++recursions >= OS2_MAX_RECURSIONS)
         return NULL;
 
     void *tmp;
     if (DosAllocMem(&tmp, size,
                     OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
         JS_ALWAYS_TRUE(DosAllocMem(&tmp, size,
@@ -165,46 +168,46 @@ gc::MapAlignedPagesRecursively(JSRuntime
      * in a race with another thread, the next recursion should succeed.
      */
     size_t filler = size + alignment - offset;
     unsigned long cb = filler;
     unsigned long flags = 0;
     unsigned long rc = DosQueryMem(&(static_cast<char*>(tmp))[size],
                                    &cb, &flags);
     if (!rc && (flags & PAG_FREE) && cb >= filler) {
-        UnmapPages(rt, tmp, 0);
+        UnmapPages(tmp, 0);
         if (DosAllocMem(&tmp, filler,
                         OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
             JS_ALWAYS_TRUE(DosAllocMem(&tmp, filler,
                                        PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
         }
     }
 
-    void *p = MapAlignedPagesRecursively(rt, size, alignment, recursions);
-    UnmapPages(rt, tmp, 0);
+    void *p = MapAlignedPagesRecursively(size, alignment, recursions);
+    UnmapPages(tmp, 0);
 
     return p;
 }
 
 void *
-gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+gc::MapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % rt->gcSystemPageSize == 0);
-    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+    JS_ASSERT(size % PageSize == 0);
+    JS_ASSERT(alignment % PageSize == 0);
 
     int recursions = -1;
 
     /*
      * Make up to OS2_MAX_RECURSIONS attempts to get an aligned block
      * of the right size by recursively allocating blocks of unaligned
      * free memory until only an aligned allocation is possible.
      */
-    void *p = MapAlignedPagesRecursively(rt, size, alignment, recursions);
+    void *p = MapAlignedPagesRecursively(size, alignment, recursions);
     if (p)
         return p;
 
     /*
      * If memory is heavily fragmented, the recursive strategy may fail;
      * instead, use the "expensive" strategy:  allocate twice as much
      * as requested and return an aligned address within this block.
      */
@@ -216,26 +219,26 @@ gc::MapAlignedPages(JSRuntime *rt, size_
 
     uintptr_t addr = reinterpret_cast<uintptr_t>(p);
     addr = (addr + (alignment - 1)) & ~(alignment - 1);
 
     return reinterpret_cast<void *>(addr);
 }
 
 bool
-gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesUnused(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     return true;
 }
 
 bool
-gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     return true;
 }
 
 size_t
 gc::GetPageFaultCount()
 {
     return 0;
 }
@@ -245,89 +248,91 @@ gc::GetPageFaultCount()
 #include <sys/mman.h>
 #include <unistd.h>
 
 #ifndef MAP_NOSYNC
 # define MAP_NOSYNC 0
 #endif
 
 void
-gc::InitMemorySubsystem(JSRuntime *rt)
+gc::InitMemorySubsystem()
 {
-    rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
 void *
-gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+gc::MapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % rt->gcSystemPageSize == 0);
-    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+    JS_ASSERT(size % PageSize == 0);
+    JS_ASSERT(alignment % PageSize == 0);
 
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
 
     void *p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
     if (p == MAP_FAILED)
         return NULL;
     return p;
 }
 
 void
-gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
+gc::UnmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
 }
 
 bool
-gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesUnused(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     return true;
 }
 
 bool
-gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     return true;
 }
 
 size_t
 gc::GetPageFaultCount()
 {
     return 0;
 }
 
 #elif defined(XP_UNIX) || defined(XP_MACOSX) || defined(DARWIN)
 
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <unistd.h>
 
 void
-gc::InitMemorySubsystem(JSRuntime *rt)
+gc::InitMemorySubsystem()
 {
-    rt->gcSystemPageSize = rt->gcSystemAllocGranularity = size_t(sysconf(_SC_PAGESIZE));
+    if (size_t(sysconf(_SC_PAGESIZE)) != PageSize) {
+        fprintf(stderr, "SpiderMonkey compiled with incorrect page size; please update js/public/HeapAPI.h.\n");
+        MOZ_CRASH();
+    }
 }
 
 void *
-gc::MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment)
+gc::MapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % rt->gcSystemPageSize == 0);
-    JS_ASSERT(alignment % rt->gcSystemAllocGranularity == 0);
+    JS_ASSERT(size % PageSize == 0);
+    JS_ASSERT(alignment % PageSize == 0);
 
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANON;
 
     /* Special case: If we want page alignment, no further work is needed. */
-    if (alignment == rt->gcSystemAllocGranularity) {
+    if (alignment == PageSize) {
         return mmap(NULL, size, prot, flags, -1, 0);
     }
 
     /* Overallocate and unmap the region's edges. */
     size_t reqSize = Min(size + 2 * alignment, 2 * size);
     void *region = mmap(NULL, reqSize, prot, flags, -1, 0);
     if (region == MAP_FAILED)
         return NULL;
@@ -343,36 +348,36 @@ gc::MapAlignedPages(JSRuntime *rt, size_
     if (uintptr_t(end) != regionEnd)
         JS_ALWAYS_TRUE(0 == munmap(end, regionEnd - uintptr_t(end)));
 
     JS_ASSERT(uintptr_t(front) % alignment == 0);
     return front;
 }
 
 void
-gc::UnmapPages(JSRuntime *rt, void *p, size_t size)
+gc::UnmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(0 == munmap(p, size));
 }
 
 bool
-gc::MarkPagesUnused(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesUnused(void *p, size_t size)
 {
-    if (!DecommitEnabled(rt))
+    if (!DecommitEnabled())
         return false;
 
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     int result = madvise(p, size, MADV_DONTNEED);
     return result != -1;
 }
 
 bool
-gc::MarkPagesInUse(JSRuntime *rt, void *p, size_t size)
+gc::MarkPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % rt->gcSystemPageSize == 0);
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
     return true;
 }
 
 size_t
 gc::GetPageFaultCount()
 {
     struct rusage usage;
     int err = getrusage(RUSAGE_SELF, &usage);
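For reference, the alignment strategy in the generic-Unix MapAlignedPages above (overallocate, then unmap the misaligned edges) in isolation. This is a hedged standalone sketch with illustrative names, not tree code; reserving size + alignment guarantees an aligned start inside the region, whereas the tree uses Min(size + 2 * alignment, 2 * size):

#include <sys/mman.h>
#include <stdint.h>
#include <stddef.h>

static void *MapAlignedSketch(size_t size, size_t alignment)
{
    size_t reqSize = size + alignment;
    void *region = mmap(NULL, reqSize, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (region == MAP_FAILED)
        return NULL;

    uintptr_t regionStart = (uintptr_t)region;
    uintptr_t front = (regionStart + alignment - 1) & ~(uintptr_t)(alignment - 1);
    uintptr_t end = front + size;

    /* Unmap the misaligned edges; the aligned middle stays mapped. */
    if (front != regionStart)
        munmap(region, front - regionStart);
    if (end != regionStart + reqSize)
        munmap((void *)end, regionStart + reqSize - end);
    return (void *)front;
}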
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -10,37 +10,30 @@
 #include <stddef.h>
 #include "jsgc.h"
 
 namespace js {
 namespace gc {
 
 // Sanity check that our compiled configuration matches the currently running
 // instance and initialize any runtime data needed for allocation.
-void
-InitMemorySubsystem(JSRuntime *rt);
+void InitMemorySubsystem();
 
 // Allocate or deallocate pages from the system with the given alignment.
-void *
-MapAlignedPages(JSRuntime *rt, size_t size, size_t alignment);
-
-void
-UnmapPages(JSRuntime *rt, void *p, size_t size);
+void *MapAlignedPages(size_t size, size_t alignment);
+void UnmapPages(void *p, size_t size);
 
 // Tell the OS that the given pages are not in use, so they should not
 // be written to a paging file. This may be a no-op on some platforms.
-bool
-MarkPagesUnused(JSRuntime *rt, void *p, size_t size);
+bool MarkPagesUnused(void *p, size_t size);
 
 // Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
 // and should be paged in and out normally. This may be a no-op on some
 // platforms.
-bool
-MarkPagesInUse(JSRuntime *rt, void *p, size_t size);
+bool MarkPagesInUse(void *p, size_t size);
 
 // Returns #(hard faults) + #(soft faults)
-size_t
-GetPageFaultCount();
+size_t GetPageFaultCount();
 
 } // namespace gc
 } // namespace js
 
 #endif /* gc_Memory_h */
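With the JSRuntime parameter gone, the header now reads as a process-global API. A minimal usage sketch (illustrative only) of the lifecycle jsgc.cpp drives through it after this backout:

/* Usage sketch only, not tree code. */
#include "gc/Memory.h"
#include "js/HeapAPI.h"

static void ExampleLifecycle()
{
    using namespace js::gc;

    InitMemorySubsystem();          /* crashes if the compiled PageSize is wrong */
    void *p = MapAlignedPages(ChunkSize, ChunkSize);
    if (!p)
        return;
    MarkPagesUnused(p, ArenaSize);  /* advisory; may fail or be a no-op */
    MarkPagesInUse(p, ArenaSize);   /* undo the hint before reuse */
    UnmapPages(p, ChunkSize);
}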
--- a/js/src/ion/AsmJS.cpp
+++ b/js/src/ion/AsmJS.cpp
@@ -1400,17 +1400,17 @@ class MOZ_STACK_CLASS ModuleCompiler
         return exits_.add(p, Move(exitDescriptor), *exitIndex);
     }
     bool addFunctionCounts(IonScriptCounts *counts) {
         return module_->addFunctionCounts(counts);
     }
 
     void setSecondPassComplete() {
         JS_ASSERT(currentPass_ == 2);
-        masm_.align(AsmJSPageSize);
+        masm_.align(gc::PageSize);
         module_->setFunctionBytes(masm_.size());
         currentPass_ = 3;
     }
 
     void setInterpExitOffset(unsigned exitIndex) {
         JS_ASSERT(currentPass_ == 3);
 #if defined(JS_CPU_ARM)
         masm_.flush();
@@ -1473,25 +1473,25 @@ class MOZ_STACK_CLASS ModuleCompiler
         // The global data section sits immediately after the executable (and
         // other) data allocated by the MacroAssembler. Round up bytesNeeded so
         // that doubles/pointers stay aligned.
         size_t codeBytes = AlignBytes(masm_.bytesNeeded(), sizeof(double));
         size_t totalBytes = codeBytes + module_->globalDataBytes();
 
         // The code must be page aligned, so include extra space so that we can
         // AlignBytes the allocation result below.
-        size_t allocedBytes = totalBytes + AsmJSPageSize;
+        size_t allocedBytes = totalBytes + gc::PageSize;
 
         // Allocate the slab of memory.
         JSC::ExecutableAllocator *execAlloc = cx_->compartment()->ionCompartment()->execAlloc();
         JSC::ExecutablePool *pool;
         uint8_t *unalignedBytes = (uint8_t*)execAlloc->alloc(allocedBytes, &pool, JSC::ASMJS_CODE);
         if (!unalignedBytes)
             return false;
-        uint8_t *code = (uint8_t*)AlignBytes((uintptr_t)unalignedBytes, AsmJSPageSize);
+        uint8_t *code = (uint8_t*)AlignBytes((uintptr_t)unalignedBytes, gc::PageSize);
 
         // The ExecutablePool owns the memory and must be released by the AsmJSModule.
         module_->takeOwnership(pool, code, codeBytes, totalBytes);
 
         // Copy the buffer into executable memory (c.f. IonCode::copyFrom).
         masm_.executableCopy(code);
         masm_.processCodeLabels(code);
         JS_ASSERT(masm_.jumpRelocationTableBytes() == 0);
@@ -6105,19 +6105,16 @@ EnsureAsmJSSignalHandlersInstalled(JSRun
 bool
 js::CompileAsmJS(JSContext *cx, TokenStream &ts, ParseNode *fn, const CompileOptions &options,
                  ScriptSource *scriptSource, uint32_t bufStart, uint32_t bufEnd,
                  MutableHandleFunction moduleFun)
 {
     if (!JSC::MacroAssembler().supportsFloatingPoint())
         return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by lack of floating point support");
 
-    if (cx->runtime()->gcSystemPageSize != AsmJSPageSize)
-        return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by non 4KiB system page size");
-
     if (!cx->hasOption(JSOPTION_ASMJS))
         return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by javascript.options.asmjs in about:config");
 
     if (cx->compartment()->debugMode())
         return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by debugger");
 
     if (!EnsureAsmJSSignalHandlersInstalled(cx->runtime()))
         return Warn(cx, JSMSG_USE_ASM_TYPE_FAIL, "Platform missing signal handler support");
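The code-allocation hunk above relies on the usual power-of-two round-up: allocating totalBytes + gc::PageSize guarantees a page-aligned span of totalBytes somewhere inside. In isolation (AlignUp is an illustrative stand-in for the tree's AlignBytes helper):

#include <stdint.h>
#include <stddef.h>

/* alignment must be a power of two. */
static inline uintptr_t AlignUp(uintptr_t x, size_t alignment)
{
    return (x + alignment - 1) & ~(uintptr_t)(alignment - 1);
}

/*
 * Example: AlignUp(0x12345, 0x1000) == 0x13000, so a buffer of
 * totalBytes + PageSize bytes always contains a page-aligned run
 * of totalBytes bytes.
 */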
--- a/js/src/ion/AsmJS.h
+++ b/js/src/ion/AsmJS.h
@@ -84,19 +84,16 @@ class AsmJSActivation
 
     // Initialized by JIT code:
     static unsigned offsetOfErrorRejoinSP() { return offsetof(AsmJSActivation, errorRejoinSP_); }
 
     // Set from SIGSEGV handler:
     void setResumePC(void *pc) { resumePC_ = pc; }
 };
 
-// The assumed page size; dynamically checked in CompileAsmJS.
-const size_t AsmJSPageSize = 4096;
-
 // The asm.js spec requires that the ArrayBuffer's byteLength be a multiple of 4096.
 static const size_t AsmJSAllocationGranularity = 4096;
 
 #ifdef JS_CPU_X64
 // On x64, the internal ArrayBuffer data array is inflated to 4GiB (only the
 // byteLength portion of which is accessible) so that out-of-bounds accesses
 // (made using a uint32 index) are guaranteed to raise a SIGSEGV.
 static const size_t AsmJSBufferProtectedSize = 4 * 1024ULL * 1024ULL * 1024ULL;
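The 4 GiB guard region works by reserving address space with no access rights and committing only the valid prefix, as the jstypedarray.cpp hunks below show. A standalone sketch of the same technique (POSIX; names and parameters are illustrative, not tree code):

#include <sys/mman.h>
#include <stddef.h>

static void *ReserveWithGuard(size_t validBytes, size_t reservedBytes)
{
    /* Reserve the whole range with no access: any stray load/store faults. */
    void *p = mmap(NULL, reservedBytes, PROT_NONE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)
        return NULL;
    /* Enable access to the valid prefix only. */
    if (mprotect(p, validBytes, PROT_READ | PROT_WRITE)) {
        munmap(p, reservedBytes);
        return NULL;
    }
    return p;
}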
--- a/js/src/ion/AsmJSModule.h
+++ b/js/src/ion/AsmJSModule.h
@@ -593,22 +593,22 @@ class AsmJSModule
                numFuncPtrTableElems_ * sizeof(void*) +
                exitIndex * sizeof(ExitDatum);
     }
     ExitDatum &exitIndexToGlobalDatum(unsigned exitIndex) const {
         return *(ExitDatum *)(globalData() + exitIndexToGlobalDataOffset(exitIndex));
     }
 
     void setFunctionBytes(size_t functionBytes) {
-        JS_ASSERT(functionBytes % AsmJSPageSize == 0);
+        JS_ASSERT(functionBytes % gc::PageSize == 0);
         functionBytes_ = functionBytes;
     }
     size_t functionBytes() const {
         JS_ASSERT(functionBytes_);
-        JS_ASSERT(functionBytes_ % AsmJSPageSize == 0);
+        JS_ASSERT(functionBytes_ % gc::PageSize == 0);
         return functionBytes_;
     }
     bool containsPC(void *pc) const {
         uint8_t *code = functionCode();
         return pc >= code && pc < (code + functionBytes());
     }
 
     bool addHeapAccesses(const ion::AsmJSHeapAccessVector &accesses) {
@@ -651,25 +651,25 @@ class AsmJSModule
     const ion::AsmJSBoundsCheck &boundsCheck(unsigned i) const {
         return boundsChecks_[i];
     }
 #endif
 
 
 
     void takeOwnership(JSC::ExecutablePool *pool, uint8_t *code, size_t codeBytes, size_t totalBytes) {
-        JS_ASSERT(uintptr_t(code) % AsmJSPageSize == 0);
+        JS_ASSERT(uintptr_t(code) % gc::PageSize == 0);
         codePool_ = pool;
         code_ = code;
         codeBytes_ = codeBytes;
         totalBytes_ = totalBytes;
     }
     uint8_t *functionCode() const {
         JS_ASSERT(code_);
-        JS_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
+        JS_ASSERT(uintptr_t(code_) % gc::PageSize == 0);
         return code_;
     }
 
     void setOperationCallbackExit(uint8_t *ptr) {
         operationCallbackExit_ = ptr;
     }
     uint8_t *operationCallbackExit() const {
         return operationCallbackExit_;
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -1135,16 +1135,18 @@ JS_NewRuntime(uint32_t maxbytes, JSUseHe
                 ++numfmtspecs;                                                \
         }                                                                     \
         JS_ASSERT(count == numfmtspecs);                                      \
     JS_END_MACRO;
 #include "js.msg"
 #undef MSG_DEF
 #endif /* DEBUG */
 
+        InitMemorySubsystem();
+
         if (!js::TlsPerThreadData.init())
             return NULL;
 
         js_NewRuntimeWasCalled = JS_TRUE;
     }
 
     JSRuntime *rt = js_new<JSRuntime>(useHelperThreads);
     if (!rt)
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -1133,25 +1133,16 @@ struct JSRuntime : public JS::shadow::Ru
     JSTraceDataOp       gcBlackRootsTraceOp;
     void                *gcBlackRootsData;
     JSTraceDataOp       gcGrayRootsTraceOp;
     void                *gcGrayRootsData;
 
     /* Stack of thread-stack-allocated GC roots. */
     js::AutoGCRooter   *autoGCRooters;
 
-    /*
-     * The GC can only safely decommit memory when the page size of the
-     * running process matches the compiled arena size.
-     */
-    size_t              gcSystemPageSize;
-
-    /* The OS allocation granularity may not match the page size. */
-    size_t              gcSystemAllocGranularity;
-
     /* Strong references on scripts held for PCCount profiling API. */
     js::ScriptAndCountsVector *scriptAndCountsVector;
 
     /* Well-known numbers held for use by this runtime's contexts. */
     js::Value           NaNValue;
     js::Value           negativeInfinityValue;
     js::Value           positiveInfinityValue;
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -452,25 +452,23 @@ FinalizeArenas(FreeOp *fop,
 #endif
       default:
         JS_NOT_REACHED("Invalid alloc kind");
         return true;
     }
 }
 
 static inline Chunk *
-AllocChunk(JSRuntime *rt)
-{
-    return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
+AllocChunk() {
+    return static_cast<Chunk *>(MapAlignedPages(ChunkSize, ChunkSize));
 }
 
 static inline void
-FreeChunk(JSRuntime *rt, Chunk *p)
-{
-    UnmapPages(rt, static_cast<void *>(p), ChunkSize);
+FreeChunk(Chunk *p) {
+    UnmapPages(static_cast<void *>(p), ChunkSize);
 }
 
 inline bool
 ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
 {
     /*
      * To minimize memory waste we do not want to run the background chunk
      * allocation if we have empty chunks or when the runtime needs just few
@@ -550,64 +548,64 @@ ChunkPool::expire(JSRuntime *rt, bool re
             chunkp = &chunk->info.next;
         }
     }
     JS_ASSERT_IF(releaseAll, !emptyCount);
     return freeList;
 }
 
 static void
-FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
+FreeChunkList(Chunk *chunkListHead)
 {
     while (Chunk *chunk = chunkListHead) {
         JS_ASSERT(!chunk->info.numArenasFreeCommitted);
         chunkListHead = chunk->info.next;
-        FreeChunk(rt, chunk);
+        FreeChunk(chunk);
     }
 }
 
 void
 ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
 {
-    FreeChunkList(rt, expire(rt, releaseAll));
+    FreeChunkList(expire(rt, releaseAll));
 }
 
 /* static */ Chunk *
 Chunk::allocate(JSRuntime *rt)
 {
-    Chunk *chunk = AllocChunk(rt);
+    Chunk *chunk = static_cast<Chunk *>(AllocChunk());
 
 #ifdef JSGC_ROOT_ANALYSIS
     // Our poison pointers are not guaranteed to be invalid on 64-bit
     // architectures, and often are valid. We can't just reserve the full
     // poison range, because it might already have been taken up by something
     // else (shared library, previous allocation). So we'll just loop and
     // discard poison pointers until we get something valid.
     //
     // This leaks all of these poisoned pointers. It would be better if they
     // were marked as uncommitted, but it's a little complicated to avoid
     // clobbering pre-existing unrelated mappings.
     while (IsPoisonedPtr(chunk))
-        chunk = AllocChunk(rt);
+        chunk = static_cast<Chunk *>(AllocChunk());
 #endif
 
     if (!chunk)
         return NULL;
     chunk->init(rt);
     rt->gcStats.count(gcstats::STAT_NEW_CHUNK);
     return chunk;
 }
 
 /* Must be called with the GC lock taken. */
 /* static */ inline void
 Chunk::release(JSRuntime *rt, Chunk *chunk)
 {
     JS_ASSERT(chunk);
     chunk->prepareToBeFreed(rt);
-    FreeChunk(rt, chunk);
+    FreeChunk(chunk);
 }
 
 inline void
 Chunk::prepareToBeFreed(JSRuntime *rt)
 {
     JS_ASSERT(rt->gcNumArenasFreeCommitted >= info.numArenasFreeCommitted);
     rt->gcNumArenasFreeCommitted -= info.numArenasFreeCommitted;
     rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);
@@ -725,17 +723,17 @@ Chunk::fetchNextDecommittedArena()
     JS_ASSERT(info.numArenasFree > 0);
 
     unsigned offset = findDecommittedArenaOffset();
     info.lastDecommittedArenaOffset = offset + 1;
     --info.numArenasFree;
     decommittedArenas.unset(offset);
 
     Arena *arena = &arenas[offset];
-    MarkPagesInUse(info.runtime, arena, ArenaSize);
+    MarkPagesInUse(arena, ArenaSize);
     arena->aheader.setAsNotAllocated();
 
     return &arena->aheader;
 }
 
 inline ArenaHeader *
 Chunk::fetchNextFreeArena(JSRuntime *rt)
 {
@@ -917,18 +915,16 @@ InitGCZeal(JSRuntime *rt)
 #endif
 
 /* Lifetime for type sets attached to scripts containing observed types. */
 static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
 
 JSBool
 js_InitGC(JSRuntime *rt, uint32_t maxbytes)
 {
-    InitMemorySubsystem(rt);
-
     if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
         return false;
 
     if (!rt->gcRootsHash.init(256))
         return false;
 
 #ifdef JS_THREADSAFE
     rt->gcLock = PR_NewLock();
@@ -2046,17 +2042,17 @@ DecommitArenasFromAvailableList(JSRuntim
                 /*
                  * If the main thread waits for the decommit to finish, skip
                  * potentially expensive unlock/lock pair on the contested
                  * lock.
                  */
                 Maybe<AutoUnlockGC> maybeUnlock;
                 if (!rt->isHeapBusy())
                     maybeUnlock.construct(rt);
-                ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize);
+                ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
             }
 
             if (ok) {
                 ++chunk->info.numArenasFree;
                 chunk->decommittedArenas.set(arenaIndex);
             } else {
                 chunk->addArenaToFreeList(rt, aheader);
             }
@@ -2076,17 +2072,17 @@ DecommitArenasFromAvailableList(JSRuntim
                     if (!prev->hasAvailableArenas())
                         insertPoint = availableListHeadp;
                 }
                 chunk->insertToAvailableList(insertPoint);
             } else {
                 JS_ASSERT(chunk->info.prevp);
             }
 
-            if (rt->gcChunkAllocationSinceLastGC || !ok) {
+            if (rt->gcChunkAllocationSinceLastGC) {
                 /*
                  * The allocator thread has started to get new chunks. We should stop
                  * to avoid decommitting arenas in just allocated chunks.
                  */
                 return;
             }
         }
 
@@ -2114,17 +2110,17 @@ DecommitArenas(JSRuntime *rt)
 }
 
 /* Must be called with the GC lock taken. */
 static void
 ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
 {
     if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) {
         AutoUnlockGC unlock(rt);
-        FreeChunkList(rt, toFree);
+        FreeChunkList(toFree);
     }
 
     if (shouldShrink)
         DecommitArenas(rt);
 }
 
 static void
 SweepBackgroundThings(JSRuntime* rt, bool onBackgroundThread)
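Two behavioral details are visible in the jsgc.cpp hunks above: MarkPagesUnused now returns false when decommit is disabled, so the arena goes back on the free list rather than being marked decommitted, and the loop no longer bails out early on a failed decommit (the || !ok condition is gone). On the generic Unix path a successful decommit is just an madvise hint; a standalone sketch (illustrative, not tree code):

#include <sys/mman.h>
#include <stddef.h>

static bool DecommitSketch(void *p, size_t size)
{
    /*
     * Only valid when the arena size equals the page size, as
     * DecommitEnabled() above checks. The pages stay mapped, but the
     * kernel may drop their backing store until they are touched again.
     */
    return madvise(p, size, MADV_DONTNEED) != -1;
}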
--- a/js/src/jstypedarray.cpp
+++ b/js/src/jstypedarray.cpp
@@ -351,19 +351,19 @@ ArrayBufferObject::uninlineData(JSContex
 //   |<------------------------------ 4GB + 1 pages --------------------->|
 //           |<--- sizeof --->|<------------------- 4GB ----------------->|
 //
 //   | waste | ObjectElements | data array | inaccessible reserved memory |
 //                            ^            ^                              ^
 //                            |            \                             /
 //                      obj->elements       required to be page boundaries
 //
-JS_STATIC_ASSERT(sizeof(ObjectElements) < AsmJSPageSize);
-JS_STATIC_ASSERT(AsmJSAllocationGranularity == AsmJSPageSize);
-static const size_t AsmJSMappedSize = AsmJSPageSize + AsmJSBufferProtectedSize;
+JS_STATIC_ASSERT(sizeof(ObjectElements) < PageSize);
+JS_STATIC_ASSERT(AsmJSAllocationGranularity == PageSize);
+static const size_t AsmJSMappedSize = PageSize + AsmJSBufferProtectedSize;
 
 bool
 ArrayBufferObject::prepareForAsmJS(JSContext *cx, Handle<ArrayBufferObject*> buffer)
 {
     if (buffer->isAsmJSArrayBuffer())
         return true;
 
     // Get the entire reserved region (with all pages inaccessible).
@@ -376,29 +376,29 @@ ArrayBufferObject::prepareForAsmJS(JSCon
     p = mmap(NULL, AsmJSMappedSize, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
     if (p == MAP_FAILED)
         return false;
 # endif
 
     // Enable access to the valid region.
     JS_ASSERT(buffer->byteLength() % AsmJSAllocationGranularity == 0);
 # ifdef XP_WIN
-    if (!VirtualAlloc(p, AsmJSPageSize + buffer->byteLength(), MEM_COMMIT, PAGE_READWRITE)) {
+    if (!VirtualAlloc(p, PageSize + buffer->byteLength(), MEM_COMMIT, PAGE_READWRITE)) {
         VirtualFree(p, 0, MEM_RELEASE);
         return false;
     }
 # else
-    if (mprotect(p, AsmJSPageSize + buffer->byteLength(), PROT_READ | PROT_WRITE)) {
+    if (mprotect(p, PageSize + buffer->byteLength(), PROT_READ | PROT_WRITE)) {
         munmap(p, AsmJSMappedSize);
         return false;
     }
 # endif
 
     // Copy over the current contents of the typed array.
-    uint8_t *data = reinterpret_cast<uint8_t*>(p) + AsmJSPageSize;
+    uint8_t *data = reinterpret_cast<uint8_t*>(p) + PageSize;
     memcpy(data, buffer->dataPointer(), buffer->byteLength());
 
     // Swap the new elements into the ArrayBufferObject.
     ObjectElements *newHeader = reinterpret_cast<ObjectElements*>(data - sizeof(ObjectElements));
     ObjectElements *oldHeader = buffer->hasDynamicElements() ? buffer->getElementsHeader() : NULL;
     buffer->changeContents(cx, newHeader);
     js_free(oldHeader);
 
@@ -410,18 +410,18 @@ ArrayBufferObject::prepareForAsmJS(JSCon
 }
 
 void
 ArrayBufferObject::releaseAsmJSArrayBuffer(FreeOp *fop, JSObject *obj)
 {
     ArrayBufferObject &buffer = obj->as<ArrayBufferObject>();
     JS_ASSERT(buffer.isAsmJSArrayBuffer());
 
-    uint8_t *p = buffer.dataPointer() - AsmJSPageSize ;
-    JS_ASSERT(uintptr_t(p) % AsmJSPageSize == 0);
+    uint8_t *p = buffer.dataPointer() - PageSize;
+    JS_ASSERT(uintptr_t(p) % PageSize == 0);
 # ifdef XP_WIN
     VirtualFree(p, 0, MEM_RELEASE);
 # else
     munmap(p, AsmJSMappedSize);
 # endif
 }
 
 void