Bug 1502733 - Part 3: Tune new allocator to play nice with WASM. r=sfink
author Emanuel Hoogeveen <emanuel.hoogeveen@protonmail.com>
Mon, 14 Jan 2019 12:14:00 +0200
changeset 511022 e5d3da4bdf58d26a21b311b6e02673c61aa84226
parent 511021 ac29aabfda36a42731a20f6ee687cdf739ab293f
child 511023 970559b6fcde9a19e47617bdcbb05a1788d17d72
push id 10547
push user ffxbld-merge
push date Mon, 21 Jan 2019 13:03:58 +0000
treeherder mozilla-beta@24ec1916bffe
reviewers sfink
bugs 1502733
milestone 66.0a1
Bug 1502733 - Part 3: Tune new allocator to play nice with WASM. r=sfink
js/src/gc/Memory.cpp
js/src/gc/Memory.h
js/src/jsapi-tests/testGCAllocator.cpp
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -9,17 +9,17 @@
 #include "mozilla/Atomics.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/RandomNum.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 
 #include "js/HeapAPI.h"
 #include "vm/Runtime.h"
 
-#if defined(XP_WIN)
+#ifdef XP_WIN
 
 #include "util/Windows.h"
 #include <psapi.h>
 
 #else
 
 #include <algorithm>
 #include <errno.h>
@@ -38,20 +38,18 @@ namespace gc {
  * System allocation functions generally require the allocation size
  * to be an integer multiple of the page size of the running process.
  */
 static size_t pageSize = 0;
 
 /* The OS allocation granularity may not match the page size. */
 static size_t allocGranularity = 0;
 
-#if defined(JS_64BIT)
-static size_t minValidAddress = 0;
-static size_t maxValidAddress = 0;
-#endif
+/* The number of bits used by addresses on this platform. */
+static size_t numAddressBits = 0;
 
 /*
  * System allocation functions may hand out regions of memory in increasing or
  * decreasing order. This ordering is used as a hint during chunk alignment to
  * reduce the number of system calls. On systems with 48-bit addresses, our
  * workarounds to obtain 47-bit pointers cause addresses to be handed out in
  * increasing order.
  *
@@ -64,22 +62,79 @@ static mozilla::Atomic<int, mozilla::Rel
                        mozilla::recordreplay::Behavior::DontPreserve>
     growthDirection(1);
 #elif defined(XP_UNIX)
 static mozilla::Atomic<int, mozilla::Relaxed,
                        mozilla::recordreplay::Behavior::DontPreserve>
     growthDirection(0);
 #endif
 
+/*
+ * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
+ * chunks available in low memory situations. These chunks may all need to be
+ * used up before we gain access to remaining *alignable* chunk-sized regions,
+ * so we use a generous limit of 32 unusable chunks to ensure we reach them.
+ */
+static const int MaxLastDitchAttempts = 32;
+
+#ifdef JS_64BIT
+/*
+ * On some 64-bit platforms we can use a random, scattershot allocator that
+ * tries addresses from the available range at random. If the address range
+ * is large enough this will have a high chance of success and additionally
+ * makes the memory layout of our process less predictable.
+ *
+ * However, not all 64-bit platforms have a very large address range. For
+ * example, AArch64 on Linux defaults to using 39-bit addresses to limit the
+ * number of translation tables used. On such configurations the scattershot
+ * approach to allocation creates a conflict with our desire to reserve large
+ * regions of memory for applications like WebAssembly: Small allocations may
+ * inadvertently block off all available 4-6GiB regions, and conversely
+ * reserving such regions may lower the success rate for smaller allocations to
+ * unacceptable levels.
+ *
+ * So we make a compromise: Instead of using the scattershot on all 64-bit
+ * platforms, we only use it on platforms that meet a minimum requirement for
+ * the available address range. In addition we split the address range,
+ * reserving the upper half for huge allocations and the lower half for smaller
+ * allocations. We use a limit of 43 bits so that at least 42 bits are available
+ * for huge allocations - this matches the 8TiB per process address space limit
+ * that we're already subject to on Windows.
+ */
+static const size_t MinAddressBitsForRandomAlloc = 43;
+
+/* The lower limit for huge allocations. This is fairly arbitrary. */
+static const size_t HugeAllocationSize = 1024 * 1024 * 1024;
+
+/* The minimum and maximum valid addresses that can be allocated into. */
+static size_t minValidAddress = 0;
+static size_t maxValidAddress = 0;
+
+/* The upper limit for smaller allocations and the lower limit for huge ones. */
+static size_t hugeSplit = 0;
+#endif
+
+size_t SystemPageSize() { return pageSize; }
+
+size_t SystemAddressBits() { return numAddressBits; }
+
+bool UsingScattershotAllocator() {
+#ifdef JS_64BIT
+  return numAddressBits >= MinAddressBitsForRandomAlloc;
+#else
+  return false;
+#endif
+}
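
As an aside, the threshold and the half-split described in the comment above are easy to verify in isolation. Below is a minimal standalone sketch (not part of the patch); the constant mirrors MinAddressBitsForRandomAlloc, and the example bit widths are assumptions for illustration.

// Standalone sketch of the 43-bit threshold and the half-split described
// above. The bit widths below are assumed examples.
#include <cstdint>
#include <cstdio>

int main() {
  const size_t MinAddressBitsForRandomAlloc = 43;  // mirrors the patch
  const size_t widths[] = {39, 43, 47};
  for (size_t bits : widths) {
    // Addresses with |bits| significant bits run from 0 to 2^bits - 1; the
    // patch splits that range in half at 2^(bits - 1).
    uint64_t split = (UINT64_C(1) << (bits - 1)) - 1;
    bool scattershot = bits >= MinAddressBitsForRandomAlloc;
    printf("%2zu bits: scattershot=%d, small allocations go below %#llx\n",
           bits, int(scattershot), (unsigned long long)split);
  }
  return 0;
}
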
+
 enum class Commit : bool {
   No = false,
   Yes = true,
 };
 
-#if defined(XP_WIN)
+#ifdef XP_WIN
 enum class PageAccess : DWORD {
   None = PAGE_NOACCESS,
   Read = PAGE_READONLY,
   ReadWrite = PAGE_READWRITE,
   Execute = PAGE_EXECUTE,
   ReadExecute = PAGE_EXECUTE_READ,
   ReadWriteExecute = PAGE_EXECUTE_READWRITE,
 };
@@ -89,58 +144,47 @@ enum class PageAccess : int {
   Read = PROT_READ,
   ReadWrite = PROT_READ | PROT_WRITE,
   Execute = PROT_EXEC,
   ReadExecute = PROT_READ | PROT_EXEC,
   ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
 };
 #endif
 
-#if !defined(JS_64BIT)
-/*
- * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
- * chunks available in low memory situations. These chunks may all need to be
- * used up before we gain access to remaining *alignable* chunk-sized regions,
- * so we use a generous limit of 32 unusable chunks to ensure we reach them.
- */
-static const int MaxLastDitchAttempts = 32;
-#endif
-
 template <bool AlwaysGetNew = true>
 static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                             size_t length, size_t alignment);
 
-#if defined(JS_64BIT)
+static void* MapAlignedPagesSlow(size_t length, size_t alignment);
+static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
+
+#ifdef JS_64BIT
 static void* MapAlignedPagesRandom(size_t length, size_t alignment);
 void* TestMapAlignedPagesLastDitch(size_t, size_t) { return nullptr; }
 #else
-static void* MapAlignedPagesSlow(size_t length, size_t alignment);
-static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
 void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
   return MapAlignedPagesLastDitch(length, alignment);
 }
 #endif
 
-size_t SystemPageSize() { return pageSize; }
-
 /*
  * We can only decommit unused pages if the hardcoded Arena
  * size matches the page size for the running process.
  */
 static inline bool DecommitEnabled() { return pageSize == ArenaSize; }
 
 /* Returns the offset from the nearest aligned address at or below |region|. */
 static inline size_t OffsetFromAligned(void* region, size_t alignment) {
   return uintptr_t(region) % alignment;
 }
 
 template <Commit commit, PageAccess prot>
 static inline void* MapInternal(void* desired, size_t length) {
   void* region = nullptr;
-#if defined(XP_WIN)
+#ifdef XP_WIN
   DWORD flags =
       (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
   region = VirtualAlloc(desired, length, flags, DWORD(prot));
 #else
   int flags = MAP_PRIVATE | MAP_ANON;
   region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
                                   "js-gc-heap");
   if (region == MAP_FAILED) {
@@ -149,42 +193,50 @@ static inline void* MapInternal(void* de
 #endif
   return region;
 }
 
 static inline void UnmapInternal(void* region, size_t length) {
   MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
   MOZ_ASSERT(length > 0 && length % pageSize == 0);
 
-#if defined(XP_WIN)
+#ifdef XP_WIN
   MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
 #else
   if (munmap(region, length)) {
     MOZ_RELEASE_ASSERT(errno == ENOMEM);
   }
 #endif
 }
 
 template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
 static inline void* MapMemory(size_t length) {
   MOZ_ASSERT(length > 0);
 
   return MapInternal<commit, prot>(nullptr, length);
 }
 
+/*
+ * Attempts to map memory at the given address, but allows the system
+ * to return a different address that may still be suitable.
+ */
 template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
 static inline void* MapMemoryAtFuzzy(void* desired, size_t length) {
   MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
   MOZ_ASSERT(length > 0);
 
   // Note that some platforms treat the requested address as a hint, so the
   // returned address might not match the requested address.
   return MapInternal<commit, prot>(desired, length);
 }
 
+/*
+ * Attempts to map memory at the given address, returning nullptr if
+ * the system returns any address other than the requested one.
+ */
 template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
 static inline void* MapMemoryAt(void* desired, size_t length) {
   MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
   MOZ_ASSERT(length > 0);
 
   void* region = MapInternal<commit, prot>(desired, length);
   if (!region) {
     return nullptr;
@@ -194,17 +246,17 @@ static inline void* MapMemoryAt(void* de
   // check that the address we got is the address we requested.
   if (region != desired) {
     UnmapInternal(region, length);
     return nullptr;
   }
   return region;
 }
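
The contract difference between the two helpers above matters for the allocation paths further down. The snippet below is a hypothetical caller, not part of the patch; ReserveNearHint is a made-up name, and |hint| must be allocation-granularity aligned as both helpers assert.

// Hypothetical caller (illustration only) contrasting the two helpers above:
// MapMemoryAt() yields nullptr unless the exact address was obtained, while
// MapMemoryAtFuzzy() may return a mapping anywhere the OS chooses.
static void* ReserveNearHint(void* hint, size_t length) {
  if (void* exact = MapMemoryAt(hint, length)) {
    return exact;  // Got precisely the address we asked for.
  }
  // Let the OS pick; the result may be unrelated to |hint| but is still a
  // valid read-write reservation of |length| bytes (or nullptr on failure).
  return MapMemoryAtFuzzy(hint, length);
}
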
 
-#if defined(JS_64BIT)
+#ifdef JS_64BIT
 
 /* Returns a random number in the given range. */
 static inline uint64_t GetNumberInRange(uint64_t minNum, uint64_t maxNum) {
   const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
   maxNum -= minNum;
   uint64_t binSize = 1 + (MaxRand - maxNum) / (maxNum + 1);
 
   uint64_t rndNum;
@@ -214,56 +266,57 @@ static inline uint64_t GetNumberInRange(
       result = mozilla::RandomUint64();
     } while (!result);
     rndNum = result.value() / binSize;
   } while (rndNum > maxNum);
 
   return minNum + rndNum;
 }
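
The binSize computation above is the usual rejection-sampling trick for drawing uniformly from a range without modulo bias. A scaled-down standalone sketch (illustration only, using a pretend 4-bit random source instead of mozilla::RandomUint64()) makes the arithmetic easy to follow.

// Standalone sketch of the binning scheme used above, scaled down to a 4-bit
// random source. Drawing from [minNum, maxNum] = [10, 12] means 3 outcomes;
// with MaxRand = 15 the bin size is 1 + (15 - 2) / 3 = 5, so raw draws
// 0-4 -> 10, 5-9 -> 11, 10-14 -> 12, and a raw draw of 15 (bin index 3) is
// rejected and retried.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t minNum = 10, maxNum = 12;
  const uint64_t MaxRand = 15;  // pretend the RNG yields 4-bit values
  uint64_t range = maxNum - minNum;                        // 2
  uint64_t binSize = 1 + (MaxRand - range) / (range + 1);  // 5
  assert(binSize == 5);
  for (uint64_t raw = 0; raw <= MaxRand; ++raw) {
    uint64_t bin = raw / binSize;  // bins 0, 1, 2 are accepted; 3 is rejected
    if (bin <= range) {
      assert(minNum + bin >= 10 && minNum + bin <= 12);
    }
  }
  return 0;
}
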
 
-#if !defined(XP_WIN)
+#ifndef XP_WIN
 /*
  * The address range available to applications depends on both hardware and
  * kernel configuration. For example, AArch64 on Linux uses addresses with
  * 39 significant bits by default, but can be configured to use addresses with
  * 48 significant bits by enabling a 4th translation table. Unfortunately,
  * there appears to be no standard way to query the limit at runtime
  * (Windows exposes this via GetSystemInfo()).
  *
  * This function tries to find the address limit by performing a binary search
  * on the index of the most significant set bit in the addresses it attempts to
  * allocate. As the requested address is often treated as a hint by the
  * operating system, we use the actual returned addresses to narrow the range.
+ * The return value is the number of bits that may be set in a valid address.
  */
-static uint64_t FindAddressLimit() {
+static size_t FindAddressLimit() {
   const size_t length = allocGranularity;  // Used as both length and alignment.
 
   void* address;
   uint64_t startRaw, endRaw, start, end, desired, actual;
 
   // Use 32 bits as a lower bound in case we keep getting nullptr.
   size_t low = 31;
   uint64_t highestSeen = (UINT64_C(1) << 32) - length - 1;
 
-  // Start with addresses that have bit 46 set.
-  size_t high = 46;
+  // Start with addresses that have bit 47 set.
+  size_t high = 47;
   startRaw = UINT64_C(1) << high;
   endRaw = 2 * startRaw - length - 1;
   start = (startRaw + length - 1) / length;
   end = (endRaw - (length - 1)) / length;
 
   for (size_t tries = 0; tries < 4; ++tries) {
     desired = length * GetNumberInRange(start, end);
     address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
     actual = uint64_t(address);
     if (address) {
       UnmapInternal(address, length);
     }
     if (actual >= startRaw) {
-      return endRaw;  // Return early and skip the binary search.
+      return high + 1;  // Return early and skip the binary search.
     }
     if (actual > highestSeen) {
       highestSeen = actual;
       low = mozilla::FloorLog2(highestSeen);
     }
   }
 
   // Those didn't work, so perform a binary search.
@@ -292,55 +345,60 @@ static uint64_t FindAddressLimit() {
 
     // Low was already updated above, so just check if we need to update high.
     if (actual < startRaw) {
       high = middle;
     }
   }
 
   // High was excluded, so use low (but sanity check it).
-  startRaw = UINT64_C(1) << std::min(low, size_t(46));
-  endRaw = 2 * startRaw - length - 1;
-  return endRaw;
+  return std::min(low + 1, size_t(47));
 }
 #endif  // !defined(XP_WIN)
 
 #endif  // defined(JS_64BIT)
 
 void InitMemorySubsystem() {
   if (pageSize == 0) {
-#if defined(XP_WIN)
+#ifdef XP_WIN
     SYSTEM_INFO sysinfo;
     GetSystemInfo(&sysinfo);
     pageSize = sysinfo.dwPageSize;
     allocGranularity = sysinfo.dwAllocationGranularity;
 #else
     pageSize = size_t(sysconf(_SC_PAGESIZE));
     allocGranularity = pageSize;
 #endif
-#if defined(JS_64BIT)
-#if defined(XP_WIN)
+#ifdef JS_64BIT
+#ifdef XP_WIN
     minValidAddress = size_t(sysinfo.lpMinimumApplicationAddress);
     maxValidAddress = size_t(sysinfo.lpMaximumApplicationAddress);
+    numAddressBits = mozilla::FloorLog2(maxValidAddress) + 1;
 #else
     // No standard way to determine these, so fall back to FindAddressLimit().
+    numAddressBits = FindAddressLimit();
     minValidAddress = allocGranularity;
-    maxValidAddress = FindAddressLimit() - allocGranularity;
+    maxValidAddress = (UINT64_C(1) << numAddressBits) - 1 - allocGranularity;
 #endif
     // Sanity check the address to ensure we don't use more than 47 bits.
     uint64_t maxJSAddress = UINT64_C(0x00007fffffffffff) - allocGranularity;
     if (maxValidAddress > maxJSAddress) {
       maxValidAddress = maxJSAddress;
+      hugeSplit = UINT64_C(0x00003fffffffffff) - allocGranularity;
+    } else {
+      hugeSplit = (UINT64_C(1) << (numAddressBits - 1)) - 1 - allocGranularity;
     }
+#else  // !defined(JS_64BIT)
+    numAddressBits = 32;
 #endif
   }
 }
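
To make the non-Windows branch above concrete, here is a worked numeric example as a standalone sketch (not part of the patch), assuming FindAddressLimit() reported 39 bits and a 4 KiB allocation granularity.

// Worked example (illustration only) of the non-Windows path above, assuming
// 39 address bits and a 4 KiB allocation granularity.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t allocGranularity = 0x1000;  // 4 KiB (assumed)
  const uint64_t numAddressBits = 39;        // from FindAddressLimit()
  uint64_t minValidAddress = allocGranularity;
  uint64_t maxValidAddress =
      (UINT64_C(1) << numAddressBits) - 1 - allocGranularity;
  // 39 bits is below the 47-bit JS limit, so the split is simply the middle.
  uint64_t hugeSplit =
      (UINT64_C(1) << (numAddressBits - 1)) - 1 - allocGranularity;
  assert(maxValidAddress == UINT64_C(0x7fffffefff));
  assert(hugeSplit == UINT64_C(0x3fffffefff));
  (void)minValidAddress;
  return 0;
}
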
 
+#ifdef JS_64BIT
 /* The JS engine uses 47-bit pointers; all higher bits must be clear. */
-#if defined(JS_64BIT)
 static inline bool IsInvalidRegion(void* region, size_t length) {
   const uint64_t invalidPointerMask = UINT64_C(0xffff800000000000);
   return (uintptr_t(region) + length - 1) & invalidPointerMask;
 }
 #endif
 
 void* MapAlignedPages(size_t length, size_t alignment) {
   MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
@@ -349,21 +407,28 @@ void* MapAlignedPages(size_t length, siz
                          std::min(alignment, allocGranularity) ==
                      0);
 
   // Smaller alignments aren't supported by the allocation functions.
   if (alignment < allocGranularity) {
     alignment = allocGranularity;
   }
 
-#if defined(JS_64BIT)
-  void* region = MapAlignedPagesRandom(length, alignment);
+#ifdef JS_64BIT
+  // Use the scattershot allocator if the address range is large enough.
+  if (UsingScattershotAllocator()) {
+    void* region = MapAlignedPagesRandom(length, alignment);
 
-  MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
-#else
+    MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
+    MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
+
+    return region;
+  }
+#endif
+
   void* region = MapMemory(length);
   if (OffsetFromAligned(region, alignment) == 0) {
     return region;
   }
 
   void* retainedRegion;
   TryToAlignChunk(&region, &retainedRegion, length, alignment);
   if (retainedRegion) {
@@ -375,27 +440,54 @@ void* MapAlignedPages(size_t length, siz
     }
     UnmapInternal(region, length);
   }
 
   region = MapAlignedPagesSlow(length, alignment);
   if (!region) {
     region = MapAlignedPagesLastDitch(length, alignment);
   }
-#endif
 
   MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
   return region;
 }
 
-#if defined(JS_64BIT)
+#ifdef JS_64BIT
 
+/*
+ * This allocator takes advantage of the large address range on some 64-bit
+ * platforms to allocate in a scattershot manner, choosing addresses at random
+ * from the range. By controlling the range we can avoid returning addresses
+ * that have more than 47 significant bits (as required by SpiderMonkey).
+ * This approach also has some other advantages over the methods employed by
+ * the other allocation functions in this file:
+ * 1) Allocations are extremely likely to succeed on the first try.
+ * 2) The randomness makes our memory layout harder to predict.
+ * 3) The low probability of reusing regions guards against use-after-free.
+ *
+ * The main downside is that detecting physical OOM situations becomes more
+ * difficult; to guard against this, we occasionally try a regular allocation.
+ * In addition, sprinkling small allocations throughout the full address range
+ * might get in the way of large address space reservations such as those
+ * employed by WebAssembly. To avoid this (or the opposite problem of such
+ * reservations reducing the chance of success for smaller allocations) we
+ * split the address range in half, with one half reserved for huge allocations
+ * and the other for regular (usually chunk-sized) allocations.
+ */
 static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
-  uint64_t minNum = (minValidAddress + alignment - 1) / alignment;
-  uint64_t maxNum = (maxValidAddress - (length - 1)) / alignment;
+  uint64_t minNum, maxNum;
+  if (length < HugeAllocationSize) {
+    // Use the lower half of the range.
+    minNum = (minValidAddress + alignment - 1) / alignment;
+    maxNum = (hugeSplit - (length - 1)) / alignment;
+  } else {
+    // Use the upper half of the range.
+    minNum = (hugeSplit + 1 + alignment - 1) / alignment;
+    maxNum = (maxValidAddress - (length - 1)) / alignment;
+  }
 
   // Try to allocate in random aligned locations.
   void* region = nullptr;
   for (size_t i = 1; i <= 1024; ++i) {
     if (i & 0xf) {
       uint64_t desired = alignment * GetNumberInRange(minNum, maxNum);
       region = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
       if (!region) {
@@ -419,38 +511,49 @@ static void* MapAlignedPagesRandom(size_
     if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
       MOZ_ASSERT(!retainedRegion);
       return region;
     }
     MOZ_ASSERT(region && !retainedRegion);
     UnmapInternal(region, length);
   }
 
-  MOZ_CRASH("Couldn't allocate even after 1000 tries!");
+  if (numAddressBits < 48) {
+    // Try the reliable fallback of overallocating.
+    // Note: This will not respect the address space split.
+    region = MapAlignedPagesSlow(length, alignment);
+    if (region) {
+      return region;
+    }
+  }
+  if (length < HugeAllocationSize) {
+    MOZ_CRASH("Couldn't allocate even after 1000 tries!");
+  }
+
   return nullptr;
 }
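
The rounding at the top of MapAlignedPagesRandom converts the usable byte range into a range of alignment-sized slot indices. Below is a worked example as a standalone sketch (not part of the patch), assuming a 43-bit configuration and a regular 1 MiB, 1 MiB-aligned request; the concrete sizes are assumptions.

// Worked example (illustration only) of the slot-index arithmetic at the top
// of MapAlignedPagesRandom, for a regular allocation in the lower half.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t minValidAddress = 0x1000;  // assumed allocation granularity
  const uint64_t hugeSplit = (UINT64_C(1) << 42) - 1 - 0x1000;  // 43-bit split
  const uint64_t length = 0x100000;     // 1 MiB request (below the huge limit)
  const uint64_t alignment = 0x100000;  // 1 MiB alignment
  // Round the lower bound up and the upper bound down to alignment multiples,
  // leaving room for |length| bytes below the split.
  uint64_t minNum = (minValidAddress + alignment - 1) / alignment;  // 1
  uint64_t maxNum = (hugeSplit - (length - 1)) / alignment;
  assert(minNum == 1);
  assert(maxNum == (UINT64_C(1) << 22) - 2);  // ~4.2 million candidate slots
  return 0;
}
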
 
-#else  // !defined(JS_64BIT)
+#endif  // defined(JS_64BIT)
 
 static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
   void* alignedRegion = nullptr;
   do {
     size_t reserveLength = length + alignment - pageSize;
-#if defined(XP_WIN)
+#ifdef XP_WIN
     // Don't commit the requested pages as we won't use the region directly.
     void* region = MapMemory<Commit::No>(reserveLength);
 #else
     void* region = MapMemory(reserveLength);
 #endif
     if (!region) {
       return nullptr;
     }
     alignedRegion =
         reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
-#if defined(XP_WIN)
+#ifdef XP_WIN
     // Windows requires that map and unmap calls be matched, so deallocate
     // and immediately reallocate at the desired (aligned) address.
     UnmapInternal(region, reserveLength);
     alignedRegion = MapMemoryAt(alignedRegion, length);
 #else
     // munmap allows us to simply unmap the pages that don't interest us.
     if (alignedRegion != region) {
       UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
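
As background for the over-reservation used by MapAlignedPagesSlow above, the following standalone sketch (not part of the patch) checks that reserving length + alignment - pageSize bytes always leaves room for an aligned span, whichever page-aligned address the OS returns; the concrete sizes and base address are assumptions.

// Standalone check (illustration only) of the over-reservation invariant:
// round_up(region, alignment) + length never overruns the reservation.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t pageSize = 0x1000;     // 4 KiB (assumed)
  const uint64_t length = 0x100000;     // 1 MiB request
  const uint64_t alignment = 0x100000;  // 1 MiB alignment
  const uint64_t reserveLength = length + alignment - pageSize;
  const uint64_t base = UINT64_C(0x7f0000000000);  // arbitrary page-aligned base
  for (uint64_t region = base; region < base + alignment; region += pageSize) {
    uint64_t aligned = (region + alignment - 1) & ~(alignment - 1);
    assert(aligned + length <= region + reserveLength);
  }
  return 0;
}
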
@@ -498,19 +601,17 @@ static void* MapAlignedPagesLastDitch(si
     region = nullptr;
   }
   while (--attempt >= 0) {
     UnmapInternal(tempMaps[attempt], length);
   }
   return region;
 }
 
-#endif  // defined(JS_64BIT)
-
-#if defined(XP_WIN)
+#ifdef XP_WIN
 
 /*
  * On Windows, map and unmap calls must be matched, so we deallocate the
  * unaligned chunk, then reallocate the unaligned part to block off the
  * old address and force the allocator to give us a new one.
  */
 template <bool>
 static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
@@ -649,17 +750,17 @@ void MarkPagesInUse(void* region, size_t
 
   MOZ_MAKE_MEM_UNDEFINED(region, length);
 }
 
 size_t GetPageFaultCount() {
   if (mozilla::recordreplay::IsRecordingOrReplaying()) {
     return 0;
   }
-#if defined(XP_WIN)
+#ifdef XP_WIN
   PROCESS_MEMORY_COUNTERS pmc;
   if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
     return 0;
   }
   return pmc.PageFaultCount;
 #else
   struct rusage usage;
   int err = getrusage(RUSAGE_SELF, &usage);
@@ -684,17 +785,17 @@ void* AllocateMappedContent(int fd, size
 
   // We preallocate the mapping using MapAlignedPages, which expects
   // the length parameter to be an integer multiple of the page size.
   size_t mappedLength = alignedLength;
   if (alignedLength % pageSize != 0) {
     mappedLength += pageSize - alignedLength % pageSize;
   }
 
-#if defined(XP_WIN)
+#ifdef XP_WIN
   HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
 
   // This call will fail if the file does not exist.
   HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
   if (!hMap) {
     return nullptr;
   }
 
@@ -723,17 +824,17 @@ void* AllocateMappedContent(int fd, size
 
   // This just decreases the file mapping object's internal reference count;
   // it won't actually be destroyed until we unmap the associated view.
   CloseHandle(hMap);
 
   if (!map) {
     return nullptr;
   }
-#else
+#else  // !defined(XP_WIN)
   // Sanity check the offset and length, as mmap does not do this for us.
   struct stat st;
   if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
       length > uint64_t(st.st_size) - offset) {
     return nullptr;
   }
 
   void* region = MapAlignedPages(mappedLength, alignment);
@@ -744,17 +845,17 @@ void* AllocateMappedContent(int fd, size
   // Calling mmap with MAP_FIXED will replace the previous mapping, allowing
   // us to reuse the region we obtained without racing with other threads.
   uint8_t* map =
       static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
   MOZ_RELEASE_ASSERT(map != MAP_FAILED);
 #endif
 
-#if defined(DEBUG)
+#ifdef DEBUG
   // Zero out data before and after the desired mapping to catch errors early.
   if (offset != alignedOffset) {
     memset(map, 0, offset - alignedOffset);
   }
   if (alignedLength % pageSize) {
     memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
   }
 #endif
@@ -770,30 +871,30 @@ void DeallocateMappedContent(void* regio
   // Due to bug 1502562, the following assertion does not currently hold.
   // MOZ_RELEASE_ASSERT(length > 0);
 
   // Calculate the address originally returned by the system call.
   // This is needed because AllocateMappedContent returns a pointer
   // that might be offset from the mapping, as the beginning of a
   // mapping must be aligned with the allocation granularity.
   uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
-#if defined(XP_WIN)
+#ifdef XP_WIN
   MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
 #else
   size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
   if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
     MOZ_RELEASE_ASSERT(errno == ENOMEM);
   }
 #endif
 }
 
 static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
   MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
   MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
-#if defined(XP_WIN)
+#ifdef XP_WIN
   DWORD oldProtect;
   MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
                      0);
 #else
   MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
 #endif
 }
 
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -11,18 +11,27 @@
 
 namespace js {
 namespace gc {
 
 // Sanity check that our compiled configuration matches the currently
 // running instance and initialize any runtime data needed for allocation.
 void InitMemorySubsystem();
 
+// The page size as reported by the operating system.
 size_t SystemPageSize();
 
+// The number of bits that may be set in a valid address, as
+// reported by the operating system or measured at startup.
+size_t SystemAddressBits();
+
+// The scattershot allocator is used on platforms that have a large address
+// range. On these platforms we allocate at random addresses.
+bool UsingScattershotAllocator();
+
 // Allocate or deallocate pages from the system with the given alignment.
 void* MapAlignedPages(size_t size, size_t alignment);
 void UnmapPages(void* p, size_t size);
 
 // Tell the OS that the given pages are not in use, so they should not be
 // written to a paging file. This may be a no-op on some platforms.
 bool MarkPagesUnused(void* p, size_t size);
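
The three query functions added to this header are consulted by both the allocator and the tests. A hypothetical external caller (illustration only; ChooseReservationStrategy is a made-up name) might use them as sketched below.

// Hypothetical caller (illustration only) of the query functions added above.
#include "gc/Memory.h"
#include "mozilla/Assertions.h"

enum class ReservationStrategy { Scattershot, AlignBySearch };

static ReservationStrategy ChooseReservationStrategy() {
  if (js::gc::UsingScattershotAllocator()) {
    // The scattershot path is only enabled when the measured address range is
    // wide enough (at least 43 bits in this patch).
    MOZ_ASSERT(js::gc::SystemAddressBits() >= 43);
    return ReservationStrategy::Scattershot;
  }
  // Otherwise the allocator falls back to the align-by-retry and
  // over-reservation paths, sized in multiples of js::gc::SystemPageSize().
  return ReservationStrategy::AlignBySearch;
}
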
 
--- a/js/src/jsapi-tests/testGCAllocator.cpp
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -21,39 +21,35 @@
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
 #endif
 
 BEGIN_TEST(testGCAllocator) {
 #ifdef JS_64BIT
-  /* On 64-bit platforms we use very different logic. */
-  return true;
-#else
-  size_t PageSize = 0;
-#if defined(XP_WIN)
-  SYSTEM_INFO sysinfo;
-  GetSystemInfo(&sysinfo);
-  PageSize = sysinfo.dwPageSize;
-#else
-  PageSize = size_t(sysconf(_SC_PAGESIZE));
+  // If we're using the scattershot allocator, this test does not apply.
+  if (js::gc::UsingScattershotAllocator()) {
+    return true;
+  }
 #endif
 
+  size_t PageSize = js::gc::SystemPageSize();
+
   /* Finish any ongoing background free activity. */
   js::gc::FinishGC(cx);
 
   bool growUp;
   CHECK(addressesGrowUp(&growUp));
 
   if (growUp) {
     return testGCAllocatorUp(PageSize);
+  } else {
+    return testGCAllocatorDown(PageSize);
   }
-  return testGCAllocatorDown(PageSize);
-#endif
 }
 
 static const size_t Chunk = 512 * 1024;
 static const size_t Alignment = 2 * Chunk;
 static const int MaxTempChunks = 4096;
 static const size_t StagingSize = 16 * Chunk;
 
 bool addressesGrowUp(bool* resultOut) {