Bug 1502733 - Part 2: Allocate at randomly chosen aligned addresses on 64-bit platforms. r=sfink
author Emanuel Hoogeveen <emanuel.hoogeveen@protonmail.com>
Sun, 13 Jan 2019 23:10:00 +0200
changeset 513903 ac29aabfda36a42731a20f6ee687cdf739ab293f
parent 513902 32aab5bf983a6245b581819421f76770ddd666e7
child 513904 e5d3da4bdf58d26a21b311b6e02673c61aa84226
push id 1953
push user ffxbld-merge
push date Mon, 11 Mar 2019 12:10:20 +0000
treeherder mozilla-release@9c35dcbaa899
reviewers sfink
bugs 1502733
milestone 66.0a1
Bug 1502733 - Part 2: Allocate at randomly chosen aligned addresses on 64-bit platforms. r=sfink
js/src/gc/Memory.cpp
js/src/jsapi-tests/testGCAllocator.cpp
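
For orientation before the diff itself, here is a minimal standalone sketch of the strategy this patch adopts on 64-bit platforms: pick a random multiple of the alignment inside the usable address range, ask the OS to map exactly there, and fall back to an OS-chosen mapping if the random probes keep failing. This is an illustration, not the patch's code; the retry count, the std::mt19937_64 RNG, and the mmap-based MapAt helper are assumptions (the patch uses mozilla::RandomUint64() and its own per-OS mapping wrappers, and re-aligns the fallback region instead of returning it directly).

```cpp
// Illustrative sketch only -- simplified from the approach in this patch.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <random>

static void* MapAt(uintptr_t desired, size_t length) {
  void* p = mmap(reinterpret_cast<void*>(desired), length,
                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}

void* MapAlignedRandomSketch(size_t length, size_t alignment,
                             uintptr_t minAddr, uintptr_t maxAddr) {
  std::mt19937_64 rng{std::random_device{}()};
  // Candidate addresses are expressed as multiples of the alignment.
  uint64_t lo = (minAddr + alignment - 1) / alignment;
  uint64_t hi = (maxAddr - (length - 1)) / alignment;
  std::uniform_int_distribution<uint64_t> pick(lo, hi);

  for (int i = 0; i < 64; ++i) {
    uintptr_t desired = uintptr_t(pick(rng)) * alignment;
    void* region = MapAt(desired, length);
    if (!region) {
      continue;  // That spot (or the system) refused; try another.
    }
    if (reinterpret_cast<uintptr_t>(region) % alignment == 0) {
      return region;  // The kernel honored (or luckily matched) the hint.
    }
    munmap(region, length);  // Hint was treated as advisory; discard and retry.
  }
  // Give up on randomization and take whatever the OS offers; the real code
  // then tries to shift such a region into alignment rather than return it.
  return MapAt(0, length);
}
```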
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -2,16 +2,18 @@
  * vim: set ts=8 sts=2 et sw=2 tw=80:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "gc/Memory.h"
 
 #include "mozilla/Atomics.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/RandomNum.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 
 #include "js/HeapAPI.h"
 #include "vm/Runtime.h"
 
 #if defined(XP_WIN)
 
 #include "util/Windows.h"
@@ -36,31 +38,33 @@ namespace gc {
  * System allocation functions generally require the allocation size
  * to be an integer multiple of the page size of the running process.
  */
 static size_t pageSize = 0;
 
 /* The OS allocation granularity may not match the page size. */
 static size_t allocGranularity = 0;
 
+#if defined(JS_64BIT)
+static size_t minValidAddress = 0;
+static size_t maxValidAddress = 0;
+#endif
+
 /*
  * System allocation functions may hand out regions of memory in increasing or
  * decreasing order. This ordering is used as a hint during chunk alignment to
  * reduce the number of system calls. On systems with 48-bit addresses, our
  * workarounds to obtain 47-bit pointers cause addresses to be handed out in
  * increasing order.
  *
  * We do not use the growth direction on Windows, as constraints on VirtualAlloc
  * would make its application failure prone and complex. Tests indicate that
  * VirtualAlloc always hands out regions of memory in increasing order.
  */
-#if defined(XP_DARWIN) ||                                               \
-    (!defined(XP_WIN) && (defined(__ia64__) || defined(__aarch64__) ||  \
-                          (defined(__sparc__) && defined(__arch64__) && \
-                           (defined(__NetBSD__) || defined(__linux__)))))
+#if defined(XP_DARWIN)
 static mozilla::Atomic<int, mozilla::Relaxed,
                        mozilla::recordreplay::Behavior::DontPreserve>
     growthDirection(1);
 #elif defined(XP_UNIX)
 static mozilla::Atomic<int, mozilla::Relaxed,
                        mozilla::recordreplay::Behavior::DontPreserve>
     growthDirection(0);
 #endif
@@ -85,182 +89,281 @@ enum class PageAccess : int {
   Read = PROT_READ,
   ReadWrite = PROT_READ | PROT_WRITE,
   Execute = PROT_EXEC,
   ReadExecute = PROT_READ | PROT_EXEC,
   ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
 };
 #endif
 
+#if !defined(JS_64BIT)
 /*
  * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
  * chunks available in low memory situations. These chunks may all need to be
  * used up before we gain access to remaining *alignable* chunk-sized regions,
  * so we use a generous limit of 32 unusable chunks to ensure we reach them.
  */
 static const int MaxLastDitchAttempts = 32;
+#endif
 
-static void TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+template <bool AlwaysGetNew = true>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                             size_t length, size_t alignment);
+
+#if defined(JS_64BIT)
+static void* MapAlignedPagesRandom(size_t length, size_t alignment);
+void* TestMapAlignedPagesLastDitch(size_t, size_t) { return nullptr; }
+#else
 static void* MapAlignedPagesSlow(size_t length, size_t alignment);
 static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
+void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
+  return MapAlignedPagesLastDitch(length, alignment);
+}
+#endif
 
 size_t SystemPageSize() { return pageSize; }
 
 /*
  * We can only decommit unused pages if the hardcoded Arena
  * size matches the page size for the running process.
  */
 static inline bool DecommitEnabled() { return pageSize == ArenaSize; }
 
 /* Returns the offset from the nearest aligned address at or below |region|. */
 static inline size_t OffsetFromAligned(void* region, size_t alignment) {
   return uintptr_t(region) % alignment;
 }
 
-void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
-  return MapAlignedPagesLastDitch(length, alignment);
-}
-
-void InitMemorySubsystem() {
-  if (pageSize == 0) {
-#if defined(XP_WIN)
-    SYSTEM_INFO sysinfo;
-    GetSystemInfo(&sysinfo);
-    pageSize = sysinfo.dwPageSize;
-    allocGranularity = sysinfo.dwAllocationGranularity;
-#else
-    pageSize = size_t(sysconf(_SC_PAGESIZE));
-    allocGranularity = pageSize;
-#endif
-  }
-}
-
 template <Commit commit, PageAccess prot>
 static inline void* MapInternal(void* desired, size_t length) {
+  void* region = nullptr;
 #if defined(XP_WIN)
   DWORD flags =
       (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
-  return VirtualAlloc(desired, length, flags, DWORD(prot));
+  region = VirtualAlloc(desired, length, flags, DWORD(prot));
 #else
   int flags = MAP_PRIVATE | MAP_ANON;
-  void* region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1,
-                                        0, "js-gc-heap");
+  region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
+                                  "js-gc-heap");
   if (region == MAP_FAILED) {
     return nullptr;
   }
+#endif
   return region;
-#endif
 }
 
 static inline void UnmapInternal(void* region, size_t length) {
   MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
   MOZ_ASSERT(length > 0 && length % pageSize == 0);
 
 #if defined(XP_WIN)
   MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
 #else
   if (munmap(region, length)) {
     MOZ_RELEASE_ASSERT(errno == ENOMEM);
   }
 #endif
 }
 
-/* The JS engine uses 47-bit pointers; all higher bits must be clear. */
-static inline bool IsInvalidRegion(void* region, size_t length) {
-  const uint64_t invalidPointerMask = UINT64_C(0xffff800000000000);
-  return (uintptr_t(region) + length - 1) & invalidPointerMask;
-}
-
 template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
 static inline void* MapMemory(size_t length) {
   MOZ_ASSERT(length > 0);
 
-#if defined(XP_WIN)
   return MapInternal<commit, prot>(nullptr, length);
-#elif defined(__ia64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__NetBSD__))
-  // These platforms allow addresses with more than 47 bits. However, their
-  // mmap treats the first parameter as a hint of what address to allocate.
-  // If the region starting at the exact address passed is not available, the
-  // closest available region above it will be returned. Thus we supply a base
-  // address of 0x0000070000000000, 121 TiB below our 47-bit limit.
-  const uintptr_t hint = UINT64_C(0x0000070000000000);
-  void* region =
-      MapInternal<commit, prot>(reinterpret_cast<void*>(hint), length);
+}
 
-  // If we're given a region that ends above the 47-bit limit,
-  // treat it as running out of memory.
-  if (IsInvalidRegion(region, length)) {
-    UnmapInternal(region, length);
-    return nullptr;
-  }
-  return region;
-#elif defined(__aarch64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-  // These platforms allow addresses with more than 47 bits. Unlike above,
-  // their mmap does not treat its first parameter as a hint, so we're forced
-  // to loop through valid regions manually. The following logic is far from
-  // ideal and may cause allocation to take a long time on these platforms.
-  const uintptr_t start = UINT64_C(0x0000070000000000);
-  const uintptr_t end = UINT64_C(0x0000800000000000);
-  const uintptr_t step = ChunkSize;
-  uintptr_t desired;
-  void* region = nullptr;
-  for (desired = start; !region && desired + length <= end; desired += step) {
-    region =
-        MapInternal<commit, prot>(reinterpret_cast<void*>(desired), length);
-    // If mmap on this platform *does* treat the supplied address as a hint,
-    // this platform should be using the logic above!
-    if (region) {
-      MOZ_RELEASE_ASSERT(uintptr_t(region) == desired);
-    }
-  }
-  return region;
-#else
-  return MapInternal<commit, prot>(nullptr, length);
-#endif
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemoryAtFuzzy(void* desired, size_t length) {
+  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
+  MOZ_ASSERT(length > 0);
+
+  // Note that some platforms treat the requested address as a hint, so the
+  // returned address might not match the requested address.
+  return MapInternal<commit, prot>(desired, length);
 }
 
 template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
 static inline void* MapMemoryAt(void* desired, size_t length) {
   MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
   MOZ_ASSERT(length > 0);
 
-#if defined(XP_WIN)
-  return MapInternal<commit, prot>(desired, length);
-#else
-#if defined(__ia64__) || defined(__aarch64__) ||  \
-    (defined(__sparc__) && defined(__arch64__) && \
-     (defined(__NetBSD__) || defined(__linux__)))
-  MOZ_RELEASE_ASSERT(!IsInvalidRegion(desired, length));
-#endif
-
   void* region = MapInternal<commit, prot>(desired, length);
   if (!region) {
     return nullptr;
   }
 
   // On some platforms mmap treats the desired address as a hint, so
   // check that the address we got is the address we requested.
   if (region != desired) {
     UnmapInternal(region, length);
     return nullptr;
   }
   return region;
+}
+
+#if defined(JS_64BIT)
+
+/* Returns a random number in the given range. */
+static inline uint64_t GetNumberInRange(uint64_t minNum, uint64_t maxNum) {
+  const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
+  maxNum -= minNum;
+  uint64_t binSize = 1 + (MaxRand - maxNum) / (maxNum + 1);
+
+  uint64_t rndNum;
+  do {
+    mozilla::Maybe<uint64_t> result;
+    do {
+      result = mozilla::RandomUint64();
+    } while (!result);
+    rndNum = result.value() / binSize;
+  } while (rndNum > maxNum);
+
+  return minNum + rndNum;
+}
+
+#if !defined(XP_WIN)
+/*
+ * The address range available to applications depends on both hardware and
+ * kernel configuration. For example, AArch64 on Linux uses addresses with
+ * 39 significant bits by default, but can be configured to use addresses with
+ * 48 significant bits by enabling a 4th translation table. Unfortunately,
+ * there appears to be no standard way to query the limit at runtime
+ * (Windows exposes this via GetSystemInfo()).
+ *
+ * This function tries to find the address limit by performing a binary search
+ * on the index of the most significant set bit in the addresses it attempts to
+ * allocate. As the requested address is often treated as a hint by the
+ * operating system, we use the actual returned addresses to narrow the range.
+ */
+static uint64_t FindAddressLimit() {
+  const size_t length = allocGranularity;  // Used as both length and alignment.
+
+  void* address;
+  uint64_t startRaw, endRaw, start, end, desired, actual;
+
+  // Use 32 bits as a lower bound in case we keep getting nullptr.
+  size_t low = 31;
+  uint64_t highestSeen = (UINT64_C(1) << 32) - length - 1;
+
+  // Start with addresses that have bit 46 set.
+  size_t high = 46;
+  startRaw = UINT64_C(1) << high;
+  endRaw = 2 * startRaw - length - 1;
+  start = (startRaw + length - 1) / length;
+  end = (endRaw - (length - 1)) / length;
+
+  for (size_t tries = 0; tries < 4; ++tries) {
+    desired = length * GetNumberInRange(start, end);
+    address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+    actual = uint64_t(address);
+    if (address) {
+      UnmapInternal(address, length);
+    }
+    if (actual >= startRaw) {
+      return endRaw;  // Return early and skip the binary search.
+    }
+    if (actual > highestSeen) {
+      highestSeen = actual;
+      low = mozilla::FloorLog2(highestSeen);
+    }
+  }
+
+  // Those didn't work, so perform a binary search.
+  while (high - 1 > low) {
+    size_t middle = low + (high - low) / 2;
+    startRaw = UINT64_C(1) << middle;
+    endRaw = 2 * startRaw - length - 1;
+    start = (startRaw + length - 1) / length;
+    end = (endRaw - (length - 1)) / length;
+
+    for (size_t tries = 0; tries < 4; ++tries) {
+      desired = length * GetNumberInRange(start, end);
+      address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+      actual = uint64_t(address);
+      if (address) {
+        UnmapInternal(address, length);
+      }
+      if (actual > highestSeen) {
+        highestSeen = actual;
+        low = mozilla::FloorLog2(highestSeen);
+      }
+      if (actual >= startRaw) {
+        break;
+      }
+    }
+
+    // Low was already updated above, so just check if we need to update high.
+    if (actual < startRaw) {
+      high = middle;
+    }
+  }
+
+  // High was excluded, so use low (but sanity check it).
+  startRaw = UINT64_C(1) << std::min(low, size_t(46));
+  endRaw = 2 * startRaw - length - 1;
+  return endRaw;
+}
+#endif  // !defined(XP_WIN)
+
+#endif  // defined(JS_64BIT)
+
+void InitMemorySubsystem() {
+  if (pageSize == 0) {
+#if defined(XP_WIN)
+    SYSTEM_INFO sysinfo;
+    GetSystemInfo(&sysinfo);
+    pageSize = sysinfo.dwPageSize;
+    allocGranularity = sysinfo.dwAllocationGranularity;
+#else
+    pageSize = size_t(sysconf(_SC_PAGESIZE));
+    allocGranularity = pageSize;
 #endif
+#if defined(JS_64BIT)
+#if defined(XP_WIN)
+    minValidAddress = size_t(sysinfo.lpMinimumApplicationAddress);
+    maxValidAddress = size_t(sysinfo.lpMaximumApplicationAddress);
+#else
+    // No standard way to determine these, so fall back to FindAddressLimit().
+    minValidAddress = allocGranularity;
+    maxValidAddress = FindAddressLimit() - allocGranularity;
+#endif
+    // Sanity check the address to ensure we don't use more than 47 bits.
+    uint64_t maxJSAddress = UINT64_C(0x00007fffffffffff) - allocGranularity;
+    if (maxValidAddress > maxJSAddress) {
+      maxValidAddress = maxJSAddress;
+    }
+#endif
+  }
 }
 
+/* The JS engine uses 47-bit pointers; all higher bits must be clear. */
+#if defined(JS_64BIT)
+static inline bool IsInvalidRegion(void* region, size_t length) {
+  const uint64_t invalidPointerMask = UINT64_C(0xffff800000000000);
+  return (uintptr_t(region) + length - 1) & invalidPointerMask;
+}
+#endif
+
 void* MapAlignedPages(size_t length, size_t alignment) {
   MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
   MOZ_RELEASE_ASSERT(length % pageSize == 0);
   MOZ_RELEASE_ASSERT(std::max(alignment, allocGranularity) %
                          std::min(alignment, allocGranularity) ==
                      0);
 
+  // Smaller alignments aren't supported by the allocation functions.
+  if (alignment < allocGranularity) {
+    alignment = allocGranularity;
+  }
+
+#if defined(JS_64BIT)
+  void* region = MapAlignedPagesRandom(length, alignment);
+
+  MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
+#else
   void* region = MapMemory(length);
   if (OffsetFromAligned(region, alignment) == 0) {
     return region;
   }
 
   void* retainedRegion;
   TryToAlignChunk(&region, &retainedRegion, length, alignment);
   if (retainedRegion) {
@@ -270,23 +373,68 @@ void* MapAlignedPages(size_t length, siz
     if (OffsetFromAligned(region, alignment) == 0) {
       return region;
     }
     UnmapInternal(region, length);
   }
 
   region = MapAlignedPagesSlow(length, alignment);
   if (!region) {
-    return MapAlignedPagesLastDitch(length, alignment);
+    region = MapAlignedPagesLastDitch(length, alignment);
   }
+#endif
 
   MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
   return region;
 }
 
+#if defined(JS_64BIT)
+
+static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
+  uint64_t minNum = (minValidAddress + alignment - 1) / alignment;
+  uint64_t maxNum = (maxValidAddress - (length - 1)) / alignment;
+
+  // Try to allocate in random aligned locations.
+  void* region = nullptr;
+  for (size_t i = 1; i <= 1024; ++i) {
+    if (i & 0xf) {
+      uint64_t desired = alignment * GetNumberInRange(minNum, maxNum);
+      region = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+      if (!region) {
+        continue;
+      }
+    } else {
+      // Check for OOM.
+      region = MapMemory(length);
+      if (!region) {
+        return nullptr;
+      }
+    }
+    if (IsInvalidRegion(region, length)) {
+      UnmapInternal(region, length);
+      continue;
+    }
+    if (OffsetFromAligned(region, alignment) == 0) {
+      return region;
+    }
+    void* retainedRegion = nullptr;
+    if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
+      MOZ_ASSERT(!retainedRegion);
+      return region;
+    }
+    MOZ_ASSERT(region && !retainedRegion);
+    UnmapInternal(region, length);
+  }
+
+  MOZ_CRASH("Couldn't allocate even after 1000 tries!");
+  return nullptr;
+}
+
+#else  // !defined(JS_64BIT)
+
 static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
   void* alignedRegion = nullptr;
   do {
     size_t reserveLength = length + alignment - pageSize;
 #if defined(XP_WIN)
     // Don't commit the requested pages as we won't use the region directly.
     void* region = MapMemory<Commit::No>(reserveLength);
 #else
@@ -332,76 +480,86 @@ static void* MapAlignedPagesSlow(size_t 
 static void* MapAlignedPagesLastDitch(size_t length, size_t alignment) {
   void* tempMaps[MaxLastDitchAttempts];
   int attempt = 0;
   void* region = MapMemory(length);
   if (OffsetFromAligned(region, alignment) == 0) {
     return region;
   }
   for (; attempt < MaxLastDitchAttempts; ++attempt) {
-    TryToAlignChunk(&region, tempMaps + attempt, length, alignment);
-    if (OffsetFromAligned(region, alignment) == 0) {
-      if (tempMaps[attempt]) {
-        UnmapInternal(tempMaps[attempt], length);
-      }
-      break;
+    if (TryToAlignChunk(&region, tempMaps + attempt, length, alignment)) {
+      MOZ_ASSERT(!tempMaps[attempt]);
+      break;  // Success!
     }
-    if (!tempMaps[attempt]) {
-      break;  // Bail if TryToAlignChunk failed.
+    if (!region || !tempMaps[attempt]) {
+      break;  // We ran out of memory, so give up.
     }
   }
   if (OffsetFromAligned(region, alignment)) {
     UnmapInternal(region, length);
     region = nullptr;
   }
   while (--attempt >= 0) {
     UnmapInternal(tempMaps[attempt], length);
   }
   return region;
 }
 
+#endif  // defined(JS_64BIT)
+
 #if defined(XP_WIN)
 
 /*
  * On Windows, map and unmap calls must be matched, so we deallocate the
  * unaligned chunk, then reallocate the unaligned part to block off the
  * old address and force the allocator to give us a new one.
  */
-static void TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+template <bool>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                             size_t length, size_t alignment) {
   void* region = *aRegion;
   MOZ_ASSERT(region && OffsetFromAligned(region, alignment) != 0);
 
+  size_t retainedLength = 0;
   void* retainedRegion = nullptr;
   do {
     size_t offset = OffsetFromAligned(region, alignment);
     if (offset == 0) {
       // If the address is aligned, either we hit OOM or we're done.
       break;
     }
     UnmapInternal(region, length);
-    size_t retainedLength = alignment - offset;
+    retainedLength = alignment - offset;
     retainedRegion = MapMemoryAt<Commit::No>(region, retainedLength);
     region = MapMemory(length);
 
     // If retainedRegion is null here, we raced with another thread.
   } while (!retainedRegion);
+
+  bool result = OffsetFromAligned(region, alignment) == 0;
+  if (result && retainedRegion) {
+    UnmapInternal(retainedRegion, retainedLength);
+    retainedRegion = nullptr;
+  }
+
   *aRegion = region;
   *aRetainedRegion = retainedRegion;
+  return region && result;
 }
 
 #else  // !defined(XP_WIN)
 
 /*
  * mmap calls don't have to be matched with calls to munmap, so we can unmap
  * just the pages we don't need. However, as we don't know a priori if addresses
  * are handed out in increasing or decreasing order, we have to try both
  * directions (depending on the environment, one will always fail).
  */
-static void TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+template <bool AlwaysGetNew>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                             size_t length, size_t alignment) {
   void* regionStart = *aRegion;
   MOZ_ASSERT(regionStart && OffsetFromAligned(regionStart, alignment) != 0);
 
   bool addressesGrowUpward = growthDirection > 0;
   bool directionUncertain = -8 < growthDirection && growthDirection <= 8;
   size_t offsetLower = OffsetFromAligned(regionStart, alignment);
   size_t offsetUpper = alignment - offsetLower;
@@ -433,24 +591,28 @@ static void TryToAlignChunk(void** aRegi
       }
     }
     // If we're confident in the growth direction, don't try the other.
     if (!directionUncertain) {
       break;
     }
     addressesGrowUpward = !addressesGrowUpward;
   }
-  // If our current chunk cannot be aligned, just get a new one.
+
   void* retainedRegion = nullptr;
-  if (OffsetFromAligned(regionStart, alignment) != 0) {
+  bool result = OffsetFromAligned(regionStart, alignment) == 0;
+  if (AlwaysGetNew && !result) {
+    // If our current chunk cannot be aligned, just get a new one.
     retainedRegion = regionStart;
     regionStart = MapMemory(length);
   }
+
   *aRegion = regionStart;
   *aRetainedRegion = retainedRegion;
+  return regionStart && result;
 }
 
 #endif
 
 void UnmapPages(void* region, size_t length) {
   MOZ_RELEASE_ASSERT(region &&
                      OffsetFromAligned(region, allocGranularity) == 0);
   MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
@@ -582,17 +744,17 @@ void* AllocateMappedContent(int fd, size
   // Calling mmap with MAP_FIXED will replace the previous mapping, allowing
   // us to reuse the region we obtained without racing with other threads.
   uint8_t* map =
       static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
   MOZ_RELEASE_ASSERT(map != MAP_FAILED);
 #endif
 
-#ifdef DEBUG
+#if defined(DEBUG)
   // Zero out data before and after the desired mapping to catch errors early.
   if (offset != alignedOffset) {
     memset(map, 0, offset - alignedOffset);
   }
   if (alignedLength % pageSize) {
     memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
   }
 #endif
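
Before the test changes, one detail of the new Memory.cpp code is worth spelling out: GetNumberInRange() avoids modulo bias by splitting the 2^64 space of raw random values into equally sized bins and rejecting any draw that falls past the last complete bin. Below is a hedged standalone sketch of the same scheme; the std::mt19937_64 generator stands in for mozilla::RandomUint64() and is an assumption of the example.

```cpp
// Unbiased sampling in [minNum, maxNum] via binning + rejection, mirroring
// the shape of GetNumberInRange() above. The RNG source is a stand-in.
#include <cstdint>
#include <random>

uint64_t NumberInRangeSketch(uint64_t minNum, uint64_t maxNum) {
  const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
  uint64_t span = maxNum - minNum;  // number of valid outcomes, minus one
  // Give each outcome an equally sized bin of raw 64-bit values.
  uint64_t binSize = 1 + (MaxRand - span) / (span + 1);

  std::mt19937_64 rng{std::random_device{}()};
  uint64_t sample;
  do {
    sample = rng() / binSize;  // map a raw value to its bin index
  } while (sample > span);     // reject indices past the last full bin
  return minNum + sample;
}
```

The division (rather than a modulus) keeps the mapping from raw values to outcomes uniform, and a rejection only happens when the raw value lands in the leftover region past the last complete bin, which occurs with probability below (span + 1) / 2^64, so the loop almost never needs a second draw.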
--- a/js/src/jsapi-tests/testGCAllocator.cpp
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -9,58 +9,51 @@
 
 #include "gc/GCInternals.h"
 #include "gc/Memory.h"
 #include "jsapi-tests/tests.h"
 
 #if defined(XP_WIN)
 #include "util/Windows.h"
 #include <psapi.h>
-#elif defined(SOLARIS)
-// This test doesn't apply to Solaris.
-#elif defined(XP_UNIX)
+#else
 #include <algorithm>
 #include <errno.h>
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
-#else
-#error "Memory mapping functions are not defined for your OS."
 #endif
 
 BEGIN_TEST(testGCAllocator) {
+#ifdef JS_64BIT
+  /* On 64-bit platforms we use very different logic. */
+  return true;
+#else
   size_t PageSize = 0;
 #if defined(XP_WIN)
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
   SYSTEM_INFO sysinfo;
   GetSystemInfo(&sysinfo);
   PageSize = sysinfo.dwPageSize;
-#else  // Various APIs are unavailable. This test is disabled.
-  return true;
-#endif
-#elif defined(SOLARIS)
-  return true;
-#elif defined(XP_UNIX)
+#else
   PageSize = size_t(sysconf(_SC_PAGESIZE));
-#else
-  return true;
 #endif
 
   /* Finish any ongoing background free activity. */
   js::gc::FinishGC(cx);
 
   bool growUp;
   CHECK(addressesGrowUp(&growUp));
 
   if (growUp) {
     return testGCAllocatorUp(PageSize);
   }
   return testGCAllocatorDown(PageSize);
+#endif
 }
 
 static const size_t Chunk = 512 * 1024;
 static const size_t Alignment = 2 * Chunk;
 static const int MaxTempChunks = 4096;
 static const size_t StagingSize = 16 * Chunk;
 
 bool addressesGrowUp(bool* resultOut) {
@@ -135,25 +128,19 @@ bool testGCAllocatorUp(const size_t Page
   CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool,
                           tempChunks));
   // Check that an aligned chunk after a single unalignable chunk is used.
   CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool,
                           tempChunks));
   // Check that we fall back to the slow path after two unalignable chunks.
   CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
                           tempChunks));
-#ifndef __aarch64__
-  // Bug 1440330 - this test is incorrect for aarch64 because MapMemory only
-  // looks for 1MB-aligned chunks on that platform, and will find one at
-  // position 6 here.
-
   // Check that we also fall back after an unalignable and an alignable chunk.
   CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
                           tempChunks));
-#endif
   // Check that the last ditch allocator works as expected.
   CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
                           tempChunks, UseLastDitchAllocator));
 
   // Clean up.
   while (--tempChunks >= 0) {
     unmapPages(chunkPool[tempChunks], 2 * Chunk);
   }
@@ -288,55 +275,34 @@ bool positionIsCorrect(const char* str, 
     while (--tempChunks >= 0) {
       js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
     }
   }
   return result == desired;
 }
 
 #if defined(XP_WIN)
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
 
 void* mapMemoryAt(void* desired, size_t length) {
   return VirtualAlloc(desired, length, MEM_COMMIT | MEM_RESERVE,
                       PAGE_READWRITE);
 }
 
 void* mapMemory(size_t length) {
   return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE,
                       PAGE_READWRITE);
 }
 
 void unmapPages(void* p, size_t size) {
   MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
 }
 
-#else  // Various APIs are unavailable. This test is disabled.
-
-void* mapMemoryAt(void* desired, size_t length) { return nullptr; }
-void* mapMemory(size_t length) { return nullptr; }
-void unmapPages(void* p, size_t size) {}
-
-#endif
-#elif defined(SOLARIS)  // This test doesn't apply to Solaris.
-
-void* mapMemoryAt(void* desired, size_t length) { return nullptr; }
-void* mapMemory(size_t length) { return nullptr; }
-void unmapPages(void* p, size_t size) {}
-
-#elif defined(XP_UNIX)
+#else
 
 void* mapMemoryAt(void* desired, size_t length) {
-
-#if defined(__ia64__) || defined(__aarch64__) ||  \
-    (defined(__sparc__) && defined(__arch64__) && \
-     (defined(__NetBSD__) || defined(__linux__)))
-  MOZ_RELEASE_ASSERT(
-      (0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
-#endif
   void* region = mmap(desired, length, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
   if (region == MAP_FAILED) {
     return nullptr;
   }
   if (region != desired) {
     if (munmap(region, length)) {
       MOZ_RELEASE_ASSERT(errno == ENOMEM);
@@ -346,62 +312,24 @@ void* mapMemoryAt(void* desired, size_t 
   return region;
 }
 
 void* mapMemory(size_t length) {
   int prot = PROT_READ | PROT_WRITE;
   int flags = MAP_PRIVATE | MAP_ANON;
   int fd = -1;
   off_t offset = 0;
-  // The test code must be aligned with the implementation in gc/Memory.cpp.
-#if defined(__ia64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__NetBSD__))
-  void* region =
-      mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-    if (munmap(region, length)) {
-      MOZ_RELEASE_ASSERT(errno == ENOMEM);
-    }
-    return nullptr;
-  }
-  return region;
-#elif defined(__aarch64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-  const uintptr_t start = UINT64_C(0x0000070000000000);
-  const uintptr_t end = UINT64_C(0x0000800000000000);
-  const uintptr_t step = js::gc::ChunkSize;
-  uintptr_t hint;
-  void* region = MAP_FAILED;
-  for (hint = start; region == MAP_FAILED && hint + length <= end;
-       hint += step) {
-    region = mmap((void*)hint, length, prot, flags, fd, offset);
-    if (region != MAP_FAILED) {
-      if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-        if (munmap(region, length)) {
-          MOZ_RELEASE_ASSERT(errno == ENOMEM);
-        }
-        region = MAP_FAILED;
-      }
-    }
-  }
-  return region == MAP_FAILED ? nullptr : region;
-#else
   void* region = mmap(nullptr, length, prot, flags, fd, offset);
   if (region == MAP_FAILED) {
     return nullptr;
   }
   return region;
-#endif
 }
 
 void unmapPages(void* p, size_t size) {
   if (munmap(p, size)) {
     MOZ_RELEASE_ASSERT(errno == ENOMEM);
   }
 }
 
-#else  // !defined(XP_WIN) && !defined(SOLARIS) && !defined(XP_UNIX)
-#error "Memory mapping functions are not defined for your OS."
 #endif
+
 END_TEST(testGCAllocator)
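
Finally, the most subtle piece of the Memory.cpp changes is FindAddressLimit(), which on non-Windows 64-bit systems discovers how many address bits the kernel will actually serve by probing mmap and binary-searching on the index of the highest set bit of the returned addresses. The following is a compressed, hedged sketch of that search; the Probe helper, the deterministic probe addresses, and the portable FloorLog2 are illustrative stand-ins (the patch probes randomly chosen addresses in each range and uses mozilla::FloorLog2).

```cpp
// Hedged sketch of the binary search FindAddressLimit() performs to find the
// highest usable address bit. Simplified: probes here are deterministic.
#include <sys/mman.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>

static size_t FloorLog2(uint64_t v) {
  size_t r = 0;
  while (v >>= 1) ++r;
  return r;
}

// Try to map `length` bytes near `hint`; return the address actually granted
// (0 on failure) and release it immediately -- we only care where it landed.
static uintptr_t Probe(uintptr_t hint, size_t length) {
  void* p = mmap(reinterpret_cast<void*>(hint), length, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANON, -1, 0);
  if (p == MAP_FAILED) return 0;
  uintptr_t got = reinterpret_cast<uintptr_t>(p);
  munmap(p, length);
  return got;
}

uint64_t FindAddressLimitSketch(size_t length) {
  size_t low = 31;   // assume at least 32 usable bits
  size_t high = 46;  // never trust more than 47 bits (the JS pointer limit)
  while (high - 1 > low) {
    size_t middle = low + (high - low) / 2;
    uintptr_t start = uintptr_t(1) << middle;
    bool usable = false;
    for (size_t tries = 0; tries < 4 && !usable; ++tries) {
      uintptr_t got = Probe(start + tries * length, length);
      if (got) low = std::max(low, FloorLog2(got));
      if (got >= start) usable = true;  // the kernel served bit `middle`
    }
    if (!usable) high = middle;  // otherwise `low` already moved up above
  }
  uintptr_t top = uintptr_t(1) << std::min(low, size_t(46));
  return 2 * uint64_t(top) - length - 1;  // end of the last usable bit's range
}
```

In the patch, InitMemorySubsystem() then backs the returned limit off by one allocation granularity and clamps it to the 47-bit ceiling that JS pointers require.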