Bug 1502733 - Clean up and refactor GC system memory allocation functions. r=sfink
☠☠ backed out by 098c543f74c6 ☠☠
author: Emanuel Hoogeveen <emanuel.hoogeveen@protonmail.com>
date: Mon, 03 Dec 2018 02:39:43 +0200
changeset 508423 2b3af7022014b0d4a10a2c8812da9d2735f75a9b
parent 508422 4976cba52e47aaba70bcc69357ad33335c2a9e3a
child 508424 098c543f74c68ee61b4de82dae21870b1eb8fa12
push id: 1905
push user: ffxbld-merge
push date: Mon, 21 Jan 2019 12:33:13 +0000
reviewers: sfink
bugs: 1502733
milestone: 65.0a1
js/src/gc/Memory.cpp
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -9,910 +9,643 @@
 #include "mozilla/Atomics.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 
 #include "js/HeapAPI.h"
 #include "vm/Runtime.h"
 
 #if defined(XP_WIN)
 
-#include "mozilla/Sprintf.h"
 #include "util/Windows.h"
 #include <psapi.h>
 
-#elif defined(SOLARIS)
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-#elif defined(XP_UNIX)
+#else
 
 #include <algorithm>
 #include <errno.h>
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 #endif
 
 namespace js {
 namespace gc {
 
-// The GC can only safely decommit memory when the page size of the
-// running process matches the compiled arena size.
+/*
+ * System allocation functions generally require the allocation size
+ * to be an integer multiple of the page size of the running process.
+ */
 static size_t pageSize = 0;
 
-// The OS allocation granularity may not match the page size.
+/* The OS allocation granularity may not match the page size. */
 static size_t allocGranularity = 0;
 
-#if defined(XP_UNIX)
-// The addresses handed out by mmap may grow up or down.
+/*
+ * System allocation functions may hand out regions of memory in increasing or
+ * decreasing order. This ordering is used as a hint during chunk alignment to
+ * reduce the number of system calls. On systems with 48-bit addresses, our
+ * workarounds to obtain 47-bit pointers cause addresses to be handed out in
+ * increasing order.
+ *
+ * We do not use the growth direction on Windows, as constraints on VirtualAlloc
+ * would make its application failure prone and complex. Tests indicate that
+ * VirtualAlloc always hands out regions of memory in increasing order.
+ */
+#if defined(XP_DARWIN) ||                                               \
+    (!defined(XP_WIN) && (defined(__ia64__) || defined(__aarch64__) ||  \
+                          (defined(__sparc__) && defined(__arch64__) && \
+                           (defined(__NetBSD__) || defined(__linux__)))))
+static mozilla::Atomic<int, mozilla::Relaxed,
+                       mozilla::recordreplay::Behavior::DontPreserve>
+    growthDirection(1);
+#elif defined(XP_LINUX)
 static mozilla::Atomic<int, mozilla::Relaxed,
                        mozilla::recordreplay::Behavior::DontPreserve>
     growthDirection(0);
 #endif
 
-// Data from OOM crashes shows there may be up to 24 chunksized but unusable
-// chunks available in low memory situations. These chunks may all need to be
-// used up before we gain access to remaining *alignable* chunksized regions,
-// so we use a generous limit of 32 unusable chunks to ensure we reach them.
+enum class Commit : bool {
+  No = false,
+  Yes = true,
+};
+
+#if defined(XP_WIN)
+enum class PageAccess : DWORD {
+  None = PAGE_NOACCESS,
+  Read = PAGE_READONLY,
+  ReadWrite = PAGE_READWRITE,
+  Execute = PAGE_EXECUTE,
+  ReadExecute = PAGE_EXECUTE_READ,
+  ReadWriteExecute = PAGE_EXECUTE_READWRITE,
+};
+#else
+enum class PageAccess : int {
+  None = PROT_NONE,
+  Read = PROT_READ,
+  ReadWrite = PROT_READ | PROT_WRITE,
+  Execute = PROT_EXEC,
+  ReadExecute = PROT_READ | PROT_EXEC,
+  ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
+};
+#endif
+
+/*
+ * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
+ * chunks available in low memory situations. These chunks may all need to be
+ * used up before we gain access to remaining *alignable* chunk-sized regions,
+ * so we use a generous limit of 32 unusable chunks to ensure we reach them.
+ */
 static const int MaxLastDitchAttempts = 32;
 
-static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size,
-                        size_t alignment);
-static void* MapAlignedPagesSlow(size_t size, size_t alignment);
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment);
+static void TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+                            size_t length, size_t alignment);
+static void* MapAlignedPagesSlow(size_t length, size_t alignment);
+static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
 
 size_t SystemPageSize() { return pageSize; }
 
-static bool DecommitEnabled() { return pageSize == ArenaSize; }
+/*
+ * We can only decommit unused pages if the hardcoded Arena
+ * size matches the page size for the running process.
+ */
+static inline bool DecommitEnabled() { return pageSize == ArenaSize; }
 
-/*
- * This returns the offset of address p from the nearest aligned address at
- * or below p - or alternatively, the number of unaligned bytes at the end of
- * the region starting at p (as we assert that allocation size is an integer
- * multiple of the alignment).
- */
-static inline size_t OffsetFromAligned(void* p, size_t alignment) {
-  return uintptr_t(p) % alignment;
+/* Returns the offset from the nearest aligned address at or below |region|. */
+static inline size_t OffsetFromAligned(void* region, size_t alignment) {
+  return uintptr_t(region) % alignment;
 }
 
-void* TestMapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  return MapAlignedPagesLastDitch(size, alignment);
+void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
+  return MapAlignedPagesLastDitch(length, alignment);
 }
 
-#if defined(XP_WIN)
-
 void InitMemorySubsystem() {
   if (pageSize == 0) {
+#if defined(XP_WIN)
     SYSTEM_INFO sysinfo;
     GetSystemInfo(&sysinfo);
     pageSize = sysinfo.dwPageSize;
     allocGranularity = sysinfo.dwAllocationGranularity;
+#else
+    pageSize = size_t(sysconf(_SC_PAGESIZE));
+    allocGranularity = pageSize;
+#endif
   }
 }
 
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-
-static inline void* MapMemoryAt(void* desired, size_t length, int flags,
-                                int prot = PAGE_READWRITE) {
-  return VirtualAlloc(desired, length, flags, prot);
+template <Commit commit, PageAccess prot>
+static inline void* MapInternal(void* desired, size_t length) {
+#if defined(XP_WIN)
+  DWORD flags =
+      (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
+  return VirtualAlloc(desired, length, flags, DWORD(prot));
+#else
+  int flags = MAP_PRIVATE | MAP_ANON;
+  void* region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1,
+                                        0, "js-gc-heap");
+  if (region == MAP_FAILED) {
+    return nullptr;
+  }
+  return region;
+#endif
 }
 
-static inline void* MapMemory(size_t length, int flags,
-                              int prot = PAGE_READWRITE) {
-  return VirtualAlloc(nullptr, length, flags, prot);
+static inline void UnmapInternal(void* region, size_t length) {
+  MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
+  MOZ_ASSERT(length > 0 && length % pageSize == 0);
+
+#if defined(XP_WIN)
+  MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
+#else
+  if (munmap(region, length)) {
+    MOZ_RELEASE_ASSERT(errno == ENOMEM);
+  }
+#endif
+}
+
+/* The JS engine uses 47-bit pointers; all higher bits must be clear. */
+static inline bool IsInvalidRegion(void* region, size_t length) {
+  const uint64_t invalidPointerMask = UINT64_C(0xffff800000000000);
+  return (uintptr_t(region) + length - 1) & invalidPointerMask;
 }
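
To make the mask concrete, here is an illustrative check (the addresses are chosen for the example and are not taken from the patch): a region is rejected as soon as its last byte reaches bit 47.

    // Illustrative values only.
    //   base = 0x00007fffffff0000                       (below the 47-bit limit)
    //   last = base + 0x20000 - 1 = 0x000080000000ffff  (bit 47 is set)
    //   last & 0xffff800000000000 != 0, so IsInvalidRegion(base, 0x20000) is true
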
 
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemory(size_t length) {
+  MOZ_ASSERT(length > 0);
+
+#if defined(XP_WIN)
+  return MapInternal<commit, prot>(nullptr, length);
+#elif defined(__ia64__) || \
+    (defined(__sparc__) && defined(__arch64__) && defined(__NetBSD__))
+  // These platforms allow addresses with more than 47 bits. However, their
+  // mmap treats the first parameter as a hint of what address to allocate.
+  // If the region starting at the exact address passed is not available, the
+  // closest available region above it will be returned. Thus we supply a base
+  // address of 0x0000070000000000, 121 TiB below our 47-bit limit.
+  const uintptr_t hint = UINT64_C(0x0000070000000000);
+  void* region =
+      MapInternal<commit, prot>(reinterpret_cast<void*>(hint), length);
 
-  void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
+  // If we're given a region that ends above the 47-bit limit,
+  // treat it as running out of memory.
+  if (IsInvalidRegion(region, length)) {
+    UnmapInternal(region, length);
+    return nullptr;
+  }
+  return region;
+#elif defined(__aarch64__) || \
+    (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
+  // These platforms allow addresses with more than 47 bits. Unlike above,
+  // their mmap does not treat its first parameter as a hint, so we're forced
+  // to loop through valid regions manually. The following logic is far from
+  // ideal and may cause allocation to take a long time on these platforms.
+  const uintptr_t start = UINT64_C(0x0000070000000000);
+  const uintptr_t end = UINT64_C(0x0000800000000000);
+  const uintptr_t step = ChunkSize;
+  uintptr_t desired;
+  void* region = nullptr;
+  for (desired = start; !region && desired + length <= end; desired += step) {
+    region =
+        MapInternal<commit, prot>(reinterpret_cast<void*>(desired), length);
+    // If mmap on this platform *does* treat the supplied address as a hint,
+    // this platform should be using the logic above!
+    if (region) {
+      MOZ_RELEASE_ASSERT(uintptr_t(region) == desired);
+    }
+  }
+  return region;
+#else
+  return MapInternal<commit, prot>(nullptr, length);
+#endif
+}
 
-  // Special case: If we want allocation alignment, no further work is needed.
-  if (alignment == allocGranularity) {
-    return p;
-  }
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemoryAt(void* desired, size_t length) {
+  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
+  MOZ_ASSERT(length > 0);
 
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
+#if defined(XP_WIN)
+  return MapInternal<commit, prot>(desired, length);
+#else
+#if defined(__ia64__) || defined(__aarch64__) ||  \
+    (defined(__sparc__) && defined(__arch64__) && \
+     (defined(__NetBSD__) || defined(__linux__)))
+  MOZ_RELEASE_ASSERT(!IsInvalidRegion(desired, length));
+#endif
+
+  void* region = MapInternal<commit, prot>(desired, length);
+  if (!region) {
+    return nullptr;
   }
 
-  void* retainedAddr;
-  GetNewChunk(&p, &retainedAddr, size, alignment);
-  if (retainedAddr) {
-    UnmapPages(retainedAddr, size);
+  // On some platforms mmap treats the desired address as a hint, so
+  // check that the address we got is the address we requested.
+  if (region != desired) {
+    UnmapInternal(region, length);
+    return nullptr;
   }
-  if (p) {
-    if (OffsetFromAligned(p, alignment) == 0) {
-      return p;
-    }
-    UnmapPages(p, size);
-  }
-
-  p = MapAlignedPagesSlow(size, alignment);
-  if (!p) {
-    return MapAlignedPagesLastDitch(size, alignment);
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
-  return p;
+  return region;
+#endif
 }
 
-static void* MapAlignedPagesSlow(size_t size, size_t alignment) {
-  /*
-   * Windows requires that there be a 1:1 mapping between VM allocation
-   * and deallocation operations.  Therefore, take care here to acquire the
-   * final result via one mapping operation.  This means unmapping any
-   * preliminary result that is not correctly aligned.
-   */
-  void* p;
+void* MapAlignedPages(size_t length, size_t alignment) {
+  MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
+  MOZ_RELEASE_ASSERT(length % pageSize == 0);
+  MOZ_RELEASE_ASSERT(std::max(alignment, allocGranularity) %
+                         std::min(alignment, allocGranularity) ==
+                     0);
+
+  void* region = MapMemory(length);
+  if (OffsetFromAligned(region, alignment) == 0) {
+    return region;
+  }
+
+  void* retainedRegion;
+  TryToAlignChunk(&region, &retainedRegion, length, alignment);
+  if (retainedRegion) {
+    UnmapInternal(retainedRegion, length);
+  }
+  if (region) {
+    if (OffsetFromAligned(region, alignment) == 0) {
+      return region;
+    }
+    UnmapInternal(region, length);
+  }
+
+  region = MapAlignedPagesSlow(length, alignment);
+  if (!region) {
+    return MapAlignedPagesLastDitch(length, alignment);
+  }
+
+  MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
+  return region;
+}
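
As a usage sketch (illustrative only, not part of this patch), a caller allocating a single GC chunk would pair MapAlignedPages with UnmapPages; ChunkSize and ChunkMask are assumed to be the existing chunk constants from js/HeapAPI.h.

    void* chunk = MapAlignedPages(ChunkSize, ChunkSize);
    if (!chunk) {
      return nullptr;  // Report OOM to the caller.
    }
    MOZ_ASSERT((uintptr_t(chunk) & ChunkMask) == 0);
    // ... use the chunk ...
    UnmapPages(chunk, ChunkSize);
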
+
+static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
+  void* alignedRegion = nullptr;
   do {
-    /*
-     * Over-allocate in order to map a memory region that is definitely
-     * large enough, then deallocate and allocate again the correct size,
-     * within the over-sized mapping.
-     *
-     * Since we're going to unmap the whole thing anyway, the first
-     * mapping doesn't have to commit pages.
-     */
-    size_t reserveSize = size + alignment - pageSize;
-    p = MapMemory(reserveSize, MEM_RESERVE);
-    if (!p) {
+    size_t reserveLength = length + alignment - pageSize;
+#if defined(XP_WIN)
+    // Don't commit the requested pages as we won't use the region directly.
+    void* region = MapMemory<Commit::No>(reserveLength);
+#else
+    void* region = MapMemory(reserveLength);
+#endif
+    if (!region) {
       return nullptr;
     }
-    void* chunkStart = (void*)AlignBytes(uintptr_t(p), alignment);
-    UnmapPages(p, reserveSize);
-    p = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);
+    alignedRegion =
+        reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
+#if defined(XP_WIN)
+    // Windows requires that map and unmap calls be matched, so deallocate
+    // and immediately reallocate at the desired (aligned) address.
+    UnmapInternal(region, reserveLength);
+    alignedRegion = MapMemoryAt(alignedRegion, length);
+#else
+    // munmap allows us to simply unmap the pages that don't interest us.
+    if (alignedRegion != region) {
+      UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
+    }
+    void* regionEnd =
+        reinterpret_cast<void*>(uintptr_t(region) + reserveLength);
+    void* alignedEnd =
+        reinterpret_cast<void*>(uintptr_t(alignedRegion) + length);
+    if (alignedEnd != regionEnd) {
+      UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
+    }
+#endif
+    // On Windows we may have raced with another thread; if so, try again.
+  } while (!alignedRegion);
 
-    /* Failure here indicates a race with another thread, so try again. */
-  } while (!p);
-
-  return p;
+  return alignedRegion;
 }
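
A worked example of the over-allocation arithmetic (illustrative values): reserving length + alignment - pageSize bytes guarantees that an aligned region of the requested length fits inside the reservation.

    // Illustrative values: length = alignment = 1 MiB, pageSize = 4 KiB.
    //   reserveLength = 0x100000 + 0x100000 - 0x1000 = 0x1ff000
    //   region        = 0x7f3a12345000                 (returned by the system)
    //   alignedRegion = AlignBytes(region, 0x100000)   = 0x7f3a12400000
    // alignedRegion - region = 0xbb000 <= alignment - pageSize, so
    // alignedRegion + length = 0x7f3a12500000 ends inside the reservation
    // (region + reserveLength = 0x7f3a12544000).
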
 
 /*
  * In a low memory or high fragmentation situation, alignable chunks of the
- * desired size may still be available, even if there are no more contiguous
- * free chunks that meet the |size + alignment - pageSize| requirement of
+ * desired length may still be available, even if there are no more contiguous
+ * free chunks that meet the |length + alignment - pageSize| requirement of
  * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
  * by temporarily holding onto the unaligned parts of each chunk until the
  * allocator gives us a chunk that either is, or can be aligned.
  */
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
+static void* MapAlignedPagesLastDitch(size_t length, size_t alignment) {
   void* tempMaps[MaxLastDitchAttempts];
   int attempt = 0;
-  void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
+  void* region = MapMemory(length);
+  if (OffsetFromAligned(region, alignment) == 0) {
+    return region;
   }
   for (; attempt < MaxLastDitchAttempts; ++attempt) {
-    GetNewChunk(&p, tempMaps + attempt, size, alignment);
-    if (OffsetFromAligned(p, alignment) == 0) {
+    TryToAlignChunk(&region, tempMaps + attempt, length, alignment);
+    if (OffsetFromAligned(region, alignment) == 0) {
       if (tempMaps[attempt]) {
-        UnmapPages(tempMaps[attempt], size);
+        UnmapInternal(tempMaps[attempt], length);
       }
       break;
     }
     if (!tempMaps[attempt]) {
-      break; /* Bail if GetNewChunk failed. */
+      break;  // Bail if TryToAlignChunk failed.
     }
   }
-  if (OffsetFromAligned(p, alignment)) {
-    UnmapPages(p, size);
-    p = nullptr;
+  if (OffsetFromAligned(region, alignment)) {
+    UnmapInternal(region, length);
+    region = nullptr;
   }
   while (--attempt >= 0) {
-    UnmapPages(tempMaps[attempt], size);
+    UnmapInternal(tempMaps[attempt], length);
   }
-  return p;
+  return region;
 }
 
+#if defined(XP_WIN)
+
 /*
  * On Windows, map and unmap calls must be matched, so we deallocate the
  * unaligned chunk, then reallocate the unaligned part to block off the
  * old address and force the allocator to give us a new one.
  */
-static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size,
-                        size_t alignment) {
-  void* address = *aAddress;
-  void* retainedAddr = nullptr;
+static void TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+                            size_t length, size_t alignment) {
+  void* region = *aRegion;
+  MOZ_ASSERT(region && OffsetFromAligned(region, alignment) != 0);
+
+  void* retainedRegion = nullptr;
   do {
-    size_t retainedSize;
-    size_t offset = OffsetFromAligned(address, alignment);
-    if (!offset) {
+    size_t offset = OffsetFromAligned(region, alignment);
+    if (offset == 0) {
+      // If the address is aligned, either we hit OOM or we're done.
       break;
     }
-    UnmapPages(address, size);
-    retainedSize = alignment - offset;
-    retainedAddr = MapMemoryAt(address, retainedSize, MEM_RESERVE);
-    address = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
-    /* If retainedAddr is null here, we raced with another thread. */
-  } while (!retainedAddr);
-  *aAddress = address;
-  *aRetainedAddr = retainedAddr;
+    UnmapInternal(region, length);
+    size_t retainedLength = alignment - offset;
+    retainedRegion = MapMemoryAt<Commit::No>(region, retainedLength);
+    region = MapMemory(length);
+
+    // If retainedRegion is null here, we raced with another thread.
+  } while (!retainedRegion);
+  *aRegion = region;
+  *aRetainedRegion = retainedRegion;
 }
 
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
+#else  // !defined(XP_WIN)
+
+/*
+ * mmap calls don't have to be matched with calls to munmap, so we can unmap
+ * just the pages we don't need. However, as we don't know a priori if addresses
+ * are handed out in increasing or decreasing order, we have to try both
+ * directions (depending on the environment, one will always fail).
+ */
+static void TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+                            size_t length, size_t alignment) {
+  void* regionStart = *aRegion;
+  MOZ_ASSERT(regionStart && OffsetFromAligned(regionStart, alignment) != 0);
 
-  MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
+  bool addressesGrowUpward = growthDirection > 0;
+  bool directionUncertain = -8 < growthDirection && growthDirection <= 8;
+  size_t offsetLower = OffsetFromAligned(regionStart, alignment);
+  size_t offsetUpper = alignment - offsetLower;
+  for (size_t i = 0; i < 2; ++i) {
+    if (addressesGrowUpward) {
+      void* upperStart =
+          reinterpret_cast<void*>(uintptr_t(regionStart) + offsetUpper);
+      void* regionEnd =
+          reinterpret_cast<void*>(uintptr_t(regionStart) + length);
+      if (MapMemoryAt(regionEnd, offsetUpper)) {
+        UnmapInternal(regionStart, offsetUpper);
+        if (directionUncertain) {
+          ++growthDirection;
+        }
+        regionStart = upperStart;
+        break;
+      }
+    } else {
+      void* lowerStart =
+          reinterpret_cast<void*>(uintptr_t(regionStart) - offsetLower);
+      void* lowerEnd = reinterpret_cast<void*>(uintptr_t(lowerStart) + length);
+      if (MapMemoryAt(lowerStart, offsetLower)) {
+        UnmapInternal(lowerEnd, offsetLower);
+        if (directionUncertain) {
+          --growthDirection;
+        }
+        regionStart = lowerStart;
+        break;
+      }
+    }
+    // If we're confident in the growth direction, don't try the other.
+    if (!directionUncertain) {
+      break;
+    }
+    addressesGrowUpward = !addressesGrowUpward;
+  }
+  // If our current chunk cannot be aligned, just get a new one.
+  void* retainedRegion = nullptr;
+  if (OffsetFromAligned(regionStart, alignment) != 0) {
+    retainedRegion = regionStart;
+    regionStart = MapMemory(length);
+  }
+  *aRegion = regionStart;
+  *aRetainedRegion = retainedRegion;
 }
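
A concrete walk-through of the upward-growth case (the addresses are illustrative):

    // Suppose alignment = 1 MiB and MapMemory() returned
    // regionStart = 0x7f0000345000. Then:
    //   offsetLower = 0x45000  (down to the previous 1 MiB boundary)
    //   offsetUpper = 0xbb000  (up to the next 1 MiB boundary)
    // If addresses grow upward, we map offsetUpper extra bytes at regionEnd and
    // unmap the first offsetUpper bytes of the region, so the region now starts
    // at the aligned address 0x7f0000400000 and still spans |length| bytes.
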
 
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
+#endif
+
+void UnmapPages(void* region, size_t length) {
+  MOZ_RELEASE_ASSERT(region &&
+                     OffsetFromAligned(region, allocGranularity) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
+
+  // ASan does not automatically unpoison memory, so we have to do this here.
+  MOZ_MAKE_MEM_UNDEFINED(region, length);
+
+  UnmapInternal(region, length);
+}
+
+bool MarkPagesUnused(void* region, size_t length) {
+  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
+
+  MOZ_MAKE_MEM_NOACCESS(region, length);
 
   if (!DecommitEnabled()) {
     return true;
   }
 
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-  LPVOID p2 = MapMemoryAt(p, size, MEM_RESET);
-  return p2 == p;
+#if defined(XP_WIN)
+  return VirtualAlloc(region, length, MEM_RESET,
+                      DWORD(PageAccess::ReadWrite)) == region;
+#elif defined(XP_DARWIN)
+  return madvise(region, length, MADV_FREE) == 0;
+#else
+  return madvise(region, length, MADV_DONTNEED) == 0;
+#endif
 }
 
-void MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
+void MarkPagesInUse(void* region, size_t length) {
+  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
 
-  if (!DecommitEnabled()) {
-    return;
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+  MOZ_MAKE_MEM_UNDEFINED(region, length);
 }
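
An illustrative decommit/recommit sketch (not part of the patch); |arena| stands for a hypothetical page-aligned, ArenaSize-long region inside a live chunk.

    if (MarkPagesUnused(arena, ArenaSize)) {
      // The OS may now reclaim the physical pages; their contents are lost.
    }
    // ... later, before the arena is handed out again ...
    MarkPagesInUse(arena, ArenaSize);
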
 
 size_t GetPageFaultCount() {
   if (mozilla::recordreplay::IsRecordingOrReplaying()) {
     return 0;
   }
+#if defined(XP_WIN)
   PROCESS_MEMORY_COUNTERS pmc;
-  if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
+  if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
     return 0;
   }
   return pmc.PageFaultCount;
+#else
+  struct rusage usage;
+  int err = getrusage(RUSAGE_SELF, &usage);
+  if (err) {
+    return 0;
+  }
+  return usage.ru_majflt;
+#endif
 }
 
 void* AllocateMappedContent(int fd, size_t offset, size_t length,
                             size_t alignment) {
-  MOZ_ASSERT(length && alignment);
-
-  // The allocation granularity and the requested offset
-  // must both be divisible by the requested alignment.
-  // Alignments larger than the allocation granularity are not supported.
-  if (allocGranularity % alignment != 0 || offset % alignment != 0) {
-    return nullptr;
-  }
-
-  HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
-
-  // This call will fail if the file does not exist, which is what we want.
-  HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
-  if (!hMap) {
+  if (length == 0 || alignment == 0 || offset % alignment != 0 ||
+      std::max(alignment, allocGranularity) %
+              std::min(alignment, allocGranularity) !=
+          0) {
     return nullptr;
   }
 
   size_t alignedOffset = offset - (offset % allocGranularity);
   size_t alignedLength = length + (offset % allocGranularity);
 
+  // We preallocate the mapping using MapAlignedPages, which expects
+  // the length parameter to be an integer multiple of the page size.
+  size_t mappedLength = alignedLength;
+  if (alignedLength % pageSize != 0) {
+    mappedLength += pageSize - alignedLength % pageSize;
+  }
+
+#if defined(XP_WIN)
+  HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
+
+  // This call will fail if the file does not exist.
+  HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
+  if (!hMap) {
+    return nullptr;
+  }
+
   DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
   DWORD offsetL = uint32_t(alignedOffset);
 
-  // If the offset or length are out of bounds, this call will fail.
-  uint8_t* map = static_cast<uint8_t*>(
-      MapViewOfFile(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength));
+  uint8_t* map = nullptr;
+  for (;;) {
+    // The value of a pointer is technically only defined while the region
+    // it points to is allocated, so explicitly treat this one as a number.
+    uintptr_t region = uintptr_t(MapAlignedPages(mappedLength, alignment));
+    if (region == 0) {
+      break;
+    }
+    UnmapInternal(reinterpret_cast<void*>(region), mappedLength);
+    // If the offset or length are out of bounds, this call will fail.
+    map = static_cast<uint8_t*>(
+        MapViewOfFileEx(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength,
+                        reinterpret_cast<void*>(region)));
+
+    // Retry if another thread mapped the address we were trying to use.
+    if (map || GetLastError() != ERROR_INVALID_ADDRESS) {
+      break;
+    }
+  }
 
   // This just decreases the file mapping object's internal reference count;
   // it won't actually be destroyed until we unmap the associated view.
   CloseHandle(hMap);
 
   if (!map) {
     return nullptr;
   }
+#else
+  // Sanity check the offset and length, as mmap does not do this for us.
+  struct stat st;
+  if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
+      length > uint64_t(st.st_size) - offset) {
+    return nullptr;
+  }
+
+  void* region = MapAlignedPages(mappedLength, alignment);
+  if (!region) {
+    return nullptr;
+  }
+
+  // Calling mmap with MAP_FIXED will replace the previous mapping, allowing
+  // us to reuse the region we obtained without racing with other threads.
+  uint8_t* map =
+      static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
+                                 MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
+  MOZ_RELEASE_ASSERT(map != MAP_FAILED);
+#endif
 
 #ifdef DEBUG
   // Zero out data before and after the desired mapping to catch errors early.
   if (offset != alignedOffset) {
     memset(map, 0, offset - alignedOffset);
   }
   if (alignedLength % pageSize) {
     memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
   }
 #endif
 
   return map + (offset - alignedOffset);
 }
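
A worked example of the offset arithmetic above (illustrative values, assuming a 64 KiB allocation granularity and 4 KiB pages):

    // offset = 0x12345, length = 0x1000
    //   alignedOffset = 0x12345 - 0x2345           = 0x10000
    //   alignedLength = 0x1000  + 0x2345           = 0x3345
    //   mappedLength  = 0x3345  + (0x1000 - 0x345) = 0x4000
    // The returned pointer is map + (offset - alignedOffset) = map + 0x2345,
    // so the caller sees exactly |length| bytes starting at file offset |offset|.
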
 
-void DeallocateMappedContent(void* p, size_t /*length*/) {
-  if (!p) {
-    return;
-  }
-
-  // Calculate the address originally returned by MapViewOfFile.
-  // This is needed because AllocateMappedContent returns a pointer
-  // that might be offset from the view, as the beginning of a
-  // view must be aligned with the allocation granularity.
-  uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
-  MOZ_ALWAYS_TRUE(UnmapViewOfFile(reinterpret_cast<void*>(map)));
-}
-
-#else  // Various APIs are unavailable.
-
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
-
-  void* p = _aligned_malloc(size, alignment);
-
-  MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
-  return p;
-}
-
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  return nullptr;
-}
-
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  _aligned_free(p);
-}
-
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-  return true;
-}
-
-bool MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-}
-
-size_t GetPageFaultCount() {
-  // GetProcessMemoryInfo is unavailable.
-  return 0;
-}
-
-void* AllocateMappedContent(int fd, size_t offset, size_t length,
-                            size_t alignment) {
-  // Not implemented.
-  return nullptr;
-}
-
-// Deallocate mapped memory for object.
-void DeallocateMappedContent(void* p, size_t length) {
-  // Not implemented.
-}
-
-#endif
-
-#elif defined(SOLARIS)
-
-#ifndef MAP_NOSYNC
-#define MAP_NOSYNC 0
-#endif
-
-void InitMemorySubsystem() {
-  if (pageSize == 0) {
-    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
-  }
-}
-
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
-
-  int prot = PROT_READ | PROT_WRITE;
-  int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
-
-  void* p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
-  if (p == MAP_FAILED) {
-    return nullptr;
-  }
-  return p;
-}
-
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  return nullptr;
-}
-
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  MOZ_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
-}
-
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-  return true;
-}
-
-bool MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  if (!DecommitEnabled()) {
+void DeallocateMappedContent(void* region, size_t length) {
+  if (!region) {
     return;
   }
 
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-}
-
-size_t GetPageFaultCount() { return 0; }
-
-void* AllocateMappedContent(int fd, size_t offset, size_t length,
-                            size_t alignment) {
-  // Not implemented.
-  return nullptr;
-}
-
-// Deallocate mapped memory for object.
-void DeallocateMappedContent(void* p, size_t length) {
-  // Not implemented.
-}
-
-#elif defined(XP_UNIX)
-
-void InitMemorySubsystem() {
-  if (pageSize == 0) {
-    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
-  }
-}
-
-static inline void* MapMemoryAt(void* desired, size_t length,
-                                int prot = PROT_READ | PROT_WRITE,
-                                int flags = MAP_PRIVATE | MAP_ANON, int fd = -1,
-                                off_t offset = 0) {
-
-#if defined(__ia64__) || defined(__aarch64__) ||  \
-    (defined(__sparc__) && defined(__arch64__) && \
-     (defined(__NetBSD__) || defined(__linux__)))
-  MOZ_ASSERT((0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
-#endif
-  void* region = mmap(desired, length, prot, flags, fd, offset);
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  /*
-   * mmap treats the given address as a hint unless the MAP_FIXED flag is
-   * used (which isn't usually what you want, as this overrides existing
-   * mappings), so check that the address we got is the address we wanted.
-   */
-  if (region != desired) {
-    if (munmap(region, length)) {
-      MOZ_ASSERT(errno == ENOMEM);
-    }
-    return nullptr;
-  }
-  return region;
-}
-
-static inline void* MapMemory(size_t length, int prot = PROT_READ | PROT_WRITE,
-                              int flags = MAP_PRIVATE | MAP_ANON, int fd = -1,
-                              off_t offset = 0) {
-#if defined(__ia64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__NetBSD__))
-  /*
-   * The JS engine assumes that all allocated pointers have their high 17 bits
-   * clear, which ia64's mmap doesn't support directly. However, we can emulate
-   * it by passing mmap an "addr" parameter with those bits clear. The mmap will
-   * return that address, or the nearest available memory above that address,
-   * providing a near-guarantee that those bits are clear. If they are not, we
-   * return nullptr below to indicate out-of-memory.
-   *
-   * The addr is chosen as 0x0000070000000000, which still allows about 120TB of
-   * virtual address space.
-   *
-   * See Bug 589735 for more information.
-   */
-  void* region =
-      mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  /*
-   * If the allocated memory doesn't have its upper 17 bits clear, consider it
-   * as out of memory.
-   */
-  if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-    if (munmap(region, length)) {
-      MOZ_ASSERT(errno == ENOMEM);
-    }
-    return nullptr;
-  }
-  return region;
-#elif defined(__aarch64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-  /*
-   * There might be similar virtual address issue on arm64 which depends on
-   * hardware and kernel configurations. But the work around is slightly
-   * different due to the different mmap behavior.
-   *
-   * TODO: Merge with the above code block if this implementation works for
-   * ia64 and sparc64.
-   */
-  const uintptr_t start = UINT64_C(0x0000070000000000);
-  const uintptr_t end = UINT64_C(0x0000800000000000);
-  const uintptr_t step = ChunkSize;
-  /*
-   * Optimization options if there are too many retries in practice:
-   * 1. Examine /proc/self/maps to find an available address. This file is
-   *    not always available, however. In addition, even if we examine
-   *    /proc/self/maps, we may still need to retry several times due to
-   *    racing with other threads.
-   * 2. Use a global/static variable with lock to track the addresses we have
-   *    allocated or tried.
-   */
-  uintptr_t hint;
-  void* region = MAP_FAILED;
-  for (hint = start; region == MAP_FAILED && hint + length <= end;
-       hint += step) {
-    region = mmap((void*)hint, length, prot, flags, fd, offset);
-    if (region != MAP_FAILED) {
-      if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-        if (munmap(region, length)) {
-          MOZ_ASSERT(errno == ENOMEM);
-        }
-        region = MAP_FAILED;
-      }
-    }
-  }
-  return region == MAP_FAILED ? nullptr : region;
-#else
-  void* region = MozTaggedAnonymousMmap(nullptr, length, prot, flags, fd,
-                                        offset, "js-gc-heap");
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  return region;
-#endif
-}
-
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
-
-  void* p = MapMemory(size);
-
-  /* Special case: If we want page alignment, no further work is needed. */
-  if (alignment == allocGranularity) {
-    return p;
-  }
-
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
-  }
-
-  void* retainedAddr;
-  GetNewChunk(&p, &retainedAddr, size, alignment);
-  if (retainedAddr) {
-    UnmapPages(retainedAddr, size);
-  }
-  if (p) {
-    if (OffsetFromAligned(p, alignment) == 0) {
-      return p;
-    }
-    UnmapPages(p, size);
-  }
-
-  p = MapAlignedPagesSlow(size, alignment);
-  if (!p) {
-    return MapAlignedPagesLastDitch(size, alignment);
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
-  return p;
-}
-
-static void* MapAlignedPagesSlow(size_t size, size_t alignment) {
-  /* Overallocate and unmap the region's edges. */
-  size_t reqSize = size + alignment - pageSize;
-  void* region = MapMemory(reqSize);
-  if (!region) {
-    return nullptr;
-  }
-
-  void* regionEnd = (void*)(uintptr_t(region) + reqSize);
-  void* front;
-  void* end;
-  if (growthDirection <= 0) {
-    size_t offset = OffsetFromAligned(regionEnd, alignment);
-    end = (void*)(uintptr_t(regionEnd) - offset);
-    front = (void*)(uintptr_t(end) - size);
-  } else {
-    size_t offset = OffsetFromAligned(region, alignment);
-    front = (void*)(uintptr_t(region) + (offset ? alignment - offset : 0));
-    end = (void*)(uintptr_t(front) + size);
-  }
+  // Due to bug 1502562, the following assertion does not currently hold.
+  // MOZ_RELEASE_ASSERT(length > 0);
 
-  if (front != region) {
-    UnmapPages(region, uintptr_t(front) - uintptr_t(region));
-  }
-  if (end != regionEnd) {
-    UnmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
-  }
-
-  return front;
-}
-
-/*
- * In a low memory or high fragmentation situation, alignable chunks of the
- * desired size may still be available, even if there are no more contiguous
- * free chunks that meet the |size + alignment - pageSize| requirement of
- * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
- * by temporarily holding onto the unaligned parts of each chunk until the
- * allocator gives us a chunk that either is, or can be aligned.
- */
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  void* tempMaps[MaxLastDitchAttempts];
-  int attempt = 0;
-  void* p = MapMemory(size);
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
-  }
-  for (; attempt < MaxLastDitchAttempts; ++attempt) {
-    GetNewChunk(&p, tempMaps + attempt, size, alignment);
-    if (OffsetFromAligned(p, alignment) == 0) {
-      if (tempMaps[attempt]) {
-        UnmapPages(tempMaps[attempt], size);
-      }
-      break;
-    }
-    if (!tempMaps[attempt]) {
-      break; /* Bail if GetNewChunk failed. */
-    }
-  }
-  if (OffsetFromAligned(p, alignment)) {
-    UnmapPages(p, size);
-    p = nullptr;
-  }
-  while (--attempt >= 0) {
-    UnmapPages(tempMaps[attempt], size);
-  }
-  return p;
-}
-
-/*
- * mmap calls don't have to be matched with calls to munmap, so we can unmap
- * just the pages we don't need. However, as we don't know a priori if addresses
- * are handed out in increasing or decreasing order, we have to try both
- * directions (depending on the environment, one will always fail).
- */
-static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size,
-                        size_t alignment) {
-  void* address = *aAddress;
-  void* retainedAddr = nullptr;
-  bool addrsGrowDown = growthDirection <= 0;
-  int i = 0;
-  for (; i < 2; ++i) {
-    /* Try the direction indicated by growthDirection. */
-    if (addrsGrowDown) {
-      size_t offset = OffsetFromAligned(address, alignment);
-      void* head = (void*)((uintptr_t)address - offset);
-      void* tail = (void*)((uintptr_t)head + size);
-      if (MapMemoryAt(head, offset)) {
-        UnmapPages(tail, offset);
-        if (growthDirection >= -8) {
-          --growthDirection;
-        }
-        address = head;
-        break;
-      }
-    } else {
-      size_t offset = alignment - OffsetFromAligned(address, alignment);
-      void* head = (void*)((uintptr_t)address + offset);
-      void* tail = (void*)((uintptr_t)address + size);
-      if (MapMemoryAt(tail, offset)) {
-        UnmapPages(address, offset);
-        if (growthDirection <= 8) {
-          ++growthDirection;
-        }
-        address = head;
-        break;
-      }
-    }
-    /* If we're confident in the growth direction, don't try the other. */
-    if (growthDirection < -8 || growthDirection > 8) {
-      break;
-    }
-    /* If that failed, try the opposite direction. */
-    addrsGrowDown = !addrsGrowDown;
-  }
-  /* If our current chunk cannot be aligned, see if the next one is aligned. */
-  if (OffsetFromAligned(address, alignment)) {
-    retainedAddr = address;
-    address = MapMemory(size);
-  }
-  *aAddress = address;
-  *aRetainedAddr = retainedAddr;
-}
-
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  if (munmap(p, size)) {
-    MOZ_ASSERT(errno == ENOMEM);
-  }
-}
-
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
-
-  if (!DecommitEnabled()) {
-    return false;
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-#if defined(XP_SOLARIS)
-  int result = posix_madvise(p, size, POSIX_MADV_DONTNEED);
-#else
-  int result = madvise(p, size, MADV_DONTNEED);
-#endif
-  return result != -1;
-}
-
-void MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  if (!DecommitEnabled()) {
-    return;
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-}
-
-size_t GetPageFaultCount() {
-  if (mozilla::recordreplay::IsRecordingOrReplaying()) {
-    return 0;
-  }
-  struct rusage usage;
-  int err = getrusage(RUSAGE_SELF, &usage);
-  if (err) {
-    return 0;
-  }
-  return usage.ru_majflt;
-}
-
-void* AllocateMappedContent(int fd, size_t offset, size_t length,
-                            size_t alignment) {
-  MOZ_ASSERT(length && alignment);
-
-  // The allocation granularity and the requested offset
-  // must both be divisible by the requested alignment.
-  // Alignments larger than the allocation granularity are not supported.
-  if (allocGranularity % alignment != 0 || offset % alignment != 0) {
-    return nullptr;
-  }
-
-  // Sanity check the offset and size, as mmap does not do this for us.
-  struct stat st;
-  if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
-      length > uint64_t(st.st_size) - offset) {
-    return nullptr;
-  }
-
-  size_t alignedOffset = offset - (offset % allocGranularity);
-  size_t alignedLength = length + (offset % allocGranularity);
-
-  uint8_t* map = static_cast<uint8_t*>(MapMemory(
-      alignedLength, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, alignedOffset));
-  if (!map) {
-    return nullptr;
-  }
-
-#ifdef DEBUG
-  // Zero out data before and after the desired mapping to catch errors early.
-  if (offset != alignedOffset) {
-    memset(map, 0, offset - alignedOffset);
-  }
-  if (alignedLength % pageSize) {
-    memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
-  }
-#endif
-
-  return map + (offset - alignedOffset);
-}
-
-void DeallocateMappedContent(void* p, size_t length) {
-  if (!p) {
-    return;
-  }
-
-  // Calculate the address originally returned by mmap.
+  // Calculate the address originally returned by the system call.
   // This is needed because AllocateMappedContent returns a pointer
   // that might be offset from the mapping, as the beginning of a
   // mapping must be aligned with the allocation granularity.
-  uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
-  size_t alignedLength = length + (uintptr_t(p) % allocGranularity);
-  UnmapPages(reinterpret_cast<void*>(map), alignedLength);
-}
-
+  uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
+#if defined(XP_WIN)
+  MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
 #else
-#error "Memory mapping functions are not defined for your OS."
-#endif
-
-void ProtectPages(void* p, size_t size) {
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_RELEASE_ASSERT(size > 0);
-  MOZ_RELEASE_ASSERT(p);
-#if defined(XP_WIN)
-  DWORD oldProtect;
-  if (!VirtualProtect(p, size, PAGE_NOACCESS, &oldProtect)) {
-    MOZ_CRASH_UNSAFE_PRINTF(
-        "VirtualProtect(PAGE_NOACCESS) failed! Error code: %lu",
-        GetLastError());
-  }
-#else  // assume Unix
-  if (mprotect(p, size, PROT_NONE)) {
-    MOZ_CRASH("mprotect(PROT_NONE) failed");
+  size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
+  if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
+    MOZ_RELEASE_ASSERT(errno == ENOMEM);
   }
 #endif
 }
 
-void MakePagesReadOnly(void* p, size_t size) {
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_RELEASE_ASSERT(size > 0);
-  MOZ_RELEASE_ASSERT(p);
+static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
+  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
 #if defined(XP_WIN)
   DWORD oldProtect;
-  if (!VirtualProtect(p, size, PAGE_READONLY, &oldProtect)) {
-    MOZ_CRASH_UNSAFE_PRINTF(
-        "VirtualProtect(PAGE_READONLY) failed! Error code: %lu",
-        GetLastError());
-  }
-#else  // assume Unix
-  if (mprotect(p, size, PROT_READ)) {
-    MOZ_CRASH("mprotect(PROT_READ) failed");
-  }
+  MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
+                     0);
+#else
+  MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
 #endif
 }
 
-void UnprotectPages(void* p, size_t size) {
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_RELEASE_ASSERT(size > 0);
-  MOZ_RELEASE_ASSERT(p);
-#if defined(XP_WIN)
-  DWORD oldProtect;
-  if (!VirtualProtect(p, size, PAGE_READWRITE, &oldProtect)) {
-    MOZ_CRASH_UNSAFE_PRINTF(
-        "VirtualProtect(PAGE_READWRITE) failed! Error code: %lu",
-        GetLastError());
-  }
-#else  // assume Unix
-  if (mprotect(p, size, PROT_READ | PROT_WRITE)) {
-    MOZ_CRASH("mprotect(PROT_READ | PROT_WRITE) failed");
-  }
-#endif
+void ProtectPages(void* region, size_t length) {
+  ProtectMemory(region, length, PageAccess::None);
+}
+
+void MakePagesReadOnly(void* region, size_t length) {
+  ProtectMemory(region, length, PageAccess::Read);
+}
+
+void UnprotectPages(void* region, size_t length) {
+  ProtectMemory(region, length, PageAccess::ReadWrite);
 }
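
Usage sketch for the protection helpers (illustrative; |buffer| and |len| are hypothetical page-aligned values):

    MakePagesReadOnly(buffer, len);  // writes now fault, reads still succeed
    // ... publish the data to readers ...
    UnprotectPages(buffer, len);     // restore read-write access
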
 
 }  // namespace gc
 }  // namespace js