Merge mozilla-central to autoland. a=merge CLOSED TREE
author Bogdan Tara <btara@mozilla.com>
Tue, 15 Jan 2019 18:12:37 +0200
changeset 511049 18bdd2ab1081221fd7b0e115e792645ff036fe95
parent 511048 ad473f67f76950921b049ffe896dd25a0e987de6 (current diff)
parent 511030 abb8a4bb66a742bc1e31c3e2a2956bedf52be665 (diff)
child 511050 d7d3b47d3a775c7fa45d5e0c3a6eba850cc394e8
push id 10547
push user ffxbld-merge
push date Mon, 21 Jan 2019 13:03:58 +0000
treeherder mozilla-beta@24ec1916bffe
reviewers merge
milestone 66.0a1
--- a/dom/base/Element.cpp
+++ b/dom/base/Element.cpp
@@ -277,51 +277,30 @@ void Element::UpdateState(bool aNotify) 
     }
   }
 }
 
 }  // namespace dom
 }  // namespace mozilla
 
 void nsIContent::UpdateEditableState(bool aNotify) {
-  // Guaranteed to be non-element content
-  NS_ASSERTION(!IsElement(), "What happened here?");
   nsIContent* parent = GetParent();
 
-  // Skip over unknown native anonymous content to avoid setting a flag we
-  // can't clear later
-  bool isUnknownNativeAnon = false;
-  if (IsInNativeAnonymousSubtree()) {
-    isUnknownNativeAnon = true;
-    nsCOMPtr<nsIContent> root = this;
-    while (root && !root->IsRootOfNativeAnonymousSubtree()) {
-      root = root->GetParent();
-    }
-    // root should always be true here, but isn't -- bug 999416
-    if (root) {
-      nsIFrame* rootFrame = root->GetPrimaryFrame();
-      if (rootFrame) {
-        nsContainerFrame* parentFrame = rootFrame->GetParent();
-        nsITextControlFrame* textCtrl = do_QueryFrame(parentFrame);
-        isUnknownNativeAnon = !textCtrl;
-      }
-    }
-  }
-
+  // Don't implicitly set the flag on the root of a native anonymous subtree.
+  // This needs to be set explicitly, see for example
+  // nsTextControlFrame::CreateRootNode().
   SetEditableFlag(parent && parent->HasFlag(NODE_IS_EDITABLE) &&
-                  !isUnknownNativeAnon);
+                  !IsRootOfNativeAnonymousSubtree());
 }
 
 namespace mozilla {
 namespace dom {
 
 void Element::UpdateEditableState(bool aNotify) {
-  nsIContent* parent = GetParent();
-
-  SetEditableFlag(parent && parent->HasFlag(NODE_IS_EDITABLE));
+  nsIContent::UpdateEditableState(aNotify);
   if (aNotify) {
     UpdateState(aNotify);
   } else {
     // Avoid calling UpdateState in this very common case, because
     // this gets called for pretty much every single element on
     // insertion into the document and UpdateState can be slow for
     // some kinds of elements even when not notifying.
     if (IsEditable()) {
--- a/dom/base/nsIContentInlines.h
+++ b/dom/base/nsIContentInlines.h
@@ -148,19 +148,24 @@ inline bool nsINode::NodeOrAncestorHasDi
 }
 
 inline bool nsINode::IsEditable() const {
   if (HasFlag(NODE_IS_EDITABLE)) {
     // The node is in an editable contentEditable subtree.
     return true;
   }
 
-  Document* doc = GetUncomposedDoc();
+  // All editable anonymous content should be made explicitly editable via the
+  // NODE_IS_EDITABLE flag.
+  if (IsInNativeAnonymousSubtree()) {
+    return false;
+  }
 
   // Check if the node is in a document and the document is in designMode.
+  Document* doc = GetUncomposedDoc();
   return doc && doc->HasFlag(NODE_IS_EDITABLE);
 }
 
 inline bool nsIContent::IsActiveChildrenElement() const {
   if (!mNodeInfo->Equals(nsGkAtoms::children, kNameSpaceID_XBL)) {
     return false;
   }
 
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -2,917 +2,913 @@
  * vim: set ts=8 sts=2 et sw=2 tw=80:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "gc/Memory.h"
 
 #include "mozilla/Atomics.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/RandomNum.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 
 #include "js/HeapAPI.h"
 #include "vm/Runtime.h"
 
-#if defined(XP_WIN)
+#ifdef XP_WIN
 
-#include "mozilla/Sprintf.h"
 #include "util/Windows.h"
 #include <psapi.h>
 
-#elif defined(SOLARIS)
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-#elif defined(XP_UNIX)
+#else
 
 #include <algorithm>
 #include <errno.h>
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 #endif
 
 namespace js {
 namespace gc {
 
-// The GC can only safely decommit memory when the page size of the
-// running process matches the compiled arena size.
+/*
+ * System allocation functions generally require the allocation size
+ * to be an integer multiple of the page size of the running process.
+ */
 static size_t pageSize = 0;
 
-// The OS allocation granularity may not match the page size.
+/* The OS allocation granularity may not match the page size. */
 static size_t allocGranularity = 0;
 
-#if defined(XP_UNIX)
-// The addresses handed out by mmap may grow up or down.
+/* The number of bits used by addresses on this platform. */
+static size_t numAddressBits = 0;
+
+/*
+ * System allocation functions may hand out regions of memory in increasing or
+ * decreasing order. This ordering is used as a hint during chunk alignment to
+ * reduce the number of system calls. On systems with 48-bit addresses, our
+ * workarounds to obtain 47-bit pointers cause addresses to be handed out in
+ * increasing order.
+ *
+ * We do not use the growth direction on Windows, as constraints on VirtualAlloc
+ * would make its application failure prone and complex. Tests indicate that
+ * VirtualAlloc always hands out regions of memory in increasing order.
+ */
+#if defined(XP_DARWIN)
+static mozilla::Atomic<int, mozilla::Relaxed,
+                       mozilla::recordreplay::Behavior::DontPreserve>
+    growthDirection(1);
+#elif defined(XP_UNIX)
 static mozilla::Atomic<int, mozilla::Relaxed,
                        mozilla::recordreplay::Behavior::DontPreserve>
     growthDirection(0);
 #endif
 
-// Data from OOM crashes shows there may be up to 24 chunksized but unusable
-// chunks available in low memory situations. These chunks may all need to be
-// used up before we gain access to remaining *alignable* chunksized regions,
-// so we use a generous limit of 32 unusable chunks to ensure we reach them.
+/*
+ * Data from OOM crashes shows there may be up to 24 chunk-sized but unusable
+ * chunks available in low memory situations. These chunks may all need to be
+ * used up before we gain access to remaining *alignable* chunk-sized regions,
+ * so we use a generous limit of 32 unusable chunks to ensure we reach them.
+ */
 static const int MaxLastDitchAttempts = 32;
 
-static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size,
-                        size_t alignment);
-static void* MapAlignedPagesSlow(size_t size, size_t alignment);
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment);
+#ifdef JS_64BIT
+/*
+ * On some 64-bit platforms we can use a random, scattershot allocator that
+ * tries addresses from the available range at random. If the address range
+ * is large enough this will have a high chance of success and additionally
+ * makes the memory layout of our process less predictable.
+ *
+ * However, not all 64-bit platforms have a very large address range. For
+ * example, AArch64 on Linux defaults to using 39-bit addresses to limit the
+ * number of translation tables used. On such configurations the scattershot
+ * approach to allocation creates a conflict with our desire to reserve large
+ * regions of memory for applications like WebAssembly: Small allocations may
+ * inadvertently block off all available 4-6GiB regions, and conversely
+ * reserving such regions may lower the success rate for smaller allocations to
+ * unacceptable levels.
+ *
+ * So we make a compromise: Instead of using the scattershot on all 64-bit
+ * platforms, we only use it on platforms that meet a minimum requirement for
+ * the available address range. In addition we split the address range,
+ * reserving the upper half for huge allocations and the lower half for smaller
+ * allocations. We use a limit of 43 bits so that at least 42 bits are available
+ * for huge allocations - this matches the 8TiB per process address space limit
+ * that we're already subject to on Windows.
+ */
+static const size_t MinAddressBitsForRandomAlloc = 43;
+
+/* The lower limit for huge allocations. This is fairly arbitrary. */
+static const size_t HugeAllocationSize = 1024 * 1024 * 1024;
+
+/* The minimum and maximum valid addresses that can be allocated into. */
+static size_t minValidAddress = 0;
+static size_t maxValidAddress = 0;
+
+/* The upper limit for smaller allocations and the lower limit for huge ones. */
+static size_t hugeSplit = 0;
+#endif
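
As a worked illustration of the address-range split described above (not part of the patch), here is a small constexpr sketch with hypothetical values: a 47-bit usable address space and a 64 KiB allocation granularity, mirroring what InitMemorySubsystem() computes at runtime.

#include <cstdint>

// Hypothetical values, for illustration only; the real values are computed
// at startup by InitMemorySubsystem().
constexpr uint64_t kGranularity = 0x10000;  // 64 KiB allocation granularity.
constexpr uint64_t kMinValidAddress = kGranularity;
constexpr uint64_t kMaxValidAddress =
    UINT64_C(0x00007fffffffffff) - kGranularity;  // 47-bit cap, minus a granule.
constexpr uint64_t kHugeSplit =
    UINT64_C(0x00003fffffffffff) - kGranularity;  // Halfway point.
constexpr uint64_t kHugeAllocationSize = UINT64_C(1) << 30;  // 1 GiB.

// Requests smaller than kHugeAllocationSize are scattered at random across
// [kMinValidAddress, kHugeSplit]; larger requests (e.g. wasm reservations)
// draw from (kHugeSplit, kMaxValidAddress], keeping the two populations apart.
static_assert(kHugeSplit < kMaxValidAddress, "the two halves must not overlap");
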
 
 size_t SystemPageSize() { return pageSize; }
 
-static bool DecommitEnabled() { return pageSize == ArenaSize; }
+size_t SystemAddressBits() { return numAddressBits; }
+
+bool UsingScattershotAllocator() {
+#ifdef JS_64BIT
+  return numAddressBits >= MinAddressBitsForRandomAlloc;
+#else
+  return false;
+#endif
+}
+
+enum class Commit : bool {
+  No = false,
+  Yes = true,
+};
+
+#ifdef XP_WIN
+enum class PageAccess : DWORD {
+  None = PAGE_NOACCESS,
+  Read = PAGE_READONLY,
+  ReadWrite = PAGE_READWRITE,
+  Execute = PAGE_EXECUTE,
+  ReadExecute = PAGE_EXECUTE_READ,
+  ReadWriteExecute = PAGE_EXECUTE_READWRITE,
+};
+#else
+enum class PageAccess : int {
+  None = PROT_NONE,
+  Read = PROT_READ,
+  ReadWrite = PROT_READ | PROT_WRITE,
+  Execute = PROT_EXEC,
+  ReadExecute = PROT_READ | PROT_EXEC,
+  ReadWriteExecute = PROT_READ | PROT_WRITE | PROT_EXEC,
+};
+#endif
+
+template <bool AlwaysGetNew = true>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+                            size_t length, size_t alignment);
+
+static void* MapAlignedPagesSlow(size_t length, size_t alignment);
+static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
+
+#ifdef JS_64BIT
+static void* MapAlignedPagesRandom(size_t length, size_t alignment);
+void* TestMapAlignedPagesLastDitch(size_t, size_t) { return nullptr; }
+#else
+void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
+  return MapAlignedPagesLastDitch(length, alignment);
+}
+#endif
+
+/*
+ * We can only decommit unused pages if the hardcoded Arena
+ * size matches the page size for the running process.
+ */
+static inline bool DecommitEnabled() { return pageSize == ArenaSize; }
+
+/* Returns the offset from the nearest aligned address at or below |region|. */
+static inline size_t OffsetFromAligned(void* region, size_t alignment) {
+  return uintptr_t(region) % alignment;
+}
+
+template <Commit commit, PageAccess prot>
+static inline void* MapInternal(void* desired, size_t length) {
+  void* region = nullptr;
+#ifdef XP_WIN
+  DWORD flags =
+      (commit == Commit::Yes ? MEM_RESERVE | MEM_COMMIT : MEM_RESERVE);
+  region = VirtualAlloc(desired, length, flags, DWORD(prot));
+#else
+  int flags = MAP_PRIVATE | MAP_ANON;
+  region = MozTaggedAnonymousMmap(desired, length, int(prot), flags, -1, 0,
+                                  "js-gc-heap");
+  if (region == MAP_FAILED) {
+    return nullptr;
+  }
+#endif
+  return region;
+}
+
+static inline void UnmapInternal(void* region, size_t length) {
+  MOZ_ASSERT(region && OffsetFromAligned(region, allocGranularity) == 0);
+  MOZ_ASSERT(length > 0 && length % pageSize == 0);
+
+#ifdef XP_WIN
+  MOZ_RELEASE_ASSERT(VirtualFree(region, 0, MEM_RELEASE) != 0);
+#else
+  if (munmap(region, length)) {
+    MOZ_RELEASE_ASSERT(errno == ENOMEM);
+  }
+#endif
+}
+
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemory(size_t length) {
+  MOZ_ASSERT(length > 0);
+
+  return MapInternal<commit, prot>(nullptr, length);
+}
+
+/*
+ * Attempts to map memory at the given address, but allows the system
+ * to return a different address that may still be suitable.
+ */
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemoryAtFuzzy(void* desired, size_t length) {
+  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
+  MOZ_ASSERT(length > 0);
+
+  // Note that some platforms treat the requested address as a hint, so the
+  // returned address might not match the requested address.
+  return MapInternal<commit, prot>(desired, length);
+}
 
 /*
- * This returns the offset of address p from the nearest aligned address at
- * or below p - or alternatively, the number of unaligned bytes at the end of
- * the region starting at p (as we assert that allocation size is an integer
- * multiple of the alignment).
+ * Attempts to map memory at the given address, returning nullptr if
+ * the system returns any address other than the requested one.
  */
-static inline size_t OffsetFromAligned(void* p, size_t alignment) {
-  return uintptr_t(p) % alignment;
+template <Commit commit = Commit::Yes, PageAccess prot = PageAccess::ReadWrite>
+static inline void* MapMemoryAt(void* desired, size_t length) {
+  MOZ_ASSERT(desired && OffsetFromAligned(desired, allocGranularity) == 0);
+  MOZ_ASSERT(length > 0);
+
+  void* region = MapInternal<commit, prot>(desired, length);
+  if (!region) {
+    return nullptr;
+  }
+
+  // On some platforms mmap treats the desired address as a hint, so
+  // check that the address we got is the address we requested.
+  if (region != desired) {
+    UnmapInternal(region, length);
+    return nullptr;
+  }
+  return region;
+}
+
+#ifdef JS_64BIT
+
+/* Returns a random number in the given range. */
+static inline uint64_t GetNumberInRange(uint64_t minNum, uint64_t maxNum) {
+  const uint64_t MaxRand = UINT64_C(0xffffffffffffffff);
+  maxNum -= minNum;
+  uint64_t binSize = 1 + (MaxRand - maxNum) / (maxNum + 1);
+
+  uint64_t rndNum;
+  do {
+    mozilla::Maybe<uint64_t> result;
+    do {
+      result = mozilla::RandomUint64();
+    } while (!result);
+    rndNum = result.value() / binSize;
+  } while (rndNum > maxNum);
+
+  return minNum + rndNum;
 }
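
For readers unfamiliar with the rejection-sampling trick GetNumberInRange uses to avoid modulo bias, the following standalone sketch shows the same idea; std::mt19937_64 stands in for mozilla::RandomUint64 here purely for illustration, it is not what the patch uses.

#include <cstdint>
#include <limits>
#include <random>

// Uniformly pick a value in [minNum, maxNum]; assumes maxNum - minNum < UINT64_MAX.
static uint64_t UniformInRange(uint64_t minNum, uint64_t maxNum) {
  const uint64_t maxRand = std::numeric_limits<uint64_t>::max();
  const uint64_t span = maxNum - minNum;  // The range holds span + 1 values.
  // One bin per value; draws that land past the last complete bin are
  // rejected, so truncation never favours any value (no modulo bias).
  const uint64_t binSize = 1 + (maxRand - span) / (span + 1);

  std::mt19937_64 gen{std::random_device{}()};
  uint64_t value;
  do {
    value = gen() / binSize;
  } while (value > span);
  return minNum + value;
}
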
 
-void* TestMapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  return MapAlignedPagesLastDitch(size, alignment);
+#ifndef XP_WIN
+/*
+ * The address range available to applications depends on both hardware and
+ * kernel configuration. For example, AArch64 on Linux uses addresses with
+ * 39 significant bits by default, but can be configured to use addresses with
+ * 48 significant bits by enabling a 4th translation table. Unfortunately,
+ * there appears to be no standard way to query the limit at runtime
+ * (Windows exposes this via GetSystemInfo()).
+ *
+ * This function tries to find the address limit by performing a binary search
+ * on the index of the most significant set bit in the addresses it attempts to
+ * allocate. As the requested address is often treated as a hint by the
+ * operating system, we use the actual returned addresses to narrow the range.
+ * We return the number of bits of an address that may be set.
+ */
+static size_t FindAddressLimit() {
+  const size_t length = allocGranularity;  // Used as both length and alignment.
+
+  void* address;
+  uint64_t startRaw, endRaw, start, end, desired, actual;
+
+  // Use 32 bits as a lower bound in case we keep getting nullptr.
+  size_t low = 31;
+  uint64_t highestSeen = (UINT64_C(1) << 32) - length - 1;
+
+  // Start with addresses that have bit 47 set.
+  size_t high = 47;
+  startRaw = UINT64_C(1) << high;
+  endRaw = 2 * startRaw - length - 1;
+  start = (startRaw + length - 1) / length;
+  end = (endRaw - (length - 1)) / length;
+
+  for (size_t tries = 0; tries < 4; ++tries) {
+    desired = length * GetNumberInRange(start, end);
+    address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+    actual = uint64_t(address);
+    if (address) {
+      UnmapInternal(address, length);
+    }
+    if (actual >= startRaw) {
+      return high + 1;  // Return early and skip the binary search.
+    }
+    if (actual > highestSeen) {
+      highestSeen = actual;
+      low = mozilla::FloorLog2(highestSeen);
+    }
+  }
+
+  // Those didn't work, so perform a binary search.
+  while (high - 1 > low) {
+    size_t middle = low + (high - low) / 2;
+    startRaw = UINT64_C(1) << middle;
+    endRaw = 2 * startRaw - length - 1;
+    start = (startRaw + length - 1) / length;
+    end = (endRaw - (length - 1)) / length;
+
+    for (size_t tries = 0; tries < 4; ++tries) {
+      desired = length * GetNumberInRange(start, end);
+      address = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+      actual = uint64_t(address);
+      if (address) {
+        UnmapInternal(address, length);
+      }
+      if (actual > highestSeen) {
+        highestSeen = actual;
+        low = mozilla::FloorLog2(highestSeen);
+      }
+      if (actual >= startRaw) {
+        break;
+      }
+    }
+
+    // Low was already updated above, so just check if we need to update high.
+    if (actual < startRaw) {
+      high = middle;
+    }
+  }
+
+  // High was excluded, so use low (but sanity check it).
+  return std::min(low + 1, size_t(47));
 }
+#endif  // !defined(XP_WIN)
 
-#if defined(XP_WIN)
+#endif  // defined(JS_64BIT)
 
 void InitMemorySubsystem() {
   if (pageSize == 0) {
+#ifdef XP_WIN
     SYSTEM_INFO sysinfo;
     GetSystemInfo(&sysinfo);
     pageSize = sysinfo.dwPageSize;
     allocGranularity = sysinfo.dwAllocationGranularity;
+#else
+    pageSize = size_t(sysconf(_SC_PAGESIZE));
+    allocGranularity = pageSize;
+#endif
+#ifdef JS_64BIT
+#ifdef XP_WIN
+    minValidAddress = size_t(sysinfo.lpMinimumApplicationAddress);
+    maxValidAddress = size_t(sysinfo.lpMaximumApplicationAddress);
+    numAddressBits = mozilla::FloorLog2(maxValidAddress) + 1;
+#else
+    // No standard way to determine these, so fall back to FindAddressLimit().
+    numAddressBits = FindAddressLimit();
+    minValidAddress = allocGranularity;
+    maxValidAddress = (UINT64_C(1) << numAddressBits) - 1 - allocGranularity;
+#endif
+    // Sanity check the address to ensure we don't use more than 47 bits.
+    uint64_t maxJSAddress = UINT64_C(0x00007fffffffffff) - allocGranularity;
+    if (maxValidAddress > maxJSAddress) {
+      maxValidAddress = maxJSAddress;
+      hugeSplit = UINT64_C(0x00003fffffffffff) - allocGranularity;
+    } else {
+      hugeSplit = (UINT64_C(1) << (numAddressBits - 1)) - 1 - allocGranularity;
+    }
+#else  // !defined(JS_64BIT)
+    numAddressBits = 32;
+#endif
   }
 }
 
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-
-static inline void* MapMemoryAt(void* desired, size_t length, int flags,
-                                int prot = PAGE_READWRITE) {
-  return VirtualAlloc(desired, length, flags, prot);
+#ifdef JS_64BIT
+/* The JS engine uses 47-bit pointers; all higher bits must be clear. */
+static inline bool IsInvalidRegion(void* region, size_t length) {
+  const uint64_t invalidPointerMask = UINT64_C(0xffff800000000000);
+  return (uintptr_t(region) + length - 1) & invalidPointerMask;
 }
-
-static inline void* MapMemory(size_t length, int flags,
-                              int prot = PAGE_READWRITE) {
-  return VirtualAlloc(nullptr, length, flags, prot);
-}
+#endif
 
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
+void* MapAlignedPages(size_t length, size_t alignment) {
+  MOZ_RELEASE_ASSERT(length > 0 && alignment > 0);
+  MOZ_RELEASE_ASSERT(length % pageSize == 0);
+  MOZ_RELEASE_ASSERT(std::max(alignment, allocGranularity) %
+                         std::min(alignment, allocGranularity) ==
+                     0);
 
-  void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
-
-  // Special case: If we want allocation alignment, no further work is needed.
-  if (alignment == allocGranularity) {
-    return p;
+  // Smaller alignments aren't supported by the allocation functions.
+  if (alignment < allocGranularity) {
+    alignment = allocGranularity;
   }
 
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
+#ifdef JS_64BIT
+  // Use the scattershot allocator if the address range is large enough.
+  if (UsingScattershotAllocator()) {
+    void* region = MapAlignedPagesRandom(length, alignment);
+
+    MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
+    MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
+
+    return region;
+  }
+#endif
+
+  void* region = MapMemory(length);
+  if (OffsetFromAligned(region, alignment) == 0) {
+    return region;
+  }
+
+  void* retainedRegion;
+  TryToAlignChunk(&region, &retainedRegion, length, alignment);
+  if (retainedRegion) {
+    UnmapInternal(retainedRegion, length);
+  }
+  if (region) {
+    if (OffsetFromAligned(region, alignment) == 0) {
+      return region;
+    }
+    UnmapInternal(region, length);
+  }
+
+  region = MapAlignedPagesSlow(length, alignment);
+  if (!region) {
+    region = MapAlignedPagesLastDitch(length, alignment);
   }
 
-  void* retainedAddr;
-  GetNewChunk(&p, &retainedAddr, size, alignment);
-  if (retainedAddr) {
-    UnmapPages(retainedAddr, size);
-  }
-  if (p) {
-    if (OffsetFromAligned(p, alignment) == 0) {
-      return p;
-    }
-    UnmapPages(p, size);
-  }
-
-  p = MapAlignedPagesSlow(size, alignment);
-  if (!p) {
-    return MapAlignedPagesLastDitch(size, alignment);
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
-  return p;
+  MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
+  return region;
 }
 
-static void* MapAlignedPagesSlow(size_t size, size_t alignment) {
-  /*
-   * Windows requires that there be a 1:1 mapping between VM allocation
-   * and deallocation operations.  Therefore, take care here to acquire the
-   * final result via one mapping operation.  This means unmapping any
-   * preliminary result that is not correctly aligned.
-   */
-  void* p;
+#ifdef JS_64BIT
+
+/*
+ * This allocator takes advantage of the large address range on some 64-bit
+ * platforms to allocate in a scattershot manner, choosing addresses at random
+ * from the range. By controlling the range we can avoid returning addresses
+ * that have more than 47 significant bits (as required by SpiderMonkey).
+ * This approach also has some other advantages over the methods employed by
+ * the other allocation functions in this file:
+ * 1) Allocations are extremely likely to succeed on the first try.
+ * 2) The randomness makes our memory layout harder to predict.
+ * 3) The low probability of reusing regions guards against use-after-free.
+ *
+ * The main downside is that detecting physical OOM situations becomes more
+ * difficult; to guard against this, we occasionally try a regular allocation.
+ * In addition, sprinkling small allocations throughout the full address range
+ * might get in the way of large address space reservations such as those
+ * employed by WebAssembly. To avoid this (or the opposite problem of such
+ * reservations reducing the chance of success for smaller allocations) we
+ * split the address range in half, with one half reserved for huge allocations
+ * and the other for regular (usually chunk sized) allocations.
+ */
+static void* MapAlignedPagesRandom(size_t length, size_t alignment) {
+  uint64_t minNum, maxNum;
+  if (length < HugeAllocationSize) {
+    // Use the lower half of the range.
+    minNum = (minValidAddress + alignment - 1) / alignment;
+    maxNum = (hugeSplit - (length - 1)) / alignment;
+  } else {
+    // Use the upper half of the range.
+    minNum = (hugeSplit + 1 + alignment - 1) / alignment;
+    maxNum = (maxValidAddress - (length - 1)) / alignment;
+  }
+
+  // Try to allocate in random aligned locations.
+  void* region = nullptr;
+  for (size_t i = 1; i <= 1024; ++i) {
+    if (i & 0xf) {
+      uint64_t desired = alignment * GetNumberInRange(minNum, maxNum);
+      region = MapMemoryAtFuzzy(reinterpret_cast<void*>(desired), length);
+      if (!region) {
+        continue;
+      }
+    } else {
+      // Check for OOM.
+      region = MapMemory(length);
+      if (!region) {
+        return nullptr;
+      }
+    }
+    if (IsInvalidRegion(region, length)) {
+      UnmapInternal(region, length);
+      continue;
+    }
+    if (OffsetFromAligned(region, alignment) == 0) {
+      return region;
+    }
+    void* retainedRegion = nullptr;
+    if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
+      MOZ_ASSERT(!retainedRegion);
+      return region;
+    }
+    MOZ_ASSERT(region && !retainedRegion);
+    UnmapInternal(region, length);
+  }
+
+  if (numAddressBits < 48) {
+    // Try the reliable fallback of overallocating.
+    // Note: This will not respect the address space split.
+    region = MapAlignedPagesSlow(length, alignment);
+    if (region) {
+      return region;
+    }
+  }
+  if (length < HugeAllocationSize) {
+    MOZ_CRASH("Couldn't allocate even after 1000 tries!");
+  }
+
+  return nullptr;
+}
+
+#endif  // defined(JS_64BIT)
+
+static void* MapAlignedPagesSlow(size_t length, size_t alignment) {
+  void* alignedRegion = nullptr;
   do {
-    /*
-     * Over-allocate in order to map a memory region that is definitely
-     * large enough, then deallocate and allocate again the correct size,
-     * within the over-sized mapping.
-     *
-     * Since we're going to unmap the whole thing anyway, the first
-     * mapping doesn't have to commit pages.
-     */
-    size_t reserveSize = size + alignment - pageSize;
-    p = MapMemory(reserveSize, MEM_RESERVE);
-    if (!p) {
+    size_t reserveLength = length + alignment - pageSize;
+#ifdef XP_WIN
+    // Don't commit the requested pages as we won't use the region directly.
+    void* region = MapMemory<Commit::No>(reserveLength);
+#else
+    void* region = MapMemory(reserveLength);
+#endif
+    if (!region) {
       return nullptr;
     }
-    void* chunkStart = (void*)AlignBytes(uintptr_t(p), alignment);
-    UnmapPages(p, reserveSize);
-    p = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);
+    alignedRegion =
+        reinterpret_cast<void*>(AlignBytes(uintptr_t(region), alignment));
+#ifdef XP_WIN
+    // Windows requires that map and unmap calls be matched, so deallocate
+    // and immediately reallocate at the desired (aligned) address.
+    UnmapInternal(region, reserveLength);
+    alignedRegion = MapMemoryAt(alignedRegion, length);
+#else
+    // munmap allows us to simply unmap the pages that don't interest us.
+    if (alignedRegion != region) {
+      UnmapInternal(region, uintptr_t(alignedRegion) - uintptr_t(region));
+    }
+    void* regionEnd =
+        reinterpret_cast<void*>(uintptr_t(region) + reserveLength);
+    void* alignedEnd =
+        reinterpret_cast<void*>(uintptr_t(alignedRegion) + length);
+    if (alignedEnd != regionEnd) {
+      UnmapInternal(alignedEnd, uintptr_t(regionEnd) - uintptr_t(alignedEnd));
+    }
+#endif
+    // On Windows we may have raced with another thread; if so, try again.
+  } while (!alignedRegion);
 
-    /* Failure here indicates a race with another thread, so try again. */
-  } while (!p);
-
-  return p;
+  return alignedRegion;
 }
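
A minimal POSIX-only sketch of the overallocate-and-trim approach used by MapAlignedPagesSlow above, assuming alignment is a power of two and a multiple of the page size; error handling is reduced to returning nullptr, and the Windows unmap-and-remap path is omitted.

#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

static void* MapAlignedSketch(size_t length, size_t alignment) {
  const size_t pageSize = size_t(sysconf(_SC_PAGESIZE));
  const size_t reserve = length + alignment - pageSize;
  void* raw = mmap(nullptr, reserve, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
  if (raw == MAP_FAILED) {
    return nullptr;
  }
  const uintptr_t base = uintptr_t(raw);
  const uintptr_t aligned = (base + alignment - 1) & ~uintptr_t(alignment - 1);
  if (aligned != base) {
    munmap(raw, aligned - base);  // Trim the unaligned head.
  }
  const uintptr_t rawEnd = base + reserve;
  const uintptr_t alignedEnd = aligned + length;
  if (alignedEnd != rawEnd) {
    // Trim whatever is left past the aligned region.
    munmap(reinterpret_cast<void*>(alignedEnd), rawEnd - alignedEnd);
  }
  return reinterpret_cast<void*>(aligned);
}
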
 
 /*
  * In a low memory or high fragmentation situation, alignable chunks of the
- * desired size may still be available, even if there are no more contiguous
- * free chunks that meet the |size + alignment - pageSize| requirement of
+ * desired length may still be available, even if there are no more contiguous
+ * free chunks that meet the |length + alignment - pageSize| requirement of
  * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
  * by temporarily holding onto the unaligned parts of each chunk until the
  * allocator gives us a chunk that either is, or can be aligned.
  */
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
+static void* MapAlignedPagesLastDitch(size_t length, size_t alignment) {
   void* tempMaps[MaxLastDitchAttempts];
   int attempt = 0;
-  void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
+  void* region = MapMemory(length);
+  if (OffsetFromAligned(region, alignment) == 0) {
+    return region;
   }
   for (; attempt < MaxLastDitchAttempts; ++attempt) {
-    GetNewChunk(&p, tempMaps + attempt, size, alignment);
-    if (OffsetFromAligned(p, alignment) == 0) {
-      if (tempMaps[attempt]) {
-        UnmapPages(tempMaps[attempt], size);
-      }
-      break;
+    if (TryToAlignChunk(&region, tempMaps + attempt, length, alignment)) {
+      MOZ_ASSERT(!tempMaps[attempt]);
+      break;  // Success!
     }
-    if (!tempMaps[attempt]) {
-      break; /* Bail if GetNewChunk failed. */
+    if (!region || !tempMaps[attempt]) {
+      break;  // We ran out of memory, so give up.
     }
   }
-  if (OffsetFromAligned(p, alignment)) {
-    UnmapPages(p, size);
-    p = nullptr;
+  if (OffsetFromAligned(region, alignment)) {
+    UnmapInternal(region, length);
+    region = nullptr;
   }
   while (--attempt >= 0) {
-    UnmapPages(tempMaps[attempt], size);
+    UnmapInternal(tempMaps[attempt], length);
   }
-  return p;
+  return region;
 }
 
+#ifdef XP_WIN
+
 /*
  * On Windows, map and unmap calls must be matched, so we deallocate the
  * unaligned chunk, then reallocate the unaligned part to block off the
  * old address and force the allocator to give us a new one.
  */
-static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size,
-                        size_t alignment) {
-  void* address = *aAddress;
-  void* retainedAddr = nullptr;
+template <bool>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+                            size_t length, size_t alignment) {
+  void* region = *aRegion;
+  MOZ_ASSERT(region && OffsetFromAligned(region, alignment) != 0);
+
+  size_t retainedLength = 0;
+  void* retainedRegion = nullptr;
   do {
-    size_t retainedSize;
-    size_t offset = OffsetFromAligned(address, alignment);
-    if (!offset) {
+    size_t offset = OffsetFromAligned(region, alignment);
+    if (offset == 0) {
+      // If the address is aligned, either we hit OOM or we're done.
       break;
     }
-    UnmapPages(address, size);
-    retainedSize = alignment - offset;
-    retainedAddr = MapMemoryAt(address, retainedSize, MEM_RESERVE);
-    address = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
-    /* If retainedAddr is null here, we raced with another thread. */
-  } while (!retainedAddr);
-  *aAddress = address;
-  *aRetainedAddr = retainedAddr;
+    UnmapInternal(region, length);
+    retainedLength = alignment - offset;
+    retainedRegion = MapMemoryAt<Commit::No>(region, retainedLength);
+    region = MapMemory(length);
+
+    // If retainedRegion is null here, we raced with another thread.
+  } while (!retainedRegion);
+
+  bool result = OffsetFromAligned(region, alignment) == 0;
+  if (result && retainedRegion) {
+    UnmapInternal(retainedRegion, retainedLength);
+    retainedRegion = nullptr;
+  }
+
+  *aRegion = region;
+  *aRetainedRegion = retainedRegion;
+  return region && result;
 }
 
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
+#else  // !defined(XP_WIN)
+
+/*
+ * mmap calls don't have to be matched with calls to munmap, so we can unmap
+ * just the pages we don't need. However, as we don't know a priori if addresses
+ * are handed out in increasing or decreasing order, we have to try both
+ * directions (depending on the environment, one will always fail).
+ */
+template <bool AlwaysGetNew>
+static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
+                            size_t length, size_t alignment) {
+  void* regionStart = *aRegion;
+  MOZ_ASSERT(regionStart && OffsetFromAligned(regionStart, alignment) != 0);
 
-  MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
+  bool addressesGrowUpward = growthDirection > 0;
+  bool directionUncertain = -8 < growthDirection && growthDirection <= 8;
+  size_t offsetLower = OffsetFromAligned(regionStart, alignment);
+  size_t offsetUpper = alignment - offsetLower;
+  for (size_t i = 0; i < 2; ++i) {
+    if (addressesGrowUpward) {
+      void* upperStart =
+          reinterpret_cast<void*>(uintptr_t(regionStart) + offsetUpper);
+      void* regionEnd =
+          reinterpret_cast<void*>(uintptr_t(regionStart) + length);
+      if (MapMemoryAt(regionEnd, offsetUpper)) {
+        UnmapInternal(regionStart, offsetUpper);
+        if (directionUncertain) {
+          ++growthDirection;
+        }
+        regionStart = upperStart;
+        break;
+      }
+    } else {
+      void* lowerStart =
+          reinterpret_cast<void*>(uintptr_t(regionStart) - offsetLower);
+      void* lowerEnd = reinterpret_cast<void*>(uintptr_t(lowerStart) + length);
+      if (MapMemoryAt(lowerStart, offsetLower)) {
+        UnmapInternal(lowerEnd, offsetLower);
+        if (directionUncertain) {
+          --growthDirection;
+        }
+        regionStart = lowerStart;
+        break;
+      }
+    }
+    // If we're confident in the growth direction, don't try the other.
+    if (!directionUncertain) {
+      break;
+    }
+    addressesGrowUpward = !addressesGrowUpward;
+  }
+
+  void* retainedRegion = nullptr;
+  bool result = OffsetFromAligned(regionStart, alignment) == 0;
+  if (AlwaysGetNew && !result) {
+    // If our current chunk cannot be aligned, just get a new one.
+    retainedRegion = regionStart;
+    regionStart = MapMemory(length);
+  }
+
+  *aRegion = regionStart;
+  *aRetainedRegion = retainedRegion;
+  return regionStart && result;
 }
 
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
+#endif
+
+void UnmapPages(void* region, size_t length) {
+  MOZ_RELEASE_ASSERT(region &&
+                     OffsetFromAligned(region, allocGranularity) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
+
+  // ASan does not automatically unpoison memory, so we have to do this here.
+  MOZ_MAKE_MEM_UNDEFINED(region, length);
+
+  UnmapInternal(region, length);
+}
+
+bool MarkPagesUnused(void* region, size_t length) {
+  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
+
+  MOZ_MAKE_MEM_NOACCESS(region, length);
 
   if (!DecommitEnabled()) {
     return true;
   }
 
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-  LPVOID p2 = MapMemoryAt(p, size, MEM_RESET);
-  return p2 == p;
+#if defined(XP_WIN)
+  return VirtualAlloc(region, length, MEM_RESET,
+                      DWORD(PageAccess::ReadWrite)) == region;
+#elif defined(XP_DARWIN)
+  return madvise(region, length, MADV_FREE) == 0;
+#else
+  return madvise(region, length, MADV_DONTNEED) == 0;
+#endif
 }
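
To make the decommit semantics concrete, here is a small Linux-oriented sketch (illustration only, not from the patch): after madvise(MADV_DONTNEED), an anonymous private page gives up its physical backing and reads back as zeroes on the next touch.

#include <sys/mman.h>
#include <cassert>
#include <cstring>

int main() {
  const size_t len = 4096;  // Assume a 4 KiB page for the example.
  char* page = static_cast<char*>(mmap(nullptr, len, PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANON, -1, 0));
  assert(page != MAP_FAILED);
  memset(page, 0xab, len);            // Touching the page commits it.
  madvise(page, len, MADV_DONTNEED);  // Roughly what MarkPagesUnused does here.
  assert(page[0] == 0);               // Linux refills anonymous pages with zeroes.
  munmap(page, len);
  return 0;
}
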
 
-void MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
+void MarkPagesInUse(void* region, size_t length) {
+  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
 
-  if (!DecommitEnabled()) {
-    return;
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+  MOZ_MAKE_MEM_UNDEFINED(region, length);
 }
 
 size_t GetPageFaultCount() {
   if (mozilla::recordreplay::IsRecordingOrReplaying()) {
     return 0;
   }
+#ifdef XP_WIN
   PROCESS_MEMORY_COUNTERS pmc;
-  if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
+  if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)) == 0) {
     return 0;
   }
   return pmc.PageFaultCount;
+#else
+  struct rusage usage;
+  int err = getrusage(RUSAGE_SELF, &usage);
+  if (err) {
+    return 0;
+  }
+  return usage.ru_majflt;
+#endif
 }
 
 void* AllocateMappedContent(int fd, size_t offset, size_t length,
                             size_t alignment) {
-  MOZ_ASSERT(length && alignment);
-
-  // The allocation granularity and the requested offset
-  // must both be divisible by the requested alignment.
-  // Alignments larger than the allocation granularity are not supported.
-  if (allocGranularity % alignment != 0 || offset % alignment != 0) {
-    return nullptr;
-  }
-
-  HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
-
-  // This call will fail if the file does not exist, which is what we want.
-  HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
-  if (!hMap) {
+  if (length == 0 || alignment == 0 || offset % alignment != 0 ||
+      std::max(alignment, allocGranularity) %
+              std::min(alignment, allocGranularity) !=
+          0) {
     return nullptr;
   }
 
   size_t alignedOffset = offset - (offset % allocGranularity);
   size_t alignedLength = length + (offset % allocGranularity);
 
+  // We preallocate the mapping using MapAlignedPages, which expects
+  // the length parameter to be an integer multiple of the page size.
+  size_t mappedLength = alignedLength;
+  if (alignedLength % pageSize != 0) {
+    mappedLength += pageSize - alignedLength % pageSize;
+  }
+
+#ifdef XP_WIN
+  HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
+
+  // This call will fail if the file does not exist.
+  HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
+  if (!hMap) {
+    return nullptr;
+  }
+
   DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
   DWORD offsetL = uint32_t(alignedOffset);
 
-  // If the offset or length are out of bounds, this call will fail.
-  uint8_t* map = static_cast<uint8_t*>(
-      MapViewOfFile(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength));
+  uint8_t* map = nullptr;
+  for (;;) {
+    // The value of a pointer is technically only defined while the region
+    // it points to is allocated, so explicitly treat this one as a number.
+    uintptr_t region = uintptr_t(MapAlignedPages(mappedLength, alignment));
+    if (region == 0) {
+      break;
+    }
+    UnmapInternal(reinterpret_cast<void*>(region), mappedLength);
+    // If the offset or length are out of bounds, this call will fail.
+    map = static_cast<uint8_t*>(
+        MapViewOfFileEx(hMap, FILE_MAP_COPY, offsetH, offsetL, alignedLength,
+                        reinterpret_cast<void*>(region)));
+
+    // Retry if another thread mapped the address we were trying to use.
+    if (map || GetLastError() != ERROR_INVALID_ADDRESS) {
+      break;
+    }
+  }
 
   // This just decreases the file mapping object's internal reference count;
   // it won't actually be destroyed until we unmap the associated view.
   CloseHandle(hMap);
 
   if (!map) {
     return nullptr;
   }
+#else  // !defined(XP_WIN)
+  // Sanity check the offset and length, as mmap does not do this for us.
+  struct stat st;
+  if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
+      length > uint64_t(st.st_size) - offset) {
+    return nullptr;
+  }
+
+  void* region = MapAlignedPages(mappedLength, alignment);
+  if (!region) {
+    return nullptr;
+  }
+
+  // Calling mmap with MAP_FIXED will replace the previous mapping, allowing
+  // us to reuse the region we obtained without racing with other threads.
+  uint8_t* map =
+      static_cast<uint8_t*>(mmap(region, alignedLength, PROT_READ | PROT_WRITE,
+                                 MAP_PRIVATE | MAP_FIXED, fd, alignedOffset));
+  MOZ_RELEASE_ASSERT(map != MAP_FAILED);
+#endif
 
 #ifdef DEBUG
   // Zero out data before and after the desired mapping to catch errors early.
   if (offset != alignedOffset) {
     memset(map, 0, offset - alignedOffset);
   }
   if (alignedLength % pageSize) {
     memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
   }
 #endif
 
   return map + (offset - alignedOffset);
 }
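
A worked example of the offset arithmetic above, with hypothetical numbers: given a 64 KiB (65536-byte) allocation granularity, offset = 70000 and length = 100000, we get
  alignedOffset = 70000 - (70000 % 65536) = 65536
  alignedLength = 100000 + (70000 % 65536) = 104464
The file is mapped starting at alignedOffset, and the caller receives map + (offset - alignedOffset) = map + 4464, i.e. a pointer to the byte corresponding to the requested offset inside the larger aligned view.
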
 
-void DeallocateMappedContent(void* p, size_t /*length*/) {
-  if (!p) {
-    return;
-  }
-
-  // Calculate the address originally returned by MapViewOfFile.
-  // This is needed because AllocateMappedContent returns a pointer
-  // that might be offset from the view, as the beginning of a
-  // view must be aligned with the allocation granularity.
-  uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
-  MOZ_ALWAYS_TRUE(UnmapViewOfFile(reinterpret_cast<void*>(map)));
-}
-
-#else  // Various APIs are unavailable.
-
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
-
-  void* p = _aligned_malloc(size, alignment);
-
-  MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
-  return p;
-}
-
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  return nullptr;
-}
-
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  _aligned_free(p);
-}
-
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-  return true;
-}
-
-bool MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-}
-
-size_t GetPageFaultCount() {
-  // GetProcessMemoryInfo is unavailable.
-  return 0;
-}
-
-void* AllocateMappedContent(int fd, size_t offset, size_t length,
-                            size_t alignment) {
-  // Not implemented.
-  return nullptr;
-}
-
-// Deallocate mapped memory for object.
-void DeallocateMappedContent(void* p, size_t length) {
-  // Not implemented.
-}
-
-#endif
-
-#elif defined(SOLARIS)
-
-#ifndef MAP_NOSYNC
-#define MAP_NOSYNC 0
-#endif
-
-void InitMemorySubsystem() {
-  if (pageSize == 0) {
-    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
-  }
-}
-
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
-
-  int prot = PROT_READ | PROT_WRITE;
-  int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
-
-  void* p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
-  if (p == MAP_FAILED) {
-    return nullptr;
-  }
-  return p;
-}
-
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  return nullptr;
-}
-
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  MOZ_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
-}
-
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-  return true;
-}
-
-bool MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  if (!DecommitEnabled()) {
+void DeallocateMappedContent(void* region, size_t length) {
+  if (!region) {
     return;
   }
 
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-}
-
-size_t GetPageFaultCount() { return 0; }
-
-void* AllocateMappedContent(int fd, size_t offset, size_t length,
-                            size_t alignment) {
-  // Not implemented.
-  return nullptr;
-}
-
-// Deallocate mapped memory for object.
-void DeallocateMappedContent(void* p, size_t length) {
-  // Not implemented.
-}
-
-#elif defined(XP_UNIX)
-
-void InitMemorySubsystem() {
-  if (pageSize == 0) {
-    pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
-  }
-}
-
-static inline void* MapMemoryAt(void* desired, size_t length,
-                                int prot = PROT_READ | PROT_WRITE,
-                                int flags = MAP_PRIVATE | MAP_ANON, int fd = -1,
-                                off_t offset = 0) {
-
-#if defined(__ia64__) || defined(__aarch64__) ||  \
-    (defined(__sparc__) && defined(__arch64__) && \
-     (defined(__NetBSD__) || defined(__linux__)))
-  MOZ_ASSERT((0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
-#endif
-  void* region = mmap(desired, length, prot, flags, fd, offset);
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  /*
-   * mmap treats the given address as a hint unless the MAP_FIXED flag is
-   * used (which isn't usually what you want, as this overrides existing
-   * mappings), so check that the address we got is the address we wanted.
-   */
-  if (region != desired) {
-    if (munmap(region, length)) {
-      MOZ_ASSERT(errno == ENOMEM);
-    }
-    return nullptr;
-  }
-  return region;
-}
-
-static inline void* MapMemory(size_t length, int prot = PROT_READ | PROT_WRITE,
-                              int flags = MAP_PRIVATE | MAP_ANON, int fd = -1,
-                              off_t offset = 0) {
-#if defined(__ia64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__NetBSD__))
-  /*
-   * The JS engine assumes that all allocated pointers have their high 17 bits
-   * clear, which ia64's mmap doesn't support directly. However, we can emulate
-   * it by passing mmap an "addr" parameter with those bits clear. The mmap will
-   * return that address, or the nearest available memory above that address,
-   * providing a near-guarantee that those bits are clear. If they are not, we
-   * return nullptr below to indicate out-of-memory.
-   *
-   * The addr is chosen as 0x0000070000000000, which still allows about 120TB of
-   * virtual address space.
-   *
-   * See Bug 589735 for more information.
-   */
-  void* region =
-      mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  /*
-   * If the allocated memory doesn't have its upper 17 bits clear, consider it
-   * as out of memory.
-   */
-  if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-    if (munmap(region, length)) {
-      MOZ_ASSERT(errno == ENOMEM);
-    }
-    return nullptr;
-  }
-  return region;
-#elif defined(__aarch64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-  /*
-   * There might be similar virtual address issue on arm64 which depends on
-   * hardware and kernel configurations. But the work around is slightly
-   * different due to the different mmap behavior.
-   *
-   * TODO: Merge with the above code block if this implementation works for
-   * ia64 and sparc64.
-   */
-  const uintptr_t start = UINT64_C(0x0000070000000000);
-  const uintptr_t end = UINT64_C(0x0000800000000000);
-  const uintptr_t step = ChunkSize;
-  /*
-   * Optimization options if there are too many retries in practice:
-   * 1. Examine /proc/self/maps to find an available address. This file is
-   *    not always available, however. In addition, even if we examine
-   *    /proc/self/maps, we may still need to retry several times due to
-   *    racing with other threads.
-   * 2. Use a global/static variable with lock to track the addresses we have
-   *    allocated or tried.
-   */
-  uintptr_t hint;
-  void* region = MAP_FAILED;
-  for (hint = start; region == MAP_FAILED && hint + length <= end;
-       hint += step) {
-    region = mmap((void*)hint, length, prot, flags, fd, offset);
-    if (region != MAP_FAILED) {
-      if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-        if (munmap(region, length)) {
-          MOZ_ASSERT(errno == ENOMEM);
-        }
-        region = MAP_FAILED;
-      }
-    }
-  }
-  return region == MAP_FAILED ? nullptr : region;
-#else
-  void* region = MozTaggedAnonymousMmap(nullptr, length, prot, flags, fd,
-                                        offset, "js-gc-heap");
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  return region;
-#endif
-}
-
-void* MapAlignedPages(size_t size, size_t alignment) {
-  MOZ_ASSERT(size >= alignment);
-  MOZ_ASSERT(size >= allocGranularity);
-  MOZ_ASSERT(size % alignment == 0);
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_ASSERT_IF(alignment < allocGranularity,
-                allocGranularity % alignment == 0);
-  MOZ_ASSERT_IF(alignment > allocGranularity,
-                alignment % allocGranularity == 0);
-
-  void* p = MapMemory(size);
-
-  /* Special case: If we want page alignment, no further work is needed. */
-  if (alignment == allocGranularity) {
-    return p;
-  }
-
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
-  }
-
-  void* retainedAddr;
-  GetNewChunk(&p, &retainedAddr, size, alignment);
-  if (retainedAddr) {
-    UnmapPages(retainedAddr, size);
-  }
-  if (p) {
-    if (OffsetFromAligned(p, alignment) == 0) {
-      return p;
-    }
-    UnmapPages(p, size);
-  }
-
-  p = MapAlignedPagesSlow(size, alignment);
-  if (!p) {
-    return MapAlignedPagesLastDitch(size, alignment);
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
-  return p;
-}
-
-static void* MapAlignedPagesSlow(size_t size, size_t alignment) {
-  /* Overallocate and unmap the region's edges. */
-  size_t reqSize = size + alignment - pageSize;
-  void* region = MapMemory(reqSize);
-  if (!region) {
-    return nullptr;
-  }
-
-  void* regionEnd = (void*)(uintptr_t(region) + reqSize);
-  void* front;
-  void* end;
-  if (growthDirection <= 0) {
-    size_t offset = OffsetFromAligned(regionEnd, alignment);
-    end = (void*)(uintptr_t(regionEnd) - offset);
-    front = (void*)(uintptr_t(end) - size);
-  } else {
-    size_t offset = OffsetFromAligned(region, alignment);
-    front = (void*)(uintptr_t(region) + (offset ? alignment - offset : 0));
-    end = (void*)(uintptr_t(front) + size);
-  }
+  // Due to bug 1502562, the following assertion does not currently hold.
+  // MOZ_RELEASE_ASSERT(length > 0);
 
-  if (front != region) {
-    UnmapPages(region, uintptr_t(front) - uintptr_t(region));
-  }
-  if (end != regionEnd) {
-    UnmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
-  }
-
-  return front;
-}
-
-/*
- * In a low memory or high fragmentation situation, alignable chunks of the
- * desired size may still be available, even if there are no more contiguous
- * free chunks that meet the |size + alignment - pageSize| requirement of
- * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
- * by temporarily holding onto the unaligned parts of each chunk until the
- * allocator gives us a chunk that either is, or can be aligned.
- */
-static void* MapAlignedPagesLastDitch(size_t size, size_t alignment) {
-  void* tempMaps[MaxLastDitchAttempts];
-  int attempt = 0;
-  void* p = MapMemory(size);
-  if (OffsetFromAligned(p, alignment) == 0) {
-    return p;
-  }
-  for (; attempt < MaxLastDitchAttempts; ++attempt) {
-    GetNewChunk(&p, tempMaps + attempt, size, alignment);
-    if (OffsetFromAligned(p, alignment) == 0) {
-      if (tempMaps[attempt]) {
-        UnmapPages(tempMaps[attempt], size);
-      }
-      break;
-    }
-    if (!tempMaps[attempt]) {
-      break; /* Bail if GetNewChunk failed. */
-    }
-  }
-  if (OffsetFromAligned(p, alignment)) {
-    UnmapPages(p, size);
-    p = nullptr;
-  }
-  while (--attempt >= 0) {
-    UnmapPages(tempMaps[attempt], size);
-  }
-  return p;
-}
-
-/*
- * mmap calls don't have to be matched with calls to munmap, so we can unmap
- * just the pages we don't need. However, as we don't know a priori if addresses
- * are handed out in increasing or decreasing order, we have to try both
- * directions (depending on the environment, one will always fail).
- */
-static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size,
-                        size_t alignment) {
-  void* address = *aAddress;
-  void* retainedAddr = nullptr;
-  bool addrsGrowDown = growthDirection <= 0;
-  int i = 0;
-  for (; i < 2; ++i) {
-    /* Try the direction indicated by growthDirection. */
-    if (addrsGrowDown) {
-      size_t offset = OffsetFromAligned(address, alignment);
-      void* head = (void*)((uintptr_t)address - offset);
-      void* tail = (void*)((uintptr_t)head + size);
-      if (MapMemoryAt(head, offset)) {
-        UnmapPages(tail, offset);
-        if (growthDirection >= -8) {
-          --growthDirection;
-        }
-        address = head;
-        break;
-      }
-    } else {
-      size_t offset = alignment - OffsetFromAligned(address, alignment);
-      void* head = (void*)((uintptr_t)address + offset);
-      void* tail = (void*)((uintptr_t)address + size);
-      if (MapMemoryAt(tail, offset)) {
-        UnmapPages(address, offset);
-        if (growthDirection <= 8) {
-          ++growthDirection;
-        }
-        address = head;
-        break;
-      }
-    }
-    /* If we're confident in the growth direction, don't try the other. */
-    if (growthDirection < -8 || growthDirection > 8) {
-      break;
-    }
-    /* If that failed, try the opposite direction. */
-    addrsGrowDown = !addrsGrowDown;
-  }
-  /* If our current chunk cannot be aligned, see if the next one is aligned. */
-  if (OffsetFromAligned(address, alignment)) {
-    retainedAddr = address;
-    address = MapMemory(size);
-  }
-  *aAddress = address;
-  *aRetainedAddr = retainedAddr;
-}
-
-void UnmapPages(void* p, size_t size) {
-  // ASan does not automatically unpoison memory, so we have to do this here.
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  if (munmap(p, size)) {
-    MOZ_ASSERT(errno == ENOMEM);
-  }
-}
-
-bool MarkPagesUnused(void* p, size_t size) {
-  MOZ_MAKE_MEM_NOACCESS(p, size);
-
-  if (!DecommitEnabled()) {
-    return false;
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-#if defined(XP_SOLARIS)
-  int result = posix_madvise(p, size, POSIX_MADV_DONTNEED);
-#else
-  int result = madvise(p, size, MADV_DONTNEED);
-#endif
-  return result != -1;
-}
-
-void MarkPagesInUse(void* p, size_t size) {
-  MOZ_MAKE_MEM_UNDEFINED(p, size);
-
-  if (!DecommitEnabled()) {
-    return;
-  }
-
-  MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
-}
-
-size_t GetPageFaultCount() {
-  if (mozilla::recordreplay::IsRecordingOrReplaying()) {
-    return 0;
-  }
-  struct rusage usage;
-  int err = getrusage(RUSAGE_SELF, &usage);
-  if (err) {
-    return 0;
-  }
-  return usage.ru_majflt;
-}
-
-void* AllocateMappedContent(int fd, size_t offset, size_t length,
-                            size_t alignment) {
-  MOZ_ASSERT(length && alignment);
-
-  // The allocation granularity and the requested offset
-  // must both be divisible by the requested alignment.
-  // Alignments larger than the allocation granularity are not supported.
-  if (allocGranularity % alignment != 0 || offset % alignment != 0) {
-    return nullptr;
-  }
-
-  // Sanity check the offset and size, as mmap does not do this for us.
-  struct stat st;
-  if (fstat(fd, &st) || offset >= uint64_t(st.st_size) ||
-      length > uint64_t(st.st_size) - offset) {
-    return nullptr;
-  }
-
-  size_t alignedOffset = offset - (offset % allocGranularity);
-  size_t alignedLength = length + (offset % allocGranularity);
-
-  uint8_t* map = static_cast<uint8_t*>(MapMemory(
-      alignedLength, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, alignedOffset));
-  if (!map) {
-    return nullptr;
-  }
-
-#ifdef DEBUG
-  // Zero out data before and after the desired mapping to catch errors early.
-  if (offset != alignedOffset) {
-    memset(map, 0, offset - alignedOffset);
-  }
-  if (alignedLength % pageSize) {
-    memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
-  }
-#endif
-
-  return map + (offset - alignedOffset);
-}
-
-void DeallocateMappedContent(void* p, size_t length) {
-  if (!p) {
-    return;
-  }
-
-  // Calculate the address originally returned by mmap.
+  // Calculate the address originally returned by the system call.
   // This is needed because AllocateMappedContent returns a pointer
   // that might be offset from the mapping, as the beginning of a
   // mapping must be aligned with the allocation granularity.
-  uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
-  size_t alignedLength = length + (uintptr_t(p) % allocGranularity);
-  UnmapPages(reinterpret_cast<void*>(map), alignedLength);
-}
-
+  uintptr_t map = uintptr_t(region) - (uintptr_t(region) % allocGranularity);
+#ifdef XP_WIN
+  MOZ_RELEASE_ASSERT(UnmapViewOfFile(reinterpret_cast<void*>(map)) != 0);
 #else
-#error "Memory mapping functions are not defined for your OS."
-#endif
-
-void ProtectPages(void* p, size_t size) {
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_RELEASE_ASSERT(size > 0);
-  MOZ_RELEASE_ASSERT(p);
-#if defined(XP_WIN)
-  DWORD oldProtect;
-  if (!VirtualProtect(p, size, PAGE_NOACCESS, &oldProtect)) {
-    MOZ_CRASH_UNSAFE_PRINTF(
-        "VirtualProtect(PAGE_NOACCESS) failed! Error code: %lu",
-        GetLastError());
-  }
-#else  // assume Unix
-  if (mprotect(p, size, PROT_NONE)) {
-    MOZ_CRASH("mprotect(PROT_NONE) failed");
+  size_t alignedLength = length + (uintptr_t(region) % allocGranularity);
+  if (munmap(reinterpret_cast<void*>(map), alignedLength)) {
+    MOZ_RELEASE_ASSERT(errno == ENOMEM);
   }
 #endif
 }
 
-void MakePagesReadOnly(void* p, size_t size) {
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_RELEASE_ASSERT(size > 0);
-  MOZ_RELEASE_ASSERT(p);
-#if defined(XP_WIN)
+static inline void ProtectMemory(void* region, size_t length, PageAccess prot) {
+  MOZ_RELEASE_ASSERT(region && OffsetFromAligned(region, pageSize) == 0);
+  MOZ_RELEASE_ASSERT(length > 0 && length % pageSize == 0);
+#ifdef XP_WIN
   DWORD oldProtect;
-  if (!VirtualProtect(p, size, PAGE_READONLY, &oldProtect)) {
-    MOZ_CRASH_UNSAFE_PRINTF(
-        "VirtualProtect(PAGE_READONLY) failed! Error code: %lu",
-        GetLastError());
-  }
-#else  // assume Unix
-  if (mprotect(p, size, PROT_READ)) {
-    MOZ_CRASH("mprotect(PROT_READ) failed");
-  }
+  MOZ_RELEASE_ASSERT(VirtualProtect(region, length, DWORD(prot), &oldProtect) !=
+                     0);
+#else
+  MOZ_RELEASE_ASSERT(mprotect(region, length, int(prot)) == 0);
 #endif
 }
 
-void UnprotectPages(void* p, size_t size) {
-  MOZ_ASSERT(size % pageSize == 0);
-  MOZ_RELEASE_ASSERT(size > 0);
-  MOZ_RELEASE_ASSERT(p);
-#if defined(XP_WIN)
-  DWORD oldProtect;
-  if (!VirtualProtect(p, size, PAGE_READWRITE, &oldProtect)) {
-    MOZ_CRASH_UNSAFE_PRINTF(
-        "VirtualProtect(PAGE_READWRITE) failed! Error code: %lu",
-        GetLastError());
-  }
-#else  // assume Unix
-  if (mprotect(p, size, PROT_READ | PROT_WRITE)) {
-    MOZ_CRASH("mprotect(PROT_READ | PROT_WRITE) failed");
-  }
-#endif
+void ProtectPages(void* region, size_t length) {
+  ProtectMemory(region, length, PageAccess::None);
+}
+
+void MakePagesReadOnly(void* region, size_t length) {
+  ProtectMemory(region, length, PageAccess::Read);
+}
+
+void UnprotectPages(void* region, size_t length) {
+  ProtectMemory(region, length, PageAccess::ReadWrite);
 }
 
 }  // namespace gc
 }  // namespace js
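For context on the granularity arithmetic in DeallocateMappedContent above (the pointer handed back by AllocateMappedContent may be offset into the underlying mapping), a minimal standalone sketch of the round-down calculation; the granularity and addresses here are illustrative values, not taken from this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative values: a 64 KiB allocation granularity and a caller
      // pointer that sits 0x2100 bytes past the start of the real mapping.
      const uintptr_t granularity = 64 * 1024;
      const uintptr_t userPtr = 0x7f0000012100;
      const size_t userLength = 4096;

      // Round the pointer down to the mapping base and extend the length by
      // the same amount, mirroring the unmap path above.
      uintptr_t base = userPtr - (userPtr % granularity);
      size_t mappedLength = userLength + (userPtr % granularity);

      printf("unmap base=%#lx length=%zu\n", (unsigned long)base, mappedLength);
      return 0;
    }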
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -11,18 +11,27 @@
 
 namespace js {
 namespace gc {
 
 // Sanity check that our compiled configuration matches the currently
 // running instance and initialize any runtime data needed for allocation.
 void InitMemorySubsystem();
 
+// The page size as reported by the operating system.
 size_t SystemPageSize();
 
+// The number of bits that may be set in a valid address, as
+// reported by the operating system or measured at startup.
+size_t SystemAddressBits();
+
+// The scattershot allocator is used on platforms that have a large address
+// range. On these platforms we allocate at random addresses.
+bool UsingScattershotAllocator();
+
 // Allocate or deallocate pages from the system with the given alignment.
 void* MapAlignedPages(size_t size, size_t alignment);
 void UnmapPages(void* p, size_t size);
 
 // Tell the OS that the given pages are not in use, so they should not be
 // written to a paging file. This may be a no-op on some platforms.
 bool MarkPagesUnused(void* p, size_t size);
 
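As an illustration of how the declarations above fit together, a hedged sketch of a caller; this is not part of the patch, it assumes the js/src build environment, and it only uses the functions declared in gc/Memory.h with the signatures shown:

    #include "gc/Memory.h"

    // Hypothetical helper: reserve an aligned block, hint to the OS that it is
    // idle, and release it later. Sizes are illustrative; InitMemorySubsystem()
    // is assumed to have been called once at startup.
    static void* ReserveIdleBlock(size_t size, size_t alignment) {
      void* p = js::gc::MapAlignedPages(size, alignment);
      if (p) {
        // May be a no-op on platforms that do not support decommit.
        js::gc::MarkPagesUnused(p, size);
      }
      return p;
    }

    static void ReleaseBlock(void* p, size_t size) {
      if (p) {
        js::gc::UnmapPages(p, size);
      }
    }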
--- a/js/src/jsapi-tests/testGCAllocator.cpp
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -9,58 +9,47 @@
 
 #include "gc/GCInternals.h"
 #include "gc/Memory.h"
 #include "jsapi-tests/tests.h"
 
 #if defined(XP_WIN)
 #include "util/Windows.h"
 #include <psapi.h>
-#elif defined(SOLARIS)
-// This test doesn't apply to Solaris.
-#elif defined(XP_UNIX)
+#else
 #include <algorithm>
 #include <errno.h>
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
-#else
-#error "Memory mapping functions are not defined for your OS."
 #endif
 
 BEGIN_TEST(testGCAllocator) {
-  size_t PageSize = 0;
-#if defined(XP_WIN)
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-  SYSTEM_INFO sysinfo;
-  GetSystemInfo(&sysinfo);
-  PageSize = sysinfo.dwPageSize;
-#else  // Various APIs are unavailable. This test is disabled.
-  return true;
+#ifdef JS_64BIT
+  // If we're using the scattershot allocator, this test does not apply.
+  if (js::gc::UsingScattershotAllocator()) {
+    return true;
+  }
 #endif
-#elif defined(SOLARIS)
-  return true;
-#elif defined(XP_UNIX)
-  PageSize = size_t(sysconf(_SC_PAGESIZE));
-#else
-  return true;
-#endif
+
+  size_t PageSize = js::gc::SystemPageSize();
 
   /* Finish any ongoing background free activity. */
   js::gc::FinishGC(cx);
 
   bool growUp;
   CHECK(addressesGrowUp(&growUp));
 
   if (growUp) {
     return testGCAllocatorUp(PageSize);
+  } else {
+    return testGCAllocatorDown(PageSize);
   }
-  return testGCAllocatorDown(PageSize);
 }
 
 static const size_t Chunk = 512 * 1024;
 static const size_t Alignment = 2 * Chunk;
 static const int MaxTempChunks = 4096;
 static const size_t StagingSize = 16 * Chunk;
 
 bool addressesGrowUp(bool* resultOut) {
@@ -135,25 +124,19 @@ bool testGCAllocatorUp(const size_t Page
   CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool,
                           tempChunks));
   // Check that an aligned chunk after a single unalignable chunk is used.
   CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool,
                           tempChunks));
   // Check that we fall back to the slow path after two unalignable chunks.
   CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
                           tempChunks));
-#ifndef __aarch64__
-  // Bug 1440330 - this test is incorrect for aarch64 because MapMemory only
-  // looks for 1MB-aligned chunks on that platform, and will find one at
-  // position 6 here.
-
   // Check that we also fall back after an unalignable and an alignable chunk.
   CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
                           tempChunks));
-#endif
   // Check that the last ditch allocator works as expected.
   CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
                           tempChunks, UseLastDitchAllocator));
 
   // Clean up.
   while (--tempChunks >= 0) {
     unmapPages(chunkPool[tempChunks], 2 * Chunk);
   }
@@ -288,55 +271,34 @@ bool positionIsCorrect(const char* str, 
     while (--tempChunks >= 0) {
       js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
     }
   }
   return result == desired;
 }
 
 #if defined(XP_WIN)
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
 
 void* mapMemoryAt(void* desired, size_t length) {
   return VirtualAlloc(desired, length, MEM_COMMIT | MEM_RESERVE,
                       PAGE_READWRITE);
 }
 
 void* mapMemory(size_t length) {
   return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE,
                       PAGE_READWRITE);
 }
 
 void unmapPages(void* p, size_t size) {
   MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
 }
 
-#else  // Various APIs are unavailable. This test is disabled.
-
-void* mapMemoryAt(void* desired, size_t length) { return nullptr; }
-void* mapMemory(size_t length) { return nullptr; }
-void unmapPages(void* p, size_t size) {}
-
-#endif
-#elif defined(SOLARIS)  // This test doesn't apply to Solaris.
-
-void* mapMemoryAt(void* desired, size_t length) { return nullptr; }
-void* mapMemory(size_t length) { return nullptr; }
-void unmapPages(void* p, size_t size) {}
-
-#elif defined(XP_UNIX)
+#else
 
 void* mapMemoryAt(void* desired, size_t length) {
-
-#if defined(__ia64__) || defined(__aarch64__) ||  \
-    (defined(__sparc__) && defined(__arch64__) && \
-     (defined(__NetBSD__) || defined(__linux__)))
-  MOZ_RELEASE_ASSERT(
-      (0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
-#endif
   void* region = mmap(desired, length, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
   if (region == MAP_FAILED) {
     return nullptr;
   }
   if (region != desired) {
     if (munmap(region, length)) {
       MOZ_RELEASE_ASSERT(errno == ENOMEM);
@@ -346,62 +308,24 @@ void* mapMemoryAt(void* desired, size_t 
   return region;
 }
 
 void* mapMemory(size_t length) {
   int prot = PROT_READ | PROT_WRITE;
   int flags = MAP_PRIVATE | MAP_ANON;
   int fd = -1;
   off_t offset = 0;
-  // The test code must be aligned with the implementation in gc/Memory.cpp.
-#if defined(__ia64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__NetBSD__))
-  void* region =
-      mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
-  if (region == MAP_FAILED) {
-    return nullptr;
-  }
-  if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-    if (munmap(region, length)) {
-      MOZ_RELEASE_ASSERT(errno == ENOMEM);
-    }
-    return nullptr;
-  }
-  return region;
-#elif defined(__aarch64__) || \
-    (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
-  const uintptr_t start = UINT64_C(0x0000070000000000);
-  const uintptr_t end = UINT64_C(0x0000800000000000);
-  const uintptr_t step = js::gc::ChunkSize;
-  uintptr_t hint;
-  void* region = MAP_FAILED;
-  for (hint = start; region == MAP_FAILED && hint + length <= end;
-       hint += step) {
-    region = mmap((void*)hint, length, prot, flags, fd, offset);
-    if (region != MAP_FAILED) {
-      if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-        if (munmap(region, length)) {
-          MOZ_RELEASE_ASSERT(errno == ENOMEM);
-        }
-        region = MAP_FAILED;
-      }
-    }
-  }
-  return region == MAP_FAILED ? nullptr : region;
-#else
   void* region = mmap(nullptr, length, prot, flags, fd, offset);
   if (region == MAP_FAILED) {
     return nullptr;
   }
   return region;
-#endif
 }
 
 void unmapPages(void* p, size_t size) {
   if (munmap(p, size)) {
     MOZ_RELEASE_ASSERT(errno == ENOMEM);
   }
 }
 
-#else  // !defined(XP_WIN) && !defined(SOLARIS) && !defined(XP_UNIX)
-#error "Memory mapping functions are not defined for your OS."
 #endif
+
 END_TEST(testGCAllocator)
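The test above branches on addressesGrowUp(); a standalone POSIX sketch of one way such a probe could work (illustrative only, not the harness's implementation): map a few pages and count which direction consecutive mappings move.

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      const size_t len = size_t(sysconf(_SC_PAGESIZE));
      void* maps[8] = {};
      void* prev = nullptr;
      int ups = 0;
      int downs = 0;
      for (int i = 0; i < 8; i++) {
        maps[i] = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
        if (maps[i] == MAP_FAILED) {
          maps[i] = nullptr;
          continue;
        }
        if (prev) {
          if (maps[i] > prev) {
            ++ups;
          } else {
            ++downs;
          }
        }
        prev = maps[i];
      }
      // Clean up all successful mappings before reporting.
      for (void* p : maps) {
        if (p) {
          munmap(p, len);
        }
      }
      printf("addresses appear to grow %s\n", ups >= downs ? "up" : "down");
      return 0;
    }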
new file mode 100644
--- /dev/null
+++ b/layout/base/crashtests/1511563.html
@@ -0,0 +1,8 @@
+<script>
+document.addEventListener("DOMContentLoaded", function(){
+  document.designMode='on'
+  window.getSelection().modify('move', 'right', 'line')
+})
+</script>
+<br>
+<keygen>
--- a/layout/base/crashtests/crashtests.list
+++ b/layout/base/crashtests/crashtests.list
@@ -551,8 +551,9 @@ load 1494030.html
 load 1505420.html
 pref(layout.css.column-span.enabled,true) load 1506163.html
 pref(layout.css.column-span.enabled,true) load 1506204.html
 pref(layout.css.column-span.enabled,true) load 1506314.html
 pref(layout.css.column-span.enabled,true) load 1507244.html
 load 1510080.html
 load 1510485.html
 pref(layout.css.column-span.enabled,true) load 1511535.html
+load 1511563.html
--- a/layout/reftests/invalidation/reftest.list
+++ b/layout/reftests/invalidation/reftest.list
@@ -39,18 +39,18 @@ pref(layout.animated-image-layers.enable
 == filter-userspace-offset.svg?offsetContainer=foreignObject&mask=boundingBox filter-userspace-offset.svg
 == filter-userspace-offset.svg?offsetContainer=rect&mask=userSpace-at100 filter-userspace-offset.svg
 == filter-userspace-offset.svg?offsetContainer=use&mask=userSpace-atZero filter-userspace-offset.svg
 == filter-userspace-offset.svg?offsetContainer=innerSVG&mask=userSpace-atZero filter-userspace-offset.svg
 == filter-userspace-offset.svg?offsetContainer=foreignObject&mask=userSpace-at100 filter-userspace-offset.svg
 == filter-userspace-offset.svg?offsetContainer=rect&filter=matrix-fillPaint-boundingBox filter-userspace-offset.svg
 == filter-userspace-offset.svg?offsetContainer=rect&filter=matrix-fillPaint-userSpace-at100 filter-userspace-offset.svg
 
-fails-if(!Android) != scroll-inactive-layers.html about:blank   # bug 1494110 for the fails-if(!Android) (Android is a false pass, no doubt)
-fails-if(!Android) != scroll-inactive-layers-2.html about:blank # bug 1494110 for the fails-if(!Android) (Android is a false pass, no doubt)
+fails-if(webrender) != scroll-inactive-layers.html about:blank
+fails-if(webrender) != scroll-inactive-layers-2.html about:blank
 != inactive-layertree-visible-region-1.html about:blank
 != inactive-layertree-visible-region-2.html about:blank
 != transform-floating-point-invalidation.html about:blank
 != transform-floating-point-invalidation.html?reverse about:blank
 != nudge-to-integer-invalidation.html about:blank
 != nudge-to-integer-invalidation.html?reverse about:blank
 != clipped-animated-transform-1.html about:blank
 != paintedlayer-recycling-1.html about:blank
--- a/layout/svg/nsSVGIntegrationUtils.cpp
+++ b/layout/svg/nsSVGIntegrationUtils.cpp
@@ -305,39 +305,31 @@ nsRect nsSVGIntegrationUtils::ComputePos
 
 nsIntRegion nsSVGIntegrationUtils::AdjustInvalidAreaForSVGEffects(
     nsIFrame* aFrame, const nsPoint& aToReferenceFrame,
     const nsIntRegion& aInvalidRegion) {
   if (aInvalidRegion.IsEmpty()) {
     return nsIntRect();
   }
 
-  int32_t appUnitsPerDevPixel = aFrame->PresContext()->AppUnitsPerDevPixel();
   nsIFrame* firstFrame =
       nsLayoutUtils::FirstContinuationOrIBSplitSibling(aFrame);
 
   // If we have any filters to observe then we should have started doing that
   // during reflow/ComputeFrameEffectsRect, so we use GetFiltersIfObserving
   // here to avoid needless work (or masking bugs by setting up observers at
   // the wrong time).
   if (!aFrame->StyleEffects()->HasFilters() ||
       SVGObserverUtils::GetFiltersIfObserving(firstFrame, nullptr) ==
           SVGObserverUtils::eHasRefsSomeInvalid) {
-    // The frame is either not there or not currently available,
-    // perhaps because we're in the middle of tearing stuff down.
-    // Be conservative, return our visual overflow rect relative
-    // to the reference frame.
-    // XXX we may get here purely due to an SVG mask, in which case there is
-    // no need to do this it causes over-invalidation. See:
-    // https://bugzilla.mozilla.org/show_bug.cgi?id=1494110
-    // Do we still even need this for filters? If so, why?
-    nsRect overflow = aFrame->GetVisualOverflowRect() + aToReferenceFrame;
-    return overflow.ToOutsidePixels(appUnitsPerDevPixel);
+    return aInvalidRegion;
   }
 
+  int32_t appUnitsPerDevPixel = aFrame->PresContext()->AppUnitsPerDevPixel();
+
   // Convert aInvalidRegion into bounding box frame space in app units:
   nsPoint toBoundingBox =
       aFrame->GetOffsetTo(firstFrame) + GetOffsetToBoundingBox(firstFrame);
   // The initial rect was relative to the reference frame, so we need to
   // remove that offset to get a rect relative to the current frame.
   toBoundingBox -= aToReferenceFrame;
   nsRegion preEffectsRegion =
       aInvalidRegion.ToAppUnits(appUnitsPerDevPixel).MovedBy(toBoundingBox);
--- a/testing/geckodriver/doc/TraceLogs.md
+++ b/testing/geckodriver/doc/TraceLogs.md
@@ -135,27 +135,27 @@ As with C#, the log output is helpfully 
 
 [Java client]: https://seleniumhq.github.io/selenium/docs/api/java/
 [`org.openqa.selenium.firefox.FirefoxOptions`]: https://seleniumhq.github.io/selenium/docs/api/java/org/openqa/selenium/firefox/FirefoxOptions.html
 
 
 Python
 ------
 
-The Selenium [Python client] comes with an
+The Selenium [Python client] comes with a
 [`selenium.webdriver.firefox.options.Options`] helper that can
 be used programmatically to construct the [`moz:firefoxOptions`]
 capabilities object:
 
 	from selenium.webdriver import Firefox
 	from selenium.webdriver.firefox.options import Options
 
 	opts = Options()
 	opts.log.level = "trace"
-	driver = Firefox(firefox_options=opts)
+	driver = Firefox(options=opts)
 
 The log output is stored in a file called _geckodriver.log_ in your
 script’s current working directory.
 
 [Python client]: https://selenium-python.readthedocs.io/
 [`selenium.webdriver.firefox.options.Options`]: https://github.com/SeleniumHQ/selenium/blob/master/py/selenium/webdriver/firefox/options.py
 
 
--- a/toolkit/components/extensions/test/xpcshell/test_ext_permission_warnings.js
+++ b/toolkit/components/extensions/test/xpcshell/test_ext_permission_warnings.js
@@ -1,13 +1,18 @@
 "use strict";
 
 let {ExtensionTestCommon} = ChromeUtils.import("resource://testing-common/ExtensionTestCommon.jsm", {});
 
-const bundle = Services.strings.createBundle("chrome://browser/locale/browser.properties");
+let bundle;
+if (AppConstants.MOZ_APP_NAME == "thunderbird") {
+  bundle = Services.strings.createBundle("chrome://messenger/locale/addons.properties");
+} else {
+  bundle = Services.strings.createBundle("chrome://browser/locale/browser.properties");
+}
 const DUMMY_APP_NAME = "Dummy brandName";
 
 async function getManifestPermissions(extensionData) {
   let extension = ExtensionTestCommon.generate(extensionData);
   await extension.loadManifest();
   return extension.manifestPermissions;
 }
 
--- a/widget/tests/test_imestate.html
+++ b/widget/tests/test_imestate.html
@@ -401,17 +401,17 @@ function runBasicTest(aIsEditable, aInDe
     { id: "reset",
       description: "input[type=reset]",
       focusable: !aInDesignMode,
       expectedEnabled: kEnabledStateOnNonEditableElement },
     { id: "file",
       description: "input[type=file]",
       focusable: !aInDesignMode,
       focusEventNotFired: aIsEditable && !aInDesignMode,
-      expectedEnabled: kEnabledStateOnNonEditableElement },
+      expectedEnabled: kEnabledStateOnReadonlyField },
     { id: "button",
       description: "input[type=button]",
       focusable: !aInDesignMode,
       expectedEnabled: kEnabledStateOnNonEditableElement },
     { id: "image",
       description: "input[type=image]",
       focusable: !aInDesignMode,
       expectedEnabled: kEnabledStateOnNonEditableElement },