Bug 1522294 - Do the right thing if we happen to get an aligned region in TryToAlignChunk. r=sfink a=lizzard
author Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
Thu, 21 Feb 2019 01:51:51 +0200
changeset 516159 2a50443802ba4760f9846ba8acc1835712c17bd6
parent 516158 5f890c9359edaa25ed64bf38b089b4a98c0ab396
child 516160 e886d8a7a57bd08d1ca8b72d67c168c858fc035b
push id 1953
push user ffxbld-merge
push date Mon, 11 Mar 2019 12:10:20 +0000
treeherder mozilla-release@9c35dcbaa899
reviewers sfink, lizzard
bugs 1522294
milestone 66.0
Bug 1522294 - Do the right thing if we happen to get an aligned region in TryToAlignChunk. r=sfink a=lizzard
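The core of the fix: when TryToAlignChunk cannot align the region it was given and maps a replacement, a replacement that happens to land on an aligned address is now reported as success, with the old, unalignable region unmapped rather than handed back. Callers can then assert that a true return means an aligned region and no retained region. A condensed sketch of the affected tail of TryToAlignChunk, using the helper names from js/src/gc/Memory.cpp (MapMemory, UnmapInternal, OffsetFromAligned) and eliding the earlier in-place alignment attempt:

    template <bool AlwaysGetNew = true>
    static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                                size_t length, size_t alignment) {
      void* regionStart = *aRegion;
      // ... first try to align regionStart in place (elided) ...

      void* retainedRegion = nullptr;
      bool result = OffsetFromAligned(regionStart, alignment) == 0;
      if (AlwaysGetNew && !result) {
        // The current region cannot be aligned: keep it aside and map a new one.
        retainedRegion = regionStart;
        regionStart = MapMemory(length);
        // The fresh mapping may already be aligned by chance; if so, the old
        // region is no longer needed and the call counts as a success.
        result = OffsetFromAligned(regionStart, alignment) == 0;
        if (result) {
          UnmapInternal(retainedRegion, length);
          retainedRegion = nullptr;
        }
      }

      *aRegion = regionStart;
      *aRetainedRegion = retainedRegion;
      return regionStart && result;
    }

With this stronger postcondition, the three call sites below (MapAlignedPages, MapAlignedPagesRandom, MapAlignedPagesLastDitch) can return immediately on success and assert that no retained region was handed back.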
js/src/gc/Memory.cpp
js/src/jsapi-tests/testGCAllocator.cpp
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -153,22 +153,21 @@ template <bool AlwaysGetNew = true>
 static bool TryToAlignChunk(void** aRegion, void** aRetainedRegion,
                             size_t length, size_t alignment);
 
 static void* MapAlignedPagesSlow(size_t length, size_t alignment);
 static void* MapAlignedPagesLastDitch(size_t length, size_t alignment);
 
 #ifdef JS_64BIT
 static void* MapAlignedPagesRandom(size_t length, size_t alignment);
-void* TestMapAlignedPagesLastDitch(size_t, size_t) { return nullptr; }
-#else
+#endif
+
 void* TestMapAlignedPagesLastDitch(size_t length, size_t alignment) {
   return MapAlignedPagesLastDitch(length, alignment);
 }
-#endif
 
 /*
  * We can only decommit unused pages if the hardcoded Arena
  * size matches the page size for the running process.
  */
 static inline bool DecommitEnabled() { return pageSize == ArenaSize; }
 
 /* Returns the offset from the nearest aligned address at or below |region|. */
@@ -420,38 +419,55 @@ void* MapAlignedPages(size_t length, siz
 
     MOZ_RELEASE_ASSERT(!IsInvalidRegion(region, length));
     MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
 
     return region;
   }
 #endif
 
+  // Try to allocate the region. If the returned address is aligned,
+  // either we OOMed (region is nullptr) or we're done.
   void* region = MapMemory(length);
   if (OffsetFromAligned(region, alignment) == 0) {
     return region;
   }
 
+  // Try to align the region. On success, TryToAlignChunk() returns
+  // true and we can return the aligned region immediately.
   void* retainedRegion;
-  TryToAlignChunk(&region, &retainedRegion, length, alignment);
+  if (TryToAlignChunk(&region, &retainedRegion, length, alignment)) {
+    MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
+    MOZ_ASSERT(!retainedRegion);
+    return region;
+  }
+
+  // On failure, the unaligned region is retained unless we OOMed. We don't
+  // use the retained region on this path (see the last ditch allocator).
   if (retainedRegion) {
     UnmapInternal(retainedRegion, length);
   }
+
+  // If it fails to align the given region, TryToAlignChunk() returns the
+  // next valid region that we might be able to align (unless we OOMed).
   if (region) {
-    if (OffsetFromAligned(region, alignment) == 0) {
-      return region;
-    }
+    MOZ_ASSERT(OffsetFromAligned(region, alignment) != 0);
     UnmapInternal(region, length);
   }
 
+  // Since we couldn't align the first region, fall back to allocating a
+  // region large enough that we can definitely align it.
   region = MapAlignedPagesSlow(length, alignment);
   if (!region) {
+    // If there wasn't enough contiguous address space left for that,
+    // try to find an alignable region using the last ditch allocator.
     region = MapAlignedPagesLastDitch(length, alignment);
   }
 
+  // At this point we should either have an aligned region or nullptr.
   MOZ_ASSERT(OffsetFromAligned(region, alignment) == 0);
   return region;
 }
 
 #ifdef JS_64BIT
 
 /*
  * This allocator takes advantage of the large address range on some 64-bit
@@ -505,16 +521,17 @@ static void* MapAlignedPagesRandom(size_
       UnmapInternal(region, length);
       continue;
     }
     if (OffsetFromAligned(region, alignment) == 0) {
       return region;
     }
     void* retainedRegion = nullptr;
     if (TryToAlignChunk<false>(&region, &retainedRegion, length, alignment)) {
+      MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
       MOZ_ASSERT(!retainedRegion);
       return region;
     }
     MOZ_ASSERT(region && !retainedRegion);
     UnmapInternal(region, length);
   }
 
   if (numAddressBits < 48) {
@@ -585,16 +602,17 @@ static void* MapAlignedPagesLastDitch(si
   void* tempMaps[MaxLastDitchAttempts];
   int attempt = 0;
   void* region = MapMemory(length);
   if (OffsetFromAligned(region, alignment) == 0) {
     return region;
   }
   for (; attempt < MaxLastDitchAttempts; ++attempt) {
     if (TryToAlignChunk(&region, tempMaps + attempt, length, alignment)) {
+      MOZ_ASSERT(region && OffsetFromAligned(region, alignment) == 0);
       MOZ_ASSERT(!tempMaps[attempt]);
       break;  // Success!
     }
     if (!region || !tempMaps[attempt]) {
       break;  // We ran out of memory, so give up.
     }
   }
   if (OffsetFromAligned(region, alignment)) {
@@ -700,16 +718,22 @@ static bool TryToAlignChunk(void** aRegi
   }
 
   void* retainedRegion = nullptr;
   bool result = OffsetFromAligned(regionStart, alignment) == 0;
   if (AlwaysGetNew && !result) {
     // If our current chunk cannot be aligned, just get a new one.
     retainedRegion = regionStart;
     regionStart = MapMemory(length);
+    // Our new region might happen to already be aligned.
+    result = OffsetFromAligned(regionStart, alignment) == 0;
+    if (result) {
+      UnmapInternal(retainedRegion, length);
+      retainedRegion = nullptr;
+    }
   }
 
   *aRegion = regionStart;
   *aRetainedRegion = retainedRegion;
   return regionStart && result;
 }
 
 #endif
--- a/js/src/jsapi-tests/testGCAllocator.cpp
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -130,16 +130,19 @@ bool testGCAllocatorUp(const size_t Page
   CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool,
                           tempChunks));
   // Check that we also fall back after an unalignable and an alignable chunk.
   CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool,
                           tempChunks));
   // Check that the last ditch allocator works as expected.
   CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool,
                           tempChunks, UseLastDitchAllocator));
+  // Check that the last ditch allocator can deal with naturally aligned chunks.
+  CHECK(positionIsCorrect("x--xx--xoo------", stagingArea, chunkPool,
+                          tempChunks, UseLastDitchAllocator));
 
   // Clean up.
   while (--tempChunks >= 0) {
     unmapPages(chunkPool[tempChunks], 2 * Chunk);
   }
   return true;
 }
 
@@ -180,16 +183,19 @@ bool testGCAllocatorDown(const size_t Pa
   CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool,
                           tempChunks));
   // Check that we also fall back after an unalignable and an alignable chunk.
   CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool,
                           tempChunks));
   // Check that the last ditch allocator works as expected.
   CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool,
                           tempChunks, UseLastDitchAllocator));
+  // Check that the last ditch allocator can deal with naturally aligned chunks.
+  CHECK(positionIsCorrect("------oox--xx--x", stagingArea, chunkPool,
+                          tempChunks, UseLastDitchAllocator));
 
   // Clean up.
   while (--tempChunks >= 0) {
     unmapPages(chunkPool[tempChunks], 2 * Chunk);
   }
   return true;
 }