Bug 1005849 - Part 4: Test the new allocation logic. r=terrence
author: Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
date: Fri, 23 May 2014 15:30:00 +0200
changeset: 185081 4e09a894645c
parent: 185080 96e1b7a25c71
child: 185082 fd13d0871449
push: 26844 by ryanvm@gmail.com, Tue, 27 May 2014 20:23:53 +0000
reviewers: terrence
bugs: 1005849
milestone: 32.0a1
js/src/gc/Memory.h
js/src/jsapi-tests/moz.build
js/src/jsapi-tests/testGCAllocator.cpp
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -67,14 +67,19 @@ class SystemPageAllocator
     // The addresses handed out by mmap may grow up or down.
     int                 growthDirection;
 #endif
 
     // The maximum number of unalignable chunks to temporarily keep alive in
     // the last ditch allocation pass. OOM crash reports generally show <= 7
     // unaligned chunks available (bug 1005844 comment #16).
     static const int    MaxLastDitchAttempts = 8;
+
+public:
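+    // Expose the private last ditch allocator so the jsapi-test can drive it
+    // directly.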
+    void *testMapAlignedPagesLastDitch(size_t size, size_t alignment) {
+        return mapAlignedPagesLastDitch(size, alignment);
+    }
 };
 
 } // namespace gc
 } // namespace js
 
 #endif /* gc_Memory_h */
--- a/js/src/jsapi-tests/moz.build
+++ b/js/src/jsapi-tests/moz.build
@@ -27,16 +27,17 @@ UNIFIED_SOURCES += [
     'testEnclosingFunction.cpp',
     'testErrorCopying.cpp',
     'testException.cpp',
     'testExternalStrings.cpp',
     'testFindSCCs.cpp',
     'testFreshGlobalEvalRedefinition.cpp',
     'testFuncCallback.cpp',
     'testFunctionProperties.cpp',
+    'testGCAllocator.cpp',
     'testGCExactRooting.cpp',
     'testGCFinalizeCallback.cpp',
     'testGCOutOfMemory.cpp',
     'testGCStoreBufferRemoval.cpp',
     'testHashTable.cpp',
     'testHashTableInit.cpp',
     'testIndexToString.cpp',
     'testIntern.cpp',
new file mode 100644
--- /dev/null
+++ b/js/src/jsapi-tests/testGCAllocator.cpp
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Memory.h"
+#include "jsapi-tests/tests.h"
+
+#if defined(XP_WIN)
+#include "jswin.h"
+#include <psapi.h>
+#elif defined(SOLARIS)
+// This test doesn't apply to Solaris.
+#elif defined(XP_UNIX)
+#include <algorithm>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#else
+#error "Memory mapping functions are not defined for your OS."
+#endif
+
+BEGIN_TEST(testGCAllocator)
+{
+#if defined(XP_WIN)
+    SYSTEM_INFO sysinfo;
+    GetSystemInfo(&sysinfo);
+    const size_t PageSize = sysinfo.dwPageSize;
+#elif defined(SOLARIS)
+    return true;
+#elif defined(XP_UNIX)
+    const size_t PageSize = size_t(sysconf(_SC_PAGESIZE));
+#else
+    return true;
+#endif
+    if (addressesGrowUp())
+        return testGCAllocatorUp(PageSize);
+    return testGCAllocatorDown(PageSize);
+}
+
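+// Each character in the layout strings below represents one Chunk of address
+// space; the strings are 16 characters long, matching StagingSize / Chunk.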
+static const size_t Chunk = 512 * 1024;
+static const size_t Alignment = 2 * Chunk;
+static const int MaxTempChunks = 4096;
+static const size_t StagingSize = 16 * Chunk;
+
+bool
+addressesGrowUp()
+{
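+    // Heuristic: map two regions in succession and compare their addresses to
+    // see which way the OS hands out consecutive mappings.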
+    void *p1 = mapMemory(2 * Chunk);
+    void *p2 = mapMemory(2 * Chunk);
+    unmapPages(p1, 2 * Chunk);
+    unmapPages(p2, 2 * Chunk);
+    return p1 < p2;
+}
+
+size_t
+offsetFromAligned(void *p)
+{
+    return uintptr_t(p) % Alignment;
+}
+
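+// Selects which allocation path positionIsCorrect should exercise.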
+enum AllocType {
+    UseNormalAllocator,
+    UseLastDitchAllocator
+};
+
+bool
+testGCAllocatorUp(const size_t PageSize)
+{
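+    // Map StagingSize plus the worst-case misalignment (Alignment - PageSize,
+    // since mapped addresses are at least page-aligned) so that an aligned
+    // StagingSize region always fits inside the mapping.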
+    const size_t UnalignedSize = StagingSize + Alignment - PageSize;
+    void *chunkPool[MaxTempChunks];
+    // Allocate a contiguous region that we can partition for testing.
+    void *stagingArea = mapMemory(UnalignedSize);
+    if (!stagingArea)
+        return false;
+    // Ensure that the staging area is aligned.
+    unmapPages(stagingArea, UnalignedSize);
+    if (offsetFromAligned(stagingArea)) {
+        const size_t Offset = offsetFromAligned(stagingArea);
+        // Place the area at the lowest aligned address.
+        stagingArea = (void *)(uintptr_t(stagingArea) + (Alignment - Offset));
+    }
+    if (!mapMemoryAt(stagingArea, StagingSize))
+        return false;
+    // Make sure there are no available chunks below the staging area.
+    int tempChunks;
+    if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, false))
+        return false;
+    // Unmap the staging area so we can set it up for testing.
+    unmapPages(stagingArea, StagingSize);
+    // Reuse the same allocator so it learns the address growth direction.
+    js::gc::SystemPageAllocator GCAlloc;
+    // Check that the first chunk is used if it is aligned.
+    CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that the first chunk is used if it can be aligned.
+    CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that an aligned chunk after a single unalignable chunk is used.
+    CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that we fall back to the slow path after two unalignable chunks.
+    CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that we also fall back after an unalignable and an alignable chunk.
+    CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that the last ditch allocator works as expected.
+    CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool, tempChunks, GCAlloc,
+                            UseLastDitchAllocator));
+
+    // Clean up.
+    while (--tempChunks >= 0)
+        unmapPages(chunkPool[tempChunks], 2 * Chunk);
+    return true;
+}
+
+bool
+testGCAllocatorDown(const size_t PageSize)
+{
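+    // As above, but since new mappings appear at ever lower addresses, the
+    // staging area is placed at the highest aligned address in the mapping.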
+    const size_t UnalignedSize = StagingSize + Alignment - PageSize;
+    void *chunkPool[MaxTempChunks];
+    // Allocate a contiguous region that we can partition for testing.
+    void *stagingArea = mapMemory(UnalignedSize);
+    if (!stagingArea)
+        return false;
+    // Ensure that the staging area is aligned.
+    unmapPages(stagingArea, UnalignedSize);
+    if (offsetFromAligned(stagingArea)) {
+        void *stagingEnd = (void *)(uintptr_t(stagingArea) + UnalignedSize);
+        const size_t Offset = offsetFromAligned(stagingEnd);
+        // Place the area at the highest aligned address.
+        stagingArea = (void *)(uintptr_t(stagingEnd) - Offset - StagingSize);
+    }
+    if (!mapMemoryAt(stagingArea, StagingSize))
+        return false;
+    // Make sure there are no available chunks above the staging area.
+    int tempChunks;
+    if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true))
+        return false;
+    // Unmap the staging area so we can set it up for testing.
+    unmapPages(stagingArea, StagingSize);
+    // Reuse the same allocator so it learns the address growth direction.
+    js::gc::SystemPageAllocator GCAlloc;
+    // Check that the first chunk is used if it is aligned.
+    CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that the first chunk is used if it can be aligned.
+    CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that an aligned chunk after a single unalignable chunk is used.
+    CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that we fall back to the slow path after two unalignable chunks.
+    CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that we also fall back after an unalignable and an alignable chunk.
+    CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool, tempChunks, GCAlloc));
+    // Check that the last ditch allocator works as expected.
+    CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool, tempChunks, GCAlloc,
+                            UseLastDitchAllocator));
+
+    // Clean up.
+    while (--tempChunks >= 0)
+        unmapPages(chunkPool[tempChunks], 2 * Chunk);
+    return true;
+}
+
+bool
+fillSpaceBeforeStagingArea(int &tempChunks, void *stagingArea,
+                           void **chunkPool, bool addressesGrowDown)
+{
+    // Make sure there are no available chunks before the staging area.
+    tempChunks = 0;
+    chunkPool[tempChunks++] = mapMemory(2 * Chunk);
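+    // The XOR flips each comparison when addresses grow down, so the loop
+    // keeps filling while new chunks still land on the side of the staging
+    // area that the OS hands out first.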
+    while (tempChunks < MaxTempChunks && chunkPool[tempChunks - 1] &&
+           (chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown) {
+        chunkPool[tempChunks++] = mapMemory(2 * Chunk);
+        if (!chunkPool[tempChunks - 1])
+            break; // We already have our staging area, so OOM here is okay.
+        if ((chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^ addressesGrowDown)
+            break; // The address growth direction is inconsistent!
+    }
+    // OOM also means success in this case.
+    if (!chunkPool[tempChunks - 1]) {
+        --tempChunks;
+        return true;
+    }
+    // Bail if we can't guarantee the right address space layout.
+    if ((chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown || (tempChunks > 1 &&
+            (chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^ addressesGrowDown))
+    {
+        while (--tempChunks >= 0)
+            unmapPages(chunkPool[tempChunks], 2 * Chunk);
+        unmapPages(stagingArea, StagingSize);
+        return false;
+    }
+    return true;
+}
+
+bool
+positionIsCorrect(const char *str, void *base, void **chunkPool, int tempChunks,
+                  js::gc::SystemPageAllocator& GCAlloc, AllocType allocator = UseNormalAllocator)
+{
+    // str represents a region of memory, with each character representing a
+    // region of Chunk bytes. str should contain only x, o and -, where
+    // x = mapped by the test to set up the initial conditions,
+    // o = mapped by the GC allocator, and
+    // - = unmapped.
+    // base should point to a region of contiguous free memory
+    // large enough to hold strlen(str) chunks of Chunk bytes.
+    int len = strlen(str);
+    int i;
+    // Find the index of the desired address.
+    for (i = 0; i < len && str[i] != 'o'; ++i);
+    void *desired = (void *)(uintptr_t(base) + i * Chunk);
+    // Map the regions indicated by str.
+    for (i = 0; i < len; ++i) {
+        if (str[i] == 'x')
+            mapMemoryAt((void *)(uintptr_t(base) + i * Chunk), Chunk);
+    }
+    // Allocate using the GC's allocator.
+    void *result;
+    if (allocator == UseNormalAllocator)
+        result = GCAlloc.mapAlignedPages(2 * Chunk, Alignment);
+    else
+        result = GCAlloc.testMapAlignedPagesLastDitch(2 * Chunk, Alignment);
+    // Clean up the mapped regions.
+    if (result)
+        GCAlloc.unmapPages(result, 2 * Chunk);
+    for (--i; i >= 0; --i) {
+        if (str[i] == 'x')
+            unmapPages((void *)(uintptr_t(base) + i * Chunk), Chunk);
+    }
+    // CHECK returns, so clean up on failure.
+    if (result != desired) {
+        while (--tempChunks >= 0)
+            unmapPages(chunkPool[tempChunks], 2 * Chunk);
+    }
+    return result == desired;
+}
+
+#if defined(XP_WIN)
+
+void *
+mapMemoryAt(void *desired, size_t length)
+{
+    return VirtualAlloc(desired, length, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+}
+
+void *
+mapMemory(size_t length)
+{
+    return VirtualAlloc(nullptr, length, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+}
+
+void
+unmapPages(void *p, size_t size)
+{
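+    // VirtualFree with MEM_RELEASE must be passed a size of 0 and frees the
+    // entire reservation, so the size argument is unused here.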
+    MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
+}
+
+#elif defined(SOLARIS)
+// This test doesn't apply to Solaris.
+#elif defined(XP_UNIX)
+
+void *
+mapMemoryAt(void *desired, size_t length)
+{
+#if defined(__ia64__)
+    // The engine requires the high 17 bits of pointers to be clear on ia64.
+    // Note the mask must apply before the comparison: == binds tighter than &.
+    MOZ_ASSERT((0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
+#endif
+    void *region = mmap(desired, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+    if (region == MAP_FAILED)
+        return nullptr;
+    if (region != desired) {
+        if (munmap(region, length))
+            MOZ_ASSERT(errno == ENOMEM);
+        return nullptr;
+    }
+    return region;
+}
+
+void *
+mapMemory(size_t length)
+{
+    void *hint = nullptr;
+#if defined(__ia64__)
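+    // Hint a low address so the high 17 bits of the result are likely clear;
+    // the check below rejects mappings that still violate this.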
+    hint = (void*)0x0000070000000000ULL;
+#endif
+    void *region = mmap(hint, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+    if (region == MAP_FAILED)
+        return nullptr;
+#if defined(__ia64__)
+    if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000ULL) {
+        if (munmap(region, length))
+            MOZ_ASSERT(errno == ENOMEM);
+        return nullptr;
+    }
+#endif
+    return region;
+}
+
+void
+unmapPages(void *p, size_t size)
+{
+    if (munmap(p, size))
+        MOZ_ASSERT(errno == ENOMEM);
+}
+
+#else // !defined(XP_WIN) && !defined(SOLARIS) && !defined(XP_UNIX)
+#error "Memory mapping functions are not defined for your OS."
+#endif
+END_TEST(testGCAllocator)