Bug 1005849 - Part 1: Refactor GC allocation logic to match jemalloc3 more closely. r=terrence
author Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
Tue, 13 May 2014 18:32:00 +0200
changeset 183340 47f1e10bbe33
parent 183339 d5a2654130e3
child 183341 51e11d4c451c
push id 26789
push user kwierso@gmail.com
push date Thu, 15 May 2014 22:59:59 +0000
treeherder mozilla-central@58c5a3427997
reviewers terrence
bugs 1005849
milestone 32.0a1
Bug 1005849 - Part 1: Refactor GC allocation logic to match jemalloc3 more closely. r=terrence
js/src/gc/Memory.cpp
js/src/gc/Memory.h
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -33,29 +33,42 @@ SystemPageAllocator::SystemPageAllocator
 void *
 SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
     JS_ASSERT(size % pageSize == 0);
     JS_ASSERT(alignment % allocGranularity == 0);
 
+    void *p = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+
     /* Special case: If we want allocation alignment, no further work is needed. */
-    if (alignment == allocGranularity) {
-        return VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+    if (alignment == allocGranularity)
+        return p;
+
+    if (uintptr_t(p) % alignment != 0) {
+        unmapPages(p, size);
+        p = mapAlignedPagesSlow(size, alignment);
     }
 
+    JS_ASSERT(uintptr_t(p) % alignment == 0);
+    return p;
+}
+
+void *
+SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
+{
     /*
      * Windows requires that there be a 1:1 mapping between VM allocation
      * and deallocation operations.  Therefore, take care here to acquire the
      * final result via one mapping operation.  This means unmapping any
      * preliminary result that is not correctly aligned.
      */
-    void *p = nullptr;
-    while (!p) {
+    void *p;
+    do {
         /*
          * Over-allocate in order to map a memory region that is definitely
          * large enough, then deallocate and allocate again the correct size,
          * within the over-sized mapping.
          *
          * Since we're going to unmap the whole thing anyway, the first
          * mapping doesn't have to commit pages.
          */
@@ -63,19 +76,18 @@ SystemPageAllocator::mapAlignedPages(siz
         p = VirtualAlloc(nullptr, reserveSize, MEM_RESERVE, PAGE_READWRITE);
         if (!p)
             return nullptr;
         void *chunkStart = (void *)AlignBytes(uintptr_t(p), alignment);
         unmapPages(p, reserveSize);
         p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
 
         /* Failure here indicates a race with another thread, so try again. */
-    }
+    } while (!p);
 
-    JS_ASSERT(uintptr_t(p) % alignment == 0);
     return p;
 }
 
 void
 SystemPageAllocator::unmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
 }
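
The Windows hunks above split allocation into a fast path (keep whatever address VirtualAlloc hands back if it already satisfies the alignment) and a slow path that over-reserves, releases, and re-commits at an aligned address, retrying on races. Below is a minimal standalone sketch of that pattern, assuming a power-of-two alignment that is a multiple of the allocation granularity; TryMapAligned is an illustrative name, and the over-reserve size here is simply size + alignment rather than the patch's reserveSize.

#include <windows.h>
#include <cstdint>
#include <cstdio>

static void *TryMapAligned(size_t size, size_t alignment)
{
    // Fast path: keep the kernel's choice of address if it already satisfies
    // the alignment (always true when alignment equals the granularity).
    void *p = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (!p || uintptr_t(p) % alignment == 0)
        return p;

    // Slow path: Windows requires a 1:1 mapping between allocation and
    // deallocation calls, so release the misaligned block, over-reserve a
    // region big enough to contain an aligned chunk, release that too, and
    // commit exactly [aligned, aligned + size). Another thread may grab the
    // range in between, in which case the final VirtualAlloc fails and we retry.
    VirtualFree(p, 0, MEM_RELEASE);
    do {
        void *r = VirtualAlloc(nullptr, size + alignment, MEM_RESERVE, PAGE_READWRITE);
        if (!r)
            return nullptr;
        uintptr_t aligned = (uintptr_t(r) + alignment - 1) & ~uintptr_t(alignment - 1);
        VirtualFree(r, 0, MEM_RELEASE);
        p = VirtualAlloc((void *)aligned, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    } while (!p);
    return p;
}

int main()
{
    void *p = TryMapAligned(1 << 20, 1 << 20);  // 1 MiB, aligned to 1 MiB
    printf("mapped at %p\n", p);
    if (p)
        VirtualFree(p, 0, MEM_RELEASE);
    return 0;
}

With this split the common case costs a single VirtualAlloc, and only a misaligned result pays for the reserve/release/commit dance.
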
@@ -246,24 +258,39 @@ SystemPageAllocator::mapAlignedPages(siz
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
     JS_ASSERT(size % pageSize == 0);
     JS_ASSERT(alignment % allocGranularity == 0);
 
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANON;
 
+    void *p = MapMemory(size, prot, flags, -1, 0);
+    if (p == MAP_FAILED)
+        return nullptr;
+
     /* Special case: If we want page alignment, no further work is needed. */
-    if (alignment == allocGranularity) {
-        void *region = MapMemory(size, prot, flags, -1, 0);
-        if (region == MAP_FAILED)
-            return nullptr;
-        return region;
+    if (alignment == allocGranularity)
+        return p;
+
+    if (uintptr_t(p) % alignment != 0) {
+        unmapPages(p, size);
+        p = mapAlignedPagesSlow(size, alignment);
     }
 
+    JS_ASSERT(uintptr_t(p) % alignment == 0);
+    return p;
+}
+
+void *
+SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
+{
+    int prot = PROT_READ | PROT_WRITE;
+    int flags = MAP_PRIVATE | MAP_ANON;
+
     /* Overallocate and unmap the region's edges. */
     size_t reqSize = Min(size + 2 * alignment, 2 * size);
     void *region = MapMemory(reqSize, prot, flags, -1, 0);
     if (region == MAP_FAILED)
         return nullptr;
 
     uintptr_t regionEnd = uintptr_t(region) + reqSize;
     uintptr_t offset = uintptr_t(region) % alignment;
@@ -271,17 +298,16 @@ SystemPageAllocator::mapAlignedPages(siz
 
     void *front = (void *)AlignBytes(uintptr_t(region), alignment);
     void *end = (void *)(uintptr_t(front) + size);
     if (front != region)
         JS_ALWAYS_TRUE(0 == munmap(region, alignment - offset));
     if (uintptr_t(end) != regionEnd)
         JS_ALWAYS_TRUE(0 == munmap(end, regionEnd - uintptr_t(end)));
 
-    JS_ASSERT(uintptr_t(front) % alignment == 0);
     return front;
 }
 
 void
 SystemPageAllocator::unmapPages(void *p, size_t size)
 {
     JS_ALWAYS_TRUE(0 == munmap(p, size));
 }
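
On the POSIX side the slow path never needs to retry: munmap can trim arbitrary pieces off an existing mapping, so the code over-allocates once and cuts away the misaligned front and the excess tail. Below is a minimal standalone sketch of that overallocate-and-trim approach, assuming a power-of-two alignment that is a multiple of the page size; MapAlignedSlow is an illustrative name, and the over-allocation here is simply size + alignment rather than the patch's Min(size + 2 * alignment, 2 * size).

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

static void *MapAlignedSlow(size_t size, size_t alignment)
{
    // Over-allocate so an aligned region of `size` bytes must fit somewhere
    // inside the mapping.
    size_t reqSize = size + alignment;
    void *region = mmap(nullptr, reqSize, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (region == MAP_FAILED)
        return nullptr;

    // Unlike VirtualFree, munmap can trim page-aligned pieces off an existing
    // mapping, so cut away the unaligned front and the excess tail in place.
    uintptr_t front = (uintptr_t(region) + alignment - 1) & ~uintptr_t(alignment - 1);
    uintptr_t end = front + size;
    if (front != uintptr_t(region))
        munmap(region, front - uintptr_t(region));
    if (end != uintptr_t(region) + reqSize)
        munmap((void *)end, uintptr_t(region) + reqSize - end);
    return (void *)front;
}

int main()
{
    size_t size = 1 << 20, alignment = 1 << 20;  // 1 MiB, aligned to 1 MiB
    void *p = MapAlignedSlow(size, alignment);
    printf("mapped at %p\n", p);
    if (p)
        munmap(p, size);
    return 0;
}

Because the trimming happens within an already-established mapping, no other thread can race for the aligned region the way it can on Windows, so a single mmap suffices even on the slow path.
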
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -44,16 +44,17 @@ class SystemPageAllocator
     // The offset must be aligned according to alignment requirement.
     static void *AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
 
     // Deallocate memory mapped content.
     static void DeallocateMappedContent(void *p, size_t length);
 
   private:
     bool decommitEnabled();
+    void *mapAlignedPagesSlow(size_t size, size_t alignment);
 
     // The GC can only safely decommit memory when the page size of the
     // running process matches the compiled arena size.
     size_t              pageSize;
 
     // The OS allocation granularity may not match the page size.
     size_t              allocGranularity;
 };