Bug 1005849 - Part 2: Some interface changes to make mapping memory less error prone. r=terrence
author      Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
date        Sat, 17 May 2014 16:03:00 +0200
changeset   184890 e4899509ca88
parent      184889 8eb3006c6d6f
child       184891 07e1de8eaa76
push id     43967
push user   cbook@mozilla.com
push date   Mon, 26 May 2014 11:41:11 +0000
reviewers   terrence
bugs        1005849
milestone   32.0a1
js/src/gc/Memory.cpp
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -25,25 +25,37 @@ SystemPageAllocator::decommitEnabled()
 SystemPageAllocator::SystemPageAllocator()
 {
     SYSTEM_INFO sysinfo;
     GetSystemInfo(&sysinfo);
     pageSize = sysinfo.dwPageSize;
     allocGranularity = sysinfo.dwAllocationGranularity;
 }
 
+static inline void *
+MapMemoryAt(void *desired, size_t length, int flags, int prot = PAGE_READWRITE)
+{
+    return VirtualAlloc(desired, length, flags, prot);
+}
+
+static inline void *
+MapMemory(size_t length, int flags, int prot = PAGE_READWRITE)
+{
+    return VirtualAlloc(nullptr, length, flags, prot);
+}
+
 void *
 SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
     JS_ASSERT(size % pageSize == 0);
     JS_ASSERT(alignment % allocGranularity == 0);
 
-    void *p = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+    void *p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
 
     /* Special case: If we want allocation alignment, no further work is needed. */
     if (alignment == allocGranularity)
         return p;
 
     if (uintptr_t(p) % alignment != 0) {
         unmapPages(p, size);
         p = mapAlignedPagesSlow(size, alignment);
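The fast path above leans on the fact that VirtualAlloc hands out reservations already aligned to the allocation granularity (typically 64 KiB), so the alignment == allocGranularity case needs no further work. A minimal standalone sketch of that assumption, not part of the patch:

    #include <windows.h>
    #include <cassert>
    #include <cstdint>

    int main()
    {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        // VirtualAlloc returns addresses that are multiples of the
        // allocation granularity; the fast path depends on this.
        void *p = VirtualAlloc(nullptr, si.dwAllocationGranularity,
                               MEM_RESERVE, PAGE_READWRITE);
        assert(p && uintptr_t(p) % si.dwAllocationGranularity == 0);
        VirtualFree(p, 0, MEM_RELEASE);
        return 0;
    }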
@@ -68,22 +80,22 @@ SystemPageAllocator::mapAlignedPagesSlow
          * Over-allocate in order to map a memory region that is definitely
          * large enough, then deallocate and allocate again the correct size,
          * within the over-sized mapping.
          *
          * Since we're going to unmap the whole thing anyway, the first
          * mapping doesn't have to commit pages.
          */
         size_t reserveSize = size + alignment - pageSize;
-        p = VirtualAlloc(nullptr, reserveSize, MEM_RESERVE, PAGE_READWRITE);
+        p = MapMemory(reserveSize, MEM_RESERVE);
         if (!p)
             return nullptr;
         void *chunkStart = (void *)AlignBytes(uintptr_t(p), alignment);
         unmapPages(p, reserveSize);
-        p = VirtualAlloc(chunkStart, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+        p = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);
 
         /* Failure here indicates a race with another thread, so try again. */
     } while (!p);
 
     return p;
 }
 
 void
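The loop above is an over-reserve / release / re-reserve dance: reserve more than needed so an aligned start must exist inside, release the whole reservation, then immediately try to claim just the aligned slice, retrying if another thread grabbed the range in between. A standalone sketch of the same idea, with illustrative names and a slightly larger over-reservation than the patch uses:

    #include <windows.h>
    #include <stdint.h>

    // Round p up to the next multiple of alignment (a power of two),
    // mirroring what AlignBytes does in the patch.
    static uintptr_t
    RoundUp(uintptr_t p, uintptr_t alignment)
    {
        return (p + alignment - 1) & ~(alignment - 1);
    }

    static void *
    ReserveAligned(size_t size, size_t alignment)
    {
        for (;;) {
            // Reserve extra so an aligned start must exist inside.
            void *over = VirtualAlloc(nullptr, size + alignment,
                                      MEM_RESERVE, PAGE_READWRITE);
            if (!over)
                return nullptr;
            void *start = (void *)RoundUp(uintptr_t(over), alignment);
            // Release everything, then immediately re-reserve the aligned
            // slice. Another thread can steal the range in the window
            // between these calls, hence the retry loop.
            VirtualFree(over, 0, MEM_RELEASE);
            if (void *p = VirtualAlloc(start, size, MEM_COMMIT | MEM_RESERVE,
                                       PAGE_READWRITE))
                return p;
        }
    }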
@@ -94,17 +106,17 @@ SystemPageAllocator::unmapPages(void *p,
 
 bool
 SystemPageAllocator::markPagesUnused(void *p, size_t size)
 {
     if (!decommitEnabled())
         return true;
 
     JS_ASSERT(uintptr_t(p) % pageSize == 0);
-    LPVOID p2 = VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE);
+    LPVOID p2 = MapMemoryAt(p, size, MEM_RESET);
     return p2 == p;
 }
 
 bool
 SystemPageAllocator::markPagesInUse(void *p, size_t size)
 {
     JS_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
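MEM_RESET is what makes markPagesUnused cheap: it tells the OS the contents of the pages are disposable without decommitting them, and on success VirtualAlloc returns the region's base address, which is what the p2 == p check relies on. A minimal usage sketch, not from the tree:

    #include <windows.h>

    // Hint that the contents of [p, p + size) need not be preserved.
    // The pages stay committed; reading them later is legal but yields
    // unspecified contents.
    static bool
    MarkDisposable(void *p, size_t size)
    {
        return VirtualAlloc(p, size, MEM_RESET, PAGE_READWRITE) == p;
    }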
@@ -214,17 +226,40 @@ SystemPageAllocator::DeallocateMappedCon
 #include <unistd.h>
 
 SystemPageAllocator::SystemPageAllocator()
 {
     pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
 static inline void *
-MapMemory(size_t length, int prot, int flags, int fd, off_t offset)
+MapMemoryAt(void *desired, size_t length, int prot = PROT_READ | PROT_WRITE,
+            int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
+{
+#if defined(__ia64__)
+    JS_ASSERT((0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
+#endif
+    void *region = mmap(desired, length, prot, flags, fd, offset);
+    if (region == MAP_FAILED)
+        return nullptr;
+    /*
+     * mmap treats the given address as a hint unless the MAP_FIXED flag is
+     * used (which isn't usually what you want, as this overrides existing
+     * mappings), so check that the address we got is the address we wanted.
+     */
+    if (region != desired) {
+        JS_ALWAYS_TRUE(0 == munmap(region, length));
+        return nullptr;
+    }
+    return region;
+}
+
+static inline void *
+MapMemory(size_t length, int prot = PROT_READ | PROT_WRITE,
+          int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
 {
 #if defined(__ia64__)
     /*
      * The JS engine assumes that all allocated pointers have their high 17 bits clear,
      * which ia64's mmap doesn't support directly. However, we can emulate it by passing
      * mmap an "addr" parameter with those bits clear. The mmap will return that address,
      * or the nearest available memory above that address, providing a near-guarantee
      * that those bits are clear. If they are not, we return nullptr below to indicate
@@ -232,45 +267,43 @@ MapMemory(size_t length, int prot, int f
      *
      * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
      * address space.
      *
      * See Bug 589735 for more information.
      */
     void *region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
     if (region == MAP_FAILED)
-        return MAP_FAILED;
+        return nullptr;
     /*
      * If the allocated memory doesn't have its upper 17 bits clear, consider it
      * as out of memory.
      */
     if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
         JS_ALWAYS_TRUE(0 == munmap(region, length));
-        return MAP_FAILED;
+        return nullptr;
     }
     return region;
 #else
-    return mmap(nullptr, length, prot, flags, fd, offset);
+    void *region = mmap(nullptr, length, prot, flags, fd, offset);
+    if (region == MAP_FAILED)
+        return nullptr;
+    return region;
 #endif
 }
 
 void *
 SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
     JS_ASSERT(size >= alignment);
     JS_ASSERT(size % alignment == 0);
     JS_ASSERT(size % pageSize == 0);
     JS_ASSERT(alignment % allocGranularity == 0);
 
-    int prot = PROT_READ | PROT_WRITE;
-    int flags = MAP_PRIVATE | MAP_ANON;
-
-    void *p = MapMemory(size, prot, flags, -1, 0);
-    if (p == MAP_FAILED)
-        return nullptr;
+    void *p = MapMemory(size);
 
     /* Special case: If we want page alignment, no further work is needed. */
     if (alignment == allocGranularity)
         return p;
 
     if (uintptr_t(p) % alignment != 0) {
         unmapPages(p, size);
         p = mapAlignedPagesSlow(size, alignment);
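The comment's numbers check out: addresses whose top 17 bits are clear run from 0 through 0x00007fffffffffff, so hinting at 0x0000070000000000 leaves 0x79 * 2^40 bytes of headroom, about 121 TiB. A compile-time sketch of that arithmetic, assuming C++11 static_assert:

    #include <cstdint>

    // The top 17 bits of a 64-bit address, as in the ia64 checks above.
    constexpr uint64_t kHighBits = 0xffff800000000000ULL;
    constexpr uint64_t kHint     = 0x0000070000000000ULL;
    constexpr uint64_t kLast     = ~kHighBits;  // 0x00007fffffffffff

    static_assert((kHint & kHighBits) == 0,
                  "the hint itself has the high 17 bits clear");
    static_assert(kLast - kHint + 1 == 0x0000790000000000ULL,
                  "0x79 * 2^40 bytes remain, roughly 121 TiB");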
@@ -278,23 +311,20 @@ SystemPageAllocator::mapAlignedPages(siz
 
     JS_ASSERT(uintptr_t(p) % alignment == 0);
     return p;
 }
 
 void *
 SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
 {
-    int prot = PROT_READ | PROT_WRITE;
-    int flags = MAP_PRIVATE | MAP_ANON;
-
     /* Overallocate and unmap the region's edges. */
     size_t reqSize = Min(size + 2 * alignment, 2 * size);
-    void *region = MapMemory(reqSize, prot, flags, -1, 0);
-    if (region == MAP_FAILED)
+    void *region = MapMemory(reqSize);
+    if (!region)
         return nullptr;
 
     uintptr_t regionEnd = uintptr_t(region) + reqSize;
     uintptr_t offset = uintptr_t(region) % alignment;
     JS_ASSERT(offset < reqSize - size);
 
     void *front = (void *)AlignBytes(uintptr_t(region), alignment);
     void *end = (void *)(uintptr_t(front) + size);
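The hunk cuts off before the trimming itself; what follows in the tree unmaps the slack on both sides of the aligned chunk. A standalone sketch of that edge-trimming, with illustrative names (POSIX permits partially unmapping an anonymous mapping):

    #include <sys/mman.h>
    #include <stddef.h>
    #include <stdint.h>

    // Keep only the aligned chunk [front, front + size) inside the
    // over-sized region [region, region + reqSize), unmapping the rest.
    static void *
    TrimToAligned(void *region, size_t reqSize, size_t size, size_t alignment)
    {
        uintptr_t base = uintptr_t(region);
        uintptr_t front = (base + alignment - 1) & ~(alignment - 1);
        uintptr_t end = front + size;
        if (front != base)
            munmap(region, front - base);               // leading slack
        if (end != base + reqSize)
            munmap((void *)end, base + reqSize - end);  // trailing slack
        return (void *)front;
    }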
@@ -366,23 +396,23 @@ SystemPageAllocator::AllocateMappedConte
     // Page-aligned start of the offset.
     pa_start = offset & ~(page_size - 1);
     // Compute the page-aligned end by adding one page to the page-aligned
     // start of the last data byte (offset + length - 1).
     pa_end = ((offset + length - 1) & ~(page_size - 1)) + page_size;
     pa_size = pa_end - pa_start;
 
     // Ask for a contiguous memory location.
-    buf = (uint8_t *) MapMemory(pa_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-    if (buf == MAP_FAILED)
+    buf = (uint8_t *) MapMemory(pa_size);
+    if (!buf)
         return nullptr;
 
-    buf = (uint8_t *) mmap(buf, pa_size, PROT_READ | PROT_WRITE,
-                           MAP_PRIVATE | MAP_FIXED, fd, pa_start);
-    if (buf == MAP_FAILED)
+    buf = (uint8_t *) MapMemoryAt(buf, pa_size, PROT_READ | PROT_WRITE,
+                                  MAP_PRIVATE | MAP_FIXED, fd, pa_start);
+    if (!buf)
         return nullptr;
 
     // Zero the bytes before the target data, which we don't need to see.
     memset(buf, 0, offset - pa_start);
 
     // Zero the bytes after the target data, which we don't need to see.
     memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length));
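A worked example makes the mask arithmetic above concrete: with page_size = 4096, offset = 5000, and length = 3000, the mapping must cover file bytes 5000 through 7999. A minimal check of the computation, assuming a power-of-two page size:

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const size_t page_size = 4096;
        const size_t offset = 5000, length = 3000;

        // Round the start down to a page boundary, and the end of the
        // data (offset + length - 1) up to the next page boundary.
        size_t pa_start = offset & ~(page_size - 1);
        size_t pa_end = ((offset + length - 1) & ~(page_size - 1)) + page_size;
        size_t pa_size = pa_end - pa_start;

        assert(pa_start == 4096 && pa_end == 8192 && pa_size == 4096);
        return 0;
    }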