Bug 1005849 - Part 2b: Allow munmap to fail if errno == ENOMEM and switch over to MOZ_ASSERT variants. r=terrence
author: Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
date: Tue, 20 May 2014 10:44:00 +0200
changeset: 184891:07e1de8eaa76
parent: 184890:e4899509ca88
child: 184892:96e1b7a25c71
push id: 43967
push user: cbook@mozilla.com
push date: Mon, 26 May 2014 11:41:11 +0000
treeherder: mozilla-inbound@4e09a894645c
reviewers: terrence
bugs: 1005849
milestone: 32.0a1
js/src/gc/Memory.cpp
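
Rationale: munmap(2) can legitimately fail with ENOMEM. On Linux, unmapping the interior of a larger mapping splits it in two, and the kernel rejects the call if the process would exceed its maximum number of mappings (vm.max_map_count). The old JS_ALWAYS_TRUE(0 == munmap(...)) pattern turned that survivable condition into a debug assertion failure; the new pattern still performs the munmap in release builds and only asserts, in debug builds, that any failure is the tolerated ENOMEM case. A minimal standalone sketch of the pattern (a hypothetical free function; plain assert stands in for MOZ_ASSERT):

    #include <cassert>    // stands in for MOZ_ASSERT in this sketch
    #include <cerrno>
    #include <sys/mman.h>

    static void
    UnmapPages(void *p, size_t size)
    {
        // munmap() runs in all builds; only the check compiles away.
        // ENOMEM means the kernel refused to split an existing mapping
        // (e.g. Linux's vm.max_map_count was reached), which we tolerate;
        // any other failure would indicate a real bug.
        if (munmap(p, size))
            assert(errno == ENOMEM);
    }
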
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -40,33 +40,33 @@ static inline void *
 MapMemory(size_t length, int flags, int prot = PAGE_READWRITE)
 {
     return VirtualAlloc(nullptr, length, flags, prot);
 }
 
 void *
 SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
-    JS_ASSERT(size >= alignment);
-    JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % pageSize == 0);
-    JS_ASSERT(alignment % allocGranularity == 0);
+    MOZ_ASSERT(size >= alignment);
+    MOZ_ASSERT(size % alignment == 0);
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_ASSERT(alignment % allocGranularity == 0);
 
     void *p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
 
     /* Special case: If we want allocation alignment, no further work is needed. */
     if (alignment == allocGranularity)
         return p;
 
     if (uintptr_t(p) % alignment != 0) {
         unmapPages(p, size);
         p = mapAlignedPagesSlow(size, alignment);
     }
 
-    JS_ASSERT(uintptr_t(p) % alignment == 0);
+    MOZ_ASSERT(uintptr_t(p) % alignment == 0);
     return p;
 }
 
 void *
 SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
 {
     /*
      * Windows requires that there be a 1:1 mapping between VM allocation
@@ -96,34 +96,34 @@ SystemPageAllocator::mapAlignedPagesSlow
     } while (!p);
 
     return p;
 }
 
 void
 SystemPageAllocator::unmapPages(void *p, size_t size)
 {
-    JS_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
+    MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
 }
 
 bool
 SystemPageAllocator::markPagesUnused(void *p, size_t size)
 {
     if (!decommitEnabled())
         return true;
 
-    JS_ASSERT(uintptr_t(p) % pageSize == 0);
+    MOZ_ASSERT(uintptr_t(p) % pageSize == 0);
     LPVOID p2 = MapMemoryAt(p, size, MEM_RESET);
     return p2 == p;
 }
 
 bool
 SystemPageAllocator::markPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % pageSize == 0);
+    MOZ_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 size_t
 SystemPageAllocator::GetPageFaultCount()
 {
     PROCESS_MEMORY_COUNTERS pmc;
     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
@@ -157,47 +157,47 @@ SystemPageAllocator::DeallocateMappedCon
 SystemPageAllocator::SystemPageAllocator()
 {
     pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
 void *
 SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
-    JS_ASSERT(size >= alignment);
-    JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % pageSize == 0);
-    JS_ASSERT(alignment % allocGranularity == 0);
+    MOZ_ASSERT(size >= alignment);
+    MOZ_ASSERT(size % alignment == 0);
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_ASSERT(alignment % allocGranularity == 0);
 
     int prot = PROT_READ | PROT_WRITE;
     int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
 
     void *p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
     if (p == MAP_FAILED)
         return nullptr;
     return p;
 }
 
 void
 SystemPageAllocator::unmapPages(void *p, size_t size)
 {
-    JS_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
+    MOZ_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
 }
 
 bool
 SystemPageAllocator::markPagesUnused(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % pageSize == 0);
+    MOZ_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 bool
 SystemPageAllocator::markPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % pageSize == 0);
+    MOZ_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 size_t
 SystemPageAllocator::GetPageFaultCount()
 {
     return 0;
 }
@@ -214,44 +214,46 @@ void
 SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
 {
     // Not implemented.
 }
 
 #elif defined(XP_UNIX)
 
 #include <algorithm>
+#include <errno.h>
 #include <sys/mman.h>
 #include <sys/resource.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 SystemPageAllocator::SystemPageAllocator()
 {
     pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
 }
 
 static inline void *
 MapMemoryAt(void *desired, size_t length, int prot = PROT_READ | PROT_WRITE,
             int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
 {
 #if defined(__ia64__)
-    JS_ASSERT(0xffff800000000000ULL & (uintptr_t(desired) + length - 1) == 0);
+    MOZ_ASSERT((0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
 #endif
     void *region = mmap(desired, length, prot, flags, fd, offset);
     if (region == MAP_FAILED)
         return nullptr;
     /*
      * mmap treats the given address as a hint unless the MAP_FIXED flag is
      * used (which isn't usually what you want, as this overrides existing
      * mappings), so check that the address we got is the address we wanted.
      */
     if (region != desired) {
-        JS_ALWAYS_TRUE(0 == munmap(region, length));
+        if (munmap(region, length))
+            MOZ_ASSERT(errno == ENOMEM);
         return nullptr;
     }
     return region;
 }
 
 static inline void *
 MapMemory(size_t length, int prot = PROT_READ | PROT_WRITE,
           int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
@@ -273,95 +275,97 @@ MapMemory(size_t length, int prot = PROT
     void *region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
     if (region == MAP_FAILED)
         return nullptr;
     /*
      * If the allocated memory doesn't have its upper 17 bits clear, consider it
      * as out of memory.
      */
     if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
-        JS_ALWAYS_TRUE(0 == munmap(region, length));
+        if (munmap(region, length))
+            MOZ_ASSERT(errno == ENOMEM);
         return nullptr;
     }
     return region;
 #else
     void *region = mmap(nullptr, length, prot, flags, fd, offset);
     if (region == MAP_FAILED)
         return nullptr;
     return region;
 #endif
 }
 
 void *
 SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
 {
-    JS_ASSERT(size >= alignment);
-    JS_ASSERT(size % alignment == 0);
-    JS_ASSERT(size % pageSize == 0);
-    JS_ASSERT(alignment % allocGranularity == 0);
+    MOZ_ASSERT(size >= alignment);
+    MOZ_ASSERT(size % alignment == 0);
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_ASSERT(alignment % allocGranularity == 0);
 
     void *p = MapMemory(size);
 
     /* Special case: If we want page alignment, no further work is needed. */
     if (alignment == allocGranularity)
         return p;
 
     if (uintptr_t(p) % alignment != 0) {
         unmapPages(p, size);
         p = mapAlignedPagesSlow(size, alignment);
     }
 
-    JS_ASSERT(uintptr_t(p) % alignment == 0);
+    MOZ_ASSERT(uintptr_t(p) % alignment == 0);
     return p;
 }
 
 void *
 SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
 {
     /* Overallocate and unmap the region's edges. */
     size_t reqSize = Min(size + 2 * alignment, 2 * size);
     void *region = MapMemory(reqSize);
     if (!region)
         return nullptr;
 
     uintptr_t regionEnd = uintptr_t(region) + reqSize;
     uintptr_t offset = uintptr_t(region) % alignment;
-    JS_ASSERT(offset < reqSize - size);
+    MOZ_ASSERT(offset < reqSize - size);
 
     void *front = (void *)AlignBytes(uintptr_t(region), alignment);
     void *end = (void *)(uintptr_t(front) + size);
     if (front != region)
-        JS_ALWAYS_TRUE(0 == munmap(region, alignment - offset));
+        unmapPages(region, alignment - offset);
     if (uintptr_t(end) != regionEnd)
-        JS_ALWAYS_TRUE(0 == munmap(end, regionEnd - uintptr_t(end)));
+        unmapPages(end, regionEnd - uintptr_t(end));
 
     return front;
 }
 
 void
 SystemPageAllocator::unmapPages(void *p, size_t size)
 {
-    JS_ALWAYS_TRUE(0 == munmap(p, size));
+    if (munmap(p, size))
+        MOZ_ASSERT(errno == ENOMEM);
 }
 
 bool
 SystemPageAllocator::markPagesUnused(void *p, size_t size)
 {
     if (!decommitEnabled())
         return false;
 
-    JS_ASSERT(uintptr_t(p) % pageSize == 0);
+    MOZ_ASSERT(uintptr_t(p) % pageSize == 0);
     int result = madvise(p, size, MADV_DONTNEED);
     return result != -1;
 }
 
 bool
 SystemPageAllocator::markPagesInUse(void *p, size_t size)
 {
-    JS_ASSERT(uintptr_t(p) % pageSize == 0);
+    MOZ_ASSERT(uintptr_t(p) % pageSize == 0);
     return true;
 }
 
 size_t
 SystemPageAllocator::GetPageFaultCount()
 {
     struct rusage usage;
     int err = getrusage(RUSAGE_SELF, &usage);
@@ -423,14 +427,15 @@ void
 SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
 {
     void *pa_start; // Page aligned starting address
     size_t page_size = sysconf(_SC_PAGESIZE); // Page size
     size_t total_size; // Total allocated size
 
     pa_start = (void *)(uintptr_t(p) & ~(page_size - 1));
     total_size = ((uintptr_t(p) + length) & ~(page_size - 1)) + page_size - uintptr_t(pa_start);
-    munmap(pa_start, total_size);
+    if (munmap(pa_start, total_size))
+        MOZ_ASSERT(errno == ENOMEM);
 }
 
 #else
 #error "Memory mapping functions are not defined for your OS."
 #endif
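
For reference, the POSIX slow path (mapAlignedPagesSlow) above gets its alignment by overallocating and trimming the region's edges; trimming the front of a region is precisely the munmap call that splits a mapping and can therefore fail with ENOMEM. A minimal sketch of the technique, with hypothetical names rather than the patched functions themselves:

    #include <sys/mman.h>
    #include <algorithm>
    #include <cstdint>

    static void *
    MapAlignedSlow(size_t size, size_t alignment)
    {
        // Overallocate so that an aligned run of 'size' bytes must fit.
        size_t reqSize = std::min(size + 2 * alignment, 2 * size);
        void *region = mmap(nullptr, reqSize, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (region == MAP_FAILED)
            return nullptr;

        uintptr_t start = uintptr_t(region);
        uintptr_t regionEnd = start + reqSize;
        // Round the start up to the next multiple of the alignment.
        uintptr_t front = (start + alignment - 1) / alignment * alignment;
        uintptr_t end = front + size;

        // Trim the unused edges. The front trim splits the mapping, so
        // it may fail with ENOMEM; tolerate that as unmapPages now does.
        if (front != start)
            munmap(region, front - start);
        if (end != regionEnd)
            munmap((void *)end, regionEnd - end);

        return (void *)front;
    }

Windows cannot take this path: VirtualFree(MEM_RELEASE) must release a VirtualAlloc region as a whole (the 1:1 mapping between allocation and deallocation noted in the comment above), which is why the Windows slow path retries whole mappings instead.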