Bug 1230162 - allocate less, when we can. r=luke
author Lars T Hansen <lhansen@mozilla.com>
Thu, 03 Dec 2015 16:52:31 +0100
changeset 309880 c0c86c046d88c879d7e402bc31a76173b16c906f
parent 309879 39c9062db9d9c4de647fb07f2faabd5c555f6022
child 309881 a42590c2a8bf36bc766621cb0362dff28de037df
push id 5513
push user raliiev@mozilla.com
push date Mon, 25 Jan 2016 13:55:34 +0000
treeherder mozilla-beta@5ee97dd05b5c [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers luke
bugs 1230162
milestone 45.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1230162 - allocate less, when we can. r=luke
js/src/vm/SharedArrayObject.cpp
--- a/js/src/vm/SharedArrayObject.cpp
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -96,52 +96,61 @@ SharedArrayRawBuffer::New(JSContext* cx,
     // so guard against it on principle.
     MOZ_ASSERT(length != (uint32_t)-1);
 
     // Add a page for the header and round to a page boundary.
     uint32_t allocSize = (length + 2*AsmJSPageSize - 1) & ~(AsmJSPageSize - 1);
     if (allocSize <= length)
         return nullptr;
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-    // Test >= to guard against the case where multiple extant runtimes
-    // race to allocate.
-    if (++numLive >= maxLive) {
-        JSRuntime* rt = cx->runtime();
-        if (rt->largeAllocationFailureCallback)
-            rt->largeAllocationFailureCallback(rt->largeAllocationFailureCallbackData);
-        if (numLive >= maxLive) {
+    void* p = nullptr;
+    if (!IsValidAsmJSHeapLength(length)) {
+        p = MapMemory(allocSize, true);
+        if (!p)
+            return nullptr;
+    } else {
+        // Test >= to guard against the case where multiple extant runtimes
+        // race to allocate.
+        if (++numLive >= maxLive) {
+            JSRuntime* rt = cx->runtime();
+            if (rt->largeAllocationFailureCallback)
+                rt->largeAllocationFailureCallback(rt->largeAllocationFailureCallbackData);
+            if (numLive >= maxLive) {
+                numLive--;
+                return nullptr;
+            }
+        }
+        // Get the entire reserved region (with all pages inaccessible)
+        p = MapMemory(SharedArrayMappedSize, false);
+        if (!p) {
             numLive--;
             return nullptr;
         }
-    }
-    // Get the entire reserved region (with all pages inaccessible)
-    void* p = MapMemory(SharedArrayMappedSize, false);
-    if (!p) {
-        numLive--;
-        return nullptr;
+
+        if (!MarkValidRegion(p, allocSize)) {
+            UnmapMemory(p, SharedArrayMappedSize);
+            numLive--;
+            return nullptr;
+        }
+#   if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
+        // Tell Valgrind/Memcheck to not report accesses in the inaccessible region.
+        VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize,
+                                                       SharedArrayMappedSize - allocSize);
+#   endif
     }
-
-    if (!MarkValidRegion(p, allocSize)) {
-        UnmapMemory(p, SharedArrayMappedSize);
-        numLive--;
-        return nullptr;
-    }
-#   if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-    // Tell Valgrind/Memcheck to not report accesses in the inaccessible region.
-    VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize,
-                                                   SharedArrayMappedSize - allocSize);
-#   endif
 #else
     void* p = MapMemory(allocSize, true);
     if (!p)
         return nullptr;
 #endif
     uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + AsmJSPageSize;
     uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
-    return new (base) SharedArrayRawBuffer(buffer, length);
+    SharedArrayRawBuffer* rawbuf = new (base) SharedArrayRawBuffer(buffer, length);
+    MOZ_ASSERT(rawbuf->length == length); // Deallocation needs this
+    return rawbuf;
 }
 
 void
 SharedArrayRawBuffer::addReference()
 {
     MOZ_ASSERT(this->refcount > 0);
     ++this->refcount; // Atomic.
 }
@@ -154,28 +163,33 @@ SharedArrayRawBuffer::dropReference()
 
     // If this was the final reference, release the buffer.
     if (refcount == 0) {
         SharedMem<uint8_t*> p = this->dataPointerShared() - AsmJSPageSize;
 
         MOZ_ASSERT(p.asValue() % AsmJSPageSize == 0);
 
         uint8_t* address = p.unwrap(/*safe - only reference*/);
+        uint32_t allocSize = (this->length + 2*AsmJSPageSize - 1) & ~(AsmJSPageSize - 1);
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-        numLive--;
-        UnmapMemory(address, SharedArrayMappedSize);
+        if (!IsValidAsmJSHeapLength(allocSize)) {
+            UnmapMemory(address, allocSize);
+        } else {
+            numLive--;
+            UnmapMemory(address, SharedArrayMappedSize);
 #       if defined(MOZ_VALGRIND) \
            && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-        // Tell Valgrind/Memcheck to recommence reporting accesses in the
-        // previously-inaccessible region.
-        VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address,
-                                                      SharedArrayMappedSize);
+            // Tell Valgrind/Memcheck to recommence reporting accesses in the
+            // previously-inaccessible region.
+            VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address,
+                                                          SharedArrayMappedSize);
 #       endif
+        }
 #else
-        UnmapMemory(address, this->length + AsmJSPageSize);
+        UnmapMemory(address, allocSize);
 #endif
     }
 }
 
 const JSFunctionSpec SharedArrayBufferObject::jsfuncs[] = {
     /* Nothing yet */
     JS_FS_END
 };