Bug 616310 - JM: reduce fragmentation in ExecutableAllocator. r=jseward.
authorNicholas Nethercote <nnethercote@mozilla.com>
Mon, 06 Dec 2010 17:16:08 -0800
changeset 58751 8921e3faccd2558eab22efb3212aadda978edb99
parent 58750 4e41063ec08f7a5b4a303422d0ab338b7de62144
child 58752 37b29506a7d4c9915a39e881812b011159f96e30
child 58976 e35b70ffed69cf1729e8ba7dff19bbe974e9e52b
push id17414
push userrsayre@mozilla.com
push dateTue, 07 Dec 2010 03:47:09 +0000
treeherdermozilla-central@37b29506a7d4 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjseward
bugs616310
milestone2.0b8pre
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 616310 - JM: reduce fragmentation in ExecutableAllocator. r=jseward.
js/src/assembler/jit/ExecutableAllocator.h
--- a/js/src/assembler/jit/ExecutableAllocator.h
+++ b/js/src/assembler/jit/ExecutableAllocator.h
@@ -50,26 +50,24 @@
 
 #if WTF_PLATFORM_WINCE
 // From pkfuncs.h (private header file from the Platform Builder)
 #define CACHE_SYNC_ALL 0x07F
 extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
 #endif
 
 #define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
-#if WTF_PLATFORM_WIN_OS || WTF_PLATFORM_WINCE
 /*
- * In practice, VirtualAlloc allocates in 64K chunks. (Technically, it
- * allocates in page chunks, but the starting address is always a multiple
- * of 64K, so each allocation uses up 64K of address space.
+ * On Windows, VirtualAlloc effectively allocates in 64K chunks. (Technically,
+ * it allocates in page chunks, but the starting address is always a multiple
+ * of 64K, so each allocation uses up 64K of address space.)  So a size less
+ * than that would be pointless.  But it turns out that 64K is a reasonable
+ * size for all platforms.
  */
-# define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 16)
-#else
-# define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
-#endif
+#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 16)
 
 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
 #define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
 #define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
 #define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
 #else
 #define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
 #endif
@@ -198,57 +196,89 @@ public:
     static ExecutableAllocator *create()
     {
         ExecutableAllocator *allocator = new ExecutableAllocator();
         if (!allocator)
             return allocator;
 
         if (!pageSize)
             intializePageSize();
-        allocator->m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-        if (!allocator->m_smallAllocationPool) {
+        ExecutablePool *pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+        if (!pool) {
             delete allocator;
             return NULL;
         }
+        JS_ASSERT(allocator->m_smallAllocationPools.empty());
+        allocator->m_smallAllocationPools.append(pool);
         return allocator;
     }
 
-    ~ExecutableAllocator() { delete m_smallAllocationPool; }
+    ~ExecutableAllocator()
+    {
+        for (size_t i = 0; i < m_smallAllocationPools.length(); i++)
+            delete m_smallAllocationPools[i];
+    }
 
     // poolForSize returns reference-counted objects. The caller owns a reference
     // to the object; i.e., poolForSize increments the count before returning the
     // object.
 
     ExecutablePool* poolForSize(size_t n)
     {
 #ifndef DEBUG_STRESS_JSC_ALLOCATOR
-        // Try to fit in the existing small allocator
-        if (n < m_smallAllocationPool->available()) {
-	    m_smallAllocationPool->addRef();
-            return m_smallAllocationPool;
-	}
+        // Try to fit in an existing small pool.  Use the pool with the
+        // least available space that is big enough (best-fit).  This is the
+        // best strategy because (a) it maximizes the chance of the next
+        // allocation fitting in a small pool, and (b) it minimizes the
+        // potential waste when a small pool is next abandoned.
+        ExecutablePool *minPool = NULL;
+        for (size_t i = 0; i < m_smallAllocationPools.length(); i++) {
+            ExecutablePool *pool = m_smallAllocationPools[i];
+            if (n <= pool->available() && (!minPool || pool->available() < minPool->available()))
+                minPool = pool;
+        }
+        if (minPool) {
+            minPool->addRef();
+            return minPool;
+        }
 #endif
 
         // If the request is large, we just provide a unshared allocator
         if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
             return ExecutablePool::create(n);
 
         // Create a new allocator
         ExecutablePool* pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
         if (!pool)
             return NULL;
   	    // At this point, local |pool| is the owner.
 
-        // If the new allocator will result in more free space than in
-        // the current small allocator, then we will use it instead
-        if ((pool->available() - n) > m_smallAllocationPool->available()) {
-	        m_smallAllocationPool->release();
-            m_smallAllocationPool = pool;
-	        pool->addRef();
-	    }
+        if (m_smallAllocationPools.length() < maxSmallPools) {
+            // We haven't hit the maximum number of live pools;  add the new pool.
+            m_smallAllocationPools.append(pool);
+            pool->addRef();
+        } else {
+            // Find the pool with the least space.
+            int iMin = 0;
+            for (size_t i = 1; i < m_smallAllocationPools.length(); i++)
+                if (m_smallAllocationPools[i]->available() <
+                    m_smallAllocationPools[iMin]->available())
+                {
+                    iMin = i;
+                }
+
+        // If the new allocator will result in more free space than the small
+        // pool with the least space, then we will use it instead.
+            ExecutablePool *minPool = m_smallAllocationPools[iMin];
+            if ((pool->available() - n) > minPool->available()) {
+                minPool->release();
+                m_smallAllocationPools[iMin] = pool;
+                pool->addRef();
+            }
+        }
 
    	    // Pass ownership to the caller.
         return pool;
     }
 
 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
     static void makeWritable(void* start, size_t size)
     {
@@ -350,17 +380,19 @@ public:
 #endif
 
 private:
 
 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
     static void reprotectRegion(void*, size_t, ProtectionSeting);
 #endif
 
-    ExecutablePool* m_smallAllocationPool;
+    static const size_t maxSmallPools = 4;
+    typedef js::Vector<ExecutablePool *, maxSmallPools, js::SystemAllocPolicy > SmallExecPoolVector;
+    SmallExecPoolVector m_smallAllocationPools;
     static void intializePageSize();
 };
 
 // This constructor can fail due to OOM. If it does, m_freePtr will be
 // set to NULL. 
 inline ExecutablePool::ExecutablePool(size_t n) : m_refCount(1)
 {
     size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);