Bug 631106 - JM: simplify allocation of executable memory. r=dvander.
authorNicholas Nethercote <nnethercote@mozilla.com>
Mon, 07 Mar 2011 20:15:56 -0800
changeset 64243 441bc12e94e24d8d882cc32b9bd2479f83224179
parent 64242 10fb605a55afa80876925bd9a9088b7089f6abdb
child 64244 ff5bb67330c01198a0d0035d59d1d18ad2fb449c
push idunknown
push userunknown
push dateunknown
reviewersdvander
bugs631106
milestone2.0b13pre
Bug 631106 - JM: simplify allocation of executable memory. r=dvander.
js/src/assembler/assembler/ARMAssembler.cpp
js/src/assembler/assembler/ARMAssembler.h
js/src/assembler/assembler/ARMv7Assembler.h
js/src/assembler/assembler/AbstractMacroAssembler.h
js/src/assembler/assembler/AssemblerBuffer.h
js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
js/src/assembler/assembler/LinkBuffer.h
js/src/assembler/assembler/X86Assembler.h
js/src/assembler/jit/ExecutableAllocator.h
js/src/jstracer.cpp
js/src/methodjit/BaseCompiler.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/MethodJIT.cpp
js/src/methodjit/MethodJIT.h
js/src/methodjit/TrampolineCompiler.cpp
js/src/methodjit/TrampolineCompiler.h
js/src/yarr/yarr/RegexJIT.cpp
--- a/js/src/assembler/assembler/ARMAssembler.cpp
+++ b/js/src/assembler/assembler/ARMAssembler.cpp
@@ -408,40 +408,35 @@ inline void ARMAssembler::fixUpOffsets(v
                 }
             }
 #endif
             *addr = reinterpret_cast<ARMWord>(data + *addr);
         }
     }
 }
 
-void* ARMAssembler::executableCopy(ExecutablePool* allocator)
+void* ARMAssembler::executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp)
 {
     // 64-bit alignment is required for next constant pool and JIT code as well
     m_buffer.flushWithoutBarrier(true);
     if (m_buffer.uncheckedSize() & 0x7)
         bkpt(0);
 
-    void * data = m_buffer.executableCopy(allocator);
+    void * data = m_buffer.executableAllocAndCopy(allocator, poolp);
     if (data)
         fixUpOffsets(data);
     return data;
 }
 
 // This just dumps the code into the specified buffer, fixing up absolute
 // offsets and literal pool loads as it goes. The buffer is assumed to be large
 // enough to hold the code, and any pre-existing literal pool is assumed to
 // have been flushed.
-void* ARMAssembler::executableCopy(void * buffer)
+void ARMAssembler::executableCopy(void * buffer)
 {
-    if (m_buffer.oom())
-        return NULL;
-
     ASSERT(m_buffer.sizeOfConstantPool() == 0);
-
     memcpy(buffer, m_buffer.data(), m_buffer.size());
     fixUpOffsets(buffer);
-    return buffer;
 }
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
--- a/js/src/assembler/assembler/ARMAssembler.h
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -967,18 +967,18 @@ namespace JSC {
             return JmpSrc(s);
         }
 
         JmpSrc jmp(Condition cc = AL, int useConstantPool = 0)
         {
             return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
         }
 
-        void* executableCopy(ExecutablePool* allocator);
-        void* executableCopy(void* buffer);
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp);
+        void executableCopy(void* buffer);
         void fixUpOffsets(void* buffer);
 
         // Patching helpers
 
         static ARMWord* getLdrImmAddress(ARMWord* insn)
         {
 #if WTF_ARM_ARCH_AT_LEAST(5)
             // Check for call
--- a/js/src/assembler/assembler/ARMv7Assembler.h
+++ b/js/src/assembler/assembler/ARMv7Assembler.h
@@ -1553,19 +1553,19 @@ public:
     
     // Assembler admin methods:
 
     size_t size() const
     {
         return m_formatter.size();
     }
 
-    void* executableCopy(ExecutablePool* allocator)
+    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp)
     {
-        void* copy = m_formatter.executableCopy(allocator);
+        void* copy = m_formatter.executableAllocAndCopy(allocator, poolp);
 
         unsigned jumpCount = m_jumpsToLink.size();
         for (unsigned i = 0; i < jumpCount; ++i) {
             uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from);
             uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to);
             linkJumpAbsolute(location, target);
         }
         m_jumpsToLink.clear();
@@ -1904,17 +1904,19 @@ private:
         }
 
 
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
-        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp) {
+            return m_buffer.executableAllocAndCopy(allocator, poolp);
+        }
         bool oom() const { return m_buffer.oom(); }
 
     private:
         AssemblerBuffer m_buffer;
     } m_formatter;
 
     Vector<LinkRecord> m_jumpsToLink;
 };
--- a/js/src/assembler/assembler/AbstractMacroAssembler.h
+++ b/js/src/assembler/assembler/AbstractMacroAssembler.h
@@ -466,19 +466,20 @@ public:
         return m_assembler.buffer();
     }
 
     bool oom()
     {
         return m_assembler.oom();
     }
 
-    void* executableCopy(void* buffer)
+    void executableCopy(void* buffer)
     {
-        return m_assembler.executableCopy(buffer);
+        ASSERT(!oom());
+        m_assembler.executableCopy(buffer);
     }
 
     Label label()
     {
         return Label(this);
     }
 
     DataLabel32 dataLabel32()
--- a/js/src/assembler/assembler/AssemblerBuffer.h
+++ b/js/src/assembler/assembler/AssemblerBuffer.h
@@ -132,28 +132,29 @@ namespace JSC {
         {
             return m_oom;
         }
 
         /*
          * The user must check for a NULL return value, which means
          * no code was generated, or there was an OOM.
          */
-        void* executableCopy(ExecutablePool* allocator)
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp)
         {
-            if (m_oom)
+            if (m_oom || m_size == 0) {
+                *poolp = NULL;
                 return 0;
+            }
 
-            if (!m_size)
+            void* result = allocator->alloc(m_size, poolp);
+            if (!result) {
+                *poolp = NULL;
                 return 0;
-
-            void* result = allocator->alloc(m_size);
-
-            if (!result)
-                return 0;
+            }
+            JS_ASSERT(*poolp);
 
             ExecutableAllocator::makeWritable(result, m_size);
 
             return memcpy(result, m_buffer, m_size);
         }
 
         unsigned char *buffer() const {
             ASSERT(!m_oom);
@@ -180,17 +181,17 @@ namespace JSC {
          * users of this class need to check for OOM only at certain points
          * and not after every operation.
          *
          * Our strategy for handling an OOM is to set m_oom, and then set
          * m_size to 0, preserving the current buffer. This way, the user
          * can continue assembling into the buffer, deferring OOM checking
          * until the user wants to read code out of the buffer.
          *
-         * See also the |executableCopy| and |buffer| methods.
+         * See also the |executableAllocAndCopy| and |buffer| methods.
          */
 
         void grow(int extraCapacity = 0)
         {
             int newCapacity = m_capacity + m_capacity / 2 + extraCapacity;
             char* newBuffer;
 
             if (m_buffer == m_inlineBuffer) {
--- a/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -189,20 +189,20 @@ public:
         return AssemblerBuffer::size();
     }
 
     int uncheckedSize()
     {
         return AssemblerBuffer::size();
     }
 
-    void* executableCopy(ExecutablePool* allocator)
+    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp)
     {
         flushConstantPool(false);
-        return AssemblerBuffer::executableCopy(allocator);
+        return AssemblerBuffer::executableAllocAndCopy(allocator, poolp);
     }
 
     void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
     {
         flushIfNoSpaceFor(4, 4);
 
         m_loadOffsets.append(AssemblerBuffer::size());
         if (isReusable)
--- a/js/src/assembler/assembler/LinkBuffer.h
+++ b/js/src/assembler/assembler/LinkBuffer.h
@@ -57,27 +57,27 @@ class LinkBuffer {
     typedef MacroAssembler::Label Label;
     typedef MacroAssembler::Jump Jump;
     typedef MacroAssembler::JumpList JumpList;
     typedef MacroAssembler::Call Call;
     typedef MacroAssembler::DataLabel32 DataLabel32;
     typedef MacroAssembler::DataLabelPtr DataLabelPtr;
 
 public:
-    // Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
-    //       First, executablePool is copied into m_executablePool, then the initialization of
-    //       m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
-    LinkBuffer(MacroAssembler* masm, ExecutablePool* executablePool)
-        : m_executablePool(executablePool)
-        , m_code(executableCopy(*masm, executablePool))
-        , m_size(masm->m_assembler.size())
+    // 'ok' should be checked after this constructor is called; it's false if OOM occurred.
+    LinkBuffer(MacroAssembler* masm, ExecutableAllocator* executableAllocator,
+               ExecutablePool** poolp, bool* ok)
+    {
+        m_code = executableAllocAndCopy(*masm, executableAllocator, poolp);
+        m_executablePool = *poolp;
+        m_size = masm->m_assembler.size();  // must come after call to executableAllocAndCopy()!
 #ifndef NDEBUG
-        , m_completed(false)
+        m_completed = false;
 #endif
-    {
+        *ok = !!m_code;
     }
 
     LinkBuffer()
         : m_executablePool(NULL)
         , m_code(NULL)
         , m_size(0)
 #ifndef NDEBUG
         , m_completed(false)
@@ -192,19 +192,20 @@ public:
 protected:
     // Keep this private! - the underlying code should only be obtained externally via 
     // finalizeCode() or finalizeCodeAddendum().
     void* code()
     {
         return m_code;
     }
 
-    void *executableCopy(MacroAssembler &masm, ExecutablePool *pool)
+    void *executableAllocAndCopy(MacroAssembler &masm, ExecutableAllocator *allocator,
+                                 ExecutablePool **poolp)
     {
-        return masm.m_assembler.executableCopy(pool);
+        return masm.m_assembler.executableAllocAndCopy(allocator, poolp);
     }
 
     void performFinalization()
     {
 #ifndef NDEBUG
         ASSERT(!m_completed);
         m_completed = true;
 #endif
--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/assembler/assembler/X86Assembler.h
@@ -2469,27 +2469,24 @@ public:
         return dst.m_offset - src.m_offset;
     }
     
     static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
     {
         return dst.m_offset - src.m_offset;
     }
     
-    void* executableCopy(ExecutablePool* allocator)
+    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp)
     {
-        void* copy = m_formatter.executableCopy(allocator);
-        return copy;
+        return m_formatter.executableAllocAndCopy(allocator, poolp);
     }
 
-    void* executableCopy(void* buffer)
+    void executableCopy(void* buffer)
     {
-        if (m_formatter.oom())
-            return NULL;
-        return memcpy(buffer, m_formatter.buffer(), size());
+        memcpy(buffer, m_formatter.buffer(), size());
     }
 
 private:
 
     static void setPointer(void* where, void* value)
     {
         js::JaegerSpew(js::JSpew_Insns,
                        ISPFX "##setPtr     ((where=%p)) ((value=%p))\n", where, value);
@@ -2824,17 +2821,19 @@ private:
 
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
         unsigned char *buffer() const { return m_buffer.buffer(); }
         bool oom() const { return m_buffer.oom(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
-        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp) {
+            return m_buffer.executableAllocAndCopy(allocator, poolp);
+        }
 
     private:
 
         // Internals; ModRm and REX formatters.
 
         // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
         inline bool byteRegRequiresRex(int reg)
         {
--- a/js/src/assembler/jit/ExecutableAllocator.h
+++ b/js/src/assembler/jit/ExecutableAllocator.h
@@ -100,107 +100,103 @@ inline size_t roundUpAllocationSize(size
 
 //#define DEBUG_STRESS_JSC_ALLOCATOR
 
 namespace JSC {
 
   // These are reference-counted. A new one (from the constructor or create)
   // starts with a count of 1. 
   class ExecutablePool {
+    friend class ExecutableAllocator;
 private:
     struct Allocation {
         char* pages;
         size_t size;
 #if WTF_PLATFORM_SYMBIAN
         RChunk* chunk;
 #endif
     };
-    typedef js::Vector<Allocation, 2, js::SystemAllocPolicy> AllocationList;
+
+    char* m_freePtr;
+    char* m_end;
+    Allocation m_allocation;
 
     // Reference count for automatic reclamation.
     unsigned m_refCount;
 
 public:
+    // Flag for downstream use, whether to try to release references to this pool.
+    bool m_destroy;
+
+    // GC number in which the m_destroy flag was most recently set. Used downstream to
+    // remember whether m_destroy was computed for the currently active GC.
+    size_t m_gcNumber;
+
+public:
     // It should be impossible for us to roll over, because only small
     // pools have multiple holders, and they have one holder per chunk
     // of generated code, and they only hold 16KB or so of code.
     void addRef()
     {
         JS_ASSERT(m_refCount);
         ++m_refCount;
     }
 
     void release()
     { 
         JS_ASSERT(m_refCount != 0);
         if (--m_refCount == 0)
-            js_delete(this);
+            this->destroy();
     }
 
+private:
     static ExecutablePool* create(size_t n)
     {
         /* We can't (easily) use js_new() here because the constructor is private. */
         void *memory = js_malloc(sizeof(ExecutablePool));
         ExecutablePool *pool = memory ? new(memory) ExecutablePool(n) : NULL;
         if (!pool || !pool->m_freePtr) {
-            js_delete(pool);
+            if (pool) pool->destroy();
             return NULL;
         }
         return pool;
     }
 
     void* alloc(size_t n)
     {
-        JS_ASSERT(m_freePtr <= m_end);
-
-        // Round 'n' up to a multiple of word size; if all allocations are of
-        // word sized quantities, then all subsequent allocations will be aligned.
-        n = roundUpAllocationSize(n, sizeof(void*));
-        if (n == OVERSIZE_ALLOCATION)
-            return NULL;
-
-        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
-            void* result = m_freePtr;
-            m_freePtr += n;
-            return result;
-        }
-
-        // Insufficient space to allocate in the existing pool
-        // so we need allocate into a new pool
-        return poolAllocate(n);
+        JS_ASSERT(n != OVERSIZE_ALLOCATION);
+        JS_ASSERT(n <= available());
+        void *result = m_freePtr;
+        m_freePtr += n;
+        return result;
     }
     
-    ~ExecutablePool()
+    void destroy()
     {
-        Allocation* end = m_pools.end();
-        for (Allocation* ptr = m_pools.begin(); ptr != end; ++ptr)
-            ExecutablePool::systemRelease(*ptr);
+        /* We can't (easily) use js_delete() here because the destructor is private. */
+        this->~ExecutablePool();
+        js_free(this);
     }
 
-    size_t available() const { return (m_pools.length() > 1) ? 0 : m_end - m_freePtr; }
-
-    // Flag for downstream use, whether to try to release references to this pool.
-    bool m_destroy;
+    ~ExecutablePool()
+    {
+        if (m_allocation.pages)
+            ExecutablePool::systemRelease(m_allocation);
+    }
 
-    // GC number in which the m_destroy flag was most recently set. Used downstream to
-    // remember whether m_destroy was computed for the currently active GC.
-    size_t m_gcNumber;
+    size_t available() const { 
+        JS_ASSERT(m_end >= m_freePtr);
+        return m_end - m_freePtr;
+    }
 
-private:
     // On OOM, this will return an Allocation where pages is NULL.
     static Allocation systemAlloc(size_t n);
     static void systemRelease(const Allocation& alloc);
 
     ExecutablePool(size_t n);
-
-    void* poolAllocate(size_t n);
-
-    char* m_freePtr;
-    char* m_end;
-    AllocationList m_pools;
 };
 
 class ExecutableAllocator {
     enum ProtectionSeting { Writable, Executable };
 
     // Initialization can fail so we use a create method instead.
     ExecutableAllocator() {}
 public:
@@ -225,23 +221,45 @@ public:
         JS_ASSERT(allocator->m_smallAllocationPools.empty());
         allocator->m_smallAllocationPools.append(pool);
         return allocator;
     }
 
     ~ExecutableAllocator()
     {
         for (size_t i = 0; i < m_smallAllocationPools.length(); i++)
-            js_delete(m_smallAllocationPools[i]);
+            m_smallAllocationPools[i]->destroy();
     }
 
-    // poolForSize returns reference-counted objects. The caller owns a reference
-    // to the object; i.e., poolForSize increments the count before returning the
-    // object.
+    // alloc() returns a pointer to some memory, and also (by reference) a
+    // pointer to reference-counted pool. The caller owns a reference to the
+    // pool; i.e. alloc() increments the count before returning the object.
+    void* alloc(size_t n, ExecutablePool** poolp)
+    {
+        // Round 'n' up to a multiple of word size; if all allocations are of
+        // word sized quantities, then all subsequent allocations will be
+        // aligned.
+        n = roundUpAllocationSize(n, sizeof(void*));
+        if (n == OVERSIZE_ALLOCATION) {
+            *poolp = NULL;
+            return NULL;
+        }
 
+        *poolp = poolForSize(n);
+        if (!*poolp)
+            return NULL;
+
+        // This alloc is infallible because poolForSize() just obtained
+        // (found, or created if necessary) a pool that had enough space.
+        void *result = (*poolp)->alloc(n);
+        JS_ASSERT(result);
+        return result;
+    }
+
+private:
     ExecutablePool* poolForSize(size_t n)
     {
 #ifndef DEBUG_STRESS_JSC_ALLOCATOR
         // Try to fit in an existing small allocator.  Use the pool with the
         // least available space that is big enough (best-fit).  This is the
         // best strategy because (a) it maximizes the chance of the next
         // allocation fitting in a small pool, and (b) it minimizes the
         // potential waste when a small pool is next abandoned.
@@ -290,16 +308,17 @@ public:
                 pool->addRef();
             }
         }
 
    	    // Pass ownership to the caller.
         return pool;
     }
 
+public:
 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
     static void makeWritable(void* start, size_t size)
     {
         reprotectRegion(start, size, Writable);
     }
 
     static void makeExecutable(void* start, size_t size)
     {
@@ -420,47 +439,18 @@ inline ExecutablePool::ExecutablePool(si
     Allocation mem = systemAlloc(size_t(4294967291));
 #else
     Allocation mem = systemAlloc(allocSize);
 #endif
     if (!mem.pages) {
         m_freePtr = NULL;
         return;
     }
-    if (!m_pools.append(mem)) {
-        systemRelease(mem);
-        m_freePtr = NULL;
-        return;
-    }
+    m_allocation = mem;
     m_freePtr = mem.pages;
     m_end = m_freePtr + allocSize;
 }
 
-inline void* ExecutablePool::poolAllocate(size_t n)
-{
-    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
-    if (allocSize == OVERSIZE_ALLOCATION)
-        return NULL;
-    
-#ifdef DEBUG_STRESS_JSC_ALLOCATOR
-    Allocation result = systemAlloc(size_t(4294967291));
-#else
-    Allocation result = systemAlloc(allocSize);
-#endif
-    if (!result.pages)
-        return NULL;
-    
-    JS_ASSERT(m_end >= m_freePtr);
-    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
-        // Replace allocation pool
-        m_freePtr = result.pages + n;
-        m_end = result.pages + allocSize;
-    }
-
-    m_pools.append(result);
-    return result.pages;
-}
-
 }
 
 #endif // ENABLE(ASSEMBLER)
 
 #endif // !defined(ExecutableAllocator)
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -2510,17 +2510,17 @@ TraceRecorder::finishSuccessfully()
     AUDIT(traceCompleted);
     mark.commit();
 
     /* Grab local copies of members needed after destruction of |this|. */
     JSContext* localcx = cx;
     TraceMonitor* localtm = traceMonitor;
 
     localtm->recorder = NULL;
-    /* We can't (easily) use js_delete() here because the constructor is private. */
+    /* We can't (easily) use js_delete() here because the destructor is private. */
     this->~TraceRecorder();
     js_free(this);
 
     /* Catch OOM that occurred during recording. */
     if (localtm->outOfMemory() || OverfullJITCache(localcx, localtm)) {
         ResetJIT(localcx, localtm, FR_OOM);
         return ARECORD_ABORTED;
     }
@@ -2564,17 +2564,17 @@ TraceRecorder::finishAbort(const char* r
         fragment->root->sideExits.setLength(numSideExitsBefore);
     }
 
     /* Grab local copies of members needed after destruction of |this|. */
     JSContext* localcx = cx;
     TraceMonitor* localtm = traceMonitor;
 
     localtm->recorder = NULL;
-    /* We can't (easily) use js_delete() here because the constructor is private. */
+    /* We can't (easily) use js_delete() here because the destructor is private. */
     this->~TraceRecorder();
     js_free(this);
 
     /* Catch OOM that occurred during recording. */
     if (localtm->outOfMemory() || OverfullJITCache(localcx, localtm)) {
         ResetJIT(localcx, localtm, FR_OOM);
         return JIT_RESET;
     }
--- a/js/src/methodjit/BaseCompiler.h
+++ b/js/src/methodjit/BaseCompiler.h
@@ -93,33 +93,16 @@ class BaseCompiler : public MacroAssembl
     JSContext *cx;
 
   public:
     BaseCompiler() : cx(NULL)
     { }
 
     BaseCompiler(JSContext *cx) : cx(cx)
     { }
-
-  protected:
-
-    JSC::ExecutablePool *
-    getExecPool(JSScript *script, size_t size) {
-        return BaseCompiler::GetExecPool(cx, script, size);
-    }
-
-  public:
-    static JSC::ExecutablePool *
-    GetExecPool(JSContext *cx, JSScript *script, size_t size) {
-        JaegerCompartment *jc = script->compartment->jaegerCompartment;
-        JSC::ExecutablePool *pool = jc->poolForSize(size);
-        if (!pool)
-            js_ReportOutOfMemory(cx);
-        return pool;
-    }
 };
 
 // This class wraps JSC::LinkBuffer for Mozilla-specific memory handling.
 // Every return |false| guarantees an OOM that has been correctly propagated,
 // and should continue to propagate.
 class LinkerHelper : public JSC::LinkBuffer
 {
   protected:
@@ -159,28 +142,25 @@ class LinkerHelper : public JSC::LinkBuf
     bool verifyRange(JITScript *jit) {
         return verifyRange(JSC::JITCode(jit->code.m_code.executableAddress(), jit->code.m_size));
     }
 
     JSC::ExecutablePool *init(JSContext *cx) {
         // The pool is incref'd after this call, so it's necessary to release()
         // on any failure.
         JSScript *script = cx->fp()->script();
-        JSC::ExecutablePool *ep = BaseCompiler::GetExecPool(cx, script, masm.size());
-        if (!ep)
-            return ep;
-
-        m_code = executableCopy(masm, ep);
+        JSC::ExecutableAllocator *allocator = script->compartment->jaegerCompartment->execAlloc();
+        JSC::ExecutablePool *pool;
+        m_code = executableAllocAndCopy(masm, allocator, &pool);
         if (!m_code) {
-            ep->release();
             js_ReportOutOfMemory(cx);
             return NULL;
         }
-        m_size = masm.size();   // must come after the call to executableCopy()
-        return ep;
+        m_size = masm.size();   // must come after call to executableAllocAndCopy()!
+        return pool;
     }
 
     JSC::CodeLocationLabel finalize() {
         masm.finalize(*this);
         return finalizeCodeAddendum();
     }
 
     void maybeLink(MaybeJump jump, JSC::CodeLocationLabel label) {
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -403,28 +403,24 @@ mjit::Compiler::finishThisUp(JITScript *
 #endif
     JaegerSpew(JSpew_Insns, "## Fast code (masm) size = %u, Slow code (stubcc) size = %u.\n", masm.size(), stubcc.size());
 
     size_t totalSize = masm.size() +
                        stubcc.size() +
                        doubleList.length() * sizeof(double) +
                        jumpTableOffsets.length() * sizeof(void *);
 
-    JSC::ExecutablePool *execPool = getExecPool(script, totalSize);
-    if (!execPool) {
+    JSC::ExecutablePool *execPool;
+    uint8 *result =
+        (uint8 *)script->compartment->jaegerCompartment->execAlloc()->alloc(totalSize, &execPool);
+    if (!result) {
         js_ReportOutOfMemory(cx);
         return Compile_Error;
     }
-
-    uint8 *result = (uint8 *)execPool->alloc(totalSize);
-    if (!result) {
-        execPool->release();
-        js_ReportOutOfMemory(cx);
-        return Compile_Error;
-    }
+    JS_ASSERT(execPool);
     JSC::ExecutableAllocator::makeWritable(result, totalSize);
     masm.executableCopy(result);
     stubcc.masm.executableCopy(result + masm.size());
     
     JSC::LinkBuffer fullCode(result, totalSize);
     JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size());
 
     size_t nNmapLive = 0;
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -684,23 +684,23 @@ JS_STATIC_ASSERT(JSVAL_PAYLOAD_MASK == 0
 #  error "Unsupported CPU!"
 #endif
 
 #endif                   /* _MSC_VER */
 
 bool
 JaegerCompartment::Initialize()
 {
-    execAlloc = JSC::ExecutableAllocator::create();
-    if (!execAlloc)
+    execAlloc_ = JSC::ExecutableAllocator::create();
+    if (!execAlloc_)
         return false;
     
-    TrampolineCompiler tc(execAlloc, &trampolines);
+    TrampolineCompiler tc(execAlloc_, &trampolines);
     if (!tc.compile()) {
-        delete execAlloc;
+        delete execAlloc_;
         return false;
     }
 
 #ifdef JS_METHODJIT_PROFILE_STUBS
     for (size_t i = 0; i < STUB_CALLS_FOR_OP_COUNT; ++i)
         StubCallsForOp[i] = 0;
 #endif
 
@@ -708,17 +708,17 @@ JaegerCompartment::Initialize()
 
     return true;
 }
 
 void
 JaegerCompartment::Finish()
 {
     TrampolineCompiler::release(&trampolines);
-    js_delete(execAlloc);
+    js_delete(execAlloc_);
 #ifdef JS_METHODJIT_PROFILE_STUBS
     FILE *fp = fopen("/tmp/stub-profiling", "wt");
 # define OPDEF(op,val,name,image,length,nuses,ndefs,prec,format) \
     fprintf(fp, "%03d %s %d\n", val, #op, StubCallsForOp[val]);
 # include "jsopcode.tbl"
 # undef OPDEF
     fclose(fp);
 #endif
--- a/js/src/methodjit/MethodJIT.h
+++ b/js/src/methodjit/MethodJIT.h
@@ -166,29 +166,29 @@ struct Trampolines {
 
 /*
  * Method JIT compartment data. Currently, there is exactly one per
  * JS compartment. It would be safe for multiple JS compartments to
  * share a JaegerCompartment as long as only one thread can enter
  * the JaegerCompartment at a time.
  */
 class JaegerCompartment {
-    JSC::ExecutableAllocator *execAlloc;     // allocator for jit code
+    JSC::ExecutableAllocator *execAlloc_;    // allocator for jit code
     Trampolines              trampolines;    // force-return trampolines
     VMFrame                  *activeFrame_;  // current active VMFrame
 
     void Finish();
 
   public:
     bool Initialize();
 
     ~JaegerCompartment() { Finish(); }
 
-    JSC::ExecutablePool *poolForSize(size_t size) {
-        return execAlloc->poolForSize(size);
+    JSC::ExecutableAllocator *execAlloc() {
+        return execAlloc_;
     }
 
     VMFrame *activeFrame() {
         return activeFrame_;
     }
 
     void pushActiveFrame(VMFrame *f) {
         f->previous = activeFrame_;
--- a/js/src/methodjit/TrampolineCompiler.cpp
+++ b/js/src/methodjit/TrampolineCompiler.cpp
@@ -81,30 +81,29 @@ TrampolineCompiler::release(Trampolines 
 {
     RELEASE(tramps->forceReturn, tramps->forceReturnPool);
 #if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
     RELEASE(tramps->forceReturnFast, tramps->forceReturnFastPool);
 #endif
 }
 
 bool
-TrampolineCompiler::compileTrampoline(Trampolines::TrampolinePtr *where, JSC::ExecutablePool **pool,
-                                      TrampolineGenerator generator)
+TrampolineCompiler::compileTrampoline(Trampolines::TrampolinePtr *where,
+                                      JSC::ExecutablePool **poolp, TrampolineGenerator generator)
 {
     Assembler masm;
 
     Label entry = masm.label();
     CHECK_RESULT(generator(masm));
     JS_ASSERT(entry.isValid());
 
-    *pool = execPool->poolForSize(masm.size());
-    if (!*pool)
+    bool ok;
+    JSC::LinkBuffer buffer(&masm, execAlloc, poolp, &ok);
+    if (!ok)
         return false;
-
-    JSC::LinkBuffer buffer(&masm, *pool);
     masm.finalize(buffer);
     uint8 *result = (uint8*)buffer.finalizeCodeAddendum().dataLocation();
     *where = JS_DATA_TO_FUNC_PTR(Trampolines::TrampolinePtr, result + masm.distanceOf(entry));
 
     return true;
 }
 
 /*
--- a/js/src/methodjit/TrampolineCompiler.h
+++ b/js/src/methodjit/TrampolineCompiler.h
@@ -47,35 +47,35 @@
 namespace js {
 namespace mjit {
 
 class TrampolineCompiler
 {
     typedef bool (*TrampolineGenerator)(Assembler &masm);
 
 public:
-    TrampolineCompiler(JSC::ExecutableAllocator *pool, Trampolines *tramps)
-      : execPool(pool), trampolines(tramps)
+    TrampolineCompiler(JSC::ExecutableAllocator *alloc, Trampolines *tramps)
+      : execAlloc(alloc), trampolines(tramps)
     { }
 
     bool compile();
     static void release(Trampolines *tramps);
 
 private:
     bool compileTrampoline(Trampolines::TrampolinePtr *where, JSC::ExecutablePool **pool,
                            TrampolineGenerator generator);
     
     /* Generators for trampolines. */
     static bool generateForceReturn(Assembler &masm);
 
 #if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
     static bool generateForceReturnFast(Assembler &masm);
 #endif
 
-    JSC::ExecutableAllocator *execPool;
+    JSC::ExecutableAllocator *execAlloc;
     Trampolines *trampolines;
 };
 
 } /* namespace mjit */
 } /* namespace js */
 
 #endif
 
--- a/js/src/yarr/yarr/RegexJIT.cpp
+++ b/js/src/yarr/yarr/RegexJIT.cpp
@@ -1501,24 +1501,24 @@ public:
     {
         generate();
 
         if (oom()) {
             m_shouldFallBack = true;
             return;
         }
 
-        ExecutablePool *executablePool = allocator.poolForSize(size());
-        if (!executablePool) {
+        ExecutablePool *dummy;
+        bool ok;
+        LinkBuffer patchBuffer(this, &allocator, &dummy, &ok);
+        if (!ok) {
             m_shouldFallBack = true;
             return;
         }
 
-        LinkBuffer patchBuffer(this, executablePool);
-
         for (unsigned i = 0; i < m_backtrackRecords.length(); ++i)
             patchBuffer.patch(m_backtrackRecords[i].dataLabel, patchBuffer.locationOf(m_backtrackRecords[i].backtrackLocation));
 
         jitObject.set(patchBuffer.finalizeCode());
     }
 
     bool shouldFallBack()
     {