Bug 673158 - Separate regexp JIT code and normal mjit code in about:memory. code=sandervv,Bas.Weelinck, r=nnethercote.
authorSander van Veen <sandervv@gmail.com>
Tue, 30 Aug 2011 17:21:36 -0700
changeset 76732 d9bbe2d0b569c027984610aaedc3fb93c1b846aa
parent 76731 8c9b24fb14872d5fcc1d24f1c99313983b293314
child 76733 d08db920b9863549b853380a18f0ff5002542625
push id: unknown
push user: unknown
push date: unknown
reviewers: nnethercote
bugs: 673158
milestone9.0a1
Bug 673158 - Separate regexp JIT code and normal mjit code in about:memory. code=sandervv,Bas.Weelinck, r=nnethercote.
js/src/assembler/TestMain.cpp
js/src/assembler/assembler/ARMAssembler.cpp
js/src/assembler/assembler/ARMAssembler.h
js/src/assembler/assembler/ARMv7Assembler.h
js/src/assembler/assembler/AssemblerBuffer.h
js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
js/src/assembler/assembler/LinkBuffer.h
js/src/assembler/assembler/SparcAssembler.h
js/src/assembler/assembler/X86Assembler.h
js/src/assembler/jit/ExecutableAllocator.cpp
js/src/assembler/jit/ExecutableAllocator.h
js/src/assembler/jit/ExecutableAllocatorOS2.cpp
js/src/assembler/jit/ExecutableAllocatorWin.cpp
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/methodjit/BaseCompiler.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/MonoIC.cpp
js/src/methodjit/PolyIC.cpp
js/src/methodjit/StubCompiler.cpp
js/src/methodjit/TrampolineCompiler.cpp
js/src/shell/js.cpp
js/src/xpconnect/src/xpcjsruntime.cpp
js/src/xpconnect/src/xpcpublic.h
js/src/yarr/YarrJIT.cpp
--- a/js/src/assembler/TestMain.cpp
+++ b/js/src/assembler/TestMain.cpp
@@ -105,17 +105,17 @@ void test1 ( void )
   JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
 
   // intermediate step .. get the pool suited for the size of code in 'am'
   //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
   JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
 
   // constructor for LinkBuffer asks ep to allocate r-x memory,
   // then copies it there.
-  JSC::LinkBuffer patchBuffer(am, ep);
+  JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
 
   // finalize
   JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
 
   // cr now holds a pointer to the final runnable code.
   void* entry = cr.m_code.executableAddress();
 
   printf("disas %p %p\n",
@@ -261,17 +261,17 @@ void test2 ( void )
   JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
 
   // intermediate step .. get the pool suited for the size of code in 'am'
   //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
   JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
 
   // constructor for LinkBuffer asks ep to allocate r-x memory,
   // then copies it there.
-  JSC::LinkBuffer patchBuffer(am, ep);
+  JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
 
   // finalize
   JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
 
   // cr now holds a pointer to the final runnable code.
   void* entry = cr.m_code.executableAddress();
 
   printf("disas %p %p\n",
@@ -448,17 +448,17 @@ void test3 ( void )
   JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
 
   // intermediate step .. get the pool suited for the size of code in 'am'
   //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
   JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
 
   // constructor for LinkBuffer asks ep to allocate r-x memory,
   // then copies it there.
-  JSC::LinkBuffer patchBuffer(am, ep);
+  JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
 
   // finalize
   JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
 
   // cr now holds a pointer to the final runnable code.
   void* entry = cr.m_code.executableAddress();
 
   printf("disas %p %p\n",
@@ -658,17 +658,17 @@ void test4 ( void )
   JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
 
   // intermediate step .. get the pool suited for the size of code in 'am'
   //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
   JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
 
   // constructor for LinkBuffer asks ep to allocate r-x memory,
   // then copies it there.
-  JSC::LinkBuffer patchBuffer(am, ep);
+  JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
 
   // now fix up any branches/calls
   //JSC::FunctionPtr target = JSC::FunctionPtr::FunctionPtr( &cube );
 
   // finalize
   JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
 
   // cr now holds a pointer to the final runnable code.
@@ -864,17 +864,17 @@ void test5 ( void )
   JSC::ExecutableAllocator* eal = new JSC::ExecutableAllocator();
 
   // intermediate step .. get the pool suited for the size of code in 'am'
   //WTF::PassRefPtr<JSC::ExecutablePool> ep = eal->poolForSize( am->size() );
   JSC::ExecutablePool* ep = eal->poolForSize( am->size() );
 
   // constructor for LinkBuffer asks ep to allocate r-x memory,
   // then copies it there.
-  JSC::LinkBuffer patchBuffer(am, ep);
+  JSC::LinkBuffer patchBuffer(am, ep, JSC::METHOD_CODE);
 
   // now fix up any branches/calls
   JSC::FunctionPtr target = JSC::FunctionPtr::FunctionPtr( &cube );
   patchBuffer.link(cl, target);
 
   JSC::MacroAssemblerCodeRef cr = patchBuffer.finalizeCode();
 
   // cr now holds a pointer to the final runnable code.
--- a/js/src/assembler/assembler/ARMAssembler.cpp
+++ b/js/src/assembler/assembler/ARMAssembler.cpp
@@ -629,24 +629,24 @@ inline void ARMAssembler::fixUpOffsets(v
                 }
             }
 #endif
             *addr = reinterpret_cast<ARMWord>(data + *addr);
         }
     }
 }
 
-void* ARMAssembler::executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp)
+void* ARMAssembler::executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp, CodeKind kind)
 {
     // 64-bit alignment is required for next constant pool and JIT code as well
     m_buffer.flushWithoutBarrier(true);
     if (m_buffer.uncheckedSize() & 0x7)
         bkpt(0);
 
-    void * data = m_buffer.executableAllocAndCopy(allocator, poolp);
+    void * data = m_buffer.executableAllocAndCopy(allocator, poolp, kind);
     if (data)
         fixUpOffsets(data);
     return data;
 }
 
 // This just dumps the code into the specified buffer, fixing up absolute
 // offsets and literal pool loads as it goes. The buffer is assumed to be large
 // enough to hold the code, and any pre-existing literal pool is assumed to
--- a/js/src/assembler/assembler/ARMAssembler.h
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -982,17 +982,17 @@ namespace JSC {
             return JmpSrc(s);
         }
 
         JmpSrc jmp(Condition cc = AL, int useConstantPool = 0)
         {
             return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
         }
 
-        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp);
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp, CodeKind kind);
         void executableCopy(void* buffer);
         void fixUpOffsets(void* buffer);
 
         // Patching helpers
 
         static ARMWord* getLdrImmAddress(ARMWord* insn)
         {
 #if WTF_CPU_ARM && WTF_ARM_ARCH_VERSION >= 5
--- a/js/src/assembler/assembler/ARMv7Assembler.h
+++ b/js/src/assembler/assembler/ARMv7Assembler.h
@@ -1553,19 +1553,19 @@ public:
     
     // Assembler admin methods:
 
     size_t size() const
     {
         return m_formatter.size();
     }
 
-    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp)
+    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind)
     {
-        void* copy = m_formatter.executableAllocAndCopy(allocator, poolp);
+        void* copy = m_formatter.executableAllocAndCopy(allocator, poolp, kind);
 
         unsigned jumpCount = m_jumpsToLink.size();
         for (unsigned i = 0; i < jumpCount; ++i) {
             uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from);
             uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to);
             linkJumpAbsolute(location, target);
         }
         m_jumpsToLink.clear();
@@ -1904,18 +1904,18 @@ private:
         }
 
 
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
-        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp) {
-            return m_buffer.executableAllocAndCopy(allocator, poolp);
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind) {
+            return m_buffer.executableAllocAndCopy(allocator, poolp, kind);
         }
         bool oom() const { return m_buffer.oom(); }
 
     private:
         AssemblerBuffer m_buffer;
     } m_formatter;
 
     Vector<LinkRecord> m_jumpsToLink;
--- a/js/src/assembler/assembler/AssemblerBuffer.h
+++ b/js/src/assembler/assembler/AssemblerBuffer.h
@@ -132,24 +132,24 @@ namespace JSC {
         {
             return m_oom;
         }
 
         /*
          * The user must check for a NULL return value, which means
          * no code was generated, or there was an OOM.
          */
-        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp)
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind)
         {
             if (m_oom || m_size == 0) {
                 *poolp = NULL;
                 return 0;
             }
 
-            void* result = allocator->alloc(m_size, poolp);
+            void* result = allocator->alloc(m_size, poolp, kind);
             if (!result) {
                 *poolp = NULL;
                 return 0;
             }
             JS_ASSERT(*poolp);
 
             ExecutableAllocator::makeWritable(result, m_size);
 
--- a/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -189,20 +189,20 @@ public:
         return AssemblerBuffer::size();
     }
 
     int uncheckedSize()
     {
         return AssemblerBuffer::size();
     }
 
-    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp)
+    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind)
     {
         flushConstantPool(false);
-        return AssemblerBuffer::executableAllocAndCopy(allocator, poolp);
+        return AssemblerBuffer::executableAllocAndCopy(allocator, poolp, kind);
     }
 
     void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false)
     {
         flushIfNoSpaceFor(4, 4);
 
         m_loadOffsets.append(AssemblerBuffer::size());
         if (isReusable)
--- a/js/src/assembler/assembler/LinkBuffer.h
+++ b/js/src/assembler/assembler/LinkBuffer.h
@@ -59,41 +59,44 @@ class LinkBuffer {
     typedef MacroAssembler::JumpList JumpList;
     typedef MacroAssembler::Call Call;
     typedef MacroAssembler::DataLabel32 DataLabel32;
     typedef MacroAssembler::DataLabelPtr DataLabelPtr;
 
 public:
     // 'ok' should be checked after this constructor is called;  it's false if OOM occurred.
     LinkBuffer(MacroAssembler* masm, ExecutableAllocator* executableAllocator,
-               ExecutablePool** poolp, bool* ok)
+               ExecutablePool** poolp, bool* ok, CodeKind codeKind)
     {
+        m_codeKind = codeKind;
         m_code = executableAllocAndCopy(*masm, executableAllocator, poolp);
         m_executablePool = *poolp;
         m_size = masm->m_assembler.size();  // must come after call to executableAllocAndCopy()!
 #ifndef NDEBUG
         m_completed = false;
 #endif
         *ok = !!m_code;
     }
 
-    LinkBuffer()
+    LinkBuffer(CodeKind kind)
         : m_executablePool(NULL)
         , m_code(NULL)
         , m_size(0)
+        , m_codeKind(kind)
 #ifndef NDEBUG
         , m_completed(false)
 #endif
     {
     }
 
-    LinkBuffer(uint8* ncode, size_t size)
+    LinkBuffer(uint8* ncode, size_t size, CodeKind kind)
         : m_executablePool(NULL)
         , m_code(ncode)
         , m_size(size)
+        , m_codeKind(kind)
 #ifndef NDEBUG
         , m_completed(false)
 #endif
     {
     }
 
     ~LinkBuffer()
     {
@@ -195,33 +198,34 @@ protected:
     void* code()
     {
         return m_code;
     }
 
     void *executableAllocAndCopy(MacroAssembler &masm, ExecutableAllocator *allocator,
                                  ExecutablePool **poolp)
     {
-        return masm.m_assembler.executableAllocAndCopy(allocator, poolp);
+        return masm.m_assembler.executableAllocAndCopy(allocator, poolp, m_codeKind);
     }
 
     void performFinalization()
     {
 #ifndef NDEBUG
         ASSERT(!m_completed);
         m_completed = true;
 #endif
 
         ExecutableAllocator::makeExecutable(code(), m_size);
         ExecutableAllocator::cacheFlush(code(), m_size);
     }
 
     ExecutablePool* m_executablePool;
     void* m_code;
     size_t m_size;
+    CodeKind m_codeKind;
 #ifndef NDEBUG
     bool m_completed;
 #endif
 };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
--- a/js/src/assembler/assembler/SparcAssembler.h
+++ b/js/src/assembler/assembler/SparcAssembler.h
@@ -1040,19 +1040,19 @@ namespace JSC {
     
         static void* getRelocatedAddress(void* code, JmpDst destination)
         {
             ASSERT(destination.m_offset != -1);
 
             return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
         }
 
-        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp)
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp, CodeKind kind)
         {
-            return m_buffer.executableAllocAndCopy(allocator, poolp);
+            return m_buffer.executableAllocAndCopy(allocator, poolp, kind);
         }
 
         void* executableCopy(void* buffer)
         {
             return memcpy(buffer, m_buffer.buffer(), size());
         }
 
         static void patchPointerInternal(void* where, int value)
--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/assembler/assembler/X86Assembler.h
@@ -2486,19 +2486,19 @@ public:
         return dst.m_offset - src.m_offset;
     }
     
     static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
     {
         return dst.m_offset - src.m_offset;
     }
     
-    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp)
+    void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp, CodeKind kind)
     {
-        return m_formatter.executableAllocAndCopy(allocator, poolp);
+        return m_formatter.executableAllocAndCopy(allocator, poolp, kind);
     }
 
     void executableCopy(void* buffer)
     {
         memcpy(buffer, m_formatter.buffer(), size());
     }
 
 private:
@@ -2838,18 +2838,18 @@ private:
 
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
         unsigned char *buffer() const { return m_buffer.buffer(); }
         bool oom() const { return m_buffer.oom(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
-        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp) {
-            return m_buffer.executableAllocAndCopy(allocator, poolp);
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind) {
+            return m_buffer.executableAllocAndCopy(allocator, poolp, kind);
         }
 
     private:
 
         // Internals; ModRm and REX formatters.
 
         // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
         inline bool byteRegRequiresRex(int reg)
--- a/js/src/assembler/jit/ExecutableAllocator.cpp
+++ b/js/src/assembler/jit/ExecutableAllocator.cpp
@@ -32,22 +32,26 @@ namespace JSC {
 size_t ExecutableAllocator::pageSize = 0;
 size_t ExecutableAllocator::largeAllocSize = 0;
 
 ExecutablePool::~ExecutablePool()
 {
     m_allocator->releasePoolPages(this);
 }
 
-size_t
-ExecutableAllocator::getCodeSize() const
+void
+ExecutableAllocator::getCodeStats(size_t& method, size_t& regexp, size_t& unused) const
 {
-    size_t n = 0;
+    method = 0;
+    regexp = 0;
+    unused = 0;
+
     for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
         ExecutablePool* pool = r.front();
-        n += pool->m_allocation.size;
+        method += pool->m_mjitCodeMethod;
+        regexp += pool->m_mjitCodeRegexp;
+        unused += pool->m_allocation.size - pool->m_mjitCodeMethod - pool->m_mjitCodeRegexp;
     }
-    return n;
 }
 
 }
 
 #endif // HAVE(ASSEMBLER)
--- a/js/src/assembler/jit/ExecutableAllocator.h
+++ b/js/src/assembler/jit/ExecutableAllocator.h
@@ -76,16 +76,18 @@ extern  "C" void sync_instruction_memory
 #if ENABLE_ASSEMBLER
 
 //#define DEBUG_STRESS_JSC_ALLOCATOR
 
 namespace JSC {
 
   class ExecutableAllocator;
 
+  enum CodeKind { METHOD_CODE, REGEXP_CODE };
+
   // These are reference-counted. A new one starts with a count of 1. 
   class ExecutablePool {
 
     JS_DECLARE_ALLOCATION_FRIENDS_FOR_PRIVATE_CONSTRUCTOR;
     friend class ExecutableAllocator;
 private:
     struct Allocation {
         char* pages;
@@ -97,16 +99,20 @@ private:
 
     ExecutableAllocator* m_allocator;
     char* m_freePtr;
     char* m_end;
     Allocation m_allocation;
 
     // Reference count for automatic reclamation.
     unsigned m_refCount;
+ 
+    // Number of bytes currently used for Method and Regexp JIT code.
+    size_t m_mjitCodeMethod;
+    size_t m_mjitCodeRegexp;
 
 public:
     // Flag for downstream use, whether to try to release references to this pool.
     bool m_destroy;
 
     // GC number in which the m_destroy flag was most recently set. Used downstream to
     // remember whether m_destroy was computed for the currently active GC.
     size_t m_gcNumber;
@@ -128,26 +134,32 @@ private:
     void addRef()
     {
         JS_ASSERT(m_refCount);
         ++m_refCount;
     }
 
     ExecutablePool(ExecutableAllocator* allocator, Allocation a)
       : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
-        m_refCount(1), m_destroy(false), m_gcNumber(0)
+        m_refCount(1), m_mjitCodeMethod(0), m_mjitCodeRegexp(0), m_destroy(false), m_gcNumber(0)
     { }
 
     ~ExecutablePool();
 
-    void* alloc(size_t n)
+    void* alloc(size_t n, CodeKind kind)
     {
         JS_ASSERT(n <= available());
         void *result = m_freePtr;
         m_freePtr += n;
+
+        if ( kind == REGEXP_CODE )
+            m_mjitCodeRegexp += n;
+        else
+            m_mjitCodeMethod += n;
+
         return result;
     }
     
     size_t available() const { 
         JS_ASSERT(m_end >= m_freePtr);
         return m_end - m_freePtr;
     }
 };
@@ -180,45 +192,45 @@ public:
             m_smallPools[i]->release(/* willDestroy = */true);
         // XXX: temporarily disabled because it fails;  see bug 654820.
         //JS_ASSERT(m_pools.empty());     // if this asserts we have a pool leak
     }
 
     // alloc() returns a pointer to some memory, and also (by reference) a
     // pointer to reference-counted pool. The caller owns a reference to the
     // pool; i.e. alloc() increments the count before returning the object.
-    void* alloc(size_t n, ExecutablePool** poolp)
+    void* alloc(size_t n, ExecutablePool** poolp, CodeKind type)
     {
         // Round 'n' up to a multiple of word size; if all allocations are of
         // word sized quantities, then all subsequent allocations will be
         // aligned.
         n = roundUpAllocationSize(n, sizeof(void*));
         if (n == OVERSIZE_ALLOCATION) {
             *poolp = NULL;
             return NULL;
         }
 
         *poolp = poolForSize(n);
         if (!*poolp)
             return NULL;
 
         // This alloc is infallible because poolForSize() just obtained
         // (found, or created if necessary) a pool that had enough space.
-        void *result = (*poolp)->alloc(n);
+        void *result = (*poolp)->alloc(n, type);
         JS_ASSERT(result);
         return result;
     }
 
     void releasePoolPages(ExecutablePool *pool) {
         JS_ASSERT(pool->m_allocation.pages);
         systemRelease(pool->m_allocation);
         m_pools.remove(m_pools.lookup(pool));   // this asserts if |pool| is not in m_pools
     }
 
-    size_t getCodeSize() const;
+    void getCodeStats(size_t& method, size_t& regexp, size_t& unused) const;
 
 private:
     static size_t pageSize;
     static size_t largeAllocSize;
 
     static const size_t OVERSIZE_ALLOCATION = size_t(-1);
 
     static size_t roundUpAllocationSize(size_t request, size_t granularity)
@@ -353,17 +365,17 @@ public:
     {
 #if WTF_COMPILER_GCC && (GCC_VERSION >= 40300)
 #if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
         int lineSize;
         asm("rdhwr %0, $1" : "=r" (lineSize));
         //
         // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
         // mips_expand_synci_loop that may execute synci one more time.
-        // "start" points to the fisrt byte of the cache line.
+        // "start" points to the first byte of the cache line.
         // "end" points to the last byte of the line before the last cache line.
         // Because size is always a multiple of 4, this is safe to set
         // "end" to the last byte.
         //
         intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
         intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
         __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
 #else
--- a/js/src/assembler/jit/ExecutableAllocatorOS2.cpp
+++ b/js/src/assembler/jit/ExecutableAllocatorOS2.cpp
@@ -39,17 +39,17 @@ size_t ExecutableAllocator::determinePag
 }
 
 ExecutablePool::Allocation ExecutableAllocator::systemAlloc(size_t n)
 {
     void* allocation = NULL;
     if (DosAllocMem(&allocation, n, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) &&
         DosAllocMem(&allocation, n, PAG_COMMIT|PAG_READ|PAG_WRITE))
         CRASH();
-    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocation), n};
+    ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
     return alloc;
 }
 
 void ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
 {
     DosFreeMem(alloc.pages);
 }
 
--- a/js/src/assembler/jit/ExecutableAllocatorWin.cpp
+++ b/js/src/assembler/jit/ExecutableAllocatorWin.cpp
@@ -37,17 +37,17 @@ size_t ExecutableAllocator::determinePag
     SYSTEM_INFO system_info;
     GetSystemInfo(&system_info);
     return system_info.dwPageSize;
 }
 
 ExecutablePool::Allocation ExecutableAllocator::systemAlloc(size_t n)
 {
     void *allocation = VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
-    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocation), n};
+    ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
     return alloc;
 }
 
 void ExecutableAllocator::systemRelease(const ExecutablePool::Allocation& alloc)
 { 
     VirtualFree(alloc.pages, 0, MEM_RELEASE); 
 }
 
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -45,16 +45,17 @@
 #include "jsiter.h"
 #include "jsmath.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 #include "jstracer.h"
 #include "jswatchpoint.h"
 #include "jswrapper.h"
 #include "assembler/wtf/Platform.h"
+#include "assembler/jit/ExecutableAllocator.h"
 #include "yarr/BumpPointerAllocator.h"
 #include "methodjit/MethodJIT.h"
 #include "methodjit/PolyIC.h"
 #include "methodjit/MonoIC.h"
 #include "vm/Debugger.h"
 
 #include "jsgcinlines.h"
 #include "jsscopeinlines.h"
@@ -171,20 +172,26 @@ JSCompartment::ensureJaegerCompartmentEx
     if (!jc->Initialize()) {
         cx->delete_(jc);
         return false;
     }
     jaegerCompartment_ = jc;
     return true;
 }
 
-size_t
-JSCompartment::getMjitCodeSize() const
+void
+JSCompartment::getMjitCodeStats(size_t& method, size_t& regexp, size_t& unused) const
 {
-    return jaegerCompartment_ ? jaegerCompartment_->execAlloc()->getCodeSize() : 0;
+    if (jaegerCompartment_) {
+        jaegerCompartment_->execAlloc()->getCodeStats(method, regexp, unused);
+    } else {
+        method = 0;
+        regexp = 0;
+        unused = 0;
+    }
 }
 #endif
 
 bool
 JSCompartment::arenaListsAreEmpty()
 {
   for (unsigned i = 0; i < FINALIZE_LIMIT; i++) {
        if (!arenas[i].isEmpty())
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -450,17 +450,17 @@ struct JS_FRIEND_API(JSCompartment) {
 
     js::mjit::JaegerCompartment *jaegerCompartment() const {
         JS_ASSERT(jaegerCompartment_);
         return jaegerCompartment_;
     }
 
     bool ensureJaegerCompartmentExists(JSContext *cx);
 
-    size_t getMjitCodeSize() const;
+    void getMjitCodeStats(size_t& method, size_t& regexp, size_t& unused) const;
 #endif
     WTF::BumpPointerAllocator    *regExpAllocator;
 
     /*
      * Shared scope property tree, and arena-pool for allocating its nodes.
      */
     js::PropertyTree             propertyTree;
 
--- a/js/src/methodjit/BaseCompiler.h
+++ b/js/src/methodjit/BaseCompiler.h
@@ -107,17 +107,18 @@ class LinkerHelper : public JSC::LinkBuf
 {
   protected:
     Assembler &masm;
 #ifdef DEBUG
     bool verifiedRange;
 #endif
 
   public:
-    LinkerHelper(Assembler &masm) : masm(masm)
+    LinkerHelper(Assembler &masm, JSC::CodeKind kind) : JSC::LinkBuffer(kind)
+        , masm(masm)
 #ifdef DEBUG
         , verifiedRange(false)
 #endif
     { }
 
     ~LinkerHelper() {
         JS_ASSERT(verifiedRange);
     }
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -855,29 +855,29 @@ mjit::Compiler::finishThisUp(JITScript *
 
     size_t codeSize = masm.size() +
                       stubcc.size() +
                       (masm.numDoubles() * sizeof(double)) +
                       (stubcc.masm.numDoubles() * sizeof(double)) +
                       jumpTableOffsets.length() * sizeof(void *);
 
     JSC::ExecutablePool *execPool;
-    uint8 *result =
-        (uint8 *)script->compartment->jaegerCompartment()->execAlloc()->alloc(codeSize, &execPool);
+    uint8 *result = (uint8 *)script->compartment->jaegerCompartment()->execAlloc()->
+                    alloc(codeSize, &execPool, JSC::METHOD_CODE);
     if (!result) {
         js_ReportOutOfMemory(cx);
         return Compile_Error;
     }
     JS_ASSERT(execPool);
     JSC::ExecutableAllocator::makeWritable(result, codeSize);
     masm.executableCopy(result);
     stubcc.masm.executableCopy(result + masm.size());
 
-    JSC::LinkBuffer fullCode(result, codeSize);
-    JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size());
+    JSC::LinkBuffer fullCode(result, codeSize, JSC::METHOD_CODE);
+    JSC::LinkBuffer stubCode(result + masm.size(), stubcc.size(), JSC::METHOD_CODE);
 
     size_t nNmapLive = loopEntries.length();
     for (size_t i = 0; i < script->length; i++) {
         Bytecode *opinfo = analysis->maybeCode(i);
         if (opinfo && opinfo->safePoint) {
             /* loopEntries cover any safe points which are at loop heads. */
             if (!cx->typeInferenceEnabled() || !opinfo->loopHead)
                 nNmapLive++;
--- a/js/src/methodjit/MonoIC.cpp
+++ b/js/src/methodjit/MonoIC.cpp
@@ -219,17 +219,17 @@ AttachSetGlobalNameStub(VMFrame &f, ic::
 
     /* If the object test fails, shapeReg is still obj->slots. */
     isNotObject.linkTo(masm.label(), &masm);
     DataLabel32 store = masm.storeValueWithAddressOffsetPatch(ic->vr, slot);
 
     Jump done = masm.jump();
 
     JITScript *jit = f.jit();
-    LinkerHelper linker(masm);
+    LinkerHelper linker(masm, JSC::METHOD_CODE);
     JSC::ExecutablePool *ep = linker.init(f.cx);
     if (!ep)
         return Lookup_Error;
     if (!jit->execPools.append(ep)) {
         ep->release();
         js_ReportOutOfMemory(f.cx);
         return Lookup_Error;
     }
@@ -335,17 +335,17 @@ ic::SetGlobalName(VMFrame &f, ic::SetGlo
 }
 
 class EqualityICLinker : public LinkerHelper
 {
     VMFrame &f;
 
   public:
     EqualityICLinker(Assembler &masm, VMFrame &f)
-        : LinkerHelper(masm), f(f)
+        : LinkerHelper(masm, JSC::METHOD_CODE), f(f)
     { }
 
     bool init(JSContext *cx) {
         JSC::ExecutablePool *pool = LinkerHelper::init(cx);
         if (!pool)
             return false;
         JS_ASSERT(!f.regs.inlined());
         JSScript *script = f.fp()->script();
@@ -696,34 +696,34 @@ class CallCompiler : public BaseCompiler
 
         /* Get nmap[ARITY], set argc, call. */
         if (ic.frameSize.isStatic())
             masm.move(Imm32(ic.frameSize.staticArgc()), JSParamReg_Argc);
         else
             masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), JSParamReg_Argc);
         masm.jump(t0);
 
-        LinkerHelper linker(masm);
+        LinkerHelper linker(masm, JSC::METHOD_CODE);
         JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_ScriptStub);
         if (!ep)
             return false;
 
         if (!linker.verifyRange(from)) {
             disable(from);
             return true;
         }
 
         linker.link(notCompiled, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
         JSC::CodeLocationLabel cs = linker.finalize();
 
         JaegerSpew(JSpew_PICs, "generated CALL stub %p (%lu bytes)\n", cs.executableAddress(),
                    (unsigned long) masm.size());
 
         if (f.regs.inlined()) {
-            JSC::LinkBuffer code((uint8 *) cs.executableAddress(), masm.size());
+            JSC::LinkBuffer code((uint8 *) cs.executableAddress(), masm.size(), JSC::METHOD_CODE);
             code.patch(inlined, f.regs.inlined());
         }
 
         Repatcher repatch(from);
         JSC::CodeLocationJump oolJump = ic.slowPathStart.jumpAtOffset(ic.oolJumpOffset);
         repatch.relink(oolJump, cs);
 
         return true;
@@ -778,17 +778,17 @@ class CallCompiler : public BaseCompiler
         Jump claspGuard = masm.testObjClass(Assembler::NotEqual, ic.funObjReg, &js_FunctionClass);
 
         /* Guard that it's the same function. */
         JSFunction *fun = obj->getFunctionPrivate();
         masm.loadObjPrivate(ic.funObjReg, t0);
         Jump funGuard = masm.branchPtr(Assembler::NotEqual, t0, ImmPtr(fun));
         Jump done = masm.jump();
 
-        LinkerHelper linker(masm);
+        LinkerHelper linker(masm, JSC::METHOD_CODE);
         JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_ClosureStub);
         if (!ep)
             return false;
 
         ic.hasJsFunCheck = true;
 
         if (!linker.verifyRange(from)) {
             disable(from);
@@ -1032,17 +1032,17 @@ class CallCompiler : public BaseCompiler
         }
 
         /* Move JaegerThrowpoline into register for very far jump on x64. */
         hasException.linkTo(masm.label(), &masm);
         if (cx->typeInferenceEnabled())
             masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
         masm.throwInJIT();
 
-        LinkerHelper linker(masm);
+        LinkerHelper linker(masm, JSC::METHOD_CODE);
         JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_NativeStub);
         if (!ep)
             THROWV(true);
 
         ic.fastGuardedNative = obj;
 
         if (!linker.verifyRange(jit)) {
             disable(jit);
@@ -1331,17 +1331,17 @@ ic::GenerateArgumentCheckStub(VMFrame &f
         types::TypeSet *types = types::TypeScript::ArgTypes(script, i);
         Address address(JSFrameReg, StackFrame::offsetOfFormalArg(fun, i));
         if (!masm.generateTypeCheck(f.cx, address, types, &mismatches))
             return;
     }
 
     Jump done = masm.jump();
 
-    LinkerHelper linker(masm);
+    LinkerHelper linker(masm, JSC::METHOD_CODE);
     JSC::ExecutablePool *ep = linker.init(f.cx);
     if (!ep)
         return;
     jit->argsCheckPool = ep;
 
     if (!linker.verifyRange(jit)) {
         jit->resetArgsCheck();
         return;
--- a/js/src/methodjit/PolyIC.cpp
+++ b/js/src/methodjit/PolyIC.cpp
@@ -117,17 +117,17 @@ ScopeNameLabels PICInfo::scopeNameLabels
 // This guarantees correct OOM and refcount handling for buffers while they
 // are instantiated and rooted.
 class PICLinker : public LinkerHelper
 {
     ic::BasePolyIC &ic;
 
   public:
     PICLinker(Assembler &masm, ic::BasePolyIC &ic)
-      : LinkerHelper(masm), ic(ic)
+      : LinkerHelper(masm, JSC::METHOD_CODE), ic(ic)
     { }
 
     bool init(JSContext *cx) {
         JSC::ExecutablePool *pool = LinkerHelper::init(cx);
         if (!pool)
             return false;
         if (!ic.addPool(cx, pool)) {
             pool->release();
@@ -2973,17 +2973,17 @@ SetElementIC::attachHoleStub(JSContext *
         masm.storeValue(vr, slot);
     }
 
     Jump done = masm.jump();
 
     JS_ASSERT(!execPool);
     JS_ASSERT(!inlineHoleGuardPatched);
 
-    LinkerHelper buffer(masm);
+    LinkerHelper buffer(masm, JSC::METHOD_CODE);
     execPool = buffer.init(cx);
     if (!execPool)
         return error(cx);
 
     if (!buffer.verifyRange(cx->fp()->jit()))
         return disable(cx, "code memory is out of range");
 
     // Patch all guards.
@@ -3054,17 +3054,17 @@ SetElementIC::attachTypedArray(JSContext
     }
 
     Jump done = masm.jump();
 
     // The stub does not rely on any pointers or numbers that could be ruined
     // by a GC or shape regenerated GC. We let this stub live for the lifetime
     // of the script.
     JS_ASSERT(!execPool);
-    LinkerHelper buffer(masm);
+    LinkerHelper buffer(masm, JSC::METHOD_CODE);
     execPool = buffer.init(cx);
     if (!execPool)
         return error(cx);
 
     if (!buffer.verifyRange(cx->fp()->jit()))
         return disable(cx, "code memory is out of range");
 
     // Note that the out-of-bounds path simply does nothing.
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -204,18 +204,18 @@ StubCompiler::emitStubCall(void *ptr, Re
 
     cc.addCallSite(site);
     return cl;
 }
 
 void
 StubCompiler::fixCrossJumps(uint8 *ncode, size_t offset, size_t total)
 {
-    JSC::LinkBuffer fast(ncode, total);
-    JSC::LinkBuffer slow(ncode + offset, total - offset);
+    JSC::LinkBuffer fast(ncode, total, JSC::METHOD_CODE);
+    JSC::LinkBuffer slow(ncode + offset, total - offset, JSC::METHOD_CODE);
 
     for (size_t i = 0; i < exits.length(); i++)
         fast.link(exits[i].from, slow.locationOf(exits[i].to));
 
     for (size_t i = 0; i < scriptJoins.length(); i++) {
         const CrossJumpInScript &cj = scriptJoins[i];
         slow.link(cj.from, fast.locationOf(cc.labelOf(cj.pc, cj.inlineIndex)));
     }
--- a/js/src/methodjit/TrampolineCompiler.cpp
+++ b/js/src/methodjit/TrampolineCompiler.cpp
@@ -36,16 +36,17 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "TrampolineCompiler.h"
 #include "StubCalls.h"
 #include "assembler/assembler/LinkBuffer.h"
+#include "assembler/jit/ExecutableAllocator.h"
 
 namespace js {
 namespace mjit {
 
 #define CHECK_RESULT(x) if (!(x)) return false
 #define COMPILE(which, pool, how) CHECK_RESULT(compileTrampoline(&(which), &pool, how))
 #define RELEASE(which, pool) JS_BEGIN_MACRO \
     which = NULL;                           \
@@ -91,17 +92,17 @@ TrampolineCompiler::compileTrampoline(Tr
 {
     Assembler masm;
 
     Label entry = masm.label();
     CHECK_RESULT(generator(masm));
     JS_ASSERT(entry.isSet());
 
     bool ok;
-    JSC::LinkBuffer buffer(&masm, execAlloc, poolp, &ok);
+    JSC::LinkBuffer buffer(&masm, execAlloc, poolp, &ok, JSC::METHOD_CODE);
     if (!ok) 
         return false;
     masm.finalize(buffer);
     uint8 *result = (uint8*)buffer.finalizeCodeAddendum().dataLocation();
     *where = JS_DATA_TO_FUNC_PTR(Trampolines::TrampolinePtr, result + masm.distanceOf(entry));
 
     return true;
 }
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -4037,19 +4037,22 @@ Deserialize(JSContext *cx, uintN argc, j
 }
 
 JSBool
 MJitCodeStats(JSContext *cx, uintN argc, jsval *vp)
 {
 #ifdef JS_METHODJIT
     JSRuntime *rt = cx->runtime;
     AutoLockGC lock(rt);
-    size_t n = 0;
+    size_t n = 0, method, regexp, unused;
     for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c)
-        n += (*c)->getMjitCodeSize();
+    {
+        (*c)->getMjitCodeStats(method, regexp, unused);
+        n += method + regexp + unused;
+    }
     JS_SET_RVAL(cx, vp, INT_TO_JSVAL(n));
 #else
     JS_SET_RVAL(cx, vp, JSVAL_VOID);
 #endif
     return true;
 }
 
 JSBool
--- a/js/src/xpconnect/src/xpcjsruntime.cpp
+++ b/js/src/xpconnect/src/xpcjsruntime.cpp
@@ -1260,20 +1260,21 @@ GetCompartmentScriptsSize(JSCompartment 
     {
         n += script->totalSize();
     }
     return n;
 }
 
 #ifdef JS_METHODJIT
 
-PRInt64
-GetCompartmentMjitCodeSize(JSCompartment *c)
+void
+GetCompartmentMjitCodeStats(JSCompartment *c, size_t& method, size_t& regexp,
+        size_t& unused)
 {
-    return c->getMjitCodeSize();
+    c->getMjitCodeStats(method, regexp, unused);
 }
 
 PRInt64
 GetCompartmentMjitDataSize(JSCompartment *c)
 {
     PRInt64 n = 0;
     for(JSScript *script = (JSScript *)c->scripts.next;
         &script->links != &c->scripts;
@@ -1334,17 +1335,21 @@ CompartmentCallback(JSContext *cx, void 
     CompartmentStats compartmentStats(cx, compartment);
     CompartmentStats *curr =
         data->compartmentStatsVector.AppendElement(compartmentStats);
     data->currCompartmentStats = curr;
 
     // Get the compartment-level numbers.
     curr->scripts = GetCompartmentScriptsSize(compartment);
 #ifdef JS_METHODJIT
-    curr->mjitCode = GetCompartmentMjitCodeSize(compartment);
+    size_t method, regexp, unused;
+    GetCompartmentMjitCodeStats(compartment, method, regexp, unused);
+    curr->mjitCodeMethod = method;
+    curr->mjitCodeRegexp = regexp;
+    curr->mjitCodeUnused = unused;
     curr->mjitData = GetCompartmentMjitDataSize(compartment);
 #endif
 #ifdef JS_TRACER
     curr->tjitCode = GetCompartmentTjitCodeSize(compartment);
     curr->tjitDataAllocatorsMain = GetCompartmentTjitDataAllocatorsMainSize(compartment);
     curr->tjitDataAllocatorsReserve = GetCompartmentTjitDataAllocatorsReserveSize(compartment);
     curr->tjitDataNonAllocators = GetCompartmentTjitDataTraceMonitorSize(compartment);
 #endif
@@ -1794,22 +1799,35 @@ ReportCompartmentStats(const Compartment
     "Memory allocated for the compartment's JSScripts.  A JSScript is created "
     "for each user-defined function in a script.  One is also created for "
     "the top-level code in a script.  Each JSScript includes byte-code and "
     "various other things.",
                        callback, closure);
 
 #ifdef JS_METHODJIT
     ReportMemoryBytes0(MakeMemoryReporterPath(pathPrefix, stats.name,
-                                              "mjit-code"),
-                       nsIMemoryReporter::KIND_NONHEAP, stats.mjitCode,
+                                              "mjit-code/method"),
+                       nsIMemoryReporter::KIND_NONHEAP, stats.mjitCodeMethod,
     "Memory used by the method JIT to hold the compartment's generated code.",
                        callback, closure);
 
     ReportMemoryBytes0(MakeMemoryReporterPath(pathPrefix, stats.name,
+                                              "mjit-code/regexp"),
+                       nsIMemoryReporter::KIND_NONHEAP, stats.mjitCodeRegexp,
+    "Memory used by the regexp JIT to hold the compartment's generated code.",
+                       callback, closure);
+
+    ReportMemoryBytes0(MakeMemoryReporterPath(pathPrefix, stats.name,
+                                              "mjit-code/unused"),
+                       nsIMemoryReporter::KIND_NONHEAP, stats.mjitCodeUnused,
+    "Memory allocated by the method and/or regexp JIT to hold the "
+    "compartment's code, but which is currently unused.",
+                       callback, closure);
+
+    ReportMemoryBytes0(MakeMemoryReporterPath(pathPrefix, stats.name,
                                               "mjit-data"),
                        nsIMemoryReporter::KIND_HEAP, stats.mjitData,
     "Memory used by the method JIT for the compartment's compilation data: "
     "JITScripts, native maps, and inline cache structs.",
                        callback, closure);
 #endif
 #ifdef JS_TRACER
     ReportMemoryBytes0(MakeMemoryReporterPath(pathPrefix, stats.name,
--- a/js/src/xpconnect/src/xpcpublic.h
+++ b/js/src/xpconnect/src/xpcpublic.h
@@ -208,17 +208,19 @@ struct CompartmentStats
     PRInt64 gcHeapXml;
 
     PRInt64 objectSlots;
     PRInt64 stringChars;
     PRInt64 propertyTables;
 
     PRInt64 scripts;
 #ifdef JS_METHODJIT
-    PRInt64 mjitCode;
+    PRInt64 mjitCodeMethod;
+    PRInt64 mjitCodeRegexp;
+    PRInt64 mjitCodeUnused;
     PRInt64 mjitData;
 #endif
 #ifdef JS_TRACER
     PRInt64 tjitCode;
     PRInt64 tjitDataAllocatorsMain;
     PRInt64 tjitDataAllocatorsReserve;
     PRInt64 tjitDataNonAllocators;
 #endif
--- a/js/src/yarr/YarrJIT.cpp
+++ b/js/src/yarr/YarrJIT.cpp
@@ -2392,17 +2392,17 @@ public:
 
         generate();
         backtrack();
 
         // Link & finalize the code.
         // XXX yarr-oom
         ExecutablePool *pool;
         bool ok;
-        LinkBuffer linkBuffer(this, globalData->regexAllocator, &pool, &ok);
+        LinkBuffer linkBuffer(this, globalData->regexAllocator, &pool, &ok, REGEXP_CODE);
         m_backtrackingState.linkDataLabels(linkBuffer);
         jitObject.set(linkBuffer.finalizeCode());
         jitObject.setFallBack(m_shouldFallBack);
     }
 
 private:
     YarrPattern& m_pattern;