Bug 981693 - Improve JIT code memory reporters. r=njn
author: Jan de Mooij <jdemooij@mozilla.com>
Mon, 17 Mar 2014 10:11:21 +0100
changeset 191115 2c7fac27ca5855e57652c57dc056d5b8802b0233
parent 191114 d8e3558aeaa7393e0d3639dbb8c3640b18d7c56d
child 191116 f592a7ad7c36891fdd9f71d0af6b7018bb3cb874
push id: 3503
push user: raliiev@mozilla.com
push date: Mon, 28 Apr 2014 18:51:11 +0000
treeherder: mozilla-beta@c95ac01e332e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: njn
bugs: 981693
milestone: 30.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 981693 - Improve JIT code memory reporters. r=njn
js/src/assembler/assembler/ARMAssembler.h
js/src/assembler/assembler/AssemblerBuffer.h
js/src/assembler/assembler/LinkBuffer.h
js/src/assembler/assembler/MacroAssemblerCodeRef.h
js/src/assembler/assembler/X86Assembler.h
js/src/assembler/jit/ExecutableAllocator.cpp
js/src/assembler/jit/ExecutableAllocator.h
js/src/jit/Ion.cpp
js/src/jit/IonCode.h
js/src/jit/IonLinker.h
js/src/yarr/YarrJIT.h
--- a/js/src/assembler/assembler/ARMAssembler.h
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -901,16 +901,21 @@ namespace JSC {
             m_buffer.flushWithoutBarrier(true);
         }
 
         size_t size() const
         {
             return m_buffer.uncheckedSize();
         }
 
+        size_t allocSize() const
+        {
+            return m_buffer.allocSize();
+        }
+
         void ensureSpace(int insnSpace, int constSpace)
         {
             m_buffer.ensureSpace(insnSpace, constSpace);
         }
 
         void ensureSpace(int space)
         {
             m_buffer.ensureSpace(space);
--- a/js/src/assembler/assembler/AssemblerBuffer.h
+++ b/js/src/assembler/assembler/AssemblerBuffer.h
@@ -37,16 +37,17 @@
 #include <string.h>
 #include <limits.h>
 #include "assembler/jit/ExecutableAllocator.h"
 #include "assembler/wtf/Assertions.h"
 
 #include <stdarg.h>
 #include "jsfriendapi.h"
 #include "jsopcode.h"
+#include "jsutil.h"
 
 #include "jit/IonSpewer.h"
 #include "js/RootingAPI.h"
 
 #define PRETTY_PRINT_OFFSET(os) (((os)<0)?"-":""), (((os)<0)?-(os):(os))
 
 #define FIXME_INSN_PRINTING                                 \
     do {                                                    \
@@ -58,16 +59,17 @@ namespace JSC {
 
     class AssemblerBuffer {
         static const size_t inlineCapacity = 256;
     public:
         AssemblerBuffer()
             : m_buffer(m_inlineBuffer)
             , m_capacity(inlineCapacity)
             , m_size(0)
+            , m_allocSize(0)
             , m_oom(false)
         {
         }
 
         ~AssemblerBuffer()
         {
             if (m_buffer != m_inlineBuffer)
                 js_free(m_buffer);
@@ -138,33 +140,40 @@ namespace JSC {
             return m_buffer;
         }
 
         size_t size() const
         {
             return m_size;
         }
 
+        size_t allocSize() const
+        {
+            return m_allocSize;
+        }
+
         bool oom() const
         {
             return m_oom;
         }
 
         /*
          * The user must check for a NULL return value, which means
          * no code was generated, or there was an OOM.
          */
         void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind)
         {
             if (m_oom || m_size == 0) {
                 *poolp = NULL;
                 return 0;
             }
 
-            void* result = allocator->alloc(m_size, poolp, kind);
+            m_allocSize = js::AlignBytes(m_size, sizeof(void *));
+
+            void* result = allocator->alloc(m_allocSize, poolp, kind);
             if (!result) {
                 *poolp = NULL;
                 return 0;
             }
             JS_ASSERT(*poolp);
 
             ExecutableAllocator::makeWritable(result, m_size);
 
@@ -250,16 +259,17 @@ namespace JSC {
             m_buffer = newBuffer;
             m_capacity = newCapacity;
         }
 
         char m_inlineBuffer[inlineCapacity];
         char* m_buffer;
         size_t m_capacity;
         size_t m_size;
+        size_t m_allocSize;
         bool m_oom;
     };
 
     class GenericAssembler
     {
         js::Sprinter *printer;
 
       public:
--- a/js/src/assembler/assembler/LinkBuffer.h
+++ b/js/src/assembler/assembler/LinkBuffer.h
@@ -61,48 +61,29 @@ class LinkBuffer {
     typedef MacroAssembler::DataLabel32 DataLabel32;
     typedef MacroAssembler::DataLabelPtr DataLabelPtr;
 
 public:
     // 'ok' should be checked after this constructor is called;  it's false if OOM occurred.
     LinkBuffer(MacroAssembler* masm, ExecutableAllocator* executableAllocator,
                ExecutablePool** poolp, bool* ok, CodeKind codeKind)
     {
+        // LinkBuffer is only used by Yarr. MacroAssemblerCodeRef::release relies on this.
+        MOZ_ASSERT(codeKind == REGEXP_CODE);
         m_codeKind = codeKind;
         m_code = executableAllocAndCopy(*masm, executableAllocator, poolp);
         m_executablePool = *poolp;
         m_size = masm->m_assembler.size();  // must come after call to executableAllocAndCopy()!
+        m_allocSize = masm->m_assembler.allocSize();
 #ifndef NDEBUG
         m_completed = false;
 #endif
         *ok = !!m_code;
     }
 
-    LinkBuffer(CodeKind kind)
-        : m_executablePool(NULL)
-        , m_code(NULL)
-        , m_size(0)
-        , m_codeKind(kind)
-#ifndef NDEBUG
-        , m_completed(false)
-#endif
-    {
-    }
-
-    LinkBuffer(uint8_t* ncode, size_t size, CodeKind kind)
-        : m_executablePool(NULL)
-        , m_code(ncode)
-        , m_size(size)
-        , m_codeKind(kind)
-#ifndef NDEBUG
-        , m_completed(false)
-#endif
-    {
-    }
-
     ~LinkBuffer()
     {
         ASSERT(!m_executablePool || m_completed);
     }
 
     // These methods are used to link or set values at code generation time.
 
     void link(Call call, FunctionPtr function)
@@ -178,17 +159,18 @@ public:
     // Upon completion of all patching either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
     // once to complete generation of the code.  'finalizeCode()' is suited to situations
     // where the executable pool must also be retained, the lighter-weight 'finalizeCodeAddendum()' is
     // suited to adding to an existing allocation.
     CodeRef finalizeCode()
     {
         performFinalization();
 
-        return CodeRef(m_code, m_executablePool, m_size);
+        MOZ_ASSERT(m_allocSize >= m_size);
+        return CodeRef(m_code, m_executablePool, m_allocSize);
     }
     CodeLocationLabel finalizeCodeAddendum()
     {
         performFinalization();
 
         return CodeLocationLabel(code());
     }
 
@@ -220,16 +202,17 @@ protected:
 
         ExecutableAllocator::makeExecutable(code(), m_size);
         ExecutableAllocator::cacheFlush(code(), m_size);
     }
 
     ExecutablePool* m_executablePool;
     void* m_code;
     size_t m_size;
+    size_t m_allocSize;
     CodeKind m_codeKind;
 #ifndef NDEBUG
     bool m_completed;
 #endif
 };
 
 } // namespace JSC
 
--- a/js/src/assembler/assembler/MacroAssemblerCodeRef.h
+++ b/js/src/assembler/assembler/MacroAssemblerCodeRef.h
@@ -177,50 +177,51 @@ private:
 //
 // A reference to a section of JIT generated code.  A CodeRef consists of a
 // pointer to the code, and a ref pointer to the pool from within which it
 // was allocated.
 class MacroAssemblerCodeRef {
 public:
     MacroAssemblerCodeRef()
         : m_executablePool(NULL),
-          m_size(0)
+          m_allocSize(0)
     {
     }
 
-    MacroAssemblerCodeRef(void* code, ExecutablePool* executablePool, size_t size)
+    MacroAssemblerCodeRef(void* code, ExecutablePool* executablePool, size_t allocSize)
         : m_code(code)
         , m_executablePool(executablePool)
-        , m_size(size)
+        , m_allocSize(allocSize)
     {
     }
 
     // Release the code memory in this code ref.
     void release()
     {
         if (!m_executablePool)
             return;
 
 #if defined DEBUG && (defined WTF_CPU_X86 || defined WTF_CPU_X86_64) 
         void *addr = m_code.executableAddress();
-        memset(addr, 0xcc, m_size);
+        memset(addr, 0xcc, m_allocSize);
 #endif
-        m_executablePool->release();
+        // MacroAssemblerCodeRef is only used by Yarr.
+        m_executablePool->release(m_allocSize, REGEXP_CODE);
         m_executablePool = NULL;
     }
 
     MacroAssemblerCodePtr code() const {
         return m_code;
     }
-    size_t size() const {
-        return m_size;
+    size_t allocSize() const {
+        return m_allocSize;
     }
 
     MacroAssemblerCodePtr m_code;
     ExecutablePool* m_executablePool;
-    size_t m_size;
+    size_t m_allocSize;
 };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
 
 #endif /* assembler_assembler_MacroAssemblerCodeRef_h */
--- a/js/src/assembler/assembler/X86Assembler.h
+++ b/js/src/assembler/assembler/X86Assembler.h
@@ -435,16 +435,17 @@ public:
             return m_offset;
         }
     private:
         signed int m_offset : 31;
         bool m_used : 1;
     };
 
     size_t size() const { return m_formatter.size(); }
+    size_t allocSize() const { return m_formatter.allocSize(); }
     unsigned char *buffer() const { return m_formatter.buffer(); }
     bool oom() const { return m_formatter.oom(); }
 
     void nop()
     {
         spew("nop");
         m_formatter.oneByteOp(OP_NOP);
     }
@@ -3862,16 +3863,17 @@ private:
         {
             m_buffer.ensureSpace(sizeof(int64_t));
             m_buffer.putInt64Unchecked(i);
         }
 
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
+        size_t allocSize() const { return m_buffer.allocSize(); }
         unsigned char *buffer() const { return m_buffer.buffer(); }
         bool oom() const { return m_buffer.oom(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
         void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool** poolp, CodeKind kind) {
             return m_buffer.executableAllocAndCopy(allocator, poolp, kind);
         }
 
--- a/js/src/assembler/jit/ExecutableAllocator.cpp
+++ b/js/src/assembler/jit/ExecutableAllocator.cpp
@@ -33,16 +33,21 @@
 
 namespace JSC {
 
 size_t ExecutableAllocator::pageSize = 0;
 size_t ExecutableAllocator::largeAllocSize = 0;
 
 ExecutablePool::~ExecutablePool()
 {
+    MOZ_ASSERT(m_ionCodeBytes == 0);
+    MOZ_ASSERT(m_baselineCodeBytes == 0);
+    MOZ_ASSERT(m_regexpCodeBytes == 0);
+    MOZ_ASSERT(m_otherCodeBytes == 0);
+
     m_allocator->releasePoolPages(this);
 }
 
 void
 ExecutableAllocator::addSizeOfCode(JS::CodeSizes *sizes) const
 {
     if (m_pools.initialized()) {
         for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
--- a/js/src/assembler/jit/ExecutableAllocator.h
+++ b/js/src/assembler/jit/ExecutableAllocator.h
@@ -80,17 +80,17 @@ extern  "C" void sync_instruction_memory
 namespace JS {
     struct CodeSizes;
 }
 
 namespace JSC {
 
   class ExecutableAllocator;
 
-  enum CodeKind { ION_CODE, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
+  enum CodeKind { ION_CODE = 0, BASELINE_CODE, REGEXP_CODE, OTHER_CODE };
 
   // These are reference-counted. A new one starts with a count of 1.
   class ExecutablePool {
 
     friend class ExecutableAllocator;
 private:
     struct Allocation {
         char* pages;
@@ -125,16 +125,41 @@ public:
     void release(bool willDestroy = false)
     {
         JS_ASSERT(m_refCount != 0);
         // XXX: disabled, see bug 654820.
         //JS_ASSERT_IF(willDestroy, m_refCount == 1);
         if (--m_refCount == 0)
             js_delete(this);
     }
+    void release(size_t n, CodeKind kind)
+    {
+        switch (kind) {
+          case ION_CODE:
+            m_ionCodeBytes -= n;
+            MOZ_ASSERT(m_ionCodeBytes < m_allocation.size); // Shouldn't underflow.
+            break;
+          case BASELINE_CODE:
+            m_baselineCodeBytes -= n;
+            MOZ_ASSERT(m_baselineCodeBytes < m_allocation.size);
+            break;
+          case REGEXP_CODE:
+            m_regexpCodeBytes -= n;
+            MOZ_ASSERT(m_regexpCodeBytes < m_allocation.size);
+            break;
+          case OTHER_CODE:
+            m_otherCodeBytes -= n;
+            MOZ_ASSERT(m_otherCodeBytes < m_allocation.size);
+            break;
+          default:
+            MOZ_ASSUME_UNREACHABLE("bad code kind");
+        }
+
+        release();
+    }
 
     ExecutablePool(ExecutableAllocator* allocator, Allocation a)
       : m_allocator(allocator), m_freePtr(a.pages), m_end(m_freePtr + a.size), m_allocation(a),
         m_refCount(1), m_ionCodeBytes(0), m_baselineCodeBytes(0), m_regexpCodeBytes(0),
         m_otherCodeBytes(0), m_destroy(false), m_gcNumber(0)
     { }
 
     ~ExecutablePool();
@@ -218,20 +243,21 @@ public:
         m_smallPools.clear();
     }
 
     // alloc() returns a pointer to some memory, and also (by reference) a
     // pointer to reference-counted pool. The caller owns a reference to the
     // pool; i.e. alloc() increments the count before returning the object.
     void* alloc(size_t n, ExecutablePool** poolp, CodeKind type)
     {
-        // Round 'n' up to a multiple of word size; if all allocations are of
-        // word sized quantities, then all subsequent allocations will be
+        // Caller must ensure 'n' is word-size aligned. If all allocations are
+        // of word sized quantities, then all subsequent allocations will be
         // aligned.
-        n = roundUpAllocationSize(n, sizeof(void*));
+        JS_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);
+
         if (n == OVERSIZE_ALLOCATION) {
             *poolp = NULL;
             return NULL;
         }
 
         *poolp = poolForSize(n);
         if (!*poolp)
             return NULL;
@@ -342,17 +368,17 @@ public:
         // If the request is large, we just provide a unshared allocator
         if (n > largeAllocSize)
             return createPool(n);
 
         // Create a new allocator
         ExecutablePool* pool = createPool(largeAllocSize);
         if (!pool)
             return NULL;
-  	    // At this point, local |pool| is the owner.
+        // At this point, local |pool| is the owner.
 
         if (m_smallPools.length() < maxSmallPools) {
             // We haven't hit the maximum number of live pools;  add the new pool.
             m_smallPools.append(pool);
             pool->addRef();
         } else {
             // Find the pool with the least space.
             int iMin = 0;
@@ -368,17 +394,17 @@ public:
             ExecutablePool *minPool = m_smallPools[iMin];
             if ((pool->available() - n) > minPool->available()) {
                 minPool->release();
                 m_smallPools[iMin] = pool;
                 pool->addRef();
             }
         }
 
-   	    // Pass ownership to the caller.
+        // Pass ownership to the caller.
         return pool;
     }
 
 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
     static void makeWritable(void* start, size_t size)
     {
         reprotectRegion(start, size, Writable);
     }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -610,35 +610,38 @@ JitRuntime::getVMWrapper(const VMFunctio
     JitRuntime::VMWrapperMap::Ptr p = functionWrappers_->readonlyThreadsafeLookup(&f);
     JS_ASSERT(p);
 
     return p->value();
 }
 
 template <AllowGC allowGC>
 JitCode *
-JitCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool)
+JitCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+             JSC::ExecutablePool *pool, JSC::CodeKind kind)
 {
     JitCode *codeObj = js::NewJitCode<allowGC>(cx);
     if (!codeObj) {
-        pool->release();
+        pool->release(headerSize + bufferSize, kind);
         return nullptr;
     }
 
-    new (codeObj) JitCode(code, bufferSize, pool);
+    new (codeObj) JitCode(code, bufferSize, headerSize, pool, kind);
     return codeObj;
 }
 
 template
 JitCode *
-JitCode::New<CanGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool);
+JitCode::New<CanGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+                    JSC::ExecutablePool *pool, JSC::CodeKind kind);
 
 template
 JitCode *
-JitCode::New<NoGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool);
+JitCode::New<NoGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+                   JSC::ExecutablePool *pool, JSC::CodeKind kind);
 
 void
 JitCode::copyFrom(MacroAssembler &masm)
 {
     // Store the JitCode pointer right before the code buffer, so we can
     // recover the gcthing from relocation tables.
     *(JitCode **)(code_ - sizeof(JitCode *)) = this;
     insnSize_ = masm.instructionsSize();
@@ -691,17 +694,17 @@ JitCode::finalize(FreeOp *fop)
     code_ = nullptr;
 
     // Code buffers are stored inside JSC pools.
     // Pools are refcounted. Releasing the pool may free it.
     if (pool_) {
         // Horrible hack: if we are using perf integration, we don't
         // want to reuse code addresses, so we just leak the memory instead.
         if (!PerfEnabled())
-            pool_->release();
+            pool_->release(headerSize_ + bufferSize_, JSC::CodeKind(kind_));
         pool_ = nullptr;
     }
 }
 
 void
 JitCode::togglePreBarriers(bool enabled)
 {
     uint8_t *start = code_ + preBarrierTableOffset();
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -9,16 +9,17 @@
 
 #include "mozilla/Atomics.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/PodOperations.h"
 
 #include "jsinfer.h"
 #include "jstypes.h"
 
+#include "assembler/jit/ExecutableAllocator.h"
 #include "gc/Heap.h"
 #include "jit/IonOptimizationLevels.h"
 #include "jit/IonTypes.h"
 
 namespace JSC {
     class ExecutablePool;
 }
 
@@ -32,45 +33,53 @@ class MacroAssembler;
 class CodeOffsetLabel;
 class PatchableBackedge;
 
 class JitCode : public gc::BarrieredCell<JitCode>
 {
   protected:
     uint8_t *code_;
     JSC::ExecutablePool *pool_;
-    uint32_t bufferSize_;             // Total buffer size.
+    uint32_t bufferSize_;             // Total buffer size. Does not include headerSize_.
     uint32_t insnSize_;               // Instruction stream size.
     uint32_t dataSize_;               // Size of the read-only data area.
     uint32_t jumpRelocTableBytes_;    // Size of the jump relocation table.
     uint32_t dataRelocTableBytes_;    // Size of the data relocation table.
     uint32_t preBarrierTableBytes_;   // Size of the prebarrier table.
-    bool invalidated_;                // Whether the code object has been invalidated.
+    uint8_t headerSize_ : 5;          // Number of bytes allocated before codeStart.
+    uint8_t kind_ : 3;                // JSC::CodeKind, for the memory reporters.
+    bool invalidated_ : 1;            // Whether the code object has been invalidated.
                                       // This is necessary to prevent GC tracing.
 
 #if JS_BITS_PER_WORD == 32
     // Ensure JitCode is gc::Cell aligned.
     uint32_t padding_;
 #endif
 
     JitCode()
       : code_(nullptr),
         pool_(nullptr)
     { }
-    JitCode(uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool)
+    JitCode(uint8_t *code, uint32_t bufferSize, uint32_t headerSize, JSC::ExecutablePool *pool,
+            JSC::CodeKind kind)
       : code_(code),
         pool_(pool),
         bufferSize_(bufferSize),
         insnSize_(0),
         dataSize_(0),
         jumpRelocTableBytes_(0),
         dataRelocTableBytes_(0),
         preBarrierTableBytes_(0),
+        headerSize_(headerSize),
+        kind_(kind),
         invalidated_(false)
-    { }
+    {
+        MOZ_ASSERT(JSC::CodeKind(kind_) == kind);
+        MOZ_ASSERT(headerSize_ == headerSize);
+    }
 
     uint32_t dataOffset() const {
         return insnSize_;
     }
     uint32_t jumpRelocTableOffset() const {
         return dataOffset() + dataSize_;
     }
     uint32_t dataRelocTableOffset() const {
@@ -121,17 +130,18 @@ class JitCode : public gc::BarrieredCell
     uint8_t *jumpRelocTable() {
         return code_ + jumpRelocTableOffset();
     }
 
     // Allocates a new JitCode object which will be managed by the GC. If no
     // object can be allocated, nullptr is returned. On failure, |pool| is
     // automatically released, so the code may be freed.
     template <AllowGC allowGC>
-    static JitCode *New(JSContext *cx, uint8_t *code, uint32_t bufferSize, JSC::ExecutablePool *pool);
+    static JitCode *New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
+                        JSC::ExecutablePool *pool, JSC::CodeKind kind);
 
   public:
     static inline ThingRootKind rootKind() { return THING_ROOT_JIT_CODE; }
 };
 
 class SnapshotWriter;
 class SafepointWriter;
 class SafepointIndex;
--- a/js/src/jit/IonLinker.h
+++ b/js/src/jit/IonLinker.h
@@ -39,28 +39,31 @@ class Linker
         if (masm.oom())
             return fail(cx);
 
         JSC::ExecutablePool *pool;
         size_t bytesNeeded = masm.bytesNeeded() + sizeof(JitCode *) + CodeAlignment;
         if (bytesNeeded >= MAX_BUFFER_SIZE)
             return fail(cx);
 
+        // ExecutableAllocator requires bytesNeeded to be word-size aligned.
+        bytesNeeded = AlignBytes(bytesNeeded, sizeof(void *));
+
         uint8_t *result = (uint8_t *)execAlloc->alloc(bytesNeeded, &pool, kind);
         if (!result)
             return fail(cx);
 
         // The JitCode pointer will be stored right before the code buffer.
         uint8_t *codeStart = result + sizeof(JitCode *);
 
         // Bump the code up to a nice alignment.
         codeStart = (uint8_t *)AlignBytes((uintptr_t)codeStart, CodeAlignment);
         uint32_t headerSize = codeStart - result;
-        JitCode *code = JitCode::New<allowGC>(cx, codeStart,
-                                              bytesNeeded - headerSize, pool);
+        JitCode *code = JitCode::New<allowGC>(cx, codeStart, bytesNeeded - headerSize,
+                                              headerSize, pool, kind);
         if (!code)
             return nullptr;
         if (masm.oom())
             return fail(cx);
         code->copyFrom(masm);
         masm.link(code);
 #ifdef JSGC_GENERATIONAL
         if (masm.embedsNurseryPointers())
--- a/js/src/yarr/YarrJIT.h
+++ b/js/src/yarr/YarrJIT.h
@@ -77,26 +77,26 @@ public:
     ~YarrCodeBlock()
     {
     }
 
     void setFallBack(bool fallback) { m_needFallBack = fallback; }
     bool isFallBack() { return m_needFallBack; }
 
 #ifdef YARR_8BIT_CHAR_SUPPORT
-    bool has8BitCode() const { return m_ref8.size(); }
+    bool has8BitCode() const { return m_ref8.allocSize(); }
     void set8BitCode(MacroAssemblerCodeRef ref) { m_ref8 = ref; }
-    bool has8BitCodeMatchOnly() const { return m_matchOnly8.size(); }
+    bool has8BitCodeMatchOnly() const { return m_matchOnly8.allocSize(); }
     void set8BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly8 = matchOnly; }
 #endif
 
-    bool has16BitCode() const { return m_ref16.size(); }
+    bool has16BitCode() const { return m_ref16.allocSize(); }
     void set16BitCode(MacroAssemblerCodeRef ref) { m_ref16 = ref; }
 
-    bool has16BitCodeMatchOnly() const { return m_matchOnly16.size(); }
+    bool has16BitCodeMatchOnly() const { return m_matchOnly16.allocSize(); }
     void set16BitCodeMatchOnly(MacroAssemblerCodeRef matchOnly) { m_matchOnly16 = matchOnly; }
 
 #if YARR_8BIT_CHAR_SUPPORT
     MatchResult execute(const LChar* input, unsigned start, unsigned length, int* output)
     {
         ASSERT(has8BitCode());
 
 #if JS_TRACE_LOGGING