Bug 518744 - TM: mark and rewind dataAlloc, r=gal.
author Graydon Hoare <graydon@mozilla.com>
Fri, 25 Sep 2009 17:20:01 -0700
changeset 33545 7f846ee363435788b0a2d444cb639f56d95cc510
parent 33544 33c00f847d8987d99bfde614defd82ec55bced46
child 33546 2150882a6548a6394fbc3d1554a40e4e00b4c71c
push id unknown
push user unknown
push date unknown
reviewers gal
bugs 518744
milestone 1.9.3a1pre
js/src/jscntxt.h
js/src/jstracer.cpp
js/src/jstracer.h
js/src/nanojit/Allocator.h
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -149,17 +149,34 @@ struct JSTraceMonitor {
      *
      * !tracecx && !recorder: not on trace
      * !tracecx && recorder: recording
      * tracecx && !recorder: executing a trace
      * tracecx && recorder: executing inner loop, recording outer loop
      */
     JSContext               *tracecx;
 
-    CLS(VMAllocator)        dataAlloc;   /* A chunk allocator for LIR.    */
+    /*
+     * There are 3 allocators here. This might seem like overkill, but they
+     * have different lifecycles, and by keeping them separate we keep the
+     * amount of retained memory down significantly.
+     *
+     * The dataAlloc has the lifecycle of the monitor. It's flushed only
+     * when the monitor is flushed.
+     *
+     * The traceAlloc has the same flush lifecycle as the dataAlloc, but
+     * it is also *marked* when a recording starts and rewinds to the mark
+     * point if recording aborts. So you can put things in it that are only
+     * reachable on a successful record/compile cycle.
+     *
+     * The tempAlloc is flushed after each recording, successful or not.
+     */
+
+    CLS(VMAllocator)        dataAlloc;   /* A chunk allocator for fragments. */
+    CLS(VMAllocator)        traceAlloc;  /* An allocator for trace metadata. */
     CLS(VMAllocator)        tempAlloc;   /* A temporary chunk allocator.  */
     CLS(nanojit::CodeAlloc) codeAlloc;   /* An allocator for native code. */
     CLS(nanojit::Assembler) assembler;
     CLS(nanojit::LirBuffer) lirbuf;
     CLS(nanojit::LirBuffer) reLirBuf;
 #ifdef DEBUG
     CLS(nanojit::LabelMap)  labels;
 #endif
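
Schematically, the three lifecycles described in the new comment line up as below. This is an illustrative sketch only, not monitor code from the patch; the harness function and the `succeeded` flag are hypothetical stand-ins.

    // Schematic of the three allocator lifecycles (hypothetical harness).
    void recordingCycle(JSTraceMonitor* tm, bool succeeded)
    {
        VMAllocator::Mark mark(*tm->traceAlloc);  // taken when recording starts

        tm->dataAlloc->alloc(32);    // e.g. fragments: live until monitor flush
        tm->traceAlloc->alloc(32);   // e.g. side exits: kept only on success
        tm->tempAlloc->alloc(32);    // scratch: never outlives the recording

        if (succeeded)
            mark.commit();           // otherwise ~Mark rewinds traceAlloc
        tm->tempAlloc->reset();      // always flushed, success or not
    }
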
old mode 100755
new mode 100644
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -2156,16 +2156,17 @@ SpecializeTreesToMissingGlobals(JSContex
 static void
 TrashTree(JSContext* cx, Fragment* f);
 
 JS_REQUIRES_STACK
 TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
         TreeInfo* ti, unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
         VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc)
     : tempAlloc(*JS_TRACE_MONITOR(cx).tempAlloc),
+      mark(*JS_TRACE_MONITOR(cx).traceAlloc),
       whichTreesToTrash(&tempAlloc),
       cfgMerges(&tempAlloc)
 {
     JS_ASSERT(!_fragment->vmprivate && ti && cx->fp->regs->pc == (jsbytecode*)_fragment->ip);
     /* Reset the fragment state we care about in case we got a recycled fragment.
        This includes resetting any profiling data we might have accumulated. */
     _fragment->lastIns = NULL;
     verbose_only( _fragment->profCount = 0; )
@@ -2304,17 +2305,19 @@ TraceRecorder::~TraceRecorder()
     /* Purge the tempAlloc used during recording. */
     tempAlloc.reset();
     traceMonitor->lirbuf->clear();
 }
 
 bool
 TraceRecorder::outOfMemory()
 {
-    return traceMonitor->dataAlloc->outOfMemory() || tempAlloc.outOfMemory();
+    return traceMonitor->dataAlloc->outOfMemory() ||
+        traceMonitor->traceAlloc->outOfMemory() ||
+        tempAlloc.outOfMemory();
 }
 
 /* Add debug information to a LIR instruction as we emit it. */
 inline LIns*
 TraceRecorder::addName(LIns* ins, const char* name)
 {
 #ifdef JS_JIT_SPEW
     /*
@@ -2584,16 +2587,17 @@ JSTraceMonitor::flush()
     )
 
     verbose_only(
         for (Seq<Fragment*>* f = branches; f; f = f->tail)
             js_FragProfiling_FragFinalizer(f->head, this);
     )
 
     dataAlloc->reset();
+    traceAlloc->reset();
     codeAlloc->reset();
 
     Allocator& alloc = *dataAlloc;
 
     for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
         globalStates[i].globalShape = -1;
         globalStates[i].globalSlots = new (alloc) SlotList(&alloc);
     }
@@ -3909,18 +3913,18 @@ TraceRecorder::snapshot(ExitType exitTyp
         stackSlots = 0;
         ngslots = 0;
         typemap_size = 0;
         trashSelf = true;
     }
 
     /* We couldn't find a matching side exit, so create a new one. */
     VMSideExit* exit = (VMSideExit*)
-        traceMonitor->dataAlloc->alloc(sizeof(VMSideExit) +
-                                       (stackSlots + ngslots) * sizeof(JSTraceType));
+        traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) +
+                                        (stackSlots + ngslots) * sizeof(JSTraceType));
 
     /* Setup side exit structure. */
     exit->from = fragment;
     exit->calldepth = callDepth;
     exit->numGlobalSlots = ngslots;
     exit->numStackSlots = stackSlots;
     exit->numStackSlotsBelowCurrentFrame = cx->fp->argv ?
                                            nativeStackOffset(&cx->fp->argv[-2]) / sizeof(double) :
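
The side exit and its type map are carved out of a single traceAlloc allocation: the JSTraceType array lives immediately after the VMSideExit struct, addressed by stepping past the header. A minimal standalone sketch of that idiom, with hypothetical names:

    #include <cstdlib>

    // Header-plus-trailing-array in one allocation, as with VMSideExit
    // (and FrameInfo, whose typemap is reinterpret_cast<JSTraceType*>(fi + 1)).
    struct ExitSketch { unsigned numSlots; };

    ExitSketch* newExit(unsigned numSlots)
    {
        // The patch uses traceMonitor->traceAlloc->alloc(...) instead of malloc.
        ExitSketch* e = (ExitSketch*) std::malloc(sizeof(ExitSketch) + numSlots);
        e->numSlots = numSlots;
        return e;
    }

    unsigned char* typeMap(ExitSketch* e) { return (unsigned char*)(e + 1); }
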
@@ -3943,17 +3947,17 @@ TraceRecorder::snapshot(ExitType exitTyp
 
     JS_ARENA_RELEASE(&cx->tempPool, mark);
     return exit;
 }
 
 JS_REQUIRES_STACK GuardRecord*
 TraceRecorder::createGuardRecord(VMSideExit* exit)
 {
-    GuardRecord* gr = new (*traceMonitor->dataAlloc) GuardRecord();
+    GuardRecord* gr = new (*traceMonitor->traceAlloc) GuardRecord();
 
     gr->exit = exit;
     exit->addGuard(gr);
 
     // gr->profCount is calloc'd to zero
     verbose_only(
         gr->profGuardID = fragment->guardNumberer++;
         gr->nextInFrag = fragment->guardsForFrag;
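
The `new (*traceMonitor->traceAlloc) GuardRecord()` form goes through a placement operator new that takes an allocator and bump-allocates from it; nanojit supplies such an overload for Allocator&. The sketch below is an approximation of that overload, not a copy of the header:

    // Approximate sketch of nanojit's arena placement-new; see
    // nanojit/Allocator.h for the real overload.
    inline void* operator new(size_t size, nanojit::Allocator& a)
    {
        return a.alloc(size);
    }

    // Objects built this way are never deleted individually; they vanish
    // when the owning arena is reset, or rewound past their mark.
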
@@ -3999,18 +4003,18 @@ TraceRecorder::guard(bool expected, LIns
     }
 }
 
 JS_REQUIRES_STACK VMSideExit*
 TraceRecorder::copy(VMSideExit* copy)
 {
     size_t typemap_size = copy->numGlobalSlots + copy->numStackSlots;
     VMSideExit* exit = (VMSideExit*)
-        traceMonitor->dataAlloc->alloc(sizeof(VMSideExit) +
-                                       typemap_size * sizeof(JSTraceType));
+        traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) +
+                                        typemap_size * sizeof(JSTraceType));
 
     /* Copy side exit structure. */
     memcpy(exit, copy, sizeof(VMSideExit) + typemap_size * sizeof(JSTraceType));
     exit->guards = NULL;
     exit->from = fragment;
     exit->target = NULL;
 
     /*
@@ -4150,16 +4154,17 @@ TraceRecorder::compile(JSTraceMonitor* t
     const char* filename = cx->fp->script->filename;
     char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16);
     sprintf(label, "%s:%u", filename ? filename : "<stdin>",
             js_FramePCToLineNumber(cx, cx->fp));
     tm->labels->add(fragment, sizeof(Fragment), 0, label);
     js_free(label);
 #endif
     AUDIT(traceCompleted);
+    mark.commit();
     return ARECORD_CONTINUE;
 }
 
 static void
 JoinPeers(Assembler* assm, VMSideExit* exit, VMFragment* target)
 {
     exit->target = target;
     assm->patch(exit);
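
This commit() is the only point where traceAlloc allocations become permanent: the Mark is constructed with the recorder, and every abort path destroys the recorder without reaching this line, so ~Mark rewinds. A condensed sketch of the control flow, with the mark on the stack rather than on the recorder as in the real code, and hypothetical predicates:

    // Condensed commit-on-success sketch.
    bool recordAndCompile(VMAllocator& traceAlloc)
    {
        VMAllocator::Mark mark(traceAlloc);   // snapshot at recording start

        if (!recordOk() || !compileOk())      // any abort path
            return false;                     // ~Mark rewinds traceAlloc

        mark.commit();                        // success: keep everything
        return true;
    }
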
@@ -4477,17 +4482,17 @@ TraceRecorder::closeLoop(SlotMap& slotMa
             /*
              * If such a fragment does not exist, let's compile the loop ahead
              * of time anyway.  Later, if the loop becomes type stable, we will
              * connect these two fragments together.
              */
             debug_only_print0(LC_TMTracer,
                               "Trace has unstable loop variable with no stable peer, "
                               "compiling anyway.\n");
-            UnstableExit* uexit = new (*traceMonitor->dataAlloc) UnstableExit;
+            UnstableExit* uexit = new (*traceMonitor->traceAlloc) UnstableExit;
             uexit->fragment = fragment;
             uexit->exit = exit;
             uexit->next = treeInfo->unstableExits;
             treeInfo->unstableExits = uexit;
         } else {
             JS_ASSERT(peer->code());
             exit->target = peer;
             debug_only_printf(LC_TMTracer,
@@ -5015,16 +5020,17 @@ DeleteRecorder(JSContext* cx)
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
 
     /* Aborting and completing a trace end up here. */
     delete tm->recorder;
     tm->recorder = NULL;
 
     /* If we ran out of memory, flush the code cache. */
     if (tm->dataAlloc->outOfMemory() ||
+        tm->traceAlloc->outOfMemory() ||
         js_OverfullJITCache(tm)) {
         ResetJIT(cx, FR_OOM);
         return false;
     }
 
     return true;
 }
 
@@ -5370,28 +5376,30 @@ RecordTree(JSContext* cx, JSTraceMonitor
     if (!f) {
         ResetJIT(cx, FR_OOM);
         return false;
     }
 
     f->root = f;
     f->lirbuf = tm->lirbuf;
 
-    if (tm->dataAlloc->outOfMemory() || js_OverfullJITCache(tm)) {
+    if (tm->dataAlloc->outOfMemory() ||
+        tm->traceAlloc->outOfMemory() ||
+        js_OverfullJITCache(tm)) {
         Backoff(cx, (jsbytecode*) f->root->ip);
         ResetJIT(cx, FR_OOM);
         debug_only_print0(LC_TMTracer,
                           "Out of memory recording new tree, flushing cache.\n");
         return false;
     }
 
     JS_ASSERT(!f->code() && !f->vmprivate);
 
     /* Set up the VM-private treeInfo structure for this fragment. */
-    TreeInfo* ti = new (*tm->dataAlloc) TreeInfo(tm->dataAlloc, f, globalSlots);
+    TreeInfo* ti = new (*tm->traceAlloc) TreeInfo(tm->dataAlloc, f, globalSlots);
 
     /* Capture the coerced type of each active slot in the type map. */
     ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */);
     ti->nStackTypes = ti->typeMap.length() - globalSlots->length();
 
 #ifdef DEBUG
     AssertTreeIsUnique(tm, (VMFragment*)f, ti);
     ti->treeFileName = cx->fp->script->filename;
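
Note the mixed lifetimes in the TreeInfo line above: the object itself is placed in traceAlloc, so it is discarded if the recording aborts, while its constructor is handed dataAlloc for storage that must survive until a monitor flush. A tiny sketch of that split, with a hypothetical Node type:

    // An object header in the rewindable arena, holding internals
    // allocated from the longer-lived one.
    struct Node { VMAllocator* internalAlloc; };

    Node* makeNode(VMAllocator& traceAlloc, VMAllocator& dataAlloc)
    {
        Node* n = new (traceAlloc) Node();  // header dies on abort
        n->internalAlloc = &dataAlloc;      // internals persist past aborts
        return n;
    }
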
@@ -7114,17 +7122,17 @@ js_disable_debugger_exceptions() { }
 #define K *1024
 #define M K K
 #define G K M
 
 void
 js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes)
 {
     JSTraceMonitor* tm = &JS_THREAD_DATA(cx)->traceMonitor;
-    JS_ASSERT(tm->codeAlloc && tm->dataAlloc);
+    JS_ASSERT(tm->codeAlloc && tm->dataAlloc && tm->traceAlloc);
     if (bytes > 1 G)
         bytes = 1 G;
     if (bytes < 128 K)
         bytes = 128 K;
     tm->maxCodeCacheBytes = bytes;
 }
 
 void
@@ -7186,18 +7194,19 @@ js_InitJIT(JSTraceMonitor *tm)
     tm->maxCodeCacheBytes = 16 M;
 
     if (!tm->recordAttempts.ops) {
         JS_DHashTableInit(&tm->recordAttempts, JS_DHashGetStubOps(),
                           NULL, sizeof(PCHashEntry),
                           JS_DHASH_DEFAULT_CAPACITY(PC_HASH_COUNT));
     }
 
-    JS_ASSERT(!tm->dataAlloc && !tm->codeAlloc);
+    JS_ASSERT(!tm->dataAlloc && !tm->traceAlloc && !tm->codeAlloc);
     tm->dataAlloc = new VMAllocator();
+    tm->traceAlloc = new VMAllocator();
     tm->tempAlloc = new VMAllocator();
     tm->reTempAlloc = new VMAllocator();
     tm->codeAlloc = new CodeAlloc();
     tm->flush();
     verbose_only( tm->branches = NULL; )
 
     JS_ASSERT(!tm->reservedDoublePool);
     tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
@@ -7308,16 +7317,21 @@ js_FinishJIT(JSTraceMonitor *tm)
         tm->codeAlloc = NULL;
     }
 
     if (tm->dataAlloc) {
         delete tm->dataAlloc;
         tm->dataAlloc = NULL;
     }
 
+    if (tm->traceAlloc) {
+        delete tm->traceAlloc;
+        tm->traceAlloc = NULL;
+    }
+
     if (tm->tempAlloc) {
         delete tm->tempAlloc;
         tm->tempAlloc = NULL;
     }
 
     if (tm->reTempAlloc) {
         delete tm->reTempAlloc;
         tm->reTempAlloc = NULL;
@@ -7412,19 +7426,20 @@ js_OverfullJITCache(JSTraceMonitor* tm)
      * we construct our allocators to use all available memory they like,
      * and only report outOfMemory to us when there is literally no OS memory
      * left. Merely purging our cache when we hit our highwater mark is
      * handled by the (few) callers of this function.
      *
      */
     jsuint maxsz = tm->maxCodeCacheBytes;
     VMAllocator *dataAlloc = tm->dataAlloc;
+    VMAllocator *traceAlloc = tm->traceAlloc;
     CodeAlloc *codeAlloc = tm->codeAlloc;
 
-    return (codeAlloc->size() + dataAlloc->size() > maxsz);
+    return (codeAlloc->size() + dataAlloc->size() + traceAlloc->size() > maxsz);
 }
 
 JS_FORCES_STACK JS_FRIEND_API(void)
 js_DeepBail(JSContext *cx)
 {
     JS_ASSERT(JS_ON_TRACE(cx));
 
     /*
@@ -7629,17 +7644,17 @@ TraceRecorder::callProp(JSObject* obj, J
         obj->dropProperty(cx, prop);
     }
 
     LIns* obj_ins;
     JSObject* parent = STOBJ_GET_PARENT(JSVAL_TO_OBJECT(cx->fp->argv[-2]));
     LIns* parent_ins = stobj_get_parent(get(&cx->fp->argv[-2]));
     CHECK_STATUS(traverseScopeChain(parent, parent_ins, obj, obj_ins));
 
-    ClosureVarInfo* cv = new (traceMonitor->dataAlloc) ClosureVarInfo();
+    ClosureVarInfo* cv = new (traceMonitor->traceAlloc) ClosureVarInfo();
     cv->id = id;
     cv->slot = slot;
     cv->callDepth = callDepth;
     cv->resolveFlags = cx->resolveFlags == JSRESOLVE_INFER
                      ? js_InferFlags(cx, 0)
                      : cx->resolveFlags;
 
     LIns* outp = lir->insAlloc(sizeof(double));
@@ -8005,17 +8020,17 @@ TraceRecorder::tableswitch()
     /*
      * Really large tables won't fit in a page. This is a conservative check.
      * If it matters in practice we need to go off-page.
      */
     if ((high + 1 - low) * sizeof(intptr_t*) + 128 > (unsigned) LARGEST_UNDERRUN_PROT)
         return InjectStatus(switchop());
 
     /* Generate switch LIR. */
-    SwitchInfo* si = new (*traceMonitor->dataAlloc) SwitchInfo();
+    SwitchInfo* si = new (*traceMonitor->traceAlloc) SwitchInfo();
     si->count = high + 1 - low;
     si->table = 0;
     si->index = (uint32) -1;
     LIns* diff = lir->ins2(LIR_sub, v_ins, lir->insImm(low));
     LIns* cmp = lir->ins2(LIR_ult, diff, lir->insImm(si->count));
     lir->insGuard(LIR_xf, cmp, createGuardRecord(snapshot(DEFAULT_EXIT)));
     lir->insStorei(diff, lir->insImmPtr(&si->index), 0);
     VMSideExit* exit = snapshot(CASE_EXIT);
@@ -9359,17 +9374,17 @@ TraceRecorder::newArguments()
 {
     LIns* global_ins = INS_CONSTOBJ(globalObj);
     LIns* argc_ins = INS_CONST(cx->fp->argc);
     LIns* callee_ins = get(&cx->fp->argv[-2]);
     LIns* argv_ins = cx->fp->argc
         ? lir->ins2(LIR_piadd, lirbuf->sp, 
                     lir->insImmWord(-treeInfo->nativeStackBase + nativeStackOffset(&cx->fp->argv[0])))
         : INS_CONSTPTR((void *) 2);
-    js_ArgsPrivateNative *apn = js_ArgsPrivateNative::create(*traceMonitor->dataAlloc, cx->fp->argc);
+    js_ArgsPrivateNative *apn = js_ArgsPrivateNative::create(*traceMonitor->traceAlloc, cx->fp->argc);
     for (uintN i = 0; i < cx->fp->argc; ++i) {
         apn->typemap()[i] = determineSlotType(&cx->fp->argv[i]);
     }
 
     LIns* args[] = { INS_CONSTPTR(apn), argv_ins, callee_ins, argc_ins, global_ins, cx_ins };
     LIns* call_ins = lir->insCall(&js_Arguments_ci, args);
     guard(false, lir->ins_peq0(call_ins), OOM_EXIT);
     return call_ins;
@@ -9861,17 +9876,17 @@ TraceRecorder::emitNativePropertyOp(JSSc
     // because the getter or setter could end up resizing the object's dslots.
     // Instead, use a word of stack and root it in nativeVp.
     LIns* vp_ins = lir->insAlloc(sizeof(jsval));
     lir->insStorei(vp_ins, lirbuf->state, offsetof(InterpState, nativeVp));
     lir->insStorei(INS_CONST(1), lirbuf->state, offsetof(InterpState, nativeVpLen));
     if (setflag)
         lir->insStorei(boxed_ins, vp_ins, 0);
 
-    CallInfo* ci = new (*traceMonitor->dataAlloc) CallInfo();
+    CallInfo* ci = new (*traceMonitor->traceAlloc) CallInfo();
     ci->_address = uintptr_t(setflag ? sprop->setter : sprop->getter);
     ci->_argtypes = ARGSIZE_I << (0*ARGSIZE_SHIFT) |
                     ARGSIZE_P << (1*ARGSIZE_SHIFT) |
                     ARGSIZE_P << (2*ARGSIZE_SHIFT) |
                     ARGSIZE_P << (3*ARGSIZE_SHIFT) |
                     ARGSIZE_P << (4*ARGSIZE_SHIFT);
     ci->_cse = ci->_fold = 0;
     ci->_abi = ABI_CDECL;
@@ -10244,17 +10259,17 @@ TraceRecorder::callNative(uintN argc, JS
                 ARGSIZE_P << (4*ARGSIZE_SHIFT) |
                 ARGSIZE_P << (5*ARGSIZE_SHIFT);
     }
 
     // Generate CallInfo and a JSSpecializedNative structure on the fly.
     // Do not use JSTN_UNBOX_AFTER for mode JSOP_NEW because
     // record_NativeCallComplete unboxes the result specially.
 
-    CallInfo* ci = new (*traceMonitor->dataAlloc) CallInfo();
+    CallInfo* ci = new (*traceMonitor->traceAlloc) CallInfo();
     ci->_address = uintptr_t(fun->u.n.native);
     ci->_cse = ci->_fold = 0;
     ci->_abi = ABI_CDECL;
     ci->_argtypes = types;
 #ifdef DEBUG
     ci->_name = JS_GetFunctionName(fun);
  #endif
 
@@ -11028,17 +11043,17 @@ TraceRecorder::record_JSOP_GETELEM()
                     LIns* typemap_ins;
                     if (callDepth == depth) {
                         // In this case, we are in the same frame where the arguments object was created.
                         // The entry type map is not necessarily up-to-date, so we capture a new type map
                         // for this point in the code.
                         unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
                         if (stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
                             RETURN_STOP_A("|arguments| requires saving too much stack");
-                        JSTraceType* typemap = new (*traceMonitor->dataAlloc) JSTraceType[stackSlots];
+                        JSTraceType* typemap = new (*traceMonitor->traceAlloc) JSTraceType[stackSlots];
                         DetermineTypesVisitor detVisitor(*this, typemap);
                         VisitStackSlots(detVisitor, cx, 0);
                         typemap_ins = INS_CONSTPTR(typemap + 2 /* callee, this */);
                     } else {
                         // In this case, we are in a deeper frame from where the arguments object was
                         // created. The type map at the point of the call out from the creation frame
                         // is accurate.
                         // Note: this relies on the assumption that we abort on setting an element of
@@ -11512,18 +11527,18 @@ TraceRecorder::interpretedFunctionCall(j
         RETURN_STOP("can't trace calls with too few args requiring argv move");
     }
 
     // Generate a type map for the outgoing frame and stash it in the LIR
     unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
     if (sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
         RETURN_STOP("interpreted function call requires saving too much stack");
     FrameInfo* fi = (FrameInfo*)
-        traceMonitor->dataAlloc->alloc(sizeof(FrameInfo) +
-                                       stackSlots * sizeof(JSTraceType));
+        traceMonitor->traceAlloc->alloc(sizeof(FrameInfo) +
+                                        stackSlots * sizeof(JSTraceType));
     JSTraceType* typemap = reinterpret_cast<JSTraceType *>(fi + 1);
 
     DetermineTypesVisitor detVisitor(*this, typemap);
     VisitStackSlots(detVisitor, cx, 0);
 
     JS_ASSERT(argc < FrameInfo::CONSTRUCTING_FLAG);
 
     treeInfo->gcthings.addUnique(fval);
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -433,16 +433,55 @@ public:
     size_t size() {
         return mSize;
     }
 
     bool outOfMemory() {
         return mOutOfMemory;
     }
 
+    struct Mark
+    {
+        VMAllocator& vma;
+        bool committed;
+        nanojit::Allocator::Chunk* saved_chunk;
+        char* saved_top;
+        char* saved_limit;
+        size_t saved_size;
+
+        Mark(VMAllocator& vma) :
+            vma(vma),
+            committed(false),
+            saved_chunk(vma.current_chunk),
+            saved_top(vma.current_top),
+            saved_limit(vma.current_limit),
+            saved_size(vma.mSize)
+        {}
+
+        ~Mark()
+        {
+            if (!committed)
+                vma.rewind(*this);
+        }
+
+        void commit() { committed = true; }
+    };
+
+    void rewind(const Mark& m) {
+        while (current_chunk != m.saved_chunk) {
+            Chunk *prev = current_chunk->prev;
+            freeChunk(current_chunk);
+            current_chunk = prev;
+        }
+        current_top = m.saved_top;
+        current_limit = m.saved_limit;
+        mSize = m.saved_size;
+        memset(current_top, 0, current_limit - current_top);
+    }
+
     bool mOutOfMemory;
     size_t mSize;
 
     /*
      * FIXME: Area the LIR spills into if we encounter an OOM mid-way
      * through compilation; we must check mOutOfMemory before we run out
      * of mReserve, otherwise we're in undefined territory. This area
      * used to be one page, now 16 to be "safer". This is a temporary
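
rewind() pops every chunk allocated since the mark, restores the bump pointer within the surviving chunk, and re-zeroes the freed tail; the zeroing matters because objects placement-new'd from the arena may assume zeroed storage, as the "gr->profCount is calloc'd to zero" comment in createGuardRecord does. A self-contained toy showing the same mark/rewind shape, simplified to a single chunk:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    // Toy single-chunk bump allocator with mark/rewind (the real
    // VMAllocator also walks and frees whole chunks).
    class ToyArena {
    public:
        explicit ToyArena(size_t cap)
            : base((char*) std::calloc(1, cap)), top(base), limit(base + cap) {}
        ~ToyArena() { std::free(base); }

        void* alloc(size_t n) {
            assert(top + n <= limit);
            void* p = top;
            top += n;
            return p;
        }

        struct Mark {
            ToyArena& a;
            char* saved_top;
            bool committed;
            explicit Mark(ToyArena& a) : a(a), saved_top(a.top), committed(false) {}
            ~Mark() { if (!committed) a.rewind(*this); }
            void commit() { committed = true; }
        };

        void rewind(const Mark& m) {
            top = m.saved_top;
            std::memset(top, 0, limit - top);  // re-zero the freed tail
        }

    private:
        char* base;
        char* top;
        char* limit;
    };

    int main() {
        ToyArena arena(1024);
        {
            ToyArena::Mark mark(arena);
            arena.alloc(100);        // aborted recording: no commit()
        }                            // ~Mark rewinds the 100 bytes
        {
            ToyArena::Mark mark(arena);
            arena.alloc(100);
            mark.commit();           // successful compile: keep them
        }
        return 0;
    }
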
@@ -772,16 +811,17 @@ enum TypeConsensus
 {
     TypeConsensus_Okay,         /* Two typemaps are compatible */
     TypeConsensus_Undemotes,    /* Not compatible now, but would be with pending undemotes. */
     TypeConsensus_Bad           /* Typemaps are not compatible */
 };
 
 class TraceRecorder {
     VMAllocator&            tempAlloc;
+    VMAllocator::Mark       mark;
     JSContext*              cx;
     JSTraceMonitor*         traceMonitor;
     JSObject*               globalObj;
     JSObject*               lexicalBlock;
     Tracker                 tracker;
     Tracker                 nativeFrameTracker;
     unsigned                callDepth;
     JSAtom**                atoms;
--- a/js/src/nanojit/Allocator.h
+++ b/js/src/nanojit/Allocator.h
@@ -62,32 +62,32 @@ namespace nanojit
             if (current_top + nbytes <= current_limit) {
                 void *p = current_top;
                 current_top += nbytes;
                 return p;
             }
             return allocSlow(nbytes);
         }
 
-    private:
+    protected:
         void* allocSlow(size_t nbytes);
         void fill(size_t minbytes);
 
         class Chunk {
         public:
             Chunk* prev;
             int64_t data[1]; // int64_t forces 8-byte alignment.
         };
 
         Chunk* current_chunk;
         char* current_top;
         char* current_limit;
 
-    // allocator SPI
-    private:
+        // allocator SPI
+
         /** allocate another block from a host provided allocator */
         void* allocChunk(size_t nbytes);
 
         /** free back to the same allocator */
         void freeChunk(void*);
 
         /** hook for post-reset action. */
         void postReset();
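
The Allocator.h change is pure access widening: Chunk, the bump-pointer fields, and the chunk SPI move from private to protected so that VMAllocator, which derives from nanojit::Allocator, can implement rewind() by walking the chunk list and calling freeChunk directly. A skeleton of that relationship, simplified from the real classes:

    // Skeleton of the subclass access this change enables (simplified).
    namespace nanojit {
        class Allocator {
        protected:                  // was private before this patch
            class Chunk { public: Chunk* prev; int64_t data[1]; };
            Chunk* current_chunk;
            char*  current_top;
            char*  current_limit;
            void   freeChunk(void*);
        };
    }

    class VMAllocator : public nanojit::Allocator {
        // rewind() (see the jstracer.h hunk above) pops chunks via the
        // now-protected freeChunk and restores the saved bump pointers.
    };
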