--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -218,17 +218,17 @@ using namespace avmplus;
using namespace nanojit;
static GC gc = GC();
static avmplus::AvmCore s_core = avmplus::AvmCore();
static avmplus::AvmCore* core = &s_core;
#ifdef JS_JIT_SPEW
void
-js_DumpPeerStability(JSTraceMonitor* tm, const void* ip);
+js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, uint32 globalShape);
#endif
/* We really need a better way to configure the JIT. Shaver, where is my fancy JIT object? */
static bool nesting_enabled = true;
#if defined(NANOJIT_IA32)
static bool did_we_check_sse2 = false;
#endif
@@ -238,17 +238,17 @@ bool js_verboseDebug = getenv("TRACEMONK
/* The entire VM shares one oracle. Collisions and concurrent updates are tolerated and worst
case cause performance regressions. */
static Oracle oracle;
/* Blacklists the root peer fragment at a fragment's PC. This is so blacklisting stays at the
top of the peer list and not scattered around. */
void
-js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag);
+js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag, uint32 globalShape);
Tracker::Tracker()
{
pagelist = 0;
}
Tracker::~Tracker()
{
@@ -535,27 +535,24 @@ getVMFragment(JSTraceMonitor* tm, const
while (vf &&
! (vf->globalShape == globalShape &&
vf->ip == ip)) {
vf = vf->next;
}
return vf;
}
-// FIXME: remove the default parameters for globalShape when we're
-// actually keying by it.
-
static Fragment*
-getLoop(JSTraceMonitor* tm, const void *ip, uint32 globalShape = 0)
+getLoop(JSTraceMonitor* tm, const void *ip, uint32 globalShape)
{
return getVMFragment(tm, ip, globalShape);
}
static Fragment*
-getAnchor(JSTraceMonitor* tm, const void *ip, uint32 globalShape = 0)
+getAnchor(JSTraceMonitor* tm, const void *ip, uint32 globalShape)
{
LirBufWriter writer(tm->lirbuf);
char *fragmem = (char*) writer.skip(sizeof(VMFragment))->payload();
if (!fragmem)
return NULL;
VMFragment *f = new (fragmem) VMFragment(ip, globalShape);
JS_ASSERT(f);
@@ -1222,20 +1219,20 @@ TraceRecorder::TraceRecorder(JSContext*
lirbuf->rp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp)), "rp");
cx_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, cx)), "cx");
gp_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, gp)), "gp");
eos_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos)), "eos");
eor_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor)), "eor");
globalObj_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, globalObj)), "globalObj");
/* If we came from exit, we might not have enough global types. */
- if (JS_TRACE_MONITOR(cx).globalSlots->length() > ti->globalSlots()) {
+ if (ti->globalSlots->length() > ti->nGlobalTypes()) {
ti->typeMap.captureMissingGlobalTypes(cx,
- *JS_TRACE_MONITOR(cx).globalSlots,
- ti->stackSlots);
+ *(ti->globalSlots),
+ ti->nStackTypes);
}
/* read into registers all values on the stack and all globals we know so far */
import(treeInfo, lirbuf->sp, stackSlots, ngslots, callDepth, typeMap);
if (fragment == fragment->root) {
LIns* counter = lir->insLoadi(cx_ins,
offsetof(JSContext, operationCount));
@@ -1773,28 +1770,28 @@ TraceRecorder::import(TreeInfo* treeInfo
a different trace of the tree might have had a guard with a different type map for
these slots we just filled in here (the guard we continue from didn't know about them),
since we didn't take that particular guard, the only way we could have ended up here
is if that other trace had at its end a compatible type distribution with the entry
map. Since that's exactly what we used to fill in the types our current side exit
didn't provide, this is always safe to do. */
uint8* globalTypeMap = typeMap + stackSlots;
- unsigned length = treeInfo->globalSlots();
+ unsigned length = treeInfo->nGlobalTypes();
/* This is potentially the typemap of the side exit and thus shorter than the tree's
global type map. */
if (ngslots < length)
mergeTypeMaps(&globalTypeMap/*out param*/, &ngslots/*out param*/,
treeInfo->globalTypeMap(), length,
(uint8*)alloca(sizeof(uint8) * length));
- JS_ASSERT(ngslots == treeInfo->globalSlots());
+ JS_ASSERT(ngslots == treeInfo->nGlobalTypes());
/* the first time we compile a tree this will be empty as we add entries lazily */
- uint16* gslots = traceMonitor->globalSlots->data();
+ uint16* gslots = treeInfo->globalSlots->data();
uint8* m = globalTypeMap;
FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
import(gp_ins, nativeGlobalOffset(vp), vp, *m, vpname, vpnum, NULL);
m++;
);
ptrdiff_t offset = -treeInfo->nativeStackBase;
m = typeMap;
FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
@@ -1807,22 +1804,19 @@ TraceRecorder::import(TreeInfo* treeInfo
JS_REQUIRES_STACK bool
TraceRecorder::lazilyImportGlobalSlot(unsigned slot)
{
if (slot != uint16(slot)) /* we use a table of 16-bit ints, bail out if that's not enough */
return false;
jsval* vp = &STOBJ_GET_SLOT(globalObj, slot);
if (known(vp))
return true; /* we already have it */
- unsigned index = traceMonitor->globalSlots->length();
- /* If this the first global we are adding, remember the shape of the global object. */
- if (index == 0)
- traceMonitor->globalShape = OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain));
+ unsigned index = treeInfo->globalSlots->length();
/* Add the slot to the list of interned global slots. */
- traceMonitor->globalSlots->add(slot);
+ treeInfo->globalSlots->add(slot);
uint8 type = getCoercedType(*vp);
if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx, slot))
type = JSVAL_DOUBLE;
treeInfo->typeMap.add(type);
import(gp_ins, slot*sizeof(double), vp, type, "global", index, NULL);
return true;
}
@@ -1980,36 +1974,36 @@ js_IsLoopEdge(jsbytecode* pc, jsbytecode
}
/* Promote slots if necessary to match the called tree's type map and report an error if that's
impossible. */
JS_REQUIRES_STACK bool
TraceRecorder::adjustCallerTypes(Fragment* f)
{
JSTraceMonitor* tm = traceMonitor;
- uint16* gslots = tm->globalSlots->data();
- unsigned ngslots = tm->globalSlots->length();
- JS_ASSERT(ngslots == treeInfo->globalSlots());
+ uint16* gslots = treeInfo->globalSlots->data();
+ unsigned ngslots = treeInfo->globalSlots->length();
+ JS_ASSERT(ngslots == treeInfo->nGlobalTypes());
TreeInfo* ti = (TreeInfo*)f->vmprivate;
bool ok = true;
uint8* map = ti->globalTypeMap();
uint8* m = map;
FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
LIns* i = get(vp);
bool isPromote = isPromoteInt(i);
if (isPromote && *m == JSVAL_DOUBLE)
lir->insStorei(get(vp), gp_ins, nativeGlobalOffset(vp));
else if (!isPromote && *m == JSVAL_INT) {
debug_only_v(printf("adjusting will fail, %s%d, slot %d\n", vpname, vpnum, m - map);)
oracle.markGlobalSlotUndemotable(cx, gslots[n]);
ok = false;
}
++m;
);
- JS_ASSERT(unsigned(m - map) == ti->globalSlots());
+ JS_ASSERT(unsigned(m - map) == ti->nGlobalTypes());
map = ti->stackTypeMap();
m = map;
FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0,
LIns* i = get(vp);
bool isPromote = isPromoteInt(i);
if (isPromote && *m == JSVAL_DOUBLE) {
lir->insStorei(get(vp), lirbuf->sp,
-treeInfo->nativeStackBase + nativeStackOffset(vp));
@@ -2020,17 +2014,17 @@ TraceRecorder::adjustCallerTypes(Fragmen
ok = false;
oracle.markStackSlotUndemotable(cx, unsigned(m - map));
} else if (JSVAL_IS_INT(*vp) && *m == JSVAL_DOUBLE) {
/* Aggressively undo speculation so the inner tree will compile if this fails. */
oracle.markStackSlotUndemotable(cx, unsigned(m - map));
}
++m;
);
- JS_ASSERT(unsigned(m - map) == ti->stackSlots);
+ JS_ASSERT(unsigned(m - map) == ti->nStackTypes);
JS_ASSERT(f == f->root);
return ok;
}
JS_REQUIRES_STACK uint8
TraceRecorder::determineSlotType(jsval* vp)
{
uint8 m;
@@ -2115,25 +2109,25 @@ TraceRecorder::snapshot(ExitType exitTyp
/* Generate the entry map for the (possibly advanced) pc and stash it in the trace. */
unsigned stackSlots = js_NativeStackSlots(cx, callDepth);
/* It's sufficient to track the native stack use here since all stores above the
stack watermark defined by guards are killed. */
trackNativeStackUse(stackSlots + 1);
/* Capture the type map into a temporary location. */
- unsigned ngslots = traceMonitor->globalSlots->length();
+ unsigned ngslots = treeInfo->globalSlots->length();
unsigned typemap_size = (stackSlots + ngslots) * sizeof(uint8);
uint8* typemap = (uint8*)alloca(typemap_size);
uint8* m = typemap;
/* Determine the type of a store by looking at the current type of the actual value the
interpreter is using. For numbers we have to check what kind of store we used last
(integer or double) to figure out what the side exit should reflect in its typemap. */
- FORALL_SLOTS(cx, ngslots, traceMonitor->globalSlots->data(), callDepth,
+ FORALL_SLOTS(cx, ngslots, treeInfo->globalSlots->data(), callDepth,
*m++ = determineSlotType(vp);
);
JS_ASSERT(unsigned(m - typemap) == ngslots + stackSlots);
/* If we are capturing the stack state on a specific instruction, the value on
the top of the stack is a boxed value. */
if (resumeAfter) {
typemap[stackSlots - 1] = JSVAL_BOXED;
@@ -2319,19 +2313,19 @@ TraceRecorder::checkType(jsval& v, uint8
* @param demote True if stability was achieved through demotion.
* @return True if type stable, false otherwise.
*/
JS_REQUIRES_STACK bool
TraceRecorder::deduceTypeStability(Fragment* root_peer, Fragment** stable_peer, bool& demote)
{
uint8* m;
uint8* typemap;
- unsigned ngslots = traceMonitor->globalSlots->length();
- uint16* gslots = traceMonitor->globalSlots->data();
- JS_ASSERT(ngslots == treeInfo->globalSlots());
+ unsigned ngslots = treeInfo->globalSlots->length();
+ uint16* gslots = treeInfo->globalSlots->data();
+ JS_ASSERT(ngslots == treeInfo->nGlobalTypes());
if (stable_peer)
*stable_peer = NULL;
/*
* Rather than calculate all of this stuff twice, it gets cached locally. The "stage" buffers
* are for calls to set() that will change the exit types.
*/
@@ -2395,24 +2389,25 @@ checktype_fail_1:
Fragment* f;
TreeInfo* ti;
for (f = root_peer; f != NULL; f = f->peer) {
debug_only_v(printf("Checking type stability against peer=%p (code=%p)\n", f, f->code());)
if (!f->code())
continue;
ti = (TreeInfo*)f->vmprivate;
/* Don't allow varying stack depths */
- if ((ti->stackSlots != treeInfo->stackSlots) ||
- (ti->typeMap.length() != treeInfo->typeMap.length()))
+ if ((ti->nStackTypes != treeInfo->nStackTypes) ||
+ (ti->typeMap.length() != treeInfo->typeMap.length()) ||
+ (ti->globalSlots->length() != treeInfo->globalSlots->length()))
continue;
stage_count = 0;
success = false;
m = ti->globalTypeMap();
- FORALL_GLOBAL_SLOTS(cx, traceMonitor->globalSlots->length(), traceMonitor->globalSlots->data(),
+ FORALL_GLOBAL_SLOTS(cx, treeInfo->globalSlots->length(), treeInfo->globalSlots->data(),
if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count))
goto checktype_fail_2;
++m;
);
m = ti->stackTypeMap();
FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0,
if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count))
@@ -2439,17 +2434,17 @@ checktype_fail_2:
/*
* If this is a loop trace and it would be stable with demotions, build an undemote list
* and return true. Our caller should sniff this and trash the tree, recording a new one
* that will presumably stabilize.
*/
if (demote && fragment->kind == LoopTrace) {
typemap = m = treeInfo->globalTypeMap();
- FORALL_GLOBAL_SLOTS(cx, traceMonitor->globalSlots->length(), traceMonitor->globalSlots->data(),
+ FORALL_GLOBAL_SLOTS(cx, treeInfo->globalSlots->length(), treeInfo->globalSlots->data(),
if (*m == JSVAL_INT) {
JS_ASSERT(isNumber(*vp));
if (!isPromoteInt(get(vp)))
oracle.markGlobalSlotUndemotable(cx, gslots[n]);
} else if (*m == JSVAL_DOUBLE) {
JS_ASSERT(isNumber(*vp));
oracle.markGlobalSlotUndemotable(cx, gslots[n]);
} else {
@@ -2489,29 +2484,29 @@ TraceRecorder::isLoopHeader(JSContext* c
/* Compile the current fragment. */
JS_REQUIRES_STACK void
TraceRecorder::compile(JSTraceMonitor* tm)
{
Fragmento* fragmento = tm->fragmento;
if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
debug_only_v(printf("Trace rejected: excessive stack use.\n"));
- js_BlacklistPC(tm, fragment);
+ js_BlacklistPC(tm, fragment, treeInfo->globalShape);
return;
}
++treeInfo->branchCount;
if (lirbuf->outOMem()) {
fragmento->assm()->setError(nanojit::OutOMem);
return;
}
::compile(fragmento->assm(), fragment);
if (fragmento->assm()->error() == nanojit::OutOMem)
return;
if (fragmento->assm()->error() != nanojit::None) {
- js_BlacklistPC(tm, fragment);
+ js_BlacklistPC(tm, fragment, treeInfo->globalShape);
return;
}
if (anchor)
fragmento->assm()->patch(anchor);
JS_ASSERT(fragment->code());
JS_ASSERT(!fragment->vmprivate);
if (fragment == fragment->root)
fragment->vmprivate = treeInfo;
@@ -2526,17 +2521,17 @@ TraceRecorder::compile(JSTraceMonitor* t
#endif
AUDIT(traceCompleted);
}
static bool
js_JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree,
VMSideExit* exit)
{
- JS_ASSERT(exit->numStackSlots == stableTree->stackSlots);
+ JS_ASSERT(exit->numStackSlots == stableTree->nStackTypes);
/* Must have a matching type unstable exit. */
if ((exit->numGlobalSlots + exit->numStackSlots != stableTree->typeMap.length()) ||
memcmp(getFullTypeMap(exit), stableTree->typeMap.data(), stableTree->typeMap.length())) {
return false;
}
exit->target = stableFrag;
@@ -2558,24 +2553,24 @@ TraceRecorder::closeLoop(JSTraceMonitor*
Fragment* peer_root;
Fragmento* fragmento = tm->fragmento;
exitIns = snapshot(UNSTABLE_LOOP_EXIT);
exit = (VMSideExit*)((GuardRecord*)exitIns->payload())->exit;
if (callDepth != 0) {
debug_only_v(printf("Stack depth mismatch, possible recursion\n");)
- js_BlacklistPC(tm, fragment);
+ js_BlacklistPC(tm, fragment, treeInfo->globalShape);
trashSelf = true;
return false;
}
- JS_ASSERT(exit->numStackSlots == treeInfo->stackSlots);
-
- peer_root = getLoop(traceMonitor, fragment->root->ip);
+ JS_ASSERT(exit->numStackSlots == treeInfo->nStackTypes);
+
+ peer_root = getLoop(traceMonitor, fragment->root->ip, treeInfo->globalShape);
JS_ASSERT(peer_root != NULL);
stable = deduceTypeStability(peer_root, &peer, demote);
#if DEBUG
if (!stable)
AUDIT(unstableLoopVariable);
#endif
@@ -2647,18 +2642,18 @@ JS_REQUIRES_STACK void
TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, Fragment* peer_root)
{
if (fragment->kind == LoopTrace) {
JSTraceMonitor* tm = traceMonitor;
TreeInfo* ti;
Fragment* peer;
uint8* t1, *t2;
UnstableExit* uexit, **unext;
- uint32* stackDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->stackSlots);
- uint32* globalDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->globalSlots());
+ uint32* stackDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->nStackTypes);
+ uint32* globalDemotes = (uint32*)alloca(sizeof(uint32) * treeInfo->nGlobalTypes());
for (peer = peer_root; peer != NULL; peer = peer->peer) {
if (!peer->code())
continue;
ti = (TreeInfo*)peer->vmprivate;
uexit = ti->unstableExits;
unext = &ti->unstableExits;
while (uexit != NULL) {
@@ -2694,17 +2689,17 @@ TraceRecorder::joinEdgesToEntry(Fragment
stackCount = 0;
break;
}
}
if (stackCount || globalCount) {
for (unsigned i = 0; i < stackCount; i++)
oracle.markStackSlotUndemotable(cx, stackDemotes[i]);
for (unsigned i = 0; i < globalCount; i++)
- oracle.markGlobalSlotUndemotable(cx, tm->globalSlots->data()[globalDemotes[i]]);
+ oracle.markGlobalSlotUndemotable(cx, ti->globalSlots->data()[globalDemotes[i]]);
JS_ASSERT(peer == uexit->fragment->root);
if (fragment == peer)
trashSelf = true;
else
whichTreesToTrash.addUnique(uexit->fragment->root);
break;
}
}
@@ -2715,39 +2710,39 @@ TraceRecorder::joinEdgesToEntry(Fragment
} else {
unext = &uexit->next;
uexit = uexit->next;
}
}
}
}
- debug_only_v(js_DumpPeerStability(traceMonitor, peer_root->ip);)
+ debug_only_v(js_DumpPeerStability(traceMonitor, peer_root->ip, treeInfo->globalShape);)
}
/* Emit an always-exit guard and compile the tree (used for break statements). */
JS_REQUIRES_STACK void
TraceRecorder::endLoop(JSTraceMonitor* tm)
{
LIns* exitIns = snapshot(LOOP_EXIT);
if (callDepth != 0) {
debug_only_v(printf("Stack depth mismatch, possible recursion\n");)
- js_BlacklistPC(tm, fragment);
+ js_BlacklistPC(tm, fragment, treeInfo->globalShape);
trashSelf = true;
return;
}
fragment->lastIns = lir->insGuard(LIR_x, lir->insImm(1), exitIns);
compile(tm);
if (tm->fragmento->assm()->error() != nanojit::None)
return;
- joinEdgesToEntry(tm->fragmento, getLoop(tm, fragment->root->ip));
+ joinEdgesToEntry(tm->fragmento, getLoop(tm, fragment->root->ip, treeInfo->globalShape));
debug_only_v(printf("recording completed at %s:%u@%u via endLoop\n",
cx->fp->script->filename,
js_FramePCToLineNumber(cx, cx->fp),
FramePCOffset(cx->fp));)
}
/* Emit code to adjust the stack to match the inner tree's stack expectations. */
@@ -2979,26 +2974,67 @@ js_DeleteRecorder(JSContext* cx)
return true;
}
/**
* Checks whether the shape of the global object has changed.
*/
static inline bool
-js_CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj)
-{
- /* Check the global shape. */
- if (OBJ_SHAPE(globalObj) != tm->globalShape) {
- AUDIT(globalShapeMismatchAtEntry);
- debug_only_v(printf("Global shape mismatch (%u vs. %u), flushing cache.\n",
- OBJ_SHAPE(globalObj), tm->globalShape);)
+js_CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj,
+ uint32 *shape=NULL, SlotList** slots=NULL)
+{
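+    /* On success, the current global shape and its interned slot list are
+       returned through the optional out-params. A pending flush request is
+       reported as a mismatch so the caller flushes the JIT cache first. */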
+ if (tm->needFlush) {
+ tm->needFlush = JS_FALSE;
return false;
}
- return true;
+
+ uint32 globalShape = OBJ_SHAPE(globalObj);
+
+ if (tm->recorder) {
+ TreeInfo* ti = tm->recorder->getTreeInfo();
+        /* Check that the global shape matches the shape recorded in the tree's TreeInfo. */
+ if (globalShape != ti->globalShape) {
+ AUDIT(globalShapeMismatchAtEntry);
+ debug_only_v(printf("Global shape mismatch (%u vs. %u), flushing cache.\n",
+ globalShape, ti->globalShape);)
+ return false;
+ }
+ if (shape)
+ *shape = globalShape;
+ if (slots)
+ *slots = ti->globalSlots;
+ return true;
+ }
+
+    /* No active recorder; search for a tracked global state (or claim an unused entry). */
+ for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
+
+ GlobalState &state = tm->globalStates[i];
+
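+        /* Claim an unused entry (shape -1) for this global shape; its slot list starts out empty. */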
+ if (state.globalShape == -1) {
+ state.globalShape = globalShape;
+ JS_ASSERT(state.globalSlots);
+ JS_ASSERT(state.globalSlots->length() == 0);
+ }
+
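+        /* Use this entry if it already tracked our shape, or if we just claimed it above. */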
+        if (state.globalShape == globalShape) {
+ if (shape)
+ *shape = globalShape;
+ if (slots)
+ *slots = state.globalSlots;
+ return true;
+ }
+ }
+
+    /* No tracked global state matches this shape and there is no room to allocate one; fail. */
+ AUDIT(globalShapeMismatchAtEntry);
+ debug_only_v(printf("No global slotlist for global shape %u, flushing cache.\n",
+ globalShape));
+ return false;
}
static JS_REQUIRES_STACK bool
js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti,
unsigned stackSlots, unsigned ngslots, uint8* typeMap,
VMSideExit* expectedInnerExit, Fragment* outer)
{
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
@@ -3228,17 +3264,18 @@ js_dumpMap(TypeMap const & tm) {
uint8 *data = tm.data();
for (unsigned i = 0; i < tm.length(); ++i) {
printf("typemap[%d] = %c\n", i, typeChar[data[i]]);
}
}
#endif
JS_REQUIRES_STACK bool
-js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer)
+js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer,
+ uint32 globalShape, SlotList* globalSlots)
{
JS_ASSERT(f->root == f);
/* Avoid recording loops in overlarge scripts. */
if (cx->fp->script->length >= SCRIPT_PC_ADJ_LIMIT) {
js_AbortRecording(cx, "script too large");
return false;
}
@@ -3251,17 +3288,17 @@ js_RecordTree(JSContext* cx, JSTraceMoni
}
AUDIT(recorderStarted);
/* Try to find an unused peer fragment, or allocate a new one. */
while (f->code() && f->peer)
f = f->peer;
if (f->code())
- f = getAnchor(&JS_TRACE_MONITOR(cx), f->root->ip);
+ f = getAnchor(&JS_TRACE_MONITOR(cx), f->root->ip, globalShape);
if (!f) {
js_FlushJITCache(cx);
return false;
}
f->recordAttempts++;
f->root = f;
@@ -3271,164 +3308,159 @@ js_RecordTree(JSContext* cx, JSTraceMoni
js_FlushJITCache(cx);
debug_only_v(printf("Out of memory recording new tree, flushing cache.\n");)
return false;
}
JS_ASSERT(!f->code() && !f->vmprivate);
/* set up the VM-private treeInfo structure for this fragment */
- TreeInfo* ti = new (&gc) TreeInfo(f);
+ TreeInfo* ti = new (&gc) TreeInfo(f, globalShape, globalSlots);
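+    /* The tree remembers the global shape and interned slot list it is recorded against. */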
/* capture the coerced type of each active slot in the type map */
- SlotList& globalSlots = *tm->globalSlots;
- ti->typeMap.captureTypes(cx, globalSlots, 0/*callDepth*/);
- ti->stackSlots = ti->typeMap.length() - globalSlots.length();
+ ti->typeMap.captureTypes(cx, *globalSlots, 0/*callDepth*/);
+ ti->nStackTypes = ti->typeMap.length() - globalSlots->length();
/* Check for duplicate entry type maps. This is always wrong and hints at trace explosion
since we are trying to stabilize something without properly connecting peer edges. */
#ifdef DEBUG
TreeInfo* ti_other;
- for (Fragment* peer = getLoop(tm, f->root->ip); peer != NULL; peer = peer->peer) {
+ for (Fragment* peer = getLoop(tm, f->root->ip, globalShape); peer != NULL; peer = peer->peer) {
if (!peer->code() || peer == f)
continue;
ti_other = (TreeInfo*)peer->vmprivate;
JS_ASSERT(ti_other);
JS_ASSERT(!ti->typeMap.matches(ti_other->typeMap));
}
#endif
/* determine the native frame layout at the entry point */
- unsigned entryNativeStackSlots = ti->stackSlots;
+ unsigned entryNativeStackSlots = ti->nStackTypes;
JS_ASSERT(entryNativeStackSlots == js_NativeStackSlots(cx, 0/*callDepth*/));
ti->nativeStackBase = (entryNativeStackSlots -
(cx->fp->regs->sp - StackBase(cx->fp))) * sizeof(double);
ti->maxNativeStackSlots = entryNativeStackSlots;
ti->maxCallDepth = 0;
ti->script = cx->fp->script;
/* recording primary trace */
if (!js_StartRecorder(cx, NULL, f, ti,
- ti->stackSlots,
- tm->globalSlots->length(),
+ ti->nStackTypes,
+ ti->globalSlots->length(),
ti->typeMap.data(), NULL, outer)) {
return false;
}
return true;
}
-JS_REQUIRES_STACK static inline bool isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot)
-{
- if (slot < ti->stackSlots)
+JS_REQUIRES_STACK static inline bool
+isSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot)
+{
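+    /* Slots below nStackTypes are stack slots; the rest index into the tree's global slot list. */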
+ if (slot < ti->nStackTypes)
return oracle.isStackSlotUndemotable(cx, slot);
-
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- uint16* gslots = tm->globalSlots->data();
- return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->stackSlots]);
+
+ uint16* gslots = ti->globalSlots->data();
+ return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]);
}
JS_REQUIRES_STACK static bool
js_AttemptToStabilizeTree(JSContext* cx, VMSideExit* exit, Fragment* outer)
{
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
Fragment* from = exit->from->root;
+ TreeInfo* from_ti = (TreeInfo*)from->vmprivate;
JS_ASSERT(exit->from->root->code());
/* Make sure any doubles are not accidentally undemoted */
uint8* m = getStackTypeMap(exit);
for (unsigned i = 0; i < exit->numStackSlots; i++) {
if (m[i] == JSVAL_DOUBLE)
oracle.markStackSlotUndemotable(cx, i);
}
m = getGlobalTypeMap(exit);
for (unsigned i = 0; i < exit->numGlobalSlots; i++) {
if (m[i] == JSVAL_DOUBLE)
- oracle.markGlobalSlotUndemotable(cx, tm->globalSlots->data()[i]);
+ oracle.markGlobalSlotUndemotable(cx, from_ti->globalSlots->data()[i]);
}
/* If this exit does not have enough globals, there might exist a peer with more globals that we
* can join to.
*/
- uint8* m2;
- Fragment* f;
- TreeInfo* ti;
- bool matched;
- bool undemote;
bool bound = false;
- unsigned int checkSlots;
- for (f = from->first; f != NULL; f = f->peer) {
+ for (Fragment* f = from->first; f != NULL; f = f->peer) {
if (!f->code())
continue;
- ti = (TreeInfo*)f->vmprivate;
- JS_ASSERT(exit->numStackSlots == ti->stackSlots);
+ TreeInfo* ti = (TreeInfo*)f->vmprivate;
+ JS_ASSERT(exit->numStackSlots == ti->nStackTypes);
/* Check the minimum number of slots that need to be compared. */
- checkSlots = JS_MIN(exit->numStackSlots + exit->numGlobalSlots, ti->typeMap.length());
+ unsigned checkSlots = JS_MIN(exit->numStackSlots + exit->numGlobalSlots, ti->typeMap.length());
m = getFullTypeMap(exit);
- m2 = ti->typeMap.data();
+ uint8* m2 = ti->typeMap.data();
/* Analyze the exit typemap against the peer typemap.
* Two conditions are important:
* 1) Typemaps are identical: these peers can be attached.
* 2) Typemaps do not match, but only contain I->D mismatches.
* In this case, the original tree must be trashed because it
* will never connect to any peer.
*/
- matched = true;
- undemote = false;
+ bool matched = true;
+ bool undemote = false;
for (uint32 i = 0; i < checkSlots; i++) {
/* If the types are equal we're okay. */
if (m[i] == m2[i])
continue;
matched = false;
/* If there's an I->D that cannot be resolved, flag it.
* Otherwise, break and go to the next peer.
*/
if (m[i] == JSVAL_INT && m2[i] == JSVAL_DOUBLE && isSlotUndemotable(cx, ti, i)) {
undemote = true;
} else {
undemote = false;
break;
}
}
- if (matched) {
+ if (matched) {
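+            /* Peers of the same tree are keyed on the same global shape and share one slot list. */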
+ JS_ASSERT(from_ti->globalSlots == ti->globalSlots);
+ JS_ASSERT(from_ti->nStackTypes == ti->nStackTypes);
/* Capture missing globals on both trees and link the fragments together. */
if (from != f) {
ti->dependentTrees.addUnique(from);
- ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
+ ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes);
}
- ti = (TreeInfo*)from->vmprivate;
- ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
+ from_ti->typeMap.captureMissingGlobalTypes(cx, *from_ti->globalSlots, from_ti->nStackTypes);
exit->target = f;
tm->fragmento->assm()->patch(exit);
/* Now erase this exit from the unstable exit list. */
- UnstableExit** tail = &ti->unstableExits;
- for (UnstableExit* uexit = ti->unstableExits; uexit != NULL; uexit = uexit->next) {
+ UnstableExit** tail = &from_ti->unstableExits;
+ for (UnstableExit* uexit = from_ti->unstableExits; uexit != NULL; uexit = uexit->next) {
if (uexit->exit == exit) {
*tail = uexit->next;
delete uexit;
bound = true;
break;
}
tail = &uexit->next;
}
JS_ASSERT(bound);
- debug_only_v(js_DumpPeerStability(tm, f->ip);)
+ debug_only_v(js_DumpPeerStability(tm, f->ip, from_ti->globalShape);)
break;
} else if (undemote) {
/* The original tree is unconnectable, so trash it. */
js_TrashTree(cx, f);
/* We shouldn't attempt to record now, since we'll hit a duplicate. */
return false;
}
}
if (bound)
return false;
- return js_RecordTree(cx, tm, from->first, outer);
+ return js_RecordTree(cx, tm, from->first, outer, from_ti->globalShape, from_ti->globalSlots);
}
static JS_REQUIRES_STACK bool
js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, Fragment* outer)
{
Fragment* f = anchor->from->root;
JS_ASSERT(f->vmprivate);
TreeInfo* ti = (TreeInfo*)f->vmprivate;
@@ -3499,67 +3531,73 @@ js_CloseLoop(JSContext* cx)
if (fragmento->assm()->error()) {
js_AbortRecording(cx, "Error during recording");
return false;
}
bool demote = false;
Fragment* f = r->getFragment();
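+    /* Remember the tree's global shape and slot list; the recorder is deleted below. */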
+ TreeInfo* ti = r->getTreeInfo();
+ uint32 globalShape = ti->globalShape;
+ SlotList* globalSlots = ti->globalSlots;
r->closeLoop(tm, demote);
/*
* If js_DeleteRecorder flushed the code cache, we can't rely on f any more.
*/
if (!js_DeleteRecorder(cx))
return false;
/*
* If we just walked out of a thin loop, we can't immediately start the
* compiler again here since we didn't return to the loop header.
*/
if (demote && !walkedOutOfLoop)
- return js_RecordTree(cx, tm, f, NULL);
+ return js_RecordTree(cx, tm, f, NULL, globalShape, globalSlots);
return false;
}
JS_REQUIRES_STACK bool
js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
{
#ifdef JS_THREADSAFE
if (OBJ_SCOPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain))->title.ownercx != cx) {
js_AbortRecording(cx, "Global object not owned by this context");
return false; /* we stay away from shared global objects */
}
#endif
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
+ TreeInfo* ti = r->getTreeInfo();
/* Process deep abort requests. */
if (r->wasDeepAborted()) {
js_AbortRecording(cx, "deep abort requested");
return false;
}
/* If we hit our own loop header, close the loop and compile the trace. */
if (r->isLoopHeader(cx))
return js_CloseLoop(cx);
/* does this branch go to an inner loop? */
- Fragment* f = getLoop(&JS_TRACE_MONITOR(cx), cx->fp->regs->pc);
+ Fragment* f = getLoop(&JS_TRACE_MONITOR(cx), cx->fp->regs->pc, ti->globalShape);
Fragment* peer_root = f;
if (nesting_enabled && f) {
/* Make sure inner tree call will not run into an out-of-memory condition. */
if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) &&
!js_ReplenishReservedPool(cx, tm)) {
js_AbortRecording(cx, "Couldn't call inner tree (out of memory)");
return false;
}
/* Make sure the shape of the global object still matches (this might flush
the JIT cache). */
JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
- if (!js_CheckGlobalObjectShape(cx, tm, globalObj)) {
+ uint32 globalShape = -1;
+ SlotList* globalSlots = NULL;
+ if (!js_CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) {
js_AbortRecording(cx, "Couldn't call inner tree (prep failed)");
return false;
}
debug_only_v(printf("Looking for type-compatible peer (%s:%d@%d)\n",
cx->fp->script->filename,
js_FramePCToLineNumber(cx, cx->fp),
FramePCOffset(cx->fp));)
@@ -3571,33 +3609,33 @@ js_RecordLoopEdge(JSContext* cx, TraceRe
f = r->findNestedCompatiblePeer(f, &empty);
if (f && f->code())
success = r->adjustCallerTypes(f);
if (!success) {
AUDIT(noCompatInnerTrees);
debug_only_v(printf("No compatible inner tree (%p).\n", f);)
- Fragment* old = getLoop(tm, tm->recorder->getFragment()->root->ip);
+ Fragment* old = getLoop(tm, tm->recorder->getFragment()->root->ip, ti->globalShape);
if (old == NULL)
old = tm->recorder->getFragment();
js_AbortRecording(cx, "No compatible inner tree");
if (!f && oracle.hit(peer_root->ip) < MAX_INNER_RECORD_BLACKLIST)
return false;
if (old->recordAttempts < MAX_MISMATCH)
oracle.resetHits(old->ip);
f = empty;
if (!f) {
- f = getAnchor(tm, cx->fp->regs->pc);
+ f = getAnchor(tm, cx->fp->regs->pc, globalShape);
if (!f) {
js_FlushJITCache(cx);
return false;
}
}
- return js_RecordTree(cx, tm, f, old);
+ return js_RecordTree(cx, tm, f, old, globalShape, globalSlots);
}
r->prepareTreeCall(f);
VMSideExit* innermostNestedGuard = NULL;
VMSideExit* lr = js_ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard);
if (!lr) {
js_AbortRecording(cx, "Couldn't call inner tree");
return false;
@@ -3610,23 +3648,23 @@ js_RecordLoopEdge(JSContext* cx, TraceRe
js_AbortRecording(cx, "Inner tree took different side exit, abort recording");
return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, NULL);
}
/* emit a call to the inner tree and continue recording the outer tree trace */
r->emitTreeCall(f, lr);
return true;
case UNSTABLE_LOOP_EXIT:
/* abort recording so the inner loop can become type stable. */
- old = getLoop(tm, tm->recorder->getFragment()->root->ip);
+ old = getLoop(tm, tm->recorder->getFragment()->root->ip, ti->globalShape);
js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording");
oracle.resetHits(old->ip);
return js_AttemptToStabilizeTree(cx, lr, old);
case BRANCH_EXIT:
/* abort recording the outer tree, extend the inner tree */
- old = getLoop(tm, tm->recorder->getFragment()->root->ip);
+ old = getLoop(tm, tm->recorder->getFragment()->root->ip, ti->globalShape);
js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
oracle.resetHits(old->ip);
return js_AttemptToExtendTree(cx, lr, NULL, old);
default:
debug_only_v(printf("exit_type=%d\n", lr->exitType);)
js_AbortRecording(cx, "Inner tree not suitable for calling");
return false;
}
@@ -3691,18 +3729,18 @@ TraceRecorder::findNestedCompatiblePeer(
JSTraceMonitor* tm;
unsigned max_demotes;
if (empty)
*empty = NULL;
demote = NULL;
tm = &JS_TRACE_MONITOR(cx);
- unsigned int ngslots = tm->globalSlots->length();
- uint16* gslots = tm->globalSlots->data();
+ unsigned int ngslots = treeInfo->globalSlots->length();
+ uint16* gslots = treeInfo->globalSlots->data();
/* We keep a maximum tally - we want to select the peer most likely to work so we don't keep
* recording.
*/
max_demotes = 0;
TreeInfo* ti;
for (; f != NULL; f = f->peer) {
@@ -3712,18 +3750,18 @@ TraceRecorder::findNestedCompatiblePeer(
continue;
}
unsigned demotes = 0;
ti = (TreeInfo*)f->vmprivate;
debug_only_v(printf("checking nested types %p: ", f);)
- if (ngslots > ti->globalSlots())
- ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
+ if (ngslots > ti->nGlobalTypes())
+ ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes);
uint8* m = ti->typeMap.data();
FORALL_SLOTS(cx, ngslots, gslots, 0,
debug_only_v(printf("%s%d=", vpname, vpnum);)
if (!js_IsEntryTypeCompatible(vp, m))
goto check_fail;
if (*m == JSVAL_STRING && *vp == JSVAL_VOID)
@@ -3762,32 +3800,29 @@ check_fail:
*
* @param cx Context.
* @param ti Tree info of peer we're testing.
* @return True if compatible (with or without demotions), false otherwise.
*/
static JS_REQUIRES_STACK bool
js_CheckEntryTypes(JSContext* cx, TreeInfo* ti)
{
- JSTraceMonitor* tm;
-
- tm = &JS_TRACE_MONITOR(cx);
- unsigned int ngslots = tm->globalSlots->length();
- uint16* gslots = tm->globalSlots->data();
-
- JS_ASSERT(ti->stackSlots == js_NativeStackSlots(cx, 0));
-
- if (ngslots > ti->globalSlots())
- ti->typeMap.captureMissingGlobalTypes(cx, *tm->globalSlots, ti->stackSlots);
+ unsigned int ngslots = ti->globalSlots->length();
+ uint16* gslots = ti->globalSlots->data();
+
+ JS_ASSERT(ti->nStackTypes == js_NativeStackSlots(cx, 0));
+
+ if (ngslots > ti->nGlobalTypes())
+ ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes);
uint8* m = ti->typeMap.data();
JS_ASSERT(ti->typeMap.length() == js_NativeStackSlots(cx, 0) + ngslots);
- JS_ASSERT(ti->typeMap.length() == ti->stackSlots + ngslots);
- JS_ASSERT(ti->globalSlots() == ngslots);
+ JS_ASSERT(ti->typeMap.length() == ti->nStackTypes + ngslots);
+ JS_ASSERT(ti->nGlobalTypes() == ngslots);
FORALL_SLOTS(cx, ngslots, gslots, 0,
debug_only_v(printf("%s%d=", vpname, vpnum);)
JS_ASSERT(*m != 0xCD);
if (!js_IsEntryTypeCompatible(vp, m))
goto check_fail;
m++;
);
JS_ASSERT(unsigned(m - ti->typeMap.data()) == ti->typeMap.length());
@@ -3827,25 +3862,25 @@ static VMSideExit*
js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
VMSideExit** innermostNestedGuardp)
{
JS_ASSERT(f->code() && f->vmprivate);
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
TreeInfo* ti = (TreeInfo*)f->vmprivate;
- unsigned ngslots = tm->globalSlots->length();
- uint16* gslots = tm->globalSlots->data();
+ unsigned ngslots = ti->globalSlots->length();
+ uint16* gslots = ti->globalSlots->data();
unsigned globalFrameSize = STOBJ_NSLOTS(globalObj);
double* global = (double*)alloca((globalFrameSize+1) * sizeof(double));
double stack_buffer[MAX_NATIVE_STACK_SLOTS];
double* stack = stack_buffer;
/* Make sure the global object is sane. */
- JS_ASSERT(!ngslots || (OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == tm->globalShape));
+ JS_ASSERT(!ngslots || (OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) == ti->globalShape));
/* Make sure our caller replenished the double pool. */
JS_ASSERT(tm->reservedDoublePoolPtr >= tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS);
/* Reserve objects and stack space now, to make leaving the tree infallible. */
void *reserve;
void *stackMark = JS_ARENA_MARK(&cx->stackPool);
if (!js_ReserveObjects(cx, MAX_CALL_STACK_ENTRIES))
return NULL;
@@ -3862,17 +3897,17 @@ js_ExecuteTree(JSContext* cx, Fragment*
debug_only(*(uint64*)&global[globalFrameSize] = 0xdeadbeefdeadbeefLL;)
debug_only_v(printf("entering trace at %s:%u@%u, native stack slots: %u code: %p\n",
cx->fp->script->filename,
js_FramePCToLineNumber(cx, cx->fp),
FramePCOffset(cx->fp),
ti->maxNativeStackSlots,
f->code());)
- JS_ASSERT(ti->globalSlots() == ngslots);
+ JS_ASSERT(ti->nGlobalTypes() == ngslots);
if (ngslots)
BuildNativeGlobalFrame(cx, ngslots, gslots, ti->globalTypeMap(), global);
BuildNativeStackFrame(cx, 0/*callDepth*/, ti->typeMap.data(), stack);
double* entry_sp = &stack[ti->nativeStackBase/sizeof(double)];
FrameInfo* callstack_buffer[MAX_CALL_STACK_ENTRIES];
FrameInfo** callstack = callstack_buffer;
@@ -4031,23 +4066,23 @@ js_ExecuteTree(JSContext* cx, Fragment*
calldepth,
cycles));
/* If this trace is part of a tree, later branches might have added additional globals for
which we don't have any type information available in the side exit. We merge in this
information from the entry type-map. See also the comment in the TraceRecorder constructor on
why this is always safe to do. */
unsigned exit_gslots = innermost->numGlobalSlots;
- JS_ASSERT(ngslots == ti->globalSlots());
+ JS_ASSERT(ngslots == ti->nGlobalTypes());
JS_ASSERT(ngslots >= exit_gslots);
uint8* globalTypeMap = getGlobalTypeMap(innermost);
if (exit_gslots < ngslots)
mergeTypeMaps(&globalTypeMap, &exit_gslots, ti->globalTypeMap(), ngslots,
(uint8*)alloca(sizeof(uint8) * ngslots));
- JS_ASSERT(exit_gslots == ti->globalSlots());
+ JS_ASSERT(exit_gslots == ti->nGlobalTypes());
/* write back interned globals */
FlushNativeGlobalFrame(cx, exit_gslots, gslots, globalTypeMap, global);
JS_ASSERT_IF(ngslots != 0, globalFrameSize == STOBJ_NSLOTS(globalObj));
JS_ASSERT(*(uint64*)&global[globalFrameSize] == 0xdeadbeefdeadbeefLL);
/* write back native stack frame */
#ifdef DEBUG
@@ -4091,44 +4126,47 @@ js_MonitorLoopEdge(JSContext* cx, uintN&
/* Check the pool of reserved doubles (this might trigger a GC). */
if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) &&
!js_ReplenishReservedPool(cx, tm)) {
return false; /* Out of memory, don't try to record now. */
}
/* Make sure the shape of the global object still matches (this might flush the JIT cache). */
JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
- if (!js_CheckGlobalObjectShape(cx, tm, globalObj))
+ uint32 globalShape = -1;
+ SlotList* globalSlots = NULL;
+
+ if (!js_CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots))
js_FlushJITCache(cx);
jsbytecode* pc = cx->fp->regs->pc;
if (oracle.getHits(pc) >= 0 &&
oracle.getHits(pc)+1 < HOTLOOP) {
oracle.hit(pc);
return false;
}
- Fragment* f = getLoop(tm, pc);
+ Fragment* f = getLoop(tm, pc, globalShape);
if (!f)
- f = getAnchor(tm, pc);
+ f = getAnchor(tm, pc, globalShape);
if (!f) {
js_FlushJITCache(cx);
return false;
}
/* If we have no code in the anchor and no peers, we definitively won't be able to
activate any trees, so start compiling. */
if (!f->code() && !f->peer) {
monitor_loop:
if (oracle.hit(pc) >= HOTLOOP) {
/* We can give RecordTree the root peer. If that peer is already taken, it will
walk the peer list and find us a free slot or allocate a new tree if needed. */
- return js_RecordTree(cx, tm, f->first, NULL);
+ return js_RecordTree(cx, tm, f->first, NULL, globalShape, globalSlots);
}
/* Threshold not reached yet. */
return false;
}
debug_only_v(printf("Looking for compat peer %d@%d, from %p (ip: %p, hits=%d)\n",
js_FramePCToLineNumber(cx, cx->fp),
FramePCOffset(cx->fp),
@@ -4257,20 +4295,20 @@ TraceRecorder::monitorRecording(JSContex
abort_recording:
js_AbortRecording(cx, js_CodeName[op]);
return JSMRS_STOP;
}
/* If used on a loop trace, blacklists the root peer instead of the given fragment. */
void
-js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag)
+js_BlacklistPC(JSTraceMonitor* tm, Fragment* frag, uint32 globalShape)
{
if (frag->kind == LoopTrace)
- frag = getLoop(tm, frag->ip);
+ frag = getLoop(tm, frag->ip, globalShape);
oracle.blacklist(frag->ip);
}
JS_REQUIRES_STACK void
js_AbortRecording(JSContext* cx, const char* reason)
{
JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
JS_ASSERT(tm->recorder != NULL);
@@ -4285,21 +4323,22 @@ js_AbortRecording(JSContext* cx, const c
reason);)
}
Fragment* f = tm->recorder->getFragment();
if (!f) {
js_DeleteRecorder(cx);
return;
}
JS_ASSERT(!f->vmprivate);
- js_BlacklistPC(tm, f);
+ uint32 globalShape = tm->recorder->getTreeInfo()->globalShape;
+ js_BlacklistPC(tm, f, globalShape);
Fragment* outer = tm->recorder->getOuterToBlacklist();
/* Give outer two chances to stabilize before we start blacklisting. */
if (outer != NULL && outer->recordAttempts >= 2)
- js_BlacklistPC(tm, outer);
+ js_BlacklistPC(tm, outer, globalShape);
/*
* If js_DeleteRecorder flushed the code cache, we can't rely on f any more.
*/
if (!js_DeleteRecorder(cx))
return;
/*
@@ -4356,25 +4395,29 @@ js_InitJIT(JSTraceMonitor *tm)
#if defined NANOJIT_IA32
if (!did_we_check_sse2) {
avmplus::AvmCore::config.use_cmov =
avmplus::AvmCore::config.sse2 = js_CheckForSSE2();
did_we_check_sse2 = true;
}
#endif
if (!tm->fragmento) {
- JS_ASSERT(!tm->globalSlots && !tm->reservedDoublePool);
+ JS_ASSERT(!tm->reservedDoublePool);
Fragmento* fragmento = new (&gc) Fragmento(core, 24);
verbose_only(fragmento->labels = new (&gc) LabelMap(core, NULL);)
tm->fragmento = fragmento;
tm->lirbuf = new (&gc) LirBuffer(fragmento, NULL);
#ifdef DEBUG
tm->lirbuf->names = new (&gc) LirNameMap(&gc, NULL, tm->fragmento->labels);
#endif
- tm->globalSlots = new (&gc) SlotList();
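+        /*
+         * Each entry of tm->globalStates pairs a global shape with an interned
+         * SlotList of tracked global slots; globalShape == -1 marks an unused
+         * entry. A rough sketch of the assumed declaration (it lives in the
+         * header, not in this file):
+         *
+         *   struct GlobalState {
+         *       uint32    globalShape;
+         *       SlotList* globalSlots;
+         *   };
+         *
+         *   GlobalState globalStates[MONITOR_N_GLOBAL_STATES];
+         */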
+ for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
+ tm->globalStates[i].globalShape = -1;
+ JS_ASSERT(!tm->globalStates[i].globalSlots);
+ tm->globalStates[i].globalSlots = new (&gc) SlotList();
+ }
tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
}
if (!tm->reFragmento) {
Fragmento* fragmento = new (&gc) Fragmento(core, 20);
verbose_only(fragmento->labels = new (&gc) LabelMap(core, NULL);)
tm->reFragmento = fragmento;
tm->reLirBuf = new (&gc) LirBuffer(fragmento, NULL);
@@ -4398,28 +4441,30 @@ js_FinishJIT(JSTraceMonitor *tm)
jitstats.unstableLoopVariable, jitstats.breakLoopExits, jitstats.returnLoopExits,
jitstats.noCompatInnerTrees);
printf("monitor: triggered(%llu), exits(%llu), type mismatch(%llu), "
"global mismatch(%llu)\n", jitstats.traceTriggered, jitstats.sideExitIntoInterpreter,
jitstats.typeMapMismatchAtEntry, jitstats.globalShapeMismatchAtEntry);
}
#endif
if (tm->fragmento != NULL) {
- JS_ASSERT(tm->globalSlots && tm->reservedDoublePool);
+ JS_ASSERT(tm->reservedDoublePool);
verbose_only(delete tm->fragmento->labels;)
#ifdef DEBUG
delete tm->lirbuf->names;
tm->lirbuf->names = NULL;
#endif
delete tm->lirbuf;
tm->lirbuf = NULL;
delete tm->fragmento;
tm->fragmento = NULL;
- delete tm->globalSlots;
- tm->globalSlots = NULL;
+ for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
+ JS_ASSERT(tm->globalStates[i].globalSlots);
+ delete tm->globalStates[i].globalSlots;
+ }
delete[] tm->reservedDoublePool;
tm->reservedDoublePool = tm->reservedDoublePoolPtr = NULL;
}
if (tm->reFragmento != NULL) {
delete tm->reLirBuf;
verbose_only(delete tm->reFragmento->labels;)
delete tm->reFragmento;
}
@@ -4475,20 +4520,20 @@ js_FlushJITCache(JSContext* cx)
fragmento->clearFrags();
#ifdef DEBUG
JS_ASSERT(fragmento->labels);
delete fragmento->labels;
fragmento->labels = new (&gc) LabelMap(core, NULL);
#endif
tm->lirbuf->rewind();
memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
- }
- if (cx->fp) {
- tm->globalShape = OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain));
- tm->globalSlots->clear();
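+        /* Reset every tracked global state so future recordings re-intern their global slots. */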
+ for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
+ tm->globalStates[i].globalShape = -1;
+ tm->globalStates[i].globalSlots->clear();
+ }
}
oracle.clearHitCounts();
}
JS_FORCES_STACK JSStackFrame *
js_GetTopStackFrame(JSContext *cx)
{
if (JS_ON_TRACE(cx)) {
@@ -5782,17 +5827,17 @@ JS_REQUIRES_STACK bool
TraceRecorder::guardElemOp(JSObject* obj, LIns* obj_ins, jsid id, size_t op_offset, jsval* vp)
{
LIns* map_ins = lir->insLoad(LIR_ldp, obj_ins, (int)offsetof(JSObject, map));
LIns* ops_ins;
if (!map_is_native(obj->map, map_ins, ops_ins, op_offset))
return false;
uint32 shape = OBJ_SHAPE(obj);
- if (JSID_IS_ATOM(id) && shape == traceMonitor->globalShape)
+ if (JSID_IS_ATOM(id) && shape == treeInfo->globalShape)
ABORT_TRACE("elem op probably aliases global");
JSObject* pobj;
JSProperty* prop;
if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
return false;
if (vp)
@@ -9083,47 +9128,47 @@ TraceRecorder::record_JSOP_HOLE()
{
stack(0, INS_CONST(JSVAL_TO_BOOLEAN(JSVAL_HOLE)));
return true;
}
#ifdef JS_JIT_SPEW
/* Prints information about entry typemaps and unstable exits for all peers at a PC */
void
-js_DumpPeerStability(JSTraceMonitor* tm, const void* ip)
+js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, uint32 globalShape)
{
Fragment* f;
TreeInfo* ti;
bool looped = false;
unsigned length = 0;
- for (f = getLoop(tm, ip); f != NULL; f = f->peer) {
+ for (f = getLoop(tm, ip, globalShape); f != NULL; f = f->peer) {
if (!f->vmprivate)
continue;
printf("fragment %p:\nENTRY: ", f);
ti = (TreeInfo*)f->vmprivate;
if (looped)
- JS_ASSERT(ti->stackSlots == length);
- for (unsigned i = 0; i < ti->stackSlots; i++)
+ JS_ASSERT(ti->nStackTypes == length);
+ for (unsigned i = 0; i < ti->nStackTypes; i++)
printf("S%d ", ti->stackTypeMap()[i]);
- for (unsigned i = 0; i < ti->globalSlots(); i++)
+ for (unsigned i = 0; i < ti->nGlobalTypes(); i++)
printf("G%d ", ti->globalTypeMap()[i]);
printf("\n");
UnstableExit* uexit = ti->unstableExits;
while (uexit != NULL) {
printf("EXIT: ");
uint8* m = getFullTypeMap(uexit->exit);
for (unsigned i = 0; i < uexit->exit->numStackSlots; i++)
printf("S%d ", m[i]);
for (unsigned i = 0; i < uexit->exit->numGlobalSlots; i++)
printf("G%d ", m[uexit->exit->numStackSlots + i]);
printf("\n");
uexit = uexit->next;
}
- length = ti->stackSlots;
+ length = ti->nStackTypes;
looped = true;
}
}
#endif
/*
* 17 potentially-converting binary operators:
* | ^ & == != < <= > >= << >> >>> + - * / %