Sync nanojit with tamarin-tracing tip.
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -893,20 +893,17 @@ js_LoopEdge(JSContext* cx)
/* is the recorder currently active? */
if (tm->recorder) {
if (tm->recorder->loopEdge())
return true; /* keep recording */
js_DeleteRecorder(cx);
return false; /* done recording */
}
- InterpState state;
- state.ip = (FOpcodep)cx->fp->regs->pc;
-
- Fragment* f = tm->fragmento->getLoop(state);
+ Fragment* f = tm->fragmento->getLoop(cx->fp->regs->pc);
if (!f->code()) {
int hits = ++f->hits();
if (!f->isBlacklisted() && hits >= HOTLOOP1) {
if (hits == HOTLOOP1 || hits == HOTLOOP2 || hits == HOTLOOP3) {
tm->recorder = new (&gc) TraceRecorder(cx, tm->fragmento, f);
return true; /* start recording */
}
if (hits > HOTLOOP3)
@@ -924,16 +921,18 @@ js_LoopEdge(JSContext* cx)
if (!unbox(cx->fp, *cx->fp->regs, fi->typeMap, native)) {
#ifdef DEBUG
printf("typemap mismatch, skipping trace.\n");
#endif
return false;
}
double* entry_sp = &native[fi->nativeStackBase/sizeof(double) +
(cx->fp->regs->sp - cx->fp->spbase - 1)];
+ InterpState state;
+ state.ip = cx->fp->regs->pc;
state.sp = (void*)entry_sp;
state.rp = NULL;
state.f = NULL;
state.cx = cx;
union { NIns *code; GuardRecord* (FASTCALL *func)(InterpState*, Fragment*); } u;
u.code = f->code();
#ifdef DEBUG
printf("entering trace, pc=%p, sp=%p\n", state.ip, state.sp);
--- a/js/src/nanojit/Fragmento.cpp
+++ b/js/src/nanojit/Fragmento.cpp
@@ -110,17 +110,17 @@ namespace nanojit
void Fragmento::pagesGrow(int32_t count)
{
NanoAssert(!_pageList);
MMGC_MEM_TYPE("NanojitFragmentoMem");
Page* memory = 0;
if (NJ_UNLIMITED_GROWTH || _stats.pages < (uint32_t)NJ_PAGES)
{
// make sure we don't grow beyond NJ_PAGES
- if (_stats.pages + count > (uint32)NJ_PAGES)
+ if (_stats.pages + count > (uint32_t)NJ_PAGES)
count = NJ_PAGES - _stats.pages;
if (count < 0)
count = 0;
// @todo nastiness that needs a fix'n
_gcHeap = _core->GetGC()->GetGCHeap();
NanoAssert(NJ_PAGE_SIZE<=_gcHeap->kNativePageSize);
@@ -190,22 +190,22 @@ namespace nanojit
return _assm;
}
AvmCore* Fragmento::core()
{
return _core;
}
- Fragment* Fragmento::getLoop(const avmplus::InterpState &is)
+ Fragment* Fragmento::getLoop(const void* ip)
{
- Fragment* f = _frags->get(is.ip);
+ Fragment* f = _frags->get(ip);
if (!f) {
- f = newFrag(is);
- _frags->put(is.ip, f);
+ f = newFrag(ip);
+ _frags->put(ip, f);
f->anchor = f;
f->root = f;
f->kind = LoopTrace;
f->mergeCounts = new (_core->gc) BlockHist(_core->gc);
verbose_only( addLabel(f, "T", _frags->size()); )
}
return f;
}
@@ -214,38 +214,38 @@ namespace nanojit
void Fragmento::addLabel(Fragment *f, const char *prefix, int id)
{
char fragname[20];
sprintf(fragname,"%s%d", prefix, id);
labels->add(f, sizeof(Fragment), 0, fragname);
}
#endif
- Fragment *Fragmento::getMerge(GuardRecord *lr, const avmplus::InterpState &is)
+ Fragment *Fragmento::getMerge(GuardRecord *lr, const void* ip)
{
Fragment *anchor = lr->from->anchor;
for (Fragment *f = anchor->branches; f != 0; f = f->nextbranch) {
- if (f->kind == MergeTrace && f->frid == is.ip && f->calldepth == lr->calldepth) {
+ if (f->kind == MergeTrace && f->ip == ip && f->calldepth == lr->calldepth) {
// found existing shared branch on anchor
return f;
}
}
- Fragment *f = newBranch(anchor, is);
+ Fragment *f = newBranch(anchor, ip);
f->root = f;
f->kind = MergeTrace;
f->calldepth = lr->calldepth;
verbose_only(addLabel(f, "M", ++anchor->mergeid); )
return f;
}
- Fragment *Fragmento::createBranch(GuardRecord *lr, const avmplus::InterpState &is)
+ Fragment *Fragmento::createBranch(GuardRecord *lr, const void* ip)
{
Fragment *from = lr->from;
- Fragment *f = newBranch(from, is);
+ Fragment *f = newBranch(from, ip);
f->kind = BranchTrace;
f->calldepth = lr->calldepth;
f->treeBranches = f->root->treeBranches;
f->root->treeBranches = f;
return f;
}
#ifdef NJ_VERBOSE
@@ -289,17 +289,17 @@ namespace nanojit
sprintf(cause,"%s %s", f->_token, labels->format(f->eot_target));
} else {
strcpy(cause, f->_token);
}
}
else
cause[0] = 0;
- FOpcodep ip = f->frid;
+ const void* ip = f->ip;
_assm->outputf("%-*s %7d %6d %6d %6d %4d %9llu %9llu %-12s %s", namewidth, buf,
called, f->guardCount, main, f->_native, f->compileNbr, f->traceTicks/1000, f->interpTicks/1000,
cause, core()->interp.labels->format(ip));
size += main;
traceDur += f->traceTicks;
interpDur += f->interpTicks;
@@ -414,17 +414,17 @@ namespace nanojit
d.interpDur/1000, int(100.0*d.interpDur/totaldur),
size, int(100.0*size/totalsize));
}
_assm->_verbose = vsave;
}
- void Fragmento::countBlock(BlockHist *hist, FOpcodep ip)
+ void Fragmento::countBlock(BlockHist *hist, const void* ip)
{
int c = hist->count(ip);
if (_assm->_verbose)
_assm->outputf("++ %s %d", core()->interp.labels->format(ip), c);
}
void Fragmento::countIL(uint32_t il, uint32_t abc)
{
@@ -437,17 +437,17 @@ namespace nanojit
drawTraceTrees(this, this->_frags, this->_core, fileName);
}
#endif
#endif // NJ_VERBOSE
//
// Fragment
//
- Fragment::Fragment(FragID id) : frid(id)
+ Fragment::Fragment(const void* _ip) : ip(_ip)
{
// Fragment is a gc object which is zero'd by the GC, no need to clear fields
}
Fragment::~Fragment()
{
NanoAssert(_pages == 0);
}
@@ -585,34 +585,33 @@ namespace nanojit
}
void Fragment::blacklist()
{
blacklistLevel++;
_hits = -(1<<blacklistLevel);
}
- Fragment *Fragmento::newFrag(const avmplus::InterpState &interp)
+ Fragment *Fragmento::newFrag(const void* ip)
{
- FragID frid = interp.ip;
GC *gc = _core->gc;
- Fragment *f = new (gc) Fragment(frid);
+ Fragment *f = new (gc) Fragment(ip);
f->blacklistLevel = 5;
#ifdef AVMPLUS_VERBOSE
- if (interp.f->filename) {
- f->line = interp.f->linenum;
- f->file = interp.f->filename;
+ if (_core->interp.currentState->f->filename) {
+ f->line = _core->interp.currentState->f->linenum;
+ f->file = _core->interp.currentState->f->filename;
}
#endif
return f;
}
- Fragment *Fragmento::newBranch(Fragment *from, const avmplus::InterpState &interp)
+ Fragment *Fragmento::newBranch(Fragment *from, const void* ip)
{
- Fragment *f = newFrag(interp);
+ Fragment *f = newFrag(ip);
f->anchor = from->anchor;
f->root = from->root;
f->mergeCounts = from->anchor->mergeCounts;
f->xjumpCount = from->xjumpCount;
/*// prepend
f->nextbranch = from->branches;
from->branches = f;*/
// append
--- a/js/src/nanojit/Fragmento.h
+++ b/js/src/nanojit/Fragmento.h
@@ -88,29 +88,29 @@ namespace nanojit
~Fragmento();
void addMemory(void* firstPage, uint32_t pageCount); // gives memory to the Assembler
Assembler* assm();
AvmCore* core();
Page* pageAlloc();
void pageFree(Page* page);
- Fragment* getLoop(const avmplus::InterpState &is);
+ Fragment* getLoop(const void* ip);
void clearFrags(); // clear all fragments from the cache
- Fragment* getMerge(GuardRecord *lr, const avmplus::InterpState &is);
- Fragment* createBranch(GuardRecord *lr, const avmplus::InterpState &is);
- Fragment* newFrag(const avmplus::InterpState &is);
- Fragment* newBranch(Fragment *from, const avmplus::InterpState &is);
+ Fragment* getMerge(GuardRecord *lr, const void* ip);
+ Fragment* createBranch(GuardRecord *lr, const void* ip);
+ Fragment* newFrag(const void* ip);
+ Fragment* newBranch(Fragment *from, const void* ip);
verbose_only ( uint32_t pageCount(); )
verbose_only ( void dumpStats(); )
verbose_only ( void dumpRatio(const char*, BlockHist*);)
verbose_only ( void dumpFragStats(Fragment*, int level,
int& size, uint64_t &dur, uint64_t &interpDur); )
- verbose_only ( void countBlock(BlockHist*, avmplus::FOpcodep pc); )
+ verbose_only ( void countBlock(BlockHist*, const void* pc); )
verbose_only ( void countIL(uint32_t il, uint32_t abc); )
verbose_only( void addLabel(Fragment* f, const char *prefix, int id); )
// stats
struct
{
uint32_t pages; // pages consumed
uint32_t flushes, ilsize, abcsize, compiles, totalCompiles, freePages;
@@ -164,17 +164,17 @@ namespace nanojit
* point at the start of the fragment and may have one or more exit points
*
* It may turn out that that this arrangement causes too much traffic
* between d and i-caches and that we need to carve up the structure differently.
*/
class Fragment : public GCFinalizedObject
{
public:
- Fragment(FragID);
+ Fragment(const void*);
~Fragment();
NIns* code() { return _code; }
void setCode(NIns* codee, Page* pages) { _code = codee; _pages = pages; }
GuardRecord* links() { return _links; }
int32_t& hits() { return _hits; }
void blacklist();
bool isBlacklisted() { return _hits < 0; }
@@ -215,17 +215,17 @@ namespace nanojit
DWB(Fragment*) root;
DWB(BlockHist*) mergeCounts;
DWB(LirBuffer*) lirbuf;
LIns* lastIns;
LIns* spawnedFrom;
GuardRecord* outbound;
TraceKind kind;
- const FragID frid;
+ const void* ip;
uint32_t guardCount;
uint32_t xjumpCount;
int32_t blacklistLevel;
NIns* fragEntry;
LInsp param0,param1,sp,rp;
int32_t calldepth;
void* vmprivate;
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -1547,17 +1547,17 @@ namespace nanojit
}
case LIR_param:
sprintf(s, "%s %s", lirNames[op], gpn(i->imm8()));
break;
case LIR_x: {
SideExit *x = (SideExit*) i->oprnd2()->payload();
- uint32_t ip = uint32_t(x->from->frid) + x->ip_adj;
+ uint32_t ip = uint32_t(x->from->ip) + x->ip_adj;
sprintf(s, "%s: %s -> %s sp%+d rp%+d f%+d",
formatRef(i), lirNames[op],
labels->format((void*)ip),
x->sp_adj, x->rp_adj, x->f_adj);
break;
}
case LIR_callh:
@@ -1573,17 +1573,17 @@ namespace nanojit
case LIR_ov:
case LIR_cs:
sprintf(s, "%s %s", lirNames[op], formatRef(i->oprnd1()));
break;
case LIR_xt:
case LIR_xf: {
SideExit *x = (SideExit*) i->oprnd2()->payload();
- uint32_t ip = int32_t(x->from->frid) + x->ip_adj;
+ uint32_t ip = uint32_t(x->from->ip) + x->ip_adj;
sprintf(s, "%s: %s %s -> %s sp%+d rp%+d f%+d",
formatRef(i), lirNames[op],
formatRef(i->oprnd1()),
labels->format((void*)ip),
x->sp_adj, x->rp_adj, x->f_adj);
break;
}
case LIR_add:
@@ -1804,17 +1804,17 @@ namespace nanojit
{
// compile til no more frags
if (frag->lastIns)
{
assm->assemble(frag, loopJumps);
verbose_only(if (assm->_verbose)
assm->outputf("compiling branch %s ip %s",
frago->labels->format(frag),
- frago->labels->format(frag->frid)); )
+ frago->labels->format(frag->ip)); )
NanoAssert(frag->kind == BranchTrace);
RegAlloc* regs = new (gc) RegAlloc();
assm->copyRegisters(regs);
assm->releaseRegisters();
SideExit* exit = frag->spawnedFrom->exit();
regMap.put(exit, regs);
}
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -445,16 +445,18 @@ ShiftOperator;
#define MRL(dr,sr) MR_cond(dr, sr, LT, "movlt")
#define MRLE(dr,sr) MR_cond(dr, sr, LE, "movle")
#define MRG(dr,sr) MR_cond(dr, sr, GT, "movgt")
#define MRGE(dr,sr) MR_cond(dr, sr, GE, "movge")
#define MRB(dr,sr) MR_cond(dr, sr, CC, "movcc")
#define MRBE(dr,sr) MR_cond(dr, sr, LS, "movls")
#define MRA(dr,sr) MR_cond(dr, sr, HI, "movcs")
#define MRAE(dr,sr) MR_cond(dr, sr, CS, "movhi")
+#define MRNO(dr,sr) MR_cond(dr, sr, VC, "movvc") // overflow clear
+#define MRNC(dr,sr) MR_cond(dr, sr, CC, "movcc") // carry clear
#define LD(_d,_off,_b) do{\
if ((_off)<0){\
underrunProtect(4);\
NanoAssert((_off)>-4096);\
*(--_nIns) = (NIns)( COND_AL | (0x51<<20) | ((_b)<<16) | ((_d)<<12) | ((-(_off))&0xFFF) );\
} else {\
if (isS16(_off) || isU16(_off)) {\
@@ -551,17 +553,17 @@ ShiftOperator;
// we need to load into scratch reg, then push onto stack
#define PUSHm(_off,_b) do {\
NanoAssert( (int)(_off)>0 );\
underrunProtect(8);\
*(--_nIns) = (NIns)( COND_AL | (0x92<<20) | (SP<<16) | (1<<(Scratch)) ); \
*(--_nIns) = (NIns)( COND_AL | (0x59<<20) | ((_b)<<16) | ((Scratch)<<12) | ((_off)&0xFFF) );\
asm_output2("push %d(%s)",(_off),gpn(_b)); } while (0)
-#define POP(_r) do {\
+#define POPr(_r) do {\
underrunProtect(4); \
*(--_nIns) = (NIns)( COND_AL | (0x8B<<20) | (SP<<16) | (1<<(_r)) );\
asm_output1("pop %s",gpn(_r));} while (0)
#define POP_mask(_mask) do {\
underrunProtect(4); \
*(--_nIns) = (NIns)( COND_AL | (0x8B<<20) | (SP<<16) | (_mask) );\
asm_output1("pop %x", (_mask));} while (0)
@@ -630,16 +632,20 @@ ShiftOperator;
#define JL(t) do {B_cond(LT,t); asm_output1("jl 0x%08x",t); } while(0)
#define JNL(t) do {B_cond(GE,t); asm_output1("jnl 0x%08x",t); } while(0)
#define JLE(t) do {B_cond(LE,t); asm_output1("jle 0x%08x",t); } while(0)
#define JNLE(t) do {B_cond(GT,t); asm_output1("jnle 0x%08x",t); } while(0)
#define JGE(t) do {B_cond(GE,t); asm_output1("jge 0x%08x",t); } while(0)
#define JNGE(t) do {B_cond(LT,t); asm_output1("jnge 0x%08x",t); } while(0)
#define JG(t) do {B_cond(GT,t); asm_output1("jg 0x%08x",t); } while(0)
#define JNG(t) do {B_cond(LE,t); asm_output1("jng 0x%08x",t); } while(0)
+#define JC(t) do {B_cond(CS,t); asm_output1("bcs 0x%08x",t); } while(0)
+#define JNC(t) do {B_cond(CC,t); asm_output1("bcc 0x%08x",t); } while(0)
+#define JO(t) do {B_cond(VS,t); asm_output1("bvs 0x%08x",t); } while(0)
+#define JNO(t) do {B_cond(VC,t); asm_output1("bvc 0x%08x",t); } while(0)
// used for testing result of an FP compare
// JP = comparison false
#define JP(t) do {B_cond(EQ,NE,t); asm_output1("jp 0x%08x",t); } while(0)
// JNP = comparison true
#define JNP(t) do {B_cond(NE,EQ,t); asm_output1("jnp 0x%08x",t); } while(0)
@@ -679,16 +685,18 @@ ShiftOperator;
#define SETL(r) do {SET(r,LT,GE); asm_output1("setl %s",gpn(r)); } while(0)
#define SETLE(r) do {SET(r,LE,GT); asm_output1("setle %s",gpn(r)); } while(0)
#define SETG(r) do {SET(r,GT,LE); asm_output1("setg %s",gpn(r)); } while(0)
#define SETGE(r) do {SET(r,GE,LT); asm_output1("setge %s",gpn(r)); } while(0)
#define SETB(r) do {SET(r,CC,CS); asm_output1("setb %s",gpn(r)); } while(0)
#define SETBE(r) do {SET(r,LS,HI); asm_output1("setb %s",gpn(r)); } while(0)
#define SETAE(r) do {SET(r,CS,CC); asm_output1("setae %s",gpn(r)); } while(0)
#define SETA(r) do {SET(r,HI,LS); asm_output1("seta %s",gpn(r)); } while(0)
+#define SETO(r) do {SET(r,VS,VC); asm_output1("seto %s",gpn(r)); } while(0)
+#define SETC(r) do {SET(r,CS,CC); asm_output1("setc %s",gpn(r)); } while(0)
// This zero-extends a reg that has been set using one of the SET macros,
// but is a NOOP on ARM/Thumb
#define MOVZX8(r,r2)
// Load and sign extend a 16-bit value into a reg
#define MOVSX(_d,_off,_b) do{\
if ((_off)>=0){\
--- a/js/src/nanojit/NativeThumb.h
+++ b/js/src/nanojit/NativeThumb.h
@@ -129,17 +129,17 @@ namespace nanojit
#define DECLARE_PLATFORM_ASSEMBLER()\
const static Register argRegs[4], retRegs[2];\
bool has_cmov;\
void STi(Register b, int32_t d, int32_t v);\
void LDi(Register r, int32_t v);\
void BL(NIns* target);\
void PUSH_mask(RegisterMask);\
void POP_mask(RegisterMask);\
- void POP(Register);\
+ void POPr(Register);\
void underrunProtect(int bytes);\
void B_cond(int c, NIns *target);\
void B(NIns *target);\
void MOVi(Register r, int32_t imm);\
void ST(Register base, int32_t offset, Register reg);\
void STR_m(Register base, int32_t offset, Register reg);\
void STR_index(Register base, Register off, Register reg);\
void STR_sp(int32_t offset, Register reg);\
@@ -315,16 +315,18 @@ namespace nanojit
#define MRL(d,s) do { NanoAssert(0); } while (0)
#define MRLE(d,s) do { NanoAssert(0); } while (0)
#define MRG(d,s) do { NanoAssert(0); } while (0)
#define MRGE(d,s) do { NanoAssert(0); } while (0)
#define MRB(d,s) do { NanoAssert(0); } while (0)
#define MRBE(d,s) do { NanoAssert(0); } while (0)
#define MRA(d,s) do { NanoAssert(0); } while (0)
#define MRAE(d,s) do { NanoAssert(0); } while (0)
+#define MRNC(d,s) do { NanoAssert(0); } while (0)
+#define MRNO(d,s) do { NanoAssert(0); } while (0)
#define LD(reg,offset,base) do{ \
int off = (offset) >> 2; \
if (base==PC){ \
underrunProtect(2); \
NanoAssert(off>=0 && off<256); \
*(--_nIns) = (NIns)(0x4800 | ((reg)<<8) | (off&0xFF)); \
asm_output3("ld %s,%d(%s)",gpn(reg),(offset),gpn(base)); \
@@ -434,16 +436,20 @@ enum {
#define JL(t) B_cond(LT,t)
#define JNL(t) B_cond(GE,t)
#define JG(t) B_cond(GT,t)
#define JNG(t) B_cond(LE,t)
#define JLE(t) B_cond(LE,t)
#define JNLE(t) B_cond(GT,t)
#define JGE(t) B_cond(GE,t)
#define JNGE(t) B_cond(LT,t)
+#define JC(t) B_cond(CSHS,t)
+#define JNC(t) B_cond(CCLO,t)
+#define JO(t) B_cond(VS,t)
+#define JNO(t) B_cond(VC,t)
// B(cond) +4 - if condition, skip to MOV
// EOR R, R - set register to 0
// B(AL) +2 - skip over next
// MOV R, 1 - set register to 1
#define SET(r,cond) \
underrunProtect(10); \
*(--_nIns) = (NIns)(0x0000); \
@@ -456,16 +462,18 @@ enum {
#define SETL(r) do {SET(r,LT); asm_output1("setl %s",gpn(r)); } while(0)
#define SETLE(r) do {SET(r,LE); asm_output1("setle %s",gpn(r)); } while(0)
#define SETG(r) do {SET(r,GT); asm_output1("setg %s",gpn(r)); } while(0)
#define SETGE(r) do {SET(r,GE); asm_output1("setge %s",gpn(r)); } while(0)
#define SETB(r) do {SET(r,CCLO); asm_output1("setb %s",gpn(r)); } while(0)
#define SETBE(r) do {SET(r,LS); asm_output1("setbe %s",gpn(r)); } while(0)
#define SETAE(r) do {SET(r,CSHS); asm_output1("setae %s",gpn(r)); } while(0) /* warning, untested */
#define SETA(r) do {SET(r,HI); asm_output1("seta %s",gpn(r)); } while(0) /* warning, untested */
+#define SETC(r) do {SET(r,CSHS); asm_output1("setc %s",gpn(r)); } while(0) /* warning, untested */
+#define SETO(r) do {SET(r,VS); asm_output1("seto %s",gpn(r)); } while(0) /* warning, untested */
// This zero-extends a reg that has been set using one of the SET macros,
// but is a NOOP on ARM/Thumb
#define MOVZX8(r,r2)
// If the offset is 0-255, no problem, just load 8-bit imm
// If the offset is greater than that, we load the SP
//
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -36,16 +36,19 @@
*
* ***** END LICENSE BLOCK ***** */
#ifdef _MAC
// for MakeDataExecutable
#include <CoreServices/CoreServices.h>
#endif
+#if defined LINUX
+#include <sys/mman.h>
+#endif
#include "nanojit.h"
namespace nanojit
{
#ifdef FEATURE_NANOJIT
#ifdef NJ_VERBOSE
const char *regNames[] = {
@@ -241,19 +244,23 @@ namespace nanojit
if (extra > 0)
{ ADDi(SP, extra); }
}
void Assembler::nMarkExecute(Page* page, int32_t count, bool enable)
{
#ifdef _MAC
MakeDataExecutable(page, count*NJ_PAGE_SIZE);
- #else
- (void)page;
- (void)count;
+ #elif defined WIN32
+ DWORD dwIgnore;
+ VirtualProtect(&page->code, count*NJ_PAGE_SIZE, PAGE_EXECUTE_READWRITE, &dwIgnore);
+ #elif defined LINUX
+ intptr_t addr = (intptr_t)&page->code;
+ addr &= ~(NJ_PAGE_SIZE - 1);
+ mprotect((void *)addr, count*NJ_PAGE_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
(void)enable;
}
Register Assembler::nRegisterAllocFromSet(int set)
{
Register r;
RegAlloc ®s = _allocator;
--- a/js/src/nanojit/Tests.cpp
+++ b/js/src/nanojit/Tests.cpp
@@ -353,17 +353,17 @@ do_test(Test* test)
state.ip = NULL;
state.sp = NULL;
state.rp = NULL;
state.f = NULL;
/* Begin a dummy trace */
frago->labels = new (gc) LabelMap(core, NULL);
- frag = frago->getLoop(state);
+ frag = frago->getLoop(state.ip);
lirbuf = new (gc) LirBuffer(frago, NULL);
lirbuf->names = new (gc) LirNameMap(gc, NULL, frago->labels);
frag->lirbuf = lirbuf;
lirout = new LirBufWriter(lirbuf);
lirout->ins0(LIR_trace);
frag->param0 = lirout->insImm8(LIR_param, Assembler::argRegs[0], 0);
frag->param1 = lirout->insImm8(LIR_param, Assembler::argRegs[1], 0);
test->Compile(lirout);
--- a/js/src/nanojit/avmplus.h
+++ b/js/src/nanojit/avmplus.h
@@ -175,21 +175,19 @@ operator new(size_t size, GC* gc)
#define DRCWB(x) x
#define MMGC_MEM_TYPE(x)
typedef int FunctionID;
namespace avmplus
{
- typedef const uint16_t* FOpcodep;
-
struct InterpState
{
- FOpcodep ip;
+ void* ip;
void* sp;
void* rp;
void* f;
void* cx;
};
class String
{
@@ -245,17 +243,17 @@ namespace avmplus
};
static const int kstrconst_emptyString = 0;
class AvmInterpreter
{
class Labels {
public:
- const char* format(FOpcodep ip)
+ const char* format(const void* ip)
{
static char buf[33];
sprintf(buf, "%p", ip);
return buf;
}
};
Labels _labels;
--- a/js/src/nanojit/nanojit.h
+++ b/js/src/nanojit/nanojit.h
@@ -31,17 +31,17 @@
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
-#ifndef __nanojit_h_
+#ifndef __nanojit_h__
#define __nanojit_h__
#include <stddef.h>
#include "avmplus.h"
#ifdef AVMPLUS_IA32
#define NANOJIT_IA32
#elif AVMPLUS_ARM
@@ -63,18 +63,17 @@ namespace nanojit
* -------------------------------------------
*/
class Fragment;
class LIns;
struct SideExit;
class RegAlloc;
typedef avmplus::AvmCore AvmCore;
typedef avmplus::OSDep OSDep;
- typedef const uint16_t* FragID;
- typedef avmplus::SortedMap<FragID,Fragment*,avmplus::LIST_GCObjects> FragmentMap;
+ typedef avmplus::SortedMap<const void*,Fragment*,avmplus::LIST_GCObjects> FragmentMap;
typedef avmplus::SortedMap<SideExit*,RegAlloc*,avmplus::LIST_GCObjects> RegAllocMap;
typedef avmplus::List<LIns*,avmplus::LIST_NonGCObjects> InsList;
typedef avmplus::List<char*, avmplus::LIST_GCObjects> StringList;
#if defined(_DEBUG)
#ifndef WIN32
#define DebugBreak() AvmAssert(0)
@@ -162,9 +161,9 @@ namespace nanojit
#include "Native.h"
#include "LIR.h"
#include "RegAlloc.h"
#include "Fragmento.h"
#include "Assembler.h"
#include "TraceTreeDrawer.h"
-#endif // __nanojit_h_
+#endif // __nanojit_h__