Bug 498193 - nanojit: inline all LirWriter functions. r=gal.
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -447,21 +447,16 @@ namespace nanojit
// oprnd_2 must be in the same position in LIns{Op2,Op3,Sti}
// because oprnd2() is used for both of them.
NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_2)) ==
(offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_2)) );
NanoStaticAssert( (offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_2)) ==
(offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_2)) );
}
- LIns* LirWriter::ins2i(LOpcode v, LIns* oprnd1, int32_t imm)
- {
- return ins2(v, oprnd1, insImm(imm));
- }
-
bool insIsS16(LInsp i)
{
if (i->isconst()) {
int c = i->imm32();
return isS16(c);
}
if (i->isCmov()) {
return insIsS16(i->oprnd2()) && insIsS16(i->oprnd3());
@@ -915,44 +910,16 @@ namespace nanojit
// under the assumption that we're more likely to CSE-match the
// constant base address if we dont const-fold small offsets.
uintptr_t p = (uintptr_t)base->constvalp() + off;
return out->insLoad(op, insImmPtr((void*)p), 0, accSet);
}
return out->insLoad(op, base, off, accSet);
}
- LIns* LirWriter::ins_eq0(LIns* oprnd1)
- {
- return ins2i(LIR_eq, oprnd1, 0);
- }
-
- LIns* LirWriter::ins_peq0(LIns* oprnd1)
- {
- return ins2(LIR_peq, oprnd1, insImmWord(0));
- }
-
- LIns* LirWriter::ins_i2p(LIns* intIns)
- {
-#ifdef NANOJIT_64BIT
- return ins1(LIR_i2q, intIns);
-#else
- return intIns;
-#endif
- }
-
- LIns* LirWriter::ins_u2p(LIns* uintIns)
- {
-#ifdef NANOJIT_64BIT
- return ins1(LIR_u2q, uintIns);
-#else
- return uintIns;
-#endif
- }
-
LIns* LirWriter::insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet)
{
// Determine which kind of store should be used for 'value' based on
// its type.
LOpcode op = LOpcode(0);
switch (value->retType()) {
case LTy_I32: op = LIR_sti; break;
#ifdef NANOJIT_64BIT
@@ -960,52 +927,19 @@ namespace nanojit
#endif
case LTy_F64: op = LIR_stfi; break;
case LTy_Void: NanoAssert(0); break;
default: NanoAssert(0); break;
}
return insStore(op, value, base, d, accSet);
}
-#if NJ_SOFTFLOAT_SUPPORTED
- LIns* LirWriter::qjoin(LInsp lo, LInsp hi)
- {
- return ins2(LIR_qjoin, lo, hi);
- }
-#endif
-
- LIns* LirWriter::insImmWord(intptr_t value)
- {
-#ifdef NANOJIT_64BIT
- return insImmq(value);
-#else
- return insImm(value);
-#endif
- }
-
- LIns* LirWriter::insImmPtr(const void *ptr)
- {
-#ifdef NANOJIT_64BIT
- return insImmq((uint64_t)ptr);
-#else
- return insImm((int32_t)ptr);
-#endif
- }
-
LIns* LirWriter::ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov)
{
- // if not a conditional, make it implicitly an ==0 test (then flop results)
- if (!cond->isCmp())
- {
- cond = ins_eq0(cond);
- LInsp tmp = iftrue;
- iftrue = iffalse;
- iffalse = tmp;
- }
-
+ NanoAssert(cond->isCmp());
if (use_cmov) {
LOpcode op = LIR_cmov;
if (iftrue->isI32() && iffalse->isI32()) {
op = LIR_cmov;
#ifdef NANOJIT_64BIT
} else if (iftrue->isI64() && iffalse->isI64()) {
op = LIR_qcmov;
#endif
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -1436,34 +1436,69 @@ namespace nanojit
// convenience functions
// Inserts a conditional to execute and branches to execute if
// the condition is true and false respectively.
LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);
// Inserts an integer comparison to 0
- LIns* ins_eq0(LIns* oprnd1);
+ LIns* ins_eq0(LIns* oprnd1) {
+ return ins2i(LIR_eq, oprnd1, 0);
+ }
// Inserts a pointer comparison to 0
- LIns* ins_peq0(LIns* oprnd1);
+ LIns* ins_peq0(LIns* oprnd1) {
+ return ins2(LIR_peq, oprnd1, insImmWord(0));
+ }
// Inserts a binary operation where the second operand is an
// integer immediate.
- LIns* ins2i(LOpcode op, LIns *oprnd1, int32_t);
+ LIns* ins2i(LOpcode v, LIns* oprnd1, int32_t imm) {
+ return ins2(v, oprnd1, insImm(imm));
+ }
#if NJ_SOFTFLOAT_SUPPORTED
- LIns* qjoin(LInsp lo, LInsp hi);
+ LIns* qjoin(LInsp lo, LInsp hi) {
+ return ins2(LIR_qjoin, lo, hi);
+ }
+#endif
+ LIns* insImmPtr(const void *ptr) {
+#ifdef NANOJIT_64BIT
+ return insImmq((uint64_t)ptr);
+#else
+ return insImm((int32_t)ptr);
+#endif
+ }
+
+ LIns* insImmWord(intptr_t value) {
+#ifdef NANOJIT_64BIT
+ return insImmq(value);
+#else
+ return insImm(value);
#endif
- LIns* insImmPtr(const void *ptr);
- LIns* insImmWord(intptr_t ptr);
+ }
- // Sign or zero extend integers to native integers. On 32-bit this is a no-op.
- LIns* ins_i2p(LIns* intIns);
- LIns* ins_u2p(LIns* uintIns);
+ // Sign-extend integers to native integers. On 32-bit this is a no-op.
+ LIns* ins_i2p(LIns* intIns) {
+#ifdef NANOJIT_64BIT
+ return ins1(LIR_i2q, intIns);
+#else
+ return intIns;
+#endif
+ }
+
+ // Zero-extend integers to native integers. On 32-bit this is a no-op.
+ LIns* ins_u2p(LIns* uintIns) {
+#ifdef NANOJIT_64BIT
+ return ins1(LIR_u2q, uintIns);
+#else
+ return uintIns;
+#endif
+ }
// Chooses LIR_sti or LIR_stqi based on size of value.
LIns* insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet);
// Insert a load/store with the most pessimistic region access info, which is always safe.
LIns* insLoad(LOpcode op, LIns* base, int32_t d) {
return insLoad(op, base, d, ACC_LOAD_ANY);
}