Backed out changeset fea4da580994 -- need to split LIR_f2i into nj-central and tm patches
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -1560,23 +1560,16 @@ JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f
static jsdouble FASTCALL
u2f(jsuint u)
{
return u;
}
JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1)
-static jsint FASTCALL
-f2i(jsdouble d)
-{
- return jsint(d);
-}
-JS_DEFINE_CALLINFO_1(static, INT32, f2i, DOUBLE, 1, 1)
-
static int32 FASTCALL
fcmpeq(jsdouble x, jsdouble y)
{
return x==y;
}
JS_DEFINE_CALLINFO_2(static, INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1)
static int32 FASTCALL
@@ -1648,17 +1641,16 @@ static struct SoftFloatOps
map[LIR_fsub] = &fsub_ci;
map[LIR_fmul] = &fmul_ci;
map[LIR_fdiv] = &fdiv_ci;
map[LIR_feq] = &fcmpeq_ci;
map[LIR_flt] = &fcmplt_ci;
map[LIR_fgt] = &fcmpgt_ci;
map[LIR_fle] = &fcmple_ci;
map[LIR_fge] = &fcmpge_ci;
- map[LIR_f2i] = &f2i_ci;
}
} softFloatOps;
// replace fpu ops with function calls
class SoftFloatFilter: public LirWriter
{
public:
SoftFloatFilter(LirWriter *out) : LirWriter(out)
@@ -1680,21 +1672,16 @@ public:
}
LIns *split(const CallInfo *call, LInsp args[]) {
LIns *lo = out->insCall(call, args);
LIns *hi = out->ins1(LIR_callh, lo);
return out->ins2(LIR_qjoin, lo, hi);
}
- LIns *icall1(const CallInfo *call, LIns *a) {
- LIns *args[] = { split(a) };
- return out->insCall(call, args);
- }
-
LIns *fcall1(const CallInfo *call, LIns *a) {
LIns *args[] = { split(a) };
return split(call, args);
}
LIns *fcall2(const CallInfo *call, LIns *a, LIns *b) {
LIns *args[] = { split(b), split(a) };
return split(call, args);
@@ -1702,24 +1689,18 @@ public:
LIns *fcmp(const CallInfo *call, LIns *a, LIns *b) {
LIns *args[] = { split(b), split(a) };
return out->ins2(LIR_eq, out->insCall(call, args), out->insImm(1));
}
LIns *ins1(LOpcode op, LIns *a) {
const CallInfo *ci = softFloatOps.map[op];
- if (ci) {
- if (ci->returnType() == ARGSIZE_I || ci->returnType() == ARGSIZE_U)
- return icall1(ci, a);
- if (ci->returnType() == ARGSIZE_F)
- return fcall1(ci, a);
- JS_NOT_REACHED("ins1 softfloat filter with return type other than I/U or F");
- }
-
+ if (ci)
+ return fcall1(ci, a);
if (op == LIR_fret)
return out->ins1(op, split(a));
return out->ins1(op, a);
}
LIns *ins2(LOpcode op, LIns *a, LIns *b) {
const CallInfo *ci = softFloatOps.map[op];
if (ci) {
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -884,17 +884,16 @@ FragmentAssembler::assembleFragment(LirT
case LIR_not:
case LIR_qlo:
case LIR_qhi:
case LIR_ov:
case LIR_i2q:
case LIR_u2q:
case LIR_i2f:
case LIR_u2f:
- case LIR_f2i:
need(1);
ins = mLir->ins1(mOpcode,
ref(mTokens[0]));
break;
case LIR_iaddp:
case LIR_qaddp:
case LIR_add:
@@ -1351,17 +1350,16 @@ FragmentAssembler::assembleRandomFragmen
vector<LOpcode> F_I_ops;
F_I_ops.push_back(LIR_i2f);
F_I_ops.push_back(LIR_u2f);
vector<LOpcode> I_F_ops;
I_F_ops.push_back(LIR_qlo);
I_F_ops.push_back(LIR_qhi);
- I_F_ops.push_back(LIR_f2i);
vector<LOpcode> F_II_ops;
F_II_ops.push_back(LIR_qjoin);
vector<LOpcode> I_loads;
I_loads.push_back(LIR_ld); // weight LIR_ld more heavily
I_loads.push_back(LIR_ld);
I_loads.push_back(LIR_ld);
--- a/js/src/lirasm/testlirc.sh
+++ b/js/src/lirasm/testlirc.sh
@@ -14,17 +14,17 @@ do
fi
# If it has the special name "random.in", replace filename with --random.
if [ `basename $infile` = "random.in" ]
then
infile="--random 1000"
fi
- if ./lirasm --execute $infile | tr -d '\r' > testoutput.txt && cmp -s testoutput.txt $outfile
+ if ./lirasm --execute $infile > testoutput.txt && cmp -s testoutput.txt $outfile
then
echo "$0: output correct for $infile"
else
echo "$0: incorrect output for $infile"
echo "$0: === actual output ==="
cat testoutput.txt
echo "$0: === expected output ==="
cat $outfile
deleted file mode 100644
--- a/js/src/lirasm/tests/f2i.in
+++ /dev/null
@@ -1,6 +0,0 @@
-a = alloc 8
-d = float 5.0
-stqi d a 0
-x = ldf a 0
-i = f2i x
-ret i
deleted file mode 100644
--- a/js/src/lirasm/tests/f2i.out
+++ /dev/null
@@ -1,1 +0,0 @@
-Output is: 5
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -1303,22 +1303,16 @@ namespace nanojit
break;
}
case LIR_u2f:
{
countlir_fpu();
asm_u2f(ins);
break;
}
- case LIR_f2i:
- {
- countlir_fpu();
- asm_f2i(ins);
- break;
- }
case LIR_i2q:
case LIR_u2q:
{
countlir_alu();
asm_promote(ins);
break;
}
case LIR_stb:
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -418,17 +418,16 @@ namespace nanojit
void asm_param(LInsp i);
void asm_int(LInsp i);
void asm_qlo(LInsp i);
void asm_qhi(LInsp i);
void asm_fneg(LInsp ins);
void asm_fop(LInsp ins);
void asm_i2f(LInsp ins);
void asm_u2f(LInsp ins);
- void asm_f2i(LInsp ins);
void asm_promote(LIns *ins);
void asm_nongp_copy(Register r, Register s);
void asm_call(LInsp);
Register asm_binop_rhs_reg(LInsp ins);
NIns* asm_branch(bool branchOnFalse, LInsp cond, NIns* targ);
void asm_switch(LIns* ins, NIns* target);
void asm_jtbl(LIns* ins, NIns** table);
void emitJumpTable(SwitchInfo* si, NIns* target);
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -476,20 +476,16 @@ namespace nanojit
return insImmf(-i->imm64f());
if (i->isop(LIR_fsub))
return out->ins2(LIR_fsub, i->oprnd2(), i->oprnd1());
goto involution;
case LIR_i2f:
if (i->isconst())
return insImmf(i->imm32());
break;
- case LIR_f2i:
- if (i->isconstq())
- return insImm(int32_t(i->imm64f()));
- break;
case LIR_u2f:
if (i->isconst())
return insImmf(uint32_t(i->imm32()));
break;
default:
;
}
@@ -1519,17 +1515,16 @@ namespace nanojit
case LIR_not:
case LIR_qlo:
case LIR_qhi:
case LIR_ov:
case LIR_i2q:
case LIR_u2q:
case LIR_i2f:
case LIR_u2f:
- case LIR_f2i:
case LIR_mod:
live.add(ins->oprnd1(), ins);
break;
case LIR_sti:
case LIR_stqi:
case LIR_stfi:
case LIR_stb:
@@ -1845,17 +1840,16 @@ namespace nanojit
case LIR_u2f:
case LIR_qlo:
case LIR_qhi:
case LIR_ov:
case LIR_not:
case LIR_mod:
case LIR_i2q:
case LIR_u2q:
- case LIR_f2i:
VMPI_sprintf(s, "%s = %s %s", formatRef(i), lirNames[op], formatRef(i->oprnd1()));
break;
case LIR_x:
case LIR_xt:
case LIR_xf:
case LIR_xbarrier:
case LIR_xtbl:
@@ -2393,17 +2387,16 @@ namespace nanojit
LIns* SanityFilter::ins1(LOpcode v, LIns* s0)
{
switch (v)
{
case LIR_fneg:
case LIR_fret:
case LIR_qlo:
case LIR_qhi:
- case LIR_f2i:
NanoAssert(s0->isQuad());
break;
case LIR_not:
case LIR_neg:
case LIR_u2f:
case LIR_i2f:
case LIR_i2q: case LIR_u2q:
NanoAssert(!s0->isQuad());
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -134,27 +134,16 @@ namespace nanojit
uint8_t _cse:1; // true if no side effects
uint8_t _fold:1; // true if no side effects
AbiKind _abi:3;
verbose_only ( const char* _name; )
uint32_t _count_args(uint32_t mask) const;
uint32_t get_sizes(ArgSize*) const;
- inline ArgSize returnType() const {
- return ArgSize(_argtypes & ARGSIZE_MASK_ANY);
- }
-
- // Note that this indexes arguments *backwards*, that is to
- // get the Nth arg, you have to ask for index (numargs - N).
- // See mozilla bug 525815 for fixing this.
- inline ArgSize argType(uint32_t arg) const {
- return ArgSize((_argtypes >> (ARGSIZE_SHIFT * (arg+1))) & ARGSIZE_MASK_ANY);
- }
-
inline bool isIndirect() const {
return _address < 256;
}
inline uint32_t count_args() const {
return _count_args(ARGSIZE_MASK_ANY);
}
inline uint32_t count_iargs() const {
return _count_args(ARGSIZE_MASK_INT);
--- a/js/src/nanojit/LIRopcode.tbl
+++ b/js/src/nanojit/LIRopcode.tbl
@@ -206,17 +206,17 @@ OPDEF(__87, 87, None, Void)
OPDEF(quad, 88, N64, I64) // 64-bit integer constant value
OPDEF(qcmov, 89, Op3, I64) // 64-bit conditional move
OPDEF(i2q, 90, Op1, I64) // sign-extend i32 to i64
OPDEF(u2q, 91, Op1, I64) // zero-extend u32 to u64
OPDEF(i2f, 92, Op1, F64) // convert a signed 32-bit integer to a float
OPDEF(u2f, 93, Op1, F64) // convert an unsigned 32-bit integer to a float
-OPDEF(f2i, 94, Op1, I32) // approximate f2i conversion (no exception raised)
+OPDEF(__94, 94, None, Void)
OPDEF(__95, 95, None, Void)
OPDEF(__96, 96, None, Void)
OPDEF(ldfc, 97, Ld, F64) // non-volatile 64-bit float load
OPDEF(ldqc, 98, Ld, I64) // non-volatile 64-bit integer load
OPDEF(fneg, 99, Op1, F64) // floating-point negation
OPDEF(fadd, 100, Op2, F64) // floating-point addition
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -2040,26 +2040,16 @@ Assembler::asm_u2f(LInsp ins)
// todo: support int value in memory, as per x86
NanoAssert(isKnownReg(sr));
FUITOD(rr, FpSingleScratch);
FMSR(FpSingleScratch, sr);
}
-void Assembler::asm_f2i(LInsp ins)
-{
- // where our result goes
- Register rr = prepResultReg(ins, GpRegs);
- Register sr = findRegFor(ins->oprnd1(), FpRegs);
-
- FMRS(rr, S14);
- FTOSID(S14, sr);
-}
-
void
Assembler::asm_fneg(LInsp ins)
{
LInsp lhs = ins->oprnd1();
Register rr = prepResultReg(ins, FpRegs);
Register sr = ( lhs->isUnusedOrHasUnknownReg()
? findRegFor(lhs, FpRegs)
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -943,27 +943,10 @@ enum {
#define FCPYD(_Dd,_Dm) do { \
underrunProtect(4); \
NanoAssert(ARM_VFP); \
NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm)); \
*(--_nIns) = (NIns)( COND_AL | (0xEB0<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
asm_output("fcpyd %s,%s", gpn(_Dd), gpn(_Dm)); \
} while (0)
-
-#define FMRS(_Rd,_Sn) do { \
- underrunProtect(4); \
- NanoAssert(ARM_VFP); \
- NanoAssert(((_Sn) == FpSingleScratch) && IsGpReg(_Rd)); \
- *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
- asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn)); \
- } while (0)
-
-#define FTOSID(_Sd,_Dm) do { \
- underrunProtect(4); \
- NanoAssert(ARM_VFP); \
- NanoAssert(((_Sd) == FpSingleScratch) && IsFpReg(_Dm)); \
- *(--_nIns) = (NIns)( COND_AL | (0xEBD<<16) | (0x7<<12) | (0xB4<<4) | FpRegNum(_Dm) ); \
- asm_output("ftosid %s,%s", gpn(_Sd), gpn(_Dm)); \
- } while (0)
-
-} // namespace nanojit
+}
#endif // __nanojit_NativeARM__
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -1004,23 +1004,16 @@ namespace nanojit
Register b = findRegFor(ins->oprnd1(), GpRegs);
NanoAssert(!ins->oprnd1()->isQuad());
// since oprnd1 value is 32bit, its okay to zero-extend the value without worrying about clobbering.
CVTSQ2SD(r, b); // convert int64 to double
XORPS(r); // xorps xmmr,xmmr to break dependency chains
MOVLR(b, b); // zero extend u32 to int64
}
- void Assembler::asm_f2i(LIns *ins) {
- NanoAssert(!ins->isQuad() && ins->oprnd1()->isQuad());
- Register r = prepResultReg(ins, GpRegs);
- Register b = findRegFor(ins->oprnd1(), FpRegs);
- emitprr(X64_cvtsd2si, r, b);
- }
-
void Assembler::asm_cmov(LIns *ins) {
LIns* cond = ins->oprnd1();
LIns* iftrue = ins->oprnd2();
LIns* iffalse = ins->oprnd3();
NanoAssert(cond->isCmp());
NanoAssert((ins->isop(LIR_qcmov) && iftrue->isQuad() && iffalse->isQuad()) ||
(ins->isop(LIR_cmov) && !iftrue->isQuad() && !iffalse->isQuad()));
--- a/js/src/nanojit/NativeX64.h
+++ b/js/src/nanojit/NativeX64.h
@@ -188,17 +188,16 @@ namespace nanojit
X64_cmplri = 0xF881400000000003LL, // 32bit compare r,imm32
X64_cmpqri = 0xF881480000000003LL, // 64bit compare r,int64(imm32)
X64_cmplr8 = 0x00F8834000000004LL, // 32bit compare r,imm8
X64_cmpqr8 = 0x00F8834800000004LL, // 64bit compare r,int64(imm8)
X64_cvtsi2sd= 0xC02A0F40F2000005LL, // convert int32 to double r = (double) b
X64_cvtsq2sd= 0xC02A0F48F2000005LL, // convert int64 to double r = (double) b
X64_cvtss2sd= 0xC05A0F40F3000005LL, // convert float to double r = (double) b
X64_cvtsd2ss= 0xC05A0F40F2000005LL, // convert double to float r = (float) b
- X64_cvtsd2si= 0xC02D0F40F2000005LL, // convert double to int32 r = (int32) b
X64_divsd = 0xC05E0F40F2000005LL, // divide scalar double r /= b
X64_mulsd = 0xC0590F40F2000005LL, // multiply scalar double r *= b
X64_addsd = 0xC0580F40F2000005LL, // add scalar double r += b
X64_idiv = 0xF8F7400000000003LL, // 32bit signed div (rax = rdx:rax/r, rdx=rdx:rax%r)
X64_imul = 0xC0AF0F4000000004LL, // 32bit signed mul r *= b
X64_imuli = 0xC069400000000003LL, // 32bit signed mul r = b * imm32
X64_imul8 = 0x00C06B4000000004LL, // 32bit signed mul r = b * imm8
X64_jmp = 0x00000000E9000005LL, // jump near rel32
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -1809,34 +1809,16 @@ namespace nanojit
FILDQ(disp, base);
STi(base, disp+4, 0); // high 32 bits = 0
ST(base, disp, ra); // low 32 bits = unsigned value
}
freeResourcesOf(ins);
}
- void Assembler::asm_f2i(LInsp ins)
- {
- // where our result goes
- Register xr = findRegFor(ins->oprnd1(), FpRegs);
- if (rmask(xr) & XmmRegs) {
- Register rr = prepResultReg(ins, GpRegs);
- SSE_CVTSD2SI(rr, xr);
- } else {
- NanoAssert(xr == FST0);
- Register rr = ins->getReg();
- if (isKnownReg(rr))
- evict(ins);
- int d = findMemFor(ins);
- FISTP(d, FP);
- freeRsrcOf(ins, false);
- }
- }
-
void Assembler::asm_nongp_copy(Register rd, Register rs)
{
if ((rmask(rd) & XmmRegs) && (rmask(rs) & XmmRegs)) {
// xmm -> xmm
SSE_MOVSD(rd, rs);
} else if ((rmask(rd) & GpRegs) && (rmask(rs) & XmmRegs)) {
// xmm -> gp
SSE_MOVD(rd, rs);
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -758,22 +758,16 @@ namespace nanojit
} while(0)
#define SSE_CVTSI2SD(xr,gr) do{ \
count_fpu();\
SSE(0xf20f2a, (xr)&7, (gr)&7); \
asm_output("cvtsi2sd %s,%s",gpn(xr),gpn(gr)); \
} while(0)
- #define SSE_CVTSD2SI(gr,xr) do{ \
- count_fpu();\
- SSE(0xf20f2d, (gr)&7, (xr)&7); \
- asm_output("cvtsd2si %s,%s",gpn(gr),gpn(xr)); \
- } while(0)
-
#define SSE_CVTSD2SS(xr,gr) do{ \
count_fpu();\
SSE(0xf20f5a, (xr)&7, (gr)&7); \
asm_output("cvtsd2ss %s,%s",gpn(xr),gpn(gr)); \
} while(0)
#define SSE_CVTSS2SD(xr,gr) do{ \
count_fpu();\
@@ -935,17 +929,16 @@ namespace nanojit
#define FCOMdm(p,m) do { const double* const dm = m; \
count_fpuld(); FPUdm(0xdc02|(p), dm); asm_output("fcom%s (%p)",((p)?"p":""),dm); if (p) fpu_pop(); } while(0)
#define FLD32(d,b) do { count_ldq(); FPUm(0xd900, d, b); asm_output("fld32 %d(%s)",d,gpn(b)); fpu_push();} while(0)
#define FLDQ(d,b) do { count_ldq(); FPUm(0xdd00, d, b); asm_output("fldq %d(%s)",d,gpn(b)); fpu_push();} while(0)
#define FLDQdm(m) do { const double* const dm = m; \
count_ldq(); FPUdm(0xdd00, dm); asm_output("fldq (%p)",dm); fpu_push();} while(0)
#define FILDQ(d,b) do { count_fpuld(); FPUm(0xdf05, d, b); asm_output("fildq %d(%s)",d,gpn(b)); fpu_push(); } while(0)
#define FILD(d,b) do { count_fpuld(); FPUm(0xdb00, d, b); asm_output("fild %d(%s)",d,gpn(b)); fpu_push(); } while(0)
-#define FISTP(d,b) do { count_fpu(); FPUm(0xdb03, d, b); asm_output("fistp %d(%s)",d,gpn(b)); fpu_pop(); } while(0)
#define FADD(d,b) do { count_fpu(); FPUm(0xdc00, d, b); asm_output("fadd %d(%s)",d,gpn(b)); } while(0)
#define FADDdm(m) do { const double* const dm = m; \
count_ldq(); FPUdm(0xdc00, dm); asm_output("fadd (%p)",dm); } while(0)
#define FSUB(d,b) do { count_fpu(); FPUm(0xdc04, d, b); asm_output("fsub %d(%s)",d,gpn(b)); } while(0)
#define FSUBR(d,b) do { count_fpu(); FPUm(0xdc05, d, b); asm_output("fsubr %d(%s)",d,gpn(b)); } while(0)
#define FSUBRdm(m) do { const double* const dm = m; \
count_ldq(); FPUdm(0xdc05, dm); asm_output("fsubr (%p)",dm); } while(0)
#define FMUL(d,b) do { count_fpu(); FPUm(0xdc01, d, b); asm_output("fmul %d(%s)",d,gpn(b)); } while(0)