Bug 541030 - nanojit: isconstq() should only succeed for 64-bit integers. r=rreitmai.
author      Nicholas Nethercote <nnethercote@mozilla.com>
date        Sun, 28 Mar 2010 16:49:42 -0700
changeset   40385 cb5914d2a5db3f99538e96ae76f1cfd7e19ee50b
parent      40384 46b22e830aa97c4ffdfbaef459485c59372bde93
child       40386 d1aa10888d8ef35892583b58b6eee70bf61d44df
push id     12610
push user   rsayre@mozilla.com
push date   Mon, 05 Apr 2010 17:26:41 +0000
reviewers   rreitmai
bugs        541030
milestone   1.9.3a3pre
js/src/nanojit/Assembler.cpp
js/src/nanojit/LIR.cpp
js/src/nanojit/LIR.h
js/src/nanojit/NativeARM.cpp
js/src/nanojit/NativeMIPS.cpp
js/src/nanojit/NativePPC.cpp
js/src/nanojit/NativeSparc.cpp
js/src/nanojit/NativeX64.cpp
js/src/nanojit/Nativei386.cpp
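
For reference, the heart of this patch is in LIR.h: isconstq() now means only a 64-bit integer immediate (LIR_quad, 64-bit builds only), isconstf() means a float immediate (LIR_float), and two new combined predicates, isconstqf() and isImmAny(), cover the call sites that previously relied on the old, broader isconstq(). Below is a minimal self-contained sketch of the resulting predicate family; LInsSketch and the opcode list are stand-ins for illustration, the real definitions live in js/src/nanojit/LIR.h.

    enum LOpcode { LIR_int, LIR_quad, LIR_float };

    struct LInsSketch {
        LOpcode op;
        bool isop(LOpcode o) const { return op == o; }

        bool isconst()  const { return isop(LIR_int);   }  // 32-bit int immediate
    #ifdef NANOJIT_64BIT
        bool isconstq() const { return isop(LIR_quad);  }  // 64-bit int immediate
    #endif
        bool isconstf() const { return isop(LIR_float); }  // 64-bit float immediate

        // Either 64-bit immediate: integer quad (64-bit builds only) or float.
        bool isconstqf() const {
            return
    #ifdef NANOJIT_64BIT
                isconstq() ||
    #endif
                isconstf();
        }
        // Any immediate at all.
        bool isImmAny() const { return isconst() || isconstqf(); }
    };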
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -254,17 +254,17 @@ namespace nanojit
         return r;
      }
 
     /**
      * these instructions don't have to be saved & reloaded to spill,
      * they can just be recalculated w/out any inputs.
      */
     bool Assembler::canRemat(LIns *i) {
-        return i->isconst() || i->isconstq() || i->isop(LIR_alloc);
+        return i->isImmAny() || i->isop(LIR_alloc);
     }
 
     void Assembler::codeAlloc(NIns *&start, NIns *&end, NIns *&eip
                               verbose_only(, size_t &nBytes))
     {
         // save the block we just filled
         if (start)
             CodeAlloc::add(codeList, start, end);
@@ -563,17 +563,17 @@ namespace nanojit
         }
         return p;
     }
 #endif
 
     int Assembler::findMemFor(LIns *ins)
     {
 #if NJ_USES_QUAD_CONSTANTS
-        NanoAssert(!ins->isconstq());
+        NanoAssert(!ins->isconstf());
 #endif
         if (!ins->isInAr()) {
             uint32_t const arIndex = arReserve(ins);
             ins->setArIndex(arIndex);
             NanoAssert(_activation.isValidEntry(ins->getArIndex(), ins) == (arIndex != 0));
         }
         return arDisp(ins);
     }
@@ -1913,28 +1913,28 @@ namespace nanojit
     void Assembler::handleLoopCarriedExprs(InsList& pending_lives)
     {
         // ensure that exprs spanning the loop are marked live at the end of the loop
         reserveSavedRegs();
         for (Seq<LIns*> *p = pending_lives.get(); p != NULL; p = p->tail) {
             LIns *ins = p->head;
             NanoAssert(ins->isLive());
             LIns *op1 = ins->oprnd1();
-            // must findMemFor even if we're going to findRegFor; loop-carried
+            // Must findMemFor even if we're going to findRegFor; loop-carried
             // operands may spill on another edge, and we need them to always
             // spill to the same place.
 #if NJ_USES_QUAD_CONSTANTS
-            // exception: if quad constants are true constants, we should
-            // never call findMemFor on those ops
-            if (!op1->isconstq())
+            // Exception: if float constants are true constants, we should
+            // never call findMemFor on those ops.
+            if (!op1->isconstf())
 #endif
             {
                 findMemFor(op1);
             }
-            if (! (op1->isconst() || op1->isconstf() || op1->isconstq()))
+            if (!op1->isImmAny())
                 findRegFor(op1, ins->isop(LIR_flive) ? FpRegs : GpRegs);
         }
 
         // clear this list since we have now dealt with those lifetimes.  extending
         // their lifetimes again later (earlier in the code) serves no purpose.
         pending_lives.clear();
     }
 
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -519,23 +519,23 @@ namespace nanojit
 #ifdef NANOJIT_64BIT
         case LIR_q2i:
             if (oprnd->isconstq())
                 return insImm(oprnd->imm64_0());
             break;
 #endif
 #if NJ_SOFTFLOAT_SUPPORTED
         case LIR_qlo:
-            if (oprnd->isconstq())
+            if (oprnd->isconstf())
                 return insImm(oprnd->imm64_0());
             if (oprnd->isop(LIR_qjoin))
                 return oprnd->oprnd1();
             break;
         case LIR_qhi:
-            if (oprnd->isconstq())
+            if (oprnd->isconstf())
                 return insImm(oprnd->imm64_1());
             if (oprnd->isop(LIR_qjoin))
                 return oprnd->oprnd2();
             break;
 #endif
         case LIR_not:
             if (oprnd->isconst())
                 return insImm(~oprnd->imm32());
@@ -545,27 +545,27 @@ namespace nanojit
             break;
         case LIR_neg:
             if (oprnd->isconst())
                 return insImm(-oprnd->imm32());
             if (oprnd->isop(LIR_sub)) // -(a-b) = b-a
                 return out->ins2(LIR_sub, oprnd->oprnd2(), oprnd->oprnd1());
             goto involution;
         case LIR_fneg:
-            if (oprnd->isconstq())
+            if (oprnd->isconstf())
                 return insImmf(-oprnd->imm64f());
             if (oprnd->isop(LIR_fsub))
                 return out->ins2(LIR_fsub, oprnd->oprnd2(), oprnd->oprnd1());
             goto involution;
         case LIR_i2f:
             if (oprnd->isconst())
                 return insImmf(oprnd->imm32());
             break;
         case LIR_f2i:
-            if (oprnd->isconstq())
+            if (oprnd->isconstf())
                 return insImm(int32_t(oprnd->imm64f()));
             break;
         case LIR_u2f:
             if (oprnd->isconst())
                 return insImmf(uint32_t(oprnd->imm32()));
             break;
         default:
             ;
@@ -676,17 +676,17 @@ namespace nanojit
                 // impossible to calculate the mod that refers to it. The
                 // frontend shouldn't emit div and mod with constant operands.
                 NanoAssert(0);
                 #endif
             default:
                 ;
             }
         }
-        else if (oprnd1->isconstq() && oprnd2->isconstq())
+        else if (oprnd1->isconstf() && oprnd2->isconstf())
         {
             double c1 = oprnd1->imm64f();
             double c2 = oprnd2->imm64f();
             switch (v) {
             case LIR_feq:
                 return insImm(c1 == c2);
             case LIR_flt:
                 return insImm(c1 < c2);
@@ -1516,20 +1516,20 @@ namespace nanojit
         LiveTable(Allocator& alloc)
             : alloc(alloc)
             , live(alloc)
             , retired(alloc)
             , retiredCount(0)
             , maxlive(0)
         { }
 
-        void add(LInsp i, LInsp use) {
-            if (!i->isconst() && !i->isconstq() && !live.containsKey(i)) {
-                NanoAssert(size_t(i->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
-                live.put(i,use);
+        void add(LInsp ins, LInsp use) {
+            if (!ins->isImmAny() && !live.containsKey(ins)) {
+                NanoAssert(size_t(ins->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
+                live.put(ins,use);
             }
         }
 
         void retire(LInsp i) {
             RetiredEntry *e = new (alloc) RetiredEntry();
             e->i = i;
             SeqBuilder<LIns*> livelist(alloc);
             HashMap<LIns*, LIns*>::Iter iter(live);
@@ -1897,24 +1897,26 @@ namespace nanojit
     {
         // - If 'ref' already has a name, use it.
         // - Otherwise, if it's a constant, use the constant.
         // - Otherwise, give it a name and use it.
         const char* name = lirNameMap->lookupName(ref);
         if (name) {
             VMPI_snprintf(buf->buf, buf->len, "%s", name);
         }
-        else if (ref->isconstf()) {
-            VMPI_snprintf(buf->buf, buf->len, "%g", ref->imm64f());
+        else if (ref->isconst()) {
+            formatImm(buf, ref->imm32());
         }
+#ifdef NANOJIT_64BIT
         else if (ref->isconstq()) {
             formatImmq(buf, ref->imm64());
         }
-        else if (ref->isconst()) {
-            formatImm(buf, ref->imm32());
+#endif
+        else if (ref->isconstf()) {
+            VMPI_snprintf(buf->buf, buf->len, "%g", ref->imm64f());
         }
         else {
             name = lirNameMap->createName(ref);
             VMPI_snprintf(buf->buf, buf->len, "%s", name);
         }
         return buf->buf;
     }
 
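The LIR.cpp folding changes above matter because, with the narrowed isconstq(), a float immediate no longer answers true to it; each double fold must now test isconstf() explicitly. A hypothetical standalone sketch of the unary double folds performed above (function names here are illustrative, not nanojit's):

    #include <cstdint>

    double  fold_fneg(double c) { return -c; }          // LIR_fneg: negate the immediate
    int32_t fold_f2i(double c)  { return int32_t(c); }  // LIR_f2i: C++ truncation toward zero
    double  fold_i2f(int32_t c) { return double(c); }   // LIR_i2f: exact for all int32_t
    double  fold_u2f(int32_t c) { return double(uint32_t(c)); } // LIR_u2f: treat bits as unsigned first
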
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -811,46 +811,56 @@ namespace nanojit
         bool isLoad() const {
             return isLInsLd();
         }
         bool isGuard() const {
             return isop(LIR_x) || isop(LIR_xf) || isop(LIR_xt) ||
                    isop(LIR_xbarrier) || isop(LIR_xtbl) ||
                    isop(LIR_addxov) || isop(LIR_subxov) || isop(LIR_mulxov);
         }
-        // True if the instruction is a 32-bit or smaller constant integer.
+        // True if the instruction is a 32-bit integer immediate.
         bool isconst() const {
             return isop(LIR_int);
         }
-        // True if the instruction is a 32-bit or smaller constant integer and
-        // has the value val when treated as a 32-bit signed integer.
+        // True if the instruction is a 32-bit integer immediate and
+        // has the value 'val' when treated as a 32-bit signed integer.
         bool isconstval(int32_t val) const {
             return isconst() && imm32()==val;
         }
-        // True if the instruction is a constant quad value.
+#ifdef NANOJIT_64BIT
+        // True if the instruction is a 64-bit integer immediate.
         bool isconstq() const {
-            return
-#ifdef NANOJIT_64BIT
-                isop(LIR_quad) ||
+            return isop(LIR_quad);
+        }
 #endif
-                isop(LIR_float);
-        }
-        // True if the instruction is a constant pointer value.
+        // True if the instruction is a pointer-sized integer immediate.
         bool isconstp() const
         {
 #ifdef NANOJIT_64BIT
             return isconstq();
 #else
             return isconst();
 #endif
         }
-        // True if the instruction is a constant float value.
+        // True if the instruction is a 64-bit float immediate.
         bool isconstf() const {
             return isop(LIR_float);
         }
+        // True if the instruction is a 64-bit integer or float immediate.
+        bool isconstqf() const {
+            return
+#ifdef NANOJIT_64BIT
+                isconstq() ||
+#endif
+                isconstf();
+        }
+        // True if the instruction is an immediate of any type.
+        bool isImmAny() const {
+            return isconst() || isconstqf();
+        }
 
         bool isBranch() const {
             return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j) || isop(LIR_jtbl);
         }
 
         LTy retType() const {
             return retTypes[opcode()];
         }
@@ -1311,23 +1321,24 @@ namespace nanojit
         return toLInsSk()->prevLIns;
     }
 
     inline uint8_t LIns::paramArg()  const { NanoAssert(isop(LIR_param)); return toLInsP()->arg; }
     inline uint8_t LIns::paramKind() const { NanoAssert(isop(LIR_param)); return toLInsP()->kind; }
 
     inline int32_t LIns::imm32()     const { NanoAssert(isconst());  return toLInsI()->imm32; }
 
-    inline int32_t LIns::imm64_0()   const { NanoAssert(isconstq()); return toLInsN64()->imm64_0; }
-    inline int32_t LIns::imm64_1()   const { NanoAssert(isconstq()); return toLInsN64()->imm64_1; }
+    inline int32_t LIns::imm64_0()   const { NanoAssert(isconstqf()); return toLInsN64()->imm64_0; }
+    inline int32_t LIns::imm64_1()   const { NanoAssert(isconstqf()); return toLInsN64()->imm64_1; }
     uint64_t       LIns::imm64()     const {
-        NanoAssert(isconstq());
+        NanoAssert(isconstqf());
         return (uint64_t(toLInsN64()->imm64_1) << 32) | uint32_t(toLInsN64()->imm64_0);
     }
     double         LIns::imm64f()    const {
+        NanoAssert(isconstf());
         union {
             double f;
             uint64_t q;
         } u;
         u.q = imm64();
         return u.f;
     }
 
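One detail worth noting in the LIR.h hunk above: imm64f() now asserts isconstf() before reinterpreting the stored 64 bits as a double, and it does so through a union rather than a pointer cast, which would run afoul of strict aliasing. A self-contained sketch of the same bit reinterpretation (the helper name is hypothetical):

    #include <cstdint>

    // Recover an IEEE-754 double from its raw bits, as imm64f() does above.
    static double bitsToDouble(uint64_t bits) {
        union { double f; uint64_t q; } u;  // union copy sidesteps *(double*)&bits
        u.q = bits;
        return u.f;
    }
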
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -1390,17 +1390,17 @@ Assembler::asm_store64(LOpcode op, LInsp
 {
     //asm_output("<<< store64 (dr: %d)", dr);
 
     switch (op) {
         case LIR_stfi:
             if (_config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
-                if (value->isconstq()) {
+                if (value->isconstf()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
                     asm_ld_imm(IP, value->imm64_0(), false);
                     STR(IP, rb, dr+4);
                     asm_ld_imm(IP, value->imm64_1(), false);
 
@@ -1423,33 +1423,33 @@ Assembler::asm_store64(LOpcode op, LInsp
                 FSTD(rv, baseReg, baseOffset);
 
                 if (!isS8(dr)) {
                     asm_add_imm(IP, rb, dr);
                 }
 
                 // if it's a constant, make sure our baseReg/baseOffset location
                 // has the right value
-                if (value->isconstq()) {
+                if (value->isconstf()) {
                     underrunProtect(4*4);
                     asm_immf_nochk(rv, value->imm64_0(), value->imm64_1());
                 }
             } else {
                 int da = findMemFor(value);
                 Register rb = findRegFor(base, GpRegs);
                 // *(uint64_t*)(rb+dr) = *(uint64_t*)(FP+da)
                 asm_mmq(rb, dr, FP, da);
             }
             return;
 
         case LIR_st32f:
             if (_config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
-                if (value->isconstq()) {
+                if (value->isconstf()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
                     asm_ld_imm(IP, value->imm64_0(), false);
                     STR(IP, rb, dr+4);
                     asm_ld_imm(IP, value->imm64_1(), false);
 
@@ -1474,17 +1474,17 @@ Assembler::asm_store64(LOpcode op, LInsp
                 if (!isS8(dr)) {
                     asm_add_imm(IP, rb, dr);
                 }
 
                 FCVTSD(S14, rv);
 
                 // if it's a constant, make sure our baseReg/baseOffset location
                 // has the right value
-                if (value->isconstq()) {
+                if (value->isconstf()) {
                     underrunProtect(4*4);
                     asm_immf_nochk(rv, value->imm64_0(), value->imm64_1());
                 }
             } else {
                 NanoAssertMsg(0, "st32f not supported with non-VFP, fix me");
             }
             return;
         default:
--- a/js/src/nanojit/NativeMIPS.cpp
+++ b/js/src/nanojit/NativeMIPS.cpp
@@ -356,17 +356,17 @@ namespace nanojit
                 ADDU(AT, AT, rbase);
                 LUI(AT, hi(dr));
             }
         }
     }
 
     void Assembler::asm_store_imm64(LIns *value, int dr, Register rbase)
     {
-        NanoAssert(value->isconstq());
+        NanoAssert(value->isconstf());
         int32_t msw = value->imm64_1();
         int32_t lsw = value->imm64_0();
 
         // li $at,lsw                   # iff lsw != 0
         // sw $at,off+LSWOFF($rbase)    # may use $0 instead of $at
         // li $at,msw                   # iff (msw != 0) && (msw != lsw)
         // sw $at,off+MSWOFF($rbase)    # may use $0 instead of $at
 
@@ -1097,17 +1097,17 @@ namespace nanojit
         if (op == LIR_stfi) {
             if (base->isop(LIR_alloc)) {
                 rbase = FP;
                 dr += findMemFor(base);
             }
             else
                 rbase = findRegFor(base, GpRegs);
 
-            if (value->isconstq())
+            if (value->isconstf())
                 asm_store_imm64(value, dr, rbase);
             else if (!cpu_has_fpu || value->isop(LIR_ldq)) {
 
                 int ds = findMemFor(value);
 
                 // lw $at,ds(FP)
                 // sw $at,dr($rbase)
                 // lw $at,ds+4(FP)
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -620,16 +620,18 @@ namespace nanojit
             ADDI(r, FP, d);
         }
         else if (i->isconst()) {
             if (!i->deprecated_getArIndex()) {
                 i->deprecated_markAsClear();
             }
             asm_li(r, i->imm32());
         }
+        // XXX: should really rematerialize the isconstf() and isconstq() cases
+        // here; canRemat() assumes they will be rematerialized.
         else {
             d = findMemFor(i);
             if (IsFpReg(r)) {
                 NanoAssert(i->isN64());
                 LFD(r, d, FP);
             } else if (i->isN64()) {
                 NanoAssert(IsGpReg(r));
                 LD(r, d, FP);
--- a/js/src/nanojit/NativeSparc.cpp
+++ b/js/src/nanojit/NativeSparc.cpp
@@ -378,17 +378,17 @@ namespace nanojit
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
             default:
                 NanoAssertMsg(0, "asm_store64 should never receive this LIR opcode");
                 return;
         }
 
         underrunProtect(48);
-        if (value->isconstq())
+        if (value->isconstf())
             {
                 // if a constant 64-bit value just store it now rather than
                 // generating a pointless store/load/store sequence
                 Register rb = findRegFor(base, GpRegs);
                 STW32(L2, dr+4, rb);
                 SET32(value->imm64_0(), L2);
                 STW32(L2, dr, rb);
                 SET32(value->imm64_1(), L2);
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -1376,17 +1376,17 @@ namespace nanojit
         if (ins->isop(LIR_alloc)) {
             int d = arDisp(ins);
             LEAQRM(r, d, FP);
         }
         else if (ins->isconst()) {
             ins->clearReg();
             asm_immi(r, ins->imm32(), /*canClobberCCs*/false);
         }
-        else if (ins->isop(LIR_quad)) {
+        else if (ins->isconstq()) {
             ins->clearReg();
             asm_immq(r, ins->imm64(), /*canClobberCCs*/false);
         }
         else if (ins->isconstf()) {
             ins->clearReg();
             asm_immf(r, ins->imm64(), /*canClobberCCs*/false);
         }
         else {
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -564,17 +564,17 @@ namespace nanojit
                 SSE_STSS(dr, rb, rt);
                 SSE_CVTSD2SS(rt, rv);
                 SSE_XORPDr(rt, rt);     // zero dest to ensure no dependency stalls
 
             } else {
                 FST32(pop?1:0, dr, rb);
             }
 
-        } else if (value->isconstq()) {
+        } else if (value->isconstf()) {
             STi(rb, dr+4, value->imm64_1());
             STi(rb, dr,   value->imm64_0());
 
         } else if (value->isop(LIR_ldf)) {
             // value is 64bit struct or int64_t, or maybe a double.
             // It may be live in an FPU reg.  Either way, don't put it in an
             // FPU reg just to load & store it.
 
@@ -1556,17 +1556,17 @@ namespace nanojit
             // We swap lhs/rhs on purpose here, it works out better with
             // only one fpu reg -- we can use divr/subr.
             LIns* rhs = ins->oprnd1();
             LIns* lhs = ins->oprnd2();
             debug_only( Register rr = ) prepareResultReg(ins, rmask(FST0));
             NanoAssert(FST0 == rr);
             NanoAssert(!lhs->isInReg() || FST0 == lhs->getReg());
 
-            if (rhs->isconstq()) {
+            if (rhs->isconstf()) {
                 const uint64_t* p = findQuadConstant(rhs->imm64());
 
                 switch (op) {
                 case LIR_fadd:  FADDdm( (const double*)p);  break;
                 case LIR_fsub:  FSUBRdm((const double*)p);  break;
                 case LIR_fmul:  FMULdm( (const double*)p);  break;
                 case LIR_fdiv:  FDIVRdm((const double*)p);  break;
                 default:        NanoAssert(0);
@@ -1883,17 +1883,17 @@ namespace nanojit
                 if (pop)
                     FCOMPP();
                 else
                     FCOMP();
                 FLDr(FST0); // DUP
             } else {
                 TEST_AH(mask);
                 FNSTSW_AX();        // requires EAX to be free
-                if (rhs->isconstq())
+                if (rhs->isconstf())
                 {
                     const uint64_t* p = findQuadConstant(rhs->imm64());
                     FCOMdm((pop?1:0), (const double*)p);
                 }
                 else
                 {
                     int d = findMemFor(rhs);
                     FCOM((pop?1:0), d, FP);