Rename LIR opcodes in Nativei386.cpp (bug 559977 r=nnethercote+)
author Edwin Smith <edwsmith@adobe.com>
Tue, 27 Apr 2010 20:05:58 -0400
changeset 41812 f49dca159319739302bcf51719906a5af290eb18
parent 41811 37bc06ac747aae5f2fb56349f9246d4f7b542633
child 41813 3a5a84f83e752520d41d25a6f1624dc3606cf1e0
push id 1
push user shaver@mozilla.com
push date Tue, 04 Jan 2011 17:58:04 +0000
reviewers nnethercote
bugs 559977
milestone 1.9.3a5pre
Rename LIR opcodes in Nativei386.cpp (bug 559977 r=nnethercote+)
js/src/nanojit/Nativei386.cpp
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -952,17 +952,17 @@ namespace nanojit
         RET();
         POPr(FP); // Restore caller's FP.
 
         return  _nIns;
     }
 
     void Assembler::asm_call(LInsp ins)
     {
-        Register rr = ( ins->isop(LIR_fcall) ? FST0 : retRegs[0] );
+        Register rr = ( ins->isop(LIR_calld) ? FST0 : retRegs[0] );
         prepareResultReg(ins, rmask(rr));
 
         evictScratchRegsExcept(rmask(rr));
 
         const CallInfo* call = ins->callInfo();
         // must be signed, not unsigned
         uint32_t iargs = call->count_int32_args();
         int32_t fargs = call->count_args() - iargs;
@@ -1007,17 +1007,17 @@ namespace nanojit
                     // with CDECL only, caller pops args
                     ADDi(SP, extra+pushsize);
                 } else if (extra > 0) {
                     ADDi(SP, extra);
                 }
             }
         }
 
-        NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
+        NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
         if (!indirect) {
             CALL(call);
         }
         else {
             // Indirect call.  x86 Calling conventions don't use EAX as an
             // argument, and do use EAX as a return value.  We need a register
             // for the address to call, so we use EAX since it will always be
             // available.
@@ -1117,23 +1117,23 @@ namespace nanojit
             NanoAssertMsg(0, "Unknown branch type in nPatchBranch");
     }
 
     RegisterMask Assembler::hint(LIns* ins)
     {
         uint32_t op = ins->opcode();
         int prefer = 0;
 
-        if (op == LIR_icall) {
+        if (op == LIR_calli) {
             prefer = rmask(retRegs[0]);
         }
-        else if (op == LIR_fcall) {
+        else if (op == LIR_calld) {
             prefer = rmask(FST0);
         }
-        else if (op == LIR_param) {
+        else if (op == LIR_paramp) {
             uint8_t arg = ins->paramArg();
             if (ins->paramKind() == 0) {
                 uint32_t max_regs = max_abi_regs[_thisfrag->lirbuf->abi];
                 if (arg < max_regs)
                     prefer = rmask(argRegs[arg]);
             } else {
                 if (arg < NumSavedRegs)
                     prefer = rmask(savedRegs[arg]);
@@ -1160,41 +1160,41 @@ namespace nanojit
         // but it hasn't shown to help in real code yet.  Noting them anyway:
         // maybe sub? R = subl rL, const  =>  leal R, [rL + -const]
         // maybe lsh? R = lshl rL, 1/2/3  =>  leal R, [rL * 2/4/8]
         return false;
     }
 
     bool Assembler::canRemat(LIns* ins)
     {
-        return ins->isImmAny() || ins->isop(LIR_alloc) || canRematLEA(ins);
+        return ins->isImmAny() || ins->isop(LIR_allocp) || canRematLEA(ins);
     }
 
     // WARNING: the code generated by this function must not affect the
     // condition codes.  See asm_cmp().
     void Assembler::asm_restore(LInsp ins, Register r)
     {
         NanoAssert(ins->getReg() == r);
 
         uint32_t arg;
         uint32_t abi_regcount;
-        if (ins->isop(LIR_alloc)) {
-            // The value of a LIR_alloc instruction is the address of the
+        if (ins->isop(LIR_allocp)) {
+            // The value of a LIR_allocp instruction is the address of the
             // stack allocation.  We can rematerialize that from the record we
             // have of where the allocation lies in the stack.
             NanoAssert(ins->isInAr());  // must have stack slots allocated
             LEA(r, arDisp(ins), FP);
 
         } else if (ins->isImmI()) {
             asm_immi(r, ins->immI(), /*canClobberCCs*/false);
 
         } else if (ins->isImmD()) {
             asm_immf(r, ins->immQ(), ins->immD(), /*canClobberCCs*/false);
 
-        } else if (ins->isop(LIR_param) && ins->paramKind() == 0 &&
+        } else if (ins->isop(LIR_paramp) && ins->paramKind() == 0 &&
             (arg = ins->paramArg()) >= (abi_regcount = max_abi_regs[_thisfrag->lirbuf->abi])) {
             // Incoming arg is on stack, can restore it from there instead of spilling.
 
             // this case is intentionally not detected in canRemat(), because we still
             // emit a load instead of a fast ALU operation.  We don't want parameter
             // spills to have precedence over immediates & ALU ops, but if one does
             // spill, we want to load it directly from its stack area, saving a store
             // in the prolog.
@@ -1231,51 +1231,51 @@ namespace nanojit
     }
 
     void Assembler::asm_store32(LOpcode op, LIns* value, int dr, LIns* base)
     {
         if (value->isImmI()) {
             Register rb = getBaseReg(base, dr, GpRegs);
             int c = value->immI();
             switch (op) {
-                case LIR_stb:
+                case LIR_sti2c:
                     ST8i(rb, dr, c);
                     break;
-                case LIR_sts:
+                case LIR_sti2s:
                     ST16i(rb, dr, c);
                     break;
                 case LIR_sti:
                     STi(rb, dr, c);
                     break;
                 default:
                     NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
                     break;
             }
         }
         else
         {
             // Quirk of x86-32: reg must be a/b/c/d for single-byte stores.
-            const RegisterMask SrcRegs = (op == LIR_stb) ?
+            const RegisterMask SrcRegs = (op == LIR_sti2c) ?
                             (1<<EAX | 1<<ECX | 1<<EDX | 1<<EBX) :
                             GpRegs;
 
             Register ra, rb;
             if (base->isImmI()) {
                 // absolute address
                 rb = UnspecifiedReg;
                 dr += base->immI();
                 ra = findRegFor(value, SrcRegs);
             } else {
                 getBaseReg2(SrcRegs, value, ra, GpRegs, base, rb, dr);
             }
             switch (op) {
-                case LIR_stb:
+                case LIR_sti2c:
                     ST8(rb, dr, ra);
                     break;
-                case LIR_sts:
+                case LIR_sti2s:
                     ST16(rb, dr, ra);
                     break;
                 case LIR_sti:
                     ST(rb, dr, ra);
                     break;
                 default:
                     NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
                     break;
@@ -1312,26 +1312,26 @@ namespace nanojit
         //   spill slot.  (There must be a spill slot assigned.)  This is why
         //   we don't use prepareResultReg() here unlike most other places --
         //   because it mandates bringing the value into a register.
         //
         if (ins->isInReg()) {
             Register rr = ins->getReg();
             asm_maybe_spill(ins, false);    // if also in memory in post-state, spill it now
             switch (ins->opcode()) {
-            case LIR_ldf:
+            case LIR_ldd:
                 if (rmask(rr) & XmmRegs) {
                     SSE_LDQ(rr, db, rb);
                 } else {
                     NanoAssert(rmask(rr) & x87Regs);
                     FLDQ(db, rb);
                 }
                 break;
 
-            case LIR_ld32f:
+            case LIR_ldf2d:
                 if (rmask(rr) & XmmRegs) {
                     SSE_CVTSS2SD(rr, rr);
                     SSE_LDSS(rr, db, rb);
                     SSE_XORPDr(rr,rr);
                 } else {
                     NanoAssert(rmask(rr) & x87Regs);
                     FLD32(db, rb);
                 }
@@ -1342,22 +1342,22 @@ namespace nanojit
                 break;
             }
 
         } else {
             NanoAssert(ins->isInAr());
             int dr = arDisp(ins);
 
             switch (ins->opcode()) {
-            case LIR_ldf:
+            case LIR_ldd:
                 // Don't use an fpu reg to simply load & store the value.
                 asm_mmq(FP, dr, rb, db);
                 break;
 
-            case LIR_ld32f:
+            case LIR_ldf2d:
                 // Need to use fpu to expand 32->64.
                 FSTPQ(dr, FP);
                 FLD32(db, rb);
                 break;
 
             default:
                 NanoAssert(0);
                 break;
@@ -1366,17 +1366,17 @@ namespace nanojit
 
         freeResourcesOf(ins);
     }
 
     void Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
     {
         Register rb = getBaseReg(base, dr, GpRegs);
 
-        if (op == LIR_st32f) {
+        if (op == LIR_std2f) {
             bool pop = !value->isInReg();
             Register rv = ( pop
                           ? findRegFor(value, _config.i386_sse2 ? XmmRegs : FpRegs)
                           : value->getReg() );
 
             if (rmask(rv) & XmmRegs) {
                 // need a scratch reg
                 Register rt = registerAllocTmp(XmmRegs);
@@ -1389,17 +1389,17 @@ namespace nanojit
             } else {
                 FST32(pop?1:0, dr, rb);
             }
 
         } else if (value->isImmD()) {
             STi(rb, dr+4, value->immQorDhi());
             STi(rb, dr,   value->immQorDlo());
 
-        } else if (value->isop(LIR_ldf)) {
+        } else if (value->isop(LIR_ldd)) {
             // value is 64bit struct or int64_t, or maybe a double.
             // It may be live in an FPU reg.  Either way, don't put it in an
             // FPU reg just to load & store it.
 
             // a) If we know it's not a double, this is right.
             // b) If we guarded that it's a double, this store could be on the
             //    side exit, copying a non-double.
             // c) Maybe it's a double just being stored.  Oh well.
@@ -1456,39 +1456,39 @@ namespace nanojit
         // Handle float conditions separately.
         if (isCmpDOpcode(condop)) {
             return asm_fbranch(branchOnFalse, cond, targ);
         }
 
         if (branchOnFalse) {
             // op == LIR_xf/LIR_jf
             switch (condop) {
-            case LIR_eq:    JNE(targ);      break;
-            case LIR_lt:    JNL(targ);      break;
-            case LIR_le:    JNLE(targ);     break;
-            case LIR_gt:    JNG(targ);      break;
-            case LIR_ge:    JNGE(targ);     break;
-            case LIR_ult:   JNB(targ);      break;
-            case LIR_ule:   JNBE(targ);     break;
-            case LIR_ugt:   JNA(targ);      break;
-            case LIR_uge:   JNAE(targ);     break;
+            case LIR_eqi:   JNE(targ);      break;
+            case LIR_lti:   JNL(targ);      break;
+            case LIR_lei:   JNLE(targ);     break;
+            case LIR_gti:   JNG(targ);      break;
+            case LIR_gei:   JNGE(targ);     break;
+            case LIR_ltui:  JNB(targ);      break;
+            case LIR_leui:  JNBE(targ);     break;
+            case LIR_gtui:  JNA(targ);      break;
+            case LIR_geui:  JNAE(targ);     break;
             default:        NanoAssert(0);  break;
             }
         } else {
             // op == LIR_xt/LIR_jt
             switch (condop) {
-            case LIR_eq:    JE(targ);       break;
-            case LIR_lt:    JL(targ);       break;
-            case LIR_le:    JLE(targ);      break;
-            case LIR_gt:    JG(targ);       break;
-            case LIR_ge:    JGE(targ);      break;
-            case LIR_ult:   JB(targ);       break;
-            case LIR_ule:   JBE(targ);      break;
-            case LIR_ugt:   JA(targ);       break;
-            case LIR_uge:   JAE(targ);      break;
+            case LIR_eqi:   JE(targ);       break;
+            case LIR_lti:   JL(targ);       break;
+            case LIR_lei:   JLE(targ);      break;
+            case LIR_gti:   JG(targ);       break;
+            case LIR_gei:   JGE(targ);      break;
+            case LIR_ltui:  JB(targ);       break;
+            case LIR_leui:  JBE(targ);      break;
+            case LIR_gtui:  JA(targ);       break;
+            case LIR_geui:  JAE(targ);      break;
             default:        NanoAssert(0);  break;
             }
         }
         NIns* at = _nIns;
         asm_cmp(cond);
         return at;
     }
 
@@ -1557,19 +1557,19 @@ namespace nanojit
         NanoAssert(lhs->isI() && rhs->isI());
 
         // Ready to issue the compare.
         if (rhs->isImmI()) {
             int c = rhs->immI();
             // findRegFor() can call asm_restore() -- asm_restore() better not
             // disturb the CCs!
             Register r = findRegFor(lhs, GpRegs);
-            if (c == 0 && cond->isop(LIR_eq)) {
+            if (c == 0 && cond->isop(LIR_eqi)) {
                 NanoAssert(N_LOOKAHEAD >= 3);
-                if ((lhs->isop(LIR_and) || lhs->isop(LIR_or)) &&
+                if ((lhs->isop(LIR_andi) || lhs->isop(LIR_ori)) &&
                     cond == lookahead[1] && lhs == lookahead[2])
                 {
                     // Do nothing.  At run-time, 'lhs' will have just computed
                     // by an i386 instruction that sets ZF for us ('and' or
                     // 'or'), so we don't have to do it ourselves.
                 } else {
                     TEST(r, r);     // sets ZF according to the value of 'lhs'
                 }
@@ -1587,25 +1587,25 @@ namespace nanojit
     {
         LOpcode opcode = ins->opcode();
         Register r = prepareResultReg(ins, AllowableFlagRegs);
 
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r,r);
 
         if (_config.i386_sse2) {
-            // LIR_flt and LIR_fgt are handled by the same case because
-            // asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a).  Likewise
-            // for LIR_fle/LIR_fge.
+            // LIR_ltd and LIR_gtd are handled by the same case because
+            // asm_fcmp() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
+            // for LIR_led/LIR_ged.
             switch (opcode) {
-            case LIR_feq:   SETNP(r);       break;
-            case LIR_flt:
-            case LIR_fgt:   SETA(r);        break;
-            case LIR_fle:
-            case LIR_fge:   SETAE(r);       break;
+            case LIR_eqd:   SETNP(r);       break;
+            case LIR_ltd:
+            case LIR_gtd:   SETA(r);        break;
+            case LIR_led:
+            case LIR_ged:   SETAE(r);       break;
             default:        NanoAssert(0);  break;
             }
         } else {
             SETNP(r);
         }
 
         freeResourcesOf(ins);
 
@@ -1616,25 +1616,25 @@ namespace nanojit
     {
         LOpcode op = ins->opcode();
 
         Register r = prepareResultReg(ins, AllowableFlagRegs);
 
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r,r);
         switch (op) {
-        case LIR_eq:    SETE(r);        break;
-        case LIR_lt:    SETL(r);        break;
-        case LIR_le:    SETLE(r);       break;
-        case LIR_gt:    SETG(r);        break;
-        case LIR_ge:    SETGE(r);       break;
-        case LIR_ult:   SETB(r);        break;
-        case LIR_ule:   SETBE(r);       break;
-        case LIR_ugt:   SETA(r);        break;
-        case LIR_uge:   SETAE(r);       break;
+        case LIR_eqi:   SETE(r);        break;
+        case LIR_lti:   SETL(r);        break;
+        case LIR_lei:   SETLE(r);       break;
+        case LIR_gti:   SETG(r);        break;
+        case LIR_gei:   SETGE(r);       break;
+        case LIR_ltui:  SETB(r);        break;
+        case LIR_leui:  SETBE(r);       break;
+        case LIR_gtui:  SETA(r);        break;
+        case LIR_geui:  SETAE(r);       break;
         default:        NanoAssert(0);  break;
         }
 
         freeResourcesOf(ins);
 
         asm_cmp(ins);
     }
 
@@ -1656,62 +1656,62 @@ namespace nanojit
     // * asm:   spill rr if necessary
     //          ... some uses of lhs in ra...
     //
     void Assembler::asm_arith(LInsp ins)
     {
         LOpcode op = ins->opcode();
 
         // First special case.
-        if (op == LIR_mod) {
+        if (op == LIR_modi) {
             asm_div_mod(ins);
             return;
         }
 
         LInsp lhs = ins->oprnd1();
         LInsp rhs = ins->oprnd2();
 
         // Second special case.
-        // XXX: bug 547125: don't need this once LEA is used for LIR_add in all cases below
-        if (op == LIR_add && lhs->isop(LIR_alloc) && rhs->isImmI()) {
-            // LIR_add(LIR_alloc, LIR_int) -- use lea.
+        // XXX: bug 547125: don't need this once LEA is used for LIR_addi in all cases below
+        if (op == LIR_addi && lhs->isop(LIR_allocp) && rhs->isImmI()) {
+            // LIR_addi(LIR_allocp, LIR_immi) -- use lea.
             Register rr = prepareResultReg(ins, GpRegs);
             int d = findMemFor(lhs) + rhs->immI();
 
             LEA(rr, d, FP);
 
             freeResourcesOf(ins);
 
             return;
         }
 
         bool isConstRhs;
         RegisterMask allow = GpRegs;
         Register rb = UnspecifiedReg;
 
         switch (op) {
-        case LIR_div:
+        case LIR_divi:
             // Nb: if the div feeds into a mod it will be handled by
             // asm_div_mod() rather than here.
             isConstRhs = false;
             rb = findRegFor(rhs, (GpRegs & ~(rmask(EAX)|rmask(EDX))));
             allow = rmask(EAX);
             evictIfActive(EDX);
             break;
-        case LIR_mul:
-        case LIR_mulxov:
+        case LIR_muli:
+        case LIR_mulxovi:
             isConstRhs = false;
             if (lhs != rhs) {
                 rb = findRegFor(rhs, allow);
                 allow &= ~rmask(rb);
             }
             break;
-        case LIR_lsh:
-        case LIR_rsh:
-        case LIR_ush:
+        case LIR_lshi:
+        case LIR_rshi:
+        case LIR_rshui:
             isConstRhs = rhs->isImmI();
             if (!isConstRhs) {
                 rb = findSpecificRegFor(rhs, ECX);
                 allow &= ~rmask(rb);
             }
             break;
         default:
             isConstRhs = rhs->isImmI();
@@ -1728,74 +1728,74 @@ namespace nanojit
         // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
         Register ra = lhs->isInReg() ? lhs->getReg() : rr;
 
         if (!isConstRhs) {
             if (lhs == rhs)
                 rb = ra;
 
             switch (op) {
-            case LIR_add:
-            case LIR_addxov:    ADD(rr, rb); break;     // XXX: bug 547125: could use LEA for LIR_add
-            case LIR_sub:
-            case LIR_subxov:    SUB(rr, rb); break;
-            case LIR_mul:
-            case LIR_mulxov:    MUL(rr, rb); break;
-            case LIR_and:       AND(rr, rb); break;
-            case LIR_or:        OR( rr, rb); break;
-            case LIR_xor:       XOR(rr, rb); break;
-            case LIR_lsh:       SHL(rr, rb); break;
-            case LIR_rsh:       SAR(rr, rb); break;
-            case LIR_ush:       SHR(rr, rb); break;
-            case LIR_div:
+            case LIR_addi:
+            case LIR_addxovi:    ADD(rr, rb); break;     // XXX: bug 547125: could use LEA for LIR_addi
+            case LIR_subi:
+            case LIR_subxovi:    SUB(rr, rb); break;
+            case LIR_muli:
+            case LIR_mulxovi:    MUL(rr, rb); break;
+            case LIR_andi:       AND(rr, rb); break;
+            case LIR_ori:        OR( rr, rb); break;
+            case LIR_xori:       XOR(rr, rb); break;
+            case LIR_lshi:       SHL(rr, rb); break;
+            case LIR_rshi:       SAR(rr, rb); break;
+            case LIR_rshui:      SHR(rr, rb); break;
+            case LIR_divi:
                 DIV(rb);
                 CDQ(); // sign-extend EAX into EDX:EAX
                 break;
             default:            NanoAssert(0);  break;
             }
 
         } else {
             int c = rhs->immI();
             switch (op) {
-            case LIR_add:
+            case LIR_addi:
                 // this doesn't set cc's, only use it when cc's not required.
                 LEA(rr, c, ra);
                 ra = rr; // suppress mov
                 break;
-            case LIR_addxov:    ADDi(rr, c);    break;
-            case LIR_sub:
-            case LIR_subxov:    SUBi(rr, c);    break;
-            case LIR_and:       ANDi(rr, c);    break;
-            case LIR_or:        ORi( rr, c);    break;
-            case LIR_xor:       XORi(rr, c);    break;
-            case LIR_lsh:       SHLi(rr, c);    break;
-            case LIR_rsh:       SARi(rr, c);    break;
-            case LIR_ush:       SHRi(rr, c);    break;
+            case LIR_addxovi:    ADDi(rr, c);    break;
+            case LIR_subi:
+            case LIR_subxovi:    SUBi(rr, c);    break;
+            case LIR_andi:       ANDi(rr, c);    break;
+            case LIR_ori:        ORi( rr, c);    break;
+            case LIR_xori:       XORi(rr, c);    break;
+            case LIR_lshi:       SHLi(rr, c);    break;
+            case LIR_rshi:       SARi(rr, c);    break;
+            case LIR_rshui:      SHRi(rr, c);    break;
             default:            NanoAssert(0);  break;
             }
         }
 
         if (rr != ra)
             MR(rr, ra);
 
         freeResourcesOf(ins);
         if (!lhs->isInReg()) {
             NanoAssert(ra == rr);
             findSpecificRegForUnallocated(lhs, ra);
         }
     }
 
-    // Generates code for a LIR_mod(LIR_div(divL, divR)) sequence.
+    // Generates code for a LIR_modi(LIR_divi(divL, divR)) sequence.
     void Assembler::asm_div_mod(LInsp mod)
     {
         LInsp div = mod->oprnd1();
 
-        // LIR_mod expects the LIR_div to be near (no interference from the register allocator).
-        NanoAssert(mod->isop(LIR_mod));
-        NanoAssert(div->isop(LIR_div));
+        // LIR_modi expects the LIR_divi to be near (no interference from the register allocator).
+        NanoAssert(mod->isop(LIR_modi));
+        NanoAssert(div->isop(LIR_divi));
 
         LInsp divL = div->oprnd1();
         LInsp divR = div->oprnd2();
 
         prepareResultReg(mod, rmask(EDX));
         prepareResultReg(div, rmask(EAX));
 
         Register rDivR = findRegFor(divR, (GpRegs & ~(rmask(EAX)|rmask(EDX))));
@@ -1835,20 +1835,20 @@ namespace nanojit
     {
         LIns* lhs = ins->oprnd1();
 
         Register rr = prepareResultReg(ins, GpRegs);
 
         // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
         Register ra = lhs->isInReg() ? lhs->getReg() : rr;
 
-        if (ins->isop(LIR_not)) {
+        if (ins->isop(LIR_noti)) {
             NOT(rr);
         } else {
-            NanoAssert(ins->isop(LIR_neg));
+            NanoAssert(ins->isop(LIR_negi));
             NEG(rr);
         }
         if (rr != ra)
             MR(rr, ra);
 
         freeResourcesOf(ins);
         if (!lhs->isInReg()) {
             NanoAssert(ra == rr);
@@ -1863,39 +1863,39 @@ namespace nanojit
         int32_t d = ins->disp();
 
         Register rr = prepareResultReg(ins, GpRegs);
 
         if (base->isImmI()) {
             intptr_t addr = base->immI();
             addr += d;
             switch (op) {
-                case LIR_ldzb:
+                case LIR_lduc2ui:
                     LD8Zdm(rr, addr);
                     break;
-                case LIR_ldsb:
+                case LIR_ldc2i:
                     LD8Sdm(rr, addr);
                     break;
-                case LIR_ldzs:
+                case LIR_ldus2ui:
                     LD16Zdm(rr, addr);
                     break;
-                case LIR_ldss:
+                case LIR_lds2i:
                     LD16Sdm(rr, addr);
                     break;
-                case LIR_ld:
+                case LIR_ldi:
                     LDdm(rr, addr);
                     break;
                 default:
                     NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                     break;
             }
 
             freeResourcesOf(ins);
 
-        } else if (base->opcode() == LIR_piadd) {
+        } else if (base->opcode() == LIR_addp) {
             // Search for add(X,Y).
             LIns *lhs = base->oprnd1();
             LIns *rhs = base->oprnd2();
 
             // If we have this:
             //
             //   W = ld (add(X, shl(Y, Z)))[d] , where int(1) <= Z <= int(3)
             //
@@ -1907,17 +1907,17 @@ namespace nanojit
             //
             //   W = ld (add(X, Y))[d]
             //
             // which we treat like this:
             //
             //   W = ld (add(X, shl(Y, 0)))[d]
             //
             int scale;
-            if (rhs->opcode() == LIR_pilsh && rhs->oprnd2()->isImmI()) {
+            if (rhs->opcode() == LIR_lshp && rhs->oprnd2()->isImmI()) {
                 scale = rhs->oprnd2()->immI();
                 if (scale >= 1 && scale <= 3)
                     rhs = rhs->oprnd1();
                 else
                     scale = 0;
             } else {
                 scale = 0;
             }
@@ -1932,29 +1932,29 @@ namespace nanojit
 
             } else {
                 ra = lhs->getReg();
                 NanoAssert(ra != rr);
                 rb = rhs->isInReg() ? findRegFor(rhs, GpRegs & ~(rmask(ra))) : rr;
             }
 
             switch (op) {
-                case LIR_ldzb:
+                case LIR_lduc2ui:
                     LD8Zsib(rr, d, ra, rb, scale);
                     break;
-                case LIR_ldsb:
+                case LIR_ldc2i:
                     LD8Ssib(rr, d, ra, rb, scale);
                     break;
-                case LIR_ldzs:
+                case LIR_ldus2ui:
                     LD16Zsib(rr, d, ra, rb, scale);
                     break;
-                case LIR_ldss:
+                case LIR_lds2i:
                     LD16Ssib(rr, d, ra, rb, scale);
                     break;
-                case LIR_ld:
+                case LIR_ldi:
                     LDsib(rr, d, ra, rb, scale);
                     break;
                 default:
                     NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                     break;
             }
 
             freeResourcesOf(ins);
@@ -1965,74 +1965,74 @@ namespace nanojit
                 NanoAssert(rb == rr);
                 findSpecificRegForUnallocated(rhs, rb);
             }
 
         } else {
             Register ra = getBaseReg(base, d, GpRegs);
 
             switch (op) {
-                case LIR_ldzb:
+                case LIR_lduc2ui:
                     LD8Z(rr, d, ra);
                     break;
-                case LIR_ldsb:
+                case LIR_ldc2i:
                     LD8S(rr, d, ra);
                     break;
-                case LIR_ldzs:
+                case LIR_ldus2ui:
                     LD16Z(rr, d, ra);
                     break;
-                case LIR_ldss:
+                case LIR_lds2i:
                     LD16S(rr, d, ra);
                     break;
-                case LIR_ld:
+                case LIR_ldi:
                     LD(rr, d, ra);
                     break;
                 default:
                     NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                     break;
             }
 
             freeResourcesOf(ins);
-            if (!base->isop(LIR_alloc) && !base->isInReg()) {
+            if (!base->isop(LIR_allocp) && !base->isInReg()) {
                 NanoAssert(ra == rr);
                 findSpecificRegForUnallocated(base, ra);
             }
         }
     }
 
     void Assembler::asm_cmov(LInsp ins)
     {
         LIns* condval = ins->oprnd1();
         LIns* iftrue  = ins->oprnd2();
         LIns* iffalse = ins->oprnd3();
 
         NanoAssert(condval->isCmp());
-        NanoAssert(ins->isop(LIR_cmov) && iftrue->isI() && iffalse->isI());
+        NanoAssert(ins->isop(LIR_cmovi) && iftrue->isI() && iffalse->isI());
 
         Register rr = prepareResultReg(ins, GpRegs);
 
         Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
 
         // If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
         Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
 
         // WARNING: We cannot generate any code that affects the condition
         // codes between the MRcc generation here and the asm_cmp() call
         // below.  See asm_cmp() for more details.
         switch (condval->opcode()) {
             // Note that these are all opposites...
-            case LIR_eq:    MRNE(rr, rf);   break;
-            case LIR_lt:    MRGE(rr, rf);   break;
-            case LIR_le:    MRG( rr, rf);   break;
-            case LIR_gt:    MRLE(rr, rf);   break;
-            case LIR_ge:    MRL( rr, rf);   break;
-            case LIR_ult:   MRAE(rr, rf);   break;
-            case LIR_ule:   MRA( rr, rf);   break;
-            case LIR_ugt:   MRBE(rr, rf);   break;
-            case LIR_uge:   MRB( rr, rf);   break;
+            case LIR_eqi:    MRNE(rr, rf);   break;
+            case LIR_lti:    MRGE(rr, rf);   break;
+            case LIR_lei:    MRG( rr, rf);   break;
+            case LIR_gti:    MRLE(rr, rf);   break;
+            case LIR_gei:    MRL( rr, rf);   break;
+            case LIR_ltui:   MRAE(rr, rf);   break;
+            case LIR_leui:   MRA( rr, rf);   break;
+            case LIR_gtui:   MRBE(rr, rf);   break;
+            case LIR_geui:   MRB( rr, rf);   break;
             default: NanoAssert(0); break;
         }
 
         if (rr != rt)
             MR(rr, rt);
 
         freeResourcesOf(ins);
         if (!iftrue->isInReg()) {
@@ -2218,17 +2218,17 @@ namespace nanojit
                     // Rematerialize the constant.
                     asm_immi(r, ins->immI(), /*canClobberCCs*/true);
                 } else if (ins->isInReg()) {
                     if (r != ins->getReg())
                         MR(r, ins->getReg());
                 } else if (ins->isInAr()) {
                     int d = arDisp(ins);
                     NanoAssert(d != 0);
-                    if (ins->isop(LIR_alloc)) {
+                    if (ins->isop(LIR_allocp)) {
                         LEA(r, d, FP);
                     } else {
                         LD(r, d, FP);
                     }
 
                 } else {
                     // This is the last use, so fine to assign it
                     // to the scratch reg, it's dead after this point.
@@ -2250,17 +2250,17 @@ namespace nanojit
 
     void Assembler::asm_pusharg(LInsp ins)
     {
         // arg goes on stack
         if (!ins->isExtant() && ins->isImmI())
         {
             PUSHi(ins->immI());    // small const we push directly
         }
-        else if (!ins->isExtant() || ins->isop(LIR_alloc))
+        else if (!ins->isExtant() || ins->isop(LIR_allocp))
         {
             Register ra = findRegFor(ins, GpRegs);
             PUSHr(ra);
         }
         else if (ins->isInReg())
         {
             PUSHr(ins->getReg());
         }
@@ -2276,17 +2276,17 @@ namespace nanojit
         // arg goes on stack
         if (!ins->isExtant() && ins->isImmI())
         {
             // small const we push directly
             STi(SP, stkd, ins->immI());
         }
         else {
             Register ra;
-            if (!ins->isInReg() || ins->isop(LIR_alloc))
+            if (!ins->isInReg() || ins->isop(LIR_allocp))
                 ra = findRegFor(ins, GpRegs & (~SavedRegs));
             else
                 ra = ins->getReg();
             ST(SP, stkd, ra);
         }
 
         stkd += sizeof(int32_t);
     }
@@ -2354,20 +2354,20 @@ namespace nanojit
                 ra = lhs->getReg();
                 NanoAssert(rmask(ra) & XmmRegs);
             }
 
             if (lhs == rhs)
                 rb = ra;
 
             switch (op) {
-            case LIR_fadd:  SSE_ADDSD(rr, rb);  break;
-            case LIR_fsub:  SSE_SUBSD(rr, rb);  break;
-            case LIR_fmul:  SSE_MULSD(rr, rb);  break;
-            case LIR_fdiv:  SSE_DIVSD(rr, rb);  break;
+            case LIR_addd:  SSE_ADDSD(rr, rb);  break;
+            case LIR_subd:  SSE_SUBSD(rr, rb);  break;
+            case LIR_muld:  SSE_MULSD(rr, rb);  break;
+            case LIR_divd:  SSE_DIVSD(rr, rb);  break;
             default:        NanoAssert(0);
             }
 
             if (rr != ra)
                 SSE_MOVSD(rr, ra);
 
             freeResourcesOf(ins);
             if (!lhs->isInReg()) {
@@ -2384,31 +2384,31 @@ namespace nanojit
             debug_only( Register rr = ) prepareResultReg(ins, rmask(FST0));
             NanoAssert(FST0 == rr);
             NanoAssert(!lhs->isInReg() || FST0 == lhs->getReg());
 
             if (rhs->isImmD()) {
                 const uint64_t* p = findImmDFromPool(rhs->immQ());
 
                 switch (op) {
-                case LIR_fadd:  FADDdm( (const double*)p);  break;
-                case LIR_fsub:  FSUBRdm((const double*)p);  break;
-                case LIR_fmul:  FMULdm( (const double*)p);  break;
-                case LIR_fdiv:  FDIVRdm((const double*)p);  break;
+                case LIR_addd:  FADDdm( (const double*)p);  break;
+                case LIR_subd:  FSUBRdm((const double*)p);  break;
+                case LIR_muld:  FMULdm( (const double*)p);  break;
+                case LIR_divd:  FDIVRdm((const double*)p);  break;
                 default:        NanoAssert(0);
                 }
 
             } else {
                 int db = findMemFor(rhs);
 
                 switch (op) {
-                case LIR_fadd:  FADD( db, FP);  break;
-                case LIR_fsub:  FSUBR(db, FP);  break;
-                case LIR_fmul:  FMUL( db, FP);  break;
-                case LIR_fdiv:  FDIVR(db, FP);  break;
+                case LIR_addd:  FADD( db, FP);  break;
+                case LIR_subd:  FSUBR(db, FP);  break;
+                case LIR_muld:  FMUL( db, FP);  break;
+                case LIR_divd:  FDIVR(db, FP);  break;
                 default:        NanoAssert(0);
                 }
             }
             freeResourcesOf(ins);
             if (!lhs->isInReg()) {
                 findSpecificRegForUnallocated(lhs, FST0);
             }
         }
@@ -2520,37 +2520,37 @@ namespace nanojit
     }
 
     NIns* Assembler::asm_fbranch(bool branchOnFalse, LIns *cond, NIns *targ)
     {
         NIns* at;
         LOpcode opcode = cond->opcode();
 
         if (_config.i386_sse2) {
-            // LIR_flt and LIR_fgt are handled by the same case because
-            // asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a).  Likewise
-            // for LIR_fle/LIR_fge.
+            // LIR_ltd and LIR_gtd are handled by the same case because
+            // asm_fcmp() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
+            // for LIR_led/LIR_ged.
             if (branchOnFalse) {
                 // op == LIR_xf
                 switch (opcode) {
-                case LIR_feq:   JP(targ);       break;
-                case LIR_flt:
-                case LIR_fgt:   JNA(targ);      break;
-                case LIR_fle:
-                case LIR_fge:   JNAE(targ);     break;
+                case LIR_eqd:   JP(targ);       break;
+                case LIR_ltd:
+                case LIR_gtd:   JNA(targ);      break;
+                case LIR_led:
+                case LIR_ged:   JNAE(targ);     break;
                 default:        NanoAssert(0);  break;
                 }
             } else {
                 // op == LIR_xt
                 switch (opcode) {
-                case LIR_feq:   JNP(targ);      break;
-                case LIR_flt:
-                case LIR_fgt:   JA(targ);       break;
-                case LIR_fle:
-                case LIR_fge:   JAE(targ);      break;
+                case LIR_eqd:   JNP(targ);      break;
+                case LIR_ltd:
+                case LIR_gtd:   JA(targ);       break;
+                case LIR_led:
+                case LIR_ged:   JAE(targ);      break;
                 default:        NanoAssert(0);  break;
                 }
             }
         } else {
             if (branchOnFalse)
                 JP(targ);
             else
                 JNP(targ);
@@ -2570,39 +2570,39 @@ namespace nanojit
         LOpcode condop = cond->opcode();
         NanoAssert(isCmpDOpcode(condop));
         LIns* lhs = cond->oprnd1();
         LIns* rhs = cond->oprnd2();
         NanoAssert(lhs->isD() && rhs->isD());
 
         if (_config.i386_sse2) {
             // First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
-            if (condop == LIR_flt) {
-                condop = LIR_fgt;
+            if (condop == LIR_ltd) {
+                condop = LIR_gtd;
                 LIns* t = lhs; lhs = rhs; rhs = t;
-            } else if (condop == LIR_fle) {
-                condop = LIR_fge;
+            } else if (condop == LIR_led) {
+                condop = LIR_ged;
                 LIns* t = lhs; lhs = rhs; rhs = t;
             }
 
-            if (condop == LIR_feq) {
+            if (condop == LIR_eqd) {
                 if (lhs == rhs) {
-                    // We can generate better code for LIR_feq when lhs==rhs (NaN test).
+                    // We can generate better code for LIR_eqd when lhs==rhs (NaN test).
 
                     // ucomisd    ZPC  outcome (SETNP/JNP succeeds if P==0)
                     // -------    ---  -------
                     // UNORDERED  111  SETNP/JNP fails
                     // EQUAL      100  SETNP/JNP succeeds
 
                     Register r = findRegFor(lhs, XmmRegs);
                     SSE_UCOMISD(r, r);
                 } else {
                     // LAHF puts the flags into AH like so:  SF:ZF:0:AF:0:PF:1:CF (aka. SZ0A_0P1C).
                     // We then mask out the bits as follows.
-                    // - LIR_feq: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
+                    // - LIR_eqd: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
                     int mask = 0x44;
 
                     // ucomisd       ZPC   lahf/test(0x44) SZP   outcome
                     // -------       ---   ---------       ---   -------
                     // UNORDERED     111   0100_0100       001   SETNP/JNP fails
                     // EQUAL         100   0100_0000       000   SETNP/JNP succeeds
                     // GREATER_THAN  000   0000_0000       011   SETNP/JNP fails
                     // LESS_THAN     001   0000_0000       011   SETNP/JNP fails
@@ -2611,93 +2611,93 @@ namespace nanojit
                     Register ra, rb;
                     findRegFor2(XmmRegs, lhs, ra, XmmRegs, rhs, rb);
 
                     TEST_AH(mask);
                     LAHF();
                     SSE_UCOMISD(ra, rb);
                 }
             } else {
-                // LIR_fgt:
+                // LIR_gtd:
                 //   ucomisd       ZPC   outcome (SETA/JA succeeds if CZ==00)
                 //   -------       ---   -------
                 //   UNORDERED     111   SETA/JA fails
                 //   EQUAL         100   SETA/JA fails
                 //   GREATER_THAN  000   SETA/JA succeeds
                 //   LESS_THAN     001   SETA/JA fails
                 //
-                // LIR_fge:
+                // LIR_ged:
                 //   ucomisd       ZPC   outcome (SETAE/JAE succeeds if C==0)
                 //   -------       ---   -------
                 //   UNORDERED     111   SETAE/JAE fails
                 //   EQUAL         100   SETAE/JAE succeeds
                 //   GREATER_THAN  000   SETAE/JAE succeeds
                 //   LESS_THAN     001   SETAE/JAE fails
 
                 Register ra, rb;
                 findRegFor2(XmmRegs, lhs, ra, XmmRegs, rhs, rb);
                 SSE_UCOMISD(ra, rb);
             }
 
         } else {
             // First, we convert (a > b) into (b < a), and (a >= b) into (b <= a).
             // Note that this is the opposite of the sse2 conversion above.
-            if (condop == LIR_fgt) {
-                condop = LIR_flt;
+            if (condop == LIR_gtd) {
+                condop = LIR_ltd;
                 LIns* t = lhs; lhs = rhs; rhs = t;
-            } else if (condop == LIR_fge) {
-                condop = LIR_fle;
+            } else if (condop == LIR_ged) {
+                condop = LIR_led;
                 LIns* t = lhs; lhs = rhs; rhs = t;
             }
 
             // FNSTSW_AX puts the flags into AH like so:  B:C3:TOP3:TOP2:TOP1:C2:C1:C0.
             // Furthermore, fcom/fcomp/fcompp sets C3:C2:C0 the same values
             // that Z:P:C are set by ucomisd, and the relative positions in AH
             // line up.  (Someone at Intel has a sense of humour.)  Therefore
             // we can use the same lahf/test(mask) technique as used in the
             // sse2 case above.  We could use fcomi/fcomip/fcomipp which set
             // ZPC directly and then use LAHF instead of FNSTSW_AX and make
             // this code generally more like the sse2 code, but we don't
             // because fcomi/fcomip/fcomipp/lahf aren't available on earlier
             // x86 machines.
             //
             // The masks are as follows:
-            // - LIR_feq: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
-            // - LIR_flt: mask == 0x05 == 0000_0101b, which extracts 0000_0P0C from AH.
-            // - LIR_fle: mask == 0x41 == 0100_0001b, which extracts 0Z00_000C from AH.
+            // - LIR_eqd: mask == 0x44 == 0100_0100b, which extracts 0Z00_0P00 from AH.
+            // - LIR_ltd: mask == 0x05 == 0000_0101b, which extracts 0000_0P0C from AH.
+            // - LIR_led: mask == 0x41 == 0100_0001b, which extracts 0Z00_000C from AH.
             //
-            // LIR_feq (very similar to the sse2 case above):
+            // LIR_eqd (very similar to the sse2 case above):
             //   ucomisd  C3:C2:C0   lahf/test(0x44) SZP   outcome
             //   -------  --------   ---------       ---   -------
             //   UNORDERED     111   0100_0100       001   SETNP fails
             //   EQUAL         100   0100_0000       000   SETNP succeeds
             //   GREATER_THAN  000   0000_0000       011   SETNP fails
             //   LESS_THAN     001   0000_0000       011   SETNP fails
             //
-            // LIR_flt:
+            // LIR_ltd:
             //   fcom     C3:C2:C0   lahf/test(0x05) SZP   outcome
             //   -------  --------   ---------       ---   -------
             //   UNORDERED     111   0000_0101       001   SETNP fails
             //   EQUAL         100   0000_0000       011   SETNP fails
             //   GREATER_THAN  000   0000_0000       011   SETNP fails
             //   LESS_THAN     001   0000_0001       000   SETNP succeeds
             //
-            // LIR_fle:
+            // LIR_led:
             //   fcom     C3:C2:C0   lahf/test(0x41) SZP   outcome
             //   -------       ---   ---------       ---   -------
             //   UNORDERED     111   0100_0001       001   SETNP fails
             //   EQUAL         100   0100_0000       000   SETNP succeeds
             //   GREATER_THAN  000   0000_0000       011   SETNP fails
             //   LESS_THAN     001   0000_0001       010   SETNP succeeds
 
             int mask = 0;   // init to avoid MSVC compile warnings
             switch (condop) {
-            case LIR_feq:   mask = 0x44;    break;
-            case LIR_flt:   mask = 0x05;    break;
-            case LIR_fle:   mask = 0x41;    break;
+            case LIR_eqd:   mask = 0x44;    break;
+            case LIR_ltd:   mask = 0x05;    break;
+            case LIR_led:   mask = 0x41;    break;
             default:        NanoAssert(0);  break;
             }
 
             evictIfActive(EAX);
             int pop = !lhs->isInReg();
             findSpecificRegFor(lhs, FST0);
 
             if (lhs == rhs) {
@@ -2763,20 +2763,20 @@ namespace nanojit
 
         // Restore ESP from EBP, undoing SUBi(SP,amt) in the prologue
         MR(SP,FP);
 
         releaseRegisters();
         assignSavedRegs();
 
         LIns *val = ins->oprnd1();
-        if (ins->isop(LIR_ret)) {
+        if (ins->isop(LIR_reti)) {
             findSpecificRegFor(val, retRegs[0]);
         } else {
-            NanoAssert(ins->isop(LIR_fret));
+            NanoAssert(ins->isop(LIR_retd));
             findSpecificRegFor(val, FST0);
             fpu_pop();
         }
     }
 
     void Assembler::swapCodeChunks() {
         if (!_nExitIns)
             codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));