Bug 559973 - Rename LOpcode enums in NativePPC.cpp (r=nnethercote+)
author: Edwin Smith <edwsmith@adobe.com>
date: Fri, 07 May 2010 08:39:56 -0400
changeset: 42661 63c4aabbc6158cc9644d308c1f4d874ac60bdc17
parent: 42660 2475f620980cfa23838d0e77fcfd46a8ab4f1d65
child: 42662 b0919f5cbe4bfdcba36a9ce8cac9b7cfc0637770
push id: 1
push user: shaver@mozilla.com
push date: Tue, 04 Jan 2011 17:58:04 +0000
reviewers: nnethercote
bugs: 559973
milestone: 1.9.3a5pre
js/src/nanojit/NativePPC.cpp
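The new names follow LIR's type-suffix convention: a trailing "i" for int32 ops, "q" for quad (int64), "d" for double, "p" for pointer-sized ops such as allocp/paramp, a "u" marking unsigned variants, and a "src2dst" form for widening/narrowing loads, stores, and promotes. The summary below is compiled from the hunks in this patch; the glosses are a sketch of the usual meanings, not text taken from the patch itself:

    // loads/stores                            gloss (sketch)
    // LIR_ldzb   -> LIR_lduc2ui               load uchar,  zero-extend to uint32
    // LIR_ldzs   -> LIR_ldus2ui               load ushort, zero-extend to uint32
    // LIR_ldsb   -> LIR_ldc2i                 load char,   sign-extend to int32
    // LIR_ldss   -> LIR_lds2i                 load short,  sign-extend to int32
    // LIR_ld     -> LIR_ldi                   load int32
    // LIR_ldf    -> LIR_ldd                   load double
    // LIR_ld32f  -> LIR_ldf2d                 load float, widen to double
    // LIR_stb    -> LIR_sti2c                 store int32 as char
    // LIR_sts    -> LIR_sti2s                 store int32 as short
    // LIR_stfi   -> LIR_std                   store double
    // LIR_stqi   -> LIR_stq                   store int64
    // LIR_st32f  -> LIR_std2f                 store double, narrowed to float
    //
    // compares: eq/lt/gt/le/ge gain a type suffix; unsigned moves after it
    // LIR_eq/lt/ult/...        -> LIR_eqi/lti/ltui/...
    // LIR_feq/flt/fle/...      -> LIR_eqd/ltd/led/...
    // LIR_qeq/qlt/qult/...     -> LIR_eqq/ltq/ltuq/...
    //
    // arithmetic, shifts, calls, misc
    // LIR_add/sub/mul/neg       -> LIR_addi/subi/muli/negi
    // LIR_lsh/rsh/ush           -> LIR_lshi/rshi/rshui
    // LIR_qiadd/qiand/qior/qxor -> LIR_addq/andq/orq/xorq
    // LIR_qilsh/qirsh/qursh     -> LIR_lshq/rshq/rshuq
    // LIR_fadd/fsub/fmul/fdiv   -> LIR_addd/subd/muld/divd
    // LIR_icall/qcall/fcall     -> LIR_calli/callq/calld
    // LIR_fret                  -> LIR_retd
    // LIR_cmov/qcmov            -> LIR_cmovi/cmovq
    // LIR_u2q                   -> LIR_ui2uq
    // LIR_alloc/param           -> LIR_allocp/paramp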
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -133,104 +133,104 @@ namespace nanojit
 
     void Assembler::asm_load32(LIns *ins) {
         LIns* base = ins->oprnd1();
         int d = ins->disp();
         Register rr = deprecated_prepResultReg(ins, GpRegs);
         Register ra = getBaseReg(base, d, GpRegs);
 
         switch(ins->opcode()) {
-            case LIR_ldzb:
+            case LIR_lduc2ui:
                 if (isS16(d)) {
                     LBZ(rr, d, ra);
                 } else {
                     LBZX(rr, ra, R0); // rr = [ra+R0]
                     asm_li(R0,d);
                 }
                 return;
-            case LIR_ldzs:
+            case LIR_ldus2ui:
                 // these are expected to be 2- or 4-byte aligned
                 if (isS16(d)) {
                     LHZ(rr, d, ra);
                 } else {
                     LHZX(rr, ra, R0); // rr = [ra+R0]
                     asm_li(R0,d);
                 }
                 return;
-            case LIR_ld:
+            case LIR_ldi:
                 // these are expected to be 4-byte aligned
                 if (isS16(d)) {
                     LWZ(rr, d, ra);
                 } else {
                     LWZX(rr, ra, R0); // rr = [ra+R0]
                     asm_li(R0,d);
                 }
                 return;
-            case LIR_ldsb:
-            case LIR_ldss:
+            case LIR_ldc2i:
+            case LIR_lds2i:
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
             default:
                 NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                 return;
         }
     }
 
     void Assembler::asm_store32(LOpcode op, LIns *value, int32_t dr, LIns *base) {
 
         switch (op) {
             case LIR_sti:
-            case LIR_stb:
+            case LIR_sti2c:
                 // handled by mainline code below for now
                 break;
-            case LIR_sts:
+            case LIR_sti2s:
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
             default:
                 NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
                 return;
         }
 
         Register rs = findRegFor(value, GpRegs);
         Register ra = value == base ? rs : getBaseReg(base, dr, GpRegs & ~rmask(rs));
 
     #if !PEDANTIC
         if (isS16(dr)) {
             switch (op) {
             case LIR_sti:
                 STW(rs, dr, ra);
                 break;
-            case LIR_stb:
+            case LIR_sti2c:
                 STB(rs, dr, ra);
                 break;
             }
             return;
         }
     #endif
 
         // general case store, any offset size
         switch (op) {
         case LIR_sti:
             STWX(rs, ra, R0);
             break;
-        case LIR_stb:
+        case LIR_sti2c:
             STBX(rs, ra, R0);
             break;
         }
         asm_li(R0, dr);
     }
 
     void Assembler::asm_load64(LIns *ins) {
 
         switch (ins->opcode()) {
-            case LIR_ldf:
+            case LIR_ldd:
             CASE64(LIR_ldq:)
                 // handled by mainline code below for now
                 break;
-            case LIR_ld32f:
+            case LIR_ldf2d:
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
             default:
                 NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
                 return;
         }
 
         LIns* base = ins->oprnd1();
@@ -308,21 +308,21 @@ namespace nanojit
         SLDI(r,r,32);                  // r[32:63] = r[0:31], r[0:31] = 0
         asm_li32(r, int32_t(imm>>32)); // r[0:31] = imm[32:63]
     }
 
     void Assembler::asm_store64(LOpcode op, LIns *value, int32_t dr, LIns *base) {
         NanoAssert(value->isQorD());
 
         switch (op) {
-            case LIR_stfi:
-            CASE64(LIR_stqi:)
+            case LIR_std:
+            CASE64(LIR_stq:)
                 // handled by mainline code below for now
                 break;
-            case LIR_st32f:
+            case LIR_std2f:
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
             default:
                 NanoAssertMsg(0, "asm_store64 should never receive this LIR opcode");
                 return;
         }
 
         Register ra = getBaseReg(base, dr, GpRegs);
@@ -367,41 +367,41 @@ namespace nanojit
 
     void Assembler::asm_cond(LIns *ins) {
         LOpcode op = ins->opcode();
         LIns *a = ins->oprnd1();
         LIns *b = ins->oprnd2();
         ConditionRegister cr = CR7;
         Register r = deprecated_prepResultReg(ins, GpRegs);
         switch (op) {
-        case LIR_eq: case LIR_feq:
-        CASE64(LIR_qeq:)
+        case LIR_eqi: case LIR_eqd:
+        CASE64(LIR_eqq:)
             EXTRWI(r, r, 1, 4*cr+COND_eq); // extract CR7.eq
             MFCR(r);
             break;
-        case LIR_lt: case LIR_ult:
-        case LIR_flt: case LIR_fle:
-        CASE64(LIR_qlt:) CASE64(LIR_qult:)
+        case LIR_lti: case LIR_ltui:
+        case LIR_ltd: case LIR_led:
+        CASE64(LIR_ltq:) CASE64(LIR_ltuq:)
             EXTRWI(r, r, 1, 4*cr+COND_lt); // extract CR7.lt
             MFCR(r);
             break;
-        case LIR_gt: case LIR_ugt:
-        case LIR_fgt: case LIR_fge:
-        CASE64(LIR_qgt:) CASE64(LIR_qugt:)
+        case LIR_gti: case LIR_gtui:
+        case LIR_gtd: case LIR_ged:
+        CASE64(LIR_gtq:) CASE64(LIR_gtuq:)
             EXTRWI(r, r, 1, 4*cr+COND_gt); // extract CR7.gt
             MFCR(r);
             break;
-        case LIR_le: case LIR_ule:
-        CASE64(LIR_qle:) CASE64(LIR_qule:)
+        case LIR_lei: case LIR_leui:
+        CASE64(LIR_leq:) CASE64(LIR_leuq:)
             EXTRWI(r, r, 1, 4*cr+COND_eq); // extract CR7.eq
             MFCR(r);
             CROR(CR7, eq, lt, eq);
             break;
-        case LIR_ge: case LIR_uge:
-        CASE64(LIR_qge:) CASE64(LIR_quge:)
+        case LIR_gei: case LIR_geui:
+        CASE64(LIR_geq:) CASE64(LIR_geuq:)
             EXTRWI(r, r, 1, 4*cr+COND_eq); // extract CR7.eq
             MFCR(r);
             CROR(CR7, eq, gt, eq);
             break;
         default:
             debug_only(outputf("%s",lirNames[ins->opcode()]);)
             TODO(asm_cond);
             break;
@@ -460,37 +460,37 @@ namespace nanojit
             }
             else {
                 // known far target
                 return asm_branch_far(onfalse, cond, targ);
             }
         }
         ConditionRegister cr = CR7;
         switch (cond->opcode()) {
-        case LIR_eq:
-        case LIR_feq:
-        CASE64(LIR_qeq:)
+        case LIR_eqi:
+        case LIR_eqd:
+        CASE64(LIR_eqq:)
             if (onfalse) BNE(cr,bd); else BEQ(cr,bd);
             break;
-        case LIR_lt: case LIR_ult:
-        case LIR_flt: case LIR_fle:
-        CASE64(LIR_qlt:) CASE64(LIR_qult:)
+        case LIR_lti: case LIR_ltui:
+        case LIR_ltd: case LIR_led:
+        CASE64(LIR_ltq:) CASE64(LIR_ltuq:)
             if (onfalse) BNL(cr,bd); else BLT(cr,bd);
             break;
-        case LIR_le: case LIR_ule:
-        CASE64(LIR_qle:) CASE64(LIR_qule:)
+        case LIR_lei: case LIR_leui:
+        CASE64(LIR_leq:) CASE64(LIR_leuq:)
             if (onfalse) BGT(cr,bd); else BLE(cr,bd);
             break;
-        case LIR_gt: case LIR_ugt:
-        case LIR_fgt: case LIR_fge:
-        CASE64(LIR_qgt:) CASE64(LIR_qugt:)
+        case LIR_gti: case LIR_gtui:
+        case LIR_gtd: case LIR_ged:
+        CASE64(LIR_gtq:) CASE64(LIR_gtuq:)
             if (onfalse) BNG(cr,bd); else BGT(cr,bd);
             break;
-        case LIR_ge: case LIR_uge:
-        CASE64(LIR_qge:) CASE64(LIR_quge:)
+        case LIR_gei: case LIR_geui:
+        CASE64(LIR_geq:) CASE64(LIR_geuq:)
             if (onfalse) BLT(cr,bd); else BGE(cr,bd);
             break;
         default:
             debug_only(outputf("%s",lirNames[cond->opcode()]);)
             TODO(unknown_cond);
         }
         if (!patch)
             patch = _nIns;
@@ -498,37 +498,37 @@ namespace nanojit
     }
 
     // general case branch to any address (using CTR)
     NIns *Assembler::asm_branch_far(bool onfalse, LIns *cond, NIns * const targ) {
         LOpcode condop = cond->opcode();
         ConditionRegister cr = CR7;
         underrunProtect(16);
         switch (condop) {
-        case LIR_eq:
-        case LIR_feq:
-        CASE64(LIR_qeq:)
+        case LIR_eqi:
+        case LIR_eqd:
+        CASE64(LIR_eqq:)
             if (onfalse) BNECTR(cr); else BEQCTR(cr);
             break;
-        case LIR_lt: case LIR_ult:
-        CASE64(LIR_qlt:) CASE64(LIR_qult:)
-        case LIR_flt: case LIR_fle:
+        case LIR_lti: case LIR_ltui:
+        CASE64(LIR_ltq:) CASE64(LIR_ltuq:)
+        case LIR_ltd: case LIR_led:
             if (onfalse) BNLCTR(cr); else BLTCTR(cr);
             break;
-        case LIR_le: case LIR_ule:
-        CASE64(LIR_qle:) CASE64(LIR_qule:)
+        case LIR_lei: case LIR_leui:
+        CASE64(LIR_leq:) CASE64(LIR_leuq:)
             if (onfalse) BGTCTR(cr); else BLECTR(cr);
             break;
-        case LIR_gt: case LIR_ugt:
-        CASE64(LIR_qgt:) CASE64(LIR_qugt:)
-        case LIR_fgt: case LIR_fge:
+        case LIR_gti: case LIR_gtui:
+        CASE64(LIR_gtq:) CASE64(LIR_gtuq:)
+        case LIR_gtd: case LIR_ged:
             if (onfalse) BNGCTR(cr); else BGTCTR(cr);
             break;
-        case LIR_ge: case LIR_uge:
-        CASE64(LIR_qge:) CASE64(LIR_quge:)
+        case LIR_gei: case LIR_geui:
+        CASE64(LIR_geq:) CASE64(LIR_geuq:)
             if (onfalse) BLTCTR(cr); else BGECTR(cr);
             break;
         default:
             debug_only(outputf("%s",lirNames[condop]);)
             TODO(unknown_cond);
         }
 
     #if !defined NANOJIT_64BIT
@@ -600,50 +600,50 @@ namespace nanojit
         else if (isCmpUQOpcode(condop)) {
             CMPLD(cr, ra, rb);
         }
     #endif
         else if (isCmpDOpcode(condop)) {
            // set the lt/gt bit for led/ged.  We don't do this for
            // int/uint because in those cases we can invert the branch condition.
            // for float, we can't because of unordered comparisons
-            if (condop == LIR_fle)
+            if (condop == LIR_led)
                 CROR(cr, lt, lt, eq); // lt = lt|eq
-            else if (condop == LIR_fge)
+            else if (condop == LIR_ged)
                 CROR(cr, gt, gt, eq); // gt = gt|eq
             FCMPU(cr, ra, rb);
         }
         else {
             TODO(asm_cmp);
         }
     }
 
     void Assembler::asm_ret(LIns *ins) {
         genEpilogue();
         releaseRegisters();
         assignSavedRegs();
         LIns *value = ins->oprnd1();
-        Register r = ins->isop(LIR_fret) ? F1 : R3;
+        Register r = ins->isop(LIR_retd) ? F1 : R3;
         findSpecificRegFor(value, r);
     }
 
     void Assembler::asm_nongp_copy(Register r, Register s) {
         // PPC doesn't support any GPR<->FPR moves
         NanoAssert((rmask(r) & FpRegs) && (rmask(s) & FpRegs));
         FMR(r, s);
     }
 
     bool Assembler::canRemat(LIns* ins)
     {
-        return ins->isImmI() || ins->isop(LIR_alloc);
+        return ins->isImmI() || ins->isop(LIR_allocp);
     }
 
     void Assembler::asm_restore(LIns *i, Register r) {
         int d;
-        if (i->isop(LIR_alloc)) {
+        if (i->isop(LIR_allocp)) {
             d = deprecated_disp(i);
             ADDI(r, FP, d);
         }
         else if (i->isImmI()) {
             asm_li(r, i->immI());
         }
         else {
             d = findMemFor(i);
@@ -689,17 +689,17 @@ namespace nanojit
         }
         else {
             // saved param
             deprecated_prepResultReg(ins, rmask(savedRegs[a]));
         }
     }
 
     void Assembler::asm_call(LIns *ins) {
-        Register retReg = ( ins->isop(LIR_fcall) ? F1 : retRegs[0] );
+        Register retReg = ( ins->isop(LIR_calld) ? F1 : retRegs[0] );
         deprecated_prepResultReg(ins, rmask(retReg));
 
         // Do this after we've handled the call result, so we don't
         // force the call result to be spilled unnecessarily.
 
         evictScratchRegsExcept(0);
 
         const CallInfo* call = ins->callInfo();
@@ -780,17 +780,17 @@ namespace nanojit
             // arg goes in specific register
             if (p->isImmI()) {
                 asm_li(r, p->immI());
             } else {
                 if (p->isExtant()) {
                     if (!p->deprecated_hasKnownReg()) {
                         // load it into the arg reg
                         int d = findMemFor(p);
-                        if (p->isop(LIR_alloc)) {
+                        if (p->isop(LIR_allocp)) {
                             NanoAssert(isS16(d));
                             ADDI(r, FP, d);
                         } else if (p->isQorD()) {
                             LD(r, d, FP);
                         } else {
                             LWZ(r, d, FP);
                         }
                     } else {
@@ -852,94 +852,94 @@ namespace nanojit
         Register rr = deprecated_prepResultReg(ins, allow);
         Register ra = findRegFor(lhs, GpRegs);
 
         if (rhs->isImmI()) {
             int32_t rhsc = rhs->immI();
             if (isS16(rhsc)) {
                 // ppc arith immediate ops sign-extend the imm16 value
                 switch (op) {
-                case LIR_add:
-                CASE64(LIR_qiadd:)
+                case LIR_addi:
+                CASE64(LIR_addq:)
                     ADDI(rr, ra, rhsc);
                     return;
-                case LIR_sub:
+                case LIR_subi:
                     SUBI(rr, ra, rhsc);
                     return;
-                case LIR_mul:
+                case LIR_muli:
                     MULLI(rr, ra, rhsc);
                     return;
                 }
             }
             if (isU16(rhsc)) {
                 // ppc logical immediate ops zero-extend the imm16 value
                 switch (op) {
-                CASE64(LIR_qior:)
-                case LIR_or:
+                CASE64(LIR_orq:)
+                case LIR_ori:
                     ORI(rr, ra, rhsc);
                     return;
-                CASE64(LIR_qiand:)
-                case LIR_and:
+                CASE64(LIR_andq:)
+                case LIR_andi:
                     ANDI(rr, ra, rhsc);
                     return;
-                CASE64(LIR_qxor:)
-                case LIR_xor:
+                CASE64(LIR_xorq:)
+                case LIR_xori:
                     XORI(rr, ra, rhsc);
                     return;
                 }
             }
 
             // LIR shift ops only use the last 5 bits of the shift const
             switch (op) {
-            case LIR_lsh:
+            case LIR_lshi:
                 SLWI(rr, ra, rhsc&31);
                 return;
-            case LIR_ush:
+            case LIR_rshui:
                 SRWI(rr, ra, rhsc&31);
                 return;
-            case LIR_rsh:
+            case LIR_rshi:
                 SRAWI(rr, ra, rhsc&31);
                 return;
             }
         }
 
         // general case, put rhs in register
         Register rb = rhs==lhs ? ra : findRegFor(rhs, GpRegs&~rmask(ra));
         switch (op) {
-            CASE64(LIR_qiadd:)
-            case LIR_add:
+            CASE64(LIR_addq:)
+            case LIR_addi:
                 ADD(rr, ra, rb);
                 break;
-            CASE64(LIR_qiand:)
-            case LIR_and:
+            CASE64(LIR_andq:)
+            case LIR_andi:
                 AND(rr, ra, rb);
                 break;
-            CASE64(LIR_qior:)
-            case LIR_or:
+            CASE64(LIR_orq:)
+            case LIR_ori:
                 OR(rr, ra, rb);
                 break;
-            CASE64(LIR_qxor:)
-            case LIR_xor:
+            CASE64(LIR_xorq:)
+            case LIR_xori:
                 XOR(rr, ra, rb);
                 break;
-            case LIR_sub:  SUBF(rr, rb, ra);    break;
-            case LIR_lsh:  SLW(rr, ra, R0);     ANDI(R0, rb, 31);   break;
-            case LIR_rsh:  SRAW(rr, ra, R0);    ANDI(R0, rb, 31);   break;
-            case LIR_ush:  SRW(rr, ra, R0);     ANDI(R0, rb, 31);   break;
-            case LIR_mul:  MULLW(rr, ra, rb);   break;
+            case LIR_subi:  SUBF(rr, rb, ra);    break;
+            case LIR_lshi:  SLW(rr, ra, R0);     ANDI(R0, rb, 31);   break;
+            case LIR_rshi:  SRAW(rr, ra, R0);    ANDI(R0, rb, 31);   break;
+            case LIR_rshui: SRW(rr, ra, R0);     ANDI(R0, rb, 31);   break;
+            case LIR_muli:  MULLW(rr, ra, rb);   break;
         #ifdef NANOJIT_64BIT
-            case LIR_qilsh:
+            case LIR_lshq:
                 SLD(rr, ra, R0);
                 ANDI(R0, rb, 63);
                 break;
-            case LIR_qursh:
+            case LIR_rshuq:
                 SRD(rr, ra, R0);
                 ANDI(R0, rb, 63);
                 break;
-            case LIR_qirsh:
+            case LIR_rshq:
                 SRAD(rr, ra, R0);
                 ANDI(R0, rb, 63);
                 TODO(qirsh);
                 break;
         #endif
             default:
                 debug_only(outputf("%s",lirNames[op]);)
                 TODO(asm_arith);
@@ -950,20 +950,20 @@ namespace nanojit
         LOpcode op = ins->opcode();
         LInsp lhs = ins->oprnd1();
         LInsp rhs = ins->oprnd2();
         RegisterMask allow = FpRegs;
         Register rr = deprecated_prepResultReg(ins, allow);
         Register ra, rb;
         findRegFor2(allow, lhs, ra, allow, rhs, rb);
         switch (op) {
-            case LIR_fadd: FADD(rr, ra, rb); break;
-            case LIR_fsub: FSUB(rr, ra, rb); break;
-            case LIR_fmul: FMUL(rr, ra, rb); break;
-            case LIR_fdiv: FDIV(rr, ra, rb); break;
+            case LIR_addd: FADD(rr, ra, rb); break;
+            case LIR_subd: FSUB(rr, ra, rb); break;
+            case LIR_muld: FMUL(rr, ra, rb); break;
+            case LIR_divd: FDIV(rr, ra, rb); break;
             default:
                 debug_only(outputf("%s",lirNames[op]);)
                 TODO(asm_fop);
         }
     }
 
     void Assembler::asm_i2f(LIns *ins) {
         Register r = deprecated_prepResultReg(ins, FpRegs);
@@ -1025,17 +1025,17 @@ namespace nanojit
     void Assembler::asm_promote(LIns *ins) {
         LOpcode op = ins->opcode();
         Register r = deprecated_prepResultReg(ins, GpRegs);
         Register v = findRegFor(ins->oprnd1(), GpRegs);
         switch (op) {
         default:
             debug_only(outputf("%s",lirNames[op]));
             TODO(asm_promote);
-        case LIR_u2q:
+        case LIR_ui2uq:
             CLRLDI(r, v, 32); // clears the top 32 bits
             break;
         case LIR_i2q:
             EXTSW(r, v);
             break;
         }
     }
     #endif
@@ -1213,20 +1213,20 @@ namespace nanojit
 
     void Assembler::asm_cmov(LInsp ins)
     {
         LIns* condval = ins->oprnd1();
         LIns* iftrue  = ins->oprnd2();
         LIns* iffalse = ins->oprnd3();
 
     #ifdef NANOJIT_64BIT
-        NanoAssert((ins->opcode() == LIR_cmov  && iftrue->isI() && iffalse->isI()) ||
-                   (ins->opcode() == LIR_qcmov && iftrue->isQ() && iffalse->isQ()));
+        NanoAssert((ins->opcode() == LIR_cmovi  && iftrue->isI() && iffalse->isI()) ||
+                   (ins->opcode() == LIR_cmovq  && iftrue->isQ() && iffalse->isQ()));
     #else
-        NanoAssert((ins->opcode() == LIR_cmov  && iftrue->isI() && iffalse->isI()));
+        NanoAssert((ins->opcode() == LIR_cmovi  && iftrue->isI() && iffalse->isI()));
     #endif
 
         Register rr = prepareResultReg(ins, GpRegs);
         Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
 
         // If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
         Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
 
@@ -1248,38 +1248,38 @@ namespace nanojit
         }
 
         asm_cmp(condval->opcode(), condval->oprnd1(), condval->oprnd2(), CR7);
     }
 
     RegisterMask Assembler::hint(LIns* ins) {
         LOpcode op = ins->opcode();
         RegisterMask prefer = 0;
-        if (op == LIR_icall)
+        if (op == LIR_calli)
             prefer = rmask(R3);
     #ifdef NANOJIT_64BIT
-        else if (op == LIR_qcall)
+        else if (op == LIR_callq)
             prefer = rmask(R3);
     #endif
-        else if (op == LIR_fcall)
+        else if (op == LIR_calld)
             prefer = rmask(F1);
-        else if (op == LIR_param) {
+        else if (op == LIR_paramp) {
             if (ins->paramKind() == 0) {
                 if (ins->paramArg() < 8) {
                     prefer = rmask(argRegs[ins->paramArg()]);
                 }
             }
         }
         return prefer;
     }
 
     void Assembler::asm_neg_not(LIns *ins) {
         Register rr = deprecated_prepResultReg(ins, GpRegs);
         Register ra = findRegFor(ins->oprnd1(), GpRegs);
-        if (ins->isop(LIR_neg)) {
+        if (ins->isop(LIR_negi)) {
             NEG(rr, ra);
         } else {
             NOT(rr, ra);
         }
     }
 
     void Assembler::nInit(AvmCore*) {
     }
@@ -1388,23 +1388,23 @@ namespace nanojit
         regs.free = SavedRegs | 0x1ff8 /* R3-12 */ | 0x3ffe00000000LL /* F1-13 */;
         debug_only(regs.managed = regs.free);
     }
 
 #ifdef NANOJIT_64BIT
     void Assembler::asm_qbinop(LIns *ins) {
         LOpcode op = ins->opcode();
         switch (op) {
-        case LIR_qior:
-        case LIR_qiand:
-        case LIR_qursh:
-        case LIR_qirsh:
-        case LIR_qilsh:
-        case LIR_qxor:
-        case LIR_qiadd:
+        case LIR_orq:
+        case LIR_andq:
+        case LIR_rshuq:
+        case LIR_rshq:
+        case LIR_lshq:
+        case LIR_xorq:
+        case LIR_addq:
             asm_arith(ins);
             break;
         default:
             debug_only(outputf("%s",lirNames[op]));
             TODO(asm_qbinop);
         }
     }
 #endif // NANOJIT_64BIT