Bug 559971 - Rename LOpcode enums in NativeARM.cpp (r=nnethercote+ f=jacob.bramley+)
author Edwin Smith <edwsmith@adobe.com>
Thu, 06 May 2010 09:32:06 -0400
changeset 42658 e090f2679b68883ad2bbbdf856b52256c10435a7
parent 42657 732dbc8fa4569650b11d2a2910a9d8ae786099dc
child 42659 787c6df92ea68607075b88374bd6e60b0df50fb3
push id 1
push user shaver@mozilla.com
push date Tue, 04 Jan 2011 17:58:04 +0000
reviewers nnethercote
bugs 559971
milestone 1.9.3a5pre
js/src/nanojit/NativeARM.cpp
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -622,18 +622,18 @@ Assembler::asm_arg(ArgType ty, LInsp arg
 // This function operates in the same way as asm_arg, except that it will only
 // handle arguments where (ArgType)ty == ARGTYPE_D.
 void
 Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 {
     // The stack pointer must always be at least aligned to 4 bytes.
     NanoAssert((stkd & 3) == 0);
     // The only use for this function when we are using soft floating-point
-    // is for LIR_qjoin.
-    NanoAssert(_config.arm_vfp || arg->isop(LIR_qjoin));
+    // is for LIR_ii2d.
+    NanoAssert(_config.arm_vfp || arg->isop(LIR_ii2d));
 
     Register    fp_reg = deprecated_UnknownReg;
 
     if (_config.arm_vfp) {
         fp_reg = findRegFor(arg, FpRegs);
         NanoAssert(deprecated_isKnownReg(fp_reg));
     }
 
@@ -728,17 +728,17 @@ Assembler::asm_regarg(ArgType ty, LInsp 
         // arg goes in specific register
         if (p->isImmI()) {
             asm_ld_imm(r, p->immI());
         } else {
             if (p->isExtant()) {
                 if (!p->deprecated_hasKnownReg()) {
                     // load it into the arg reg
                     int d = findMemFor(p);
-                    if (p->isop(LIR_alloc)) {
+                    if (p->isop(LIR_allocp)) {
                         asm_add_imm(r, FP, d, 0);
                     } else {
                         LDR(r, FP, d);
                     }
                 } else {
                     // it must be in a saved reg
                     MOV(r, p->deprecated_getReg());
                 }
@@ -768,17 +768,17 @@ Assembler::asm_stkarg(LInsp arg, int stk
     if (arg->isExtant() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
         // The argument resides somewhere in registers, so we simply need to
         // push it onto the stack.
         if (!_config.arm_vfp || !isF64) {
             NanoAssert(IsGpReg(rr));
 
             STR(rr, SP, stkd);
         } else {
-            // According to the comments in asm_arg_64, LIR_qjoin
+            // According to the comments in asm_arg_64, LIR_ii2d
             // can have a 64-bit argument even if VFP is disabled. However,
             // asm_arg_64 will split the argument and issue two 32-bit
             // arguments to asm_stkarg so we can ignore that case here and
             // assert that we will never get 64-bit arguments unless VFP is
             // available.
             NanoAssert(_config.arm_vfp);
             NanoAssert(IsFpReg(rr));
 
@@ -790,17 +790,17 @@ Assembler::asm_stkarg(LInsp arg, int stk
             FSTD(rr, SP, stkd);
         }
     } else {
         // The argument does not reside in registers, so we need to get some
         // memory for it and then copy it onto the stack.
         int d = findMemFor(arg);
         if (!isF64) {
             STR(IP, SP, stkd);
-            if (arg->isop(LIR_alloc)) {
+            if (arg->isop(LIR_allocp)) {
                 asm_add_imm(IP, FP, d);
             } else {
                 LDR(IP, FP, d);
             }
         } else {
 #ifdef NJ_ARM_EABI
             // EABI requires that 64-bit arguments are 64-bit aligned.
             NanoAssert((stkd & 7) == 0);
@@ -812,17 +812,17 @@ Assembler::asm_stkarg(LInsp arg, int stk
             LDR(IP, FP, d);
         }
     }
 }
 
 void
 Assembler::asm_call(LInsp ins)
 {
-    if (_config.arm_vfp && ins->isop(LIR_fcall)) {
+    if (_config.arm_vfp && ins->isop(LIR_calld)) {
         /* Because ARM actually returns the result in (R0,R1), and not in a
          * floating point register, the code to move the result into a correct
          * register is below.  We do nothing here.
          *
          * The reason being that if we did something here, the final code
          * sequence we'd get would be something like:
          *     MOV {R0-R3},params        [from below]
          *     BL function               [from below]
@@ -850,29 +850,29 @@ Assembler::asm_call(LInsp ins)
 
     const CallInfo* ci = ins->callInfo();
     ArgType argTypes[MAXARGS];
     uint32_t argc = ci->getArgTypes(argTypes);
     bool indirect = ci->isIndirect();
 
     // If we aren't using VFP, assert that the LIR operation is an integer
     // function call.
-    NanoAssert(_config.arm_vfp || ins->isop(LIR_icall));
+    NanoAssert(_config.arm_vfp || ins->isop(LIR_calli));
 
     // If we're using VFP, and the return type is a double, it'll come back in
     // R0/R1. We need to either place it in the result fp reg, or store it.
     // See comments above for more details as to why this is necessary here
     // for floating point calls, but not for integer calls.
     if (_config.arm_vfp && ins->isExtant()) {
         // If the result size is a floating-point value, treat the result
         // specially, as described previously.
         if (ci->returnType() == ARGTYPE_D) {
             Register rr = ins->deprecated_getReg();
 
-            NanoAssert(ins->opcode() == LIR_fcall);
+            NanoAssert(ins->opcode() == LIR_calld);
 
             if (!deprecated_isKnownReg(rr)) {
                 int d = deprecated_disp(ins);
                 NanoAssert(d != 0);
                 deprecated_freeRsrcOf(ins);
 
                 // The result doesn't have a register allocated, so store the
                 // result (in R0,R1) directly to its stack slot.
@@ -1157,21 +1157,21 @@ Assembler::nPatchBranch(NIns* branch, NI
     }
 }
 
 RegisterMask
 Assembler::hint(LIns* ins)
 {
     uint32_t op = ins->opcode();
     int prefer = 0;
-    if (op == LIR_icall)
+    if (op == LIR_calli)
         prefer = rmask(R0);
-    else if (op == LIR_callh)
+    else if (op == LIR_hcalli)
         prefer = rmask(R1);
-    else if (op == LIR_param) {
+    else if (op == LIR_paramp) {
         if (ins->paramKind() == 0) {
             if (ins->paramArg() < 4)
                 prefer = rmask(argRegs[ins->paramArg()]);
         }
     }
     return prefer;
 }
 
@@ -1202,25 +1202,25 @@ Assembler::asm_store32(LOpcode op, LIns 
         case LIR_sti:
             if (isU12(-dr) || isU12(dr)) {
                 STR(ra, rb, dr);
             } else {
                 STR(ra, IP, 0);
                 asm_add_imm(IP, rb, dr);
             }
             return;
-        case LIR_stb:
+        case LIR_sti2c:
             if (isU12(-dr) || isU12(dr)) {
                 STRB(ra, rb, dr);
             } else {
                 STRB(ra, IP, 0);
                 asm_add_imm(IP, rb, dr);
             }
             return;
-        case LIR_sts:
+        case LIR_sti2s:
             // Similar to the sti/stb case, but the max offset is smaller.
             if (isU8(-dr) || isU8(dr)) {
                 STRH(ra, rb, dr);
             } else {
                 STRH(ra, IP, 0);
                 asm_add_imm(IP, rb, dr);
             }
             return;
@@ -1246,23 +1246,23 @@ canRematALU(LIns *ins)
         ;
     }
     return false;
 }
 
 bool
 Assembler::canRemat(LIns* ins)
 {
-    return ins->isImmI() || ins->isop(LIR_alloc) || canRematALU(ins);
+    return ins->isImmI() || ins->isop(LIR_allocp) || canRematALU(ins);
 }
 
 void
 Assembler::asm_restore(LInsp i, Register r)
 {
-    if (i->isop(LIR_alloc)) {
+    if (i->isop(LIR_allocp)) {
         asm_add_imm(r, FP, deprecated_disp(i));
     } else if (i->isImmI()) {
         asm_ld_imm(r, i->immI());
     } else if (canRematALU(i)) {
         Register rn = i->oprnd1()->getReg();
         int32_t imm = i->oprnd2()->immI();
         switch (i->opcode()) {
         case LIR_addi: asm_add_imm(r, rn, imm, /*stat=*/ 0); break;
@@ -1349,17 +1349,17 @@ Assembler::asm_load64(LInsp ins)
 
     Register rb = findRegFor(base, GpRegs);
     NanoAssert(IsGpReg(rb));
     deprecated_freeRsrcOf(ins);
 
     //outputf("--- load64: Finished register allocation.");
 
     switch (ins->opcode()) {
-        case LIR_ldf:
+        case LIR_ldd:
             if (_config.arm_vfp && deprecated_isKnownReg(rr)) {
                 // VFP is enabled and the result will go into a register.
                 NanoAssert(IsFpReg(rr));
 
                 if (!isS8(offset >> 2) || (offset&3) != 0) {
                     FLDD(rr,IP,0);
                     asm_add_imm(IP, rb, offset);
                 } else {
@@ -1375,17 +1375,17 @@ Assembler::asm_load64(LInsp ins)
                 // Check that the offset is 8-byte (64-bit) aligned.
                 NanoAssert((d & 0x7) == 0);
 
                 // *(uint64_t*)(FP+d) = *(uint64_t*)(rb+offset)
                 asm_mmq(FP, d, rb, offset);
             }
             return;
 
-        case LIR_ld32f:
+        case LIR_ldf2d:
             if (_config.arm_vfp) {
                 if (deprecated_isKnownReg(rr)) {
                     NanoAssert(IsFpReg(rr));
                     FCVTDS(rr, S14);
                 } else {
                     // Normally D7 isn't allowed to be used as an FP reg.
                     // In this case we make an explicit exception.
                     if (isS8(d)) {
@@ -1419,17 +1419,17 @@ Assembler::asm_load64(LInsp ins)
 }
 
 void
 Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
 {
     //asm_output("<<< store64 (dr: %d)", dr);
 
     switch (op) {
-        case LIR_stfi:
+        case LIR_std:
             if (_config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
                 if (value->isImmD()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
@@ -1468,17 +1468,17 @@ Assembler::asm_store64(LOpcode op, LInsp
             } else {
                 int da = findMemFor(value);
                 Register rb = findRegFor(base, GpRegs);
                 // *(uint64_t*)(rb+dr) = *(uint64_t*)(FP+da)
                 asm_mmq(rb, dr, FP, da);
             }
             return;
 
-        case LIR_st32f:
+        case LIR_std2f:
             if (_config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
                 if (value->isImmD()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
@@ -2168,37 +2168,37 @@ Assembler::asm_fop(LInsp ins)
 
     Register ra = findRegFor(lhs, FpRegs);
     Register rb = (rhs == lhs) ? ra : findRegFor(rhs, FpRegs & ~rmask(ra));
 
     // XXX special-case 1.0 and 0.0
 
     switch (op)
     {
-        case LIR_fadd:      FADDD(rr,ra,rb);    break;
-        case LIR_fsub:      FSUBD(rr,ra,rb);    break;
-        case LIR_fmul:      FMULD(rr,ra,rb);    break;
-        case LIR_fdiv:      FDIVD(rr,ra,rb);    break;
+        case LIR_addd:      FADDD(rr,ra,rb);    break;
+        case LIR_subd:      FSUBD(rr,ra,rb);    break;
+        case LIR_muld:      FMULD(rr,ra,rb);    break;
+        case LIR_divd:      FDIVD(rr,ra,rb);    break;
         default:            NanoAssert(0);      break;
     }
 }
 
 void
 Assembler::asm_fcmp(LInsp ins)
 {
     LInsp lhs = ins->oprnd1();
     LInsp rhs = ins->oprnd2();
     LOpcode op = ins->opcode();
 
     NanoAssert(isCmpDOpcode(op));
 
     Register ra, rb;
     findRegFor2(FpRegs, lhs, ra, FpRegs, rhs, rb);
 
-    int e_bit = (op != LIR_feq);
+    int e_bit = (op != LIR_eqd);
 
     // do the comparison and get results loaded in ARM status register
     FMSTAT();
     FCMPD(ra, rb, e_bit);
 }
 
 /* Call this with targ set to 0 if the target is not yet known and the branch
  * will be patched up later.
@@ -2218,32 +2218,32 @@ Assembler::asm_branch(bool branchOnFalse
     bool    fp_cond;
 
     // Select the appropriate ARM condition code to match the LIR instruction.
     switch (condop)
     {
         // Floating-point conditions. Note that the VFP LT/LE conditions
         // require use of the unsigned condition codes, even though
         // float-point comparisons are always signed.
-        case LIR_feq:   cc = EQ;    fp_cond = true;     break;
-        case LIR_flt:   cc = LO;    fp_cond = true;     break;
-        case LIR_fle:   cc = LS;    fp_cond = true;     break;
-        case LIR_fge:   cc = GE;    fp_cond = true;     break;
-        case LIR_fgt:   cc = GT;    fp_cond = true;     break;
+        case LIR_eqd:   cc = EQ;    fp_cond = true;     break;
+        case LIR_ltd:   cc = LO;    fp_cond = true;     break;
+        case LIR_led:   cc = LS;    fp_cond = true;     break;
+        case LIR_ged:   cc = GE;    fp_cond = true;     break;
+        case LIR_gtd:   cc = GT;    fp_cond = true;     break;
 
         // Standard signed and unsigned integer comparisons.
-        case LIR_eq:    cc = EQ;    fp_cond = false;    break;
-        case LIR_lt:    cc = LT;    fp_cond = false;    break;
-        case LIR_le:    cc = LE;    fp_cond = false;    break;
-        case LIR_gt:    cc = GT;    fp_cond = false;    break;
-        case LIR_ge:    cc = GE;    fp_cond = false;    break;
-        case LIR_ult:   cc = LO;    fp_cond = false;    break;
-        case LIR_ule:   cc = LS;    fp_cond = false;    break;
-        case LIR_ugt:   cc = HI;    fp_cond = false;    break;
-        case LIR_uge:   cc = HS;    fp_cond = false;    break;
+        case LIR_eqi:   cc = EQ;    fp_cond = false;    break;
+        case LIR_lti:   cc = LT;    fp_cond = false;    break;
+        case LIR_lei:   cc = LE;    fp_cond = false;    break;
+        case LIR_gti:   cc = GT;    fp_cond = false;    break;
+        case LIR_gei:   cc = GE;    fp_cond = false;    break;
+        case LIR_ltui:  cc = LO;    fp_cond = false;    break;
+        case LIR_leui:  cc = LS;    fp_cond = false;    break;
+        case LIR_gtui:  cc = HI;    fp_cond = false;    break;
+        case LIR_geui:  cc = HS;    fp_cond = false;    break;
 
         // Default case for invalid or unexpected LIR instructions.
         default:        cc = AL;    fp_cond = false;    break;
     }
 
     // Invert the condition if required.
     if (branchOnFalse)
         cc = OppositeCond(cc);
@@ -2267,19 +2267,19 @@ Assembler::asm_branch(bool branchOnFalse
         asm_cmp(cond);
 
     return at;
 }
 
 void Assembler::asm_branch_xov(LOpcode op, NIns* target)
 {
     // Because MUL can't set the V flag, we use SMULL and CMP to set the Z flag
-    // to detect overflow on multiply. Thus, if we have a LIR_mulxov, we must
+    // to detect overflow on multiply. Thus, if we have a LIR_mulxovi, we must
     // be conditional on !Z, not V.
-    ConditionCode cc = ( op == LIR_mulxov ? NE : VS );
+    ConditionCode cc = ( op == LIR_mulxovi ? NE : VS );
 
     // Emit a suitable branch instruction.
     B_cond(cc, target);
 }
 
 void
 Assembler::asm_cmp(LIns *cond)
 {
@@ -2287,17 +2287,17 @@ Assembler::asm_cmp(LIns *cond)
     LInsp rhs = cond->oprnd2();
 
     NanoAssert(lhs->isI() && rhs->isI());
 
     // ready to issue the compare
     if (rhs->isImmI()) {
         int c = rhs->immI();
         Register r = findRegFor(lhs, GpRegs);
-        if (c == 0 && cond->isop(LIR_eq)) {
+        if (c == 0 && cond->isop(LIR_eqi)) {
             TST(r, r);
         } else {
             asm_cmpi(r, c);
         }
     } else {
         Register ra, rb;
         findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
         CMP(ra, rb);
@@ -2328,44 +2328,44 @@ Assembler::asm_cmpi(Register r, int32_t 
 
 void
 Assembler::asm_fcond(LInsp ins)
 {
     // only want certain regs
     Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
 
     switch (ins->opcode()) {
-        case LIR_feq: SETEQ(r); break;
-        case LIR_flt: SETLO(r); break; // } note: VFP LT/LE operations require use of
-        case LIR_fle: SETLS(r); break; // } unsigned LO/LS condition codes!
-        case LIR_fge: SETGE(r); break;
-        case LIR_fgt: SETGT(r); break;
+        case LIR_eqd: SETEQ(r); break;
+        case LIR_ltd: SETLO(r); break; // } note: VFP LT/LE operations require use of
+        case LIR_led: SETLS(r); break; // } unsigned LO/LS condition codes!
+        case LIR_ged: SETGE(r); break;
+        case LIR_gtd: SETGT(r); break;
         default: NanoAssert(0); break;
     }
 
     asm_fcmp(ins);
 }
 
 void
 Assembler::asm_cond(LInsp ins)
 {
     Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
     LOpcode op = ins->opcode();
 
     switch(op)
     {
-        case LIR_eq:  SETEQ(r); break;
-        case LIR_lt:  SETLT(r); break;
-        case LIR_le:  SETLE(r); break;
-        case LIR_gt:  SETGT(r); break;
-        case LIR_ge:  SETGE(r); break;
-        case LIR_ult: SETLO(r); break;
-        case LIR_ule: SETLS(r); break;
-        case LIR_ugt: SETHI(r); break;
-        case LIR_uge: SETHS(r); break;
+        case LIR_eqi:  SETEQ(r); break;
+        case LIR_lti:  SETLT(r); break;
+        case LIR_lei:  SETLE(r); break;
+        case LIR_gti:  SETGT(r); break;
+        case LIR_gei:  SETGE(r); break;
+        case LIR_ltui: SETLO(r); break;
+        case LIR_leui: SETLS(r); break;
+        case LIR_gtui: SETHI(r); break;
+        case LIR_geui: SETHS(r); break;
         default:      NanoAssert(0);  break;
     }
     asm_cmp(ins);
 }
 
 void
 Assembler::asm_arith(LInsp ins)
 {
@@ -2394,44 +2394,44 @@ Assembler::asm_arith(LInsp ins)
     // determine if the value can be encoded in an ARM instruction. If the
     // value cannot be encoded, it will be loaded into a register.
     //
     // Note that the MUL instruction can never take an immediate argument so
     // even if the argument is constant, we must allocate a register for it.
     //
     // Note: It is possible to use a combination of the barrel shifter and the
     // basic arithmetic instructions to generate constant multiplications.
-    // However, LIR_mul is never invoked with a constant during
+    // However, LIR_muli is never invoked with a constant during
     // trace-tests.js so it is very unlikely to be worthwhile implementing it.
-    if (rhs->isImmI() && op != LIR_mul && op != LIR_mulxov)
+    if (rhs->isImmI() && op != LIR_muli && op != LIR_mulxovi)
     {
-        if ((op == LIR_add || op == LIR_addxov) && lhs->isop(LIR_ialloc)) {
+        if ((op == LIR_addi || op == LIR_addxovi) && lhs->isop(LIR_alloci)) {
             // Add alloc+const. The result should be the address of the
             // allocated space plus a constant.
             Register    rs = deprecated_prepResultReg(ins, allow);
             int         d = findMemFor(lhs) + rhs->immI();
 
             NanoAssert(deprecated_isKnownReg(rs));
             asm_add_imm(rs, FP, d);
         }
 
         int32_t immI = rhs->immI();
 
         switch (op)
         {
-            case LIR_add:       asm_add_imm(rr, ra, immI);     break;
-            case LIR_addxov:    asm_add_imm(rr, ra, immI, 1);  break;
-            case LIR_sub:       asm_sub_imm(rr, ra, immI);     break;
-            case LIR_subxov:    asm_sub_imm(rr, ra, immI, 1);  break;
-            case LIR_and:       asm_and_imm(rr, ra, immI);     break;
-            case LIR_or:        asm_orr_imm(rr, ra, immI);     break;
-            case LIR_xor:       asm_eor_imm(rr, ra, immI);     break;
-            case LIR_lsh:       LSLi(rr, ra, immI);            break;
-            case LIR_rsh:       ASRi(rr, ra, immI);            break;
-            case LIR_ush:       LSRi(rr, ra, immI);            break;
+            case LIR_addi:       asm_add_imm(rr, ra, immI);     break;
+            case LIR_addxovi:    asm_add_imm(rr, ra, immI, 1);  break;
+            case LIR_subi:       asm_sub_imm(rr, ra, immI);     break;
+            case LIR_subxovi:    asm_sub_imm(rr, ra, immI, 1);  break;
+            case LIR_andi:       asm_and_imm(rr, ra, immI);     break;
+            case LIR_ori:        asm_orr_imm(rr, ra, immI);     break;
+            case LIR_xori:       asm_eor_imm(rr, ra, immI);     break;
+            case LIR_lshi:       LSLi(rr, ra, immI);            break;
+            case LIR_rshi:       ASRi(rr, ra, immI);            break;
+            case LIR_rshui:      LSRi(rr, ra, immI);            break;
 
             default:
                 NanoAssertMsg(0, "Unsupported");
                 break;
         }
 
         // We've already emitted an instruction, so return now.
         return;
@@ -2448,27 +2448,27 @@ Assembler::asm_arith(LInsp ins)
             rb = findRegFor(rhs, allow);
         allow &= ~rmask(rb);
     }
     NanoAssert(deprecated_isKnownReg(rb));
 
     const Register SBZ = (Register)0;
     switch (op)
     {
-        case LIR_add:       ADDs(rr, ra, rb, 0);    break;
-        case LIR_addxov:    ADDs(rr, ra, rb, 1);    break;
-        case LIR_sub:       SUBs(rr, ra, rb, 0);    break;
-        case LIR_subxov:    SUBs(rr, ra, rb, 1);    break;
-        case LIR_and:       ANDs(rr, ra, rb, 0);    break;
-        case LIR_or:        ORRs(rr, ra, rb, 0);    break;
-        case LIR_xor:       EORs(rr, ra, rb, 0);    break;
+        case LIR_addi:       ADDs(rr, ra, rb, 0);    break;
+        case LIR_addxovi:    ADDs(rr, ra, rb, 1);    break;
+        case LIR_subi:       SUBs(rr, ra, rb, 0);    break;
+        case LIR_subxovi:    SUBs(rr, ra, rb, 1);    break;
+        case LIR_andi:       ANDs(rr, ra, rb, 0);    break;
+        case LIR_ori:        ORRs(rr, ra, rb, 0);    break;
+        case LIR_xori:       EORs(rr, ra, rb, 0);    break;
 
-        // XXX: LIR_mul can be done more efficiently than LIR_mulxov.  See bug 542629.
-        case LIR_mul:
-        case LIR_mulxov:
+        // XXX: LIR_muli can be done more efficiently than LIR_mulxovi.  See bug 542629.
+        case LIR_muli:
+        case LIR_mulxovi:
             // ARMv5 and earlier cores cannot do a MUL where the first operand
             // is also the result, so we need a special case to handle that.
             //
             // We try to use rb as the first operand by default because it is
             // common for (rr == ra) and is thus likely to be the most
             // efficient method.
 
             if ((_config.arm_arch > 5) || (rr != rb)) {
@@ -2526,25 +2526,25 @@ Assembler::asm_arith(LInsp ins)
                     ALUr(AL, mov, 1, IP, ra, ra);
                 }
             }
             break;
 
         // The shift operations need a mask to match the JavaScript
         // specification because the ARM architecture allows a greater shift
         // range than JavaScript.
-        case LIR_lsh:
+        case LIR_lshi:
             LSL(rr, ra, IP);
             ANDi(IP, rb, 0x1f);
             break;
-        case LIR_rsh:
+        case LIR_rshi:
             ASR(rr, ra, IP);
             ANDi(IP, rb, 0x1f);
             break;
-        case LIR_ush:
+        case LIR_rshui:
             LSR(rr, ra, IP);
             ANDi(IP, rb, 0x1f);
             break;
         default:
             NanoAssertMsg(0, "Unsupported");
             break;
     }
 }
@@ -2558,69 +2558,69 @@ Assembler::asm_neg_not(LInsp ins)
     LIns* lhs = ins->oprnd1();
     // If this is the last use of lhs in reg, we can re-use result reg.
     // Else, lhs already has a register assigned.
     Register ra = ( !lhs->isInReg()
                   ? findSpecificRegFor(lhs, rr)
                   : lhs->deprecated_getReg() );
     NanoAssert(deprecated_isKnownReg(ra));
 
-    if (op == LIR_not)
+    if (op == LIR_noti)
         MVN(rr, ra);
     else
         RSBS(rr, ra);
 }
 
 void
 Assembler::asm_load32(LInsp ins)
 {
     LOpcode op = ins->opcode();
     LIns* base = ins->oprnd1();
     int d = ins->disp();
 
     Register rr = deprecated_prepResultReg(ins, GpRegs);
     Register ra = getBaseReg(base, d, GpRegs);
 
     switch (op) {
-        case LIR_ldzb:
+        case LIR_lduc2ui:
             if (isU12(-d) || isU12(d)) {
                 LDRB(rr, ra, d);
             } else {
                 LDRB(rr, IP, 0);
                 asm_add_imm(IP, ra, d);
             }
             return;
-        case LIR_ldzs:
+        case LIR_ldus2ui:
             // Some ARM machines require 2-byte alignment here.
             // Similar to the ldcb/ldzb case, but the max offset is smaller.
             if (isU8(-d) || isU8(d)) {
                 LDRH(rr, ra, d);
             } else {
                 LDRH(rr, IP, 0);
                 asm_add_imm(IP, ra, d);
             }
             return;
-        case LIR_ld:
+        case LIR_ldi:
             // Some ARM machines require 4-byte alignment here.
             if (isU12(-d) || isU12(d)) {
                 LDR(rr, ra, d);
             } else {
                 LDR(rr, IP, 0);
                 asm_add_imm(IP, ra, d);
             }
             return;
-        case LIR_ldsb:
+        case LIR_ldc2i:
             if (isU8(-d) || isU8(d)) {
                 LDRSB(rr, ra, d);
             } else {
                 LDRSB(rr, IP, 0);
                 asm_add_imm(IP, ra, d);
             }
             return;
-        case LIR_ldss:
+        case LIR_lds2i:
             if (isU8(-d) || isU8(d)) {
                 LDRSH(rr, ra, d);
             } else {
                 LDRSH(rr, IP, 0);
                 asm_add_imm(IP, ra, d);
             }
             return;
         default:
@@ -2632,35 +2632,35 @@ Assembler::asm_load32(LInsp ins)
 void
 Assembler::asm_cmov(LInsp ins)
 {
     LIns* condval = ins->oprnd1();
     LIns* iftrue  = ins->oprnd2();
     LIns* iffalse = ins->oprnd3();
 
     NanoAssert(condval->isCmp());
-    NanoAssert(ins->opcode() == LIR_cmov && iftrue->isI() && iffalse->isI());
+    NanoAssert(ins->opcode() == LIR_cmovi && iftrue->isI() && iffalse->isI());
 
     const Register rr = deprecated_prepResultReg(ins, GpRegs);
 
     // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
     // (This is true on Intel, is it true on all architectures?)
     const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
     switch (condval->opcode()) {
         // note that these are all opposites...
-        case LIR_eq:    MOVNE(rr, iffalsereg);  break;
-        case LIR_lt:    MOVGE(rr, iffalsereg);  break;
-        case LIR_le:    MOVGT(rr, iffalsereg);  break;
-        case LIR_gt:    MOVLE(rr, iffalsereg);  break;
-        case LIR_ge:    MOVLT(rr, iffalsereg);  break;
-        case LIR_ult:   MOVHS(rr, iffalsereg);  break;
-        case LIR_ule:   MOVHI(rr, iffalsereg);  break;
-        case LIR_ugt:   MOVLS(rr, iffalsereg);  break;
-        case LIR_uge:   MOVLO(rr, iffalsereg);  break;
-        default: debug_only( NanoAssert(0) );   break;
+        case LIR_eqi:    MOVNE(rr, iffalsereg);  break;
+        case LIR_lti:    MOVGE(rr, iffalsereg);  break;
+        case LIR_lei:    MOVGT(rr, iffalsereg);  break;
+        case LIR_gti:    MOVLE(rr, iffalsereg);  break;
+        case LIR_gei:    MOVLT(rr, iffalsereg);  break;
+        case LIR_ltui:   MOVHS(rr, iffalsereg);  break;
+        case LIR_leui:   MOVHI(rr, iffalsereg);  break;
+        case LIR_gtui:   MOVLS(rr, iffalsereg);  break;
+        case LIR_geui:   MOVLO(rr, iffalsereg);  break;
+        default: debug_only( NanoAssert(0) );    break;
     }
     /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
     asm_cmp(condval);
 }
 
 void
 Assembler::asm_qhi(LInsp ins)
 {
@@ -2723,26 +2723,26 @@ Assembler::asm_ret(LIns *ins)
     MOV(IP, R0);
 
     // Pop the stack frame.
     MOV(SP,FP);
 
     releaseRegisters();
     assignSavedRegs();
     LIns *value = ins->oprnd1();
-    if (ins->isop(LIR_ret)) {
+    if (ins->isop(LIR_reti)) {
         findSpecificRegFor(value, R0);
     }
     else {
-        NanoAssert(ins->isop(LIR_fret));
+        NanoAssert(ins->isop(LIR_retd));
         if (_config.arm_vfp) {
             Register reg = findRegFor(value, FpRegs);
             FMRRD(R0, R1, reg);
         } else {
-            NanoAssert(value->isop(LIR_qjoin));
+            NanoAssert(value->isop(LIR_ii2d));
             findSpecificRegFor(value->oprnd1(), R0); // lo
             findSpecificRegFor(value->oprnd2(), R1); // hi
         }
     }
 }
 
 void
 Assembler::asm_jtbl(LIns* ins, NIns** table)