Bug 562152 - Remove typedef LInsp (r=nnethercote+)
author: Edwin Smith <edwsmith@adobe.com>
Thu, 10 Jun 2010 11:22:18 -0400
changeset: 47432:47e02e48437a9ed3f2afd43b4abe14e3a320adb6
parent: 47431:230fd4230baab0db1eaf4be5f8557412390f9f84
child: 47433:895516ddc0be1f657faa81639d82069cce5da43c
push id: 14372
push user: rsayre@mozilla.com
push date: Wed, 14 Jul 2010 13:02:13 +0000
reviewers: nnethercote
bugs: 562152
milestone: 1.9.3a5pre
js/src/nanojit/Assembler.cpp
js/src/nanojit/Assembler.h
js/src/nanojit/LIR.cpp
js/src/nanojit/LIR.h
js/src/nanojit/NativeARM.cpp
js/src/nanojit/NativeARM.h
js/src/nanojit/NativeMIPS.cpp
js/src/nanojit/NativeMIPS.h
js/src/nanojit/NativePPC.cpp
js/src/nanojit/NativeSparc.cpp
js/src/nanojit/Nativei386.cpp
js/src/nanojit/Nativei386.h
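
Before this patch, LIR.h carried a pointer typedef for LIns, so callers could spell the type either way; the change below mechanically replaces every use of the alias with the explicit pointer type. A minimal before/after sketch (the exact form of the removed typedef is assumed here, since its own deletion is not shown in the quoted hunks):

    // Assumed form of the typedef this patch removes (from LIR.h):
    typedef LIns* LInsp;
    LInsp  old_ins = reader->read();   // old spelling, via the alias

    // After the patch, the pointer type is written out directly:
    LIns*  new_ins = reader->read();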
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -400,17 +400,17 @@ namespace nanojit
         return findRegFor(i, rmask(w));
     }
 
     // Like findRegFor(), but called when the LIns is used as a pointer.  It
    // doesn't have to be called; findRegFor() can still be used, but it can
     // optimize the LIR_allocp case by indexing off FP, thus saving the use of
     // a GpReg.
     //
-    Register Assembler::getBaseReg(LInsp base, int &d, RegisterMask allow)
+    Register Assembler::getBaseReg(LIns* base, int &d, RegisterMask allow)
     {
     #if !PEDANTIC
         if (base->isop(LIR_allocp)) {
             // The value of a LIR_allocp is a pointer to its stack memory,
             // which is always relative to FP.  So we can just return FP if we
             // also adjust 'd' (and can do so in a valid manner).  Or, in the
             // PEDANTIC case, we can just assign a register as normal;
             // findRegFor() will allocate the stack memory for LIR_allocp if
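
The hunk above is cut off mid-comment, but it describes the complete optimization: a LIR_allocp's value is always FP-relative, so when such an instruction is used as a base pointer its stack offset can be folded into 'd' and FP returned directly, saving a general-purpose register. A hedged sketch of that fast path, not the patch's exact body (it assumes findMemFor() yields the alloc's FP offset and omits the displacement-validity check the comment alludes to):

    Register Assembler::getBaseReg(LIns* base, int &d, RegisterMask allow)
    {
        if (base->isop(LIR_allocp)) {
            d += findMemFor(base);   // fold the stack slot's offset into d
            return FP;               // no GpReg consumed for the base
        }
        return findRegFor(base, allow);
    }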
@@ -632,17 +632,17 @@ namespace nanojit
             // whose return value was ignored (ie. if ins->isInReg() was false
             // prior to the findRegFor() call).
             FSTP(FST0);     // pop the fpu result since it isn't used
         }
 #endif
         return r;
     }
 
-    void Assembler::asm_maybe_spill(LInsp ins, bool pop)
+    void Assembler::asm_maybe_spill(LIns* ins, bool pop)
     {
         int d = ins->isInAr() ? arDisp(ins) : 0;
         Register r = ins->getReg();
         if (ins->isInAr()) {
             verbose_only( RefBuf b;
                           if (_logc->lcbits & LC_Native) {
                              setOutputForEOL("  <= spill %s",
                              _thisfrag->lirbuf->printer->formatRef(&b, ins)); } )
@@ -746,17 +746,17 @@ namespace nanojit
         for (GuardRecord* lr = exit->guards; lr; lr = lr->next) {
             Fragment *frag = lr->exit->target;
             NanoAssert(frag->fragEntry != 0);
             si->table[si->index] = frag->fragEntry;
         }
     }
 #endif
 
-    NIns* Assembler::asm_exit(LInsp guard)
+    NIns* Assembler::asm_exit(LIns* guard)
     {
         SideExit *exit = guard->record()->exit;
         NIns* at = 0;
         if (!_branchStateMap.get(exit))
         {
             at = asm_leave_trace(guard);
         }
         else
@@ -765,17 +765,17 @@ namespace nanojit
             intersectRegisterState(*captured);
             at = exit->target->fragEntry;
             NanoAssert(at != 0);
             _branchStateMap.remove(exit);
         }
         return at;
     }
 
-    NIns* Assembler::asm_leave_trace(LInsp guard)
+    NIns* Assembler::asm_leave_trace(LIns* guard)
     {
         verbose_only( verbose_outputf("----------------------------------- ## END exit block %p", guard);)
 
         // This point is unreachable.  So free all the registers.  If an
         // instruction has a stack entry we will leave it alone, otherwise we
         // free it entirely.  intersectRegisterState() will restore.
         RegAlloc capture = _allocator;
         releaseRegisters();
@@ -1164,24 +1164,24 @@ namespace nanojit
 #define countlir_jcc()
 #define countlir_label()
 #define countlir_xcc()
 #define countlir_x()
 #define countlir_call()
 #define countlir_jtbl()
 #endif
 
-    void Assembler::asm_jmp(LInsp ins, InsList& pending_lives)
+    void Assembler::asm_jmp(LIns* ins, InsList& pending_lives)
     {
         NanoAssert((ins->isop(LIR_j) && !ins->oprnd1()) ||
                    (ins->isop(LIR_jf) && ins->oprnd1()->isImmI(0)) ||
                    (ins->isop(LIR_jt) && ins->oprnd1()->isImmI(1)));
 
         countlir_jmp();
-        LInsp to = ins->getTarget();
+        LIns* to = ins->getTarget();
         LabelState *label = _labels.get(to);
         // The jump is always taken so whatever register state we
         // have from downstream code, is irrelevant to code before
         // this jump.  So clear it out.  We will pick up register
         // state from the jump target, if we have seen that label.
         releaseRegisters();
         if (label && label->addr) {
             // Forward jump - pick up register state from target.
@@ -1198,33 +1198,33 @@ namespace nanojit
             else {
                 intersectRegisterState(label->regs);
             }
             JMP(0);
             _patches.put(_nIns, to);
         }
     }
 
-    void Assembler::asm_jcc(LInsp ins, InsList& pending_lives)
+    void Assembler::asm_jcc(LIns* ins, InsList& pending_lives)
     {
         bool branchOnFalse = (ins->opcode() == LIR_jf);
         LIns* cond = ins->oprnd1();
         if (cond->isImmI()) {
             if ((!branchOnFalse && !cond->immI()) || (branchOnFalse && cond->immI())) {
                 // jmp never taken, not needed
             } else {
                 asm_jmp(ins, pending_lives);    // jmp always taken
             }
             return;
         }
 
         // Changes to the logic below will likely need to be propagated to Assembler::asm_jov().
 
         countlir_jcc();
-        LInsp to = ins->getTarget();
+        LIns* to = ins->getTarget();
         LabelState *label = _labels.get(to);
         if (label && label->addr) {
             // Forward jump to known label.  Need to merge with label's register state.
             unionRegisterState(label->regs);
             asm_branch(branchOnFalse, cond, label->addr);
         }
         else {
             // Back edge.
@@ -1238,24 +1238,24 @@ namespace nanojit
                 // Evict all registers, most conservative approach.
                 intersectRegisterState(label->regs);
             }
             NIns *branch = asm_branch(branchOnFalse, cond, 0);
             _patches.put(branch,to);
         }
     }
 
-    void Assembler::asm_jov(LInsp ins, InsList& pending_lives)
+    void Assembler::asm_jov(LIns* ins, InsList& pending_lives)
     {
         // The caller is responsible for countlir_* profiling, unlike
        // asm_jcc above.  The reason for this is that asm_jov may not
        // be called if the instruction is dead, and it is our convention
         // to count such instructions anyway.
         LOpcode op = ins->opcode();
-        LInsp to = ins->getTarget();
+        LIns* to = ins->getTarget();
         LabelState *label = _labels.get(to);
         if (label && label->addr) {
             // forward jump to known label.  need to merge with label's register state.
             unionRegisterState(label->regs);
             asm_branch_ov(op, label->addr);
         }
         else {
             // back edge.
@@ -1269,26 +1269,26 @@ namespace nanojit
                 // evict all registers, most conservative approach.
                 intersectRegisterState(label->regs);
             }
             NIns *branch = asm_branch_ov(op, 0);
             _patches.put(branch,to);
         }
     }
 
-    void Assembler::asm_x(LInsp ins)
+    void Assembler::asm_x(LIns* ins)
     {
         verbose_only( _thisfrag->nStaticExits++; )
         countlir_x();
         // Generate the side exit branch on the main trace.
         NIns *exit = asm_exit(ins);
         JMP(exit);
     }
 
-    void Assembler::asm_xcc(LInsp ins)
+    void Assembler::asm_xcc(LIns* ins)
     {
         LIns* cond = ins->oprnd1();
         if (cond->isImmI()) {
             if ((ins->isop(LIR_xt) && !cond->immI()) || (ins->isop(LIR_xf) && cond->immI())) {
                 // guard never taken, not needed
             } else {
                 asm_x(ins);     // guard always taken
             }
@@ -1351,17 +1351,17 @@ namespace nanojit
         // The trace must end with one of these opcodes.  Mark it as live.
         NanoAssert(reader->finalIns()->isop(LIR_x)    ||
                    reader->finalIns()->isop(LIR_xtbl) ||
                    reader->finalIns()->isRet()        ||
                    isLiveOpcode(reader->finalIns()->opcode()));
 
         for (currIns = reader->read(); !currIns->isop(LIR_start); currIns = reader->read())
         {
-            LInsp ins = currIns;        // give it a shorter name for local use
+            LIns* ins = currIns;        // give it a shorter name for local use
 
             if (!ins->isLive()) {
                 NanoAssert(!ins->isExtant());
                 continue;
             }
 
 #ifdef NJ_VERBOSE
             // Output the post-regstate (registers and/or activation).
@@ -1384,17 +1384,17 @@ namespace nanojit
                 case LIR_regfence:
                     evictAllActiveRegs();
                     break;
 
                 case LIR_livei:
                 CASE64(LIR_liveq:)
                 case LIR_lived: {
                     countlir_live();
-                    LInsp op1 = ins->oprnd1();
+                    LIns* op1 = ins->oprnd1();
                     op1->setResultLive();
                     // LIR_allocp's are meant to live until the point of the
                     // LIR_livep instruction, marking other expressions as
                     // live ensures that they remain so at loop bottoms.
                     // LIR_allocp areas require special treatment because they
                     // are accessed indirectly and the indirect accesses are
                     // invisible to the assembler, other than via LIR_livep.
                     // Other expression results are only accessed directly in
@@ -1455,17 +1455,17 @@ namespace nanojit
                     countlir_param();
                     if (ins->isExtant()) {
                         asm_param(ins);
                     }
                     break;
 
 #if NJ_SOFTFLOAT_SUPPORTED
                 case LIR_hcalli: {
-                    LInsp op1 = ins->oprnd1();
+                    LIns* op1 = ins->oprnd1();
                     op1->setResultLive();
                     if (ins->isExtant()) {
                         // Return result of quad-call in register.
                         deprecated_prepResultReg(ins, rmask(retRegs[1]));
                         // If hi half was used, we must use the call to ensure it happens.
                         findSpecificRegFor(op1, retRegs[0]);
                     }
                     break;
@@ -1974,20 +1974,20 @@ namespace nanojit
             LIns *ins = b->savedRegs[i];
             if (ins)
                 findMemFor(ins);
         }
     }
 
     void Assembler::assignParamRegs()
     {
-        LInsp state = _thisfrag->lirbuf->state;
+        LIns* state = _thisfrag->lirbuf->state;
         if (state)
             findSpecificRegForUnallocated(state, argRegs[state->paramArg()]);
-        LInsp param1 = _thisfrag->lirbuf->param1;
+        LIns* param1 = _thisfrag->lirbuf->param1;
         if (param1)
             findSpecificRegForUnallocated(param1, argRegs[param1->paramArg()]);
     }
 
     void Assembler::handleLoopCarriedExprs(InsList& pending_lives)
     {
         // ensure that exprs spanning the loop are marked live at the end of the loop
         reserveSavedRegs();
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -316,17 +316,17 @@ namespace nanojit
             Register    registerAllocTmp(RegisterMask allow);
             void        registerResetAll();
             void        evictAllActiveRegs();
             void        evictSomeActiveRegs(RegisterMask regs);
             void        evictScratchRegsExcept(RegisterMask ignore);
             void        intersectRegisterState(RegAlloc& saved);
             void        unionRegisterState(RegAlloc& saved);
             void        assignSaved(RegAlloc &saved, RegisterMask skip);
-            LInsp       findVictim(RegisterMask allow);
+            LIns*       findVictim(RegisterMask allow);
 
             Register    getBaseReg(LIns *ins, int &d, RegisterMask allow);
             void        getBaseReg2(RegisterMask allowValue, LIns* value, Register& rv,
                                     RegisterMask allowBase, LIns* base, Register& rb, int &d);
 #if NJ_USES_IMMD_POOL
             const uint64_t*
                         findImmDFromPool(uint64_t q);
 #endif
@@ -398,72 +398,72 @@ namespace nanojit
 
             NIns*       _epilogue;
             AssmError   _err;           // 0 = means assemble() appears ok, otherwise it failed
         #if PEDANTIC
             NIns*       pedanticTop;
         #endif
 
             // Holds the current instruction during gen().
-            LInsp       currIns;
+            LIns*       currIns;
 
             AR          _activation;
             RegAlloc    _allocator;
 
             verbose_only( void asm_inc_m32(uint32_t*); )
             void        asm_mmq(Register rd, int dd, Register rs, int ds);
-            void        asm_jmp(LInsp ins, InsList& pending_lives);
-            void        asm_jcc(LInsp ins, InsList& pending_lives);
-            void        asm_jov(LInsp ins, InsList& pending_lives);
-            void        asm_x(LInsp ins);
-            void        asm_xcc(LInsp ins);
-            NIns*       asm_exit(LInsp guard);
-            NIns*       asm_leave_trace(LInsp guard);
+            void        asm_jmp(LIns* ins, InsList& pending_lives);
+            void        asm_jcc(LIns* ins, InsList& pending_lives);
+            void        asm_jov(LIns* ins, InsList& pending_lives);
+            void        asm_x(LIns* ins);
+            void        asm_xcc(LIns* ins);
+            NIns*       asm_exit(LIns* guard);
+            NIns*       asm_leave_trace(LIns* guard);
             void        asm_store32(LOpcode op, LIns *val, int d, LIns *base);
             void        asm_store64(LOpcode op, LIns *val, int d, LIns *base);
 
             // WARNING: the implementation of asm_restore() should emit fast code
             // to rematerialize instructions where canRemat() returns true.
             // Otherwise, register allocation decisions will be suboptimal.
-            void        asm_restore(LInsp, Register);
+            void        asm_restore(LIns*, Register);
 
-            void        asm_maybe_spill(LInsp ins, bool pop);
+            void        asm_maybe_spill(LIns* ins, bool pop);
             void        asm_spill(Register rr, int d, bool pop, bool quad);
-            void        asm_load64(LInsp ins);
-            void        asm_ret(LInsp ins);
+            void        asm_load64(LIns* ins);
+            void        asm_ret(LIns* ins);
 #ifdef NANOJIT_64BIT
-            void        asm_immq(LInsp ins);
+            void        asm_immq(LIns* ins);
 #endif
-            void        asm_immd(LInsp ins);
-            void        asm_condd(LInsp ins);
-            void        asm_cond(LInsp ins);
-            void        asm_arith(LInsp ins);
-            void        asm_neg_not(LInsp ins);
-            void        asm_load32(LInsp ins);
-            void        asm_cmov(LInsp ins);
-            void        asm_param(LInsp ins);
-            void        asm_immi(LInsp ins);
+            void        asm_immd(LIns* ins);
+            void        asm_condd(LIns* ins);
+            void        asm_cond(LIns* ins);
+            void        asm_arith(LIns* ins);
+            void        asm_neg_not(LIns* ins);
+            void        asm_load32(LIns* ins);
+            void        asm_cmov(LIns* ins);
+            void        asm_param(LIns* ins);
+            void        asm_immi(LIns* ins);
 #if NJ_SOFTFLOAT_SUPPORTED
-            void        asm_qlo(LInsp ins);
-            void        asm_qhi(LInsp ins);
+            void        asm_qlo(LIns* ins);
+            void        asm_qhi(LIns* ins);
             void        asm_qjoin(LIns *ins);
 #endif
-            void        asm_fneg(LInsp ins);
-            void        asm_fop(LInsp ins);
-            void        asm_i2d(LInsp ins);
-            void        asm_ui2d(LInsp ins);
-            void        asm_d2i(LInsp ins);
+            void        asm_fneg(LIns* ins);
+            void        asm_fop(LIns* ins);
+            void        asm_i2d(LIns* ins);
+            void        asm_ui2d(LIns* ins);
+            void        asm_d2i(LIns* ins);
 #ifdef NANOJIT_64BIT
-            void        asm_q2i(LInsp ins);
+            void        asm_q2i(LIns* ins);
             void        asm_promote(LIns *ins);
 #endif
             void        asm_nongp_copy(Register r, Register s);
-            void        asm_call(LInsp);
-            Register    asm_binop_rhs_reg(LInsp ins);
-            NIns*       asm_branch(bool branchOnFalse, LInsp cond, NIns* targ);
+            void        asm_call(LIns*);
+            Register    asm_binop_rhs_reg(LIns* ins);
+            NIns*       asm_branch(bool branchOnFalse, LIns* cond, NIns* targ);
             NIns*       asm_branch_ov(LOpcode op, NIns* targ);
             void        asm_switch(LIns* ins, NIns* target);
             void        asm_jtbl(LIns* ins, NIns** table);
             void        emitJumpTable(SwitchInfo* si, NIns* target);
             void        assignSavedRegs();
             void        reserveSavedRegs();
             void        assignParamRegs();
             void        handleLoopCarriedExprs(InsList& pending_lives);
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -129,23 +129,23 @@ namespace nanojit
         _logc->printf("=== BEGIN %s ===\n", _title);
         int j = 0;
         for (Seq<char*>* p = _strs.get(); p != NULL; p = p->tail)
             _logc->printf("  %02d: %s\n", j++, p->head);
         _logc->printf("=== END %s ===\n", _title);
         _logc->printf("\n");
     }
 
-    LInsp ReverseLister::read()
+    LIns* ReverseLister::read()
     {
         // This check is necessary to avoid printing the LIR_start multiple
         // times due to lookahead in Assembler::gen().
         if (_prevIns && _prevIns->isop(LIR_start))
             return _prevIns;
-        LInsp ins = in->read();
+        LIns* ins = in->read();
         InsBuf b;
         const char* str = _printer->formatIns(&b, ins);
         char* cpy = new (_alloc) char[strlen(str)+1];
         VMPI_strcpy(cpy, str);
         _strs.insert(cpy);
         _prevIns = ins;
         return ins;
     }
@@ -197,17 +197,17 @@ namespace nanojit
     {
         chunkAlloc();
         // Link LIR stream back to prior instruction.
         // Unlike all the ins*() functions, we don't call makeRoom() here
         // because we know we have enough space, having just started a new
         // page.
         LInsSk* insSk = (LInsSk*)_unused;
         LIns*   ins   = insSk->getLIns();
-        ins->initLInsSk((LInsp)addrOfLastLInsOnCurrentChunk);
+        ins->initLInsSk((LIns*)addrOfLastLInsOnCurrentChunk);
         _unused += sizeof(LInsSk);
         verbose_only(_stats.lir++);
     }
 
     // Make room for a single instruction.
     uintptr_t LirBuffer::makeRoom(size_t szB)
     {
         // Make sure the size is ok
@@ -244,163 +244,163 @@ namespace nanojit
             moveToNewChunk(addrOfLastLInsOnChunk);
         }
 
         // Make sure it's word-aligned.
         NanoAssert(0 == startOfRoom % sizeof(void*));
         return startOfRoom;
     }
 
-    LInsp LirBufWriter::insStore(LOpcode op, LInsp val, LInsp base, int32_t d, AccSet accSet)
+    LIns* LirBufWriter::insStore(LOpcode op, LIns* val, LIns* base, int32_t d, AccSet accSet)
     {
         if (isS16(d)) {
             LInsSt* insSt = (LInsSt*)_buf->makeRoom(sizeof(LInsSt));
             LIns*   ins   = insSt->getLIns();
             ins->initLInsSt(op, val, base, d, accSet);
             return ins;
         } else {
             // If the displacement is more than 16 bits, put it in a separate instruction.
             return insStore(op, val, ins2(LIR_addp, base, insImmWord(d)), 0, accSet);
         }
     }
 
-    LInsp LirBufWriter::ins0(LOpcode op)
+    LIns* LirBufWriter::ins0(LOpcode op)
     {
         LInsOp0* insOp0 = (LInsOp0*)_buf->makeRoom(sizeof(LInsOp0));
         LIns*    ins    = insOp0->getLIns();
         ins->initLInsOp0(op);
         return ins;
     }
 
-    LInsp LirBufWriter::ins1(LOpcode op, LInsp o1)
+    LIns* LirBufWriter::ins1(LOpcode op, LIns* o1)
     {
         LInsOp1* insOp1 = (LInsOp1*)_buf->makeRoom(sizeof(LInsOp1));
         LIns*    ins    = insOp1->getLIns();
         ins->initLInsOp1(op, o1);
         return ins;
     }
 
-    LInsp LirBufWriter::ins2(LOpcode op, LInsp o1, LInsp o2)
+    LIns* LirBufWriter::ins2(LOpcode op, LIns* o1, LIns* o2)
     {
         LInsOp2* insOp2 = (LInsOp2*)_buf->makeRoom(sizeof(LInsOp2));
         LIns*    ins    = insOp2->getLIns();
         ins->initLInsOp2(op, o1, o2);
         return ins;
     }
 
-    LInsp LirBufWriter::ins3(LOpcode op, LInsp o1, LInsp o2, LInsp o3)
+    LIns* LirBufWriter::ins3(LOpcode op, LIns* o1, LIns* o2, LIns* o3)
     {
         LInsOp3* insOp3 = (LInsOp3*)_buf->makeRoom(sizeof(LInsOp3));
         LIns*    ins    = insOp3->getLIns();
         ins->initLInsOp3(op, o1, o2, o3);
         return ins;
     }
 
-    LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, int32_t d, AccSet accSet)
+    LIns* LirBufWriter::insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet)
     {
         if (isS16(d)) {
             LInsLd* insLd = (LInsLd*)_buf->makeRoom(sizeof(LInsLd));
             LIns*   ins   = insLd->getLIns();
             ins->initLInsLd(op, base, d, accSet);
             return ins;
         } else {
             // If the displacement is more than 16 bits, put it in a separate instruction.
             // Note that CseFilter::insLoad() also does this, so this will
             // only occur if CseFilter has been removed from the pipeline.
             return insLoad(op, ins2(LIR_addp, base, insImmWord(d)), 0, accSet);
         }
     }
 
-    LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, GuardRecord *gr)
+    LIns* LirBufWriter::insGuard(LOpcode op, LIns* c, GuardRecord *gr)
     {
         debug_only( if (LIR_x == op || LIR_xbarrier == op) NanoAssert(!c); )
         return ins2(op, c, (LIns*)gr);
     }
 
-    LInsp LirBufWriter::insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr)
+    LIns* LirBufWriter::insGuardXov(LOpcode op, LIns* a, LIns* b, GuardRecord *gr)
     {
         return ins3(op, a, b, (LIns*)gr);
     }
 
-    LInsp LirBufWriter::insBranch(LOpcode op, LInsp condition, LInsp toLabel)
+    LIns* LirBufWriter::insBranch(LOpcode op, LIns* condition, LIns* toLabel)
     {
         NanoAssert((op == LIR_j && !condition) ||
                    ((op == LIR_jf || op == LIR_jt) && condition));
         return ins2(op, condition, toLabel);
     }
 
-    LInsp LirBufWriter::insBranchJov(LOpcode op, LInsp a, LInsp b, LInsp toLabel)
+    LIns* LirBufWriter::insBranchJov(LOpcode op, LIns* a, LIns* b, LIns* toLabel)
     {
         return ins3(op, a, b, toLabel);
     }
 
     LIns* LirBufWriter::insJtbl(LIns* index, uint32_t size)
     {
         LInsJtbl* insJtbl = (LInsJtbl*) _buf->makeRoom(sizeof(LInsJtbl));
         LIns**    table   = new (_buf->_allocator) LIns*[size];
         LIns*     ins     = insJtbl->getLIns();
         VMPI_memset(table, 0, size * sizeof(LIns*));
         ins->initLInsJtbl(index, size, table);
         return ins;
     }
 
-    LInsp LirBufWriter::insAlloc(int32_t size)
+    LIns* LirBufWriter::insAlloc(int32_t size)
     {
         size = (size+3)>>2; // # of required 32bit words
         LInsI* insI = (LInsI*)_buf->makeRoom(sizeof(LInsI));
         LIns*  ins  = insI->getLIns();
         ins->initLInsI(LIR_allocp, size);
         return ins;
     }
 
-    LInsp LirBufWriter::insParam(int32_t arg, int32_t kind)
+    LIns* LirBufWriter::insParam(int32_t arg, int32_t kind)
     {
         LInsP* insP = (LInsP*)_buf->makeRoom(sizeof(LInsP));
         LIns*  ins  = insP->getLIns();
         ins->initLInsP(arg, kind);
         if (kind) {
             NanoAssert(arg < NumSavedRegs);
             _buf->savedRegs[arg] = ins;
         }
         return ins;
     }
 
-    LInsp LirBufWriter::insImmI(int32_t imm)
+    LIns* LirBufWriter::insImmI(int32_t imm)
     {
         LInsI* insI = (LInsI*)_buf->makeRoom(sizeof(LInsI));
         LIns*  ins  = insI->getLIns();
         ins->initLInsI(LIR_immi, imm);
         return ins;
     }
 
 #ifdef NANOJIT_64BIT
-    LInsp LirBufWriter::insImmQ(uint64_t imm)
+    LIns* LirBufWriter::insImmQ(uint64_t imm)
     {
         LInsQorD* insQorD = (LInsQorD*)_buf->makeRoom(sizeof(LInsQorD));
         LIns*     ins     = insQorD->getLIns();
         ins->initLInsQorD(LIR_immq, imm);
         return ins;
     }
 #endif
 
-    LInsp LirBufWriter::insImmD(double d)
+    LIns* LirBufWriter::insImmD(double d)
     {
         LInsQorD* insQorD = (LInsQorD*)_buf->makeRoom(sizeof(LInsQorD));
         LIns*     ins     = insQorD->getLIns();
         union {
             double d;
             uint64_t q;
         } u;
         u.d = d;
         ins->initLInsQorD(LIR_immd, u.q);
         return ins;
     }
 
     // Reads the next non-skip instruction.
-    LInsp LirReader::read()
+    LIns* LirReader::read()
     {
         static const uint8_t insSizes[] = {
         // LIR_start is treated specially -- see below.
 #define OP___(op, number, repKind, retType, isCse) \
             ((number) == LIR_start ? 0 : sizeof(LIns##repKind)),
 #include "LIRopcode.tbl"
 #undef OP___
             0
@@ -408,18 +408,18 @@ namespace nanojit
 
         // Check the invariant: _ins never points to a skip.
         NanoAssert(_ins && !_ins->isop(LIR_skip));
 
         // Step back one instruction.  Use a table lookup rather than a switch
         // to avoid branch mispredictions.  LIR_start is given a special size
         // of zero so that we don't step back past the start of the block.
         // (Callers of this function should stop once they see a LIR_start.)
-        LInsp ret = _ins;
-        _ins = (LInsp)(uintptr_t(_ins) - insSizes[_ins->opcode()]);
+        LIns* ret = _ins;
+        _ins = (LIns*)(uintptr_t(_ins) - insSizes[_ins->opcode()]);
 
         // Ensure _ins doesn't end up pointing to a skip.
         while (_ins->isop(LIR_skip)) {
             NanoAssert(_ins->prevLIns() != _ins);
             _ins = _ins->prevLIns();
         }
 
         return ret;
@@ -514,17 +514,17 @@ namespace nanojit
 
         // oprnd_2 must be in the same position in LIns{Op2,Op3,St}
         // because oprnd2() is used for all of them.
         #define OP2OFFSET (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_2))
         NanoStaticAssert( OP2OFFSET == (offsetof(LInsOp3, ins) - offsetof(LInsOp3, oprnd_2)) );
         NanoStaticAssert( OP2OFFSET == (offsetof(LInsSt,  ins) - offsetof(LInsSt,  oprnd_2)) );
     }
 
-    bool insIsS16(LInsp i)
+    bool insIsS16(LIns* i)
     {
         if (i->isImmI()) {
             int c = i->immI();
             return isS16(c);
         }
         if (i->isCmov()) {
             return insIsS16(i->oprnd2()) && insIsS16(i->oprnd3());
         }
@@ -862,17 +862,17 @@ namespace nanojit
                 } else if (v == LIR_muli) {
                     // x * 1 = x
                     return oprnd1;
                 }
             }
         }
 
 #if NJ_SOFTFLOAT_SUPPORTED
-        LInsp ins;
+        LIns* ins;
         if (v == LIR_ii2d && oprnd1->isop(LIR_dlo2i) && oprnd2->isop(LIR_dhi2i) &&
             (ins = oprnd1->oprnd1()) == oprnd2->oprnd1()) {
             // qjoin(qlo(x),qhi(x)) == x
             return ins;
         }
 #endif
 
         return out->ins2(v, oprnd1, oprnd2);
@@ -896,17 +896,17 @@ namespace nanojit
             // (y == x) ? x : y  =>  y
             // (x == y) ? x : y  =>  y
             return oprnd3;
         }
 
         return out->ins3(v, oprnd1, oprnd2, oprnd3);
     }
 
-    LIns* ExprFilter::insGuard(LOpcode v, LInsp c, GuardRecord *gr)
+    LIns* ExprFilter::insGuard(LOpcode v, LIns* c, GuardRecord *gr)
     {
         if (v == LIR_xt || v == LIR_xf) {
             if (c->isImmI()) {
                 if ((v == LIR_xt && !c->immI()) || (v == LIR_xf && c->immI())) {
                     return 0; // no guard needed
                 } else {
 #ifdef JS_TRACER
                     // We're emitting a guard that will always fail. Any code
@@ -925,20 +925,20 @@ namespace nanojit
                 }
             }
         }
         return out->insGuard(v, c, gr);
     }
 
     // Simplify operator if possible.  Always return NULL if overflow is possible.
 
-    LIns* ExprFilter::simplifyOverflowArith(LOpcode op, LInsp *opnd1, LInsp *opnd2)
+    LIns* ExprFilter::simplifyOverflowArith(LOpcode op, LIns** opnd1, LIns** opnd2)
     {
-        LInsp oprnd1 = *opnd1;
-        LInsp oprnd2 = *opnd2;
+        LIns* oprnd1 = *opnd1;
+        LIns* oprnd2 = *opnd2;
 
         if (oprnd1->isImmI() && oprnd2->isImmI()) {
             int32_t c1 = oprnd1->immI();
             int32_t c2 = oprnd2->immI();
             double d = 0.0;
 
             // The code below attempts to perform the operation while
             // detecting overflow.  For multiplication, we may unnecessarily
@@ -999,17 +999,17 @@ namespace nanojit
             } else if (c == 1 && (op == LIR_muljovi || op == LIR_mulxovi)) {
                 return oprnd1;
             }
         }
 
         return NULL;
     }
 
-    LIns* ExprFilter::insGuardXov(LOpcode op, LInsp oprnd1, LInsp oprnd2, GuardRecord *gr)
+    LIns* ExprFilter::insGuardXov(LOpcode op, LIns* oprnd1, LIns* oprnd2, GuardRecord *gr)
     {
         LIns* simplified = simplifyOverflowArith(op, &oprnd1, &oprnd2);
         if (simplified)
             return simplified;
 
         return out->insGuardXov(op, oprnd1, oprnd2, gr);
     }
 
@@ -1036,17 +1036,17 @@ namespace nanojit
                     v = invertCondJmpOpcode(v);
                     c = c->oprnd1();
                 }
             }
         }
         return out->insBranch(v, c, t);
     }
 
-    LIns* ExprFilter::insBranchJov(LOpcode op, LInsp oprnd1, LInsp oprnd2, LIns* target)
+    LIns* ExprFilter::insBranchJov(LOpcode op, LIns* oprnd1, LIns* oprnd2, LIns* target)
     {
         LIns* simplified = simplifyOverflowArith(op, &oprnd1, &oprnd2);
         if (simplified)
             return simplified;
 
         return out->insBranchJov(op, oprnd1, oprnd2, target);
     }
 
@@ -1083,17 +1083,17 @@ namespace nanojit
     LIns* LirWriter::insChoose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov)
     {
         // 'cond' must be a conditional, unless it has been optimized to 0 or
         // 1.  In that case make it an ==0 test and flip the branches.  It'll
         // get constant-folded by ExprFilter subsequently.
         if (!cond->isCmp()) {
             NanoAssert(cond->isImmI());
             cond = insEqI_0(cond);
-            LInsp tmp = iftrue;
+            LIns* tmp = iftrue;
             iftrue = iffalse;
             iffalse = tmp;
         }
 
         if (use_cmov) {
             LOpcode op = LIR_cmovi;
             if (iftrue->isI() && iffalse->isI()) {
                 op = LIR_cmovi;
@@ -1104,50 +1104,50 @@ namespace nanojit
             } else if (iftrue->isD() && iffalse->isD()) {
                 NanoAssertMsg(0, "LIR_fcmov doesn't exist yet, sorry");
             } else {
                 NanoAssert(0);  // type error
             }
             return ins3(op, cond, iftrue, iffalse);
         }
 
-        LInsp ncond = ins1(LIR_negi, cond); // cond ? -1 : 0
+        LIns* ncond = ins1(LIR_negi, cond); // cond ? -1 : 0
         return ins2(LIR_ori,
                     ins2(LIR_andi, iftrue, ncond),
                     ins2(LIR_andi, iffalse, ins1(LIR_noti, ncond)));
     }
 
-    LIns* LirBufWriter::insCall(const CallInfo *ci, LInsp args[])
+    LIns* LirBufWriter::insCall(const CallInfo *ci, LIns* args[])
     {
         LOpcode op = getCallOpcode(ci);
 #if NJ_SOFTFLOAT_SUPPORTED
         // SoftFloat: convert LIR_calld to LIR_calli.
         if (_config.soft_float && op == LIR_calld)
             op = LIR_calli;
 #endif
 
         int32_t argc = ci->count_args();
         NanoAssert(argc <= (int)MAXARGS);
 
         // Allocate space for and copy the arguments.  We use the same
         // allocator as the normal LIR buffers so it has the same lifetime.
         // Nb: this must be kept in sync with arg().
-        LInsp* args2 = (LInsp*)_buf->_allocator.alloc(argc * sizeof(LInsp));
-        memcpy(args2, args, argc * sizeof(LInsp));
+        LIns** args2 = (LIns**)_buf->_allocator.alloc(argc * sizeof(LIns*));
+        memcpy(args2, args, argc * sizeof(LIns*));
 
         // Allocate and write the call instruction.
         LInsC* insC = (LInsC*)_buf->makeRoom(sizeof(LInsC));
         LIns*  ins  = insC->getLIns();
         ins->initLInsC(op, args2, ci);
         return ins;
     }
 
     using namespace avmplus;
 
-    StackFilter::StackFilter(LirFilter *in, Allocator& alloc, LInsp sp)
+    StackFilter::StackFilter(LirFilter *in, Allocator& alloc, LIns* sp)
         : LirFilter(in), sp(sp), stk(alloc), top(0)
     {}
 
     // If we see a sequence like this:
     //
     //   sti sp[0]
     //   ...
     //   sti sp[0]
@@ -1159,23 +1159,23 @@ namespace nanojit
     //   stfi sp[0]
     //   ...
     //   sti sp[0]
     //
     // we can again remove the first store -- even though the second store
     // doesn't clobber the high four bytes -- because we know the entire value
     // stored by the first store is dead.
     //
-    LInsp StackFilter::read()
+    LIns* StackFilter::read()
     {
         for (;;) {
-            LInsp ins = in->read();
+            LIns* ins = in->read();
 
             if (ins->isStore()) {
-                LInsp base = ins->oprnd2();
+                LIns* base = ins->oprnd2();
                 if (base == sp) {
                    // 'disp' must be eight-byte-aligned because each stack entry is 8 bytes.
                     NanoAssert((ins->disp() & 0x7) == 0);
 
                     int d = ins->disp() >> 3;
                     if (d >= top) {
                         continue;
                     } else {
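
The comment at the top of this hunk describes classic bottom-up dead-store elimination on sp-relative slots: walking the stream in reverse, a store is dead if a store already seen below it covers the same 8-byte stack entry. A self-contained sketch of just that core idea (the Store type and filterDeadStores() are hypothetical; the real filter must also respect loads, labels, and sp adjustments, all omitted here):

    #include <cstdint>
    #include <set>
    #include <vector>

    struct Store { int32_t disp; };   // hypothetical: an sp-relative store

    std::vector<Store> filterDeadStores(const std::vector<Store>& bottomUp) {
        std::set<int32_t> written;    // slots already stored to, below us
        std::vector<Store> kept;
        for (const Store& s : bottomUp) {
            int32_t slot = s.disp >> 3;   // 8-byte entries, as in read()
            if (written.count(slot))
                continue;                 // clobbered by a later store: dead
            written.insert(slot);
            kept.push_back(s);
        }
        return kept;
    }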
@@ -1227,24 +1227,24 @@ namespace nanojit
         LiveTable(Allocator& alloc)
             : alloc(alloc)
             , live(alloc)
             , retired(alloc)
             , retiredCount(0)
             , maxlive(0)
         { }
 
-        void add(LInsp ins, LInsp use) {
+        void add(LIns* ins, LIns* use) {
             if (!ins->isImmAny() && !live.containsKey(ins)) {
                 NanoAssert(size_t(ins->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
                 live.put(ins,use);
             }
         }
 
-        void retire(LInsp i) {
+        void retire(LIns* i) {
             RetiredEntry *e = new (alloc) RetiredEntry();
             e->i = i;
             SeqBuilder<LIns*> livelist(alloc);
             HashMap<LIns*, LIns*>::Iter iter(live);
             int live_count = 0;
             while (iter.next()) {
                 LIns* ins = iter.key();
                 if (!ins->isV()) {
@@ -1256,17 +1256,17 @@ namespace nanojit
             if (live_count > maxlive)
                 maxlive = live_count;
 
             live.remove(i);
             retired.insert(e);
             retiredCount++;
         }
 
-        bool contains(LInsp i) {
+        bool contains(LIns* i) {
             return live.containsKey(i);
         }
     };
 
     /*
      * traverse the LIR buffer and discover which instructions are live
      * by starting from instructions with side effects (stores, calls, branches)
      * and marking instructions used by them.  Works bottom-up, in one pass.
@@ -1277,17 +1277,17 @@ namespace nanojit
     {
         // traverse backwards to find live exprs and a few other stats.
 
         LiveTable live(alloc);
         uint32_t exits = 0;
         int total = 0;
         if (frag->lirbuf->state)
             live.add(frag->lirbuf->state, 0);
-        for (LInsp ins = in->read(); !ins->isop(LIR_start); ins = in->read())
+        for (LIns* ins = in->read(); !ins->isop(LIR_start); ins = in->read())
         {
             total++;
 
             // First handle instructions that are always live (ie. those that
             // don't require being marked as live), eg. those with
             // side-effects.  We ignore LIR_paramp.
             if (ins->isLive() && !ins->isop(LIR_paramp))
             {
@@ -1481,17 +1481,17 @@ namespace nanojit
 
             if (e->i->isGuard() || e->i->isBranch() || e->i->isRet()) {
                 logc->printf("\n");
                 newblock = true;
             }
         }
     }
 
-    void LirNameMap::addNameWithSuffix(LInsp ins, const char *name, int suffix,
+    void LirNameMap::addNameWithSuffix(LIns* ins, const char *name, int suffix,
                                        bool ignoreOneSuffix) {
         // The lookup may succeed, ie. we may already have a name for this
         // instruction.  This can happen because of CSE.  Eg. if we have this:
         //
         //   ins = addName("foo", insImmI(0))
         //
         // that assigns the name "foo1" to 'ins'.  If we later do this:
         //
@@ -1514,21 +1514,21 @@ namespace nanojit
 
             char *copy = new (alloc) char[VMPI_strlen(name2)+1];
             VMPI_strcpy(copy, name2);
             Entry *e = new (alloc) Entry(copy);
             names.put(ins, e);
         }
     }
 
-    void LirNameMap::addName(LInsp ins, const char* name) {
+    void LirNameMap::addName(LIns* ins, const char* name) {
         addNameWithSuffix(ins, name, namecounts.add(name), /*ignoreOneSuffix*/true);
     }
 
-    const char* LirNameMap::createName(LInsp ins) {
+    const char* LirNameMap::createName(LIns* ins) {
         if (ins->isCall()) {
 #if NJ_SOFTFLOAT_SUPPORTED
             if (ins->isop(LIR_hcalli)) {
                 ins = ins->oprnd1();    // we've presumably seen the other half already
             } else
 #endif
             {
                 addNameWithSuffix(ins, ins->callInfo()->_name, funccounts.add(ins->callInfo()),
@@ -1537,17 +1537,17 @@ namespace nanojit
         } else {
             addNameWithSuffix(ins, lirNames[ins->opcode()], lircounts.add(ins->opcode()),
                               /*ignoreOneSuffix*/false);
 
         }
         return names.get(ins)->name;
     }
 
-    const char* LirNameMap::lookupName(LInsp ins)
+    const char* LirNameMap::lookupName(LIns* ins)
     {
         Entry* e = names.get(ins);
         return e ? e->name : NULL;
     }
 
 
     char* LInsPrinter::formatAccSet(RefBuf* buf, AccSet accSet) {
         int i = 0;
@@ -1905,17 +1905,17 @@ namespace nanojit
         m_cap[LInsCall]         = 64;
         m_cap[LInsLoadReadOnly] = 16;
         m_cap[LInsLoadStack]    = 16;
         m_cap[LInsLoadRStack]   = 16;
         m_cap[LInsLoadOther]    = 16;
         m_cap[LInsLoadMultiple] = 16;
 
         for (LInsHashKind kind = LInsFirst; kind <= LInsLast; kind = nextKind(kind)) {
-            m_list[kind] = new (alloc) LInsp[m_cap[kind]];
+            m_list[kind] = new (alloc) LIns*[m_cap[kind]];
         }
         clear();
     }
 
     // Inlined/separated version of SuperFastHash.
     // This content is copyrighted by Paul Hsieh.
     // For reference see: http://www.azillionmonkeys.com/qed/hash.html
     //
@@ -1957,17 +1957,17 @@ namespace nanojit
         hash ^= hash << 4;
         hash += hash >> 17;
         hash ^= hash << 25;
         hash += hash >> 6;
         return hash;
     }
 
     void CseFilter::clear(LInsHashKind kind) {
-        VMPI_memset(m_list[kind], 0, sizeof(LInsp)*m_cap[kind]);
+        VMPI_memset(m_list[kind], 0, sizeof(LIns*)*m_cap[kind]);
         m_used[kind] = 0;
     }
 
     void CseFilter::clear() {
         for (LInsHashKind kind = LInsFirst; kind <= LInsLast; kind = nextKind(kind)) {
             clear(kind);
         }
     }
@@ -1976,87 +1976,87 @@ namespace nanojit
         return hashfinish(hash32(0, a));
     }
 
     inline uint32_t CseFilter::hashImmQorD(uint64_t a) {
         uint32_t hash = hash32(0, uint32_t(a >> 32));
         return hashfinish(hash32(hash, uint32_t(a)));
     }
 
-    inline uint32_t CseFilter::hash1(LOpcode op, LInsp a) {
+    inline uint32_t CseFilter::hash1(LOpcode op, LIns* a) {
         uint32_t hash = hash8(0, uint8_t(op));
         return hashfinish(hashptr(hash, a));
     }
 
-    inline uint32_t CseFilter::hash2(LOpcode op, LInsp a, LInsp b) {
+    inline uint32_t CseFilter::hash2(LOpcode op, LIns* a, LIns* b) {
         uint32_t hash = hash8(0, uint8_t(op));
         hash = hashptr(hash, a);
         return hashfinish(hashptr(hash, b));
     }
 
-    inline uint32_t CseFilter::hash3(LOpcode op, LInsp a, LInsp b, LInsp c) {
+    inline uint32_t CseFilter::hash3(LOpcode op, LIns* a, LIns* b, LIns* c) {
         uint32_t hash = hash8(0, uint8_t(op));
         hash = hashptr(hash, a);
         hash = hashptr(hash, b);
         return hashfinish(hashptr(hash, c));
     }
 
     NanoStaticAssert(sizeof(AccSet) == 1);  // required for hashLoad to work properly
 
     // Nb: no need to hash the load's AccSet because each region's loads go in
     // a different hash table.
-    inline uint32_t CseFilter::hashLoad(LOpcode op, LInsp a, int32_t d, AccSet accSet) {
+    inline uint32_t CseFilter::hashLoad(LOpcode op, LIns* a, int32_t d, AccSet accSet) {
         uint32_t hash = hash8(0,uint8_t(op));
         hash = hashptr(hash, a);
         hash = hash32(hash, d);
         return hashfinish(hash8(hash, accSet));
     }
 
-    inline uint32_t CseFilter::hashCall(const CallInfo *ci, uint32_t argc, LInsp args[]) {
+    inline uint32_t CseFilter::hashCall(const CallInfo *ci, uint32_t argc, LIns* args[]) {
         uint32_t hash = hashptr(0, ci);
         for (int32_t j=argc-1; j >= 0; j--)
             hash = hashptr(hash,args[j]);
         return hashfinish(hash);
     }
 
     void CseFilter::grow(LInsHashKind kind)
     {
         const uint32_t oldcap = m_cap[kind];
         m_cap[kind] <<= 1;
-        LInsp *oldlist = m_list[kind];
-        m_list[kind] = new (alloc) LInsp[m_cap[kind]];
-        VMPI_memset(m_list[kind], 0, m_cap[kind] * sizeof(LInsp));
+        LIns** oldlist = m_list[kind];
+        m_list[kind] = new (alloc) LIns*[m_cap[kind]];
+        VMPI_memset(m_list[kind], 0, m_cap[kind] * sizeof(LIns*));
         find_t find = m_find[kind];
         for (uint32_t i = 0; i < oldcap; i++) {
-            LInsp ins = oldlist[i];
+            LIns* ins = oldlist[i];
             if (!ins) continue;
             uint32_t j = (this->*find)(ins);
             NanoAssert(!m_list[kind][j]);
             m_list[kind][j] = ins;
         }
     }
 
-    void CseFilter::add(LInsHashKind kind, LInsp ins, uint32_t k)
+    void CseFilter::add(LInsHashKind kind, LIns* ins, uint32_t k)
     {
         NanoAssert(!m_list[kind][k]);
         m_used[kind]++;
         m_list[kind][k] = ins;
         if ((m_used[kind] * 4) >= (m_cap[kind] * 3)) {  // load factor of 0.75
             grow(kind);
         }
     }
 
-    inline LInsp CseFilter::findImmI(int32_t a, uint32_t &k)
+    inline LIns* CseFilter::findImmI(int32_t a, uint32_t &k)
     {
         LInsHashKind kind = LInsImmI;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hashImmI(a) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             NanoAssert(ins->isImmI());
             if (ins->immI() == a)
                 return ins;
             // Quadratic probe:  h(k,i) = h(k) + 0.5i + 0.5i^2, which gives the
             // sequence h(k), h(k)+1, h(k)+3, h(k)+6, h+10, ...  This is a
             // good sequence for 2^n-sized tables as the values h(k,i) for i
@@ -2064,257 +2064,257 @@ namespace nanojit
             // See http://portal.acm.org/citation.cfm?id=360737 and
             // http://en.wikipedia.org/wiki/Quadratic_probing (fetched
             // 06-Nov-2009) for more details.
             k = (k + n) & bitmask;
             n += 1;
         }
     }
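
The probe step above (k = (k + n) & bitmask, with n incrementing from 1) generates the triangular offsets 0, 1, 3, 6, 10, ... named in the comment. A standalone check, illustrative and not part of the patch, that this sequence visits every slot of a power-of-two table exactly once before wrapping:

    #include <cassert>
    #include <cstdint>
    #include <set>

    int main() {
        const uint32_t cap = 64;          // any power of two, as in m_cap[]
        const uint32_t bitmask = cap - 1;
        std::set<uint32_t> seen;
        uint32_t k = 17 & bitmask;        // arbitrary starting hash
        for (uint32_t n = 1; n <= cap; n++) {
            seen.insert(k);
            k = (k + n) & bitmask;        // same step as the loop above
        }
        assert(seen.size() == cap);       // all cap probes hit distinct slots
        return 0;
    }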
 
-    uint32_t CseFilter::findImmI(LInsp ins)
+    uint32_t CseFilter::findImmI(LIns* ins)
     {
         uint32_t k;
         findImmI(ins->immI(), k);
         return k;
     }
 
 #ifdef NANOJIT_64BIT
-    inline LInsp CseFilter::findImmQ(uint64_t a, uint32_t &k)
+    inline LIns* CseFilter::findImmQ(uint64_t a, uint32_t &k)
     {
         LInsHashKind kind = LInsImmQ;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hashImmQorD(a) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             NanoAssert(ins->isImmQ());
             if (ins->immQ() == a)
                 return ins;
             k = (k + n) & bitmask;
             n += 1;
         }
     }
 
-    uint32_t CseFilter::findImmQ(LInsp ins)
+    uint32_t CseFilter::findImmQ(LIns* ins)
     {
         uint32_t k;
         findImmQ(ins->immQ(), k);
         return k;
     }
 #endif
 
-    inline LInsp CseFilter::findImmD(uint64_t a, uint32_t &k)
+    inline LIns* CseFilter::findImmD(uint64_t a, uint32_t &k)
     {
         LInsHashKind kind = LInsImmD;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hashImmQorD(a) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             NanoAssert(ins->isImmD());
             if (ins->immDasQ() == a)
                 return ins;
             k = (k + n) & bitmask;
             n += 1;
         }
     }
 
-    uint32_t CseFilter::findImmD(LInsp ins)
+    uint32_t CseFilter::findImmD(LIns* ins)
     {
         uint32_t k;
         findImmD(ins->immDasQ(), k);
         return k;
     }
 
-    inline LInsp CseFilter::find1(LOpcode op, LInsp a, uint32_t &k)
+    inline LIns* CseFilter::find1(LOpcode op, LIns* a, uint32_t &k)
     {
         LInsHashKind kind = LIns1;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hash1(op, a) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             if (ins->isop(op) && ins->oprnd1() == a)
                 return ins;
             k = (k + n) & bitmask;
             n += 1;
         }
     }
 
-    uint32_t CseFilter::find1(LInsp ins)
+    uint32_t CseFilter::find1(LIns* ins)
     {
         uint32_t k;
         find1(ins->opcode(), ins->oprnd1(), k);
         return k;
     }
 
-    inline LInsp CseFilter::find2(LOpcode op, LInsp a, LInsp b, uint32_t &k)
+    inline LIns* CseFilter::find2(LOpcode op, LIns* a, LIns* b, uint32_t &k)
     {
         LInsHashKind kind = LIns2;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hash2(op, a, b) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             if (ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b)
                 return ins;
             k = (k + n) & bitmask;
             n += 1;
         }
     }
 
-    uint32_t CseFilter::find2(LInsp ins)
+    uint32_t CseFilter::find2(LIns* ins)
     {
         uint32_t k;
         find2(ins->opcode(), ins->oprnd1(), ins->oprnd2(), k);
         return k;
     }
 
-    inline LInsp CseFilter::find3(LOpcode op, LInsp a, LInsp b, LInsp c, uint32_t &k)
+    inline LIns* CseFilter::find3(LOpcode op, LIns* a, LIns* b, LIns* c, uint32_t &k)
     {
         LInsHashKind kind = LIns3;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hash3(op, a, b, c) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             if (ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b && ins->oprnd3() == c)
                 return ins;
             k = (k + n) & bitmask;
             n += 1;
         }
     }
 
-    uint32_t CseFilter::find3(LInsp ins)
+    uint32_t CseFilter::find3(LIns* ins)
     {
         uint32_t k;
         find3(ins->opcode(), ins->oprnd1(), ins->oprnd2(), ins->oprnd3(), k);
         return k;
     }
 
-    inline LInsp CseFilter::findLoad(LOpcode op, LInsp a, int32_t d, AccSet accSet,
+    inline LIns* CseFilter::findLoad(LOpcode op, LIns* a, int32_t d, AccSet accSet,
                                      LInsHashKind kind, uint32_t &k)
     {
         (void)accSet;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hashLoad(op, a, d, accSet) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             NanoAssert(ins->accSet() == accSet);
             if (ins->isop(op) && ins->oprnd1() == a && ins->disp() == d)
                 return ins;
             k = (k + n) & bitmask;
             n += 1;
         }
     }
 
-    uint32_t CseFilter::findLoadReadOnly(LInsp ins)
+    uint32_t CseFilter::findLoadReadOnly(LIns* ins)
     {
         uint32_t k;
         findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadReadOnly, k);
         return k;
     }
 
-    uint32_t CseFilter::findLoadStack(LInsp ins)
+    uint32_t CseFilter::findLoadStack(LIns* ins)
     {
         uint32_t k;
         findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadStack, k);
         return k;
     }
 
-    uint32_t CseFilter::findLoadRStack(LInsp ins)
+    uint32_t CseFilter::findLoadRStack(LIns* ins)
     {
         uint32_t k;
         findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadRStack, k);
         return k;
     }
 
-    uint32_t CseFilter::findLoadOther(LInsp ins)
+    uint32_t CseFilter::findLoadOther(LIns* ins)
     {
         uint32_t k;
         findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadOther, k);
         return k;
     }
 
-    uint32_t CseFilter::findLoadMultiple(LInsp ins)
+    uint32_t CseFilter::findLoadMultiple(LIns* ins)
     {
         uint32_t k;
         findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadMultiple, k);
         return k;
     }
 
-    bool argsmatch(LInsp ins, uint32_t argc, LInsp args[])
+    bool argsmatch(LIns* ins, uint32_t argc, LIns* args[])
     {
         for (uint32_t j=0; j < argc; j++)
             if (ins->arg(j) != args[j])
                 return false;
         return true;
     }
 
-    inline LInsp CseFilter::findCall(const CallInfo *ci, uint32_t argc, LInsp args[], uint32_t &k)
+    inline LIns* CseFilter::findCall(const CallInfo *ci, uint32_t argc, LIns* args[], uint32_t &k)
     {
         LInsHashKind kind = LInsCall;
         const uint32_t bitmask = m_cap[kind] - 1;
         k = hashCall(ci, argc, args) & bitmask;
         uint32_t n = 1;
         while (true) {
-            LInsp ins = m_list[kind][k];
+            LIns* ins = m_list[kind][k];
             if (!ins)
                 return NULL;
             if (ins->isCall() && ins->callInfo() == ci && argsmatch(ins, argc, args))
                 return ins;
             k = (k + n) & bitmask;
             n += 1;
         }
     }
 
-    uint32_t CseFilter::findCall(LInsp ins)
+    uint32_t CseFilter::findCall(LIns* ins)
     {
-        LInsp args[MAXARGS];
+        LIns* args[MAXARGS];
         uint32_t argc = ins->argc();
         NanoAssert(argc < MAXARGS);
         for (uint32_t j=0; j < argc; j++)
             args[j] = ins->arg(j);
         uint32_t k;
         findCall(ins->callInfo(), argc, args, k);
         return k;
     }
 
     LIns* CseFilter::insImmI(int32_t imm)
     {
         uint32_t k;
-        LInsp ins = findImmI(imm, k);
+        LIns* ins = findImmI(imm, k);
         if (!ins) {
             ins = out->insImmI(imm);
             add(LInsImmI, ins, k);
         }
         // We assume that downstream stages do not modify the instruction, so
         // that we can insert 'ins' into slot 'k'.  Check this.
         NanoAssert(ins->isop(LIR_immi) && ins->immI() == imm);
         return ins;
     }
 
 #ifdef NANOJIT_64BIT
     LIns* CseFilter::insImmQ(uint64_t q)
     {
         uint32_t k;
-        LInsp ins = findImmQ(q, k);
+        LIns* ins = findImmQ(q, k);
         if (!ins) {
             ins = out->insImmQ(q);
             add(LInsImmQ, ins, k);
         }
         NanoAssert(ins->isop(LIR_immq) && ins->immQ() == q);
         return ins;
     }
 #endif
@@ -2324,79 +2324,79 @@ namespace nanojit
         uint32_t k;
         // We must pun 'd' as a uint64_t otherwise 0 and -0 will be treated as
         // equal, which breaks things (see bug 527288).
         union {
             double d;
             uint64_t u64;
         } u;
         u.d = d;
-        LInsp ins = findImmD(u.u64, k);
+        LIns* ins = findImmD(u.u64, k);
         if (!ins) {
             ins = out->insImmD(d);
             add(LInsImmD, ins, k);
         }
         NanoAssert(ins->isop(LIR_immd) && ins->immDasQ() == u.u64);
         return ins;
     }
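
The union pun above exists because 0.0 and -0.0 compare equal as doubles yet must remain distinct CSE keys (bug 527288, per the comment). A tiny standalone demonstration of the distinction the bit-level key preserves:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
        double pz = 0.0, nz = -0.0;
        assert(pz == nz);                 // IEEE-754: equal as doubles
        uint64_t pq, nq;
        std::memcpy(&pq, &pz, sizeof pq);
        std::memcpy(&nq, &nz, sizeof nq);
        assert(pq != nq);                 // bit patterns differ in the sign bit
        return 0;
    }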
 
     LIns* CseFilter::ins0(LOpcode op)
     {
         if (op == LIR_label)
             clear();
         return out->ins0(op);
     }
 
-    LIns* CseFilter::ins1(LOpcode op, LInsp a)
+    LIns* CseFilter::ins1(LOpcode op, LIns* a)
     {
-        LInsp ins;
+        LIns* ins;
         if (isCseOpcode(op)) {
             uint32_t k;
             ins = find1(op, a, k);
             if (!ins) {
                 ins = out->ins1(op, a);
                 add(LIns1, ins, k);
             }
         } else {
             ins = out->ins1(op, a);
         }
         NanoAssert(ins->isop(op) && ins->oprnd1() == a);
         return ins;
     }
 
-    LIns* CseFilter::ins2(LOpcode op, LInsp a, LInsp b)
+    LIns* CseFilter::ins2(LOpcode op, LIns* a, LIns* b)
     {
-        LInsp ins;
+        LIns* ins;
         NanoAssert(isCseOpcode(op));
         uint32_t k;
         ins = find2(op, a, b, k);
         if (!ins) {
             ins = out->ins2(op, a, b);
             add(LIns2, ins, k);
         }
         NanoAssert(ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b);
         return ins;
     }
 
-    LIns* CseFilter::ins3(LOpcode op, LInsp a, LInsp b, LInsp c)
+    LIns* CseFilter::ins3(LOpcode op, LIns* a, LIns* b, LIns* c)
     {
         NanoAssert(isCseOpcode(op));
         uint32_t k;
-        LInsp ins = find3(op, a, b, c, k);
+        LIns* ins = find3(op, a, b, c, k);
         if (!ins) {
             ins = out->ins3(op, a, b, c);
             add(LIns3, ins, k);
         }
         NanoAssert(ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b && ins->oprnd3() == c);
         return ins;
     }
 
-    LIns* CseFilter::insLoad(LOpcode op, LInsp base, int32_t disp, AccSet loadAccSet)
+    LIns* CseFilter::insLoad(LOpcode op, LIns* base, int32_t disp, AccSet loadAccSet)
     {
-        LInsp ins;
+        LIns* ins;
         if (isS16(disp)) {
             // Clear all loads aliased by stores and calls since the last time
             // we were in this function.
             if (storesSinceLastLoad != ACC_NONE) {
                 NanoAssert(!(storesSinceLastLoad & ACC_READONLY));  // can't store to READONLY
                 if (storesSinceLastLoad & ACC_STACK)  { clear(LInsLoadStack); }
                 if (storesSinceLastLoad & ACC_RSTACK) { clear(LInsLoadRStack); }
                 if (storesSinceLastLoad & ACC_OTHER)  { clear(LInsLoadOther); }
@@ -2427,34 +2427,34 @@ namespace nanojit
             // If the displacement is more than 16 bits, put it in a separate
            // instruction.  Nb: LirBufWriter also does this; we do it here
             // too because CseFilter relies on LirBufWriter not changing code.
             ins = insLoad(op, ins2(LIR_addp, base, insImmWord(disp)), 0, loadAccSet);
         }
         return ins;
     }
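
(Aside: the large-displacement rewrite in insLoad() and insStore() can be
summarised by a standalone model; stand-in types, not the nanojit API.
A displacement that won't fit the instruction's 16-bit field is folded
into an explicit LIR_addp-style address computation, leaving disp == 0
on the access itself:)

    #include <stdint.h>

    static bool isS16(int32_t d) { return d == (int32_t)(int16_t)d; }

    struct Addr { intptr_t base; int32_t disp; };

    Addr normalizeDisp(intptr_t base, int32_t disp) {
        if (isS16(disp))
            return Addr{ base, disp };   // fits: leave the access as-is
        return Addr{ base + disp, 0 };   // fold into an add; disp becomes 0
    }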
 
-    LIns* CseFilter::insStore(LOpcode op, LInsp value, LInsp base, int32_t disp, AccSet accSet)
+    LIns* CseFilter::insStore(LOpcode op, LIns* value, LIns* base, int32_t disp, AccSet accSet)
     {
-        LInsp ins;
+        LIns* ins;
         if (isS16(disp)) {
             storesSinceLastLoad |= accSet;
             ins = out->insStore(op, value, base, disp, accSet);
             NanoAssert(ins->isop(op) && ins->oprnd1() == value && ins->oprnd2() == base &&
                        ins->disp() == disp && ins->accSet() == accSet);
         } else {
             // If the displacement is more than 16 bits, put it in a separate
            // instruction.  Nb: LirBufWriter also does this; we do it here
             // too because CseFilter relies on LirBufWriter not changing code.
             ins = insStore(op, value, ins2(LIR_addp, base, insImmWord(disp)), 0, accSet);
         }
         return ins;
     }
 
-    LInsp CseFilter::insGuard(LOpcode op, LInsp c, GuardRecord *gr)
+    LIns* CseFilter::insGuard(LOpcode op, LIns* c, GuardRecord *gr)
     {
         // LIR_xt and LIR_xf guards are CSEable.  Note that we compare the
         // opcode and condition when determining if two guards are equivalent
         // -- in find1() and hash1() -- but we do *not* compare the
         // GuardRecord.  This works because:
         // - If guard 1 is taken (exits) then guard 2 is never reached, so
         //   guard 2 can be removed.
         // - If guard 1 is not taken then neither is guard 2, so guard 2 can
@@ -2463,52 +2463,52 @@ namespace nanojit
         // The underlying assumptions that are required for this to be safe:
         // - There's never a path from the side exit of guard 1 back to guard
         //   2;  for tree-shaped fragments this should be true.
         // - GuardRecords do not contain information other than what is needed
         //   to execute a successful exit.  That is currently true.
         // - The CSE algorithm will always keep guard 1 and remove guard 2
         //   (not vice versa).  The current algorithm does this.
         //
-        LInsp ins;
+        LIns* ins;
         if (isCseOpcode(op)) {
             // conditional guard
             uint32_t k;
             ins = find1(op, c, k);
             if (!ins) {
                 ins = out->insGuard(op, c, gr);
                 add(LIns1, ins, k);
             }
         } else {
             ins = out->insGuard(op, c, gr);
         }
         NanoAssert(ins->isop(op) && ins->oprnd1() == c);
         return ins;
     }
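
(Aside: a toy control-flow model of the dominance argument above, my
sketch only.  If guard 1 passes, guard 2 tests a condition that is
already known to hold, so guard 2 and its GuardRecord are dead; that is
why comparing opcode and condition, but not the GuardRecord, suffices:)

    // Guard 1 dominates guard 2, and no path leads from guard 1's exit
    // back to guard 2, so the second test can never fire.
    bool runGuardedRegion(bool cond) {
        if (!cond) return false;    // guard 1: side exit
        // ... straight-line code that cannot change 'cond' ...
        if (!cond) return false;    // guard 2: unreachable; CSE drops it
        return true;
    }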
 
-    LInsp CseFilter::insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr)
+    LIns* CseFilter::insGuardXov(LOpcode op, LIns* a, LIns* b, GuardRecord *gr)
     {
         // LIR_*xov are CSEable.  See CseFilter::insGuard() for details.
         NanoAssert(isCseOpcode(op));
         // conditional guard
         uint32_t k;
-        LInsp ins = find2(op, a, b, k);
+        LIns* ins = find2(op, a, b, k);
         if (!ins) {
             ins = out->insGuardXov(op, a, b, gr);
             add(LIns2, ins, k);
         }
         NanoAssert(ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b);
         return ins;
     }
 
     // There is no CseFilter::insBranchJov(), as LIR_*jov* are not CSEable.
 
-    LInsp CseFilter::insCall(const CallInfo *ci, LInsp args[])
+    LIns* CseFilter::insCall(const CallInfo *ci, LIns* args[])
     {
-        LInsp ins;
+        LIns* ins;
         uint32_t argc = ci->count_args();
         if (ci->_isPure) {
             NanoAssert(ci->_storeAccSet == ACC_NONE);
             uint32_t k;
             ins = findCall(ci, argc, args, k);
             if (!ins) {
                 ins = out->insCall(ci, args);
                 add(LInsCall, ins, k);
@@ -2585,17 +2585,17 @@ namespace nanojit
     LIns* SoftFloatFilter::split(LIns *a) {
         if (a->isD() && !a->isop(LIR_ii2d)) {
             // all F64 args must be qjoin's for soft-float
             a = ins2(LIR_ii2d, ins1(LIR_dlo2i, a), ins1(LIR_dhi2i, a));
         }
         return a;
     }
 
-    LIns* SoftFloatFilter::split(const CallInfo *call, LInsp args[]) {
+    LIns* SoftFloatFilter::split(const CallInfo *call, LIns* args[]) {
         LIns *lo = out->insCall(call, args);
         LIns *hi = out->ins1(LIR_hcalli, lo);
         return out->ins2(LIR_ii2d, lo, hi);
     }
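
(Aside: a standalone model of the soft-float convention the two split()
overloads rely on; helper name hypothetical, not part of the patch.  A
double crosses the call boundary as two 32-bit words and is reassembled
by the equivalent of LIR_ii2d, with LIR_hcalli standing for the fetch of
the high word from the second return register:)

    #include <stdint.h>
    #include <string.h>

    // Model of LIR_ii2d: join low and high 32-bit halves into a double.
    static double ii2d(uint32_t lo, uint32_t hi) {
        uint64_t bits = ((uint64_t)hi << 32) | lo;
        double d;
        memcpy(&d, &bits, sizeof d);
        return d;
    }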
 
     LIns* SoftFloatFilter::callD1(const CallInfo *call, LIns *a) {
         LIns *args[] = { split(a) };
         return split(call, args);
@@ -2625,17 +2625,17 @@ namespace nanojit
         if (ci) {
             if (isCmpDOpcode(op))
                 return cmpD(ci, a, b);
             return callD2(ci, a, b);
         }
         return out->ins2(op, a, b);
     }
 
-    LIns* SoftFloatFilter::insCall(const CallInfo *ci, LInsp args[]) {
+    LIns* SoftFloatFilter::insCall(const CallInfo *ci, LIns* args[]) {
         uint32_t nArgs = ci->count_args();
         for (uint32_t i = 0; i < nArgs; i++)
             args[i] = split(args[i]);
 
         if (ci->returnType() == ARGTYPE_D) {
            // This function returns a double as two 32-bit values, so replace
             // call with qjoin(qhi(call), call).
             return split(ci, args);
@@ -2778,17 +2778,17 @@ namespace nanojit
     }
 
     void ValidateWriter::checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op2)
     {
         if (!ins->isop(op2))
             errorStructureShouldBe(op, "argument", argN, ins, lirNames[op2]);
     }
 
-    void ValidateWriter::checkAccSet(LOpcode op, LInsp base, AccSet accSet, AccSet maxAccSet)
+    void ValidateWriter::checkAccSet(LOpcode op, LIns* base, AccSet accSet, AccSet maxAccSet)
     {
         if (accSet == ACC_NONE)
             errorAccSet(lirNames[op], accSet, "it should not equal ACC_NONE");
 
         if (accSet & ~maxAccSet)
             errorAccSet(lirNames[op], accSet,
                 "it should not contain bits that aren't in ACC_LOAD_ANY/ACC_STORE_ANY");
 
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -960,17 +960,16 @@ namespace nanojit
         #ifdef NANOJIT_64BIT
             return (void*)immQ();
         #else
             return (void*)immI();
         #endif
         }
     };
 
-    typedef LIns* LInsp;
     typedef SeqBuilder<LIns*> InsList;
     typedef SeqBuilder<char*> StringList;
 
 
     // 0-operand form.  Used for LIR_start and LIR_label.
     class LInsOp0
     {
     private:
@@ -1422,71 +1421,71 @@ namespace nanojit
     {
     public:
         LirWriter *out;
 
         LirWriter(LirWriter* out)
             : out(out) {}
         virtual ~LirWriter() {}
 
-        virtual LInsp ins0(LOpcode v) {
+        virtual LIns* ins0(LOpcode v) {
             return out->ins0(v);
         }
-        virtual LInsp ins1(LOpcode v, LIns* a) {
+        virtual LIns* ins1(LOpcode v, LIns* a) {
             return out->ins1(v, a);
         }
-        virtual LInsp ins2(LOpcode v, LIns* a, LIns* b) {
+        virtual LIns* ins2(LOpcode v, LIns* a, LIns* b) {
             return out->ins2(v, a, b);
         }
-        virtual LInsp ins3(LOpcode v, LIns* a, LIns* b, LIns* c) {
+        virtual LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c) {
             return out->ins3(v, a, b, c);
         }
-        virtual LInsp insGuard(LOpcode v, LIns *c, GuardRecord *gr) {
+        virtual LIns* insGuard(LOpcode v, LIns *c, GuardRecord *gr) {
             return out->insGuard(v, c, gr);
         }
-        virtual LInsp insGuardXov(LOpcode v, LIns *a, LIns* b, GuardRecord *gr) {
+        virtual LIns* insGuardXov(LOpcode v, LIns *a, LIns* b, GuardRecord *gr) {
             return out->insGuardXov(v, a, b, gr);
         }
-        virtual LInsp insBranch(LOpcode v, LIns* condition, LIns* to) {
+        virtual LIns* insBranch(LOpcode v, LIns* condition, LIns* to) {
             return out->insBranch(v, condition, to);
         }
-        virtual LInsp insBranchJov(LOpcode v, LIns* a, LIns* b, LIns* to) {
+        virtual LIns* insBranchJov(LOpcode v, LIns* a, LIns* b, LIns* to) {
             return out->insBranchJov(v, a, b, to);
         }
         // arg: 0=first, 1=second, ...
         // kind: 0=arg 1=saved-reg
-        virtual LInsp insParam(int32_t arg, int32_t kind) {
+        virtual LIns* insParam(int32_t arg, int32_t kind) {
             return out->insParam(arg, kind);
         }
-        virtual LInsp insImmI(int32_t imm) {
+        virtual LIns* insImmI(int32_t imm) {
             return out->insImmI(imm);
         }
 #ifdef NANOJIT_64BIT
-        virtual LInsp insImmQ(uint64_t imm) {
+        virtual LIns* insImmQ(uint64_t imm) {
             return out->insImmQ(imm);
         }
 #endif
-        virtual LInsp insImmD(double d) {
+        virtual LIns* insImmD(double d) {
             return out->insImmD(d);
         }
-        virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet) {
+        virtual LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet) {
             return out->insLoad(op, base, d, accSet);
         }
-        virtual LInsp insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet) {
+        virtual LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet) {
             return out->insStore(op, value, base, d, accSet);
         }
         // args[] is in reverse order, ie. args[0] holds the rightmost arg.
-        virtual LInsp insCall(const CallInfo *call, LInsp args[]) {
+        virtual LIns* insCall(const CallInfo *call, LIns* args[]) {
             return out->insCall(call, args);
         }
-        virtual LInsp insAlloc(int32_t size) {
+        virtual LIns* insAlloc(int32_t size) {
             NanoAssert(size != 0);
             return out->insAlloc(size);
         }
-        virtual LInsp insJtbl(LIns* index, uint32_t size) {
+        virtual LIns* insJtbl(LIns* index, uint32_t size) {
             return out->insJtbl(index, size);
         }
 
         // convenience functions
 
        // Inserts a select: yields 'iftrue' when 'cond' is true and
        // 'iffalse' otherwise, using a conditional move when 'use_cmov' is set.
         LIns* insChoose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);
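
(Aside: because every method above defaults to forwarding to 'out',
LirWriters compose into a front-to-back pipeline.  A hedged wiring
sketch; the buffer, config and allocator are assumed to exist already,
and the stage order is illustrative rather than prescribed:)

    LirBufWriter bufWriter(lirbuf, config);  // back end: writes the buffer
    CseFilter    cse(&bufWriter, alloc);     // middle: subexpression reuse
    ExprFilter   expr(&cse);                 // front: algebraic simplification
    LirWriter*   lir = &expr;

    LIns* a = lir->insImmI(1);
    LIns* b = lir->insImmI(1);
    // With CseFilter in the pipeline, 'a' and 'b' are the same LIns.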
@@ -1587,40 +1586,40 @@ namespace nanojit
                 return c;
             }
         };
 
         CountMap<int> lircounts;
         CountMap<const CallInfo *> funccounts;
         CountMap<const char *> namecounts;
 
-        void addNameWithSuffix(LInsp i, const char *s, int suffix, bool ignoreOneSuffix);
+        void addNameWithSuffix(LIns* i, const char *s, int suffix, bool ignoreOneSuffix);
 
         class Entry
         {
         public:
             Entry(int) : name(0) {}
             Entry(char* n) : name(n) {}
             char* name;
         };
 
-        HashMap<LInsp, Entry*> names;
+        HashMap<LIns*, Entry*> names;
 
     public:
         LirNameMap(Allocator& alloc)
             : alloc(alloc),
             lircounts(alloc),
             funccounts(alloc),
             namecounts(alloc),
             names(alloc)
         {}
 
-        void        addName(LInsp ins, const char *s);  // gives 'ins' a special name
-        const char* createName(LInsp ins);              // gives 'ins' a generic name
-        const char* lookupName(LInsp ins);
+        void        addName(LIns* ins, const char *s);  // gives 'ins' a special name
+        const char* createName(LIns* ins);              // gives 'ins' a generic name
+        const char* lookupName(LIns* ins);
     };
 
     // We use big buffers for cases where we need to fit a whole instruction,
     // and smaller buffers for all the others.  These should easily be long
     // enough, but for safety the formatXyz() functions check and won't exceed
     // those limits.
     class InsBuf {
     public:
@@ -1636,30 +1635,30 @@ namespace nanojit
     class LInsPrinter
     {
     private:
         Allocator& alloc;
 
         char *formatImmI(RefBuf* buf, int32_t c);
         char *formatImmQ(RefBuf* buf, uint64_t c);
         char *formatImmD(RefBuf* buf, double c);
-        void formatGuard(InsBuf* buf, LInsp ins);
-        void formatGuardXov(InsBuf* buf, LInsp ins);
+        void formatGuard(InsBuf* buf, LIns* ins);
+        void formatGuardXov(InsBuf* buf, LIns* ins);
 
     public:
         LInsPrinter(Allocator& alloc)
             : alloc(alloc)
         {
             addrNameMap = new (alloc) AddrNameMap(alloc);
             lirNameMap = new (alloc) LirNameMap(alloc);
         }
 
         char *formatAddr(RefBuf* buf, void* p);
-        char *formatRef(RefBuf* buf, LInsp ref, bool showImmValue = true);
-        char *formatIns(InsBuf* buf, LInsp ins);
+        char *formatRef(RefBuf* buf, LIns* ref, bool showImmValue = true);
+        char *formatIns(InsBuf* buf, LIns* ins);
         char *formatAccSet(RefBuf* buf, AccSet accSet);
 
         AddrNameMap* addrNameMap;
         LirNameMap* lirNameMap;
     };
 
 
     class VerboseWriter : public LirWriter
@@ -1670,26 +1669,26 @@ namespace nanojit
         const char* const prefix;
         bool const always_flush;
     public:
         VerboseWriter(Allocator& alloc, LirWriter *out, LInsPrinter* printer, LogControl* logc,
                       const char* prefix = "", bool always_flush = false)
             : LirWriter(out), code(alloc), printer(printer), logc(logc), prefix(prefix), always_flush(always_flush)
         {}
 
-        LInsp add(LInsp i) {
+        LIns* add(LIns* i) {
             if (i) {
                 code.add(i);
                 if (always_flush)
                     flush();
             }
             return i;
         }
 
-        LInsp add_flush(LInsp i) {
+        LIns* add_flush(LIns* i) {
             if ((i = add(i)) != 0)
                 flush();
             return i;
         }
 
         void flush()
         {
             if (!code.isEmpty()) {
@@ -1700,62 +1699,62 @@ namespace nanojit
                     count++;
                 }
                 code.clear();
                 if (count > 1)
                     logc->printf("\n");
             }
         }
 
-        LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr) {
+        LIns* insGuard(LOpcode op, LIns* cond, GuardRecord *gr) {
             return add_flush(out->insGuard(op,cond,gr));
         }
 
-        LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr) {
+        LIns* insGuardXov(LOpcode op, LIns* a, LIns* b, GuardRecord *gr) {
             return add_flush(out->insGuardXov(op,a,b,gr));
         }
 
-        LIns* insBranch(LOpcode v, LInsp condition, LInsp to) {
+        LIns* insBranch(LOpcode v, LIns* condition, LIns* to) {
             return add_flush(out->insBranch(v, condition, to));
         }
 
-        LIns* insBranchJov(LOpcode v, LInsp a, LInsp b, LInsp to) {
+        LIns* insBranchJov(LOpcode v, LIns* a, LIns* b, LIns* to) {
             return add_flush(out->insBranchJov(v, a, b, to));
         }
 
         LIns* insJtbl(LIns* index, uint32_t size) {
             return add_flush(out->insJtbl(index, size));
         }
 
         LIns* ins0(LOpcode v) {
             if (v == LIR_label || v == LIR_start) {
                 flush();
             }
             return add(out->ins0(v));
         }
 
-        LIns* ins1(LOpcode v, LInsp a) {
+        LIns* ins1(LOpcode v, LIns* a) {
             return isRetOpcode(v) ? add_flush(out->ins1(v, a)) : add(out->ins1(v, a));
         }
-        LIns* ins2(LOpcode v, LInsp a, LInsp b) {
+        LIns* ins2(LOpcode v, LIns* a, LIns* b) {
             return add(out->ins2(v, a, b));
         }
-        LIns* ins3(LOpcode v, LInsp a, LInsp b, LInsp c) {
+        LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c) {
             return add(out->ins3(v, a, b, c));
         }
-        LIns* insCall(const CallInfo *call, LInsp args[]) {
+        LIns* insCall(const CallInfo *call, LIns* args[]) {
             return add_flush(out->insCall(call, args));
         }
         LIns* insParam(int32_t i, int32_t kind) {
             return add(out->insParam(i, kind));
         }
-        LIns* insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet) {
+        LIns* insLoad(LOpcode v, LIns* base, int32_t disp, AccSet accSet) {
             return add(out->insLoad(v, base, disp, accSet));
         }
-        LIns* insStore(LOpcode op, LInsp v, LInsp b, int32_t d, AccSet accSet) {
+        LIns* insStore(LOpcode op, LIns* v, LIns* b, int32_t d, AccSet accSet) {
             return add(out->insStore(op, v, b, d, accSet));
         }
         LIns* insAlloc(int32_t size) {
             return add(out->insAlloc(size));
         }
         LIns* insImmI(int32_t imm) {
             return add(out->insImmI(imm));
         }
@@ -1777,19 +1776,19 @@ namespace nanojit
         ExprFilter(LirWriter *out) : LirWriter(out) {}
         LIns* ins1(LOpcode v, LIns* a);
         LIns* ins2(LOpcode v, LIns* a, LIns* b);
         LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
         LIns* insGuard(LOpcode, LIns* cond, GuardRecord *);
         LIns* insGuardXov(LOpcode, LIns* a, LIns* b, GuardRecord *);
         LIns* insBranch(LOpcode, LIns* cond, LIns* target);
         LIns* insBranchJov(LOpcode, LIns* a, LIns* b, LIns* target);
-        LIns* insLoad(LOpcode op, LInsp base, int32_t off, AccSet accSet);
+        LIns* insLoad(LOpcode op, LIns* base, int32_t off, AccSet accSet);
     private:
-        LIns* simplifyOverflowArith(LOpcode op, LInsp *opnd1, LInsp *opnd2);
+        LIns* simplifyOverflowArith(LOpcode op, LIns** opnd1, LIns** opnd2);
     };
 
     class CseFilter: public LirWriter
     {
         enum LInsHashKind {
             // We divide instruction kinds into groups.  LIns0 isn't present
             // because we don't need to record any 0-ary instructions.
             LInsImmI = 0,
@@ -1825,95 +1824,95 @@ namespace nanojit
         // lists appropriately (some instructions are more common than others).
         // It also lets us have kind-specific find/add/grow functions, which
         // are faster than generic versions.
         //
         // Nb: Size must be a power of 2.
         //     Don't start too small, or we'll waste time growing and rehashing.
        //     Don't start too large, or we'll waste memory.
         //
-        LInsp*      m_list[LInsLast + 1];
+        LIns**      m_list[LInsLast + 1];
         uint32_t    m_cap[LInsLast + 1];
         uint32_t    m_used[LInsLast + 1];
-        typedef uint32_t (CseFilter::*find_t)(LInsp);
+        typedef uint32_t (CseFilter::*find_t)(LIns*);
         find_t      m_find[LInsLast + 1];
 
         AccSet      storesSinceLastLoad;    // regions stored to since the last load
 
         Allocator& alloc;
 
         static uint32_t hash8(uint32_t hash, const uint8_t data);
         static uint32_t hash32(uint32_t hash, const uint32_t data);
         static uint32_t hashptr(uint32_t hash, const void* data);
         static uint32_t hashfinish(uint32_t hash);
 
         static uint32_t hashImmI(int32_t);
         static uint32_t hashImmQorD(uint64_t);     // not NANOJIT_64BIT-only -- used by findImmD()
-        static uint32_t hash1(LOpcode op, LInsp);
-        static uint32_t hash2(LOpcode op, LInsp, LInsp);
-        static uint32_t hash3(LOpcode op, LInsp, LInsp, LInsp);
-        static uint32_t hashLoad(LOpcode op, LInsp, int32_t, AccSet);
-        static uint32_t hashCall(const CallInfo *call, uint32_t argc, LInsp args[]);
+        static uint32_t hash1(LOpcode op, LIns*);
+        static uint32_t hash2(LOpcode op, LIns*, LIns*);
+        static uint32_t hash3(LOpcode op, LIns*, LIns*, LIns*);
+        static uint32_t hashLoad(LOpcode op, LIns*, int32_t, AccSet);
+        static uint32_t hashCall(const CallInfo *call, uint32_t argc, LIns* args[]);
 
         // These versions are used before an LIns has been created.
-        LInsp findImmI(int32_t a, uint32_t &k);
+        LIns* findImmI(int32_t a, uint32_t &k);
 #ifdef NANOJIT_64BIT
-        LInsp findImmQ(uint64_t a, uint32_t &k);
+        LIns* findImmQ(uint64_t a, uint32_t &k);
 #endif
-        LInsp findImmD(uint64_t d, uint32_t &k);
-        LInsp find1(LOpcode v, LInsp a, uint32_t &k);
-        LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k);
-        LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k);
-        LInsp findLoad(LOpcode v, LInsp a, int32_t b, AccSet accSet, LInsHashKind kind,
+        LIns* findImmD(uint64_t d, uint32_t &k);
+        LIns* find1(LOpcode v, LIns* a, uint32_t &k);
+        LIns* find2(LOpcode v, LIns* a, LIns* b, uint32_t &k);
+        LIns* find3(LOpcode v, LIns* a, LIns* b, LIns* c, uint32_t &k);
+        LIns* findLoad(LOpcode v, LIns* a, int32_t b, AccSet accSet, LInsHashKind kind,
                        uint32_t &k);
-        LInsp findCall(const CallInfo *call, uint32_t argc, LInsp args[], uint32_t &k);
+        LIns* findCall(const CallInfo *call, uint32_t argc, LIns* args[], uint32_t &k);
 
         // These versions are used after an LIns has been created; they are
        // used for rehashing after growing.  They just call through to the
         // multi-arg versions above.
-        uint32_t findImmI(LInsp ins);
+        uint32_t findImmI(LIns* ins);
 #ifdef NANOJIT_64BIT
-        uint32_t findImmQ(LInsp ins);
+        uint32_t findImmQ(LIns* ins);
 #endif
-        uint32_t findImmD(LInsp ins);
-        uint32_t find1(LInsp ins);
-        uint32_t find2(LInsp ins);
-        uint32_t find3(LInsp ins);
-        uint32_t findCall(LInsp ins);
-        uint32_t findLoadReadOnly(LInsp ins);
-        uint32_t findLoadStack(LInsp ins);
-        uint32_t findLoadRStack(LInsp ins);
-        uint32_t findLoadOther(LInsp ins);
-        uint32_t findLoadMultiple(LInsp ins);
+        uint32_t findImmD(LIns* ins);
+        uint32_t find1(LIns* ins);
+        uint32_t find2(LIns* ins);
+        uint32_t find3(LIns* ins);
+        uint32_t findCall(LIns* ins);
+        uint32_t findLoadReadOnly(LIns* ins);
+        uint32_t findLoadStack(LIns* ins);
+        uint32_t findLoadRStack(LIns* ins);
+        uint32_t findLoadOther(LIns* ins);
+        uint32_t findLoadMultiple(LIns* ins);
 
         void grow(LInsHashKind kind);
 
         // 'k' is the index found by findXYZ().
-        void add(LInsHashKind kind, LInsp ins, uint32_t k);
+        void add(LInsHashKind kind, LIns* ins, uint32_t k);
 
         void clear();               // clears all tables
         void clear(LInsHashKind);   // clears one table
 
     public:
         CseFilter(LirWriter *out, Allocator&);
 
         LIns* insImmI(int32_t imm);
 #ifdef NANOJIT_64BIT
         LIns* insImmQ(uint64_t q);
 #endif
         LIns* insImmD(double d);
         LIns* ins0(LOpcode v);
-        LIns* ins1(LOpcode v, LInsp);
-        LIns* ins2(LOpcode v, LInsp, LInsp);
-        LIns* ins3(LOpcode v, LInsp, LInsp, LInsp);
-        LIns* insLoad(LOpcode op, LInsp base, int32_t d, AccSet accSet);
-        LIns* insStore(LOpcode op, LInsp value, LInsp base, int32_t d, AccSet accSet);
-        LIns* insCall(const CallInfo *call, LInsp args[]);
-        LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
-        LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
+        LIns* ins1(LOpcode v, LIns*);
+        LIns* ins2(LOpcode v, LIns*, LIns*);
+        LIns* ins3(LOpcode v, LIns*, LIns*, LIns*);
+        LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet);
+        LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet);
+        LIns* insCall(const CallInfo *call, LIns* args[]);
+        LIns* insGuard(LOpcode op, LIns* cond, GuardRecord *gr);
+        LIns* insGuardXov(LOpcode op, LIns* a, LIns* b, GuardRecord *gr);
     };
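
(Aside: a schematic illustration of the storesSinceLastLoad rule used by
insLoad(); instruction spellings are illustrative only:)

    // v1 = ldi p[0]   ACC_OTHER   ; cached in the LInsLoadOther table
    // sti x, q[0]     ACC_OTHER   ; an ACC_OTHER store may alias p[0]...
    // v2 = ldi p[0]   ACC_OTHER   ; ...so the table was cleared and v2
    //                             ; is a fresh load, not CSE'd to v1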
 
     class LirBuffer
     {
         public:
             LirBuffer(Allocator& alloc);
             void        clear();
             uintptr_t   makeRoom(size_t szB);   // make room for an instruction
@@ -1927,18 +1926,18 @@ namespace nanojit
             // stats
             struct
             {
                 uint32_t lir;    // # instructions
             }
             _stats;
 
             AbiKind abi;
-            LInsp state,param1,sp,rp;
-            LInsp savedRegs[NumSavedRegs];
+            LIns *state, *param1, *sp, *rp;
+            LIns* savedRegs[NumSavedRegs];
 
         protected:
             friend class LirBufWriter;
 
             /** Each chunk is just a raw area of LIns instances, with no header
                 and no more than 8-byte alignment.  The chunk size is somewhat arbitrary. */
             static const size_t CHUNK_SZB = 8000;
 
@@ -1958,99 +1957,99 @@ namespace nanojit
         const Config&           _config;
 
         public:
             LirBufWriter(LirBuffer* buf, const Config& config)
                 : LirWriter(0), _buf(buf), _config(config) {
             }
 
             // LirWriter interface
-            LInsp   insLoad(LOpcode op, LInsp base, int32_t disp, AccSet accSet);
-            LInsp   insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp, AccSet accSet);
-            LInsp   ins0(LOpcode op);
-            LInsp   ins1(LOpcode op, LInsp o1);
-            LInsp   ins2(LOpcode op, LInsp o1, LInsp o2);
-            LInsp   ins3(LOpcode op, LInsp o1, LInsp o2, LInsp o3);
-            LInsp   insParam(int32_t i, int32_t kind);
-            LInsp   insImmI(int32_t imm);
+            LIns*   insLoad(LOpcode op, LIns* base, int32_t disp, AccSet accSet);
+            LIns*   insStore(LOpcode op, LIns* o1, LIns* o2, int32_t disp, AccSet accSet);
+            LIns*   ins0(LOpcode op);
+            LIns*   ins1(LOpcode op, LIns* o1);
+            LIns*   ins2(LOpcode op, LIns* o1, LIns* o2);
+            LIns*   ins3(LOpcode op, LIns* o1, LIns* o2, LIns* o3);
+            LIns*   insParam(int32_t i, int32_t kind);
+            LIns*   insImmI(int32_t imm);
 #ifdef NANOJIT_64BIT
-            LInsp   insImmQ(uint64_t imm);
+            LIns*   insImmQ(uint64_t imm);
 #endif
-            LInsp   insImmD(double d);
-            LInsp   insCall(const CallInfo *call, LInsp args[]);
-            LInsp   insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
-            LInsp   insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
-            LInsp   insBranch(LOpcode v, LInsp condition, LInsp to);
-            LInsp   insBranchJov(LOpcode v, LInsp a, LInsp b, LInsp to);
-            LInsp   insAlloc(int32_t size);
-            LInsp   insJtbl(LIns* index, uint32_t size);
+            LIns*   insImmD(double d);
+            LIns*   insCall(const CallInfo *call, LIns* args[]);
+            LIns*   insGuard(LOpcode op, LIns* cond, GuardRecord *gr);
+            LIns*   insGuardXov(LOpcode op, LIns* a, LIns* b, GuardRecord *gr);
+            LIns*   insBranch(LOpcode v, LIns* condition, LIns* to);
+            LIns*   insBranchJov(LOpcode v, LIns* a, LIns* b, LIns* to);
+            LIns*   insAlloc(int32_t size);
+            LIns*   insJtbl(LIns* index, uint32_t size);
     };
 
     class LirFilter
     {
     public:
         LirFilter *in;
         LirFilter(LirFilter *in) : in(in) {}
         virtual ~LirFilter(){}
 
         // It's crucial that once this reaches the LIR_start at the beginning
        // of the buffer, it just keeps returning that LIR_start LIns on
         // any subsequent calls.
-        virtual LInsp read() {
+        virtual LIns* read() {
             return in->read();
         }
-        virtual LInsp finalIns() {
+        virtual LIns* finalIns() {
             return in->finalIns();
         }
     };
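
(Aside: a minimal pass-through filter, my sketch, showing why the
LIR_start invariant spares derived filters any end-of-stream handling:)

    // Forwards every instruction unchanged.  Once the underlying reader
    // reaches LIR_start it keeps returning it, so no EOF check is needed.
    class NoopFilter : public LirFilter
    {
    public:
        NoopFilter(LirFilter* in) : LirFilter(in) {}
        LIns* read() { return in->read(); }
    };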
 
     // concrete
     class LirReader : public LirFilter
     {
-        LInsp _ins;         // next instruction to be read;  invariant: is never a skip
-        LInsp _finalIns;    // final instruction in the stream;  ie. the first one to be read
+        LIns* _ins;         // next instruction to be read;  invariant: is never a skip
+        LIns* _finalIns;    // final instruction in the stream;  ie. the first one to be read
 
     public:
-        LirReader(LInsp ins) : LirFilter(0), _ins(ins), _finalIns(ins)
+        LirReader(LIns* ins) : LirFilter(0), _ins(ins), _finalIns(ins)
         {
             // The last instruction for a fragment shouldn't be a skip.
             // (Actually, if the last *inserted* instruction exactly fills up
             // a chunk, a new chunk will be created, and thus the last *written*
             // instruction will be a skip -- the one needed for the
             // cross-chunk link.  But the last *inserted* instruction is what
             // is recorded and used to initialise each LirReader, and that is
             // what is seen here, and therefore this assertion holds.)
             NanoAssert(ins && !ins->isop(LIR_skip));
         }
         virtual ~LirReader() {}
 
        // LIR is traversed backwards, so this returns the next instruction
        // in reading order and advances towards the buffer's start.
        // Invariant: never returns a skip.
-        LInsp read();
+        LIns* read();
 
-        LInsp finalIns() {
+        LIns* finalIns() {
             return _finalIns;
         }
     };
 
     verbose_only(void live(LirFilter* in, Allocator& alloc, Fragment* frag, LogControl*);)
 
     // WARNING: StackFilter assumes that all stack entries are eight bytes.
     // Some of its optimisations aren't valid if that isn't true.  See
     // StackFilter::read() for more details.
     class StackFilter: public LirFilter
     {
-        LInsp sp;
+        LIns* sp;
         BitSet stk;
         int top;
-        int getTop(LInsp br);
+        int getTop(LIns* br);
 
     public:
-        StackFilter(LirFilter *in, Allocator& alloc, LInsp sp);
-        LInsp read();
+        StackFilter(LirFilter *in, Allocator& alloc, LIns* sp);
+        LIns* read();
     };
 
     struct SoftFloatOps
     {
         const CallInfo* opmap[LIR_sentinel];
         SoftFloatOps();
     };
 
@@ -2060,23 +2059,23 @@ namespace nanojit
     // hardware (eg. some ARM machines).
     class SoftFloatFilter: public LirWriter
     {
     public:
         static const CallInfo* opmap[LIR_sentinel];
 
         SoftFloatFilter(LirWriter *out);
         LIns *split(LIns *a);
-        LIns *split(const CallInfo *call, LInsp args[]);
+        LIns *split(const CallInfo *call, LIns* args[]);
         LIns *callD1(const CallInfo *call, LIns *a);
         LIns *callD2(const CallInfo *call, LIns *a, LIns *b);
         LIns *cmpD(const CallInfo *call, LIns *a, LIns *b);
         LIns *ins1(LOpcode op, LIns *a);
         LIns *ins2(LOpcode op, LIns *a, LIns *b);
-        LIns *insCall(const CallInfo *ci, LInsp args[]);
+        LIns *insCall(const CallInfo *ci, LIns* args[]);
     };
 
 
 #ifdef DEBUG
     // This class does thorough checking of LIR.  It checks *implicit* LIR
     // instructions, ie. LIR instructions specified via arguments -- to
     // methods like insLoad() -- that have not yet been converted into
     // *explicit* LIns objects in a LirBuffer.  The reason for this is that if
@@ -2097,24 +2096,24 @@ namespace nanojit
         const char* type2string(LTy type);
         void typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args[]);
         void errorStructureShouldBe(LOpcode op, const char* argDesc, int argN, LIns* arg,
                                     const char* shouldBeDesc);
         void errorAccSet(const char* what, AccSet accSet, const char* shouldDesc);
         void checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op2);
         void checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins);
         void checkLInsIsNull(LOpcode op, int argN, LIns* ins);
-        void checkAccSet(LOpcode op, LInsp base, AccSet accSet, AccSet maxAccSet);
+        void checkAccSet(LOpcode op, LIns* base, AccSet accSet, AccSet maxAccSet);
 
-        LInsp sp, rp;
+        LIns *sp, *rp;
 
     public:
         ValidateWriter(LirWriter* out, LInsPrinter* printer, const char* where);
-        void setSp(LInsp ins) { sp = ins; }
-        void setRp(LInsp ins) { rp = ins; }
+        void setSp(LIns* ins) { sp = ins; }
+        void setRp(LIns* ins) { rp = ins; }
 
         LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet);
         LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet);
         LIns* ins0(LOpcode v);
         LIns* ins1(LOpcode v, LIns* a);
         LIns* ins2(LOpcode v, LIns* a, LIns* b);
         LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
         LIns* insParam(int32_t arg, int32_t kind);
@@ -2163,14 +2162,14 @@ namespace nanojit
             , _printer(printer)
             , _title(title)
             , _strs(alloc)
             , _logc(logc)
             , _prevIns(NULL)
         { }
 
         void finish();
-        LInsp read();
+        LIns* read();
     };
 #endif
 
 }
 #endif // __nanojit_LIR__
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -503,17 +503,17 @@ Assembler::genPrologue()
     NIns *patchEntry = _nIns;
 
     MOV(FP, SP);
     PUSH_mask(savingMask);
     return patchEntry;
 }
 
 void
-Assembler::nFragExit(LInsp guard)
+Assembler::nFragExit(LIns* guard)
 {
     SideExit *  exit = guard->record()->exit;
     Fragment *  frag = exit->target;
 
     bool        target_is_known = frag && frag->fragEntry;
 
     if (target_is_known) {
         // The target exists so we can simply emit a branch to its location.
@@ -607,17 +607,17 @@ Assembler::genEpilogue()
  * - doubles are placed in subsequent arg registers; if the next
  *   available register is r3, the low order word goes into r3
  *   and the high order goes on the stack.
 * - 32-bit arguments are placed in the next available arg register.
 * - once the arg registers are exhausted, both doubles and 32-bit
 *   arguments are placed on the stack with 32-bit alignment.
  */
 void
-Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd)
+Assembler::asm_arg(ArgType ty, LIns* arg, Register& r, int& stkd)
 {
     // The stack pointer must always be at least aligned to 4 bytes.
     NanoAssert((stkd & 3) == 0);
 
     if (ty == ARGTYPE_D) {
         // This task is fairly complex and so is delegated to asm_arg_64.
         asm_arg_64(arg, r, stkd);
     } else {
@@ -632,17 +632,17 @@ Assembler::asm_arg(ArgType ty, LInsp arg
         }
     }
 }
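
(Aside: a worked placement example for the rules above; the signature is
hypothetical and assumes the soft-float path, where each double travels
as two 32-bit words:)

    // f(int a, double b, int c, double d)
    //
    //   a -> r0
    //   b -> r1 (low word), r2 (high word)
    //   c -> r3
    //   d -> [sp+0] (low), [sp+4] (high)   // stack, 32-bit aligned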
 
 // Encode a 64-bit floating-point argument using the appropriate ABI.
 // This function operates in the same way as asm_arg, except that it will only
 // handle arguments where (ArgType)ty == ARGTYPE_D.
 void
-Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
+Assembler::asm_arg_64(LIns* arg, Register& r, int& stkd)
 {
     // The stack pointer must always be at least aligned to 4 bytes.
     NanoAssert((stkd & 3) == 0);
     // The only use for this function when we are using soft floating-point
     // is for LIR_ii2d.
     NanoAssert(_config.arm_vfp || arg->isop(LIR_ii2d));
 
     Register    fp_reg = deprecated_UnknownReg;
@@ -730,17 +730,17 @@ Assembler::asm_arg_64(LInsp arg, Registe
         }
 #endif
         asm_stkarg(arg, stkd);
         stkd += 8;
     }
 }
 
 void
-Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
+Assembler::asm_regarg(ArgType ty, LIns* p, Register r)
 {
     NanoAssert(deprecated_isKnownReg(r));
     if (ty == ARGTYPE_I || ty == ARGTYPE_UI)
     {
         // arg goes in specific register
         if (p->isImmI()) {
             asm_ld_imm(r, p->immI());
         } else {
@@ -770,17 +770,17 @@ Assembler::asm_regarg(ArgType ty, LInsp 
         NanoAssert(ty == ARGTYPE_D);
         // fpu argument in register - should never happen since FPU
         // args are converted to two 32-bit ints on ARM
         NanoAssert(false);
     }
 }
 
 void
-Assembler::asm_stkarg(LInsp arg, int stkd)
+Assembler::asm_stkarg(LIns* arg, int stkd)
 {
     bool isF64 = arg->isD();
 
     Register rr;
     if (arg->isExtant() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
         // The argument resides somewhere in registers, so we simply need to
         // push it onto the stack.
         if (!_config.arm_vfp || !isF64) {
@@ -825,17 +825,17 @@ Assembler::asm_stkarg(LInsp arg, int stk
             LDR(IP, FP, d+4);
             asm_str(IP, SP, stkd);
             LDR(IP, FP, d);
         }
     }
 }
 
 void
-Assembler::asm_call(LInsp ins)
+Assembler::asm_call(LIns* ins)
 {
     if (_config.arm_vfp && ins->isop(LIR_calld)) {
         /* Because ARM actually returns the result in (R0,R1), and not in a
          * floating point register, the code to move the result into a correct
          * register is below.  We do nothing here.
          *
          * The reason being that if we did something here, the final code
          * sequence we'd get would be something like:
@@ -1265,17 +1265,17 @@ canRematALU(LIns *ins)
 
 bool
 Assembler::canRemat(LIns* ins)
 {
     return ins->isImmI() || ins->isop(LIR_allocp) || canRematALU(ins);
 }
 
 void
-Assembler::asm_restore(LInsp i, Register r)
+Assembler::asm_restore(LIns* i, Register r)
 {
     // The following registers should never be restored:
     NanoAssert(r != PC);
     NanoAssert(r != IP);
     NanoAssert(r != SP);
 
     if (i->isop(LIR_allocp)) {
         asm_add_imm(r, FP, deprecated_disp(i));
@@ -1355,17 +1355,17 @@ Assembler::asm_spill(Register rr, int d,
                 _nIns++;
                 verbose_only( asm_output("merge next into STMDB"); )
             }
         }
     }
 }
 
 void
-Assembler::asm_load64(LInsp ins)
+Assembler::asm_load64(LIns* ins)
 {
     //asm_output("<<< load64");
 
     NanoAssert(ins->isD());
 
     LIns* base = ins->oprnd1();
     int offset = ins->disp();
 
@@ -1439,17 +1439,17 @@ Assembler::asm_load64(LInsp ins)
             NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
             return;
     }
 
     //asm_output(">>> load64");
 }
 
 void
-Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
+Assembler::asm_store64(LOpcode op, LIns* value, int dr, LIns* base)
 {
     //asm_output("<<< store64 (dr: %d)", dr);
 
     switch (op) {
         case LIR_std:
             if (_config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
@@ -1581,17 +1581,17 @@ Assembler::asm_immd_nochk(Register rr, i
 
     *(--_nIns) = (NIns) immDhi;
     *(--_nIns) = (NIns) immDlo;
 
     B_nochk(_nIns+2);
 }
 
 void
-Assembler::asm_immd(LInsp ins)
+Assembler::asm_immd(LIns* ins)
 {
     int d = deprecated_disp(ins);
     Register rr = ins->deprecated_getReg();
 
     deprecated_freeRsrcOf(ins);
 
     if (_config.arm_vfp && deprecated_isKnownReg(rr)) {
         if (d)
@@ -1618,17 +1618,17 @@ Assembler::asm_nongp_copy(Register r, Re
     } else {
         // We can't move a double-precision FP register into a 32-bit GP
         // register, so assert that no calling code is trying to do that.
         NanoAssert(0);
     }
 }
 
 Register
-Assembler::asm_binop_rhs_reg(LInsp)
+Assembler::asm_binop_rhs_reg(LIns*)
 {
     return deprecated_UnknownReg;
 }
 
 /**
  * copy 64 bits: (rd+dd) <- (rs+ds)
  */
 void
@@ -2232,69 +2232,69 @@ Assembler::B_cond_chk(ConditionCode _c, 
     }
 }
 
 /*
  * VFP
  */
 
 void
-Assembler::asm_i2d(LInsp ins)
+Assembler::asm_i2d(LIns* ins)
 {
     Register rr = deprecated_prepResultReg(ins, FpRegs);
     Register srcr = findRegFor(ins->oprnd1(), GpRegs);
 
     // todo: support int value in memory, as per x86
     NanoAssert(deprecated_isKnownReg(srcr));
 
     FSITOD(rr, S14);
     FMSR(S14, srcr);
 }
 
 void
-Assembler::asm_ui2d(LInsp ins)
+Assembler::asm_ui2d(LIns* ins)
 {
     Register rr = deprecated_prepResultReg(ins, FpRegs);
     Register sr = findRegFor(ins->oprnd1(), GpRegs);
 
     // todo: support int value in memory, as per x86
     NanoAssert(deprecated_isKnownReg(sr));
 
     FUITOD(rr, S14);
     FMSR(S14, sr);
 }
 
-void Assembler::asm_d2i(LInsp ins)
+void Assembler::asm_d2i(LIns* ins)
 {
     // where our result goes
     Register rr = deprecated_prepResultReg(ins, GpRegs);
     Register sr = findRegFor(ins->oprnd1(), FpRegs);
 
     FMRS(rr, S14);
     FTOSID(S14, sr);
 }
 
 void
-Assembler::asm_fneg(LInsp ins)
+Assembler::asm_fneg(LIns* ins)
 {
-    LInsp lhs = ins->oprnd1();
+    LIns* lhs = ins->oprnd1();
     Register rr = deprecated_prepResultReg(ins, FpRegs);
 
     Register sr = ( !lhs->isInReg()
                   ? findRegFor(lhs, FpRegs)
                   : lhs->deprecated_getReg() );
 
     FNEGD(rr, sr);
 }
 
 void
-Assembler::asm_fop(LInsp ins)
+Assembler::asm_fop(LIns* ins)
 {
-    LInsp lhs = ins->oprnd1();
-    LInsp rhs = ins->oprnd2();
+    LIns* lhs = ins->oprnd1();
+    LIns* rhs = ins->oprnd2();
     LOpcode op = ins->opcode();
 
     // rr = ra OP rb
 
     Register rr = deprecated_prepResultReg(ins, FpRegs);
 
     Register ra = findRegFor(lhs, FpRegs);
     Register rb = (rhs == lhs) ? ra : findRegFor(rhs, FpRegs & ~rmask(ra));
@@ -2307,20 +2307,20 @@ Assembler::asm_fop(LInsp ins)
         case LIR_subd:      FSUBD(rr,ra,rb);    break;
         case LIR_muld:      FMULD(rr,ra,rb);    break;
         case LIR_divd:      FDIVD(rr,ra,rb);    break;
         default:            NanoAssert(0);      break;
     }
 }
 
 void
-Assembler::asm_cmpd(LInsp ins)
+Assembler::asm_cmpd(LIns* ins)
 {
-    LInsp lhs = ins->oprnd1();
-    LInsp rhs = ins->oprnd2();
+    LIns* lhs = ins->oprnd1();
+    LIns* rhs = ins->oprnd2();
     LOpcode op = ins->opcode();
 
     NanoAssert(isCmpDOpcode(op));
 
     Register ra, rb;
     findRegFor2(FpRegs, lhs, ra, FpRegs, rhs, rb);
 
     int e_bit = (op != LIR_eqd);
@@ -2329,17 +2329,17 @@ Assembler::asm_cmpd(LInsp ins)
     FMSTAT();
     FCMPD(ra, rb, e_bit);
 }
 
 /* Call this with targ set to 0 if the target is not yet known and the branch
  * will be patched up later.
  */
 NIns*
-Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
+Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
 {
     LOpcode condop = cond->opcode();
     NanoAssert(cond->isCmp());
     NanoAssert(_config.arm_vfp || !isCmpDOpcode(condop));
 
     // The old "never" condition code has special meaning on newer ARM cores,
     // so use "always" as a sensible default code.
     ConditionCode cc = AL;
@@ -2409,18 +2409,18 @@ NIns* Assembler::asm_branch_ov(LOpcode o
     // Emit a suitable branch instruction.
     B_cond(cc, target);
     return _nIns;
 }
 
 void
 Assembler::asm_cmp(LIns *cond)
 {
-    LInsp lhs = cond->oprnd1();
-    LInsp rhs = cond->oprnd2();
+    LIns* lhs = cond->oprnd1();
+    LIns* rhs = cond->oprnd2();
 
     NanoAssert(lhs->isI() && rhs->isI());
 
     // ready to issue the compare
     if (rhs->isImmI()) {
         int c = rhs->immI();
         Register r = findRegFor(lhs, GpRegs);
         if (c == 0 && cond->isop(LIR_eqi)) {
@@ -2453,17 +2453,17 @@ Assembler::asm_cmpi(Register r, int32_t 
             underrunProtect(4 + LD32_size);
             CMP(r, IP);
             asm_ld_imm(IP, imm);
         }
     }
 }
 
 void
-Assembler::asm_condd(LInsp ins)
+Assembler::asm_condd(LIns* ins)
 {
     // only want certain regs
     Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
 
     switch (ins->opcode()) {
         case LIR_eqd: SETEQ(r); break;
         case LIR_ltd: SETLO(r); break; // } note: VFP LT/LE operations require use of
         case LIR_led: SETLS(r); break; // } unsigned LO/LS condition codes!
@@ -2471,17 +2471,17 @@ Assembler::asm_condd(LInsp ins)
         case LIR_gtd: SETGT(r); break;
         default: NanoAssert(0); break;
     }
 
     asm_cmpd(ins);
 }
 
 void
-Assembler::asm_cond(LInsp ins)
+Assembler::asm_cond(LIns* ins)
 {
     Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
     LOpcode op = ins->opcode();
 
     switch(op)
     {
         case LIR_eqi:  SETEQ(r); break;
         case LIR_lti:  SETLT(r); break;
@@ -2493,21 +2493,21 @@ Assembler::asm_cond(LInsp ins)
         case LIR_gtui: SETHI(r); break;
         case LIR_geui: SETHS(r); break;
         default:      NanoAssert(0);  break;
     }
     asm_cmp(ins);
 }
 
 void
-Assembler::asm_arith(LInsp ins)
+Assembler::asm_arith(LIns* ins)
 {
     LOpcode op = ins->opcode();
-    LInsp   lhs = ins->oprnd1();
-    LInsp   rhs = ins->oprnd2();
+    LIns*   lhs = ins->oprnd1();
+    LIns*   rhs = ins->oprnd2();
 
     RegisterMask    allow = GpRegs;
 
     // We always need the result register and the first operand register.
     Register        rr = deprecated_prepResultReg(ins, allow);
 
     // If this is the last use of lhs in reg, we can re-use the result reg.
     // Else, lhs already has a register assigned.
@@ -2676,17 +2676,17 @@ Assembler::asm_arith(LInsp ins)
             break;
         default:
             NanoAssertMsg(0, "Unsupported");
             break;
     }
 }
 
 void
-Assembler::asm_neg_not(LInsp ins)
+Assembler::asm_neg_not(LIns* ins)
 {
     LOpcode op = ins->opcode();
     Register rr = deprecated_prepResultReg(ins, GpRegs);
 
     LIns* lhs = ins->oprnd1();
     // If this is the last use of lhs in reg, we can re-use result reg.
     // Else, lhs already has a register assigned.
     Register ra = ( !lhs->isInReg()
@@ -2696,17 +2696,17 @@ Assembler::asm_neg_not(LInsp ins)
 
     if (op == LIR_noti)
         MVN(rr, ra);
     else
         RSBS(rr, ra);
 }
 
 void
-Assembler::asm_load32(LInsp ins)
+Assembler::asm_load32(LIns* ins)
 {
     LOpcode op = ins->opcode();
     LIns* base = ins->oprnd1();
     int d = ins->disp();
 
     Register rr = deprecated_prepResultReg(ins, GpRegs);
     Register ra = getBaseReg(base, d, GpRegs);
 
@@ -2756,17 +2756,17 @@ Assembler::asm_load32(LInsp ins)
             return;
         default:
             NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
             return;
     }
 }
 
 void
-Assembler::asm_cmov(LInsp ins)
+Assembler::asm_cmov(LIns* ins)
 {
     LIns* condval = ins->oprnd1();
     LIns* iftrue  = ins->oprnd2();
     LIns* iffalse = ins->oprnd3();
 
     NanoAssert(condval->isCmp());
     NanoAssert(ins->opcode() == LIR_cmovi && iftrue->isI() && iffalse->isI());
 
@@ -2788,35 +2788,35 @@ Assembler::asm_cmov(LInsp ins)
         case LIR_geui:   MOVLO(rr, iffalsereg);  break;
         default: debug_only( NanoAssert(0) );    break;
     }
     /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
     asm_cmp(condval);
 }
 
 void
-Assembler::asm_qhi(LInsp ins)
+Assembler::asm_qhi(LIns* ins)
 {
     Register rr = deprecated_prepResultReg(ins, GpRegs);
     LIns *q = ins->oprnd1();
     int d = findMemFor(q);
     LDR(rr, FP, d+4);
 }
 
 void
-Assembler::asm_qlo(LInsp ins)
+Assembler::asm_qlo(LIns* ins)
 {
     Register rr = deprecated_prepResultReg(ins, GpRegs);
     LIns *q = ins->oprnd1();
     int d = findMemFor(q);
     LDR(rr, FP, d);
 }
 
 void
-Assembler::asm_param(LInsp ins)
+Assembler::asm_param(LIns* ins)
 {
     uint32_t a = ins->paramArg();
     uint32_t kind = ins->paramKind();
     if (kind == 0) {
         // ordinary param
         AbiKind abi = _thisfrag->lirbuf->abi;
         uint32_t abi_regcount = abi == ABI_CDECL ? 4 : abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
         if (a < abi_regcount) {
@@ -2830,17 +2830,17 @@ Assembler::asm_param(LInsp ins)
         }
     } else {
         // saved param
         deprecated_prepResultReg(ins, rmask(savedRegs[a]));
     }
 }
 
 void
-Assembler::asm_immi(LInsp ins)
+Assembler::asm_immi(LIns* ins)
 {
     Register rr = deprecated_prepResultReg(ins, GpRegs);
     asm_ld_imm(rr, ins->immI());
 }
 
 void
 Assembler::asm_ret(LIns *ins)
 {
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -216,26 +216,26 @@ verbose_only( extern const char* shiftNa
     void        BranchWithLink(NIns* addr);                                     \
     inline void BLX(Register addr, bool chk = true);                            \
     void        JMP_far(NIns*);                                                 \
     void        B_cond_chk(ConditionCode, NIns*, bool);                         \
     void        underrunProtect(int bytes);                                     \
     void        nativePageReset();                                              \
     void        nativePageSetup();                                              \
     void        asm_immd_nochk(Register, int32_t, int32_t);                     \
-    void        asm_regarg(ArgType, LInsp, Register);                           \
-    void        asm_stkarg(LInsp p, int stkd);                                  \
+    void        asm_regarg(ArgType, LIns*, Register);                           \
+    void        asm_stkarg(LIns* p, int stkd);                                  \
     void        asm_cmpi(Register, int32_t imm);                                \
     void        asm_ldr_chk(Register d, Register b, int32_t off, bool chk);     \
     int32_t     asm_str(Register rt, Register rr, int32_t off);                 \
     void        asm_cmp(LIns *cond);                                            \
     void        asm_cmpd(LIns *cond);                                           \
     void        asm_ld_imm(Register d, int32_t imm, bool chk = true);           \
-    void        asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd);         \
-    void        asm_arg_64(LInsp arg, Register& r, int& stkd);                  \
+    void        asm_arg(ArgType ty, LIns* arg, Register& r, int& stkd);         \
+    void        asm_arg_64(LIns* arg, Register& r, int& stkd);                  \
     void        asm_add_imm(Register rd, Register rn, int32_t imm, int stat = 0);   \
     void        asm_sub_imm(Register rd, Register rn, int32_t imm, int stat = 0);   \
     void        asm_and_imm(Register rd, Register rn, int32_t imm, int stat = 0);   \
     void        asm_orr_imm(Register rd, Register rn, int32_t imm, int stat = 0);   \
     void        asm_eor_imm(Register rd, Register rn, int32_t imm, int stat = 0);   \
     inline bool     encOp2Imm(uint32_t literal, uint32_t * enc);                \
     inline uint32_t CountLeadingZeroes(uint32_t data);                          \
     int *       _nSlot;                                                         \
--- a/js/src/nanojit/NativeMIPS.cpp
+++ b/js/src/nanojit/NativeMIPS.cpp
@@ -384,17 +384,17 @@ namespace nanojit
         else {
             SW(AT, dr+mswoff(), rbase);
             // If the MSW & LSW values are different, reload AT
             if (msw != lsw)
                 asm_li(AT, msw);
         }
     }
 
-    void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
+    void Assembler::asm_regarg(ArgType ty, LIns* p, Register r)
     {
         NanoAssert(deprecated_isKnownReg(r));
         if (ty == ARGTYPE_I || ty == ARGTYPE_UI) {
             // arg goes in specific register
             if (p->isImmI())
                 asm_li(r, p->immI());
             else {
                 if (p->isExtant()) {
@@ -418,17 +418,17 @@ namespace nanojit
             }
         }
         else {
             // Other argument types unsupported
             NanoAssert(false);
         }
     }
 
-    void Assembler::asm_stkarg(LInsp arg, int stkd)
+    void Assembler::asm_stkarg(LIns* arg, int stkd)
     {
         bool isF64 = arg->isD();
         Register rr;
         if (arg->isExtant() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
             // The argument resides somewhere in registers, so we simply need to
             // push it onto the stack.
             if (!cpu_has_fpu || !isF64) {
                 NanoAssert(IsGpReg(rr));
@@ -461,17 +461,17 @@ namespace nanojit
             }
         }
     }
 
     // Encode a 64-bit floating-point argument using the appropriate ABI.
     // This function operates in the same way as asm_arg, except that it will only
     // handle arguments where (ArgType)ty == ARGTYPE_D.
     void
-    Assembler::asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd)
+    Assembler::asm_arg_64(LIns* arg, Register& r, Register& fr, int& stkd)
     {
        // The stack offset must always be at least aligned to 4 bytes.
         NanoAssert((stkd & 3) == 0);
 #if NJ_SOFTFLOAT_SUPPORTED
         NanoAssert(arg->isop(LIR_ii2d));
 #else
         NanoAssert(cpu_has_fpu);
 #endif
@@ -563,35 +563,35 @@ namespace nanojit
         LUI(AT,0x41f0);
         CVT_D_W(fr,ft);            // branch delay slot
         BGEZ(v,here);
         MTC1(v,ft);
 
         TAG("asm_ui2d(ins=%p{%s})", ins, lirNames[ins->opcode()]);
     }
 
-    void Assembler::asm_d2i(LInsp ins)
+    void Assembler::asm_d2i(LIns* ins)
     {
         NanoAssert(cpu_has_fpu);
 
         Register rr = deprecated_prepResultReg(ins, GpRegs);
         Register sr = findRegFor(ins->oprnd1(), FpRegs);
         // trunc.w.d $sr,$sr
         // mfc1 $rr,$sr
         MFC1(rr,sr);
         TRUNC_W_D(sr,sr);
         TAG("asm_d2i(ins=%p{%s})", ins, lirNames[ins->opcode()]);
     }
 
     void Assembler::asm_fop(LIns *ins)
     {
         NanoAssert(cpu_has_fpu);
         if (cpu_has_fpu) {
-            LInsp lhs = ins->oprnd1();
-            LInsp rhs = ins->oprnd2();
+            LIns* lhs = ins->oprnd1();
+            LIns* rhs = ins->oprnd2();
             LOpcode op = ins->opcode();
 
             // rr = ra OP rb
 
             Register rr = deprecated_prepResultReg(ins, FpRegs);
             Register ra = findRegFor(lhs, FpRegs);
             Register rb = (rhs == lhs) ? ra : findRegFor(rhs, FpRegs & ~rmask(ra));
 
@@ -606,17 +606,17 @@ namespace nanojit
         }
         TAG("asm_fop(ins=%p{%s})", ins, lirNames[ins->opcode()]);
     }
 
     void Assembler::asm_fneg(LIns *ins)
     {
         NanoAssert(cpu_has_fpu);
         if (cpu_has_fpu) {
-            LInsp lhs = ins->oprnd1();
+            LIns* lhs = ins->oprnd1();
             Register rr = deprecated_prepResultReg(ins, FpRegs);
             Register sr = ( !lhs->isInReg()
                             ? findRegFor(lhs, FpRegs)
                             : lhs->deprecated_getReg() );
             NEG_D(rr, sr);
         }
         TAG("asm_fneg(ins=%p{%s})", ins, lirNames[ins->opcode()]);
     }
@@ -924,18 +924,18 @@ namespace nanojit
             deprecated_prepResultReg(ins, rmask(savedRegs[a]));
         }
         TAG("asm_param(ins=%p{%s})", ins, lirNames[ins->opcode()]);
     }
 
     void Assembler::asm_arith(LIns *ins)
     {
         LOpcode op = ins->opcode();
-        LInsp lhs = ins->oprnd1();
-        LInsp rhs = ins->oprnd2();
+        LIns* lhs = ins->oprnd1();
+        LIns* rhs = ins->oprnd2();
 
         RegisterMask allow = GpRegs;
 
         // We always need the result register and the first operand register.
         Register rr = deprecated_prepResultReg(ins, allow);
 
         // If this is the last use of lhs in reg, we can re-use the result reg.
         // Else, lhs already has a register assigned.
@@ -1561,17 +1561,17 @@ namespace nanojit
      * - doubles are 64-bit aligned.  both in registers and on the stack.
      *   If the next available argument register is A1, it is skipped
      *   and the double is placed in A2:A3.  If A0:A1 or A2:A3 are not
      *   available, the double is placed on the stack, 64-bit aligned.
      * - 32-bit arguments are placed in registers and 32-bit aligned
      *   on the stack.
      */
     void
-    Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd)
+    Assembler::asm_arg(ArgType ty, LIns* arg, Register& r, Register& fr, int& stkd)
     {
         // The stack offset must always be at least aligned to 4 bytes.
         NanoAssert((stkd & 3) == 0);
 
         if (ty == ARGTYPE_D) {
             // This task is fairly complex and so is delegated to asm_arg_64.
             asm_arg_64(arg, r, fr, stkd);
         } else {
@@ -1586,17 +1586,17 @@ namespace nanojit
            // The o32 ABI calling convention is that if the first argument
            // is not a double, subsequent double values are passed in integer registers.
             fr = r;
             stkd += 4;
         }
     }
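
(Aside: a worked o32 placement example for the rules described above;
signature hypothetical, offsets per the usual o32 convention that the
first four argument words have reserved stack slots:)

    // f(int a, double b, int c)
    //
    //   a -> A0
    //   b -> A2:A3      // A1 is skipped so the double is 64-bit aligned
    //   c -> [sp+16]    // next 32-bit-aligned slot past the four
    //                   // reserved argument words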
 
     void
-    Assembler::asm_call(LInsp ins)
+    Assembler::asm_call(LIns* ins)
     {
         Register rr;
         LOpcode op = ins->opcode();
 
         switch (op) {
         case LIR_calld:
             NanoAssert(cpu_has_fpu);
             rr = FV0;
--- a/js/src/nanojit/NativeMIPS.h
+++ b/js/src/nanojit/NativeMIPS.h
@@ -172,20 +172,20 @@ namespace nanojit
     void asm_ldst64(bool store, Register fr, int offset, Register b);   \
     void asm_store_imm64(LIns *value, int dr, Register rbase);          \
     void asm_li32(Register r, int32_t imm);                             \
     void asm_li_d(Register fr, int32_t msw, int32_t lsw);               \
     void asm_li(Register r, int32_t imm);                               \
     void asm_j(NIns*, bool bdelay);                                     \
     void asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr);        \
     void asm_move(Register d, Register s);                              \
-    void asm_regarg(ArgType ty, LInsp p, Register r);                   \
-    void asm_stkarg(LInsp arg, int stkd);                               \
-    void asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd);     \
-    void asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd);   \
+    void asm_regarg(ArgType ty, LIns* p, Register r);                   \
+    void asm_stkarg(LIns* arg, int stkd);                               \
+    void asm_arg(ArgType ty, LIns* arg, Register& r, Register& fr, int& stkd);     \
+    void asm_arg_64(LIns* arg, Register& r, Register& fr, int& stkd);   \
     NIns *asm_branchtarget(NIns*);                                      \
     NIns *asm_bxx(bool, LOpcode, Register, Register, NIns*);
 
 // REQ: Platform specific declarations to include in RegAlloc class
 #define DECLARE_PLATFORM_REGALLOC()
 
 // REQ:
 #define swapptrs()  do {                                                \
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -725,17 +725,17 @@ namespace nanojit
 
         int param_size = 0;
 
         Register r = R3;
         Register fr = F1;
         for(uint32_t i = 0; i < argc; i++) {
             uint32_t j = argc - i - 1;
             ArgType ty = argTypes[j];
-            LInsp arg = ins->arg(j);
+            LIns* arg = ins->arg(j);
             NanoAssert(ty != ARGTYPE_V);
             if (ty != ARGTYPE_D) {
                 // GP arg
                 if (r <= R10) {
                     asm_regarg(ty, arg, r);
                     r = nextreg(r);
                     param_size += sizeof(void*);
                 } else {
@@ -758,17 +758,17 @@ namespace nanojit
                     TODO(stack_double);
                 }
             }
         }
         if (param_size > max_param_size)
             max_param_size = param_size;
     }
 
-    void Assembler::asm_regarg(ArgType ty, LInsp p, Register r)
+    void Assembler::asm_regarg(ArgType ty, LIns* p, Register r)
     {
         NanoAssert(r != deprecated_UnknownReg);
         NanoAssert(ty != ARGTYPE_V);
         if (ty != ARGTYPE_D)
         {
         #ifdef NANOJIT_64BIT
             if (ty == ARGTYPE_I) {
                 // sign extend 32->64
@@ -842,18 +842,18 @@ namespace nanojit
         else {
             NanoAssert(!quad);
             STW(rr, d, FP);
         }
     }
 
     void Assembler::asm_arith(LIns *ins) {
         LOpcode op = ins->opcode();
-        LInsp lhs = ins->oprnd1();
-        LInsp rhs = ins->oprnd2();
+        LIns* lhs = ins->oprnd1();
+        LIns* rhs = ins->oprnd2();
         RegisterMask allow = GpRegs;
         Register rr = deprecated_prepResultReg(ins, allow);
         Register ra = findRegFor(lhs, GpRegs);
 
         if (rhs->isImmI()) {
             int32_t rhsc = rhs->immI();
             if (isS16(rhsc)) {
                // ppc arith immediate ops sign-extend the imm16 value
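
The isS16() guard above keeps the immediate inside the signed 16-bit range
that PPC d-form arithmetic instructions accept. A minimal sketch of such a
predicate (nanojit defines the real helper in its native headers):

    static inline bool isS16(intptr_t v) {
        return v == (intptr_t)(int16_t)v;   // value survives an int16_t round-trip
    }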
@@ -944,18 +944,18 @@ namespace nanojit
             default:
                 debug_only(outputf("%s",lirNames[op]);)
                 TODO(asm_arith);
         }
     }
 
     void Assembler::asm_fop(LIns *ins) {
         LOpcode op = ins->opcode();
-        LInsp lhs = ins->oprnd1();
-        LInsp rhs = ins->oprnd2();
+        LIns* lhs = ins->oprnd1();
+        LIns* rhs = ins->oprnd2();
         RegisterMask allow = FpRegs;
         Register rr = deprecated_prepResultReg(ins, allow);
         Register ra, rb;
         findRegFor2(allow, lhs, ra, allow, rhs, rb);
         switch (op) {
             case LIR_addd: FADD(rr, ra, rb); break;
             case LIR_subd: FSUB(rr, ra, rb); break;
             case LIR_muld: FMUL(rr, ra, rb); break;
@@ -1006,17 +1006,17 @@ namespace nanojit
         LI(R0, 0);
         LFD(r, d, SP);
         STW(v, d+4, SP);
         STW(R0, d, SP);
         LIS(R0, 0x4330);
     #endif
     }
 
-    void Assembler::asm_d2i(LInsp) {
+    void Assembler::asm_d2i(LIns*) {
         NanoAssertMsg(0, "NJ_F2I_SUPPORTED not yet supported for this architecture");
     }
 
     #if defined NANOJIT_64BIT
     // XXX: this is sub-optimal, see https://bugzilla.mozilla.org/show_bug.cgi?id=540368#c7.
     void Assembler::asm_q2i(LIns *ins) {
         Register rr = deprecated_prepResultReg(ins, GpRegs);
         int d = findMemFor(ins->oprnd1());
@@ -1207,17 +1207,17 @@ namespace nanojit
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             // This jump will call underrunProtect again, but since we're on a new
             // page, nothing will happen.
             br(pc, 0);
         }
     #endif
     }
 
-    void Assembler::asm_cmov(LInsp ins)
+    void Assembler::asm_cmov(LIns* ins)
     {
         LIns* condval = ins->oprnd1();
         LIns* iftrue  = ins->oprnd2();
         LIns* iffalse = ins->oprnd3();
 
     #ifdef NANOJIT_64BIT
         NanoAssert((ins->opcode() == LIR_cmovi  && iftrue->isI() && iffalse->isI()) ||
                    (ins->opcode() == LIR_cmovq  && iftrue->isQ() && iffalse->isQ()));
--- a/js/src/nanojit/NativeSparc.cpp
+++ b/js/src/nanojit/NativeSparc.cpp
@@ -113,17 +113,17 @@ namespace nanojit
     }
 
     void Assembler::asm_align_code() {
         while(uintptr_t(_nIns) & 15) {
             NOP();
         }
     }
 
-    void Assembler::nFragExit(LInsp guard)
+    void Assembler::nFragExit(LIns* guard)
     {
         SideExit* exit = guard->record()->exit;
         Fragment *frag = exit->target;
         GuardRecord *lr;
         if (frag && frag->fragEntry)
             {
                 JMP(frag->fragEntry);
                 lr = 0;
@@ -146,17 +146,17 @@ namespace nanojit
     {
         underrunProtect(12);
         RESTORE(G0, G0, G0); //restore
         JMPLI(I7, 8, G0); //ret
         ORI(O0, 0, I0);
         return  _nIns;
     }
 
-    void Assembler::asm_call(LInsp ins)
+    void Assembler::asm_call(LIns* ins)
     {
         Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
         deprecated_prepResultReg(ins, rmask(retReg));
 
         // Do this after we've handled the call result, so we don't
         // force the call result to be spilled unnecessarily.
 
         evictScratchRegsExcept(0);
@@ -250,17 +250,17 @@ namespace nanojit
         return 0;
     }
 
     bool Assembler::canRemat(LIns* ins)
     {
         return ins->isImmI() || ins->isop(LIR_allocp);
     }
 
-    void Assembler::asm_restore(LInsp i, Register r)
+    void Assembler::asm_restore(LIns* i, Register r)
     {
         underrunProtect(24);
         if (i->isop(LIR_allocp)) {
             ADD(FP, L2, r);
             int32_t d = deprecated_disp(i);
             SET32(d, L2);
         }
         else if (i->isImmI()) {
@@ -336,17 +336,17 @@ namespace nanojit
         NanoAssert(d);
         if (rmask(rr) & FpRegs) {
             STDF32(rr, d, FP);
         } else {
             STW32(rr, d, FP);
         }
     }
 
-    void Assembler::asm_load64(LInsp ins)
+    void Assembler::asm_load64(LIns* ins)
     {
         switch (ins->opcode()) {
             case LIR_ldd:
                 // handled by mainline code below for now
                 break;
             case LIR_ldf2d:
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
@@ -379,17 +379,17 @@ namespace nanojit
         if (rr != deprecated_UnknownReg)
             {
                 NanoAssert(rmask(rr)&FpRegs);
                 _allocator.retire(rr);
                 LDDF32(rb, db, rr);
             }
     }
 
-    void Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
+    void Assembler::asm_store64(LOpcode op, LIns* value, int dr, LIns* base)
     {
         switch (op) {
             case LIR_std:
                 // handled by mainline code below for now
                 break;
             case LIR_std2f:
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
@@ -461,17 +461,17 @@ namespace nanojit
         // put it in an FPU reg just to load & store it.
         Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
         STW32(t, dd+4, rd);
         LDSW32(rs, ds+4, t);
         STW32(t, dd, rd);
         LDSW32(rs, ds, t);
     }
 
-    NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
+    NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
     {
         NIns* at = 0;
         LOpcode condop = cond->opcode();
         NanoAssert(cond->isCmp());
         if (isCmpDOpcode(condop))
             {
                 return asm_branchd(branchOnFalse, cond, targ);
             }
@@ -553,18 +553,18 @@ namespace nanojit
         BVS(0, tt);
         return at;
     }
 
     void Assembler::asm_cmp(LIns *cond)
     {
         underrunProtect(12);
 
-        LInsp lhs = cond->oprnd1();
-        LInsp rhs = cond->oprnd2();
+        LIns* lhs = cond->oprnd1();
+        LIns* rhs = cond->oprnd2();
 
         NanoAssert(lhs->isI() && rhs->isI());
 
         // ready to issue the compare
         if (rhs->isImmI())
             {
                 int c = rhs->immI();
                 Register r = findRegFor(lhs, GpRegs);
@@ -579,17 +579,17 @@ namespace nanojit
         else
             {
                 Register ra, rb;
                 findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
                 SUBCC(ra, rb, G0);
             }
     }
 
-    void Assembler::asm_condd(LInsp ins)
+    void Assembler::asm_condd(LIns* ins)
     {
         // only want certain regs
         Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
         underrunProtect(8);
         LOpcode condop = ins->opcode();
         NanoAssert(isCmpDOpcode(condop));
         if (condop == LIR_eqd)
             MOVFEI(1, 0, 0, 0, r);
@@ -600,17 +600,17 @@ namespace nanojit
         else if (condop == LIR_ged)
             MOVFGEI(1, 0, 0, 0, r);
         else // if (condop == LIR_gtd)
             MOVFGI(1, 0, 0, 0, r);
         ORI(G0, 0, r);
         asm_cmpd(ins);
     }
 
-    void Assembler::asm_cond(LInsp ins)
+    void Assembler::asm_cond(LIns* ins)
     {
         underrunProtect(8);
         // only want certain regs
         LOpcode op = ins->opcode();
         Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
 
         if (op == LIR_eqi)
             MOVEI(1, 1, 0, 0, r);
@@ -629,22 +629,22 @@ namespace nanojit
         else if (op == LIR_gtui)
             MOVGUI(1, 1, 0, 0, r);
         else // if (op == LIR_geui)
             MOVCCI(1, 1, 0, 0, r);
         ORI(G0, 0, r);
         asm_cmp(ins);
     }
 
-    void Assembler::asm_arith(LInsp ins)
+    void Assembler::asm_arith(LIns* ins)
     {
         underrunProtect(28);
         LOpcode op = ins->opcode();
-        LInsp lhs = ins->oprnd1();
-        LInsp rhs = ins->oprnd2();
+        LIns* lhs = ins->oprnd1();
+        LIns* rhs = ins->oprnd2();
 
         Register rb = deprecated_UnknownReg;
         RegisterMask allow = GpRegs;
         bool forceReg = (op == LIR_muli || op == LIR_mulxovi || !rhs->isImmI());
 
         if (lhs != rhs && forceReg)
             {
                 if ((rb = asm_binop_rhs_reg(ins)) == deprecated_UnknownReg) {
@@ -716,17 +716,17 @@ namespace nanojit
                     NanoAssertMsg(0, "Unsupported");
                 SET32(c, L2);
             }
 
         if ( rr != ra )
             ORI(ra, 0, rr);
     }
 
-    void Assembler::asm_neg_not(LInsp ins)
+    void Assembler::asm_neg_not(LIns* ins)
     {
         underrunProtect(8);
         LOpcode op = ins->opcode();
         Register rr = deprecated_prepResultReg(ins, GpRegs);
 
         LIns* lhs = ins->oprnd1();
         // if this is last use of lhs in reg, we can re-use result reg
         // else, lhs already has a register assigned.
@@ -738,17 +738,17 @@ namespace nanojit
             ORN(G0, rr, rr);
         else
             SUB(G0, rr, rr);
 
         if ( rr != ra )
             ORI(ra, 0, rr);
     }
 
-    void Assembler::asm_load32(LInsp ins)
+    void Assembler::asm_load32(LIns* ins)
     {
         underrunProtect(12);
         LOpcode op = ins->opcode();
         LIns* base = ins->oprnd1();
         int d = ins->disp();
         Register rr = deprecated_prepResultReg(ins, GpRegs);
         Register ra = getBaseReg(base, d, GpRegs);
         switch(op) {
@@ -766,17 +766,17 @@ namespace nanojit
                 NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
                 return;
             default:
                 NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
                 return;
         }
     }
 
-    void Assembler::asm_cmov(LInsp ins)
+    void Assembler::asm_cmov(LIns* ins)
     {
         underrunProtect(4);
         LOpcode op = ins->opcode();
         LIns* condval = ins->oprnd1();
         LIns* iftrue  = ins->oprnd2();
         LIns* iffalse = ins->oprnd3();
 
         NanoAssert(condval->isCmp());
@@ -801,35 +801,35 @@ namespace nanojit
             case LIR_geui: MOVCS (iffalsereg, 1, 0, 0, rr); break;
                 debug_only( default: NanoAssert(0); break; )
                     }
         }
         /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
         asm_cmp(condval);
     }
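
Since the assembler emits backwards, the MOVcc selected above executes after
iftrue has been generated into rr. In execution order the sequence computes,
roughly (a sketch with illustrative names, not nanojit API):

    int32_t cmov(bool cond, int32_t iftrue, int32_t iffalse) {
        int32_t rr = iftrue;         // findSpecificRegFor(iftrue, rr)
        if (!cond) rr = iffalse;     // MOVcc on the negated condition
        return rr;
    }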
 
-    void Assembler::asm_param(LInsp ins)
+    void Assembler::asm_param(LIns* ins)
     {
         uint32_t a = ins->paramArg();
         uint32_t kind = ins->paramKind();
         deprecated_prepResultReg(ins, rmask(argRegs[a]));
     }
 
-    void Assembler::asm_immi(LInsp ins)
+    void Assembler::asm_immi(LIns* ins)
     {
         underrunProtect(8);
         Register rr = deprecated_prepResultReg(ins, GpRegs);
         int32_t val = ins->immI();
         if (val == 0)
             XOR(rr, rr, rr);
         else
             SET32(val, rr);
     }
 
-    void Assembler::asm_immd(LInsp ins)
+    void Assembler::asm_immd(LIns* ins)
     {
         underrunProtect(64);
         Register rr = ins->deprecated_getReg();
         if (rr != deprecated_UnknownReg)
             {
                 // @todo -- add special-cases for 0 and 1
                 _allocator.retire(rr);
                 ins->clearReg();
@@ -846,33 +846,33 @@ namespace nanojit
             {
                 STW32(L2, d+4, FP);
                 SET32(ins->immDlo(), L2);
                 STW32(L2, d, FP);
                 SET32(ins->immDhi(), L2);
             }
     }
 
-    void Assembler::asm_fneg(LInsp ins)
+    void Assembler::asm_fneg(LIns* ins)
     {
         underrunProtect(4);
         Register rr = deprecated_prepResultReg(ins, FpRegs);
         LIns* lhs = ins->oprnd1();
 
         // lhs into reg, prefer same reg as result
         // if this is last use of lhs in reg, we can re-use result reg
         // else, lhs already has a different reg assigned
         Register ra = ( !lhs->isInReg()
                       ? findSpecificRegFor(lhs, rr)
                       : findRegFor(lhs, FpRegs) );
 
         FNEGD(ra, rr);
     }
 
-    void Assembler::asm_fop(LInsp ins)
+    void Assembler::asm_fop(LIns* ins)
     {
         underrunProtect(4);
         LOpcode op = ins->opcode();
         LIns *lhs = ins->oprnd1();
         LIns *rhs = ins->oprnd2();
 
         RegisterMask allow = FpRegs;
         Register ra, rb;
@@ -885,27 +885,27 @@ namespace nanojit
             FSUBD(ra, rb, rr);
         else if (op == LIR_muld)
             FMULD(ra, rb, rr);
         else //if (op == LIR_divd)
             FDIVD(ra, rb, rr);
 
     }
 
-    void Assembler::asm_i2d(LInsp ins)
+    void Assembler::asm_i2d(LIns* ins)
     {
         underrunProtect(32);
         // where our result goes
         Register rr = deprecated_prepResultReg(ins, FpRegs);
         int d = findMemFor(ins->oprnd1());
         FITOD(rr, rr);
         LDDF32(FP, d, rr);
     }
 
-    void Assembler::asm_ui2d(LInsp ins)
+    void Assembler::asm_ui2d(LIns* ins)
     {
         underrunProtect(72);
         // where our result goes
         Register rr = deprecated_prepResultReg(ins, FpRegs);
         Register rt = registerAllocTmp(FpRegs & ~(rmask(rr)));
         Register gr = findRegFor(ins->oprnd1(), GpRegs);
         int disp = -8;
 
@@ -914,17 +914,17 @@ namespace nanojit
         LDDF32(SP, disp, rr);
         STWI(G0, disp+4, SP);
         LDDF32(SP, disp, rt);
         STWI(gr, disp+4, SP);
         STWI(G1, disp, SP);
         SETHI(0x43300000, G1);
     }
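
The SETHI(0x43300000, G1) above is the classic exponent trick for
unsigned-to-double conversion: the 64-bit pattern 0x43300000:x, read as an
IEEE-754 double, is exactly 2^52 + x, so subtracting 2^52 (the same pattern
with a zero low word) recovers x. A C sketch of the idea, assuming IEEE-754
doubles (names illustrative):

    #include <stdint.h>
    #include <string.h>

    double ui2d_sketch(uint32_t x) {
        uint64_t hi = UINT64_C(0x43300000) << 32;   // bit pattern of 2^52
        uint64_t bits = hi | x;                     // 2^52 + x, exact
        double big, base;
        memcpy(&big,  &bits, sizeof big);
        memcpy(&base, &hi,   sizeof base);
        return big - base;                          // == (double)x
    }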
 
-    void Assembler::asm_d2i(LInsp ins) {
+    void Assembler::asm_d2i(LIns* ins) {
         LIns *lhs = ins->oprnd1();
         Register rr = prepareResultReg(ins, GpRegs);
         Register ra = findRegFor(lhs, FpRegs);
         int d = findMemFor(ins);
         LDSW32(FP, d, rr);
         STF32(ra, d, FP);
         FDTOI(ra, ra);
     }
@@ -995,17 +995,17 @@ namespace nanojit
 
         FCMPD(rLhs, rRhs);
     }
 
     void Assembler::nativePageReset()
     {
     }
 
-    Register Assembler::asm_binop_rhs_reg(LInsp ins)
+    Register Assembler::asm_binop_rhs_reg(LIns* ins)
     {
         return deprecated_UnknownReg;
     }
 
     void Assembler::nativePageSetup()
     {
         NanoAssert(!_inExit);
         if (!_nIns)
@@ -1027,17 +1027,17 @@ namespace nanojit
         NIns *eip = _nIns;
         // This may be in a normal code chunk or an exit code chunk.
         if (eip - n < codeStart) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             JMP_long_nocheck((intptr_t)eip);
         }
     }
 
-    void Assembler::asm_ret(LInsp ins)
+    void Assembler::asm_ret(LIns* ins)
     {
         genEpilogue();
         releaseRegisters();
         assignSavedRegs();
         LIns *val = ins->oprnd1();
         if (ins->isop(LIR_reti)) {
             findSpecificRegFor(val, retRegs[0]);
         } else {
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -896,17 +896,17 @@ namespace nanojit
         verbose_only( asm_output("[frag entry]"); )
         NIns *fragEntry = _nIns;
         MR(FP, SP); // Establish our own FP.
         PUSHr(FP); // Save caller's FP.
 
         return fragEntry;
     }
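
Read in execution order (the emitter works backwards), this is the
conventional x86 frame setup:

    push ebp        ; PUSHr(FP) -- save caller's FP
    mov  ebp, esp   ; MR(FP, SP) -- establish our own FP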
 
-    void Assembler::nFragExit(LInsp guard)
+    void Assembler::nFragExit(LIns* guard)
     {
         SideExit *exit = guard->record()->exit;
         Fragment *frag = exit->target;
         GuardRecord *lr = 0;
         bool destKnown = (frag && frag->fragEntry);
 
         // Generate jump to epilog and initialize lr.
         // If the guard is LIR_xtbl, use a jump table with epilog in every entry
@@ -950,17 +950,17 @@ namespace nanojit
     NIns *Assembler::genEpilogue()
     {
         RET();
         POPr(FP); // Restore caller's FP.
 
         return  _nIns;
     }
 
-    void Assembler::asm_call(LInsp ins)
+    void Assembler::asm_call(LIns* ins)
     {
         Register rr = ( ins->isop(LIR_calld) ? FST0 : retRegs[0] );
         prepareResultReg(ins, rmask(rr));
 
         evictScratchRegsExcept(rmask(rr));
 
         const CallInfo* call = ins->callInfo();
         // must be signed, not unsigned
@@ -1165,17 +1165,17 @@ namespace nanojit
 
     bool Assembler::canRemat(LIns* ins)
     {
         return ins->isImmAny() || ins->isop(LIR_allocp) || canRematLEA(ins);
     }
 
     // WARNING: the code generated by this function must not affect the
     // condition codes.  See asm_cmp().
-    void Assembler::asm_restore(LInsp ins, Register r)
+    void Assembler::asm_restore(LIns* ins, Register r)
     {
         NanoAssert(ins->getReg() == r);
 
         uint32_t arg;
         uint32_t abi_regcount;
         if (ins->isop(LIR_allocp)) {
             // The value of a LIR_allocp instruction is the address of the
             // stack allocation.  We can rematerialize that from the record we
@@ -1292,17 +1292,17 @@ namespace nanojit
         } else if (rmask(rr) & XmmRegs) {
             SSE_STQ(d, FP, rr);
         } else {
             NanoAssert(rmask(rr) & x87Regs);
             FSTQ((pop?1:0), d, FP);
         }
     }
 
-    void Assembler::asm_load64(LInsp ins)
+    void Assembler::asm_load64(LIns* ins)
     {
         LIns* base = ins->oprnd1();
         int db = ins->disp();
 
         Register rb = getBaseReg(base, db, GpRegs);
 
         // There are two cases:
         // - 'ins' is in FpRegs: load it.
@@ -1362,17 +1362,17 @@ namespace nanojit
                 NanoAssert(0);
                 break;
             }
         }
 
         freeResourcesOf(ins);
     }
 
-    void Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
+    void Assembler::asm_store64(LOpcode op, LIns* value, int dr, LIns* base)
     {
         Register rb = getBaseReg(base, dr, GpRegs);
 
         if (op == LIR_std2f) {
             bool pop = !value->isInReg();
             Register rv = ( pop
                           ? findRegFor(value, _config.i386_sse2 ? XmmRegs : FpRegs)
                           : value->getReg() );
@@ -1443,17 +1443,17 @@ namespace nanojit
             Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
             ST(rd, dd+4, t);
             LD(t, ds+4, rs);
             ST(rd, dd, t);
             LD(t, ds, rs);
         }
     }
 
-    NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
+    NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
     {
         LOpcode condop = cond->opcode();
         NanoAssert(cond->isCmp());
 
         // Handle float conditions separately.
         if (isCmpDOpcode(condop)) {
             return asm_branchd(branchOnFalse, cond, targ);
         }
@@ -1547,18 +1547,18 @@ namespace nanojit
     // such code will be run after the test/cmp but before the instruction
     // that consumes the condition code.  And because this function calls
     // findRegFor() before the test/cmp is generated, and findRegFor() calls
     // asm_restore(), that means that asm_restore() cannot generate code which
     // affects the condition codes.
     //
     void Assembler::asm_cmp(LIns *cond)
     {
-        LInsp lhs = cond->oprnd1();
-        LInsp rhs = cond->oprnd2();
+        LIns* lhs = cond->oprnd1();
+        LIns* rhs = cond->oprnd2();
 
         NanoAssert(lhs->isI() && rhs->isI());
 
         // Ready to issue the compare.
         if (rhs->isImmI()) {
             int c = rhs->immI();
             // findRegFor() can call asm_restore() -- asm_restore() better not
             // disturb the CCs!
@@ -1595,17 +1595,17 @@ namespace nanojit
             }
         } else {
             Register ra, rb;
             findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb);
             CMP(ra, rb);
         }
     }
 
-    void Assembler::asm_condd(LInsp ins)
+    void Assembler::asm_condd(LIns* ins)
     {
         LOpcode opcode = ins->opcode();
         Register r = prepareResultReg(ins, AllowableFlagRegs);
 
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r,r);
 
         if (_config.i386_sse2) {
@@ -1624,17 +1624,17 @@ namespace nanojit
             SETNP(r);
         }
 
         freeResourcesOf(ins);
 
         asm_cmpd(ins);
     }
 
-    void Assembler::asm_cond(LInsp ins)
+    void Assembler::asm_cond(LIns* ins)
     {
         LOpcode op = ins->opcode();
 
         Register r = prepareResultReg(ins, AllowableFlagRegs);
 
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r,r);
         switch (op) {
@@ -1668,28 +1668,28 @@ namespace nanojit
     //   asm:   define lhs into ra
     //   asm:   define rhs into rb
     //          ...
     // * asm:   mov rr, ra
     // * asm:   add rr, rb
     // * asm:   spill rr if necessary
     //          ... some uses of lhs in ra...
     //
-    void Assembler::asm_arith(LInsp ins)
+    void Assembler::asm_arith(LIns* ins)
     {
         LOpcode op = ins->opcode();
 
         // First special case.
         if (op == LIR_modi) {
             asm_div_mod(ins);
             return;
         }
 
-        LInsp lhs = ins->oprnd1();
-        LInsp rhs = ins->oprnd2();
+        LIns* lhs = ins->oprnd1();
+        LIns* rhs = ins->oprnd2();
 
         // Second special case.
         // XXX: bug 547125: don't need this once LEA is used for LIR_addi in all cases below
         if (op == LIR_addi && lhs->isop(LIR_allocp) && rhs->isImmI()) {
             // LIR_addi(LIR_allocp, LIR_immi) -- use lea.
             Register rr = prepareResultReg(ins, GpRegs);
             int d = findMemFor(lhs) + rhs->immI();
 
@@ -1802,26 +1802,26 @@ namespace nanojit
         freeResourcesOf(ins);
         if (!lhs->isInReg()) {
             NanoAssert(ra == rr);
             findSpecificRegForUnallocated(lhs, ra);
         }
     }
 
     // Generates code for a LIR_modi(LIR_divi(divL, divR)) sequence.
-    void Assembler::asm_div_mod(LInsp mod)
+    void Assembler::asm_div_mod(LIns* mod)
     {
-        LInsp div = mod->oprnd1();
+        LIns* div = mod->oprnd1();
 
         // LIR_modi expects the LIR_divi to be near (no interference from the register allocator).
         NanoAssert(mod->isop(LIR_modi));
         NanoAssert(div->isop(LIR_divi));
 
-        LInsp divL = div->oprnd1();
-        LInsp divR = div->oprnd2();
+        LIns* divL = div->oprnd1();
+        LIns* divR = div->oprnd2();
 
         prepareResultReg(mod, rmask(EDX));
         prepareResultReg(div, rmask(EAX));
 
         Register rDivR = findRegFor(divR, (GpRegs & ~(rmask(EAX)|rmask(EDX))));
         Register rDivL = divL->isInReg() ? divL->getReg() : EAX;
 
         DIV(rDivR);
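
The register pinning above follows directly from the x86 divide: the
dividend occupies EDX:EAX, and the instruction leaves the quotient in EAX
and the remainder in EDX, so LIR_divi is forced into EAX and LIR_modi into
EDX. In execution order, roughly (the sign-extension into EDX is emitted
outside this hunk):

    ; mov eax, divL     (unless divL already lives in EAX)
    ; extend into edx   edx:eax = dividend
    ; div rDivR         eax = quotient, edx = remainder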
@@ -1849,17 +1849,17 @@ namespace nanojit
     //
     //   asm:   define lhs into ra
     //          ...
     // * asm:   mov rr, ra
     // * asm:   neg rr
     // * asm:   spill rr if necessary
     //          ... more uses of lhs in ra...
     //
-    void Assembler::asm_neg_not(LInsp ins)
+    void Assembler::asm_neg_not(LIns* ins)
     {
         LIns* lhs = ins->oprnd1();
 
         Register rr = prepareResultReg(ins, GpRegs);
 
         // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
         Register ra = lhs->isInReg() ? lhs->getReg() : rr;
 
@@ -1874,17 +1874,17 @@ namespace nanojit
 
         freeResourcesOf(ins);
         if (!lhs->isInReg()) {
             NanoAssert(ra == rr);
             findSpecificRegForUnallocated(lhs, ra);
         }
     }
 
-    void Assembler::asm_load32(LInsp ins)
+    void Assembler::asm_load32(LIns* ins)
     {
         LOpcode op = ins->opcode();
         LIns* base = ins->oprnd1();
         int32_t d = ins->disp();
 
         Register rr = prepareResultReg(ins, GpRegs);
 
         if (base->isImmI()) {
@@ -2016,17 +2016,17 @@ namespace nanojit
             freeResourcesOf(ins);
             if (!base->isop(LIR_allocp) && !base->isInReg()) {
                 NanoAssert(ra == rr);
                 findSpecificRegForUnallocated(base, ra);
             }
         }
     }
 
-    void Assembler::asm_cmov(LInsp ins)
+    void Assembler::asm_cmov(LIns* ins)
     {
         LIns* condval = ins->oprnd1();
         LIns* iftrue  = ins->oprnd2();
         LIns* iffalse = ins->oprnd3();
 
         NanoAssert(condval->isCmp());
         NanoAssert(ins->isop(LIR_cmovi) && iftrue->isI() && iffalse->isI());
 
@@ -2061,17 +2061,17 @@ namespace nanojit
         if (!iftrue->isInReg()) {
             NanoAssert(rt == rr);
             findSpecificRegForUnallocated(iftrue, rr);
         }
 
         asm_cmp(condval);
     }
 
-    void Assembler::asm_param(LInsp ins)
+    void Assembler::asm_param(LIns* ins)
     {
         uint32_t arg = ins->paramArg();
         uint32_t kind = ins->paramKind();
         if (kind == 0) {
             // ordinary param
             AbiKind abi = _thisfrag->lirbuf->abi;
             uint32_t abi_regcount = max_abi_regs[abi];
             // argRegs must have as many elements as the largest argument register
@@ -2092,17 +2092,17 @@ namespace nanojit
         } else {
             // Saved param.
             prepareResultReg(ins, rmask(savedRegs[arg]));
             // No code to generate.
         }
         freeResourcesOf(ins);
     }
 
-    void Assembler::asm_immi(LInsp ins)
+    void Assembler::asm_immi(LIns* ins)
     {
         Register rr = prepareResultReg(ins, GpRegs);
 
         asm_immi(rr, ins->immI(), /*canClobberCCs*/true);
 
         freeResourcesOf(ins);
     }
 
@@ -2148,17 +2148,17 @@ namespace nanojit
                 FLD1();
             } else {
                 const uint64_t* p = findImmDFromPool(q);
                 FLDQdm((const double*)p);
             }
         }
     }
 
-    void Assembler::asm_immd(LInsp ins)
+    void Assembler::asm_immd(LIns* ins)
     {
         NanoAssert(ins->isImmD());
         if (ins->isInReg()) {
             Register rr = ins->getReg();
             NanoAssert(rmask(rr) & FpRegs);
             asm_immd(rr, ins->immDasQ(), ins->immD(), /*canClobberCCs*/true);
         } else {
             // Do nothing, will be rematerialized when necessary.
@@ -2180,17 +2180,17 @@ namespace nanojit
         return negateMask;
     }
 
     static uint32_t *negateMask = negateMaskInit();
 #else
     static const AVMPLUS_ALIGN16(uint32_t) negateMask[] = {0,0x80000000,0,0};
 #endif
 
-    void Assembler::asm_fneg(LInsp ins)
+    void Assembler::asm_fneg(LIns* ins)
     {
         LIns *lhs = ins->oprnd1();
 
         if (_config.i386_sse2) {
             Register rr = prepareResultReg(ins, XmmRegs);
 
             // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
             Register ra;
@@ -2225,17 +2225,17 @@ namespace nanojit
             FCHS();
 
             freeResourcesOf(ins);
             if (!lhs->isInReg())
                 findSpecificRegForUnallocated(lhs, FST0);
         }
     }
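
The negateMask above encodes a single idea: flipping bit 63 of an IEEE-754
double negates it, which the SSE path does with one XORPS. A C sketch of
the same bit trick (illustrative names, IEEE-754 assumed):

    #include <stdint.h>
    #include <string.h>

    double fneg_sketch(double x) {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);
        bits ^= UINT64_C(1) << 63;      // what XORPS with negateMask does
        memcpy(&x, &bits, sizeof x);
        return x;
    }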
 
-    void Assembler::asm_arg(ArgType ty, LInsp ins, Register r, int32_t& stkd)
+    void Assembler::asm_arg(ArgType ty, LIns* ins, Register r, int32_t& stkd)
     {
         // If 'r' is known, then that's the register we have to put 'ins'
         // into.
 
         if (ty == ARGTYPE_I || ty == ARGTYPE_UI) {
             if (r != UnspecifiedReg) {
                 if (ins->isImmI()) {
                     // Rematerialize the constant.
@@ -2266,17 +2266,17 @@ namespace nanojit
             }
 
         } else {
             NanoAssert(ty == ARGTYPE_D);
             asm_farg(ins, stkd);
         }
     }
 
-    void Assembler::asm_pusharg(LInsp ins)
+    void Assembler::asm_pusharg(LIns* ins)
     {
         // arg goes on stack
         if (!ins->isExtant() && ins->isImmI())
         {
             PUSHi(ins->immI());    // small const we push directly
         }
         else if (!ins->isExtant() || ins->isop(LIR_allocp))
         {
@@ -2289,17 +2289,17 @@ namespace nanojit
         }
         else
         {
             NanoAssert(ins->isInAr());
             PUSHm(arDisp(ins), FP);
         }
     }
 
-    void Assembler::asm_stkarg(LInsp ins, int32_t& stkd)
+    void Assembler::asm_stkarg(LIns* ins, int32_t& stkd)
     {
         // arg goes on stack
         if (!ins->isExtant() && ins->isImmI())
         {
             // small const we push directly
             STi(SP, stkd, ins->immI());
         }
         else {
@@ -2309,17 +2309,17 @@ namespace nanojit
             else
                 ra = ins->getReg();
             ST(SP, stkd, ra);
         }
 
         stkd += sizeof(int32_t);
     }
 
-    void Assembler::asm_farg(LInsp ins, int32_t& stkd)
+    void Assembler::asm_farg(LIns* ins, int32_t& stkd)
     {
         NanoAssert(ins->isD());
         Register r = findRegFor(ins, FpRegs);
         if (rmask(r) & XmmRegs) {
             SSE_STQ(stkd, SP, r);
         } else {
             FSTPQ(stkd, SP);
 
@@ -2338,17 +2338,17 @@ namespace nanojit
             evict(ins);
         }
         if (!_config.i386_fixed_esp)
             SUBi(ESP, 8);
 
         stkd += sizeof(double);
     }
 
-    void Assembler::asm_fop(LInsp ins)
+    void Assembler::asm_fop(LIns* ins)
     {
         LOpcode op = ins->opcode();
         if (_config.i386_sse2)
         {
             LIns *lhs = ins->oprnd1();
             LIns *rhs = ins->oprnd2();
 
             RegisterMask allow = XmmRegs;
@@ -2432,17 +2432,17 @@ namespace nanojit
             }
             freeResourcesOf(ins);
             if (!lhs->isInReg()) {
                 findSpecificRegForUnallocated(lhs, FST0);
             }
         }
     }
 
-    void Assembler::asm_i2d(LInsp ins)
+    void Assembler::asm_i2d(LIns* ins)
     {
         LIns* lhs = ins->oprnd1();
 
         Register rr = prepareResultReg(ins, FpRegs);
         if (rmask(rr) & XmmRegs) {
             // todo support int value in memory
             Register ra = findRegFor(lhs, GpRegs);
             SSE_CVTSI2SD(rr, ra);
@@ -2450,17 +2450,17 @@ namespace nanojit
         } else {
             int d = findMemFor(lhs);
             FILD(d, FP);
         }
 
         freeResourcesOf(ins);
     }
 
-    void Assembler::asm_ui2d(LInsp ins)
+    void Assembler::asm_ui2d(LIns* ins)
     {
         LIns* lhs = ins->oprnd1();
 
         Register rr = prepareResultReg(ins, FpRegs);
         if (rmask(rr) & XmmRegs) {
             Register rt = registerAllocTmp(GpRegs);
 
             // Technique inspired by gcc disassembly.  Edwin explains it:
@@ -2504,17 +2504,17 @@ namespace nanojit
             FILDQ(disp, base);
             STi(base, disp+4, 0);   // high 32 bits = 0
             ST(base, disp, ra);     // low 32 bits = unsigned value
         }
 
         freeResourcesOf(ins);
     }
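
The x87 fallback above leans on fild reading a signed 64-bit integer:
zero-extending the 32-bit value into the 8-byte slot (high word stored as
zero) makes the signed and unsigned interpretations coincide. Equivalent C,
as a sketch:

    double ui2d_x87_sketch(uint32_t x) {
        int64_t wide = (int64_t)(uint64_t)x;   // high 32 bits are zero
        return (double)wide;                   // what FILDQ computes
    }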
 
-    void Assembler::asm_d2i(LInsp ins)
+    void Assembler::asm_d2i(LIns* ins)
     {
         LIns *lhs = ins->oprnd1();
 
         if (_config.i386_sse2) {
             Register rr = prepareResultReg(ins, GpRegs);
             Register ra = findRegFor(lhs, XmmRegs);
             SSE_CVTSD2SI(rr, ra);
         } else {
@@ -2775,17 +2775,17 @@ namespace nanojit
         NanoAssertMsg(n<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
         // This may be in a normal code chunk or an exit code chunk.
         if (eip - n < codeStart) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             JMP(eip);
         }
     }
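
This check is the whole contract of underrunProtect on this backend: _nIns
moves downward, so emitting n more bytes must not cross codeStart; if it
would, a fresh chunk is allocated and a JMP from the new chunk targets the
code already emitted at eip. The invariant it establishes, as a sketch:

    // after underrunProtect(n), the next n bytes can be emitted safely:
    NanoAssert(_nIns - n >= codeStart);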
 
-    void Assembler::asm_ret(LInsp ins)
+    void Assembler::asm_ret(LIns* ins)
     {
         genEpilogue();
 
         // Restore ESP from EBP, undoing SUBi(SP,amt) in the prologue
         MR(SP,FP);
 
         releaseRegisters();
         assignSavedRegs();
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -181,20 +181,20 @@ namespace nanojit
 
     #define DECLARE_PLATFORM_ASSEMBLER()    \
         const static Register argRegs[2], retRegs[2]; \
         int32_t max_stk_args;\
         void nativePageReset();\
         void nativePageSetup();\
         void underrunProtect(int);\
         void asm_immi(Register r, int32_t val, bool canClobberCCs);\
-        void asm_stkarg(LInsp p, int32_t& stkd);\
-        void asm_farg(LInsp, int32_t& stkd);\
-        void asm_arg(ArgType ty, LInsp p, Register r, int32_t& stkd);\
-        void asm_pusharg(LInsp);\
+        void asm_stkarg(LIns* p, int32_t& stkd);\
+        void asm_farg(LIns*, int32_t& stkd);\
+        void asm_arg(ArgType ty, LIns* p, Register r, int32_t& stkd);\
+        void asm_pusharg(LIns*);\
         void asm_cmpd(LIns *cond);\
         NIns* asm_branchd(bool, LIns*, NIns*);\
         void asm_cmp(LIns *cond); \
         void asm_div_mod(LIns *cond); \
         void asm_load(int d, Register r); \
         void asm_immd(Register r, uint64_t q, double d, bool canClobberCCs); \
         void IMM8(int32_t i) { \
             _nIns -= 1; \