Bug 572798 - add LIR_callv (r=nnethercote)
authorEdwin Smith <edwsmith@adobe.com>
Thu, 23 Sep 2010 15:08:15 -0400
changeset 54741 df05d03542f711f92a9ea2faab04cd2b3f2af82c
parent 54740 f9a5fbc24118fdf25fe62ae443ac9dff88e03539
child 54742 53354d177d97ce322d1b3bcc53adea6bb421c85c
push id 16011
push user rsayre@mozilla.com
push date Wed, 29 Sep 2010 06:01:57 +0000
treeherder mozilla-central@d7e659b4f80c
reviewers nnethercote
bugs 572798
milestone 2.0b7pre
Adds LIR_callv for calls to helper functions that return void. Added a ValidateWriter check that LIR_callv is paired with ARGTYPE_V, plus checks for the other obvious pairings, plus a check that LIR_callv must not call an _isPure=1 function. getCallOpcode() returns LIR_callv for ARGTYPE_V, as expected. This means that some calls now return LTy_V from LIns::retType(), as expected but unlike before; this in turn causes a ValidateWriter error if any instruction uses the result of a void call (which is, after all, the point). Each backend was modified so that it neither assigns a register to nor saves the result of a void call.
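As a minimal sketch (not part of the changeset) of how a nanojit client would exercise the new opcode: the CallInfo initializer below follows the layout of the CI/FN macros in lirasm.cpp, an in-scope LirWriter* lir is assumed, and printi is the void helper this patch adds to lirasm.

    // Declare the void-returning helper and describe it to nanojit.
    void printi(int x);
    static const CallInfo ci_printi = {
        (uintptr_t)&printi,
        CallInfo::typeSig1(ARGTYPE_V, ARGTYPE_I),  // void return, one int arg
        ABI_CDECL,
        /*_isPure*/ 0,              // a pure void call would be dead code;
                                    // ValidateWriter rejects that pairing
        ACCSET_STORE_ANY
        verbose_only(, "printi")
    };

    // Emit the call; getCallOpcode() selects LIR_callv for ARGTYPE_V.
    LIns* args[] = { lir->insImmI(42) };
    LIns* call = lir->insCall(&ci_printi, args);
    // call->retType() is LTy_V, so any instruction that consumes this
    // result now trips a ValidateWriter error.

The callv.in test added below drives the same path from lirasm.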
js/src/lirasm/lirasm.cpp
js/src/lirasm/tests/callv.in
js/src/lirasm/tests/callv.out
js/src/nanojit/Assembler.cpp
js/src/nanojit/LIR.cpp
js/src/nanojit/LIR.h
js/src/nanojit/LIRopcode.tbl
js/src/nanojit/NativeARM.cpp
js/src/nanojit/NativeMIPS.cpp
js/src/nanojit/NativePPC.cpp
js/src/nanojit/NativeSH4.cpp
js/src/nanojit/NativeSparc.cpp
js/src/nanojit/NativeX64.cpp
js/src/nanojit/Nativei386.cpp
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -389,28 +389,34 @@ double callid1(int i, double x, double y
 double callid2(int i, int j, int k, double x) {
     return x / (double)(i + j + k);
 }
 
 double callid3(int i, int j, double x, int k, double y, double z) {
     return (x + y + z) / (double)(i + j + k);
 }
 
+// Simple print function for testing void calls.
+void printi(int x) {
+    cout << x << endl;
+}
+
 Function functions[] = {
-    FN(puts,   CallInfo::typeSig1(ARGTYPE_I, ARGTYPE_P)),
-    FN(sin,    CallInfo::typeSig1(ARGTYPE_D, ARGTYPE_D)),
-    FN(malloc, CallInfo::typeSig1(ARGTYPE_P, ARGTYPE_P)),
-    FN(free,   CallInfo::typeSig1(ARGTYPE_V, ARGTYPE_P)),
-    FN(calld1, CallInfo::typeSig8(ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D,
-                                  ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D)),
+    FN(puts,    CallInfo::typeSig1(ARGTYPE_I, ARGTYPE_P)),
+    FN(sin,     CallInfo::typeSig1(ARGTYPE_D, ARGTYPE_D)),
+    FN(malloc,  CallInfo::typeSig1(ARGTYPE_P, ARGTYPE_P)),
+    FN(free,    CallInfo::typeSig1(ARGTYPE_V, ARGTYPE_P)),
+    FN(calld1,  CallInfo::typeSig8(ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D,
+                                   ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D, ARGTYPE_D)),
     FN(callid1, CallInfo::typeSig6(ARGTYPE_D, ARGTYPE_I, ARGTYPE_D, ARGTYPE_D,
                                    ARGTYPE_I, ARGTYPE_I, ARGTYPE_D)),
     FN(callid2, CallInfo::typeSig4(ARGTYPE_D, ARGTYPE_I, ARGTYPE_I, ARGTYPE_I, ARGTYPE_D)),
     FN(callid3, CallInfo::typeSig6(ARGTYPE_D, ARGTYPE_I, ARGTYPE_I, ARGTYPE_D,
                                    ARGTYPE_I, ARGTYPE_D, ARGTYPE_D)),
+    FN(printi,  CallInfo::typeSig1(ARGTYPE_V, ARGTYPE_I)),
 };
 
 template<typename out, typename in> out
 lexical_cast(in arg)
 {
     stringstream tmp;
     out ret;
     if ((tmp << arg && tmp >> ret && tmp.eof()))
@@ -734,22 +740,23 @@ FragmentAssembler::assemble_call(const s
             if      (args[i]->isD()) argTypes[i] = ARGTYPE_D;
 #ifdef NANOJIT_64BIT
             else if (args[i]->isQ()) argTypes[i] = ARGTYPE_Q;
 #endif
             else                     argTypes[i] = ARGTYPE_I;
         }
 
         // Select return type from opcode.
-        ArgType retType = ARGTYPE_V;
-        if      (mOpcode == LIR_calli) retType = ARGTYPE_I;
-        else if (mOpcode == LIR_calld) retType = ARGTYPE_D;
+        ArgType retType = ARGTYPE_P;
+        if      (mOpcode == LIR_callv) retType = ARGTYPE_V;
+        else if (mOpcode == LIR_calli) retType = ARGTYPE_I;
 #ifdef NANOJIT_64BIT
         else if (mOpcode == LIR_callq) retType = ARGTYPE_Q;
 #endif
+        else if (mOpcode == LIR_calld) retType = ARGTYPE_D;
         else                           nyi("callh");
         ci->_typesig = CallInfo::typeSigN(retType, argc, argTypes);
     }
 
     return mLir->insCall(ci, args);
 }
 
 LIns *
@@ -1186,20 +1193,21 @@ FragmentAssembler::assembleFragment(LirT
           case LIR_addjovi:
           case LIR_subjovi:
           case LIR_muljovi:
           CASE64(LIR_addjovq:)
           CASE64(LIR_subjovq:)
             ins = assemble_jump_jov();
             break;
 
+          case LIR_callv:
           case LIR_calli:
           CASESF(LIR_hcalli:)
+          CASE64(LIR_callq:)
           case LIR_calld:
-          CASE64(LIR_callq:)
             ins = assemble_call(op);
             break;
 
           case LIR_reti:
             ins = assemble_ret(RT_INT);
             break;
 
 #ifdef NANOJIT_64BIT
new file mode 100644
--- /dev/null
+++ b/js/src/lirasm/tests/callv.in
@@ -0,0 +1,5 @@
+; test call to void function
+
+forty_two = immi 42
+callv printi cdecl forty_two
+reti forty_two
new file mode 100644
--- /dev/null
+++ b/js/src/lirasm/tests/callv.out
@@ -0,0 +1,2 @@
+42
+Output is: 42
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -1951,16 +1951,17 @@ namespace nanojit
                     countlir_alu();
                     ins->oprnd1()->setResultLive();
                     ins->oprnd2()->setResultLive();
                     if (ins->isExtant()) {
                         asm_cond(ins);
                     }
                     break;
 
+                case LIR_callv:
                 case LIR_calli:
                 CASE64(LIR_callq:)
                 case LIR_calld:
                     countlir_call();
                     for (int i = 0, argc = ins->argc(); i < argc; i++)
                         ins->arg(i)->setResultLive();
                     // It must be impure or pure-and-extant -- it couldn't be
                     // pure-and-not-extant, because there's no way the codegen
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -1478,19 +1478,20 @@ namespace nanojit
                 case LIR_cmovi:
                 CASE64(LIR_cmovq:)
                 case LIR_cmovd:
                     live.add(ins->oprnd1(), 0);
                     live.add(ins->oprnd2(), 0);
                     live.add(ins->oprnd3(), 0);
                     break;
 
+                case LIR_callv:
                 case LIR_calli:
+                CASE64(LIR_callq:)
                 case LIR_calld:
-                CASE64(LIR_callq:)
                     for (int i = 0, argc = ins->argc(); i < argc; i++)
                         live.add(ins->arg(i), 0);
                     break;
 
                 default:
                     NanoAssertMsgf(0, "unhandled opcode: %d", ins->opcode());
                     break;
                 }
@@ -1734,19 +1735,20 @@ namespace nanojit
                 VMPI_snprintf(s, n, "%s = %s %d", formatRef(&b1, i), lirNames[op], i->size());
                 break;
 
             case LIR_start:
             case LIR_regfence:
                 VMPI_snprintf(s, n, "%s", lirNames[op]);
                 break;
 
+            case LIR_callv:
             case LIR_calli:
-            case LIR_calld:
-            CASE64(LIR_callq:) {
+            CASE64(LIR_callq:)
+            case LIR_calld: {
                 const CallInfo* call = i->callInfo();
                 int32_t argc = i->argc();
                 int32_t m = int32_t(n);     // Windows doesn't have 'ssize_t'
                 if (call->isIndirect())
                     m -= VMPI_snprintf(s, m, "%s = %s%s [%s] ( ", formatRef(&b1, i), lirNames[op],
                                        formatAccSet(&b2, call->_storeAccSet),
                                        formatRef(&b3, i->arg(--argc)));
                 else
@@ -3389,24 +3391,53 @@ namespace nanojit
     }
 #endif
 
     LIns* ValidateWriter::insImmD(double d)
     {
         return out->insImmD(d);
     }
 
+    static const char* argtypeNames[] = {
+        "void",     // ARGTYPE_V  = 0
+        "int32_t",  // ARGTYPE_I  = 1
+        "uint32_t", // ARGTYPE_UI = 2
+        "uint64_t", // ARGTYPE_Q  = 3
+        "double"    // ARGTYPE_D  = 4
+    };
+
     LIns* ValidateWriter::insCall(const CallInfo *ci, LIns* args0[])
     {
         ArgType argTypes[MAXARGS];
         uint32_t nArgs = ci->getArgTypes(argTypes);
         LTy formals[MAXARGS];
         LIns* args[MAXARGS];    // in left-to-right order, unlike args0[]
 
         LOpcode op = getCallOpcode(ci);
+        ArgType retType = ci->returnType();
+
+        if ((op == LIR_callv) != (retType == ARGTYPE_V) ||
+            (op == LIR_calli) != (retType == ARGTYPE_UI ||
+                                  retType == ARGTYPE_I) ||
+#ifdef NANOJIT_64BIT
+            (op == LIR_callq) != (retType == ARGTYPE_Q) ||
+#endif
+            (op == LIR_calld) != (retType == ARGTYPE_D)) {
+            NanoAssertMsgf(0,
+                "LIR structure error (%s): return type mismatch: opcode %s with %s return type",
+                whereInPipeline, lirNames[op], argtypeNames[retType]);
+        }
+
+        if (op == LIR_callv && ci->_isPure) {
+            // Since nobody can use the result of a void call, any pure call
+            // would just be dead.  This is probably a mistake.
+            NanoAssertMsgf(0,
+                "LIR structure error (%s): LIR_callv must only be used with nonpure functions.",
+                whereInPipeline);
+        }
 
         if (ci->_isPure && ci->_storeAccSet != ACCSET_NONE)
             errorAccSet(ci->_name, ci->_storeAccSet, "it should be ACCSET_NONE for pure functions");
 
         // This loop iterates over the args from right-to-left (because arg()
         // and getArgTypes() use right-to-left order), but puts the results
         // into formals[] and args[] in left-to-right order so that arg
         // numbers in error messages make sense to the user.
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -504,23 +504,23 @@ namespace nanojit
     inline LOpcode invertCmpDOpcode(LOpcode op) {
         NanoAssert(isCmpDOpcode(op));
         return LOpcode(op ^ 1);
     }
 
     inline LOpcode getCallOpcode(const CallInfo* ci) {
         LOpcode op = LIR_callp;
         switch (ci->returnType()) {
-        case ARGTYPE_V: op = LIR_callp; break;
+        case ARGTYPE_V: op = LIR_callv; break;
         case ARGTYPE_I:
         case ARGTYPE_UI: op = LIR_calli; break;
-        case ARGTYPE_D: op = LIR_calld; break;
 #ifdef NANOJIT_64BIT
         case ARGTYPE_Q: op = LIR_callq; break;
 #endif
+        case ARGTYPE_D: op = LIR_calld; break;
         default:        NanoAssert(0);  break;
         }
         return op;
     }
 
     LOpcode arithOpcodeD2I(LOpcode op);
 #ifdef NANOJIT_64BIT
     LOpcode cmpOpcodeI2Q(LOpcode op);
@@ -922,17 +922,18 @@ namespace nanojit
         }
         bool isRet() const {
             return isRetOpcode(opcode());
         }
         bool isCmp() const {
             return isCmpOpcode(opcode());
         }
         bool isCall() const {
-            return isop(LIR_calli) ||
+            return isop(LIR_callv) ||
+                   isop(LIR_calli) ||
 #if defined NANOJIT_64BIT
                    isop(LIR_callq) ||
 #endif
                    isop(LIR_calld);
         }
         bool isCmov() const {
             return isCmovOpcode(opcode());
         }
--- a/js/src/nanojit/LIRopcode.tbl
+++ b/js/src/nanojit/LIRopcode.tbl
@@ -156,21 +156,20 @@ OP___(std,      29, St,   V,    0)  // s
 OP___(std2f,    30, St,   V,    0)  // store double as a float (losing precision)
 
 OP_UN(31)
 OP_UN(32)
 
 //---------------------------------------------------------------------------
 // Calls
 //---------------------------------------------------------------------------
-OP___(calli,    33, C,    I,   -1)  // call subroutine that returns an int
-OP_64(callq,    34, C,    Q,   -1)  // call subroutine that returns a quad
-OP___(calld,    35, C,    D,   -1)  // call subroutine that returns a double
-
-OP_UN(36)
+OP___(callv,    33, C,    V,   -1)  // call subroutine that returns void
+OP___(calli,    34, C,    I,   -1)  // call subroutine that returns an int
+OP_64(callq,    35, C,    Q,   -1)  // call subroutine that returns a quad
+OP___(calld,    36, C,    D,   -1)  // call subroutine that returns a double
 
 //---------------------------------------------------------------------------
 // Branches and labels
 //---------------------------------------------------------------------------
 // 'jt' and 'jf' must be adjacent so that (op ^ 1) gives the opposite one.
 // Static assertions in LIR.h check this requirement.
 OP___(j,        37, Op2,  V,    0)  // jump always
 OP___(jt,       38, Op2,  V,    0)  // jump if true
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -813,17 +813,17 @@ Assembler::asm_call(LIns* ins)
          * restoring of spilled data into R0 is done via a call to
          * prepareResultReg(R0) in the other branch of this if-then-else,
          * meaning that evictScratchRegsExcept() will not modify R0. However,
          * prepareResultReg is not aware of the concept of using a register
          * pair (R0,R1) for the result of a single operation, so it can only be
          * used here with the ultimate VFP register, and not R0/R1, which
          * potentially allows for R0/R1 to get corrupted as described.
          */
-    } else {
+    } else if (!ins->isop(LIR_callv)) {
         prepareResultReg(ins, rmask(retRegs[0]));
         // Immediately free the resources as we need to re-use the register for
         // the arguments.
         freeResourcesOf(ins);
     }
 
     // Do this after we've handled the call result, so we don't
     // force the call result to be spilled unnecessarily.
@@ -832,17 +832,17 @@ Assembler::asm_call(LIns* ins)
 
     const CallInfo* ci = ins->callInfo();
     ArgType argTypes[MAXARGS];
     uint32_t argc = ci->getArgTypes(argTypes);
     bool indirect = ci->isIndirect();
 
     // If we aren't using VFP, assert that the LIR operation is an integer
     // function call.
-    NanoAssert(ARM_VFP || ins->isop(LIR_calli));
+    NanoAssert(ARM_VFP || ins->isop(LIR_callv) || ins->isop(LIR_calli));
 
     // If we're using VFP, and the return type is a double, it'll come back in
     // R0/R1. We need to either place it in the result fp reg, or store it.
     // See comments above for more details as to why this is necessary here
     // for floating point calls, but not for integer calls.
     if (ARM_VFP && ins->isExtant()) {
         // If the result size is a floating-point value, treat the result
         // specially, as described previously.
--- a/js/src/nanojit/NativeMIPS.cpp
+++ b/js/src/nanojit/NativeMIPS.cpp
@@ -1707,37 +1707,38 @@ namespace nanojit
             fr = r;
             stkd += 4;
         }
     }
 
     void
     Assembler::asm_call(LIns* ins)
     {
-        Register rr;
-        LOpcode op = ins->opcode();
+        if (!ins->isop(LIR_callv)) {
+            Register rr;
+            LOpcode op = ins->opcode();
 
-        switch (op) {
-        case LIR_calld:
-            NanoAssert(cpu_has_fpu);
-            rr = FV0;
-            break;
-        case LIR_calli:
-            rr = retRegs[0];
-            break;
-        default:
-            BADOPCODE(op);
-            return;
+            switch (op) {
+            case LIR_calli:
+                rr = retRegs[0];
+                break;
+            case LIR_calld:
+                NanoAssert(cpu_has_fpu);
+                rr = FV0;
+                break;
+            default:
+                BADOPCODE(op);
+                return;
+            }
+
+            deprecated_prepResultReg(ins, rmask(rr));
         }
 
-        deprecated_prepResultReg(ins, rmask(rr));
-
         // Do this after we've handled the call result, so we don't
         // force the call result to be spilled unnecessarily.
-
         evictScratchRegsExcept(0);
 
         const CallInfo* ci = ins->callInfo();
         ArgType argTypes[MAXARGS];
         uint32_t argc = ci->getArgTypes(argTypes);
         bool indirect = ci->isIndirect();
 
         // FIXME: Put one of the argument moves into the BDS slot
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -690,22 +690,23 @@ namespace nanojit
         }
         else {
             // saved param
             deprecated_prepResultReg(ins, rmask(savedRegs[a]));
         }
     }
 
     void Assembler::asm_call(LIns *ins) {
-        Register retReg = ( ins->isop(LIR_calld) ? F1 : retRegs[0] );
-        deprecated_prepResultReg(ins, rmask(retReg));
+        if (!ins->isop(LIR_callv)) {
+            Register retReg = ( ins->isop(LIR_calld) ? F1 : retRegs[0] );
+            deprecated_prepResultReg(ins, rmask(retReg));
+        }
 
         // Do this after we've handled the call result, so we don't
         // force the call result to be spilled unnecessarily.
-
         evictScratchRegsExcept(0);
 
         const CallInfo* call = ins->callInfo();
         ArgType argTypes[MAXARGS];
         uint32_t argc = call->getArgTypes(argTypes);
 
         bool indirect;
         if (!(indirect = call->isIndirect())) {
--- a/js/src/nanojit/NativeSH4.cpp
+++ b/js/src/nanojit/NativeSH4.cpp
@@ -34,16 +34,18 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "nanojit.h"
 
+#if defined FEATURE_NANOJIT && defined NANOJIT_SH4
+
 namespace nanojit
 {
     const int      Assembler::NumArgRegs  = 4;
     const Register Assembler::argRegs[]   = { R4, R5, R6, R7 };
     const Register Assembler::retRegs[]   = { R0, R1 };
     const Register Assembler::savedRegs[] = { R8, R9, R10, R11, R12, R13 };
 
     const int      Assembler::NumArgDregs = 4;
@@ -2029,24 +2031,26 @@ namespace nanojit
             Register reg = findRegFor(arg, FpRegs);
 
             asm_store64d(reg, used_stack, SP);
             asm_load64d(offset, FP, reg);
         }
     }
 
     void Assembler::asm_call(LIns *inst) {
-        Register result_reg = inst->isop(LIR_calld) ? retDregs[0] : retRegs[0];
-
-        prepareResultReg(inst, rmask(result_reg));
-
-        // Do this after we've handled the call result, so we don't
-        // force the call result to be spilled unnecessarily.
-        evictScratchRegsExcept(rmask(result_reg));
-
+        if (!inst->isop(LIR_callv)) {
+            Register result_reg = inst->isop(LIR_calld) ? retDregs[0] : retRegs[0];
+            prepareResultReg(inst, rmask(result_reg));
+
+            // Do this after we've handled the call result, so we don't
+            // force the call result to be spilled unnecessarily.
+            evictScratchRegsExcept(rmask(result_reg));
+        } else {
+            evictScratchRegsExcept(0);
+        }
         ArgType types[MAXARGS];
         const CallInfo* call = inst->callInfo();
         uint32_t argc = call->getArgTypes(types);
         bool indirect = call->isIndirect();
 
         // Emit the branch.
         if (!indirect) {
             NIns *target = (NIns*)call->_address;
@@ -3226,8 +3230,9 @@ namespace nanojit
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
 
             // This jump will call underrunProtect again, but since we're on
             // a new page large enough to host its code, nothing will happen.
             JMP(pc, true);
         }
     }
 }
+#endif // FEATURE_NANOJIT && NANOJIT_SH4
--- a/js/src/nanojit/NativeSparc.cpp
+++ b/js/src/nanojit/NativeSparc.cpp
@@ -148,33 +148,35 @@ namespace nanojit
         RESTORE(G0, G0, G0); //restore
         JMPLI(I7, 8, G0); //ret
         ORI(O0, 0, I0);
         return  _nIns;
     }
 
     void Assembler::asm_call(LIns* ins)
     {
-        Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
-        deprecated_prepResultReg(ins, rmask(retReg));
+        if (!ins->isop(LIR_callv)) {
+            Register retReg = ( ins->isop(LIR_calld) ? F0 : retRegs[0] );
+            deprecated_prepResultReg(ins, rmask(retReg));
+        }
 
         // Do this after we've handled the call result, so we don't
         // force the call result to be spilled unnecessarily.
-
         evictScratchRegsExcept(0);
 
         const CallInfo* ci = ins->callInfo();
 
         underrunProtect(8);
         NOP();
 
         ArgType argTypes[MAXARGS];
         uint32_t argc = ci->getArgTypes(argTypes);
 
-        NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
+        NanoAssert(ins->isop(LIR_callv) || ins->isop(LIR_callp) ||
+                   ins->isop(LIR_calld));
         verbose_only(if (_logc->lcbits & LC_Native)
                      outputf("        %p:", _nIns);
                      )
         bool indirect = ci->isIndirect();
         if (!indirect) {
             CALL(ci);
         }
         else {
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -898,20 +898,23 @@ namespace nanojit
             NEG(rr);
         if (rr != ra)
             MR(rr, ra);
 
         endOpRegs(ins, rr, ra);
     }
 
     void Assembler::asm_call(LIns *ins) {
-        Register rr = ( ins->isop(LIR_calld) ? XMM0 : retRegs[0] );
-        prepareResultReg(ins, rmask(rr));
-
-        evictScratchRegsExcept(rmask(rr));
+        if (!ins->isop(LIR_callv)) {
+            Register rr = ( ins->isop(LIR_calld) ? XMM0 : retRegs[0] );
+            prepareResultReg(ins, rmask(rr));
+            evictScratchRegsExcept(rmask(rr));
+        } else {
+            evictScratchRegsExcept(0);
+        }
 
         const CallInfo *call = ins->callInfo();
         ArgType argTypes[MAXARGS];
         int argc = call->getArgTypes(argTypes);
 
         if (!call->isIndirect()) {
             verbose_only(if (_logc->lcbits & LC_Native)
                 outputf("        %p:", _nIns);
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -961,21 +961,23 @@ namespace nanojit
         RET();
         POPr(FP); // Restore caller's FP.
 
         return  _nIns;
     }
 
     void Assembler::asm_call(LIns* ins)
     {
-        Register rr = ( ins->isop(LIR_calld) ? FST0 : retRegs[0] );
-        prepareResultReg(ins, rmask(rr));
-
-        evictScratchRegsExcept(rmask(rr));
-
+        if (!ins->isop(LIR_callv)) {
+            Register rr = ( ins->isop(LIR_calld) ? FST0 : retRegs[0] );
+            prepareResultReg(ins, rmask(rr));
+            evictScratchRegsExcept(rmask(rr));
+        } else {
+            evictScratchRegsExcept(0);
+        }
         const CallInfo* call = ins->callInfo();
         // must be signed, not unsigned
         uint32_t iargs = call->count_int32_args();
         int32_t fargs = call->count_args() - iargs;
 
         bool indirect = call->isIndirect();
         if (indirect) {
             // target arg isn't pushed, its consumed in the call
@@ -1016,17 +1018,17 @@ namespace nanojit
                     // with CDECL only, caller pops args
                     ADDi(SP, extra+pushsize);
                 } else if (extra > 0) {
                     ADDi(SP, extra);
                 }
             }
         }
 
-        NanoAssert(ins->isop(LIR_callp) || ins->isop(LIR_calld));
+        NanoAssert(ins->isop(LIR_callv) || ins->isop(LIR_callp) || ins->isop(LIR_calld));
         if (!indirect) {
             CALL(call);
         }
         else {
             // Indirect call.  x86 Calling conventions don't use EAX as an
             // argument, and do use EAX as a return value.  We need a register
             // for the address to call, so we use EAX since it will always be
             // available.