Bug 574969 - add LIR_qasd / LIR_dasq. r=edwsmith.
author: Nicholas Nethercote <nnethercote@mozilla.com>
Tue, 20 Jul 2010 21:04:21 -0700
changeset 48521 e3f0aafdd13be728ac4fcf91d1c4706060fd7fd0
parent 48520 31cf8db8400b016af9e5c01581533ed75cfeef2c
child 48522 f93be2cc64d873da097436c0c61ae8602663fe1c
push id: 14748
push user: rsayre@mozilla.com
push date: Sun, 01 Aug 2010 00:33:23 +0000
treeherder: mozilla-central@f0df797bb2a9 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: edwsmith
bugs: 574969
milestone: 2.0b2pre
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 574969 - add LIR_qasd / LIR_dasq. r=edwsmith.
js/src/lirasm/LInsClasses.tbl
js/src/lirasm/lirasm.cpp
js/src/lirasm/testlirc.sh
js/src/lirasm/tests/64-bit/dasq.in
js/src/lirasm/tests/64-bit/dasq.out
js/src/lirasm/tests/64-bit/qasd.in
js/src/lirasm/tests/64-bit/qasd.out
js/src/nanojit/Assembler.cpp
js/src/nanojit/Assembler.h
js/src/nanojit/LIR.cpp
js/src/nanojit/LIRopcode.tbl
js/src/nanojit/NativeMIPS.cpp
js/src/nanojit/NativePPC.cpp
js/src/nanojit/NativeX64.cpp
--- a/js/src/lirasm/LInsClasses.tbl
+++ b/js/src/lirasm/LInsClasses.tbl
@@ -99,20 +99,22 @@ CL___(  LOP_D_DD,       0)  // 51%  LIR_
 // cmov has a low weight because is also used with LIR_div/LIR_mod.
 CL___(  LOP_I_BII,      1)  // 52%  LIR_cmov
 CL_64(  LOP_Q_BQQ,      2)  // 54%  LIR_qcmov
 
 CL___(  LOP_B_II,       3)  // 57%  LIR_eq,  LIR_lt,  etc
 CL_64(  LOP_B_QQ,       3)  // 60%  LIR_qeq, LIR_qlt, etc
 CL___(  LOP_B_DD,       3)  // 63%  LIR_feq, LIR_flt, etc
 
-CL_64(  LOP_Q_I,        2)  // 65%  LIR_i2q, LIR_u2q
-CL___(  LOP_D_I,        2)  // 67%  LIR_i2f, LIR_u2f
-CL_64(  LOP_I_Q,        1)  // 68%  LIR_q2i
-CL___(  LOP_I_D,        1)  // 69%  LIR_qlo, LIR_qhi, LIR_f2i
+CL_64(  LOP_Q_I,        1)  // 64%  LIR_i2q, LIR_u2q
+CL___(  LOP_D_I,        1)  // 65%  LIR_i2f, LIR_u2f
+CL_64(  LOP_I_Q,        1)  // 66%  LIR_q2i
+CL___(  LOP_I_D,        1)  // 67%  LIR_qlo, LIR_qhi, LIR_f2i
+CL_64(  LOP_Q_D,        1)  // 68%  LIR_dasq
+CL_64(  LOP_D_Q,        1)  // 69%  LIR_qasd
 CL___(  LOP_D_II,       1)  // 70%  LIR_qjoin
 
 CL___(  LLD_I,          3)  // 73%  LIR_ld, LIR_ldc, LIR_ld*b, LIR_ld*s
 CL_64(  LLD_Q,          2)  // 75%  LIR_ldq, LIR_ldqc
 CL___(  LLD_D,          3)  // 78%  LIR_ldf, LIR_ldfc
 
 CL___(  LST_I,          5)  // 83%  LIR_sti
 CL_64(  LST_Q,          4)  // 87%  LIR_stqi
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -943,16 +943,18 @@ FragmentAssembler::assembleFragment(LirT
           case LIR_negi:
           case LIR_negd:
           case LIR_noti:
           CASESF(LIR_dlo2i:)
           CASESF(LIR_dhi2i:)
           CASE64(LIR_q2i:)
           CASE64(LIR_i2q:)
           CASE64(LIR_ui2uq:)
+          CASE64(LIR_dasq:)
+          CASE64(LIR_qasd:)
           case LIR_i2d:
           case LIR_ui2d:
           case LIR_d2i:
 #if defined NANOJIT_IA32 || defined NANOJIT_X64
           case LIR_modi:
 #endif
             need(1);
             ins = mLir->ins1(mOpcode,
@@ -965,16 +967,17 @@ FragmentAssembler::assembleFragment(LirT
 #if defined NANOJIT_IA32 || defined NANOJIT_X64
           case LIR_divi:
 #endif
           case LIR_addd:
           case LIR_subd:
           case LIR_muld:
           case LIR_divd:
           CASE64(LIR_addq:)
+          CASE64(LIR_subq:)
           case LIR_andi:
           case LIR_ori:
           case LIR_xori:
           CASE64(LIR_andq:)
           CASE64(LIR_orq:)
           CASE64(LIR_xorq:)
           case LIR_lshi:
           case LIR_rshi:
@@ -1420,16 +1423,24 @@ FragmentAssembler::assembleRandomFragmen
 
     vector<LOpcode> I_D_ops;
 #if NJ_SOFTFLOAT_SUPPORTED
     I_D_ops.push_back(LIR_dlo2i);
     I_D_ops.push_back(LIR_dhi2i);
 #endif
     I_D_ops.push_back(LIR_d2i);
 
+#ifdef NANOJIT_64BIT
+    vector<LOpcode> Q_D_ops;
+    Q_D_ops.push_back(LIR_dasq);
+
+    vector<LOpcode> D_Q_ops;
+    D_Q_ops.push_back(LIR_qasd);
+#endif
+
     vector<LOpcode> D_II_ops;
 #if NJ_SOFTFLOAT_SUPPORTED
     D_II_ops.push_back(LIR_ii2d);
 #endif
 
     vector<LOpcode> I_loads;
     I_loads.push_back(LIR_ldi);          // weight LIR_ldi more heavily
     I_loads.push_back(LIR_ldi);
@@ -1771,16 +1782,34 @@ FragmentAssembler::assembleRandomFragmen
             if (!Ds.empty()) {
                 ins = mLir->ins1(rndPick(I_D_ops), rndPick(Ds));
                 addOrReplace(Is, ins);
                 n++;
             }
 #endif
             break;
 
+#if defined NANOJIT_X64
+        case LOP_Q_D:
+            if (!Ds.empty()) {
+                ins = mLir->ins1(rndPick(Q_D_ops), rndPick(Ds));
+                addOrReplace(Qs, ins);
+                n++;
+            }
+            break;
+
+        case LOP_D_Q:
+            if (!Qs.empty()) {
+                ins = mLir->ins1(rndPick(D_Q_ops), rndPick(Qs));
+                addOrReplace(Ds, ins);
+                n++;
+            }
+            break;
+#endif
+
         case LOP_D_II:
             if (!Is.empty() && !D_II_ops.empty()) {
                 ins = mLir->ins2(rndPick(D_II_ops), rndPick(Is), rndPick(Is));
                 addOrReplace(Ds, ins);
                 n++;
             }
             break;
 
@@ -2087,16 +2116,17 @@ usageAndQuit(const string& progname)
     cout <<
         "usage: " << progname << " [options] [filename]\n"
         "Options:\n"
         "  -h --help        print this message\n"
         "  -v --verbose     print LIR and assembly code\n"
         "  --execute        execute LIR\n"
         "  --[no-]optimize  enable or disable optimization of the LIR (default=off)\n"
         "  --random [N]     generate a random LIR block of size N (default=1000)\n"
+        "  --word-size      prints the word size (32 or 64) for this build of lirasm and exits\n"
         " i386-specific options:\n"
         "  --sse            use SSE2 instructions\n"
         " ARM-specific options:\n"
         "  --arch N         generate code for ARM architecture version N (default=7)\n"
         "  --[no]vfp        enable or disable the generation of ARM VFP code (default=on)\n"
         ;
     exit(0);
 }
@@ -2161,16 +2191,20 @@ processCmdLine(int argc, char **argv, Cm
                         errMsgAndQuit(opts.progname, "--random argument must be greater than zero");
                     opts.random = res;          // next arg is a number, use that for the size
                     i++;
                 } else {
                     opts.random = defaultSize;  // next arg is not a number
                 }
             }
         }
+        else if (arg == "--word-size") {
+            cout << sizeof(void*) * 8 << "\n";
+            exit(0);
+        }
 
         // Architecture-specific flags.
 #if defined NANOJIT_IA32
         else if (arg == "--sse") {
             i386_sse = true;
         }
 #elif defined NANOJIT_ARM
         else if ((arg == "--arch") && (i < argc-1)) {
--- a/js/src/lirasm/testlirc.sh
+++ b/js/src/lirasm/testlirc.sh
@@ -42,16 +42,25 @@ do
     runtest $infile
 done
 
 runtest "--random 1000000"
 runtest "--random 1000000 --optimize"
 
 # ---- Platform-specific tests and configurations. ----
 
+# 64-bit platforms
+if [[ $($LIRASM --word-size) == 64 ]]
+then
+    for infile in "$TESTS_DIR"/64-bit/*.in
+    do
+        runtest $infile
+    done
+fi
+
 # ARM
 if [[ $(uname -m) == arm* ]]
 then
     for infile in "$TESTS_DIR"/*.in
     do
         # Run standard tests, but set code generation for older architectures.
         # It may also be beneficial to test ARMv6 and ARMv7 with --novfp, but such
         # a platform seems so unlikely that it probably isn't worthwhile. It's also
new file mode 100644
--- /dev/null
+++ b/js/src/lirasm/tests/64-bit/dasq.in
@@ -0,0 +1,8 @@
+q = immq 12345
+d = qasd q
+q2 = dasq d
+one = immd 1.0      ; do some intermediate stuff to make it less trivial
+two = immd 2.0
+three = addd one two
+i2 = q2i q2
+reti i2
new file mode 100644
--- /dev/null
+++ b/js/src/lirasm/tests/64-bit/dasq.out
@@ -0,0 +1,1 @@
+Output is: 12345
new file mode 100644
--- /dev/null
+++ b/js/src/lirasm/tests/64-bit/qasd.in
@@ -0,0 +1,7 @@
+one = immq 1
+d = immd 123.45
+q = dasq d
+q2 = addq q one     ; do some intermediate stuff just to complicate things
+q3 = subq q2 one
+d2 = qasd q3
+retd d2
new file mode 100644
--- /dev/null
+++ b/js/src/lirasm/tests/64-bit/qasd.out
@@ -0,0 +1,1 @@
+Output is: 123.45
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -1646,27 +1646,43 @@ namespace nanojit
                     break;
 
 #ifdef NANOJIT_64BIT
                 case LIR_i2q:
                 case LIR_ui2uq:
                     countlir_alu();
                     ins->oprnd1()->setResultLive();
                     if (ins->isExtant()) {
-                        asm_promote(ins);
+                        asm_ui2uq(ins);
                     }
                     break;
 
                 case LIR_q2i:
                     countlir_alu();
                     ins->oprnd1()->setResultLive();
                     if (ins->isExtant()) {
                         asm_q2i(ins);
                     }
                     break;
+
+                case LIR_dasq:
+                    countlir_alu();
+                    ins->oprnd1()->setResultLive();
+                    if (ins->isExtant()) {
+                        asm_dasq(ins);
+                    }
+                    break;
+
+                case LIR_qasd:
+                    countlir_alu();
+                    ins->oprnd1()->setResultLive();
+                    if (ins->isExtant()) {
+                        asm_qasd(ins);
+                    }
+                    break;
 #endif
                 case LIR_sti2c:
                 case LIR_sti2s:
                 case LIR_sti:
                     countlir_st();
                     ins->oprnd1()->setResultLive();
                     ins->oprnd2()->setResultLive();
                     asm_store32(op, ins->oprnd1(), ins->disp(), ins->oprnd2());
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -448,17 +448,19 @@ namespace nanojit
 #endif
             void        asm_fneg(LIns* ins);
             void        asm_fop(LIns* ins);
             void        asm_i2d(LIns* ins);
             void        asm_ui2d(LIns* ins);
             void        asm_d2i(LIns* ins);
 #ifdef NANOJIT_64BIT
             void        asm_q2i(LIns* ins);
-            void        asm_promote(LIns *ins);
+            void        asm_ui2uq(LIns *ins);
+            void        asm_dasq(LIns *ins);
+            void        asm_qasd(LIns *ins);
 #endif
             void        asm_nongp_copy(Register r, Register s);
             void        asm_call(LIns*);
             Register    asm_binop_rhs_reg(LIns* ins);
             NIns*       asm_branch(bool branchOnFalse, LIns* cond, NIns* targ);
             NIns*       asm_branch_ov(LOpcode op, NIns* targ);
             void        asm_switch(LIns* ins, NIns* target);
             void        asm_jtbl(LIns* ins, NIns** table);
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -545,16 +545,24 @@ namespace nanojit
         case LIR_i2q:
             if (oprnd->isImmI())
                 return insImmQ(int64_t(int32_t(oprnd->immI())));
             break;
         case LIR_ui2uq:
             if (oprnd->isImmI())
                 return insImmQ(uint64_t(uint32_t(oprnd->immI())));
             break;
+        case LIR_dasq:
+            if (oprnd->isop(LIR_qasd))
+                return oprnd->oprnd1();
+            break;
+        case LIR_qasd:
+            if (oprnd->isop(LIR_dasq))
+                return oprnd->oprnd1();
+            break;
 #endif
 #if NJ_SOFTFLOAT_SUPPORTED
         case LIR_dlo2i:
             if (oprnd->isImmD())
                 return insImmI(oprnd->immDlo());
             if (oprnd->isop(LIR_ii2d))
                 return oprnd->oprnd1();
             break;
@@ -582,20 +590,23 @@ namespace nanojit
             if (oprnd->isImmD())
                 return insImmD(-oprnd->immD());
             if (oprnd->isop(LIR_subd))
                 return out->ins2(LIR_subd, oprnd->oprnd2(), oprnd->oprnd1());
             goto involution;
         case LIR_i2d:
             if (oprnd->isImmI())
                 return insImmD(oprnd->immI());
+            // Nb: i2d(d2i(x)) != x
             break;
         case LIR_d2i:
             if (oprnd->isImmD())
                 return insImmI(int32_t(oprnd->immD()));
+            if (oprnd->isop(LIR_i2d))
+                return oprnd->oprnd1();
             break;
         case LIR_ui2d:
             if (oprnd->isImmI())
                 return insImmD(uint32_t(oprnd->immI()));
             break;
         default:
             ;
         }
@@ -1388,16 +1399,18 @@ namespace nanojit
                 CASESF(LIR_dhi2i:)
                 CASESF(LIR_hcalli:)
                 CASE64(LIR_i2q:)
                 CASE64(LIR_ui2uq:)
                 case LIR_i2d:
                 case LIR_ui2d:
                 CASE64(LIR_q2i:)
                 case LIR_d2i:
+                CASE64(LIR_dasq:)
+                CASE64(LIR_qasd:)
                 CASE86(LIR_modi:)
                     live.add(ins->oprnd1(), 0);
                     break;
 
                 case LIR_sti:
                 CASE64(LIR_stq:)
                 case LIR_std:
                 case LIR_sti2c:
@@ -1807,16 +1820,18 @@ namespace nanojit
             CASESF(LIR_dlo2i:)
             CASESF(LIR_dhi2i:)
             case LIR_noti:
             CASE86(LIR_modi:)
             CASE64(LIR_i2q:)
             CASE64(LIR_ui2uq:)
             CASE64(LIR_q2i:)
             case LIR_d2i:
+            CASE64(LIR_dasq:)
+            CASE64(LIR_qasd:)
                 VMPI_snprintf(s, n, "%s = %s %s", formatRef(&b1, i), lirNames[op],
                              formatRef(&b2, i->oprnd1()));
                 break;
 
             case LIR_x:
             case LIR_xt:
             case LIR_xf:
             case LIR_xbarrier:
@@ -2971,16 +2986,17 @@ namespace nanojit
 
 #ifdef NANOJIT_64BIT
         case LIR_i2q:
         case LIR_ui2uq:
             formals[0] = LTy_I;
             break;
 
         case LIR_q2i:
+        case LIR_qasd:
         case LIR_retq:
         case LIR_liveq:
             formals[0] = LTy_Q;
             break;
 #endif
 
 #if defined NANOJIT_IA32 || defined NANOJIT_X64
         case LIR_modi:       // see LIRopcode.tbl for why 'mod' is unary
@@ -3002,16 +3018,17 @@ namespace nanojit
             formals[0] = LTy_I;
             break;
 #endif
 
         case LIR_negd:
         case LIR_retd:
         case LIR_lived:
         case LIR_d2i:
+        CASE64(LIR_dasq:)
             formals[0] = LTy_D;
             break;
 
         case LIR_file:
         case LIR_line:
             // XXX: not sure about these ones.  Ignore for the moment.
             nArgs = 0;
             break;
--- a/js/src/nanojit/LIRopcode.tbl
+++ b/js/src/nanojit/LIRopcode.tbl
@@ -289,58 +289,57 @@ OP___(divd,    104, Op2,  D,    1)  // d
 // code for it.  It's used in TraceMonkey briefly but is always demoted to a
 // LIR_modl or converted to a function call before Nanojit has to do anything
 // serious with it.
 OP___(modd,    105, Op2,  D,    1)  // modulo double
 
 OP___(cmovi,   106, Op3,  I,    1)  // conditional move int
 OP_64(cmovq,   107, Op3,  Q,    1)  // conditional move quad
 
-OP_UN(108)
-
 //---------------------------------------------------------------------------
 // Conversions
 //---------------------------------------------------------------------------
-OP_64(i2q,     109, Op1,  Q,    1)  // sign-extend int to quad
-OP_64(ui2uq,   110, Op1,  Q,    1)  // zero-extend unsigned int to unsigned quad
-OP_64(q2i,     111, Op1,  I,    1)  // truncate quad to int (removes the high 32 bits)
+OP_64(i2q,     108, Op1,  Q,    1)  // sign-extend int to quad
+OP_64(ui2uq,   109, Op1,  Q,    1)  // zero-extend unsigned int to unsigned quad
+OP_64(q2i,     110, Op1,  I,    1)  // truncate quad to int (removes the high 32 bits)
 
-OP___(i2d,     112, Op1,  D,    1)  // convert int to double
-OP___(ui2d,    113, Op1,  D,    1)  // convert unsigned int to double
-OP___(d2i,     114, Op1,  I,    1)  // convert double to int (no exceptions raised, platform rounding rules)
+OP___(i2d,     111, Op1,  D,    1)  // convert int to double
+OP___(ui2d,    112, Op1,  D,    1)  // convert unsigned int to double
+OP___(d2i,     113, Op1,  I,    1)  // convert double to int (no exceptions raised, platform rounding rules)
+
+OP_64(dasq,    114, Op1,  Q,    1)  // interpret the bits of a double as a quad
+OP_64(qasd,    115, Op1,  D,    1)  // interpret the bits of a quad as a double
 
 //---------------------------------------------------------------------------
 // Overflow arithmetic
 //---------------------------------------------------------------------------
 // These all exit if overflow occurred.  The result is valid on either path.
-OP___(addxovi, 115, Op3,  I,    1)  // add int and exit on overflow
-OP___(subxovi, 116, Op3,  I,    1)  // subtract int and exit on overflow
-OP___(mulxovi, 117, Op3,  I,    1)  // multiply int and exit on overflow
+OP___(addxovi, 116, Op3,  I,    1)  // add int and exit on overflow
+OP___(subxovi, 117, Op3,  I,    1)  // subtract int and exit on overflow
+OP___(mulxovi, 118, Op3,  I,    1)  // multiply int and exit on overflow
 
 // These all branch if overflow occurred.  The result is valid on either path.
-OP___(addjovi, 118, Op3,  I,    1)  // add int and branch on overflow
-OP___(subjovi, 119, Op3,  I,    1)  // subtract int and branch on overflow
-OP___(muljovi, 120, Op3,  I,    1)  // multiply int and branch on overflow
+OP___(addjovi, 119, Op3,  I,    1)  // add int and branch on overflow
+OP___(subjovi, 120, Op3,  I,    1)  // subtract int and branch on overflow
+OP___(muljovi, 121, Op3,  I,    1)  // multiply int and branch on overflow
 
-OP_64(addjovq, 121, Op3,  Q,    1)  // add quad and branch on overflow
-OP_64(subjovq, 122, Op3,  Q,    1)  // subtract quad and branch on overflow
+OP_64(addjovq, 122, Op3,  Q,    1)  // add quad and branch on overflow
+OP_64(subjovq, 123, Op3,  Q,    1)  // subtract quad and branch on overflow
 
 //---------------------------------------------------------------------------
 // SoftFloat
 //---------------------------------------------------------------------------
-OP_SF(dlo2i,   123, Op1,  I,    1)  // get the low  32 bits of a double as an int
-OP_SF(dhi2i,   124, Op1,  I,    1)  // get the high 32 bits of a double as an int
-OP_SF(ii2d,    125, Op2,  D,    1)  // join two ints (1st arg is low bits, 2nd is high)
+OP_SF(dlo2i,   124, Op1,  I,    1)  // get the low  32 bits of a double as an int
+OP_SF(dhi2i,   125, Op1,  I,    1)  // get the high 32 bits of a double as an int
+OP_SF(ii2d,    126, Op2,  D,    1)  // join two ints (1st arg is low bits, 2nd is high)
 
 // LIR_hcalli is a hack that's only used on 32-bit platforms that use
 // SoftFloat.  Its operand is always a LIR_calli, but one that specifies a
 // function that returns a double.  It indicates that the double result is
 // returned via two 32-bit integer registers.  The result is always used as the
 // second operand of a LIR_ii2d.
-OP_SF(hcalli,  126, Op1,  I,    1)
-
-OP_UN(127)
+OP_SF(hcalli,  127, Op1,  I,    1)
 
 #undef OP_UN
 #undef OP_32
 #undef OP_64
 #undef OP_SF
 #undef OP_86
--- a/js/src/nanojit/NativeMIPS.cpp
+++ b/js/src/nanojit/NativeMIPS.cpp
@@ -642,21 +642,21 @@ namespace nanojit
 
 #ifdef NANOJIT_64BIT
     void
     Assembler::asm_q2i(LIns *)
     {
         NanoAssert(0);  // q2i shouldn't occur on 32-bit platforms
     }
 
-    void Assembler::asm_promote(LIns *ins)
+    void Assembler::asm_ui2uq(LIns *ins)
     {
         USE(ins);
-        TODO(asm_promote);
-        TAG("asm_promote(ins=%p{%s})", ins, lirNames[ins->opcode()]);
+        TODO(asm_ui2uq);
+        TAG("asm_ui2uq(ins=%p{%s})", ins, lirNames[ins->opcode()]);
     }
 #endif
 
     void Assembler::asm_load64(LIns *ins)
     {
         NanoAssert(ins->isD());
 
         LIns* base = ins->oprnd1();
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -1018,32 +1018,41 @@ namespace nanojit
     #if defined NANOJIT_64BIT
     // XXX: this is sub-optimal, see https://bugzilla.mozilla.org/show_bug.cgi?id=540368#c7.
     void Assembler::asm_q2i(LIns *ins) {
         Register rr = deprecated_prepResultReg(ins, GpRegs);
         int d = findMemFor(ins->oprnd1());
         LWZ(rr, d+4, FP);
     }
 
-    void Assembler::asm_promote(LIns *ins) {
+    void Assembler::asm_ui2uq(LIns *ins) {
         LOpcode op = ins->opcode();
         Register r = deprecated_prepResultReg(ins, GpRegs);
         Register v = findRegFor(ins->oprnd1(), GpRegs);
         switch (op) {
         default:
             debug_only(outputf("%s",lirNames[op]));
-            TODO(asm_promote);
+            TODO(asm_ui2uq);
         case LIR_ui2uq:
             CLRLDI(r, v, 32); // clears the top 32 bits
             break;
         case LIR_i2q:
             EXTSW(r, v);
             break;
         }
     }
+
+    void Assembler::asm_dasq(LIns *ins) {
+        TODO(asm_dasq);
+    }
+
+    void Assembler::asm_qasd(LIns *ins) {
+        TODO(asm_qasd);
+    }
+
     #endif
 
 #ifdef NANOJIT_64BIT
     void Assembler::asm_immq(LIns *ins) {
         Register r = ins->deprecated_getReg();
         if (deprecated_isKnownReg(r) && (rmask(r) & FpRegs)) {
             // FPR already assigned, fine, use it
             deprecated_freeRsrcOf(ins);
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -1034,29 +1034,43 @@ namespace nanojit
     void Assembler::asm_q2i(LIns *ins) {
         Register rr, ra;
         beginOp1Regs(ins, GpRegs, rr, ra);
         NanoAssert(IsGpReg(ra));
         MOVLR(rr, ra);  // 32bit mov zeros the upper 32bits of the target
         endOpRegs(ins, rr, ra);
     }
 
-    void Assembler::asm_promote(LIns *ins) {
+    void Assembler::asm_ui2uq(LIns *ins) {
         Register rr, ra;
         beginOp1Regs(ins, GpRegs, rr, ra);
         NanoAssert(IsGpReg(ra));
         if (ins->isop(LIR_ui2uq)) {
             MOVLR(rr, ra);      // 32bit mov zeros the upper 32bits of the target
         } else {
             NanoAssert(ins->isop(LIR_i2q));
             MOVSXDR(rr, ra);    // sign extend 32->64
         }
         endOpRegs(ins, rr, ra);
     }
 
+    void Assembler::asm_dasq(LIns *ins) {
+        Register rr = prepareResultReg(ins, GpRegs);
+        Register ra = findRegFor(ins->oprnd1(), FpRegs);
+        asm_nongp_copy(rr, ra);
+        freeResourcesOf(ins);
+    }
+
+    void Assembler::asm_qasd(LIns *ins) {
+        Register rr = prepareResultReg(ins, FpRegs);
+        Register ra = findRegFor(ins->oprnd1(), GpRegs);
+        asm_nongp_copy(rr, ra);
+        freeResourcesOf(ins);
+    }
+
     // The CVTSI2SD instruction only writes to the low 64bits of the target
     // XMM register, which hinders register renaming and makes dependence
     // chains longer.  So we precede with XORPS to clear the target register.
 
     void Assembler::asm_i2d(LIns *ins) {
         LIns *a = ins->oprnd1();
         NanoAssert(ins->isD() && a->isI());