Merge tracemonkey to mozilla-central.
author Robert Sayre <sayrer@gmail.com>
Sun, 07 Jun 2009 12:51:41 -0400
changeset 28982 d7ce3ce19a37f23402c58e31ed47215e767fc1ce
parent 28979 ad24e23c05a62e4c8abdd62791560fd57f3e27b6 (current diff)
parent 28981 9fbf544c1782d11fa0bcfb1e7cbc1928f158bf72 (diff)
child 28983 e371e6c5d12db9c8c84dd1a39d50eca80e126711
push id 7357
push user rsayre@mozilla.com
push date Sun, 07 Jun 2009 18:06:51 +0000
treeherder autoland@d7ce3ce19a37 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
milestone 1.9.2a1pre
Merge tracemonkey to mozilla-central.
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -4004,17 +4004,21 @@ js_AttemptToExtendTree(JSContext* cx, VM
                guard (anchor) has the type information for everything below the current scope,
                and the actual guard we exited from has the types for everything in the current
                scope (and whatever it inlined). We have to merge those maps here. */
             VMSideExit* e1 = anchor;
             VMSideExit* e2 = exitedFrom;
             fullMap.add(getStackTypeMap(e1), e1->numStackSlotsBelowCurrentFrame);
             fullMap.add(getStackTypeMap(e2), e2->numStackSlots);
             stackSlots = fullMap.length();
-            fullMap.add(getGlobalTypeMap(e1), e1->numGlobalSlots);
+            fullMap.add(getGlobalTypeMap(e2), e2->numGlobalSlots);
+            if (e1->numGlobalSlots >= e2->numGlobalSlots) {
+                fullMap.add(getGlobalTypeMap(e1) + e2->numGlobalSlots,
+                            e1->numGlobalSlots - e2->numGlobalSlots);
+            }
             ngslots = e1->numGlobalSlots;
             typeMap = fullMap.data();
         }
         JS_ASSERT(ngslots >= anchor->numGlobalSlots);
         return js_StartRecorder(cx, anchor, c, (TreeInfo*)f->vmprivate, stackSlots,
                                 ngslots, typeMap, exitedFrom, outer, cx->fp->argc);
     }
     return false;
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -391,16 +391,100 @@ namespace nanojit
             case LIR_fcall:
             case LIR_fcalli:
             case LIR_i2f:
             case LIR_u2f:
                 return true;
         }
     }
     
+#if defined(_DEBUG)
+    bool LIns::isOp1() const {
+        switch (firstWord.code) {
+            case LIR_skip:
+            case LIR_ret:
+            case LIR_live:
+            case LIR_neg:
+#if !defined NANOJIT_64BIT
+            case LIR_callh:
+#endif
+            case LIR_not:
+            case LIR_qlo:
+            case LIR_qhi:
+            case LIR_ov:
+            case LIR_cs:
+            case LIR_file:
+            case LIR_line:
+            case LIR_fret:
+            case LIR_fneg:
+            case LIR_i2f:
+            case LIR_u2f:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+
+    bool LIns::isOp2() const {
+        switch (firstWord.code) {
+            case LIR_ld:
+            case LIR_loop:
+            case LIR_x:
+            case LIR_jt:
+            case LIR_jf:
+            case LIR_ldcs:
+            case LIR_feq:
+            case LIR_flt:
+            case LIR_fgt:
+            case LIR_fle:
+            case LIR_fge:
+            case LIR_cmov:
+            case LIR_add:
+            case LIR_sub:
+            case LIR_mul:
+            case LIR_and:
+            case LIR_or:
+            case LIR_xor:
+            case LIR_lsh:
+            case LIR_rsh:
+            case LIR_ush:
+            case LIR_xt:
+            case LIR_xf:
+            case LIR_ldcb:
+            case LIR_eq:
+            case LIR_lt:
+            case LIR_gt:
+            case LIR_le:
+            case LIR_ge:
+            case LIR_ult:
+            case LIR_ugt:
+            case LIR_ule:
+            case LIR_uge:
+            case LIR_2:
+            case LIR_xbarrier:
+            case LIR_xtbl:
+            case LIR_ldq:
+            case LIR_qiand:
+            case LIR_qiadd:
+            case LIR_qcmov:
+            case LIR_fadd:
+            case LIR_fsub:
+            case LIR_fmul:
+            case LIR_fdiv:
+            case LIR_qior:
+            case LIR_qilsh:
+                return true;
+
+            default:
+                return false;
+        }
+    }
+#endif // defined(_DEBUG)
+
 	bool LIns::isCmp() const {
         LOpcode op = firstWord.code;
         return (op >= LIR_eq && op <= LIR_uge) || (op >= LIR_feq && op <= LIR_fge);
 	}
 
     bool LIns::isCond() const {
         LOpcode op = firstWord.code;
         return (op == LIR_ov) || (op == LIR_cs) || isCmp();
@@ -502,23 +586,25 @@ namespace nanojit
 
     size_t LIns::callInsSlots() const
 	{
         return argSlots(argc()) + 1;
 	}
 
 	const CallInfo* LIns::callInfo() const
 	{
+        NanoAssert(isCall());
         return c.ci;
 	}
 
     // Index args in r-l order.  arg(0) is rightmost arg.
     // Nb: this must be kept in sync with insCall().
     LInsp LIns::arg(uint32_t i) 
 	{
+        NanoAssert(isCall());
         NanoAssert(i < argc());
         LInsp* offs = (LInsp*)this - (i+1);
         return *offs;
 	}
 
     LIns* LirWriter::ins2i(LOpcode v, LIns* oprnd1, int32_t imm)
     {
         return ins2(v, oprnd1, insImm(imm));
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -237,22 +237,28 @@ namespace nanojit
             u_type      u;
             c_type      c;
             i_type      i;
             i64_type    i64;
             sti_type    sti;
 		};
 
 	public:
-        LIns* oprnd1() const { return u.oprnd_1; }
-        LIns* oprnd2() const { return u.oprnd_2; }
+        LIns* oprnd1() const {
+            NanoAssert(isOp1() || isOp2() || isStore());
+            return u.oprnd_1;
+        }
+        LIns* oprnd2() const {
+            NanoAssert(isOp2() || isStore());
+            return u.oprnd_2;
+        }
 
         inline LOpcode opcode()   const { return firstWord.code; }
-        inline uint8_t imm8()     const { return c.imm8a; }
-        inline uint8_t imm8b()    const { return c.imm8b; }
+        inline uint8_t imm8()     const { NanoAssert(isop(LIR_param)); return c.imm8a; }
+        inline uint8_t imm8b()    const { NanoAssert(isop(LIR_param)); return c.imm8b; }
         inline int32_t imm32()    const { NanoAssert(isconst());  return i.imm32; }
         inline int32_t imm64_0()  const { NanoAssert(isconstq()); return i64.imm64_0; }
         inline int32_t imm64_1()  const { NanoAssert(isconstq()); return i64.imm64_1; }
         uint64_t       imm64()    const;
         double         imm64f()   const;
         Reservation*   resv()           { return &firstWord; }
         void*	payload() const;
         inline Page*	page()			{ return (Page*) alignTo(this,NJ_PAGE_SIZE); }
@@ -280,16 +286,20 @@ namespace nanojit
 		#else
 		    return (void*)imm32();
         #endif      
 		}
 		
 		bool isCse(const CallInfo *functions) const;
         bool isRet() const { return nanojit::isRetOpcode(firstWord.code); }
 		bool isop(LOpcode o) const { return firstWord.code == o; }
+        #if defined(_DEBUG)
+        bool isOp1() const;     // true for unary ops
+        bool isOp2() const;     // true for binary ops
+        #endif
 		bool isQuad() const;
 		bool isCond() const;
         bool isFloat() const;
 		bool isCmp() const;
         bool isCall() const { 
             LOpcode op = LOpcode(firstWord.code & ~LIR64);
             return op == LIR_call || op == LIR_calli;
         }
@@ -314,26 +324,35 @@ namespace nanojit
 		bool isconstval(int32_t val) const;
 		// True if the instruction is a constant quad value.
 		bool isconstq() const;
 		// True if the instruction is a constant pointer value.
 		bool isconstp() const;
 		bool isBranch() const {
 			return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j);
 		}
-        void setimm32(int32_t x) { i.imm32 = x; }
+        void setimm32(int32_t x) { NanoAssert(isconst()); i.imm32 = x; }
         // Set the opcode and clear resv.
         void initOpcodeAndClearResv(LOpcode);
         Reservation* initResv();
         void         clearResv();
 
 		// operand-setting methods
-        void setOprnd1(LIns* r) { u.oprnd_1 = r; }
-        void setOprnd2(LIns* r) { u.oprnd_2 = r; }
-        void setDisp(int32_t d) { sti.disp = d; }
+        void setOprnd1(LIns* r) {
+            NanoAssert(isOp1() || isOp2() || isStore());
+            u.oprnd_1 = r;
+        }
+        void setOprnd2(LIns* r) {
+            NanoAssert(isOp2() || isStore());
+            u.oprnd_2 = r;
+        }
+        void setDisp(int32_t d) {
+            NanoAssert(isStore());
+            sti.disp = d;
+        }
 		void setTarget(LIns* t);
 		LIns* getTarget();
 
         GuardRecord *record();
 
 		inline uint32_t argc() const {
 			NanoAssert(isCall());
 			return c.imm8b;