Bug 478340 - TM: kill many of the warnings when building 'js'. r=mrbkap
author Nicholas Nethercote <nnethercote@mozilla.com>
Thu, 19 Feb 2009 11:17:31 -0800
changeset 25469 86c57e08cfe78ef5572620563f56ea3857a73b14
parent 25218 4cf75fc4d19647cfb8a2d57a7959809dddc16cbb
child 25470 a2b6a4c57a0557c922bef92bc96c388441192384
push id 5575
push user rsayre@mozilla.com
push date Wed, 25 Feb 2009 09:05:38 +0000
treeherder mozilla-central@8eba35e62d92
reviewers mrbkap
bugs 478340
milestone 1.9.2a1pre
js/src/jsparse.cpp
js/src/jstracer.cpp
js/src/nanojit/Assembler.cpp
js/src/nanojit/LIR.cpp
js/src/nanojit/Nativei386.cpp
js/src/nanojit/Nativei386.h
js/src/shell/js.cpp
--- a/js/src/jsparse.cpp
+++ b/js/src/jsparse.cpp
@@ -2795,17 +2795,17 @@ Statement(JSContext *cx, JSTokenStream *
 
                 /*
                  * Rewrite 'for (<decl> x = i in o)' where <decl> is 'let',
                  * 'var', or 'const' to hoist the initializer or the entire
                  * decl out of the loop head. TOK_VAR is the type for both
                  * 'var' and 'const'.
                  */
                 pn2 = pn1->pn_head;
-                if (pn2->pn_type == TOK_NAME && pn2->pn_expr
+                if ((pn2->pn_type == TOK_NAME && pn2->pn_expr)
 #if JS_HAS_DESTRUCTURING
                     || pn2->pn_type == TOK_ASSIGN
 #endif
                     ) {
                     pnseq = NewParseNode(cx, ts, PN_LIST, tc);
                     if (!pnseq)
                         return NULL;
                     pnseq->pn_type = TOK_SEQ;
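
The jsparse.cpp hunk above, like several nanojit hunks below, adds parentheses to silence GCC's -Wparentheses warning ("suggest parentheses around '&&' within '||'"). A minimal standalone sketch of the pattern, illustrative only and not part of the patch:

    // g++ -Wparentheses -c precedence.cpp
    // && binds tighter than ||, so both functions compute the same value;
    // GCC warns on f() because the intended grouping is easy to misread.
    bool f(bool a, bool b, bool c) {
        return a && b || c;     // warning: suggest parentheses around '&&' within '||'
    }
    bool g(bool a, bool b, bool c) {
        return (a && b) || c;   // explicit grouping; same behavior, no warning
    }
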
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -1204,17 +1204,17 @@ TraceRecorder::TraceRecorder(JSContext* 
     this->terminate = false;
     this->outerToBlacklist = outerToBlacklist;
     this->wasRootFragment = _fragment == _fragment->root;
 
     debug_only_v(printf("recording starting from %s:%u@%u\n",
                         cx->fp->script->filename,
                         js_FramePCToLineNumber(cx, cx->fp),
                         FramePCOffset(cx->fp));)
-    debug_only_v(printf("globalObj=%p, shape=%d\n", this->globalObj, OBJ_SHAPE(this->globalObj));)
+    debug_only_v(printf("globalObj=%p, shape=%d\n", (void*)this->globalObj, OBJ_SHAPE(this->globalObj));)
 
     lir = lir_buf_writer = new (&gc) LirBufWriter(lirbuf);
     debug_only_v(lir = verbose_filter = new (&gc) VerboseWriter(&gc, lir, lirbuf->names);)
 #ifdef NJ_SOFTFLOAT
     lir = float_filter = new (&gc) SoftFloatFilter(lir);
 #endif
     lir = cse_filter = new (&gc) CseFilter(lir, &gc);
     lir = expr_filter = new (&gc) ExprFilter(lir);
@@ -1464,28 +1464,28 @@ ValueToNative(JSContext* cx, jsval v, ui
         /* Watch out for pseudo-booleans. */
         JS_ASSERT(tag == JSVAL_BOOLEAN);
         *(JSBool*)slot = JSVAL_TO_PSEUDO_BOOLEAN(v);
         debug_only_v(printf("boolean<%d> ", *(JSBool*)slot);)
         return;
       case JSVAL_STRING:
         JS_ASSERT(tag == JSVAL_STRING);
         *(JSString**)slot = JSVAL_TO_STRING(v);
-        debug_only_v(printf("string<%p> ", *(JSString**)slot);)
+        debug_only_v(printf("string<%p> ", (void*)(*(JSString**)slot));)
         return;
       case JSVAL_TNULL:
         JS_ASSERT(tag == JSVAL_OBJECT);
         *(JSObject**)slot = NULL;
         return;
       default:
         /* Note: we should never see JSVAL_BOXED in an entry type map. */
         JS_ASSERT(type == JSVAL_OBJECT);
         JS_ASSERT(tag == JSVAL_OBJECT);
         *(JSObject**)slot = JSVAL_TO_OBJECT(v);
-        debug_only_v(printf("object<%p:%s> ", JSVAL_TO_OBJECT(v),
+        debug_only_v(printf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
                             JSVAL_IS_NULL(v)
                             ? "null"
                             : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
         return;
     }
 }
 
 /* We maintain an emergency pool of doubles so we can recover safely if a trace runs
@@ -1579,34 +1579,34 @@ NativeToValue(JSContext* cx, jsval& v, u
         v = AllocateDoubleFromReservedPool(cx);
         JS_ASSERT(JSVAL_IS_DOUBLE(v) && *JSVAL_TO_DOUBLE(v) == 0.0);
         *JSVAL_TO_DOUBLE(v) = d;
         return;
       }
       case JSVAL_STRING:
         v = STRING_TO_JSVAL(*(JSString**)slot);
         JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING); /* if this fails the pointer was not aligned */
-        debug_only_v(printf("string<%p> ", *(JSString**)slot);)
+        debug_only_v(printf("string<%p> ", (void*)(*(JSString**)slot));)
         break;
       case JSVAL_BOXED:
         v = *(jsval*)slot;
         JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
-        debug_only_v(printf("box<%lx> ", v));
+        debug_only_v(printf("box<%x> ", v));
         break;
       case JSVAL_TNULL:
         JS_ASSERT(*(JSObject**)slot == NULL);
         v = JSVAL_NULL;
-        debug_only_v(printf("null<%p> ", *(JSObject**)slot));
+        debug_only_v(printf("null<%p> ", (void*)(*(JSObject**)slot)));
         break;
       default:
         JS_ASSERT(type == JSVAL_OBJECT);
         v = OBJECT_TO_JSVAL(*(JSObject**)slot);
         JS_ASSERT(JSVAL_TAG(v) == JSVAL_OBJECT); /* if this fails the pointer was not aligned */
         JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
-        debug_only_v(printf("object<%p:%s> ", JSVAL_TO_OBJECT(v),
+        debug_only_v(printf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
                             JSVAL_IS_NULL(v)
                             ? "null"
                             : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
         break;
     }
 }
 
 /* Attempt to unbox the given list of interned globals onto the native global frame. */
@@ -1759,17 +1759,17 @@ TraceRecorder::import(LIns* base, ptrdif
     if (mark)
         JS_ARENA_RELEASE(&cx->tempPool, mark);
     addName(ins, name);
 
     static const char* typestr[] = {
         "object", "int", "double", "3", "string", "5", "boolean", "any"
     };
     debug_only_v(printf("import vp=%p name=%s type=%s flags=%d\n",
-                        p, name, typestr[t & 7], t >> 3);)
+                        (void*)p, name, typestr[t & 7], t >> 3);)
 #endif
 }
 
 JS_REQUIRES_STACK void
 TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigned ngslots,
                       unsigned callDepth, uint8* typeMap)
 {
     /* If we get a partial list that doesn't have all the types (i.e. recording from a side
@@ -2296,17 +2296,17 @@ TraceRecorder::deduceTypeStability(Fragm
     unsigned stage_count;
     jsval** stage_vals = (jsval**)alloca(sizeof(jsval*) * (treeInfo->typeMap.length()));
     LIns** stage_ins = (LIns**)alloca(sizeof(LIns*) * (treeInfo->typeMap.length()));
 
     /* First run through and see if we can close ourselves - best case! */
     stage_count = 0;
     success = false;
 
-    debug_only_v(printf("Checking type stability against self=%p\n", fragment);)
+    debug_only_v(printf("Checking type stability against self=%p\n", (void*)fragment);)
 
     m = typemap = treeInfo->globalTypeMap();
     FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
         debug_only_v(printf("%s%d ", vpname, vpnum);)
         if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count)) {
             /* If the failure was an int->double, tell the oracle. */
             if (*m == JSVAL_INT && isNumber(*vp) && !isPromoteInt(get(vp))) {
                 oracle.markGlobalSlotUndemotable(cx, gslots[n]);
@@ -2347,17 +2347,17 @@ checktype_fail_1:
     demote = false;
 
     /* At this point the tree is about to be incomplete, so let's see if we can connect to any
      * peer fragment that is type stable.
      */
     Fragment* f;
     TreeInfo* ti;
     for (f = root_peer; f != NULL; f = f->peer) {
-        debug_only_v(printf("Checking type stability against peer=%p (code=%p)\n", f, f->code());)
+        debug_only_v(printf("Checking type stability against peer=%p (code=%p)\n", (void*)f, f->code());)
         if (!f->code())
             continue;
         ti = (TreeInfo*)f->vmprivate;
         /* Don't allow varying stack depths */
         if ((ti->nStackTypes != treeInfo->nStackTypes) ||
             (ti->typeMap.length() != treeInfo->typeMap.length()) ||
             (ti->globalSlots->length() != treeInfo->globalSlots->length()))
             continue;
@@ -2581,17 +2581,17 @@ TraceRecorder::closeLoop(JSTraceMonitor*
              */
             if (walkedOutOfLoop()) {
                 exit->pc = terminate_pc;
                 exit->imacpc = terminate_imacpc;
             }
         } else {
             JS_ASSERT(peer->code());
             exit->target = peer;
-            debug_only_v(printf("Joining type-unstable trace to target fragment %p.\n", peer);)
+            debug_only_v(printf("Joining type-unstable trace to target fragment %p.\n", (void*)peer);)
             stable = true;
             ((TreeInfo*)peer->vmprivate)->dependentTrees.addUnique(fragment->root);
         }
 
         compile(tm);
     } else {
         exit->target = fragment->root;
         fragment->lastIns = lir->insGuard(LIR_loop, lir->insImm(1), exitIns);
@@ -2627,17 +2627,17 @@ TraceRecorder::joinEdgesToEntry(Fragment
             ti = (TreeInfo*)peer->vmprivate;
             uexit = ti->unstableExits;
             unext = &ti->unstableExits;
             while (uexit != NULL) {
                 bool remove = js_JoinPeersIfCompatible(fragmento, fragment, treeInfo, uexit->exit);
                 JS_ASSERT(!remove || fragment != peer);
                 debug_only_v(if (remove) {
                              printf("Joining type-stable trace to target exit %p->%p.\n",
-                                    uexit->fragment, uexit->exit); });
+                                    (void*)uexit->fragment, (void*)uexit->exit); });
                 if (!remove) {
                     /* See if this exit contains mismatch demotions, which imply trashing a tree.
                        This is actually faster than trashing the original tree as soon as the
                        instability is detected, since we could have compiled a fairly stable
                        tree that ran faster with integers. */
                     unsigned stackCount = 0;
                     unsigned globalCount = 0;
                     t1 = treeInfo->stackTypeMap();
@@ -3226,26 +3226,16 @@ js_SynthesizeFrame(JSContext* cx, const 
     // FIXME? we must count stack slots from caller's operand stack up to (but not including)
     // callee's, including missing arguments. Could we shift everything down to the caller's
     // fp->slots (where vars start) and avoid some of the complexity?
     return (fi.s.spdist - fp->down->script->nfixed) +
            ((fun->nargs > fp->argc) ? fun->nargs - fp->argc : 0) +
            script->nfixed;
 }
 
-#ifdef JS_JIT_SPEW
-static void
-js_dumpMap(TypeMap const & tm) {
-    uint8 *data = tm.data();
-    for (unsigned i = 0; i < tm.length(); ++i) {
-        printf("typemap[%d] = %c\n", i, typeChar[data[i]]);
-    }
-}
-#endif
-
 JS_REQUIRES_STACK bool
 js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, Fragment* outer,
               uint32 globalShape, SlotList* globalSlots)
 {
     JS_ASSERT(f->root == f);
 
     /* Make sure the global type map didn't change on us. */
     JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
@@ -3574,17 +3564,17 @@ js_RecordLoopEdge(JSContext* cx, TraceRe
         bool success = false;
 
         f = r->findNestedCompatiblePeer(f, &empty);
         if (f && f->code())
             success = r->adjustCallerTypes(f);
 
         if (!success) {
             AUDIT(noCompatInnerTrees);
-            debug_only_v(printf("No compatible inner tree (%p).\n", f);)
+            debug_only_v(printf("No compatible inner tree (%p).\n", (void*)f);)
 
             Fragment* old = getLoop(tm, tm->recorder->getFragment()->root->ip, ti->globalShape);
             if (old == NULL)
                 old = tm->recorder->getFragment();
             js_AbortRecording(cx, "No compatible inner tree");
             if (!f && oracle.hit(peer_root->ip) < MAX_INNER_RECORD_BLACKLIST)
                 return false;
             if (old->recordAttempts < MAX_MISMATCH)
@@ -3656,17 +3646,17 @@ js_IsEntryTypeCompatible(jsval* vp, uint
 
     switch (*m) {
       case JSVAL_INT:
         jsint i;
         if (JSVAL_IS_INT(*vp))
             return true;
         if ((tag == JSVAL_DOUBLE) && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(*vp), i))
             return true;
-        debug_only_v(printf("int != tag%u(value=%lu) ", tag, *vp);)
+        debug_only_v(printf("int != tag%u(value=%lu) ", tag, (unsigned long)*vp);)
         return false;
       case JSVAL_DOUBLE:
         if (JSVAL_IS_INT(*vp) || tag == JSVAL_DOUBLE)
             return true;
         debug_only_v(printf("double != tag%u ", tag);)
         return false;
       case JSVAL_BOOLEAN:
         if (tag == JSVAL_BOOLEAN)
@@ -3715,17 +3705,17 @@ TraceRecorder::findNestedCompatiblePeer(
             if (empty)
                 *empty = f;
             continue;
         }
 
         unsigned demotes = 0;
         ti = (TreeInfo*)f->vmprivate;
 
-        debug_only_v(printf("checking nested types %p: ", f);)
+        debug_only_v(printf("checking nested types %p: ", (void*)f);)
 
         if (ngslots > ti->nGlobalTypes())
             ti->typeMap.captureMissingGlobalTypes(cx, *ti->globalSlots, ti->nStackTypes);
 
         uint8* m = ti->typeMap.data();
 
         FORALL_SLOTS(cx, ngslots, gslots, 0,
             debug_only_v(printf("%s%d=", vpname, vpnum);)
@@ -3810,17 +3800,17 @@ check_fail:
  * @param nodemote      If true, will try to find a peer that does not require demotion.
  */
 static JS_REQUIRES_STACK Fragment*
 js_FindVMCompatiblePeer(JSContext* cx, Fragment* f)
 {
     for (; f != NULL; f = f->peer) {
         if (f->vmprivate == NULL)
             continue;
-        debug_only_v(printf("checking vm types %p (ip: %p): ", f, f->ip);)
+        debug_only_v(printf("checking vm types %p (ip: %p): ", (void*)f, f->ip);)
         if (js_CheckEntryTypes(cx, (TreeInfo*)f->vmprivate))
             return f;
     }
     return NULL;
 }
 
 static void
 LeaveTree(InterpState&, VMSideExit* lr);
@@ -4091,17 +4081,17 @@ LeaveTree(InterpState& state, VMSideExit
 #endif
 
     debug_only_v(printf("leaving trace at %s:%u@%u, op=%s, lr=%p, exitType=%d, sp=%d, "
                         "calldepth=%d, cycles=%llu\n",
                         fp->script->filename,
                         js_FramePCToLineNumber(cx, fp),
                         FramePCOffset(fp),
                         js_CodeName[fp->imacpc ? *fp->imacpc : *fp->regs->pc],
-                        lr,
+                        (void*)lr,
                         lr->exitType,
                         fp->regs->sp - StackBase(fp),
                         calldepth,
                         cycles));
 
     /* If this trace is part of a tree, later branches might have added additional globals for
        which we don't have any type information available in the side exit. We merge in this
        information from the entry type-map. See also comment in the constructor of TraceRecorder
@@ -4221,17 +4211,17 @@ monitor_loop:
         }
         /* Threshold not reached yet. */
         return false;
     }
 
     debug_only_v(printf("Looking for compat peer %d@%d, from %p (ip: %p, hits=%d)\n",
                         js_FramePCToLineNumber(cx, cx->fp),
                         FramePCOffset(cx->fp),
-                        f, f->ip, oracle.getHits(f->ip));)
+                        (void*)f, f->ip, oracle.getHits(f->ip));)
     Fragment* match = js_FindVMCompatiblePeer(cx, f);
     /* If we didn't find a tree that actually matched, keep monitoring the loop. */
     if (!match)
         goto monitor_loop;
 
     VMSideExit* lr = NULL;
     VMSideExit* innermostNestedGuard = NULL;
 
@@ -4575,25 +4565,25 @@ js_FlushJITOracle(JSContext* cx)
     oracle.clear();
 }
 
 extern JS_REQUIRES_STACK void
 js_FlushScriptFragments(JSContext* cx, JSScript* script)
 {
     if (!TRACING_ENABLED(cx))
         return;
-    debug_only_v(printf("Flushing fragments for JSScript %p.\n", script);)
+    debug_only_v(printf("Flushing fragments for JSScript %p.\n", (void*)script);)
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
     for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
         for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
             /* Disable future use of any script-associated VMFragment.*/
             if (JS_UPTRDIFF((*f)->ip, script->code) < script->length) {
                 debug_only_v(printf("Disconnecting VMFragment %p "
                                     "with ip %p, in range [%p,%p).\n",
-                                    *f, (*f)->ip, script->code,
+                                    (void*)(*f), (*f)->ip, script->code,
                                     script->code + script->length));
                 VMFragment* next = (*f)->next;
                 if (tm->fragmento)
                     tm->fragmento->clearFragment(*f);
                 *f = next;
             } else {
                 f = &((*f)->next);
             }
@@ -9405,17 +9395,17 @@ js_DumpPeerStability(JSTraceMonitor* tm,
     Fragment* f;
     TreeInfo* ti;
     bool looped = false;
     unsigned length = 0;
 
     for (f = getLoop(tm, ip, globalShape); f != NULL; f = f->peer) {
         if (!f->vmprivate)
             continue;
-        printf("fragment %p:\nENTRY: ", f);
+        printf("fragment %p:\nENTRY: ", (void*)f);
         ti = (TreeInfo*)f->vmprivate;
         if (looped)
             JS_ASSERT(ti->nStackTypes == length);
         for (unsigned i = 0; i < ti->nStackTypes; i++)
             printf("S%d ", ti->stackTypeMap()[i]);
         for (unsigned i = 0; i < ti->nGlobalTypes(); i++)
             printf("G%d ", ti->globalTypeMap()[i]);
         printf("\n");
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -75,17 +75,17 @@ namespace nanojit
 	    }
 
 	public:
 		DeadCodeFilter(LirFilter *in, const CallInfo *f) : LirFilter(in), functions(f) {}
 		LInsp read() {
 			for (;;) {
 				LInsp i = in->read();
 				if (!i || i->isGuard() || i->isBranch()
-					|| i->isCall() && !i->isCse(functions)
+					|| (i->isCall() && !i->isCse(functions))
 					|| !ignoreInstruction(i))
 					return i;
 			}
 		}
 	};
 
 #ifdef NJ_VERBOSE
 	class VerboseBlockReader: public LirFilter
@@ -405,18 +405,18 @@ namespace nanojit
 
 	#ifdef _DEBUG
 	
 	void Assembler::resourceConsistencyCheck()
 	{
 		if (error()) return;
 
 #ifdef NANOJIT_IA32
-        NanoAssert(_allocator.active[FST0] && _fpuStkDepth == -1 ||
-            !_allocator.active[FST0] && _fpuStkDepth == 0);
+        NanoAssert((_allocator.active[FST0] && _fpuStkDepth == -1) ||
+            (!_allocator.active[FST0] && _fpuStkDepth == 0));
 #endif
 		
         AR &ar = _activation;
 		// check AR entries
 		NanoAssert(ar.highwatermark < NJ_MAX_STACK_ENTRY);
 		LIns* ins = 0;
 		RegAlloc* regs = &_allocator;
 		for(uint32_t i = ar.lowwatermark; i < ar.tos; i++)
@@ -571,18 +571,18 @@ namespace nanojit
 		// if we didn't have a reservation, allocate one now
         if (!resv)
 			resv = reserveAlloc(i);
 
         r = resv->reg;
 
 #ifdef AVMPLUS_IA32
         if (r != UnknownReg && 
-            ((rmask(r)&XmmRegs) && !(allow&XmmRegs) ||
-                 (rmask(r)&x87Regs) && !(allow&x87Regs)))
+            (((rmask(r)&XmmRegs) && !(allow&XmmRegs)) ||
+                 ((rmask(r)&x87Regs) && !(allow&x87Regs))))
         {
             // x87 <-> xmm copy required
             //_nvprof("fpu-evict",1);
             evict(r);
             r = UnknownReg;
         }
 #endif
 
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -497,17 +497,17 @@ namespace nanojit
 			iop = i->opcode();
 		}
 		while (is_trace_skip_tramp(iop)||iop==LIR_2);
 		_i = i;
 		return cur;
 	}
 
 	bool FASTCALL isCmp(LOpcode c) {
-		return c >= LIR_eq && c <= LIR_uge || c >= LIR_feq && c <= LIR_fge;
+		return (c >= LIR_eq && c <= LIR_uge) || (c >= LIR_feq && c <= LIR_fge);
 	}
     
 	bool FASTCALL isCond(LOpcode c) {
 		return (c == LIR_ov) || (c == LIR_cs) || isCmp(c);
 	}
 
     bool FASTCALL isFloat(LOpcode c) {
         switch (c) {
@@ -565,17 +565,17 @@ namespace nanojit
 
 	bool FASTCALL isCse(LOpcode op) {
 		op = LOpcode(op & ~LIR64);
 		return op >= LIR_ldcs && op <= LIR_uge;
 	}
 
     bool LIns::isCse(const CallInfo *functions) const
     { 
-		return nanojit::isCse(u.code) || isCall() && callInfo()->_cse;
+		return nanojit::isCse(u.code) || (isCall() && callInfo()->_cse);
     }
 
 	void LIns::setimm16(int32_t x)
 	{
 		NanoAssert(isS16(x));
 		i.imm16 = int16_t(x);
 	}
 
@@ -955,17 +955,17 @@ namespace nanojit
 					return oprnd2;
 				else if (v == LIR_eq && oprnd1->isop(LIR_or) && 
 					oprnd1->oprnd2()->isconst() &&
 					oprnd1->oprnd2()->constval() != 0) {
 					// (x or c) != 0 if c != 0
 					return insImm(0);
 				}
 			}
-			else if (c == -1 || c == 1 && oprnd1->isCmp()) {
+			else if (c == -1 || (c == 1 && oprnd1->isCmp())) {
 				if (v == LIR_or) {
 					// x | -1 = -1, cmp | 1 = 1
 					return oprnd2;
 				}
 				else if (v == LIR_and) {
 					// x & -1 = x, cmp & 1 = cmp
 					return oprnd1;
 				}
@@ -981,17 +981,17 @@ namespace nanojit
 
 		return out->ins2(v, oprnd1, oprnd2);
 	}
 
 	LIns* ExprFilter::insGuard(LOpcode v, LInsp c, LInsp x)
 	{
 		if (v == LIR_xt || v == LIR_xf) {
 			if (c->isconst()) {
-				if (v == LIR_xt && !c->constval() || v == LIR_xf && c->constval()) {
+				if ((v == LIR_xt && !c->constval()) || (v == LIR_xf && c->constval())) {
 					return 0; // no guard needed
 				}
 				else {
 #ifdef JS_TRACER
 					// We're emitting a guard that will always fail. Any code
 					// emitted after this guard is dead code. We could
 					// silently optimize out the rest of the emitted code, but
 					// this could indicate a performance problem or other bug,
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -444,17 +444,17 @@ namespace nanojit
         else if (op == LIR_fcall || op == LIR_fcalli) {
             prefer &= rmask(FST0);
         }
         else if (op == LIR_param) {
             uint32_t max_regs = max_abi_regs[_thisfrag->lirbuf->abi];
             if (i->imm8() < max_regs)
     			prefer &= rmask(Register(i->imm8()));
         }
-        else if (op == LIR_callh || op == LIR_rsh && i->oprnd1()->opcode()==LIR_callh) {
+        else if (op == LIR_callh || (op == LIR_rsh && i->oprnd1()->opcode()==LIR_callh)) {
             prefer &= rmask(retRegs[1]);
         }
         else if (i->isCmp()) {
 			prefer &= AllowableFlagRegs;
         }
         else if (i->isconst()) {
             prefer &= ScratchRegs;
         }
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -373,17 +373,17 @@ namespace nanojit
 #define TEST(d,s)	do { count_alu(); ALU(0x85,d,s);				asm_output("test %s,%s",gpn(d),gpn(s)); } while(0)
 #define CMP(l,r)	do { count_alu(); ALU(0x3b, (l),(r));			asm_output("cmp %s,%s",gpn(l),gpn(r)); } while(0)
 #define CMPi(r,i)	do { count_alu(); ALUi(0x3d,r,i);				asm_output("cmp %s,%d",gpn(r),i); } while(0)
 
 #define MR(d,s)		do { count_mov(); ALU(0x8b,d,s);				asm_output("mov %s,%s",gpn(d),gpn(s)); } while(0)
 #define LEA(r,d,b)	do { count_alu(); ALUm(0x8d, r,d,b);			asm_output("lea %s,%d(%s)",gpn(r),d,gpn(b)); } while(0)
 // lea %r, d(%i*4)
 // This addressing mode is not supported by the MODRMSIB macro.
-#define LEAmi4(r,d,i) do { count_alu(); IMM32(d); *(--_nIns) = (2<<6)|(i<<3)|5; *(--_nIns) = (0<<6)|(r<<3)|4; *(--_nIns) = 0x8d;                    asm_output("lea %s, %p(%s*4)", gpn(r), d, gpn(i)); } while(0)
+#define LEAmi4(r,d,i) do { count_alu(); IMM32(d); *(--_nIns) = (2<<6)|(i<<3)|5; *(--_nIns) = (0<<6)|(r<<3)|4; *(--_nIns) = 0x8d;                    asm_output("lea %s, %p(%s*4)", gpn(r), (void*)d, gpn(i)); } while(0)
 
 #define SETE(r)		do { count_alu(); ALU2(0x0f94,(r),(r));			asm_output("sete %s",gpn(r)); } while(0)
 #define SETNP(r)	do { count_alu(); ALU2(0x0f9B,(r),(r));			asm_output("setnp %s",gpn(r)); } while(0)
 #define SETL(r)		do { count_alu(); ALU2(0x0f9C,(r),(r));			asm_output("setl %s",gpn(r)); } while(0)
 #define SETLE(r)	do { count_alu(); ALU2(0x0f9E,(r),(r));			asm_output("setle %s",gpn(r)); } while(0)
 #define SETG(r)		do { count_alu(); ALU2(0x0f9F,(r),(r));			asm_output("setg %s",gpn(r)); } while(0)
 #define SETGE(r)	do { count_alu(); ALU2(0x0f9D,(r),(r));			asm_output("setge %s",gpn(r)); } while(0)
 #define SETB(r)     do { count_alu(); ALU2(0x0f92,(r),(r));          asm_output("setb %s",gpn(r)); } while(0)
@@ -413,17 +413,17 @@ namespace nanojit
 #define LD(reg,disp,base)	do { 	\
 	count_ld();\
 	ALUm(0x8b,reg,disp,base);	\
 	asm_output("mov %s,%d(%s)",gpn(reg),disp,gpn(base)); } while(0)
 
 #define LDdm(reg,addr) do {		\
 	count_ld();                 \
 	ALUdm(0x8b,reg,addr);		\
-	asm_output("mov %s,0(%lx)",gpn(reg),addr); \
+	asm_output("mov %s,0(%lx)",gpn(reg),(unsigned long)addr); \
 	} while (0)
 
 
 #define SIBIDX(n)	"1248"[n]
 
 #define LDsib(reg,disp,base,index,scale) do {	\
 	count_ld();                                 \
 	ALUsib(0x8b,reg,base,index,scale,disp);		\
@@ -431,34 +431,34 @@ namespace nanojit
 	} while (0)
 
 // load 16-bit, sign extend
 #define LD16S(r,d,b) do { count_ld(); ALU2m(0x0fbf,r,d,b); asm_output("movsx %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
 	
 // load 16-bit, zero extend
 #define LD16Z(r,d,b) do { count_ld(); ALU2m(0x0fb7,r,d,b); asm_output("movsz %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
 
-#define LD16Zdm(r,addr) do { count_ld(); ALU2dm(0x0fb7,r,addr); asm_output("movsz %s,0(%lx)", gpn(r),addr); } while (0)
+#define LD16Zdm(r,addr) do { count_ld(); ALU2dm(0x0fb7,r,addr); asm_output("movsz %s,0(%lx)", gpn(r),(unsigned long)addr); } while (0)
 
 #define LD16Zsib(r,disp,base,index,scale) do {	\
 	count_ld();                                 \
 	ALU2sib(0x0fb7,r,base,index,scale,disp);	\
 	asm_output("movsz %s,%d(%s+%s*%c)",gpn(r),disp,gpn(base),gpn(index),SIBIDX(scale)); \
 	} while (0)
 
 // load 8-bit, zero extend
 // note, only 5-bit offsets (!) are supported for this, but that's all we need at the moment
 // (movzx actually allows larger offsets mode but 5-bit gives us advantage in Thumb mode)
 #define LD8Z(r,d,b)	do { NanoAssert((d)>=0&&(d)<=31); ALU2m(0x0fb6,r,d,b); asm_output("movzx %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
 
 #define LD8Zdm(r,addr) do { \
 	count_ld(); \
 	NanoAssert((d)>=0&&(d)<=31); \
 	ALU2dm(0x0fb6,r,addr); \
-	asm_output("movzx %s,0(%lx)", gpn(r),addr); \
+	asm_output("movzx %s,0(%lx)", gpn(r),(long unsigned)addr); \
 	} while(0)
 
 #define LD8Zsib(r,disp,base,index,scale) do {	\
 	count_ld();                                 \
 	NanoAssert((d)>=0&&(d)<=31);				\
 	ALU2sib(0x0fb6,r,base,index,scale,disp);	\
 	asm_output("movzx %s,%d(%s+%s*%c)",gpn(r),disp,gpn(base),gpn(index),SIBIDX(scale)); \
 	} while(0)
@@ -777,17 +777,17 @@ namespace nanojit
 // floating point unit
 #define FPUc(o)								\
 		underrunProtect(2);					\
 		*(--_nIns) = ((uint8_t)(o)&0xff);		\
 		*(--_nIns) = (uint8_t)(((o)>>8)&0xff)
 
 #define FPU(o,r)							\
 		underrunProtect(2);					\
-		*(--_nIns) = uint8_t(((uint8_t)(o)&0xff) | r&7);\
+		*(--_nIns) = uint8_t(((uint8_t)(o)&0xff) | (r&7));\
 		*(--_nIns) = (uint8_t)(((o)>>8)&0xff)
 
 #define FPUm(o,d,b)							\
 		underrunProtect(7);					\
 		MODRMm((uint8_t)(o), d, b);			\
 		*(--_nIns) = (uint8_t)((o)>>8)
 
 #define TEST_AH(i) do { 							\
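
The Nativei386.h changes adjust asm_output format arguments: %lx expects an unsigned long, so integer addresses are cast accordingly (and LEAmi4's displacement is cast to void* for its %p), while the FPU macro parenthesizes r&7 to make the grouping inside the | expression explicit. A minimal sketch of the %lx mismatch, illustrative only:

    // g++ -Wall -Wformat -c format_lx.cpp
    #include <cstdint>
    #include <cstdio>

    void show(int32_t addr) {
        // Passing int32_t straight to %lx trips -Wformat (and is undefined
        // where int and long differ in size); the cast fixes the type.
        printf("mov eax,0(%lx)\n", (unsigned long)addr);
    }
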
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -210,17 +210,17 @@ GetLine(FILE *file, const char * prompt)
             return NULL;
         if (linep[0] != '\0')
             add_history(linep);
         return linep;
     }
 #endif
     size_t len = 0;
     if (*prompt != '\0') {
-        fprintf(gOutFile, prompt);
+        fprintf(gOutFile, "%s", prompt);
         fflush(gOutFile);
     }
     size = 80;
     buffer = (char *) malloc(size);
     if (!buffer)
         return NULL;
     char *current = buffer;
     while (fgets(current, size - len, file)) {
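
The shell fix is the classic format-string hardening: fprintf(gOutFile, prompt) treats prompt as a format string, so any '%' in it would be interpreted as a conversion specifier; GCC's -Wformat-security flags such calls. Routing the text through "%s" prints it verbatim. A minimal sketch, illustrative only:

    // g++ -Wall -Wformat -Wformat-security -c prompt.cpp
    #include <cstdio>

    void print_prompt(FILE* out, const char* prompt) {
        // fprintf(out, prompt);     // -Wformat-security: format not a literal;
        //                           // a '%' in prompt would be undefined behavior
        fprintf(out, "%s", prompt);  // prints prompt verbatim
    }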