Merge.
authorRobert Sayre <sayrer@gmail.com>
Tue, 02 Nov 2010 18:53:40 -0700
changeset 57690 7ec0a71652a6f0314eb49852ef93e71d0a0649f0
parent 57689 1d0e006769b046fd42c2b3283a99c9a4fd09572f (current diff)
parent 57688 279e8fdbc3468f060c0575a0fc8bfd9bced7ebdf (diff)
child 57691 06f233f9259baf090850943340a9f54f7c3068ae
push id1
push usershaver@mozilla.com
push dateTue, 04 Jan 2011 17:58:04 +0000
milestone2.0b8pre
Merge.
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/testBug593559.js
@@ -0,0 +1,10 @@
+var gen = (function () {yield})();
+var t = gen.throw;
+
+try {
+    new t;
+} catch (e) {
+    actual = "" + e;
+}
+assertEq(actual, "TypeError: Generator.prototype.throw called on incompatible Object");
+
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/testBug607659.js
@@ -0,0 +1,8 @@
+var g = 0;
+Object.defineProperty(RegExp.prototype, 'test', { get:function() { ++g } });
+function f() {
+    for (var i = 0; i < 100; ++i)
+        /a/.exec('a');
+}
+f();
+assertEq(g, 0);
--- a/js/src/jit-test/tests/basic/testProxyConstructors.js
+++ b/js/src/jit-test/tests/basic/testProxyConstructors.js
@@ -1,9 +1,16 @@
+// |jit-test| error: ExitCleanly
+
 // proxies can return primitives
 assertEq(new (Proxy.createFunction({}, function(){}, function(){})), undefined);
 
 x = Proxy.createFunction((function () {}), Uint16Array, wrap)
 new(wrap(x))
 
 // proxies can return the callee
 var x = Proxy.createFunction({}, function (q) { return q; });
 new x(x);
+
+// not an error
+new (Proxy.createFunction({}, "".indexOf));
+
+throw "ExitCleanly"
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -4413,16 +4413,27 @@ JS_PUBLIC_API(JSScript *)
 JS_CompileUCScript(JSContext *cx, JSObject *obj, const jschar *chars, size_t length,
                    const char *filename, uintN lineno)
 {
     JS_THREADSAFE_ASSERT(cx->compartment != cx->runtime->defaultCompartment);
     return JS_CompileUCScriptForPrincipals(cx, obj, NULL, chars, length, filename, lineno);
 }
 
 JS_PUBLIC_API(JSScript *)
+JS_CompileScriptForPrincipalsVersion(JSContext *cx, JSObject *obj,
+                                     JSPrincipals *principals,
+                                     const char *bytes, size_t length,
+                                     const char *filename, uintN lineno,
+                                     JSVersion version)
+{
+    AutoVersionAPI ava(cx, version);
+    return JS_CompileScriptForPrincipals(cx, obj, principals, bytes, length, filename, lineno);   
+}
+
+JS_PUBLIC_API(JSScript *)
 JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj,
                               JSPrincipals *principals,
                               const char *bytes, size_t length,
                               const char *filename, uintN lineno)
 {
     JS_THREADSAFE_ASSERT(cx->compartment != cx->runtime->defaultCompartment);
     CHECK_REQUEST(cx);
 
@@ -4534,16 +4545,24 @@ JS_CompileFileHandleForPrincipals(JSCont
         js_DestroyScript(cx, script);
         script = NULL;
     }
     LAST_FRAME_CHECKS(cx, script);
     return script;
 }
 
 JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandleForPrincipalsVersion(JSContext *cx, JSObject *obj, const char *filename,
+                                         FILE *file, JSPrincipals *principals, JSVersion version)
+{
+    AutoVersionAPI ava(cx, version);
+    return JS_CompileFileHandleForPrincipals(cx, obj, filename, file, principals);
+}
+
+JS_PUBLIC_API(JSScript *)
 JS_CompileFileHandle(JSContext *cx, JSObject *obj, const char *filename, FILE *file)
 {
     JS_THREADSAFE_ASSERT(cx->compartment != cx->runtime->defaultCompartment);
     return JS_CompileFileHandleForPrincipals(cx, obj, filename, file, NULL);
 }
 
 JS_PUBLIC_API(JSObject *)
 JS_NewScriptObject(JSContext *cx, JSScript *script)
@@ -4776,16 +4795,25 @@ JS_ExecuteScript(JSContext *cx, JSObject
     /* This should receive only scripts handed out via the JSAPI. */
     JS_ASSERT(script == JSScript::emptyScript() || script->u.object);
     ok = Execute(cx, obj, script, NULL, 0, Valueify(rval));
     LAST_FRAME_CHECKS(cx, ok);
     return ok;
 }
 
 JS_PUBLIC_API(JSBool)
+JS_ExecuteScriptVersion(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval,
+                        JSVersion version)
+{
+    AutoVersionAPI ava(cx, version);
+    return JS_ExecuteScript(cx, obj, script, rval);
+}
+
+
+JS_PUBLIC_API(JSBool)
 JS_EvaluateUCScriptForPrincipalsVersion(JSContext *cx, JSObject *obj,
                                         JSPrincipals *principals,
                                         const jschar *chars, uintN length,
                                         const char *filename, uintN lineno,
                                         jsval *rval, JSVersion version)
 {
     AutoVersionAPI avi(cx, version);
     return JS_EvaluateUCScriptForPrincipals(cx, obj, principals, chars, length, filename, lineno,
@@ -4927,19 +4955,30 @@ JS_New(JSContext *cx, JSObject *ctor, ui
     if (!cx->stack().pushInvokeArgs(cx, argc, &args))
         return NULL;
 
     args.callee().setObject(*ctor);
     args.thisv().setNull();
     memcpy(args.argv(), argv, argc * sizeof(jsval));
 
     bool ok = InvokeConstructor(cx, args);
-    JSObject *obj = (ok && args.rval().isObject())
-                    ? &args.rval().toObject()
-                    : NULL;
+
+    JSObject *obj = NULL;
+    if (ok) {
+        if (args.rval().isObject()) {
+            obj = &args.rval().toObject();
+        } else {
+            /*
+             * Although constructors may return primitives (via proxies), this
+             * API is asking for an object, so we report an error.
+             */
+            JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_NEW_RESULT,
+                                 js_ValueToPrintableString(cx, args.rval()));
+        }
+    }
 
     LAST_FRAME_CHECKS(cx, ok);
     return obj;
 }
 
 JS_PUBLIC_API(JSOperationCallback)
 JS_SetOperationCallback(JSContext *cx, JSOperationCallback callback)
 {
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -480,26 +480,26 @@ extern JS_PUBLIC_DATA(jsid) JSID_EMPTY;
                                            set of the same-named property in an
                                            object that delegates to a prototype
                                            containing this property */
 #define JSPROP_INDEX            0x80    /* name is actually (jsint) index */
 #define JSPROP_SHORTID          0x100   /* set in JSPropertyDescriptor.attrs
                                            if getters/setters use a shortid */
 
 /* Function flags, set in JSFunctionSpec and passed to JS_NewFunction etc. */
-#define JSFUN_CONSTRUCTOR       0x02    /* native that can be called as a ctor
-                                           without creating a this object */
 #define JSFUN_LAMBDA            0x08    /* expressed, not declared, function */
 #define JSFUN_HEAVYWEIGHT       0x80    /* activation requires a Call object */
 
 #define JSFUN_HEAVYWEIGHT_TEST(f)  ((f) & JSFUN_HEAVYWEIGHT)
 
 #define JSFUN_PRIMITIVE_THIS  0x0100    /* |this| may be a primitive value */
-
-#define JSFUN_FLAGS_MASK      0x07fa    /* overlay JSFUN_* attributes --
+#define JSFUN_CONSTRUCTOR     0x0200    /* native that can be called as a ctor
+                                           without creating a this object */
+
+#define JSFUN_FLAGS_MASK      0x07f8    /* overlay JSFUN_* attributes --
                                            bits 12-15 are used internally to
                                            flag interpreted functions */
 
 #define JSFUN_STUB_GSOPS      0x1000    /* use JS_PropertyStub getter/setter
                                            instead of defaulting to class gsops
                                            for property holding function */
 
 /*
@@ -2403,16 +2403,23 @@ JS_CompileScript(JSContext *cx, JSObject
 
 extern JS_PUBLIC_API(JSScript *)
 JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj,
                               JSPrincipals *principals,
                               const char *bytes, size_t length,
                               const char *filename, uintN lineno);
 
 extern JS_PUBLIC_API(JSScript *)
+JS_CompileScriptForPrincipalsVersion(JSContext *cx, JSObject *obj,
+                                     JSPrincipals *principals,
+                                     const char *bytes, size_t length,
+                                     const char *filename, uintN lineno,
+                                     JSVersion version);
+
+extern JS_PUBLIC_API(JSScript *)
 JS_CompileUCScript(JSContext *cx, JSObject *obj,
                    const jschar *chars, size_t length,
                    const char *filename, uintN lineno);
 
 extern JS_PUBLIC_API(JSScript *)
 JS_CompileUCScriptForPrincipals(JSContext *cx, JSObject *obj,
                                 JSPrincipals *principals,
                                 const jschar *chars, size_t length,
@@ -2432,16 +2439,21 @@ extern JS_PUBLIC_API(JSScript *)
 JS_CompileFileHandle(JSContext *cx, JSObject *obj, const char *filename,
                      FILE *fh);
 
 extern JS_PUBLIC_API(JSScript *)
 JS_CompileFileHandleForPrincipals(JSContext *cx, JSObject *obj,
                                   const char *filename, FILE *fh,
                                   JSPrincipals *principals);
 
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandleForPrincipalsVersion(JSContext *cx, JSObject *obj,
+                                         const char *filename, FILE *fh,
+                                         JSPrincipals *principals);
+
 /*
  * NB: you must use JS_NewScriptObject and root a pointer to its return value
  * in order to keep a JSScript and its atoms safe from garbage collection after
  * creating the script via JS_Compile* and before a JS_ExecuteScript* call.
  * E.g., and without error checks:
  *
  *    JSScript *script = JS_CompileFile(cx, global, filename);
  *    JSObject *scrobj = JS_NewScriptObject(cx, script);
@@ -2549,16 +2561,20 @@ JS_DecompileFunctionBody(JSContext *cx, 
  * obvious names, too, so would not tend to be used.  The JS_SetOption call,
  * OTOH, can be more easily hacked into existing code that does not depend on
  * the bug; such code can continue to use the familiar JS_EvaluateScript,
  * etc., entry points.
  */
 extern JS_PUBLIC_API(JSBool)
 JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval);
 
+extern JS_PUBLIC_API(JSBool)
+JS_ExecuteScriptVersion(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval,
+                        JSVersion version);
+
 /*
  * Execute either the function-defining prolog of a script, or the script's
  * main body, but not both.
  */
 typedef enum JSExecPart { JSEXEC_PROLOG, JSEXEC_MAIN } JSExecPart;
 
 extern JS_PUBLIC_API(JSBool)
 JS_EvaluateScript(JSContext *cx, JSObject *obj,
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -867,28 +867,48 @@ InvokeSessionGuard::start(JSContext *cx,
 bool
 ExternalInvoke(JSContext *cx, const Value &thisv, const Value &fval,
                uintN argc, Value *argv, Value *rval)
 {
     LeaveTrace(cx);
 
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, argc, &args))
-        return JS_FALSE;
+        return false;
 
     args.callee() = fval;
     args.thisv() = thisv;
     memcpy(args.argv(), argv, argc * sizeof(Value));
 
     if (!Invoke(cx, args, 0))
-        return JS_FALSE;
+        return false;
 
     *rval = args.rval();
-
-    return JS_TRUE;
+    return true;
+}
+
+bool
+ExternalInvokeConstructor(JSContext *cx, const Value &fval, uintN argc, Value *argv,
+                          Value *rval)
+{
+    LeaveTrace(cx);
+
+    InvokeArgsGuard args;
+    if (!cx->stack().pushInvokeArgs(cx, argc, &args))
+        return false;
+
+    args.callee() = fval;
+    args.thisv().setMagic(JS_THIS_POISON);
+    memcpy(args.argv(), argv, argc * sizeof(Value));
+
+    if (!InvokeConstructor(cx, args))
+        return false;
+
+    *rval = args.rval();
+    return true;
 }
 
 bool
 ExternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, const Value &fval,
                  JSAccessMode mode, uintN argc, Value *argv, Value *rval)
 {
     LeaveTrace(cx);
 
--- a/js/src/jsinterp.h
+++ b/js/src/jsinterp.h
@@ -968,16 +968,20 @@ ExternalGetOrSet(JSContext *cx, JSObject
 
 extern JS_REQUIRES_STACK bool
 InvokeConstructor(JSContext *cx, const CallArgs &args);
 
 extern JS_REQUIRES_STACK bool
 InvokeConstructorWithGivenThis(JSContext *cx, JSObject *thisobj, const Value &fval,
                                uintN argc, Value *argv, Value *rval);
 
+extern bool
+ExternalInvokeConstructor(JSContext *cx, const Value &fval, uintN argc, Value *argv,
+                          Value *rval);
+
 /*
  * Performs a direct eval for the given arguments, which must correspond to the
  * currently-executing stack frame, which must be a script frame.  evalfun must
  * be the built-in eval function and must correspond to the callee in vp[0].
  * When this function succeeds it returns the result in *vp, adjusts the JS
  * stack pointer, and returns true.
  */
 extern JS_REQUIRES_STACK bool
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -5531,54 +5531,54 @@ js_DeleteProperty(JSContext *cx, JSObjec
         }
     }
 
     return obj->removeProperty(cx, id) && js_SuppressDeletedProperty(cx, obj, id);
 }
 
 namespace js {
 
+JSObject *
+HasNativeMethod(JSObject *obj, jsid methodid, Native native)
+{
+    const Shape *shape = obj->nativeLookup(methodid);
+    if (!shape || !shape->hasDefaultGetter() || !obj->containsSlot(shape->slot))
+        return NULL;
+
+    const Value &fval = obj->nativeGetSlot(shape->slot);
+    JSObject *funobj;
+    if (!IsFunctionObject(fval, &funobj) || funobj->getFunctionPrivate()->maybeNative() != native)
+        return NULL;
+
+    return funobj;
+}
+
 /*
  * When we have an object of a builtin class, we don't quite know what its
  * valueOf/toString methods are, since these methods may have been overwritten
  * or shadowed. However, we can still do better than js_TryMethod by
  * hard-coding the necessary properties for us to find the native we expect.
  *
  * TODO: a per-thread shape-based cache would be faster and simpler.
  */
 static JS_ALWAYS_INLINE bool
-ClassMethodIsNative(JSContext *cx, JSObject *obj, Class *classp, jsid methodid,
+ClassMethodIsNative(JSContext *cx, JSObject *obj, Class *clasp, jsid methodid,
                     Native native)
 {
-    JS_ASSERT(obj->getClass() == classp);
-
-    const Shape *shape = obj->nativeLookup(methodid);
-    JSObject *pobj = obj;
-
-    if (!shape) {
-        pobj = obj->getProto();
-
-        if (pobj && pobj->getClass() == classp)
-            shape = pobj->nativeLookup(methodid);
-    }
-
-    if (shape && shape->hasDefaultGetter() && pobj->containsSlot(shape->slot)) {
-        const Value &fval = pobj->nativeGetSlot(shape->slot);
-
-        JSObject *funobj;
-        if (IsFunctionObject(fval, &funobj)) {
-            JSFunction *fun = funobj->getFunctionPrivate();
-            if (fun->maybeNative() == native)
-                return true;
-        }
-    }
-    return false;
-}
-
-JSBool
+    JS_ASSERT(obj->getClass() == clasp);
+
+    if (HasNativeMethod(obj, methodid, native))
+        return true;
+
+    JSObject *pobj = obj->getProto();
+    return pobj && pobj->getClass() == clasp &&
+           HasNativeMethod(pobj, methodid, native);
+}
+
+bool
 DefaultValue(JSContext *cx, JSObject *obj, JSType hint, Value *vp)
 {
     JS_ASSERT(hint != JSTYPE_OBJECT && hint != JSTYPE_FUNCTION);
 
     Value v = ObjectValue(*obj);
     if (hint == JSTYPE_STRING) {
         /* Optimize (new String(...)).toString(). */
         if (obj->getClass() == &js_StringClass &&
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -1607,17 +1607,24 @@ js_SetPropertyHelper(JSContext *cx, JSOb
  * that obj is locked and this function always unlocks obj on return.
  */
 extern JSBool
 js_SetNativeAttributes(JSContext *cx, JSObject *obj, js::Shape *shape,
                        uintN attrs);
 
 namespace js {
 
-extern JSBool
+/*
+ * If obj has a data property methodid which is a function object for the given
+ * native, return that function object. Otherwise, return NULL.
+ */
+extern JSObject *
+HasNativeMethod(JSObject *obj, jsid methodid, Native native);
+
+extern bool
 DefaultValue(JSContext *cx, JSObject *obj, JSType hint, Value *vp);
 
 extern JSBool
 CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
             js::Value *vp, uintN *attrsp);
 
 } /* namespace js */
 
--- a/js/src/jsproxy.cpp
+++ b/js/src/jsproxy.cpp
@@ -249,24 +249,18 @@ JSProxyHandler::call(JSContext *cx, JSOb
 }
 
 bool
 JSProxyHandler::construct(JSContext *cx, JSObject *proxy,
                           uintN argc, Value *argv, Value *rval)
 {
     JS_ASSERT(OperationInProgress(cx, proxy));
     Value fval = GetConstruct(proxy);
-    if (fval.isUndefined()) {
-        fval = GetCall(proxy);
-        JSObject *obj = JS_New(cx, &fval.toObject(), argc, Jsvalify(argv));
-        if (!obj)
-            return false;
-        rval->setObject(*obj);
-        return true;
-    }
+    if (fval.isUndefined())
+        return ExternalInvokeConstructor(cx, GetCall(proxy), argc, argv, rval);
 
     /*
      * FIXME: The Proxy proposal says to pass undefined as the this argument,
      * but primitive this is not supported yet. See bug 576644.
      */
     JS_ASSERT(fval.isObject());
     JSObject *thisobj = fval.toObject().getGlobal();
     return ExternalInvoke(cx, thisobj, fval, argc, argv, rval);
--- a/js/src/jsscope.h
+++ b/js/src/jsscope.h
@@ -637,16 +637,17 @@ inline js::Shape **
 JSObject::nativeSearch(jsid id, bool adding)
 {
     return js::Shape::search(&lastProp, id, adding);
 }
 
 inline const js::Shape *
 JSObject::nativeLookup(jsid id)
 {
+    JS_ASSERT(isNative());
     return SHAPE_FETCH(nativeSearch(id));
 }
 
 inline bool
 JSObject::nativeContains(jsid id)
 {
     return nativeLookup(id) != NULL;
 }
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -5351,19 +5351,20 @@ TraceRecorder::emitIf(jsbytecode* pc, bo
     } else {
         exitType = BRANCH_EXIT;
     }
     /*
      * Put 'x' in a form suitable for a guard/branch condition if it isn't
      * already.  This lets us detect if the comparison is optimized to 0 or 1,
      * in which case we avoid the guard() call below.
      */
-    ensureCond(&x, &cond);
-    if (!x->isImmI())
+    if (!x->isImmI()) {
+        ensureCond(&x, &cond);
         guard(cond, x, exitType);
+    }
 }
 
 /* Emit code for a fused IFEQ/IFNE. */
 JS_REQUIRES_STACK void
 TraceRecorder::fuseIf(jsbytecode* pc, bool cond, LIns* x)
 {
     if (*pc == JSOP_IFEQ || *pc == JSOP_IFNE) {
         emitIf(pc, cond, x);
@@ -11133,31 +11134,24 @@ TraceRecorder::callNative(uintN argc, JS
                          pc[JSOP_CALL_LENGTH + JSOP_TRACE_LENGTH] == JSOP_IFEQ) ||
                         (pc[JSOP_CALL_LENGTH] == JSOP_NOT &&
                          pc[JSOP_CALL_LENGTH + JSOP_NOT_LENGTH] == JSOP_IFEQ) ||
                         (pc[JSOP_CALL_LENGTH] == JSOP_TRACE &&
                          pc[JSOP_CALL_LENGTH + JSOP_TRACE_LENGTH] == JSOP_NOT &&
                          pc[JSOP_CALL_LENGTH + JSOP_TRACE_LENGTH + JSOP_NOT_LENGTH] == JSOP_IFEQ))
                     {
                         JSObject* proto;
-                        Value test;
-                        jsid testId = ATOM_TO_JSID(cx->runtime->atomState.testAtom);
+                        jsid id = ATOM_TO_JSID(cx->runtime->atomState.testAtom);
                         /* Get RegExp.prototype.test() and check it hasn't been changed. */
-                        if (js_GetClassPrototype(cx, funobj->getParent(), JSProto_RegExp, &proto) &&
-                            js_GetProperty(cx, proto, testId, &test) &&
-                            IsFunctionObject(test))
-                        {
-                            JSObject* tmpfunobj = &test.toObject();
-                            JSFunction* tmpfun = GET_FUNCTION_PRIVATE(cx, tmpfunobj);
-                            Native tmpnative = tmpfun->maybeNative();
-                            if (tmpnative == js_regexp_test) {
-                                vp[0] = test;
-                                funobj = tmpfunobj;
-                                fun = tmpfun;
-                                native = tmpnative;
+                        if (js_GetClassPrototype(cx, NULL, JSProto_RegExp, &proto)) {
+                            if (JSObject *tmp = HasNativeMethod(proto, id, js_regexp_test)) {
+                                vp[0] = ObjectValue(*tmp);
+                                funobj = tmp;
+                                fun = tmp->getFunctionPrivate();
+                                native = js_regexp_test;
                             }
                         }
                     }
                 }
             }
         }
         break;
 
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -62,22 +62,22 @@
 using namespace js;
 using namespace js::mjit;
 #if defined(JS_POLYIC) || defined(JS_MONOIC)
 using namespace js::mjit::ic;
 #endif
 
 #define ADD_CALLSITE(stub) if (debugMode) addCallSite(__LINE__, (stub))
 
-#define RETURN_IF_OOM(retval)                    \
-    JS_BEGIN_MACRO                               \
-        if (masm.oom() || stubcc.masm.oom()) {   \
-            js_ReportOutOfMemory(cx);            \
-            return retval;                       \
-        }                                        \
+#define RETURN_IF_OOM(retval)                                   \
+    JS_BEGIN_MACRO                                              \
+        if (oomInVector || masm.oom() || stubcc.masm.oom()) {   \
+            js_ReportOutOfMemory(cx);                           \
+            return retval;                                      \
+        }                                                       \
     JS_END_MACRO
 
 #if defined(JS_METHODJIT_SPEW)
 static const char *OpcodeNames[] = {
 # define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) #name,
 # include "jsopcode.tbl"
 # undef OPDEF
 };
@@ -89,31 +89,32 @@ mjit::Compiler::Compiler(JSContext *cx, 
     script(fp->script()),
     scopeChain(&fp->scopeChain()),
     globalObj(scopeChain->getGlobal()),
     fun(fp->isFunctionFrame() && !fp->isEvalFrame()
         ? fp->fun()
         : NULL),
     isConstructing(fp->isConstructing()),
     analysis(NULL), jumpMap(NULL), frame(cx, script, masm),
-    branchPatches(ContextAllocPolicy(cx)),
+    branchPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
 #if defined JS_MONOIC
-    mics(ContextAllocPolicy(cx)),
-    callICs(ContextAllocPolicy(cx)),
-    equalityICs(ContextAllocPolicy(cx)),
-    traceICs(ContextAllocPolicy(cx)),
+    mics(CompilerAllocPolicy(cx, *thisFromCtor())),
+    callICs(CompilerAllocPolicy(cx, *thisFromCtor())),
+    equalityICs(CompilerAllocPolicy(cx, *thisFromCtor())),
+    traceICs(CompilerAllocPolicy(cx, *thisFromCtor())),
 #endif
 #if defined JS_POLYIC
-    pics(ContextAllocPolicy(cx)), 
-    getElemICs(ContextAllocPolicy(cx)),
-    setElemICs(ContextAllocPolicy(cx)),
+    pics(CompilerAllocPolicy(cx, *thisFromCtor())), 
+    getElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
+    setElemICs(CompilerAllocPolicy(cx, *thisFromCtor())),
 #endif
-    callPatches(ContextAllocPolicy(cx)),
-    callSites(ContextAllocPolicy(cx)), 
-    doubleList(ContextAllocPolicy(cx)),
+    callPatches(CompilerAllocPolicy(cx, *thisFromCtor())),
+    callSites(CompilerAllocPolicy(cx, *thisFromCtor())), 
+    doubleList(CompilerAllocPolicy(cx, *thisFromCtor())),
+    oomInVector(false),
     stubcc(cx, *thisFromCtor(), frame, script),
     debugMode(cx->compartment->debugMode)
 #if defined JS_TRACER
     ,addTraceHints(cx->traceJitEnabled)
 #endif
 {
 }
 
@@ -4432,17 +4433,18 @@ mjit::Compiler::jumpAndTrace(Jump j, jsb
     ic.stubEntry = stubcc.masm.label();
     ic.jumpTarget = target;
     ic.traceHint = j;
     if (slow)
         ic.slowTraceHint = *slow;
 
     uint16 index = GET_UINT16(target);
     if (traceICs.length() <= index)
-        traceICs.resize(index+1);
+        if (!traceICs.resize(index+1))
+            return false;
 # endif
 
     Label traceStart = stubcc.masm.label();
 
     stubcc.linkExitDirect(j, traceStart);
     if (slow)
         slow->linkTo(traceStart, &stubcc.masm);
 # if JS_MONOIC
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -50,17 +50,16 @@
 #include "MonoIC.h"
 #include "PolyIC.h"
 
 namespace js {
 namespace mjit {
 
 class Compiler : public BaseCompiler
 {
-
     struct BranchPatch {
         BranchPatch(const Jump &j, jsbytecode *pc)
           : jump(j), pc(pc)
         { }
 
         Jump jump;
         jsbytecode *pc;
     };
@@ -264,38 +263,41 @@ class Compiler : public BaseCompiler
     JSObject *globalObj;
     JSFunction *fun;
     bool isConstructing;
     analyze::Script *analysis;
     Label *jumpMap;
     jsbytecode *PC;
     Assembler masm;
     FrameState frame;
-    js::Vector<BranchPatch, 64> branchPatches;
+    js::Vector<BranchPatch, 64, CompilerAllocPolicy> branchPatches;
 #if defined JS_MONOIC
-    js::Vector<MICGenInfo, 64> mics;
-    js::Vector<CallGenInfo, 64> callICs;
-    js::Vector<EqualityGenInfo, 64> equalityICs;
-    js::Vector<TraceGenInfo, 64> traceICs;
+    js::Vector<MICGenInfo, 64, CompilerAllocPolicy> mics;
+    js::Vector<CallGenInfo, 64, CompilerAllocPolicy> callICs;
+    js::Vector<EqualityGenInfo, 64, CompilerAllocPolicy> equalityICs;
+    js::Vector<TraceGenInfo, 64, CompilerAllocPolicy> traceICs;
 #endif
 #if defined JS_POLYIC
-    js::Vector<PICGenInfo, 16> pics;
-    js::Vector<GetElementICInfo> getElemICs;
-    js::Vector<SetElementICInfo> setElemICs;
+    js::Vector<PICGenInfo, 16, CompilerAllocPolicy> pics;
+    js::Vector<GetElementICInfo, 16, CompilerAllocPolicy> getElemICs;
+    js::Vector<SetElementICInfo, 16, CompilerAllocPolicy> setElemICs;
 #endif
-    js::Vector<CallPatchInfo, 64> callPatches;
-    js::Vector<InternalCallSite, 64> callSites;
-    js::Vector<DoublePatch, 16> doubleList;
+    js::Vector<CallPatchInfo, 64, CompilerAllocPolicy> callPatches;
+    js::Vector<InternalCallSite, 64, CompilerAllocPolicy> callSites;
+    js::Vector<DoublePatch, 16, CompilerAllocPolicy> doubleList;
     StubCompiler stubcc;
     Label invokeLabel;
     Label arityLabel;
     bool debugMode;
     bool addTraceHints;
+    bool oomInVector;       // True if we have OOM'd appending to a vector. 
 
     Compiler *thisFromCtor() { return this; }
+
+    friend class CompilerAllocPolicy;
   public:
     // Special atom index used to indicate that the atom is 'length'. This
     // follows interpreter usage in JSOP_LENGTH.
     enum { LengthAtomIndex = uint32(-2) };
 
     Compiler(JSContext *cx, JSStackFrame *fp);
     ~Compiler();
 
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -36,29 +36,35 @@
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "MethodJIT.h"
 #include "Logging.h"
 #include "assembler/jit/ExecutableAllocator.h"
 #include "jstracer.h"
 #include "BaseAssembler.h"
+#include "Compiler.h"
 #include "MonoIC.h"
 #include "PolyIC.h"
 #include "TrampolineCompiler.h"
 #include "jscntxtinlines.h"
 #include "jscompartment.h"
 #include "jsscope.h"
 
 #include "jsgcinlines.h"
 
 using namespace js;
 using namespace js::mjit;
 
 
+js::mjit::CompilerAllocPolicy::CompilerAllocPolicy(JSContext *cx, Compiler &compiler)
+: ContextAllocPolicy(cx),
+  oomFlag(&compiler.oomInVector)
+{
+}
 void
 JSStackFrame::methodjitStaticAsserts()
 {
         /* Static assert for x86 trampolines in MethodJIT.cpp. */
 #if defined(JS_CPU_X86)
         JS_STATIC_ASSERT(offsetof(JSStackFrame, rval_)     == 0x18);
         JS_STATIC_ASSERT(offsetof(JSStackFrame, rval_) + 4 == 0x1C);
         JS_STATIC_ASSERT(offsetof(JSStackFrame, ncode_)    == 0x14);
--- a/js/src/methodjit/MethodJIT.h
+++ b/js/src/methodjit/MethodJIT.h
@@ -199,16 +199,44 @@ class JaegerCompartment {
 
 #if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
     Trampolines::TrampolinePtr forceReturnFastTrampoline() const {
         return trampolines.forceReturnFast;
     }
 #endif
 };
 
+/*
+ * Allocation policy for compiler jstl objects. The goal is to free the
+ * compiler from having to check and propagate OOM after every time we
+ * append to a vector. We do this by reporting OOM to the engine and
+ * setting a flag on the compiler when OOM occurs. The compiler is required
+ * to check for OOM only before trying to use the contents of the list.
+ */
+class CompilerAllocPolicy : public ContextAllocPolicy
+{
+    bool *oomFlag;
+
+    void *checkAlloc(void *p) {
+        if (!p)
+            *oomFlag = true;
+        return p;
+    }
+
+  public:
+    CompilerAllocPolicy(JSContext *cx, bool *oomFlag)
+    : ContextAllocPolicy(cx), oomFlag(oomFlag) {}
+    CompilerAllocPolicy(JSContext *cx, Compiler &compiler);
+
+    void *malloc(size_t bytes) { return checkAlloc(ContextAllocPolicy::malloc(bytes)); }
+    void *realloc(void *p, size_t bytes) {
+        return checkAlloc(ContextAllocPolicy::realloc(p, bytes));
+    }
+};
+
 namespace ic {
 # if defined JS_POLYIC
     struct PICInfo;
     struct GetElementIC;
     struct SetElementIC;
 # endif
 # if defined JS_MONOIC
     struct MICInfo;
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -43,18 +43,26 @@
 #include "Compiler.h"
 #include "assembler/assembler/LinkBuffer.h"
 #include "FrameState-inl.h"
 
 using namespace js;
 using namespace mjit;
 
 StubCompiler::StubCompiler(JSContext *cx, mjit::Compiler &cc, FrameState &frame, JSScript *script)
-  : cx(cx), cc(cc), frame(frame), script(script), generation(1), lastGeneration(0),
-    exits(SystemAllocPolicy()), joins(SystemAllocPolicy()), jumpList(SystemAllocPolicy())
+: cx(cx),
+  cc(cc),
+  frame(frame),
+  script(script),
+  generation(1),
+  lastGeneration(0),
+  exits(CompilerAllocPolicy(cx, cc)),
+  joins(CompilerAllocPolicy(cx, cc)),
+  scriptJoins(CompilerAllocPolicy(cx, cc)),
+  jumpList(SystemAllocPolicy())
 {
 #ifdef DEBUG
     masm.setSpewPath(true);
 #endif
 }
 
 bool
 StubCompiler::init(uint32 nargs)
--- a/js/src/methodjit/StubCompiler.h
+++ b/js/src/methodjit/StubCompiler.h
@@ -83,20 +83,19 @@ class StubCompiler
 
   public:
     Assembler masm;
 
   private:
     uint32 generation;
     uint32 lastGeneration;
 
-    /* :TODO: oom check */
-    Vector<CrossPatch, 64, SystemAllocPolicy> exits;
-    Vector<CrossPatch, 64, SystemAllocPolicy> joins;
-    Vector<CrossJumpInScript, 64, SystemAllocPolicy> scriptJoins;
+    Vector<CrossPatch, 64, mjit::CompilerAllocPolicy> exits;
+    Vector<CrossPatch, 64, mjit::CompilerAllocPolicy> joins;
+    Vector<CrossJumpInScript, 64, mjit::CompilerAllocPolicy> scriptJoins;
     Vector<Jump, 8, SystemAllocPolicy> jumpList;
 
   public:
     StubCompiler(JSContext *cx, mjit::Compiler &cc, FrameState &frame, JSScript *script);
 
     bool init(uint32 nargs);
 
     size_t size() {
--- a/js/src/nanojit-import-rev
+++ b/js/src/nanojit-import-rev
@@ -1,1 +1,1 @@
-5012f8eb917c654d4756025fd90f1f98a1f07626
+18279f425cd8e3f63f7a13cd1b1b62cba57b7ecd
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -1397,24 +1397,34 @@ namespace nanojit
         verbose_only( _thisfrag->nStaticExits++; )
         countlir_xcc();
         // We only support cmp with guard right now, also assume it is 'close'
         // and only emit the branch.
         NIns* exit = asm_exit(ins); // does intersectRegisterState()
         asm_branch(ins->opcode() == LIR_xf, cond, exit);
     }
 
+    // Helper for the nop-insertion hardening feature: yields an interval
+    // such that at most one no-op instruction is inserted every 128-1151 bytes.
+    static inline uint32_t noiseForNopInsertion(Noise* n) {
+        return n->getValue(1023) + 128;
+    }
+
     void Assembler::gen(LirFilter* reader)
     {
         NanoAssert(_thisfrag->nStaticExits == 0);
 
         InsList pending_lives(alloc);
 
         NanoAssert(!error());
 
+        // compiler hardening setup
+        NIns* priorIns = _nIns;
+        int32_t nopInsertTrigger = hardenNopInsertion(_config) ? noiseForNopInsertion(_noise): 0;
+
         // What's going on here: we're visiting all the LIR instructions in
         // the buffer, working strictly backwards in buffer-order, and
         // generating machine instructions for them as we go.
         //
         // For each LIns, we first check if it's live.  If so we mark its
         // operands as also live, and then generate code for it *if
         // necessary*.  It may not be necessary if the instruction is an
         // expression and code has already been generated for all its uses in
@@ -1468,16 +1478,38 @@ namespace nanojit
             // it is printed after the LIR and native code, exactly when the
             // post-regstate should be shown.
             if ((_logc->lcbits & LC_Native) && (_logc->lcbits & LC_Activation))
                 printActivationState();
             if ((_logc->lcbits & LC_Native) && (_logc->lcbits & LC_RegAlloc))
                 printRegState();
 #endif
 
+            // compiler hardening technique that inserts no-op instructions in the compiled method when nopInsertTrigger < 0
+            if (hardenNopInsertion(_config))
+            {
+                size_t delta = (uintptr_t)priorIns - (uintptr_t)_nIns; // # bytes that have been emitted since last go-around
+
+                // Note: priorIns may point into a previous code block if a
+                // new block was allocated since the last iteration; the
+                // codeList check below accounts for that case.
+                // if no codeList then we know priorIns and _nIns are on same page, otherwise make sure priorIns was not in the previous code block
+                if (!codeList || !codeList->isInBlock(priorIns)) {
+                    NanoAssert(delta < VMPI_getVMPageSize()); // sanity check
+                    nopInsertTrigger -= delta;
+                    if (nopInsertTrigger < 0)
+                    {
+                        nopInsertTrigger = noiseForNopInsertion(_noise);
+                        asm_insert_random_nop();
+                        PERFM_NVPROF("hardening:nop-insert", 1);
+                    }
+                }
+                priorIns = _nIns;
+            }
+
             LOpcode op = ins->opcode();
             switch (op)
             {
                 default:
                     NanoAssertMsgf(false, "unsupported LIR instruction: %d\n", op);
                     break;
 
                 case LIR_regfence:
@@ -2028,32 +2060,32 @@ namespace nanojit
                     if (vtuneHandle) {
                         uint32_t currentLine = (uint32_t) ins->oprnd1()->immI();
                         vtuneLine(vtuneHandle, currentLine, _nIns);
                     }
                     break;
                 }
                #endif // VMCFG_VTUNE
 
-                case LIR_comment: 
+                case LIR_comment:
                     // Do nothing.
                     break;
             }
 
 #ifdef NJ_VERBOSE
             // We do final LIR printing inside this loop to avoid printing
             // dead LIR instructions.  We print the LIns after generating the
             // code.  This ensures that the LIns will appear in debug output
             // *before* the native code, because Assembler::outputf()
             // prints everything in reverse.
             //
             if (_logc->lcbits & LC_AfterDCE) {
                 InsBuf b;
                 LInsPrinter* printer = _thisfrag->lirbuf->printer;
-                if (ins->isop(LIR_comment)) 
+                if (ins->isop(LIR_comment))
                     outputf("%s", printer->formatIns(&b, ins));
                 else
                     outputf("    %s", printer->formatIns(&b, ins));
             }
 #endif
 
             if (error())
                 return;
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -482,16 +482,17 @@ namespace nanojit
 #endif
             void        asm_nongp_copy(Register r, Register s);
             void        asm_call(LIns*);
             Register    asm_binop_rhs_reg(LIns* ins);
             NIns*       asm_branch(bool branchOnFalse, LIns* cond, NIns* targ);
             NIns*       asm_branch_ov(LOpcode op, NIns* targ);
             void        asm_switch(LIns* ins, NIns* target);
             void        asm_jtbl(LIns* ins, NIns** table);
+            void        asm_insert_random_nop();
             void        emitJumpTable(SwitchInfo* si, NIns* target);
             void        assignSavedRegs();
             void        reserveSavedRegs();
             void        assignParamRegs();
             void        handleLoopCarriedExprs(InsList& pending_lives);
 
             // platform specific implementation (see NativeXXX.cpp file)
             void        nInit(AvmCore *);
--- a/js/src/nanojit/CodeAlloc.h
+++ b/js/src/nanojit/CodeAlloc.h
@@ -83,16 +83,20 @@ namespace nanojit
         /** return the starting address for this block only */
         NIns* start() { return &code[0]; }
 
         /** return just the usable size of this block */
         size_t size() const { return uintptr_t(end) - uintptr_t(&code[0]); }
 
         /** return the whole size of this block including overhead */
         size_t blockSize() const { return uintptr_t(end) - uintptr_t(this); }
+
+    public:
+        /** true if the given NIns is contained within this block */
+        bool isInBlock(NIns* n) { return (n >= this->start() && n < this->end); }
     };
 
     /**
      * Code memory allocator.
      * Long lived manager for many code blocks,
      * manages interaction with an underlying code memory allocator,
      * setting page permissions, api's for allocating and freeing
      * individual blocks of code memory (for methods, stubs, or compiled
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -2569,17 +2569,19 @@ namespace nanojit
         LIns* ins;
         NanoAssert(isCseOpcode(op));
         uint32_t k;
         ins = find2(op, a, b, k);
         if (!ins) {
             ins = out->ins2(op, a, b);
             addNL(LIns2, ins, k);
         } else if (ins->isCmp()) {
-            if (knownCmpValues.containsKey(ins)) {
+            // XXX: temporarily disabled because it exposed latent problems
+            // that caused bug 607856.  See also bug 609129.
+            if (0 && knownCmpValues.containsKey(ins)) {
                 // We've seen this comparison before, and it was previously
                 // used in a guard, so we know what its value must be at this
                 // point.  Replace it with a constant.
                 NanoAssert(ins->isCmp());
                 bool cmpValue = knownCmpValues.get(ins);
                 return insImmI(cmpValue ? 1 : 0);
             }
         }
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -2915,10 +2915,14 @@ void Assembler::swapCodeChunks() {
         _nExitSlot = exitStart;
     SWAP(NIns*, _nIns, _nExitIns);
     SWAP(NIns*, _nSlot, _nExitSlot);        // this one is ARM-specific
     SWAP(NIns*, codeStart, exitStart);
     SWAP(NIns*, codeEnd, exitEnd);
     verbose_only( SWAP(size_t, codeBytes, exitBytes); )
 }
 
+void Assembler::asm_insert_random_nop() {
+    NanoAssert(0); // not supported
+}
+
 }
 #endif /* FEATURE_NANOJIT */
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -266,16 +266,17 @@ verbose_only( extern const char* shiftNa
                                                                                 \
     void        BranchWithLink(NIns* addr);                                     \
     inline void BLX(Register addr, bool chk = true);                            \
     void        JMP_far(NIns*);                                                 \
     void        B_cond_chk(ConditionCode, NIns*, bool);                         \
     void        underrunProtect(int bytes);                                     \
     void        nativePageReset();                                              \
     void        nativePageSetup();                                              \
+    bool        hardenNopInsertion(const Config& /*c*/) { return false; }       \
     void        asm_immd_nochk(Register, int32_t, int32_t);                     \
     void        asm_regarg(ArgType, LIns*, Register);                           \
     void        asm_stkarg(LIns* p, int stkd);                                  \
     void        asm_cmpi(Register, int32_t imm);                                \
     void        asm_ldr_chk(Register d, Register b, int32_t off, bool chk);     \
     int32_t     asm_str(Register rt, Register rr, int32_t off);                 \
     void        asm_cmp(LIns *cond);                                            \
     void        asm_cmpd(LIns *cond);                                           \
--- a/js/src/nanojit/NativeCommon.h
+++ b/js/src/nanojit/NativeCommon.h
@@ -70,36 +70,53 @@ namespace nanojit
     struct Register {
         uint32_t n;     // the register number
     };
 
     static inline uint32_t REGNUM(Register r) {
         return r.n;
     }
 
-    static inline Register REGINC(Register r) {
-        r.n++;
+    static inline Register operator+(Register r, int c)
+    {
+        r.n += c;
         return r;
     }
 
     static inline bool operator==(Register r1, Register r2)
     {
         return r1.n == r2.n;
     }
 
     static inline bool operator!=(Register r1, Register r2)
     {
         return r1.n != r2.n;
     }
+
+    static inline bool operator<=(Register r1, Register r2)
+    {
+        return r1.n <= r2.n;
+    }
+
+    static inline bool operator<(Register r1, Register r2)
+    {
+        return r1.n < r2.n;
+    }
+
+    static inline bool operator>=(Register r1, Register r2)
+    {
+        return r1.n >= r2.n;
+    }
+
+    static inline bool operator>(Register r1, Register r2)
+    {
+        return r1.n > r2.n;
+    }
 #else
     typedef uint32_t Register;
 
     static inline uint32_t REGNUM(Register r) {
         return r;
     }
-
-    static inline Register REGINC(Register r) {
-        return r+1;
-    }
 #endif
 } // namespace nanojit
 
 #endif // __nanojit_NativeCommon__
--- a/js/src/nanojit/NativeMIPS.cpp
+++ b/js/src/nanojit/NativeMIPS.cpp
@@ -2057,11 +2057,17 @@ namespace nanojit
         if (!_nExitSlot)
             _nExitSlot = exitStart;
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, _nSlot, _nExitSlot);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
+
+    void
+    Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 }
 
 #endif // FEATURE_NANOJIT && NANOJIT_MIPS
--- a/js/src/nanojit/NativeMIPS.h
+++ b/js/src/nanojit/NativeMIPS.h
@@ -237,16 +237,17 @@ namespace nanojit {
 
 // REQ: Platform specific declarations to include in Assembler class
 #define DECLARE_PLATFORM_ASSEMBLER()                                    \
     const static Register argRegs[4];                                   \
     const static Register retRegs[2];                                   \
     void nativePageSetup(void);                                         \
     void nativePageReset(void);                                         \
     void underrunProtect(int bytes);                                    \
+    bool hardenNopInsertion(const Config& /*c*/) { return false; }      \
     NIns *_nSlot;                                                       \
     NIns *_nExitSlot;                                                   \
     int max_out_args;                                                   \
     Register ovreg;                                                     \
                                                                         \
     void asm_ldst(int op, Register r, int offset, Register b);          \
     void asm_ldst64(bool store, Register fr, int offset, Register b);   \
     void asm_store_imm64(LIns *value, int dr, Register rbase);          \
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -732,31 +732,31 @@ namespace nanojit
             uint32_t j = argc - i - 1;
             ArgType ty = argTypes[j];
             LIns* arg = ins->arg(j);
             NanoAssert(ty != ARGTYPE_V);
             if (ty != ARGTYPE_D) {
                 // GP arg
                 if (r <= R10) {
                     asm_regarg(ty, arg, r);
-                    r = Register(r + 1);
+                    r = r + 1;
                     param_size += sizeof(void*);
                 } else {
                     // put arg on stack
                     TODO(stack_int32);
                 }
             } else {
                 // double
                 if (fr <= F13) {
                     asm_regarg(ty, arg, fr);
-                    fr = Register(fr + 1);
+                    fr = fr + 1;
                 #ifdef NANOJIT_64BIT
-                    r = Register(r + 1);
+                    r = r + 1;
                 #else
-                    r = Register(r + 2); // skip 2 gpr's
+                    r = r + 2; // Skip 2 GPRs.
                 #endif
                     param_size += sizeof(double);
                 } else {
                     // put arg on stack
                     TODO(stack_double);
                 }
             }
         }
@@ -1330,67 +1330,68 @@ namespace nanojit
             NanoAssert(((branch[0] & 0x3fff)<<2) == 0);
             branch[0] |= (bd & 0x3fff) << 2;
             TODO(patch_bc);
         }
     #ifdef NANOJIT_64BIT
         // patch 64bit branch
         else if ((branch[0] & ~(31<<21)) == PPC_addis) {
             // general branch, using lis,ori,sldi,oris,ori to load the const 64bit addr.
-            Register rd = Register((branch[0] >> 21) & 31);
+            Register rd = { (branch[0] >> 21) & 31 };
             NanoAssert(branch[1] == PPC_ori  | GPR(rd)<<21 | GPR(rd)<<16);
             NanoAssert(branch[3] == PPC_oris | GPR(rd)<<21 | GPR(rd)<<16);
             NanoAssert(branch[4] == PPC_ori  | GPR(rd)<<21 | GPR(rd)<<16);
             uint64_t imm = uintptr_t(target);
             uint32_t lo = uint32_t(imm);
             uint32_t hi = uint32_t(imm>>32);
             branch[0] = PPC_addis | GPR(rd)<<21 |               uint16_t(hi>>16);
             branch[1] = PPC_ori   | GPR(rd)<<21 | GPR(rd)<<16 | uint16_t(hi);
             branch[3] = PPC_oris  | GPR(rd)<<21 | GPR(rd)<<16 | uint16_t(lo>>16);
             branch[4] = PPC_ori   | GPR(rd)<<21 | GPR(rd)<<16 | uint16_t(lo);
         }
     #else // NANOJIT_64BIT
         // patch 32bit branch
         else if ((branch[0] & ~(31<<21)) == PPC_addis) {
             // general branch, using lis,ori to load the const addr.
             // patch a lis,ori sequence with a 32bit value
-            Register rd = Register((branch[0] >> 21) & 31);
+            Register rd = { (branch[0] >> 21) & 31 };
             NanoAssert(branch[1] == PPC_ori | GPR(rd)<<21 | GPR(rd)<<16);
             uint32_t imm = uint32_t(target);
             branch[0] = PPC_addis | GPR(rd)<<21 | uint16_t(imm >> 16); // lis rd, imm >> 16
             branch[1] = PPC_ori | GPR(rd)<<21 | GPR(rd)<<16 | uint16_t(imm); // ori rd, rd, imm & 0xffff
         }
     #endif // !NANOJIT_64BIT
         else {
             TODO(unknown_patch);
         }
     }
 
     static int cntzlw(int set) {
         // On PowerPC, prefer higher registers, to minimize
         // size of nonvolatile area that must be saved.
-        register Register i;
+        register uint32_t i;
         #ifdef __GNUC__
         asm ("cntlzw %0,%1" : "=r" (i) : "r" (set));
         #else // __GNUC__
         # error("unsupported compiler")
         #endif // __GNUC__
         return 31-i;
     }
 
     Register Assembler::nRegisterAllocFromSet(RegisterMask set) {
-        Register i;
+        uint32_t i;
         // note, deliberate truncation of 64->32 bits
         if (set & 0xffffffff) {
-            i = Register(cntzlw(int(set))); // gp reg
+            i = cntzlw(int(set)); // gp reg
         } else {
-            i = Register(32+cntzlw(int(set>>32))); // fp reg
+            i = 32 + cntzlw(int(set>>32)); // fp reg
         }
-        _allocator.free &= ~rmask(i);
-        return i;
+        Register r = { i };
+        _allocator.free &= ~rmask(r);
+        return r;
     }
 
     void Assembler::nRegisterResetAll(RegAlloc &regs) {
         regs.clear();
         regs.free = SavedRegs | 0x1ff8 /* R3-12 */ | 0x3ffe00000000LL /* F1-13 */;
     }
 
 #ifdef NANOJIT_64BIT
@@ -1444,11 +1445,15 @@ namespace nanojit
             codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         }
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
 
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 } // namespace nanojit
 
 #endif // FEATURE_NANOJIT && NANOJIT_PPC
--- a/js/src/nanojit/NativePPC.h
+++ b/js/src/nanojit/NativePPC.h
@@ -47,26 +47,28 @@
 #define count_prolog() _nvprof("ppc-prolog",1); count_instr();
 #define count_imt() _nvprof("ppc-imt",1) count_instr()
 #else
 #define count_instr()
 #define count_prolog()
 #define count_imt()
 #endif
 
+#include "NativeCommon.h"
+
 namespace nanojit
 {
 #define NJ_MAX_STACK_ENTRY              4096
 #define NJ_ALIGN_STACK                  16
 
 #define NJ_JTBL_SUPPORTED               1
 #define NJ_EXPANDED_LOADSTORE_SUPPORTED 0
 #define NJ_F2I_SUPPORTED                0
 #define NJ_SOFTFLOAT_SUPPORTED          0
-#define NJ_DIVI_SUPPORTED               0    
+#define NJ_DIVI_SUPPORTED               0
 
     enum ConditionRegister {
         CR0 = 0,
         CR1 = 1,
         CR2 = 2,
         CR3 = 3,
         CR4 = 4,
         CR5 = 5,
@@ -83,17 +85,16 @@ namespace nanojit
     };
 
     // this is the BO field in condition instructions
     enum ConditionOption {
         BO_true = 12, // branch if true
         BO_false = 4, // branch if false
     };
 
-    typedef uint32_t Register;
     static const Register
         // general purpose 32bit regs
         R0   = { 0 },   // scratch or the value 0, excluded from regalloc
         SP   = { 1 },   // stack pointer, excluded from regalloc
         R2   = { 2 },   // scratch on MacOSX, rtoc pointer elsewhere
         R3   = { 3 },   // this, return value, MSW of int64 return
         R4   = { 4 },   // param, LSW of int64 return
         R5   = { 5 },   // param
@@ -161,25 +162,19 @@ namespace nanojit
 
         // special purpose registers (SPR)
         Rxer = { 1 },
         Rlr  = { 8 },
         Rctr = { 9 },
 
         deprecated_UnknownReg = { 127 };    // XXX: remove eventually, see bug 538924
 
-    static const uint32_t FirstRegNum = R0;
-    static const uint32_t LastRegNum = F31;
-}
+    static const uint32_t FirstRegNum = 0; // R0
+    static const uint32_t LastRegNum = 64; // F31
 
-#define NJ_USE_UINT32_REGISTER 1
-#include "NativeCommon.h"
-
-namespace nanojit
-{
     enum PpcOpcode {
         // opcodes
         PPC_add     = 0x7C000214, // add
         PPC_addo    = 0x7C000614, // add & OE=1 (can set OV)
         PPC_addi    = 0x38000000, // add immediate
         PPC_addis   = 0x3C000000, // add immediate shifted
         PPC_and     = 0x7C000038, // and
         PPC_andc    = 0x7C000078, // and with compliment
@@ -290,16 +285,17 @@ namespace nanojit
     #define DECL_PPC64()
 #endif
 
     #define DECLARE_PLATFORM_ASSEMBLER()                                    \
         const static Register argRegs[8], retRegs[2];                       \
         void underrunProtect(int bytes);                                    \
         void nativePageReset();                                             \
         void nativePageSetup();                                             \
+        bool hardenNopInsertion(const Config& /*c*/) { return false; }      \
         void br(NIns *addr, int link);                                      \
         void br_far(NIns *addr, int link);                                  \
         void asm_regarg(ArgType, LIns*, Register);                          \
         void asm_li(Register r, int32_t imm);                               \
         void asm_li32(Register r, int32_t imm);                             \
         void asm_li64(Register r, uint64_t imm);                            \
         void asm_cmp(LOpcode op, LIns *a, LIns *b, ConditionRegister);      \
         NIns* asm_branch_far(bool onfalse, LIns *cond, NIns * const targ);  \
@@ -315,18 +311,18 @@ namespace nanojit
     const size_t LARGEST_BRANCH_PATCH = 4 * sizeof(NIns);
 
     #define EMIT1(ins, fmt, ...) do {\
         underrunProtect(4);\
         *(--_nIns) = (NIns) (ins);\
         asm_output(fmt, ##__VA_ARGS__);\
         } while (0) /* no semi */
 
-    #define GPR(r) (r)
-    #define FPR(r) ((r)&31)
+    #define GPR(r) REGNUM(r)
+    #define FPR(r) (REGNUM(r) & 31)
 
     #define Bx(li,aa,lk) EMIT1(PPC_b | ((li)&0xffffff)<<2 | (aa)<<1 | (lk),\
         "b%s%s %p", (lk)?"l":"", (aa)?"a":"", _nIns+(li))
 
     #define B(li)   Bx(li,0,0)
     #define BA(li)  Bx(li,1,0)
     #define BL(li)  Bx(li,0,1)
     #define BLA(li) Bx(li,1,1)
@@ -427,17 +423,17 @@ namespace nanojit
 
     #define NEG(rd, rs)  EMIT1(PPC_neg | GPR(rd)<<21 | GPR(rs)<<16, "neg %s,%s", gpn(rd), gpn(rs))
     #define FNEG(rd,rs)  EMIT1(PPC_fneg | FPR(rd)<<21 | FPR(rs)<<11, "fneg %s,%s", gpn(rd), gpn(rs))
     #define FMR(rd,rb)   EMIT1(PPC_fmr  | FPR(rd)<<21 | FPR(rb)<<11, "fmr %s,%s", gpn(rd), gpn(rb))
     #define FCFID(rd,rs) EMIT1(PPC_fcfid | FPR(rd)<<21 | FPR(rs)<<11, "fcfid %s,%s", gpn(rd), gpn(rs))
 
     #define JMP(addr) br(addr, 0)
 
-    #define SPR(spr) ((R##spr)>>5|(R##spr&31)<<5)
+    #define SPR(spr) (REGNUM(R##spr)>>5|(REGNUM(R##spr)&31)<<5)
     #define MTSPR(spr,rs) EMIT1(PPC_mtspr | GPR(rs)<<21 | SPR(spr)<<11,\
         "mt%s %s", #spr, gpn(rs))
     #define MFSPR(rd,spr) EMIT1(PPC_mfspr | GPR(rd)<<21 | SPR(spr)<<11,\
         "mf%s %s", #spr, gpn(rd))
 
     #define MTXER(r) MTSPR(xer, r)
     #define MTLR(r)  MTSPR(lr,  r)
     #define MTCTR(r) MTSPR(ctr, r)
--- a/js/src/nanojit/NativeSH4.cpp
+++ b/js/src/nanojit/NativeSH4.cpp
@@ -3229,10 +3229,15 @@ namespace nanojit
         if (_nIns - nb_bytes < codeStart) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
 
             // This jump will call underrunProtect again, but since we're on
             // a new page large enough to host its code, nothing will happen.
             JMP(pc, true);
         }
     }
+
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 }
 #endif // FEATURE_NANOJIT && FEATURE_SH4
--- a/js/src/nanojit/NativeSH4.h
+++ b/js/src/nanojit/NativeSH4.h
@@ -162,16 +162,17 @@ namespace nanojit
     const static int NumArgDregs;                                       \
     const static Register argRegs[4], retRegs[2];                       \
     const static Register argDregs[4], retDregs[1];                     \
     int max_stack_args;                                                 \
                                                                         \
     void nativePageReset();                                             \
     void nativePageSetup();                                             \
     void underrunProtect(int);                                          \
+    bool hardenNopInsertion(const Config& /*c*/) { return false; }      \
     bool simplifyOpcode(LOpcode &);                                     \
                                                                         \
     NIns *asm_immi(int, Register, bool force = false);                  \
     void asm_immd(uint64_t, Register);                                  \
     void asm_immd_nochk(uint64_t, Register);                            \
     void asm_arg_regi(LIns*, Register);                                 \
     void asm_arg_regd(LIns*, Register);                                 \
     void asm_arg_stacki(LIns*, int);                                    \
--- a/js/src/nanojit/NativeSparc.cpp
+++ b/js/src/nanojit/NativeSparc.cpp
@@ -235,20 +235,20 @@ namespace nanojit
         LoadOperation(rs1, rs2, rd, 0x20, "ldf");
     }
     inline void Assembler::LDFI(Register rs1, int32_t simm13, Register rd) {
         LoadOperationI(rs1, simm13, rd, 0x20, "ldf");
     }
 
     inline void Assembler::LDDF32(Register rs1, int32_t immI, Register rd) {
         if (isIMM13(immI+4)) {
-            LDFI(rs1, immI+4, REGINC(rd));
+            LDFI(rs1, immI+4, rd + 1);
             LDFI(rs1, immI, rd);
         } else {
-            LDF(rs1, L0, REGINC(rd));
+            LDF(rs1, L0, rd + 1);
             SET32(immI+4, L0);
             LDF(rs1, L0, rd);
             SET32(immI, L0);
         }
     }
 
     inline void Assembler::LDUB(Register rs1, Register rs2, Register rd) {
         LoadOperation(rs1, rs2, rd,  0x1, "ldub");
@@ -444,20 +444,20 @@ namespace nanojit
         } else {
             STF(rd, L0, rs1);
             SET32(immI, L0);
         }
     }
 
     inline void Assembler::STDF32(Register rd, int32_t immI, Register rs1) {
         if (isIMM13(immI+4)) {
-            STFI(REGINC(rd), immI+4, rs1);;
+            STFI(rd + 1, immI+4, rs1);
             STFI(rd, immI, rs1);
         } else {
-            STF(REGINC(rd), L0, rs1);
+            STF(rd + 1, L0, rs1);
             SET32(immI+4, L0);
             STF(rd, L0, rs1);
             SET32(immI, L0);
         }
     }
 
     inline void Assembler::STW(Register rd, Register rs1, Register rs2) {
         Store(rd, rs1, rs2, 0x4, "st");
@@ -662,46 +662,46 @@ namespace nanojit
                 ArgType ty = argTypes[j];
                 if (ty == ARGTYPE_D) {
                     Register r = findRegFor(ins->arg(j), FpRegs);
 
                     underrunProtect(48);
                     // We might be calling a varargs function.
                     // So, make sure the GPR's are also loaded with
                     // the value, or the stack contains it.
-                    if (REGNUM(GPRIndex) <= REGNUM(O5)) {
+                    if (GPRIndex <= O5) {
                         LDSW32(SP, offset, GPRIndex);
                     }
-                    GPRIndex = REGINC(GPRIndex);
-                    if (REGNUM(GPRIndex) <= REGNUM(O5)) {
+                    GPRIndex = GPRIndex + 1;
+                    if (GPRIndex <= O5) {
                         LDSW32(SP, offset+4, GPRIndex);
                     }
-                    GPRIndex = REGINC(GPRIndex);
+                    GPRIndex = GPRIndex + 1;
                     STDF32(r, offset, SP);
                     offset += 8;
                 } else {
-                    if (REGNUM(GPRIndex) > REGNUM(O5)) {
+                    if (GPRIndex > O5) {
                         underrunProtect(12);
                         Register r = findRegFor(ins->arg(j), GpRegs);
                         STW32(r, offset, SP);
                     } else {
                         Register r = findSpecificRegFor(ins->arg(j), GPRIndex);
                     }
-                    GPRIndex = REGINC(GPRIndex);
+                    GPRIndex = GPRIndex + 1;
                     offset += 4;
                 }
             }
     }
 
     Register Assembler::nRegisterAllocFromSet(RegisterMask set)
     {
         // need to implement faster way
         Register i = G0;
         while (!(set & rmask(i)))
-            i = REGINC(i);
+            i = i + 1;
         _allocator.free &= ~rmask(i);
         return i;
     }
 
     void Assembler::nRegisterResetAll(RegAlloc& a)
     {
         a.clear();
         a.free = GpRegs | FpRegs;
@@ -1569,10 +1569,14 @@ namespace nanojit
         if (!_nExitIns)
             codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
 
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 #endif /* FEATURE_NANOJIT */
 }
--- a/js/src/nanojit/NativeSparc.h
+++ b/js/src/nanojit/NativeSparc.h
@@ -75,17 +75,17 @@ namespace nanojit
 
 #define NJ_MAX_STACK_ENTRY              8192
 #define NJ_MAX_PARAMETERS               1
 
 #define NJ_JTBL_SUPPORTED               0
 #define NJ_EXPANDED_LOADSTORE_SUPPORTED 0
 #define NJ_F2I_SUPPORTED                1
 #define NJ_SOFTFLOAT_SUPPORTED          0
-#define NJ_DIVI_SUPPORTED               0    
+#define NJ_DIVI_SUPPORTED               0
 
     const int NJ_ALIGN_STACK = 16;
 
     typedef uint32_t NIns;
 
     // Bytes of icache to flush after Assembler::patch
     const size_t LARGEST_BRANCH_PATCH = 2 * sizeof(NIns);
 
@@ -203,16 +203,17 @@ namespace nanojit
 #define DECLARE_PLATFORM_REGALLOC()
 
 #define DECLARE_PLATFORM_ASSEMBLER()    \
      const static Register argRegs[6], retRegs[1]; \
      bool has_cmov; \
      void nativePageReset(); \
      void nativePageSetup(); \
      void underrunProtect(int bytes); \
+     bool hardenNopInsertion(const Config& /*c*/) { return false; } \
      void asm_align_code(); \
      void asm_cmp(LIns *cond); \
      void asm_cmpd(LIns *cond); \
      NIns* asm_branchd(bool, LIns*, NIns*); \
      void IMM32(int32_t i) { \
          --_nIns; \
          *((int32_t*)_nIns) = i; \
      } \
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -998,25 +998,25 @@ namespace nanojit
             if ((ty == ARGTYPE_I || ty == ARGTYPE_UI || ty == ARGTYPE_Q) && arg_index < NumArgRegs) {
                 // gp arg
                 asm_regarg(ty, arg, argRegs[arg_index]);
                 arg_index++;
             }
         #ifdef _WIN64
             else if (ty == ARGTYPE_D && arg_index < NumArgRegs) {
                 // double goes in XMM reg # based on overall arg_index
-                Register rxi = { REGNUM(XMM0) + arg_index };
+                Register rxi = XMM0 + arg_index;
                 asm_regarg(ty, arg, rxi);
                 arg_index++;
             }
         #else
-            else if (ty == ARGTYPE_D && REGNUM(fr) < REGNUM(XMM8)) {
+            else if (ty == ARGTYPE_D && fr < XMM8) {
                 // double goes in next available XMM register
                 asm_regarg(ty, arg, fr);
-                fr = REGINC(fr);
+                fr = fr + 1;
             }
         #endif
             else {
                 asm_stkarg(ty, arg, stk_used);
                 stk_used += sizeof(void*);
             }
         }
 
@@ -2177,11 +2177,15 @@ namespace nanojit
             codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         }
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
 
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 } // namespace nanojit
 
 #endif // FEATURE_NANOJIT && NANOJIT_X64
--- a/js/src/nanojit/NativeX64.h
+++ b/js/src/nanojit/NativeX64.h
@@ -62,17 +62,17 @@ namespace nanojit
 {
 #define NJ_MAX_STACK_ENTRY              4096
 #define NJ_ALIGN_STACK                  16
 
 #define NJ_JTBL_SUPPORTED               1
 #define NJ_EXPANDED_LOADSTORE_SUPPORTED 1
 #define NJ_F2I_SUPPORTED                1
 #define NJ_SOFTFLOAT_SUPPORTED          0
-#define NJ_DIVI_SUPPORTED               1    
+#define NJ_DIVI_SUPPORTED               1
 
     static const Register RAX = { 0 };      // 1st int return, # of sse varargs
     static const Register RCX = { 1 };      // 4th int arg
     static const Register RDX = { 2 };      // 3rd int arg 2nd return
     static const Register RBX = { 3 };      // saved
     static const Register RSP = { 4 };      // stack ptr
     static const Register RBP = { 5 };      // frame ptr, saved, sib reqd
     static const Register RSI = { 6 };      // 2nd int arg
@@ -316,17 +316,17 @@ namespace nanojit
         X64_xorpd   = 0xC0570F4066000005LL, // 128bit xor xmm (two packed doubles)
         X64_xorps   = 0xC0570F4000000004LL, // 128bit xor xmm (four packed singles), one byte shorter
         X64_xorpsm  = 0x05570F4000000004LL, // 128bit xor xmm, [rip+disp32]
         X64_xorpsa  = 0x2504570F40000005LL, // 128bit xor xmm, [disp32]
         X64_inclmRAX= 0x00FF000000000002LL, // incl (%rax)
         X64_jmpx    = 0xC524ff4000000004LL, // jmp [d32+x*8]
         X64_jmpxb   = 0xC024ff4000000004LL, // jmp [b+x*8]
 
-        X64_movqmi  = 0x80C7480000000003LL, // 32bit signed extended to 64-bit store imm -> qword ptr[b+disp32]  
+        X64_movqmi  = 0x80C7480000000003LL, // 32bit signed extended to 64-bit store imm -> qword ptr[b+disp32]
         X64_movlmi  = 0x80C7400000000003LL, // 32bit store imm -> dword ptr[b+disp32]
         X64_movsmi  = 0x80C7406600000004LL, // 16bit store imm -> word ptr[b+disp32]
         X64_movbmi  = 0x80C6400000000003LL, // 8bit store imm -> byte ptr[b+disp32]
 
         X86_and8r   = 0xC022000000000002LL, // and rl,rh
         X86_sete    = 0xC0940F0000000003LL, // no-rex version of X64_sete
         X86_setnp   = 0xC09B0F0000000003LL  // no-rex set byte if odd parity (ordered fcmp result) (PF == 0)
     };
@@ -368,16 +368,17 @@ namespace nanojit
     #define DECLARE_PLATFORM_STATS()
     #define DECLARE_PLATFORM_REGALLOC()
 
     #define DECLARE_PLATFORM_ASSEMBLER()                                    \
         const static Register argRegs[NumArgRegs], retRegs[1];              \
         void underrunProtect(ptrdiff_t bytes);                              \
         void nativePageReset();                                             \
         void nativePageSetup();                                             \
+        bool hardenNopInsertion(const Config& /*c*/) { return false; }      \
         void asm_qbinop(LIns*);                                             \
         void MR(Register, Register);\
         void JMP(NIns*);\
         void JMPl(NIns*);\
         void emit(uint64_t op);\
         void emit8(uint64_t op, int64_t val);\
         void emit_target8(size_t underrun, uint64_t op, NIns* target);\
         void emit_target32(size_t underrun, uint64_t op, NIns* target);\
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -612,18 +612,18 @@ namespace nanojit
         underrunProtect(2);
         MODRMm(4, 0, r);
         *(--_nIns) = 0xff;
         asm_output("jmp   *(%s)", gpn(r));
     }
 
     inline void Assembler::JMP_indexed(Register x, I32 ss, NIns** addr) {
         underrunProtect(7);
-        IMM32(int32_t(addr));           
-        SIB(ss, REGNUM(x), 5);          
+        IMM32(int32_t(addr));
+        SIB(ss, REGNUM(x), 5);
         MODRM(0, 4, 4);                 // amode == addr(table + x<<ss)
         *(--_nIns) = uint8_t(0xff);     // jmp
         asm_output("jmp   *(%s*%d+%p)", gpn(x), 1 << ss, (void*)addr);
     }
 
     inline void Assembler::JE(NIns* t)   { JCC(0x04, t, "je"); }
     inline void Assembler::JNE(NIns* t)  { JCC(0x05, t, "jne"); }
     inline void Assembler::JP(NIns* t)   { JCC(0x0A, t, "jp"); }
@@ -1680,17 +1680,17 @@ namespace nanojit
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r,r);
 
         if (_config.i386_sse2) {
             // LIR_ltd and LIR_gtd are handled by the same case because
             // asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
             // for LIR_led/LIR_ged.
             switch (opcode) {
-            case LIR_eqd:   
+            case LIR_eqd:
                 if (ins->oprnd1() == ins->oprnd2()) {
                     SETNP(r);
                 } else {
                     // result = ZF & !PF, must do logic on flags
                     AND8R(r);       // and      rl,rh    rl &= rh
                     SETNPH(r);      // setnp    rh       rh = !PF
                     SETE(r);        // sete     rl       rl = ZF
                 }
@@ -2646,17 +2646,17 @@ namespace nanojit
                     if (cond->oprnd1() == cond->oprnd2()) {
                         JNP(targ);
                     } else {
                         // jp skip (2byte)
                         // je target
                         // skip: ...
                         underrunProtect(16); // underrun of 7 needed but we write 2 instr --> 16
                         NIns *skip = _nIns;
-                        JE(targ);      
+                        JE(targ);
                         at = _nIns;
                         JP(skip);
                     }
                     break;
                 case LIR_ltd:
                 case LIR_gtd:   JA(targ);       break;
                 case LIR_led:
                 case LIR_ged:   JAE(targ);      break;
@@ -2666,17 +2666,17 @@ namespace nanojit
         } else {
             if (branchOnFalse)
                 JP(targ);
             else
                 JNP(targ);
         }
 
         if (!at)
-            at = _nIns; 
+            at = _nIns;
         asm_cmpd(cond);
 
         return at;
     }
 
     // WARNING: This function cannot generate any code that will affect the
     // condition codes prior to the generation of the
     // ucomisd/fcompp/fcmop/fcom.  See asm_cmp() for more details.
@@ -2701,17 +2701,17 @@ namespace nanojit
 
             // LIR_eqd, if lhs == rhs:
             //   ucomisd       ZPC   outcome (SETNP/JNP succeeds if P==0)
             //   -------       ---   -------
             //   UNORDERED     111   SETNP/JNP fails
             //   EQUAL         100   SETNP/JNP succeeds
             //
             // LIR_eqd, if lsh != rhs;
-            //   ucomisd       ZPC   outcome (SETP/JP succeeds if P==0, 
+            //   ucomisd       ZPC   outcome (SETP/JP succeeds if P==0,
             //                                SETE/JE succeeds if Z==0)
             //   -------       ---   -------
             //   UNORDERED     111   SETP/JP succeeds (and skips to fail target)
             //   EQUAL         100   SETP/JP fails, SETE/JE succeeds
             //   GREATER_THAN  000   SETP/JP fails, SETE/JE fails
             //   LESS_THAN     001   SETP/JP fails, SETE/JE fails
             //
             // LIR_gtd:
@@ -2859,16 +2859,30 @@ namespace nanojit
         NanoAssertMsg(n<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
         // This may be in a normal code chunk or an exit code chunk.
         if (eip - n < codeStart) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             JMP(eip);
         }
     }
 
+    void Assembler::asm_insert_random_nop()
+    {
+        // one of a random nop instructions
+        uint32_t r = _noise->getValue(5);
+        switch(r)
+        {
+            case 0: MR(rEAX,rEAX);        break;
+            case 1: MR(rEDI,rEDI);        break;
+            case 2: MR(rECX,rECX);        break;
+            case 3: LEA(rECX,0,rECX);     break;
+            case 4: LEA(rESP,0,rESP);     break;
+        }
+    }
+
     void Assembler::asm_ret(LIns* ins)
     {
         genEpilogue();
 
         // Restore rESP from rEBP, undoing SUBi(SP,amt) in the prologue
         MR(SP,FP);
 
         releaseRegisters();
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -188,16 +188,17 @@ namespace nanojit
     #define JMP32 0xe9
 
     #define DECLARE_PLATFORM_ASSEMBLER()    \
         const static Register argRegs[2], retRegs[2]; \
         int32_t max_stk_args;\
         void nativePageReset();\
         void nativePageSetup();\
         void underrunProtect(int);\
+        bool hardenNopInsertion(const Config& c) { return c.harden_nop_insertion; } \
         void asm_immi(Register r, int32_t val, bool canClobberCCs);\
         void asm_stkarg(LIns* p, int32_t& stkd);\
         void asm_farg(LIns*, int32_t& stkd);\
         void asm_arg(ArgType ty, LIns* p, Register r, int32_t& stkd);\
         void asm_pusharg(LIns*);\
         void asm_cmpd(LIns *cond);\
         NIns* asm_branchd(bool, LIns*, NIns*);\
         void asm_cmp(LIns *cond); \
--- a/js/src/nanojit/njconfig.cpp
+++ b/js/src/nanojit/njconfig.cpp
@@ -88,16 +88,17 @@ namespace nanojit
 
 #ifdef NANOJIT_IA32
         int const features = getCpuFeatures();
         i386_sse2 = (features & (1<<26)) != 0;
         i386_use_cmov = (features & (1<<15)) != 0;
         i386_fixed_esp = false;
 #endif
         harden_function_alignment = false;
+        harden_nop_insertion = false;
 
 #if defined(NANOJIT_ARM)
 
         // XXX: temporarily disabled, see bug 547063.
         //NanoStaticAssert(NJ_COMPILER_ARM_ARCH >= 5 && NJ_COMPILER_ARM_ARCH <= 7);
 
         arm_arch = NJ_COMPILER_ARM_ARCH;
         arm_vfp = (arm_arch >= 7);
--- a/js/src/nanojit/njconfig.h
+++ b/js/src/nanojit/njconfig.h
@@ -92,13 +92,16 @@ namespace nanojit
         uint32_t arm_show_stats:1;
 
         // If true, use softfloat for all floating point operations,
         // whether or not an FPU is present. (ARM only for now, but might also includes MIPS in the future)
         uint32_t soft_float:1;
 
         // If true, compiler will insert a random amount of space in between functions (x86-32 only)
         uint32_t harden_function_alignment:1;
+
+        // If true, compiler will insert randomly choosen no-op instructions at random locations within a compiled method (x86-32 only)
+        uint32_t harden_nop_insertion:1;
     };
 }
 
 #endif // FEATURE_NANOJIT
 #endif // __njconfig_h__
--- a/js/src/xpconnect/loader/mozJSComponentLoader.cpp
+++ b/js/src/xpconnect/loader/mozJSComponentLoader.cpp
@@ -1113,39 +1113,38 @@ mozJSComponentLoader::GlobalForLocation(
 
             char *buf = static_cast<char*>(PR_MemMap(map, 0, fileSize32));
             if (!buf) {
                 NS_WARNING("Failed to map file");
                 JS_SetOptions(cx, oldopts);
                 return NS_ERROR_FAILURE;
             }
 
-            script = JS_CompileScriptForPrincipals(cx, global,
-                                                   jsPrincipals,
-                                                   buf, fileSize32,
-                                                   nativePath.get(), 1);
+            script = JS_CompileScriptForPrincipalsVersion(
+              cx, global, jsPrincipals, buf, fileSize32, nativePath.get(), 1,
+              JSVERSION_LATEST);
+
             PR_MemUnmap(buf, fileSize32);
 
 #else  /* HAVE_PR_MEMMAP */
 
             /**
              * No memmap implementation, so fall back to using
              * JS_CompileFileHandleForPrincipals().
              */
 
             FILE *fileHandle;
             rv = aComponentFile->OpenANSIFileDesc("r", &fileHandle);
             if (NS_FAILED(rv)) {
                 JS_SetOptions(cx, oldopts);
                 return NS_ERROR_FILE_NOT_FOUND;
             }
 
-            script = JS_CompileFileHandleForPrincipals(cx, global,
-                                                       nativePath.get(),
-                                                       fileHandle, jsPrincipals);
+            script = JS_CompileFileHandleForPrincipalsVersion(
+              cx, global, nativePath.get(), fileHandle, jsPrincipals, JSVERSION_LATEST);
 
             /* JS will close the filehandle after compilation is complete. */
 #endif /* HAVE_PR_MEMMAP */
         } else {
             nsCOMPtr<nsIIOService> ioService = do_GetIOService(&rv);
             NS_ENSURE_SUCCESS(rv, rv);
 
             nsCOMPtr<nsIChannel> scriptChannel;
@@ -1170,20 +1169,19 @@ mozJSComponentLoader::GlobalForLocation(
 
             /* read the file in one swoop */
             rv = scriptStream->Read(buf, len, &bytesRead);
             if (bytesRead != len)
                 return NS_BASE_STREAM_OSERROR;
 
             buf[len] = '\0';
 
-            script = JS_CompileScriptForPrincipals(cx, global,
-                                                   jsPrincipals,
-                                                   buf, bytesRead,
-                                                   nativePath.get(), 1);
+            script = JS_CompileScriptForPrincipalsVersion(
+              cx, global, jsPrincipals, buf, bytesRead, nativePath.get(), 1,
+              JSVERSION_LATEST);
         }
         // Propagate the exception, if one exists. Also, don't leave the stale
         // exception on this context.
         // NB: The caller must stick exception into a rooted slot (probably on
         // its context) as soon as possible to avoid GC hazards.
         if (exception) {
             JS_SetOptions(cx, oldopts);
             if (!script) {
@@ -1232,17 +1230,17 @@ mozJSComponentLoader::GlobalForLocation(
     }
 #endif
 
     // Assign aGlobal here so that it's available to recursive imports.
     // See bug 384168.
     *aGlobal = global;
 
     jsval retval;
-    if (!JS_ExecuteScript(cx, global, script, &retval)) {
+    if (!JS_ExecuteScriptVersion(cx, global, script, &retval, JSVERSION_LATEST)) {
 #ifdef DEBUG_shaver_off
         fprintf(stderr, "mJCL: failed to execute %s\n", nativePath.get());
 #endif
         *aGlobal = nsnull;
         return NS_ERROR_FAILURE;
     }
 
     /* Freed when we remove from the table. */