Merging patch for bug 502723.

author      Nick Kreeger <nick.kreeger@park.edu>
date        Mon, 06 Jul 2009 15:32:29 -0700
changeset   30042:475b8583efe4629f01df7ed048caa21f364bf52b
parent      30041:a5dfd04812d820e905a1847dc7a4283cb3562c28 (current diff)
parent      30040:a5b215383e581df8b2aadaad22e5754216176ddb (diff)
child       30043:f46e6aee13358520ea32ab998a74a304c870e630
push id     unknown
push user   unknown
push date   unknown
bugs        502723
milestone   1.9.2a1pre
--- a/content/base/public/nsContentUtils.h
+++ b/content/base/public/nsContentUtils.h
@@ -37,17 +37,17 @@
  *
  * ***** END LICENSE BLOCK ***** */
 
 /* A namespace class for static content utilities. */
 
 #ifndef nsContentUtils_h___
 #define nsContentUtils_h___
 
-#include "jspubtd.h"
+#include "jsprvtd.h"
 #include "jsnum.h"
 #include "nsAString.h"
 #include "nsIStatefulFrame.h"
 #include "nsIPref.h"
 #include "nsINodeInfo.h"
 #include "nsNodeInfoManager.h"
 #include "nsContentList.h"
 #include "nsDOMClassInfoID.h"
--- a/dom/base/nsDOMClassInfo.cpp
+++ b/dom/base/nsDOMClassInfo.cpp
@@ -9268,22 +9268,16 @@ nsHTMLSelectElementSH::SetProperty(nsIXP
   }
 
   return nsElementSH::SetProperty(wrapper, cx, obj, id, vp, _retval);
 }
 
 
 // HTMLObject/EmbedElement helper
 
-// This resolve hook makes embed.nsIFoo work as if
-// QueryInterface(Components.interfaces.nsIFoo) was called on the
-// plugin instance, the result of calling QI, assuming it's
-// successful, will be defined on the embed element as a nsIFoo
-// property.
-
 // static
 nsresult
 nsHTMLPluginObjElementSH::GetPluginInstanceIfSafe(nsIXPConnectWrappedNative *wrapper,
                                                   JSObject *obj,
                                                   nsIPluginInstance **_result)
 {
   *_result = nsnull;
 
@@ -9447,50 +9441,32 @@ nsHTMLPluginObjElementSH::SetupProtoChai
   // this.__proto__.__proto__
   //   ^      ^         ^
   //   |      |         |__ Object.prototype
   //   |      |
   //   |      |__ xpc embed wrapper proto (shared)
   //   |
   //   |__ xpc wrapped native embed node
   //
-  // pi.__proto__.__proto__
-  // ^      ^         ^
-  // |      |         |__ Object.prototype
-  // |      |
-  // |      |__ plugin proto (not shared in the xpc wrapper case)
+  // pi.__proto__
+  // ^      ^
+  // |      |__ Object.prototype
   // |
-  // |__ xpc wrapped native pi (plugin instance)
+  // |__ Plugin NPRuntime JS object wrapper
   //
   // Now, after the above prototype setup the prototype chain should
-  // look like this if the plugin had a proto (other than
-  // Object.prototype):
-  //
-  // this.__proto__.__proto__.__proto__.__proto__
-  //   ^      ^         ^         ^         ^
-  //   |      |         |         |         |__ Object.prototype
-  //   |      |         |         |
-  //   |      |         |         |__ xpc embed wrapper proto (shared)
-  //   |      |         |
-  //   |      |         |__ plugin proto (not shared in the xpc wrapper case)
-  //   |      |
-  //   |      |__ xpc wrapped native pi (plugin instance)
-  //   |
-  //   |__ xpc wrapped native embed node
-  //
-  // If the plugin's proto was Object.prototype, the prototype chain
-  // should look like this:
+  // look like this:
   //
   // this.__proto__.__proto__.__proto__
   //   ^      ^         ^         ^
   //   |      |         |         |__ Object.prototype
   //   |      |         |
   //   |      |         |__ xpc embed wrapper proto (shared)
   //   |      |
-  //   |      |__ pi (plugin instance) NPRuntime JS object wrapper
+  //   |      |__ Plugin NPRuntime JS object wrapper
   //   |
   //   |__ xpc wrapped native embed node
   //
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
@@ -9628,19 +9604,19 @@ NS_IMETHODIMP
 nsHTMLPluginObjElementSH::Call(nsIXPConnectWrappedNative *wrapper,
                                JSContext *cx, JSObject *obj, PRUint32 argc,
                                jsval *argv, jsval *vp, PRBool *_retval)
 {
   nsCOMPtr<nsIPluginInstance> pi;
   nsresult rv = GetPluginInstanceIfSafe(wrapper, obj, getter_AddRefs(pi));
   NS_ENSURE_SUCCESS(rv, rv);
 
-  if (!pi) {
-    // No plugin around for this object.
-
+  // If obj is a native wrapper, or if there's no plugin around for
+  // this object, throw.
+  if (!ObjectIsNativeWrapper(cx, obj) || !pi) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   JSObject *pi_obj = nsnull;
   JSObject *pi_proto = nsnull;
 
   rv = GetPluginJSObject(cx, obj, pi, &pi_obj, &pi_proto);
   NS_ENSURE_SUCCESS(rv, rv);
@@ -9655,31 +9631,16 @@ nsHTMLPluginObjElementSH::Call(nsIXPConn
   JSAutoRequest ar(cx);
   *_retval = ::JS_CallFunctionValue(cx, JSVAL_TO_OBJECT(argv[-1]),
                                     OBJECT_TO_JSVAL(pi_obj), argc, argv, vp);
 
   return NS_OK;
 }
 
 
-// HTMLAppletElement helper
-
-// static
-nsresult
-nsHTMLPluginObjElementSH::GetJavaPluginJSObject(JSContext *cx, JSObject *obj,
-                                                nsIPluginInstance *plugin_inst,
-                                                JSObject **plugin_obj,
-                                                JSObject **plugin_proto)
-{
-  return NS_OK;
-}
-
-
-// HTMLEmbed/ObjectElement helper
-
 nsresult
 nsHTMLPluginObjElementSH::GetPluginJSObject(JSContext *cx, JSObject *obj,
                                             nsIPluginInstance *plugin_inst,
                                             JSObject **plugin_obj,
                                             JSObject **plugin_proto)
 {
   *plugin_obj = nsnull;
   *plugin_proto = nsnull;
@@ -9691,99 +9652,16 @@ nsHTMLPluginObjElementSH::GetPluginJSObj
     if (*plugin_obj) {
       *plugin_proto = ::JS_GetPrototype(cx, *plugin_obj);
     }
   }
 
   return NS_OK;
 }
 
-NS_IMETHODIMP
-nsHTMLPluginObjElementSH::NewResolve(nsIXPConnectWrappedNative *wrapper,
-                                     JSContext *cx, JSObject *obj, jsval id,
-                                     PRUint32 flags, JSObject **objp,
-                                     PRBool *_retval)
-{
-  if (!JSVAL_IS_STRING(id)) {
-    return NS_OK;
-  }
-
-  // This code resolves embed.nsIFoo to the nsIFoo wrapper of the
-  // plugin/applet instance. We only want to do that for plugin
-  // instances that are not scriptable using NPRuntime or are Java
-  // plugin instances.
-
-  nsCOMPtr<nsIPluginInstance> pi;
-  nsresult rv = GetPluginInstanceIfSafe(wrapper, obj, getter_AddRefs(pi));
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  // Bail if we don't have a plugin instance or this is an NPRuntime or Java
-  // plugin since the following code is only useful for XPCOM plugins.
-  JSObject *jsobj;
-  if (pi)
-    pi->GetJSObject(cx, &jsobj);
-  if (!pi || jsobj) {
-    return nsHTMLElementSH::NewResolve(wrapper, cx, obj, id, flags, objp,
-                                       _retval);
-  }
-
-  JSObject *proto = ::JS_GetPrototype(cx, obj);
-
-  if (!proto || strcmp(STOBJ_GET_CLASS(proto)->name, NPRUNTIME_JSCLASS_NAME)) {
-    // This is not an NPRuntime plugin or Java plugin, continue on...
-
-    JSString *str = JSVAL_TO_STRING(id);
-    char* cstring = ::JS_GetStringBytes(str);
-
-    nsCOMPtr<nsIInterfaceInfoManager>
-      iim(do_GetService(NS_INTERFACEINFOMANAGER_SERVICE_CONTRACTID));
-    NS_ENSURE_TRUE(iim, NS_ERROR_UNEXPECTED);
-
-    nsIID* iid = nsnull;
-
-    nsresult rv = iim->GetIIDForName(cstring, &iid);
-
-    if (NS_SUCCEEDED(rv) && iid) {
-      // Notify the PluginHost that this one is scriptable -- it
-      // will need some special treatment later
-
-      nsCOMPtr<nsIPluginHost> pluginHost = do_GetService(MOZ_PLUGIN_HOST_CONTRACTID);
-      if (pluginHost)
-        pluginHost->SetIsScriptableInstance(pi, PR_TRUE);
-
-      nsCOMPtr<nsIXPConnectJSObjectHolder> holder;
-      rv = sXPConnect->WrapNative(cx, obj, pi, *iid, getter_AddRefs(holder));
-
-      if (NS_SUCCEEDED(rv)) {
-        JSObject* ifaceObj;
-
-        rv = holder->GetJSObject(&ifaceObj);
-
-        if (NS_SUCCEEDED(rv)) {
-          nsMemory::Free(iid);
-
-          *_retval = ::JS_DefineUCProperty(cx, obj, ::JS_GetStringChars(str),
-                                           ::JS_GetStringLength(str),
-                                           OBJECT_TO_JSVAL(ifaceObj), nsnull,
-                                           nsnull, JSPROP_ENUMERATE);
-
-          *objp = obj;
-
-          return *_retval ? NS_OK : NS_ERROR_FAILURE;
-        }
-      }
-    }
-
-    nsMemory::Free(iid);
-  }
-
-  return nsHTMLElementSH::NewResolve(wrapper, cx, obj, id, flags, objp,
-                                     _retval);
-}
-
 
 // HTMLOptionsCollection helper
 
 NS_IMETHODIMP
 nsHTMLOptionsCollectionSH::SetProperty(nsIXPConnectWrappedNative *wrapper,
                                        JSContext *cx, JSObject *obj, jsval id,
                                        jsval *vp, PRBool *_retval)
 {
--- a/dom/base/nsDOMClassInfo.h
+++ b/dom/base/nsDOMClassInfo.h
@@ -1084,25 +1084,17 @@ protected:
                                           JSObject *obj,
                                           nsIPluginInstance **aResult);
 
   static nsresult GetPluginJSObject(JSContext *cx, JSObject *obj,
                                     nsIPluginInstance *plugin_inst,
                                     JSObject **plugin_obj,
                                     JSObject **plugin_proto);
 
-  static nsresult GetJavaPluginJSObject(JSContext *cx, JSObject *obj,
-                                        nsIPluginInstance *plugin_inst,
-                                        JSObject **plugin_obj,
-                                        JSObject **plugin_proto);
-
 public:
-  NS_IMETHOD NewResolve(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
-                        JSObject *obj, jsval id, PRUint32 flags,
-                        JSObject **objp, PRBool *_retval);
   NS_IMETHOD PreCreate(nsISupports *nativeObj, JSContext *cx,
                        JSObject *globalObj, JSObject **parentObj);
   NS_IMETHOD PostCreate(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
                         JSObject *obj);
   NS_IMETHOD GetProperty(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
                          JSObject *obj, jsval id, jsval *vp, PRBool *_retval);
   NS_IMETHOD SetProperty(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
                          JSObject *obj, jsval id, jsval *vp, PRBool *_retval);
--- a/dom/src/json/nsJSON.cpp
+++ b/dom/src/json/nsJSON.cpp
@@ -34,16 +34,17 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "jsapi.h"
 #include "jsdtoa.h"
+#include "jsprvtd.h"
 #include "jsnum.h"
 #include "jsbool.h"
 #include "jsarena.h"
 #include "jscntxt.h"
 #include "jsinterp.h"
 #include "jsiter.h"
 #include "jstypes.h"
 #include "nsIServiceManager.h"
--- a/js/src/bench.sh
+++ b/js/src/bench.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
 X="var d = Date.now();";
 for i in t/*.js; do X="$X load(\"$i\");"; done
 X="$X print(Date.now() - d);"
-echo $X | (./Darwin_OPT.OBJ/js -j || ./Linux_All_OPT.OBJ/js -j)
+echo $X | $1 -j
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -2612,16 +2612,17 @@ JS_SetErrorReporter(JSContext *cx, JSErr
 /*
  * Regular Expressions.
  */
 #define JSREG_FOLD      0x01    /* fold uppercase to lowercase */
 #define JSREG_GLOB      0x02    /* global exec, creates array of matches */
 #define JSREG_MULTILINE 0x04    /* treat ^ and $ as begin and end of line */
 #define JSREG_STICKY    0x08    /* only match starting at lastIndex */
 #define JSREG_FLAT      0x10    /* parse as a flat regexp */
+#define JSREG_NOCOMPILE 0x20    /* do not try to compile to native code */
 
 extern JS_PUBLIC_API(JSObject *)
 JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags);
 
 extern JS_PUBLIC_API(JSObject *)
 JS_NewUCRegExpObject(JSContext *cx, jschar *chars, size_t length, uintN flags);
 
 extern JS_PUBLIC_API(void)
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -94,16 +94,17 @@
 #include "jsgc.h"
 #include "jsinterp.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsscope.h"
 #include "jsstr.h"
 #include "jsstaticcheck.h"
+#include "jsvector.h"
 
 /* 2^32 - 1 as a number and a string */
 #define MAXINDEX 4294967295u
 #define MAXSTR   "4294967295"
 
 /* Small arrays are dense, no matter what. */
 #define MIN_SPARSE_INDEX 256
 
@@ -1296,285 +1297,306 @@ js_MakeArraySlow(JSContext *cx, JSObject
     obj->map = &scope->map;
     return JS_TRUE;
 
   out_bad:
     js_DestroyScope(cx, scope);
     return JS_FALSE;
 }
 
-enum ArrayToStringOp {
-    TO_STRING,
-    TO_LOCALE_STRING,
-    TO_SOURCE
-};
-
-/*
- * When op is TO_STRING or TO_LOCALE_STRING sep indicates a separator to use
- * or "," when sep is NULL.
- * When op is TO_SOURCE sep must be NULL.
- */
+/* Transfer ownership of buffer to returned string. */
 static JSBool
-array_join_sub(JSContext *cx, JSObject *obj, enum ArrayToStringOp op,
-               JSString *sep, jsval *rval)
+BufferToString(JSContext *cx, JSTempVector<jschar> &buf, jsval *rval)
 {
-    JSBool ok, hole;
-    jsuint length, index;
-    jschar *chars, *ochars;
-    size_t nchars, growth, seplen, tmplen, extratail;
-    const jschar *sepstr;
-    JSString *str;
-    JSHashEntry *he;
-    JSAtom *atom;
-
-    JS_CHECK_RECURSION(cx, return JS_FALSE);
-
-    ok = js_GetLengthProperty(cx, obj, &length);
-    if (!ok)
-        return JS_FALSE;
-
-    he = js_EnterSharpObject(cx, obj, NULL, &chars);
-    if (!he)
-        return JS_FALSE;
-#ifdef DEBUG
-    growth = (size_t) -1;
-#endif
-
-    /*
-     * We must check for the sharp bit and skip js_LeaveSharpObject when it is
-     * set even when op is not TO_SOURCE. A script can overwrite the default
-     * toSource implementation and trigger a call, for example, to the
-     * toString method during serialization of the object graph (bug 369696).
-     */
-    if (IS_SHARP(he)) {
-#if JS_HAS_SHARP_VARS
-        nchars = js_strlen(chars);
-#else
-        chars[0] = '[';
-        chars[1] = ']';
-        chars[2] = 0;
-        nchars = 2;
-#endif
-        goto make_string;
-    }
-
-    if (op == TO_SOURCE) {
-        /*
-         * Always allocate 2 extra chars for closing ']' and terminating 0
-         * and then preallocate 1 + extratail to include starting '['.
-         */
-        extratail = 2;
-        growth = (1 + extratail) * sizeof(jschar);
-        if (!chars) {
-            nchars = 0;
-            chars = (jschar *) malloc(growth);
-            if (!chars)
-                goto done;
-        } else {
-            MAKE_SHARP(he);
-            nchars = js_strlen(chars);
-            growth += nchars * sizeof(jschar);
-            chars = (jschar *)realloc((ochars = chars), growth);
-            if (!chars) {
-                free(ochars);
-                goto done;
-            }
-        }
-        chars[nchars++] = '[';
-        JS_ASSERT(sep == NULL);
-        sepstr = NULL;  /* indicates to use ", " as separator */
-        seplen = 2;
-    } else {
-        /*
-         * Free any sharp variable definition in chars.  Normally, we would
-         * MAKE_SHARP(he) so that only the first sharp variable annotation is
-         * a definition, and all the rest are references, but in the current
-         * case of (op != TO_SOURCE), we don't need chars at all.
-         */
-        if (chars)
-            JS_free(cx, chars);
-        chars = NULL;
-        nchars = 0;
-        extratail = 1;  /* allocate extra char for terminating 0 */
-
-        /* Return the empty string on a cycle as well as on empty join. */
-        if (IS_BUSY(he) || length == 0) {
-            js_LeaveSharpObject(cx, NULL);
-            *rval = JS_GetEmptyStringValue(cx);
-            return ok;
-        }
-
-        /* Flag he as BUSY so we can distinguish a cycle from a join-point. */
-        MAKE_BUSY(he);
-
-        if (sep) {
-            sep->getCharsAndLength(sepstr, seplen);
-        } else {
-            sepstr = NULL;      /* indicates to use "," as separator */
-            seplen = 1;
-        }
-    }
-
-    /* Use rval to locally root each element value as we loop and convert. */
-    for (index = 0; index < length; index++) {
-        ok = (JS_CHECK_OPERATION_LIMIT(cx) &&
-              GetArrayElement(cx, obj, index, &hole, rval));
-        if (!ok)
-            goto done;
-        if (hole ||
-            (op != TO_SOURCE &&
-             (JSVAL_IS_VOID(*rval) || JSVAL_IS_NULL(*rval)))) {
-            str = cx->runtime->emptyString;
-        } else {
-            if (op == TO_LOCALE_STRING) {
-                JSObject *robj;
-
-                atom = cx->runtime->atomState.toLocaleStringAtom;
-                ok = js_ValueToObject(cx, *rval, &robj);
-                if (ok) {
-                    /* Re-use *rval to protect robj temporarily. */
-                    *rval = OBJECT_TO_JSVAL(robj);
-                    ok = js_TryMethod(cx, robj, atom, 0, NULL, rval);
-                }
-                if (!ok)
-                    goto done;
-                str = js_ValueToString(cx, *rval);
-            } else if (op == TO_STRING) {
-                str = js_ValueToString(cx, *rval);
-            } else {
-                JS_ASSERT(op == TO_SOURCE);
-                str = js_ValueToSource(cx, *rval);
-            }
-            if (!str) {
-                ok = JS_FALSE;
-                goto done;
-            }
-        }
-
-        /*
-         * Do not append separator after the last element unless it is a hole
-         * and we are in toSource. In that case we append single ",".
-         */
-        if (index + 1 == length)
-            seplen = (hole && op == TO_SOURCE) ? 1 : 0;
-
-        /* Allocate 1 at end for closing bracket and zero. */
-        tmplen = str->length();
-        growth = nchars + tmplen + seplen + extratail;
-        if (nchars > growth || tmplen > growth ||
-            growth > (size_t)-1 / sizeof(jschar)) {
-            if (chars) {
-                free(chars);
-                chars = NULL;
-            }
-            goto done;
-        }
-        growth *= sizeof(jschar);
-        if (!chars) {
-            chars = (jschar *) malloc(growth);
-            if (!chars)
-                goto done;
-        } else {
-            chars = (jschar *) realloc((ochars = chars), growth);
-            if (!chars) {
-                free(ochars);
-                goto done;
-            }
-        }
-
-        js_strncpy(&chars[nchars], str->chars(), tmplen);
-        nchars += tmplen;
-
-        if (seplen) {
-            if (sepstr) {
-                js_strncpy(&chars[nchars], sepstr, seplen);
-            } else {
-                JS_ASSERT(seplen == 1 || seplen == 2);
-                chars[nchars] = ',';
-                if (seplen == 2)
-                    chars[nchars + 1] = ' ';
-            }
-            nchars += seplen;
-        }
-    }
-
-  done:
-    if (op == TO_SOURCE) {
-        if (chars)
-            chars[nchars++] = ']';
-    } else {
-        CLEAR_BUSY(he);
-    }
-    js_LeaveSharpObject(cx, NULL);
-    if (!ok) {
-        if (chars)
-            free(chars);
-        return ok;
-    }
-
-  make_string:
-    if (!chars) {
-        JS_ReportOutOfMemory(cx);
-        return JS_FALSE;
-    }
-    chars[nchars] = 0;
-    JS_ASSERT(growth == (size_t)-1 || (nchars + 1) * sizeof(jschar) == growth);
-    str = js_NewString(cx, chars, nchars);
+    size_t length = buf.size() - 1;
+    jschar *chars = buf.extractRawBuffer();
+    JSString *str = js_NewString(cx, chars, length);
     if (!str) {
-        free(chars);
+        JS_free(cx, chars);
         return JS_FALSE;
     }
     *rval = STRING_TO_JSVAL(str);
     return JS_TRUE;
 }
 
 #if JS_HAS_TOSOURCE
 static JSBool
 array_toSource(JSContext *cx, uintN argc, jsval *vp)
 {
-    JSObject *obj;
-
-    obj = JS_THIS_OBJECT(cx, vp);
-    if (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
-        !JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2)) {
+    JS_CHECK_RECURSION(cx, return JS_FALSE);
+
+    JSObject *obj = JS_THIS_OBJECT(cx, vp);
+    if (!obj ||
+        (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
+         !JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2))) {
         return JS_FALSE;
     }
-    return array_join_sub(cx, obj, TO_SOURCE, NULL, vp);
+
+    /* Find joins or cycles in the reachable object graph. */
+    jschar *sharpchars;
+    JSHashEntry *he = js_EnterSharpObject(cx, obj, NULL, &sharpchars);
+    if (!he)
+        return JS_FALSE;
+    JSBool initiallySharp = IS_SHARP(he) ? JS_TRUE : JS_FALSE;
+
+    /* After this point, all paths exit through the 'done' label. */
+    MUST_FLOW_THROUGH("done");
+    JSBool ok = JS_TRUE;
+
+    /*
+     * This object will take responsibility for the jschar buffer until the 
+     * buffer is transferred to the returned JSString.
+     */
+    JSTempVector<jschar> buf(cx);
+    if (!(ok = buf.reserve(3)))
+        goto done;
+
+    /* Cycles/joins are indicated by sharp objects. */
+#if JS_HAS_SHARP_VARS
+    if (IS_SHARP(he)) {
+        JS_ASSERT(sharpchars != 0);
+        /* +1 to include the trailing '\0' */
+        buf.replaceRawBuffer(sharpchars, js_strlen(sharpchars) + 1);
+        goto make_string;
+    } else if (sharpchars) {
+        MAKE_SHARP(he);
+        buf.replaceRawBuffer(sharpchars, js_strlen(sharpchars));
+    }
+#else
+    if (IS_SHARP(he)) {
+        static const jschar arr[] = { '[', ']', 0 };
+        if (!(ok = buf.pushBack(arr, arr + 3)))
+            goto done;
+        if (sharpchars)
+            JS_free(cx, sharpchars);
+        goto make_string;
+    }
+#endif
+
+    if (!(ok = buf.pushBack('[')))
+        goto done;
+
+    jsuint length;
+    ok = js_GetLengthProperty(cx, obj, &length);
+    if (!ok)
+        goto done;
+
+    for (jsuint index = 0; index < length; index++) {
+        /* Use vp to locally root each element value. */
+        JSBool hole;
+        ok = (JS_CHECK_OPERATION_LIMIT(cx) &&
+              GetArrayElement(cx, obj, index, &hole, vp));
+        if (!ok)
+            goto done;
+
+        /* Get element's character string. */
+        JSString *str;
+        if (hole) {
+            str = cx->runtime->emptyString;
+        } else {
+            str = js_ValueToSource(cx, *vp);
+            if (!str) {
+                ok = JS_FALSE;
+                goto done;
+            }
+        }
+        *vp = STRING_TO_JSVAL(str);
+        const jschar *chars;
+        size_t charlen;
+        str->getCharsAndLength(chars, charlen);
+
+        /* Append element to buffer. */
+        if (!(ok = buf.pushBack(chars, chars + charlen)))
+            goto done;
+        if (index + 1 != length) {
+            if (!(ok = buf.pushBack(',')) || !(ok = buf.pushBack(' ')))
+                goto done;
+        } else if (hole) {
+            if (!(ok = buf.pushBack(',')))
+                goto done;
+        }
+    }
+
+    /* Finalize the buffer. */
+    if (!(ok = buf.pushBack(']')) || !(ok = buf.pushBack(0)))
+        goto done;
+
+  make_string:
+    if (!(ok = BufferToString(cx, buf, vp)))
+        goto done;
+
+  done:
+    if (!initiallySharp)
+        js_LeaveSharpObject(cx, NULL);
+    return ok;
 }
 #endif
 
+static JSHashNumber
+js_hash_array(const void *key)
+{
+    return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS;
+}
+
+JSBool
+js_InitContextBusyArrayTable(JSContext *cx)
+{
+    cx->busyArrayTable = JS_NewHashTable(4, js_hash_array, JS_CompareValues,
+                                         JS_CompareValues, NULL, NULL);
+    return cx->busyArrayTable != NULL;
+}
+
+static JSBool
+array_toString_sub(JSContext *cx, JSObject *obj, JSBool locale,
+                   JSString *sepstr, jsval *rval)
+{
+    JS_CHECK_RECURSION(cx, return JS_FALSE);
+
+    /*
+     * This hash table is shared between toString invocations and must be empty
+     * after the root invocation completes.
+     */
+    JSHashTable *table = cx->busyArrayTable;
+
+    /*
+     * Use HashTable entry as the cycle indicator.  On first visit, create the
+     * entry, and, when leaving, remove the entry.
+     */
+    JSHashNumber hash = js_hash_array(obj);
+    JSHashEntry **hep = JS_HashTableRawLookup(table, hash, obj);
+    JSHashEntry *he = *hep;
+    if (!he) {
+        /* Not in hash table, so not a cycle. */
+        he = JS_HashTableRawAdd(table, hep, hash, obj, NULL);
+        if (!he) {
+            JS_ReportOutOfMemory(cx);
+            return JS_FALSE;
+        }
+    } else {
+        /* Cycle, so return empty string. */
+        *rval = ATOM_KEY(cx->runtime->atomState.emptyAtom);
+        return JS_TRUE;
+    }
+
+    JSAutoTempValueRooter tvr(cx, obj);
+
+    /* After this point, all paths exit through the 'done' label. */
+    MUST_FLOW_THROUGH("done");
+    JSBool ok = JS_TRUE;
+
+    /* Get characters to use for the separator. */
+    static const jschar comma = ',';
+    const jschar *sep;
+    size_t seplen;
+    if (sepstr) {
+        sepstr->getCharsAndLength(sep, seplen);
+    } else {
+        sep = &comma;
+        seplen = 1;
+    }
+
+    /*
+     * This object will take responsibility for the jschar buffer until the 
+     * buffer is transferred to the returned JSString.
+     */
+    JSTempVector<jschar> buf(cx);
+
+    jsuint length;
+    ok = js_GetLengthProperty(cx, obj, &length);
+    if (!ok)
+        goto done;
+
+    for (jsuint index = 0; index < length; index++) {
+        /* Use rval to locally root each element value. */
+        JSBool hole;
+        ok = JS_CHECK_OPERATION_LIMIT(cx) &&
+             GetArrayElement(cx, obj, index, &hole, rval);
+        if (!ok)
+            goto done;
+
+        /* Get element's character string. */
+        if (!(hole || JSVAL_IS_VOID(*rval) || JSVAL_IS_NULL(*rval))) {
+            if (locale) {
+                JSObject *robj;
+
+                JSAtom *atom = cx->runtime->atomState.toLocaleStringAtom;
+                ok = js_ValueToObject(cx, *rval, &robj);
+                if (ok) {
+                    /* Re-use *rval to protect robj temporarily. */
+                    *rval = OBJECT_TO_JSVAL(robj);
+                    ok = js_TryMethod(cx, robj, atom, 0, NULL, rval);
+                }
+                if (!ok)
+                    goto done;
+            }
+
+            ok = js_ValueToStringBuffer(cx, *rval, buf);
+            if (!ok)
+                goto done;
+        }
+
+        /* Append the separator. */
+        if (index + 1 != length) {
+            if (!(ok = buf.pushBack(sep, sep + seplen)))
+                goto done;
+        }
+    }
+
+    /* Finalize the buffer. */
+    if (buf.empty()) {
+        *rval = ATOM_KEY(cx->runtime->atomState.emptyAtom);
+        goto done;
+    }
+
+    ok = buf.pushBack(0) &&
+         BufferToString(cx, buf, rval);
+    if (!ok)
+        goto done;
+
+  done:
+    /*
+     * It is possible that 'hep' may have been invalidated by subsequent
+     * RawAdd/Remove.  Hence, 'RawRemove' must not be used.
+     */
+    JS_HashTableRemove(table, obj);
+    return ok;
+}
+
 static JSBool
 array_toString(JSContext *cx, uintN argc, jsval *vp)
 {
     JSObject *obj;
 
     obj = JS_THIS_OBJECT(cx, vp);
-    if (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
-        !JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2)) {
+    if (!obj ||
+        (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
+         !JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2))) {
         return JS_FALSE;
     }
-    return array_join_sub(cx, obj, TO_STRING, NULL, vp);
+
+    return array_toString_sub(cx, obj, JS_FALSE, NULL, vp);
 }
 
 static JSBool
 array_toLocaleString(JSContext *cx, uintN argc, jsval *vp)
 {
     JSObject *obj;
 
     obj = JS_THIS_OBJECT(cx, vp);
-    if (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
-        !JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2)) {
+    if (!obj ||
+        (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
+         !JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2))) {
         return JS_FALSE;
     }
 
     /*
      *  Passing comma here as the separator. Need a way to get a
      *  locale-specific version.
      */
-    return array_join_sub(cx, obj, TO_LOCALE_STRING, NULL, vp);
+    return array_toString_sub(cx, obj, JS_TRUE, NULL, vp);
 }
 
 enum TargetElementsType {
     TargetElementsAllHoles,
     TargetElementsMayContainValues
 };
 
 enum SourceVectorType {
@@ -1711,28 +1733,28 @@ InitArrayObject(JSContext *cx, JSObject 
     return JS_TRUE;
 }
 
 #ifdef JS_TRACER
 static JSString* FASTCALL
 Array_p_join(JSContext* cx, JSObject* obj, JSString *str)
 {
     JSAutoTempValueRooter tvr(cx);
-    if (!array_join_sub(cx, obj, TO_STRING, str, tvr.addr())) {
+    if (!array_toString_sub(cx, obj, JS_FALSE, str, tvr.addr())) {
         js_SetBuiltinError(cx);
         return NULL;
     }
     return JSVAL_TO_STRING(tvr.value());
 }
 
 static JSString* FASTCALL
 Array_p_toString(JSContext* cx, JSObject* obj)
 {
     JSAutoTempValueRooter tvr(cx);
-    if (!array_join_sub(cx, obj, TO_STRING, NULL, tvr.addr())) {
+    if (!array_toString_sub(cx, obj, JS_FALSE, NULL, tvr.addr())) {
         js_SetBuiltinError(cx);
         return NULL;
     }
     return JSVAL_TO_STRING(tvr.value());
 }
 #endif
 
 /*
@@ -1748,17 +1770,17 @@ array_join(JSContext *cx, uintN argc, js
         str = NULL;
     } else {
         str = js_ValueToString(cx, vp[2]);
         if (!str)
             return JS_FALSE;
         vp[2] = STRING_TO_JSVAL(str);
     }
     obj = JS_THIS_OBJECT(cx, vp);
-    return obj && array_join_sub(cx, obj, TO_STRING, str, vp);
+    return obj && array_toString_sub(cx, obj, JS_FALSE, str, vp);
 }
 
 static JSBool
 array_reverse(JSContext *cx, uintN argc, jsval *vp)
 {
     JSObject *obj;
     JSTempValueRooter tvr;
     jsuint len, half, i;
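
The busy-array table above replaces the sharp-object machinery for toString
conversions: each context keeps a hash table of arrays currently being
converted, so re-entering one signals a cycle. A minimal sketch of that
discipline, assuming this patch's js_hash_array and cx->busyArrayTable
(EnterBusyArray is a hypothetical helper name, not part of the patch):

    /*
     * Sketch of array_toString_sub's cycle check: mark the object on entry,
     * treat an existing mark as a cycle, and have the caller unmark with
     * JS_HashTableRemove (not RawRemove, since 'hep' can go stale after
     * nested adds/removes).
     */
    static JSBool
    EnterBusyArray(JSContext *cx, JSObject *obj, JSBool *cyclic)
    {
        JSHashTable *table = cx->busyArrayTable;
        JSHashNumber hash = js_hash_array(obj);
        JSHashEntry **hep = JS_HashTableRawLookup(table, hash, obj);
        if (*hep) {
            *cyclic = JS_TRUE;               /* already converting: a cycle */
            return JS_TRUE;
        }
        *cyclic = JS_FALSE;
        if (!JS_HashTableRawAdd(table, hep, hash, obj, NULL)) {
            JS_ReportOutOfMemory(cx);
            return JS_FALSE;
        }
        return JS_TRUE;                      /* caller must JS_HashTableRemove */
    }
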
--- a/js/src/jsarray.h
+++ b/js/src/jsarray.h
@@ -92,16 +92,19 @@ static JS_INLINE JSObject *
 js_GetProtoIfDenseArray(JSContext *cx, JSObject *obj)
 {
     return OBJ_IS_DENSE_ARRAY(cx, obj) ? OBJ_GET_PROTO(cx, obj) : obj;
 }
 
 extern JSObject *
 js_InitArrayClass(JSContext *cx, JSObject *obj);
 
+extern JSBool
+js_InitContextBusyArrayTable(JSContext *);
+
 extern JSObject *
 js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector,
                   JSBool holey = JS_FALSE);
 
 /* Create an array object that starts out already made slow/sparse. */
 extern JSObject *
 js_NewSlowArrayObject(JSContext *cx);
 
--- a/js/src/jsbool.cpp
+++ b/js/src/jsbool.cpp
@@ -47,16 +47,17 @@
 #include "jsatom.h"
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsversion.h"
 #include "jslock.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsstr.h"
+#include "jsvector.h"
 
 /* Check pseudo-booleans values. */
 JS_STATIC_ASSERT(!(JSVAL_TRUE & JSVAL_HOLE_FLAG));
 JS_STATIC_ASSERT(!(JSVAL_FALSE & JSVAL_HOLE_FLAG));
 JS_STATIC_ASSERT(!(JSVAL_VOID & JSVAL_HOLE_FLAG));
 JS_STATIC_ASSERT((JSVAL_HOLE & JSVAL_HOLE_FLAG));
 JS_STATIC_ASSERT((JSVAL_HOLE & ~JSVAL_HOLE_FLAG) == JSVAL_VOID);
 JS_STATIC_ASSERT(!(JSVAL_ARETURN & JSVAL_HOLE_FLAG));
@@ -157,16 +158,26 @@ js_InitBooleanClass(JSContext *cx, JSObj
 }
 
 JSString *
 js_BooleanToString(JSContext *cx, JSBool b)
 {
     return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[b ? 1 : 0]);
 }
 
+/* This function implements E-262-3 section 9.8, toString. */
+JSBool
+js_BooleanToStringBuffer(JSContext *cx, JSBool b, JSTempVector<jschar> &buf)
+{
+    static const jschar trueChars[] = { 't', 'r', 'u', 'e' },
+                        falseChars[] = { 'f', 'a', 'l', 's', 'e' };
+    return b ? buf.pushBack(trueChars, trueChars + JS_ARRAY_LENGTH(trueChars))
+             : buf.pushBack(falseChars, falseChars + JS_ARRAY_LENGTH(falseChars));
+}
+
 JSBool
 js_ValueToBoolean(jsval v)
 {
     if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
         return JS_FALSE;
     if (JSVAL_IS_OBJECT(v))
         return JS_TRUE;
     if (JSVAL_IS_STRING(v))
--- a/js/src/jsbool.h
+++ b/js/src/jsbool.h
@@ -76,13 +76,16 @@ extern JSClass js_BooleanClass;
 
 extern JSObject *
 js_InitBooleanClass(JSContext *cx, JSObject *obj);
 
 extern JSString *
 js_BooleanToString(JSContext *cx, JSBool b);
 
 extern JSBool
+js_BooleanToStringBuffer(JSContext *cx, JSBool b, JSTempVector<jschar> &buf);
+
+extern JSBool
 js_ValueToBoolean(jsval v);
 
 JS_END_EXTERN_C
 
 #endif /* jsbool_h___ */
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -382,16 +382,21 @@ js_NewContext(JSRuntime *rt, size_t stac
 
     JS_INIT_ARENA_POOL(&cx->tempPool, "temp",
                        1024,  /* FIXME: bug 421435 */
                        sizeof(jsdouble), &cx->scriptStackQuota);
 
     js_InitRegExpStatics(cx);
     JS_ASSERT(cx->resolveFlags == 0);
 
+    if (!js_InitContextBusyArrayTable(cx)) {
+        FreeContext(cx);
+        return NULL;
+    }
+
 #ifdef JS_THREADSAFE
     if (!js_InitContextThread(cx)) {
         FreeContext(cx);
         return NULL;
     }
 #endif
 
     /*
@@ -738,16 +743,22 @@ FreeContext(JSContext *cx)
     /* Remove any argument formatters. */
     map = cx->argumentFormatMap;
     while (map) {
         JSArgumentFormatMap *temp = map;
         map = map->next;
         JS_free(cx, temp);
     }
 
+    /* Destroy the busy array table. */
+    if (cx->busyArrayTable) {
+        JS_HashTableDestroy(cx->busyArrayTable);
+        cx->busyArrayTable = NULL;
+    }
+
     /* Destroy the resolve recursion damper. */
     if (cx->resolvingTable) {
         JS_DHashTableDestroy(cx->resolvingTable);
         cx->resolvingTable = NULL;
     }
 
     lrs = cx->localRootStack;
     if (lrs) {
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -51,16 +51,17 @@
 #include "jsdhash.h"
 #include "jsgc.h"
 #include "jsinterp.h"
 #include "jsobj.h"
 #include "jsprvtd.h"
 #include "jspubtd.h"
 #include "jsregexp.h"
 #include "jsutil.h"
+#include "jsarray.h"
 
 JS_BEGIN_EXTERN_C
 
 /*
  * js_GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
  * given pc in a script. We use the script->code pointer to tag the cache,
  * instead of the script address itself, so that source notes are always found
  * by offset from the bytecode with which they were generated.
@@ -949,16 +950,17 @@ struct JSContext {
     /* Storage to root recently allocated GC things and script result. */
     JSWeakRoots         weakRoots;
 
     /* Regular expression class statics (XXX not shared globally). */
     JSRegExpStatics     regExpStatics;
 
     /* State for object and array toSource conversion. */
     JSSharpObjectMap    sharpObjectMap;
+    JSHashTable         *busyArrayTable;
 
     /* Argument formatter support for JS_{Convert,Push}Arguments{,VA}. */
     JSArgumentFormatMap *argumentFormatMap;
 
     /* Last message string and trace file for debugging. */
     char                *lastMessage;
 #ifdef DEBUG
     void                *tracefp;
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -626,28 +626,37 @@ js_watch_set(JSContext *cx, JSObject *ob
 
                     argv[0] = OBJECT_TO_JSVAL(closure);
                     argv[1] = JSVAL_NULL;
                     memset(argv + 2, 0, (nslots - 2) * sizeof(jsval));
 
                     memset(&frame, 0, sizeof(frame));
                     frame.script = script;
                     frame.regs = NULL;
+                    frame.callee = closure;
+                    frame.fun = fun;
+                    frame.argv = argv + 2;
+                    frame.down = js_GetTopStackFrame(cx);
+                    frame.scopeChain = OBJ_GET_PARENT(cx, closure);
                     if (script) {
                         JS_ASSERT(script->length >= JSOP_STOP_LENGTH);
                         regs.pc = script->code + script->length
                                   - JSOP_STOP_LENGTH;
                         regs.sp = NULL;
                         frame.regs = &regs;
+                        if (fun &&
+                            JSFUN_HEAVYWEIGHT_TEST(fun->flags) &&
+                            !js_GetCallObject(cx, &frame)) {
+                            if (argv != smallv)
+                                JS_free(cx, argv);
+                            DBG_LOCK(rt);
+                            DropWatchPointAndUnlock(cx, wp, JSWP_HELD);
+                            return JS_FALSE;
+                        }
                     }
-                    frame.callee = closure;
-                    frame.fun = fun;
-                    frame.argv = argv + 2;
-                    frame.down = js_GetTopStackFrame(cx);
-                    frame.scopeChain = OBJ_GET_PARENT(cx, closure);
 
                     cx->fp = &frame;
                 }
 #ifdef __GNUC__
                 else
                     argv = NULL;    /* suppress bogus gcc warnings */
 #endif
                 ok = !wp->setter ||
--- a/js/src/jsdtoa.cpp
+++ b/js/src/jsdtoa.cpp
@@ -41,17 +41,17 @@
  * Portable double to alphanumeric string and back converters.
  */
 #include "jslibmath.h"
 #include "jstypes.h"
 #include "jsstdint.h"
 #include "jsdtoa.h"
 #include "jsprf.h"
 #include "jsutil.h" /* Added by JSIFY */
-#include "jspubtd.h"
+#include "jsprvtd.h"
 #include "jsnum.h"
 #include "jsbit.h"
 
 #ifdef JS_THREADSAFE
 #include "jslock.h"
 #endif
 
 #ifdef IS_LITTLE_ENDIAN
--- a/js/src/jsnum.cpp
+++ b/js/src/jsnum.cpp
@@ -66,16 +66,17 @@
 #include "jsgc.h"
 #include "jsinterp.h"
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jsprf.h"
 #include "jsscope.h"
 #include "jsstr.h"
+#include "jsvector.h"
 
 static JSBool
 num_isNaN(JSContext *cx, uintN argc, jsval *vp)
 {
     jsdouble x;
 
     if (argc == 0) {
         *vp = JSVAL_TRUE;
@@ -856,16 +857,51 @@ NumberToStringWithBase(JSContext *cx, js
 }
 
 JSString * JS_FASTCALL
 js_NumberToString(JSContext *cx, jsdouble d)
 {
     return NumberToStringWithBase(cx, d, 10);
 }
 
+JSBool JS_FASTCALL
+js_NumberValueToStringBuffer(JSContext *cx, jsval v, JSTempVector<jschar> &buf)
+{
+    /* Convert to C-string. */
+    static const size_t arrSize = DTOSTR_STANDARD_BUFFER_SIZE;
+    char arr[arrSize];
+    const char *cstr;
+    if (JSVAL_IS_INT(v)) {
+        cstr = IntToCString(JSVAL_TO_INT(v), 10, arr, arrSize);
+    } else {
+        JS_ASSERT(JSVAL_IS_DOUBLE(v));
+        cstr = JS_dtostr(arr, arrSize, DTOSTR_STANDARD, 0, *JSVAL_TO_DOUBLE(v));
+    }
+    if (!cstr)
+        return JS_FALSE;
+
+    /*
+     * Inflate to jschar string.  The input C-string characters are < 127, so
+     * even if jschars are UTF-8, all chars should map to one jschar.
+     */
+    size_t cstrlen = strlen(cstr);
+    JS_ASSERT(cstrlen < arrSize);
+    size_t sizeBefore = buf.size();
+    if (!buf.growBy(cstrlen))
+        return JS_FALSE;
+    jschar *appendBegin = buf.begin() + sizeBefore;
+#ifdef DEBUG
+    size_t oldcstrlen = cstrlen;
+    JSBool ok =
+#endif
+        js_InflateStringToBuffer(cx, cstr, cstrlen, appendBegin, &cstrlen);
+    JS_ASSERT(ok && cstrlen == oldcstrlen);
+    return JS_TRUE;
+}
+
 jsdouble
 js_ValueToNumber(JSContext *cx, jsval *vp)
 {
     jsval v;
     JSString *str;
     const jschar *bp, *end, *ep;
     jsdouble d, *dp;
     JSObject *obj;
--- a/js/src/jsnum.h
+++ b/js/src/jsnum.h
@@ -185,16 +185,23 @@ js_NewNumberInRootedValue(JSContext *cx,
 extern JSBool
 js_NewWeaklyRootedNumber(JSContext *cx, jsdouble d, jsval *vp);
 
 /* Convert a number to a GC'ed string. */
 extern JSString * JS_FASTCALL
 js_NumberToString(JSContext *cx, jsdouble d);
 
 /*
+ * Convert an integer or double (contained in the given jsval) to a string and
+ * append to the given buffer.
+ */
+extern JSBool JS_FASTCALL
+js_NumberValueToStringBuffer(JSContext *, jsval, JSTempVector<jschar> &);
+
+/*
  * Convert a value to a number. On exit JSVAL_IS_NULL(*vp) iff there was an
  * error. If on exit JSVAL_IS_NUMBER(*vp), then *vp holds the jsval that
  * matches the result. Otherwise *vp is JSVAL_TRUE indicating that the jsval
  * for result has to be created explicitly using, for example, the
  * js_NewNumberInRootedValue function.
  */
 extern jsdouble
 js_ValueToNumber(JSContext *cx, jsval* vp);
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -2163,17 +2163,17 @@ JS_DEFINE_CALLINFO_3(extern, CONSTRUCTOR
 
 #endif /* !JS_TRACER */
 
 /*
  * Given pc pointing after a property accessing bytecode, return true if the
  * access is "object-detecting" in the sense used by web scripts, e.g., when
  * checking whether document.all is defined.
  */
-static JS_REQUIRES_STACK JSBool
+JS_REQUIRES_STACK JSBool
 Detecting(JSContext *cx, jsbytecode *pc)
 {
     JSScript *script;
     jsbytecode *endpc;
     JSOp op;
     JSAtom *atom;
 
     script = cx->fp->script;
@@ -2226,19 +2226,26 @@ Detecting(JSContext *cx, jsbytecode *pc)
 }
 
 /*
  * Infer lookup flags from the currently executing bytecode. This does
  * not attempt to infer JSRESOLVE_WITH, because the current bytecode
  * does not indicate whether we are in a with statement. Return defaultFlags
  * if a currently executing bytecode cannot be determined.
  */
-static uintN
-InferFlags(JSContext *cx, uintN defaultFlags)
+uintN
+js_InferFlags(JSContext *cx, uintN defaultFlags)
 {
+#ifdef JS_TRACER
+    if (JS_ON_TRACE(cx))
+        return cx->bailExit->lookupFlags;
+#endif
+
+    JS_ASSERT_NOT_ON_TRACE(cx);
+
     JSStackFrame *fp;
     jsbytecode *pc;
     const JSCodeSpec *cs;
     uint32 format;
     uintN flags = 0;
 
     fp = js_GetTopStackFrame(cx);
     if (!fp || !fp->regs)
@@ -2266,17 +2273,17 @@ InferFlags(JSContext *cx, uintN defaultF
  */
 static JSBool
 with_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
                     JSProperty **propp)
 {
     /* Fixes bug 463997 */
     uintN flags = cx->resolveFlags;
     if (flags == JSRESOLVE_INFER)
-        flags = InferFlags(cx, flags);
+        flags = js_InferFlags(cx, flags);
     flags |= JSRESOLVE_WITH;
     JSAutoResolveFlags rf(cx, flags);
     JSObject *proto = OBJ_GET_PROTO(cx, obj);
     if (!proto)
         return js_LookupProperty(cx, obj, id, objp, propp);
     return OBJ_LOOKUP_PROPERTY(cx, proto, id, objp, propp);
 }
 
@@ -3832,17 +3839,17 @@ js_LookupPropertyWithFlags(JSContext *cx
                 generation = cx->resolvingTable->generation;
 
                 /* Null *propp here so we can test it at cleanup: safely. */
                 *propp = NULL;
 
                 if (clasp->flags & JSCLASS_NEW_RESOLVE) {
                     newresolve = (JSNewResolveOp)resolve;
                     if (flags == JSRESOLVE_INFER)
-                        flags = InferFlags(cx, flags);
+                        flags = js_InferFlags(cx, flags);
                     obj2 = (clasp->flags & JSCLASS_NEW_RESOLVE_GETS_START)
                            ? start
                            : NULL;
                     JS_UNLOCK_OBJ(cx, obj);
 
                     /* Protect id and all atoms from a GC nested in resolve. */
                     JS_KEEP_ATOMS(cx->runtime);
                     ok = newresolve(cx, obj, ID_TO_VALUE(id), flags, &obj2);
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -856,11 +856,14 @@ JS_FRIEND_API(void) js_DumpChars(const j
 JS_FRIEND_API(void) js_DumpString(JSString *str);
 JS_FRIEND_API(void) js_DumpAtom(JSAtom *atom);
 JS_FRIEND_API(void) js_DumpValue(jsval val);
 JS_FRIEND_API(void) js_DumpId(jsid id);
 JS_FRIEND_API(void) js_DumpObject(JSObject *obj);
 JS_FRIEND_API(void) js_DumpStackFrame(JSStackFrame *fp);
 #endif
 
+extern uintN
+js_InferFlags(JSContext *cx, uintN defaultFlags);
+
 JS_END_EXTERN_C
 
 #endif /* jsobj_h___ */
--- a/js/src/jsopcode.cpp
+++ b/js/src/jsopcode.cpp
@@ -5355,35 +5355,34 @@ SimulateImacroCFG(JSContext *cx, JSScrip
         if (SimulateOp(cx, script, op, cs, pc, tmp_pcstack, pcdepth) < 0)
             goto failure;
 
         uint32 type = cs->format & JOF_TYPEMASK;
         if (type == JOF_JUMP || type == JOF_JUMPX) {
             ptrdiff_t jmpoff = (type == JOF_JUMP) ? GET_JUMP_OFFSET(pc)
                                                   : GET_JUMPX_OFFSET(pc);
             LOCAL_ASSERT(jmpoff >= 0);
-            uintN tmp_pcdepth = SimulateImacroCFG(cx, script, pcdepth, pc + jmpoff,
-                                                  target, tmp_pcstack);
+            intN tmp_pcdepth = SimulateImacroCFG(cx, script, pcdepth, pc + jmpoff,
+                                                 target, tmp_pcstack);
             if (tmp_pcdepth >= 0) {
-                pcdepth = tmp_pcdepth;
+                pcdepth = uintN(tmp_pcdepth);
                 goto success;
             }
 
             if (op == JSOP_GOTO || op == JSOP_GOTOX)
                 goto failure;
         }
     }
 
     if (pc > target)
         goto failure;
 
     LOCAL_ASSERT(pc == target);
 
   success:
-    LOCAL_ASSERT(pcdepth >= 0);
     memcpy(pcstack, tmp_pcstack, nbytes);
     JS_free(cx, tmp_pcstack);
     return pcdepth;
 
   failure:
     JS_free(cx, tmp_pcstack);
     return -1;
 }
@@ -5398,19 +5397,19 @@ ReconstructImacroPCStack(JSContext *cx, 
                          jsbytecode **pcstack)
 {
     /*
      * Begin with a recursive call back to ReconstructPCStack to pick up
      * the state-of-the-world at the *start* of the imacro.
      */
     JSStackFrame *fp = js_GetScriptedCaller(cx, NULL);
     JS_ASSERT(fp->imacpc);
-    uintN pcdepth = ReconstructPCStack(cx, script, fp->imacpc, pcstack);
+    intN pcdepth = ReconstructPCStack(cx, script, fp->imacpc, pcstack);
     if (pcdepth < 0)
-        return pcdepth;
+        return uintN(pcdepth);
     return SimulateImacroCFG(cx, script, pcdepth, imacstart, target, pcstack);
 }
 
 extern jsbytecode* js_GetImacroStart(jsbytecode* pc);
 #endif
 
 static intN
 ReconstructPCStack(JSContext *cx, JSScript *script, jsbytecode *target,
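
The intN changes above fix a signedness bug: SimulateImacroCFG returns -1 on
failure, but storing the result in a uintN made the ">= 0" success check
vacuously true. A standalone illustration (typedefs as in SpiderMonkey's
jstypes.h; the function name is made up for the demo):

    #include <stdio.h>

    typedef int intN;
    typedef unsigned int uintN;

    static intN Fails(void) { return -1; }   /* error sentinel, like SimulateImacroCFG */

    int main(void)
    {
        uintN u = Fails();                   /* the old bug: -1 wraps to 0xffffffff */
        intN  i = Fails();
        printf("%d %d\n", u >= 0, i >= 0);   /* prints "1 0": only intN detects failure */
        return 0;
    }
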
--- a/js/src/jsprvtd.h
+++ b/js/src/jsprvtd.h
@@ -129,16 +129,31 @@ typedef struct JSScopeProperty      JSSc
 typedef struct JSStackHeader        JSStackHeader;
 typedef struct JSStringBuffer       JSStringBuffer;
 typedef struct JSSubString          JSSubString;
 typedef struct JSTraceableNative    JSTraceableNative;
 typedef struct JSXML                JSXML;
 typedef struct JSXMLArray           JSXMLArray;
 typedef struct JSXMLArrayCursor     JSXMLArrayCursor;
 
+/*
+ * Template declarations.
+ *
+ * jsprvtd.h can be included in both C and C++ translation units.  For C++, it
+ * may possibly be wrapped in an extern "C" block which does not agree with
+ * templates.
+ */
+#ifdef __cplusplus
+extern "C++" {
+
+template <class T> class JSTempVector;
+
+}
+#endif  /* __cplusplus */
+
 /* "Friend" types used by jscntxt.h and jsdbgapi.h. */
 typedef enum JSTrapStatus {
     JSTRAP_ERROR,
     JSTRAP_CONTINUE,
     JSTRAP_RETURN,
     JSTRAP_THROW,
     JSTRAP_LIMIT
 } JSTrapStatus;
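
The extern "C++" block above is needed because jsprvtd.h can be included from
headers that wrap it in extern "C"; linkage specifications nest, so re-entering
C++ linkage makes the template forward declaration legal. A standalone sketch
of the nesting (the outer extern "C" stands in for an includer):

    #ifdef __cplusplus
    extern "C" {                    /* how an includer may wrap jsprvtd.h */
    extern "C++" {                  /* re-enter C++ linkage: templates allowed */
    template <class T> class JSTempVector;
    }                               /* extern "C++" */
    }                               /* extern "C" */
    #endif
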
--- a/js/src/jsregexp.cpp
+++ b/js/src/jsregexp.cpp
@@ -2383,18 +2383,20 @@ class RegExpNativeCompiler {
         size_t re_length;
         Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
 
         re->source->getCharsAndLength(re_chars, re_length);
         /* 
          * If the regexp is too long nanojit will assert when we
          * try to insert the guard record.
          */
-        if (re_length > 1024)
+        if (re_length > 1024) {
+            re->flags |= JSREG_NOCOMPILE;
             return JS_FALSE;
+        }
 
         this->cx = cx;
         /* At this point we have an empty fragment. */
         LirBuffer* lirbuf = fragment->lirbuf;
         if (lirbuf->outOMem()) 
             goto fail;
         /* FIXME Use bug 463260 smart pointer when available. */
         lir = lirBufWriter = new (&gc) LirBufWriter(lirbuf);
@@ -2441,17 +2443,17 @@ class RegExpNativeCompiler {
         return JS_TRUE;
     fail:
         if (lirbuf->outOMem() || oom || 
             js_OverfullFragmento(&JS_TRACE_MONITOR(cx), fragmento)) {
             fragmento->clearFrags();
             lirbuf->rewind();
         } else {
             if (!guard) insertGuard(re_chars, re_length);
-            fragment->blacklist();
+            re->flags |= JSREG_NOCOMPILE;
         }
         delete lirBufWriter;
 #ifdef NJ_VERBOSE
         debug_only_stmt( if (js_LogController.lcbits & LC_TMRegexp)
                              delete lir; )
 #endif
         return JS_FALSE;
     }
@@ -2464,18 +2466,16 @@ static inline JSBool
 CompileRegExpToNative(JSContext* cx, JSRegExp* re, Fragment* fragment)
 {
     JSBool rv = JS_FALSE;
     void* mark;
     CompilerState state;
     RegExpNativeCompiler rc(re, &state, fragment);
 
     JS_ASSERT(!fragment->code());
-    JS_ASSERT(!fragment->isBlacklisted());
-
     mark = JS_ARENA_MARK(&cx->tempPool);
     if (!CompileRegExpToAST(cx, NULL, re->source, re->flags, state)) {
         goto out;
     }
     rv = rc.compile(cx);
  out:
     JS_ARENA_RELEASE(&cx->tempPool, mark);
     return rv;
@@ -2494,29 +2494,25 @@ GetNativeRegExp(JSContext* cx, JSRegExp*
     Fragment *fragment;
     const jschar *re_chars;
     size_t re_length;
     Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
 
     re->source->getCharsAndLength(re_chars, re_length);
     void* hash = HashRegExp(re->flags, re_chars, re_length);
     fragment = LookupNativeRegExp(cx, hash, re->flags, re_chars, re_length);
-    if (fragment) {
-        if (fragment->code())
-            goto ok;
-        if (fragment->isBlacklisted())
-            return NULL;
-    } else {
+    if (!fragment) {
         fragment = fragmento->getAnchor(hash);
         fragment->lirbuf = JS_TRACE_MONITOR(cx).reLirBuf;
         fragment->root = fragment;
+    } 
+    if (!fragment->code()) {
+        if (!CompileRegExpToNative(cx, re, fragment))
+            return NULL;
     }
-        
-    if (!CompileRegExpToNative(cx, re, fragment))
-        return NULL;
  ok:
     union { NIns *code; NativeRegExp func; } u;
     u.code = fragment->code();
     return u.func;
 }
 #endif
 
 JSRegExp *
@@ -3917,16 +3913,17 @@ MatchRegExp(REGlobalData *gData, REMatch
     const jschar *cp = x->cp;
     const jschar *cp2;
     uintN j;
 #ifdef JS_TRACER
     NativeRegExp native;
 
     /* Run with native regexp if possible. */
     if (TRACING_ENABLED(gData->cx) && 
+        !(gData->regexp->flags & JSREG_NOCOMPILE) &&
         (native = GetNativeRegExp(gData->cx, gData->regexp))) {
         gData->skipped = (ptrdiff_t) x->cp;
 
 #ifdef JS_JIT_SPEW
         debug_only_stmt({
             VOUCH_DOES_NOT_REQUIRE_STACK();
             JSStackFrame *caller = (JS_ON_TRACE(gData->cx))
                                    ? NULL
--- a/js/src/jsstr.cpp
+++ b/js/src/jsstr.cpp
@@ -68,16 +68,17 @@
 #include "jsnum.h"
 #include "jsobj.h"
 #include "jsopcode.h"
 #include "jsregexp.h"
 #include "jsscope.h"
 #include "jsstaticcheck.h"
 #include "jsstr.h"
 #include "jsbit.h"
+#include "jsvector.h"
 
 #define JSSTRDEP_RECURSION_LIMIT        100
 
 static size_t
 MinimizeDependentStrings(JSString *str, int level, JSString **basep)
 {
     JSString *base;
     size_t start, length;
@@ -241,45 +242,46 @@ js_MakeStringImmutable(JSContext *cx, JS
     }
     str->flatClearMutable();
     return JS_TRUE;
 }
 
 static JSString *
 ArgToRootedString(JSContext *cx, uintN argc, jsval *vp, uintN arg)
 {
-    JSObject *obj;
-    JSString *str;
-
     if (arg >= argc)
         return ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
     vp += 2 + arg;
 
-    if (JSVAL_IS_OBJECT(*vp)) {
-        obj = JSVAL_TO_OBJECT(*vp);
-        if (!obj)
-            return ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
-        if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, vp))
-            return NULL;
+    if (!JSVAL_IS_PRIMITIVE(*vp) &&
+        !OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(*vp), JSTYPE_STRING, vp)) {
+        return NULL;
     }
-    if (JSVAL_IS_STRING(*vp))
-        return JSVAL_TO_STRING(*vp);
-    if (JSVAL_IS_INT(*vp)) {
-        str = js_NumberToString(cx, JSVAL_TO_INT(*vp));
-    } else if (JSVAL_IS_DOUBLE(*vp)) {
-        str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(*vp));
+
+    JSString *str;
+    if (JSVAL_IS_STRING(*vp)) {
+        str = JSVAL_TO_STRING(*vp);
     } else if (JSVAL_IS_BOOLEAN(*vp)) {
-        return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[
+        str = ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[
                                   JSVAL_TO_BOOLEAN(*vp)? 1 : 0]);
-    } else {
-        JS_ASSERT(JSVAL_IS_VOID(*vp));
-        return ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
+    } else if (JSVAL_IS_NULL(*vp)) {
+        str = ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
+    } else if (JSVAL_IS_VOID(*vp)) {
+        str = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
     }
-    if (str)
-        *vp = STRING_TO_JSVAL(str);
+    else {
+        if (JSVAL_IS_INT(*vp)) {
+            str = js_NumberToString(cx, JSVAL_TO_INT(*vp));
+        } else {
+            JS_ASSERT(JSVAL_IS_DOUBLE(*vp));
+            str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(*vp));
+        }
+        if (str)
+            *vp = STRING_TO_JSVAL(str);
+    }
     return str;
 }
 
 /*
  * Forward declarations for URI encode/decode and helper routines
  */
 static JSBool
 str_decodeURI(JSContext *cx, uintN argc, jsval *vp);
@@ -2961,40 +2963,75 @@ js_ValueToPrintable(JSContext *cx, jsval
     if (!str)
         return NULL;
     return js_GetStringBytes(cx, str);
 }
 
 JS_FRIEND_API(JSString *)
 js_ValueToString(JSContext *cx, jsval v)
 {
-    JSObject *obj;
     JSString *str;
 
-    if (JSVAL_IS_OBJECT(v)) {
-        obj = JSVAL_TO_OBJECT(v);
-        if (!obj)
-            return ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
-        if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, &v))
-            return NULL;
+    if (!JSVAL_IS_PRIMITIVE(v) &&
+        !OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), JSTYPE_STRING, &v)) {
+        return NULL;
     }
+
     if (JSVAL_IS_STRING(v)) {
         str = JSVAL_TO_STRING(v);
     } else if (JSVAL_IS_INT(v)) {
         str = js_NumberToString(cx, JSVAL_TO_INT(v));
     } else if (JSVAL_IS_DOUBLE(v)) {
         str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(v));
     } else if (JSVAL_IS_BOOLEAN(v)) {
         str = js_BooleanToString(cx, JSVAL_TO_BOOLEAN(v));
+    } else if (JSVAL_IS_NULL(v)) {
+        str = ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
     } else {
         str = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
     }
     return str;
 }
 
+static inline JSBool
+pushAtom(JSAtom *atom, JSTempVector<jschar> &buf)
+{
+    JSString *str = ATOM_TO_STRING(atom);
+    const jschar *chars;
+    size_t length;
+    str->getCharsAndLength(chars, length);
+    return buf.pushBack(chars, chars + length);
+}
+
+/* This function implements E-262-3 section 9.8, toString. */
+JS_FRIEND_API(JSBool)
+js_ValueToStringBuffer(JSContext *cx, jsval v, JSTempVector<jschar> &buf)
+{
+    if (!JSVAL_IS_PRIMITIVE(v) &&
+        !OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), JSTYPE_STRING, &v)) {
+        return JS_FALSE;
+    }
+
+    if (JSVAL_IS_STRING(v)) {
+        JSString *str = JSVAL_TO_STRING(v);
+        const jschar *chars;
+        size_t length;
+        str->getCharsAndLength(chars, length);
+        return buf.pushBack(chars, chars + length);
+    }
+    if (JSVAL_IS_NUMBER(v))
+        return js_NumberValueToStringBuffer(cx, v, buf);
+    if (JSVAL_IS_BOOLEAN(v))
+        return js_BooleanToStringBuffer(cx, JSVAL_TO_BOOLEAN(v), buf);
+    if (JSVAL_IS_NULL(v))
+        return pushAtom(cx->runtime->atomState.nullAtom, buf);
+    JS_ASSERT(JSVAL_IS_VOID(v));
+    return pushAtom(cx->runtime->atomState.typeAtoms[JSTYPE_VOID], buf);
+}
+
 JS_FRIEND_API(JSString *)
 js_ValueToSource(JSContext *cx, jsval v)
 {
     JSTempValueRooter tvr;
     JSString *str;
 
     if (JSVAL_IS_VOID(v))
         return ATOM_TO_STRING(cx->runtime->atomState.void0Atom);
--- a/js/src/jsstr.h
+++ b/js/src/jsstr.h
@@ -598,16 +598,24 @@ js_ValueToPrintable(JSContext *cx, jsval
 /*
  * Convert a value to a string, returning null after reporting an error,
  * otherwise returning a new string reference.
  */
 extern JS_FRIEND_API(JSString *)
 js_ValueToString(JSContext *cx, jsval v);
 
 /*
+ * This function implements E-262-3 section 9.8, toString.  Convert the given
+ * value to a string of jschars appended to the given buffer.  On error, the
+ * passed buffer may have partial results appended.
+ */
+extern JS_FRIEND_API(JSBool)
+js_ValueToStringBuffer(JSContext *, jsval, JSTempVector<jschar> &);
+
+/*
  * Convert a value to its source expression, returning null after reporting
  * an error, otherwise returning a new string reference.
  */
 extern JS_FRIEND_API(JSString *)
 js_ValueToSource(JSContext *cx, jsval v);
 
 /*
  * Compute a hash function from str. The caller can call this function even if
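
Because js_ValueToStringBuffer appends into a caller-owned JSTempVector,
callers can batch several conversions into a single buffer and a single
JSString allocation, as array_toString_sub does in jsarray.cpp. A minimal
sketch under this patch's API (ConcatPair is hypothetical):

    /*
     * Hypothetical caller batching two conversions into one allocation,
     * mirroring jsarray.cpp's array_toString_sub/BufferToString pattern.
     */
    static JSBool
    ConcatPair(JSContext *cx, jsval a, jsval b, jsval *rval)
    {
        JSTempVector<jschar> buf(cx);       /* owns the chars until extracted */
        if (!js_ValueToStringBuffer(cx, a, buf) ||
            !buf.pushBack(',') ||
            !js_ValueToStringBuffer(cx, b, buf) ||
            !buf.pushBack(0)) {             /* trailing '\0', as BufferToString expects */
            return JS_FALSE;                /* buf's destructor frees the chars */
        }
        size_t len = buf.size() - 1;        /* exclude the trailing '\0' */
        jschar *chars = buf.extractRawBuffer();
        JSString *str = js_NewString(cx, chars, len);
        if (!str) {
            JS_free(cx, chars);
            return JS_FALSE;
        }
        *rval = STRING_TO_JSVAL(str);
        return JS_TRUE;
    }
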
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -2939,16 +2939,17 @@ TraceRecorder::snapshot(ExitType exitTyp
         : 0;
     exit->exitType = exitType;
     exit->block = fp->blockChain;
     exit->pc = pc;
     exit->imacpc = fp->imacpc;
     exit->sp_adj = (stackSlots * sizeof(double)) - treeInfo->nativeStackBase;
     exit->rp_adj = exit->calldepth * sizeof(FrameInfo*);
     exit->nativeCalleeWord = 0;
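+    // Record the JSRESOLVE property-lookup flags inferred at the current pc
+    // so that code handling this side exit can observe them.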
+    exit->lookupFlags = js_InferFlags(cx, 0);
     memcpy(getFullTypeMap(exit), typemap, typemap_size);
     return exit;
 }
 
 JS_REQUIRES_STACK LIns*
 TraceRecorder::createGuardRecord(VMSideExit* exit)
 {
     LIns* guardRec = lir->insSkip(sizeof(GuardRecord));
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -333,16 +333,17 @@ struct VMSideExit : public nanojit::Side
     jsbytecode* imacpc;
     intptr_t sp_adj;
     intptr_t rp_adj;
     int32_t calldepth;
     uint32 numGlobalSlots;
     uint32 numStackSlots;
     uint32 numStackSlotsBelowCurrentFrame;
     ExitType exitType;
+    uintN lookupFlags;
 
     /*
      * Ordinarily 0.  If a slow native function is atop the stack, the 1 bit is
      * set if constructing and the other bits are a pointer to the funobj.
      */
     uintptr_t nativeCalleeWord;
 
     JSObject * nativeCallee() {
new file mode 100644
--- /dev/null
+++ b/js/src/jsvector.h
@@ -0,0 +1,410 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=99 ft=cpp:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
+ * June 12, 2009.
+ *
+ * The Initial Developer of the Original Code is
+ *   the Mozilla Corporation.
+ *
+ * Contributor(s):
+ *   Luke Wagner <lw@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsvector_h_
+#define jsvector_h_
+
+#include "jscntxt.h"
+
+#include <string.h>
+#include <new>
+
+/*
+ * Traits class for identifying POD types.  Until C++0x, there is no automatic
+ * way to detect PODs, so for the moment it is done manually.
+ */
+template <class T> struct IsPodType     { static const bool result = false; };
+template <> struct IsPodType<char>      { static const bool result = true; };
+template <> struct IsPodType<int>       { static const bool result = true; };
+template <> struct IsPodType<short>     { static const bool result = true; };
+template <> struct IsPodType<long>      { static const bool result = true; };
+template <> struct IsPodType<float>     { static const bool result = true; };
+template <> struct IsPodType<double>    { static const bool result = true; };
+template <> struct IsPodType<jschar>    { static const bool result = true; };
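+
+/*
+ * Further POD element types are opted in with one more specialization, e.g.
+ * for a hypothetical Pixel struct of plain ints:
+ *
+ *   template <> struct IsPodType<Pixel> { static const bool result = true; };
+ */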
+
+/*
+ * This template class provides a default implementation for vector operations
+ * when the element type is not known to be a POD, as judged by IsPodType.
+ */
+template <class T, bool IsPod>
+struct JSTempVectorImpl
+{
+    /* Destroys constructed objects in the range [begin, end). */
+    static inline void destroy(T *begin, T *end) {
+        for (T *p = begin; p != end; ++p)
+            p->~T();
+    }
+
+    /* Constructs objects in the uninitialized range [begin, end). */
+    static inline void initialize(T *begin, T *end) {
+        for (T *p = begin; p != end; ++p)
+            new(p) T();
+    }
+
+    /*
+     * Copy-constructs objects in the uninitialized range
+     * [dst, dst+(srcend-srcbeg)) from the range [srcbeg, srcend).
+     */
+    template <class U>
+    static inline void copyInitialize(T *dst, const U *srcbeg, const U *srcend) {
+        for (const U *p = srcbeg; p != srcend; ++p, ++dst)
+            new(dst) T(*p);
+    }
+
+    /*
+     * Grows the given buffer to have capacity newcap, preserving the objects
+     * constructed in the range [begin, end) and updating vec.
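+     *
+     * Note: because T may be a non-POD, elements are copy-constructed into
+     * the new buffer and destroyed in the old one; realloc, which would move
+     * raw bytes without running constructors, is only safe in the POD
+     * specialization below.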
+     */
+    static inline bool growTo(JSTempVector<T> &vec, size_t newcap) {
+        size_t bytes = sizeof(T) * newcap;
+        T *newbuf = reinterpret_cast<T *>(malloc(bytes));
+        if (!newbuf) {
+            js_ReportOutOfMemory(vec.mCx);
+            return false;
+        }
+        for (T *dst = newbuf, *src = vec.mBegin; src != vec.mEnd; ++dst, ++src)
+            new(dst) T(*src);
+        JSTempVectorImpl::destroy(vec.mBegin, vec.mEnd);
+        free(vec.mBegin);
+        vec.mEnd = newbuf + (vec.mEnd - vec.mBegin);
+        vec.mBegin = newbuf;
+        vec.mCapacity = newbuf + newcap;
+        return true;
+    }
+};
+
+/*
+ * This partial template specialization provides a default implementation for
+ * vector operations when the element type is known to be a POD, as judged by
+ * IsPodType.
+ */
+template <class T>
+struct JSTempVectorImpl<T, true>
+{
+    static inline void destroy(T *, T *) {}
+
+    static inline void initialize(T *begin, T *end) {
+        // Note: memset(begin, 0, sizeof(T) * (end-begin)) was measured slower.
+        for (T *p = begin; p != end; ++p)
+            *p = 0;
+    }
+
+    static inline void copyInitialize(T *dst, const T *srcbeg, const T *srcend) {
+        // Note: memcpy(dst, srcbeg, sizeof(T) * (srcend-srcbeg)) was measured slower.
+        for (const T *p = srcbeg; p != srcend; ++p, ++dst)
+            *dst = *p;
+    }
+
+    static inline bool growTo(JSTempVector<T> &vec, size_t newcap) {
+        size_t bytes = sizeof(T) * newcap;
+        T *newbuf = reinterpret_cast<T *>(realloc(vec.mBegin, bytes));
+        if (!newbuf) {
+            js_ReportOutOfMemory(vec.mCx);
+            // On failure the old buffer is untouched; it remains owned by
+            // vec and is freed in ~JSTempVector.  Freeing it here would
+            // leave vec.mBegin dangling and cause a double free.
+            return false;
+        }
+        vec.mEnd = newbuf + (vec.mEnd - vec.mBegin);
+        vec.mBegin = newbuf;
+        vec.mCapacity = newbuf + newcap;
+        return true;
+    }
+};
+
+/*
+ * JS-friendly, STL-like container providing a short-lived, dynamic buffer.
+ * JSTempVector calls the constructors/destructors of all elements stored in
+ * its internal buffer, so non-PODs may be safely used.
+ *
+ * T requirements:
+ *  - default and copy constructible, assignable, destructible
+ *  - operations do not throw
+ *
+ * N.B: JSTempVector is not reentrant: T member functions called during
+ *      JSTempVector member functions must not call back into the same
+ *      JSTempVector.
+ */
+template <class T>
+class JSTempVector
+{
+#ifdef DEBUG
+    bool mInProgress;
+#endif
+
+    class ReentrancyGuard {
+        JSTempVector &mVec;
+      public:
+        ReentrancyGuard(JSTempVector &v)
+          : mVec(v)
+        {
+#ifdef DEBUG
+            JS_ASSERT(!mVec.mInProgress);
+            mVec.mInProgress = true;
+#endif
+        }
+        ~ReentrancyGuard()
+        {
+#ifdef DEBUG
+            mVec.mInProgress = false;
+#endif
+        }
+    };
+
+  public:
+    JSTempVector(JSContext *cx)
+      :
+#ifdef DEBUG
+        mInProgress(false),
+#endif
+        mCx(cx), mBegin(0), mEnd(0), mCapacity(0)
+    {}
+    ~JSTempVector();
+
+    JSTempVector(const JSTempVector &);
+    JSTempVector &operator=(const JSTempVector &);
+
+    /* accessors */
+
+    size_t size() const     { return mEnd - mBegin; }
+    size_t capacity() const { return mCapacity - mBegin; }
+    bool empty() const      { return mBegin == mEnd; }
+
+    T &operator[](size_t i) {
+        JS_ASSERT(!mInProgress && i < size());
+        return mBegin[i];
+    }
+
+    const T &operator[](size_t i) const {
+        JS_ASSERT(!mInProgress && i < size());
+        return mBegin[i];
+    }
+
+    T *begin() {
+        JS_ASSERT(!mInProgress);
+        return mBegin;
+    }
+
+    const T *begin() const {
+        JS_ASSERT(!mInProgress);
+        return mBegin;
+    }
+
+    T *end() {
+        JS_ASSERT(!mInProgress);
+        return mEnd;
+    }
+
+    const T *end() const {
+        JS_ASSERT(!mInProgress);
+        return mEnd;
+    }
+
+    T &back() {
+        JS_ASSERT(!mInProgress && !empty());
+        return *(mEnd - 1);
+    }
+
+    const T &back() const {
+        JS_ASSERT(!mInProgress && !empty());
+        return *(mEnd - 1);
+    }
+
+    /* mutators */
+
+    bool reserve(size_t);
+    bool growBy(size_t);
+    void clear();
+
+    bool pushBack(const T &);
+    template <class U> bool pushBack(const U *begin, const U *end);
+
+    /*
+     * Transfers ownership of the internal buffer used by JSTempVector to the
+     * caller.  After this call, the JSTempVector is empty.
+     * N.B. Although the result is a raw T*, only the elements in the range
+     *      [0, size()) have been constructed.
+     */
+    T *extractRawBuffer();
+
+    /*
+     * Transfer ownership of an array of objects into the JSTempVector.
+     * N.B. This call assumes that there are no uninitialized elements in the
+     *      passed array.
+     */
+    void replaceRawBuffer(T *, size_t length);
+
+  private:
+    typedef JSTempVectorImpl<T, IsPodType<T>::result> Impl;
+    friend class JSTempVectorImpl<T, IsPodType<T>::result>;
+
+    static const int sGrowthFactor = 3;
+
+    bool checkOverflow(size_t newval, size_t oldval, size_t diff) const;
+
+    JSContext *mCx;
+    T *mBegin, *mEnd, *mCapacity;
+};
+
+template <class T>
+inline
+JSTempVector<T>::~JSTempVector()
+{
+    ReentrancyGuard g(*this);
+    Impl::destroy(mBegin, mEnd);
+    free(mBegin);
+}
+
+template <class T>
+inline bool
+JSTempVector<T>::reserve(size_t newsz)
+{
+    ReentrancyGuard g(*this);
+    size_t oldcap = capacity();
+    if (newsz > oldcap) {
+        size_t diff = newsz - oldcap;
+        size_t newcap = diff + oldcap * sGrowthFactor;
+        return checkOverflow(newcap, oldcap, diff) &&
+               Impl::growTo(*this, newcap);
+    }
+    return true;
+}
+
+template <class T>
+inline bool
+JSTempVector<T>::growBy(size_t amount)
+{
+    /* grow if needed */
+    size_t oldsize = size(), newsize = oldsize + amount;
+    if (!checkOverflow(newsize, oldsize, amount) ||
+        (newsize > capacity() && !reserve(newsize)))
+        return false;
+
+    /* initialize new elements */
+    ReentrancyGuard g(*this);
+    JS_ASSERT(mCapacity - (mBegin + newsize) >= 0);
+    T *newend = mBegin + newsize;
+    Impl::initialize(mEnd, newend);
+    mEnd = newend;
+    return true;
+}
+
+template <class T>
+inline void
+JSTempVector<T>::clear()
+{
+    ReentrancyGuard g(*this);
+    Impl::destroy(mBegin, mEnd);
+    mEnd = mBegin;
+}
+
+/*
+ * Check for overflow of an increased size or capacity (generically, a
+ * 'value').  'diff' is the amount by which newval should exceed oldval.
+ * The comparisons are made in bytes so that the multiplication by sizeof(T)
+ * is itself guarded against overflow.
+ */
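+/*
+ * Example (illustrative): with sizeof(T) == 2 and a 32-bit size_t, growing
+ * oldval = 0x40000000 to newval = 0x80000000 (diff = 0x40000000) wraps
+ * newbytes around to 0, so the newbytes >= oldbytes test fails and the
+ * overflow is reported.
+ */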
+template <class T>
+inline bool
+JSTempVector<T>::checkOverflow(size_t newval, size_t oldval, size_t diff) const
+{
+    size_t newbytes = newval * sizeof(T),
+           oldbytes = oldval * sizeof(T),
+           diffbytes = diff * sizeof(T);
+    bool ok = newbytes >= oldbytes && (newbytes - oldbytes) >= diffbytes;
+    if (!ok)
+        js_ReportAllocationOverflow(mCx);
+    return ok;
+}
+
+template <class T>
+inline bool
+JSTempVector<T>::pushBack(const T &t)
+{
+    ReentrancyGuard g(*this);
+    if (mEnd == mCapacity) {
+        /* out of space: grow by sGrowthFactor */
+        size_t oldcap = capacity();
+        size_t newcap = empty() ? 1 : oldcap * sGrowthFactor;
+        if (!checkOverflow(newcap, oldcap, 1) ||
+            !Impl::growTo(*this, newcap))
+            return false;
+    }
+    JS_ASSERT(mEnd != mCapacity);
+    new(mEnd++) T(t);
+    return true;
+}
+
+template <class T>
+template <class U>
+inline bool
+JSTempVector<T>::pushBack(const U *begin, const U *end)
+{
+    ReentrancyGuard g(*this);
+    size_t space = mCapacity - mEnd, needed = end - begin;
+    if (space < needed) {
+        /* not enough space: grow by sGrowthFactor, plus room for 'needed' */
+        size_t oldcap = capacity();
+        size_t newcap = empty() ? needed : (needed + oldcap * sGrowthFactor);
+        if (!checkOverflow(newcap, oldcap, needed) ||
+            !Impl::growTo(*this, newcap))
+            return false;
+    }
+    JS_ASSERT((mCapacity - mEnd) >= (end - begin));
+    Impl::copyInitialize(mEnd, begin, end);
+    mEnd += needed;
+    return true;
+}
+
+template <class T>
+inline T *
+JSTempVector<T>::extractRawBuffer()
+{
+    T *ret = mBegin;
+    mBegin = mEnd = mCapacity = 0;
+    return ret;
+}
+
+template <class T>
+inline void
+JSTempVector<T>::replaceRawBuffer(T *p, size_t length)
+{
+    ReentrancyGuard g(*this);
+    Impl::destroy(mBegin, mEnd);
+    free(mBegin);
+    mBegin = p;
+    mCapacity = mEnd = mBegin + length;
+}
+
+#endif /* jsvector_h_ */
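
A short end-to-end sketch of the container in use (illustrative only):

    void example(JSContext *cx) {
        JSTempVector<int> v(cx);        /* int is a POD: realloc fast path */
        for (int i = 0; i < 100; ++i) {
            if (!v.pushBack(i))
                return;                 /* OOM was already reported on cx */
        }
        /* leaving scope destroys the elements and frees the buffer */
    }
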
--- a/js/src/jsxdrapi.cpp
+++ b/js/src/jsxdrapi.cpp
@@ -488,32 +488,28 @@ JS_XDRStringOrNull(JSXDRState *xdr, JSSt
     return JS_XDRString(xdr, strp);
 }
 
 static JSBool
 XDRDoubleValue(JSXDRState *xdr, jsdouble *dp)
 {
     jsdpun u;
 
-    if (xdr->mode == JSXDR_ENCODE)
-        u.d = *dp;
+    u.d = (xdr->mode == JSXDR_ENCODE) ? *dp : 0.0;
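+    // u.d is now defined in both modes: live data when encoding, a 0.0
+    // placeholder when decoding (overwritten once the two words are read).
+    // The same idiom appears in JS_XDRDouble and XDRValueBody below.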
     if (!JS_XDRUint32(xdr, &u.s.lo) || !JS_XDRUint32(xdr, &u.s.hi))
         return JS_FALSE;
     if (xdr->mode == JSXDR_DECODE)
         *dp = u.d;
     return JS_TRUE;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_XDRDouble(JSXDRState *xdr, jsdouble **dpp)
 {
-    jsdouble d;
-
-    if (xdr->mode == JSXDR_ENCODE)
-        d = **dpp;
+    jsdouble d = (xdr->mode == JSXDR_ENCODE) ? **dpp : 0.0;
     if (!XDRDoubleValue(xdr, &d))
         return JS_FALSE;
     if (xdr->mode == JSXDR_DECODE) {
         *dpp = JS_NewDouble(xdr->cx, d);
         if (!*dpp)
             return JS_FALSE;
     }
     return JS_TRUE;
@@ -539,19 +535,17 @@ XDRValueBody(JSXDRState *xdr, uint32 typ
             str = JSVAL_TO_STRING(*vp);
         if (!JS_XDRString(xdr, &str))
             return JS_FALSE;
         if (xdr->mode == JSXDR_DECODE)
             *vp = STRING_TO_JSVAL(str);
         break;
       }
       case JSVAL_DOUBLE: {
-        jsdouble *dp;
-        if (xdr->mode == JSXDR_ENCODE)
-            dp = JSVAL_TO_DOUBLE(*vp);
+        jsdouble *dp = (xdr->mode == JSXDR_ENCODE) ? JSVAL_TO_DOUBLE(*vp) : NULL;
         if (!JS_XDRDouble(xdr, &dp))
             return JS_FALSE;
         if (xdr->mode == JSXDR_DECODE)
             *vp = DOUBLE_TO_JSVAL(dp);
         break;
       }
       case JSVAL_OBJECT: {
         JSObject *obj;
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -638,19 +638,19 @@ assemble(istream &in,
 {
 
     Pipeline writer;
 
     multimap<string,LIns*> fwd_jumps;
     map<string,LIns*> labels;
     map<string,pair<LOpcode,size_t> > op_map;
 
-#define OPDEF(op, number, args) \
+#define OPDEF(op, number, args, repkind) \
     op_map[#op] = make_pair(LIR_##op, args);
-#define OPDEF64(op, number, args) \
+#define OPDEF64(op, number, args, repkind) \
     op_map[#op] = make_pair(LIR_##op, args);
 #include "nanojit/LIRopcode.tbl"
 #undef OPDEF
 #undef OPDEF64
 
     vector<string> toks;
     size_t line = 0;
 
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -1477,17 +1477,16 @@ namespace nanojit
 				case LIR_fge:
 				{
                     countlir_fpu();
 					asm_fcond(ins);
 					break;
 				}
 				case LIR_eq:
                 case LIR_ov:
-                case LIR_cs:
 				case LIR_le:
 				case LIR_lt:
 				case LIR_gt:
 				case LIR_ge:
 				case LIR_ult:
 				case LIR_ule:
 				case LIR_ugt:
 				case LIR_uge:
@@ -1538,39 +1537,33 @@ namespace nanojit
 	/*
 	 * Write a jump table for the given SwitchInfo and store the table
 	 * address in the SwitchInfo. Every entry will initially point to
 	 * target.
 	 */
 	void Assembler::emitJumpTable(SwitchInfo* si, NIns* target)
 	{
 		underrunProtect(si->count * sizeof(NIns*) + 20);
-		// Align for platform. The branch should be optimized away and is
-		// required to select the compatible int type.
-		if (sizeof(NIns*) == 8) {
-			_nIns = (NIns*) (uint64(_nIns) & ~7);
-		} else if (sizeof(NIns*) == 4) {
-		    _nIns = (NIns*) (uint32(_nIns) & ~3);
-		}
+		_nIns = reinterpret_cast<NIns*>(uintptr_t(_nIns) & ~(sizeof(NIns*) - 1));
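+		// E.g. with 8-byte pointers the mask is ~7, rounding _nIns down to
+		// an 8-byte boundary; the expression adapts to any pointer size.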
 		for (uint32_t i = 0; i < si->count; ++i) {
 			_nIns = (NIns*) (((uint8*) _nIns) - sizeof(NIns*));
 			*(NIns**) _nIns = target;
 		}
 		si->table = (NIns**) _nIns;
 	}
 
     void Assembler::assignSavedRegs()
     {
         // restore saved regs
 		releaseRegisters();
         LirBuffer *b = _thisfrag->lirbuf;
         for (int i=0, n = NumSavedRegs; i < n; i++) {
             LIns *p = b->savedRegs[i];
             if (p)
-                findSpecificRegFor(p, savedRegs[p->imm8()]);
+                findSpecificRegFor(p, savedRegs[p->paramArg()]);
         }
     }
 
     void Assembler::reserveSavedRegs()
     {
         LirBuffer *b = _thisfrag->lirbuf;
         for (int i=0, n = NumSavedRegs; i < n; i++) {
             LIns *p = b->savedRegs[i];
@@ -1579,20 +1572,20 @@ namespace nanojit
         }
     }
 
     // restore parameter registers
     void Assembler::assignParamRegs()
     {
         LInsp state = _thisfrag->lirbuf->state;
         if (state)
-            findSpecificRegFor(state, argRegs[state->imm8()]); 
+            findSpecificRegFor(state, argRegs[state->paramArg()]); 
         LInsp param1 = _thisfrag->lirbuf->param1;
         if (param1)
-            findSpecificRegFor(param1, argRegs[param1->imm8()]);
+            findSpecificRegFor(param1, argRegs[param1->paramArg()]);
     }
     
     void Assembler::handleLoopCarriedExprs()
     {
         // ensure that exprs spanning the loop are marked live at the end of the loop
         reserveSavedRegs();
         for (int i=0, n=pending_lives.size(); i < n; i++) {
             findMemFor(pending_lives[i]);
--- a/js/src/nanojit/Fragmento.cpp
+++ b/js/src/nanojit/Fragmento.cpp
@@ -79,22 +79,23 @@ namespace nanojit
 			NanoStaticAssert((LIR_le ^ 3) == LIR_gt);
 			NanoStaticAssert((LIR_ult ^ 3) == LIR_uge);
 			NanoStaticAssert((LIR_ule ^ 3) == LIR_ugt);
 			NanoStaticAssert((LIR_flt ^ 3) == LIR_fge);
 			NanoStaticAssert((LIR_fle ^ 3) == LIR_fgt);
 
 			/* Opcodes must be strictly increasing without holes. */
 			uint32_t count = 0;
-			#define OPDEF(op, number, operands) \
-				NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
-			#define OPDEF64(op, number, operands) OPDEF(op, number, operands)
-			#include "LIRopcode.tbl"
-			#undef OPDEF
-			#undef OPDEF64
+#define OPDEF(op, number, operands, repkind) \
+        NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
+#define OPDEF64(op, number, operands, repkind) \
+        OPDEF(op, number, operands, repkind)
+#include "LIRopcode.tbl"
+#undef OPDEF
+#undef OPDEF64
 		}
 #endif
 
 #ifdef MEMORY_INFO
 		_allocList.set_meminfo_name("Fragmento._allocList");
 #endif
 		NanoAssert(_max_pages > _pagesGrowth); // shrink growth if needed 
 		_core = core;
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -46,33 +46,55 @@
 
 
 namespace nanojit
 {
     using namespace avmplus;
 	#ifdef FEATURE_NANOJIT
 
 	const uint8_t operandCount[] = {
-#define OPDEF(op, number, operands) \
+#define OPDEF(op, number, operands, repkind) \
         operands,
-#define OPDEF64(op, number, operands) \
+#define OPDEF64(op, number, operands, repkind) \
         operands,
 #include "LIRopcode.tbl"
 #undef OPDEF
 #undef OPDEF64
         0
 	};
 
+    const uint8_t repKinds[] = {
+#define OPDEF(op, number, operands, repkind) \
+        LRK_##repkind,
+#define OPDEF64(op, number, operands, repkind) \
+        OPDEF(op, number, operands, repkind)
+#include "LIRopcode.tbl"
+#undef OPDEF
+#undef OPDEF64
+        0
+    };
+
+    const uint8_t insSizes[] = {
+#define OPDEF(op, number, operands, repkind) \
+        sizeof(LIns##repkind),
+#define OPDEF64(op, number, operands, repkind) \
+        OPDEF(op, number, operands, repkind)
+#include "LIRopcode.tbl"
+#undef OPDEF
+#undef OPDEF64
+        0
+    };
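+
+    // These size tables make backwards iteration table-driven;  e.g. in
+    // LirReader::read(), stepping over most instructions is simply:
+    //
+    //     i -= insSizes[((LInsp)i)->opcode()];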
+
 	// LIR verbose specific
 	#ifdef NJ_VERBOSE
 
 	const char* lirNames[] = {
-#define OPDEF(op, number, operands) \
+#define OPDEF(op, number, operands, repkind) \
         #op,
-#define OPDEF64(op, number, operands) \
+#define OPDEF64(op, number, operands, repkind) \
         #op,
 #include "LIRopcode.tbl"
 #undef OPDEF
 #undef OPDEF64
         NULL
 	};
 
 	#endif /* NJ_VERBOSE */
@@ -126,17 +148,18 @@ namespace nanojit
 		Page* start = pageAlloc();
         _unused = start ? uintptr_t(&start->lir[0]) : 0;
 		_nextPage = pageAlloc();
 		NanoAssert((_unused && _nextPage) || _noMem);
     }
 
 	int32_t LirBuffer::insCount() 
 	{
-        // Doesn't include LIR_skip payload or LIR_call arg slots.
+        // A LIR_skip payload is considered part of the LIR_skip, and LIR_call
+        // arg slots are considered part of the LIR_call.
 		return _stats.lir;
 	}
 
     size_t LirBuffer::byteCount() 
 	{
 		return ((_pages.size() ? _pages.size()-1 : 0) * sizeof(Page)) +
             (_unused - pageTop(_unused));
 	}
@@ -160,20 +183,20 @@ namespace nanojit
         _unused = uintptr_t(&_nextPage->lir[0]);
         _nextPage = pageAlloc();
         NanoAssert(_nextPage || _noMem);
 
         // Link LIR stream back to prior instruction.
         // Unlike all the ins*() functions, we don't call makeRoom() here
         // because we know we have enough space, having just started a new
         // page.
-        LInsp l = (LInsp)_unused;
-		l->setIns1(LIR_skip, (LInsp)addrOfLastLInsOnCurrentPage);
-		l->resv()->clear();
-        _unused += sizeof(LIns);
+        LInsSk* insSk = (LInsSk*)_unused;
+        LIns*   ins   = insSk->getLIns();
+        ins->initLInsSk((LInsp)addrOfLastLInsOnCurrentPage);
+        _unused += sizeof(LInsSk);
         _stats.lir++;
     }
 
     // Make room for a single instruction.
     uintptr_t LirBuffer::makeRoom(size_t szB)
     {
         // Make sure the size is ok, and that we're not pointing to the
         // PageHeader.
@@ -203,50 +226,52 @@ namespace nanojit
         if (_unused > pageBottom(startOfRoom)) {
             // Check we only spilled over by one byte.
             NanoAssert(_unused == pageTop(_unused));
             NanoAssert(_unused == pageBottom(startOfRoom) + 1);
             uintptr_t addrOfLastLInsOnPage = _unused - sizeof(LIns);
             moveToNewPage(addrOfLastLInsOnPage);
         }
 
+        // Make sure it's word-aligned.
+        NanoAssert(0 == startOfRoom % sizeof(void*));
         return startOfRoom;
 	}
 
     LInsp LirBufWriter::insStorei(LInsp val, LInsp base, int32_t d)
 	{
         LOpcode op = val->isQuad() ? LIR_stqi : LIR_sti;
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setStorei(op, val, base, d);
-		l->resv()->clear();
-		return l;
+        LInsSti* insSti = (LInsSti*)_buf->makeRoom(sizeof(LInsSti));
+        LIns*    ins    = insSti->getLIns();
+        ins->initLInsSti(op, val, base, d);
+        return ins;
 	}
 
 	LInsp LirBufWriter::ins0(LOpcode op)
 	{
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setIns0(op);
-		l->resv()->clear();
-		return l;
+        LInsOp0* insOp0 = (LInsOp0*)_buf->makeRoom(sizeof(LInsOp0));
+        LIns*    ins    = insOp0->getLIns();
+        ins->initLInsOp0(op);
+        return ins;
 	}
 	
 	LInsp LirBufWriter::ins1(LOpcode op, LInsp o1)
 	{
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setIns1(op, o1);
-		l->resv()->clear();
-		return l;
+        LInsOp1* insOp1 = (LInsOp1*)_buf->makeRoom(sizeof(LInsOp1));
+        LIns*    ins    = insOp1->getLIns();
+        ins->initLInsOp1(op, o1);
+        return ins;
 	}
 	
 	LInsp LirBufWriter::ins2(LOpcode op, LInsp o1, LInsp o2)
 	{
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setIns2(op, o1, o2);
-		l->resv()->clear();
-		return l;
+        LInsOp2* insOp2 = (LInsOp2*)_buf->makeRoom(sizeof(LInsOp2));
+        LIns*    ins    = insOp2->getLIns();
+        ins->initLInsOp2(op, o1, o2);
+        return ins;
 	}
 
 	LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, LInsp d)
 	{
 		return ins2(op,base,d);
 	}
 
 	LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, LInsp data)
@@ -258,69 +283,69 @@ namespace nanojit
 	{
         NanoAssert(condition);
         return ins2(op, condition, toLabel);
 	}
 
     LInsp LirBufWriter::insAlloc(int32_t size)
     {
         size = (size+3)>>2; // # of required 32bit words
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setAlloc(LIR_alloc, size);
-		l->resv()->clear();
-		return l;
+        LInsI* insI = (LInsI*)_buf->makeRoom(sizeof(LInsI));
+        LIns*  ins  = insI->getLIns();
+        ins->initLInsI(LIR_alloc, size);
+        return ins;
     }
 
     LInsp LirBufWriter::insParam(int32_t arg, int32_t kind)
     {
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setParam(LIR_param, arg, kind);
-		l->resv()->clear();
+        LInsP* insP = (LInsP*)_buf->makeRoom(sizeof(LInsP));
+        LIns*  ins  = insP->getLIns();
+        ins->initLInsP(arg, kind);
         if (kind) {
             NanoAssert(arg < NumSavedRegs);
-            _buf->savedRegs[arg] = l;
+            _buf->savedRegs[arg] = ins;
             _buf->explicitSavedRegs = true;
         }
-		return l;
+        return ins;
     }
 	
 	LInsp LirBufWriter::insImm(int32_t imm)
 	{
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setImm(LIR_int, imm);
-		l->resv()->clear();
-        return l;
+        LInsI* insI = (LInsI*)_buf->makeRoom(sizeof(LInsI));
+        LIns*  ins  = insI->getLIns();
+        ins->initLInsI(LIR_int, imm);
+        return ins;
 	}
 	
 	LInsp LirBufWriter::insImmq(uint64_t imm)
 	{
-        LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
-		l->setImmq(LIR_quad, imm);
-		l->resv()->clear();
-        return l;
+        LInsI64* insI64 = (LInsI64*)_buf->makeRoom(sizeof(LInsI64));
+        LIns*    ins    = insI64->getLIns();
+        ins->initLInsI64(LIR_quad, imm);
+        return ins;
 	}
 
     LInsp LirBufWriter::insSkip(size_t payload_szB)
 	{
         // First, round up payload_szB to a multiple of the word size.  To
         // ensure that the rounding up won't cause it to exceed
         // NJ_MAX_SKIP_PAYLOAD_SZB, NJ_MAX_SKIP_PAYLOAD_SZB must also be a
         // multiple of the word size, which we check.
         payload_szB = alignUp(payload_szB, sizeof(void*));
         NanoAssert(0 == NJ_MAX_SKIP_PAYLOAD_SZB % sizeof(void*));
         NanoAssert(sizeof(void*) <= payload_szB && payload_szB <= NJ_MAX_SKIP_PAYLOAD_SZB);
 
-        uintptr_t payload = _buf->makeRoom(payload_szB + sizeof(LIns));   // payload + skip
+        uintptr_t payload = _buf->makeRoom(payload_szB + sizeof(LInsSk));
         uintptr_t prevLInsAddr = payload - sizeof(LIns);
-        LInsp l = (LInsp)(payload + payload_szB);
+        LInsSk* insSk = (LInsSk*)(payload + payload_szB);
+        LIns*   ins   = insSk->getLIns();
         NanoAssert(prevLInsAddr >= pageDataStart(prevLInsAddr));
-        NanoAssert(samepage(prevLInsAddr, l));
-		l->setIns1(LIR_skip, (LInsp)prevLInsAddr);
-		l->resv()->clear();
-        return l;
+        NanoAssert(samepage(prevLInsAddr, insSk));
+        ins->initLInsSk((LInsp)prevLInsAddr);
+        return ins;
 	}
 
     // Reads the next non-skip instruction.
 	LInsp LirReader::read()	
 	{
 		LInsp cur = _i;
 		if (!cur)
 			return 0;
@@ -336,246 +361,214 @@ namespace nanojit
         // instruction will be a skip -- the one needed for the cross-page
         // link.  But the last *inserted* instruction is what is recorded and
         // used to initialise each LirReader, and that is what is seen here,
         // and therefore this assertion holds.
         NanoAssert(iop != LIR_skip);
 
 		do
 		{
-			switch (iop)
-			{					
-				default:
-                    i -= sizeof(LIns);
-					break;
+            // Nb: the default case is table-driven (via insSizes[]) to avoid
+            // branch mispredictions -- a vanilla switch over the opcode or
+            // over LInsRepKind costs extra mispredictions, a small but
+            // noticeable slowdown.
+            switch (iop)
+            {
+                default:
+                    i -= insSizes[((LInsp)i)->opcode()];
+                    break;
 
 #if defined NANOJIT_64BIT
-            	case LIR_callh:
+                case LIR_callh:
 #endif
-				case LIR_call:
-			    case LIR_fcall: {
+                case LIR_call:
+                case LIR_fcall: {
                     int argc = ((LInsp)i)->argc();
-                    uintptr_t prev = i - sizeof(LIns) - argc*sizeof(LInsp);
-                    NanoAssert( samepage(i, prev) );
-                    i = prev;
+                    i -= sizeof(LInsC);         // step over the instruction
+                    i -= argc*sizeof(LInsp);    // step over the arguments
+                    NanoAssert( samepage(i, _i) );
                     break;
                 }
 
-				case LIR_skip:
-                    NanoAssert(((LInsp)i)->oprnd1() != (LInsp)i);
-                    i = uintptr_t(((LInsp)i)->oprnd1());
-					break;
+                case LIR_skip:
+                    // Ignore the skip, move onto its predecessor.
+                    NanoAssert(((LInsp)i)->prevLIns() != (LInsp)i);
+                    i = uintptr_t(((LInsp)i)->prevLIns());
+                    break;
 
-				case LIR_start:
-					_i = 0;  // start of trace
-					return cur;
-			}
+                case LIR_start:
+                    _i = 0;  // this means the next call to this method will return 0
+                    return cur;
+            }
             iop = ((LInsp)i)->opcode();
 		}
         while (iop==LIR_skip || iop==LIR_2);
         _i = (LInsp)i;
 		return cur;
 	}
 
     bool LIns::isFloat() const {
-        switch (firstWord.code) {
+        switch (opcode()) {
             default:
                 return false;
             case LIR_fadd:
             case LIR_fsub:
             case LIR_fmul:
             case LIR_fdiv:
             case LIR_fneg:
             case LIR_fcall:
             case LIR_i2f:
             case LIR_u2f:
                 return true;
         }
     }
     
 #if defined(_DEBUG)
-    bool LIns::isOp1() const {
-        switch (firstWord.code) {
-            case LIR_skip:
-            case LIR_ret:
-            case LIR_live:
-            case LIR_neg:
-#if !defined NANOJIT_64BIT
-            case LIR_callh:
-#endif
-            case LIR_not:
-            case LIR_qlo:
-            case LIR_qhi:
-            case LIR_ov:
-            case LIR_cs:
-            case LIR_file:
-            case LIR_line:
-            case LIR_fret:
-            case LIR_fneg:
-            case LIR_i2f:
-		    case LIR_u2f:
-		    case LIR_mod:
-                return true;
+    bool LIns::isLInsOp0() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_Op0 == repKinds[opcode()];
+    }
 
-            default:
-                return false;
-        }
+    bool LIns::isLInsOp1() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_Op1 == repKinds[opcode()];
+    }
+
+    bool LIns::isLInsOp2() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_Op2 == repKinds[opcode()];
+    }
+
+    bool LIns::isLInsSti() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_Sti == repKinds[opcode()];
     }
 
-    // Nb: this excludes loads and stores, which are covered by isLoad() and
-    // isStore().
-    bool LIns::isOp2() const {
-        switch (firstWord.code) {
-            case LIR_loop:
-            case LIR_x:
-            case LIR_jt:
-            case LIR_jf:
-            case LIR_feq:
-            case LIR_flt:
-            case LIR_fgt:
-            case LIR_fle:
-            case LIR_fge:
-            case LIR_cmov:
-            case LIR_add:
-            case LIR_sub:
-            case LIR_mul:
-		    case LIR_div:
-            case LIR_and:
-            case LIR_or:
-            case LIR_xor:
-            case LIR_lsh:
-            case LIR_rsh:
-            case LIR_ush:
-            case LIR_xt:
-            case LIR_xf:
-            case LIR_eq:
-            case LIR_lt:
-            case LIR_gt:
-            case LIR_le:
-            case LIR_ge:
-            case LIR_ult:
-            case LIR_ugt:
-            case LIR_ule:
-            case LIR_uge:
-            case LIR_2:
-            case LIR_xbarrier:
-            case LIR_xtbl:
-            case LIR_qiand:
-            case LIR_qiadd:
-            case LIR_qjoin:
-            case LIR_qcmov:
-            case LIR_fadd:
-            case LIR_fsub:
-            case LIR_fmul:
-            case LIR_fdiv:
-            case LIR_qior:
-            case LIR_qilsh:
-                return true;
+    bool LIns::isLInsSk() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_Sk == repKinds[opcode()];
+    }
+
+    bool LIns::isLInsC() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_C == repKinds[opcode()];
+    }
 
-            default:
-                return false;
-        }
+    bool LIns::isLInsP() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_P == repKinds[opcode()];
+    }
+
+    bool LIns::isLInsI() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_I == repKinds[opcode()];
+    }
+
+    bool LIns::isLInsI64() const {
+        NanoAssert(LRK_None != repKinds[opcode()]);
+        return LRK_I64 == repKinds[opcode()];
     }
 #endif // defined(_DEBUG)
 
 	bool LIns::isCmp() const {
-        LOpcode op = firstWord.code;
+        LOpcode op = opcode();
         return (op >= LIR_eq && op <= LIR_uge) || (op >= LIR_feq && op <= LIR_fge);
 	}
 
     bool LIns::isCond() const {
-        LOpcode op = firstWord.code;
-        return (op == LIR_ov) || (op == LIR_cs) || isCmp();
+        LOpcode op = opcode();
+        return (op == LIR_ov) || isCmp();
     }
 	
 	bool LIns::isQuad() const {
 #ifdef AVMPLUS_64BIT
 		// callh in 64bit cpu's means a call that returns an int64 in a single register
-		return (firstWord.code & LIR64) != 0 || firstWord.code == LIR_callh;
+        return (opcode() & LIR64) != 0 || opcode() == LIR_callh;
 #else
 		// callh in 32bit cpu's means the 32bit MSW of an int64 result in 2 registers
-		return (firstWord.code & LIR64) != 0;
+        return (opcode() & LIR64) != 0;
 #endif
 	}
     
 	bool LIns::isconstval(int32_t val) const
 	{
         return isconst() && imm32()==val;
 	}
 
 	bool LIns::isconstq() const
 	{	
-        return firstWord.code == LIR_quad;
+        return opcode() == LIR_quad;
 	}
 
 	bool LIns::isconstp() const
 	{
 #ifdef AVMPLUS_64BIT
 	    return isconstq();
 #else
 	    return isconst();
 #endif
 	}
 
     bool LIns::isCse() const
     { 
-        return nanojit::isCseOpcode(firstWord.code) || (isCall() && callInfo()->_cse);
+        return nanojit::isCseOpcode(opcode()) || (isCall() && callInfo()->_cse);
     }
 
     void LIns::setTarget(LInsp label)
     {
         NanoAssert(label && label->isop(LIR_label));
 		NanoAssert(isBranch());
-        u.oprnd_2 = label;
+        toLInsOp2()->oprnd_2 = label;
 	}
 
 	LInsp LIns::getTarget()
 	{
         NanoAssert(isBranch());
         return oprnd2();
 	}
 
     void *LIns::payload() const
     {
         NanoAssert(isop(LIR_skip));
-        // Operand 1 points to the previous instruction;  we move one
-        // instruction past it to get to the payload.
-        return (void*) (intptr_t(oprnd1()) + sizeof(LIns));
+        // Operand 1 points to the previous LIns;  we move past it to get to
+        // the payload.
+        return (void*) (uintptr_t(prevLIns()) + sizeof(LIns));
     }
 
     uint64_t LIns::imm64() const
 	{
         NanoAssert(isconstq());
-        return (uint64_t(i64.imm64_1) << 32) | uint32_t(i64.imm64_0);
+        return (uint64_t(toLInsI64()->imm64_1) << 32) | uint32_t(toLInsI64()->imm64_0);
 	}
 
     double LIns::imm64f() const
 	{
         union {
             double f;
             uint64_t q;
         } u;
         u.q = imm64();
         return u.f;
 	}
 
 	const CallInfo* LIns::callInfo() const
 	{
         NanoAssert(isCall());
-        return c.ci;
+        return toLInsC()->ci;
 	}
 
     // Index args in r-l order.  arg(0) is rightmost arg.
     // Nb: this must be kept in sync with insCall().
     LInsp LIns::arg(uint32_t i) 
 	{
         NanoAssert(isCall());
         NanoAssert(i < argc());
-        LInsp* offs = (LInsp*)this - (i+1);
-        return *offs;
+        // Move to the start of the LInsC, then move back one word per argument.
+        LInsp* argSlot = (LInsp*)(uintptr_t(toLInsC()) - (i+1)*sizeof(void*));
+        return *argSlot;
 	}
 
     LIns* LirWriter::ins2i(LOpcode v, LIns* oprnd1, int32_t imm)
     {
         return ins2(v, oprnd1, insImm(imm));
     }
 
     bool insIsS16(LInsp i)
@@ -690,18 +683,16 @@ namespace nanojit
 			switch (v) {
 			case LIR_qjoin:
 				q = c1 | uint64_t(c2)<<32;
 				return insImmq(q);
 			case LIR_eq:
 				return insImm(c1 == c2);
 			case LIR_ov:
                 return insImm((c2 != 0) && ((c1 + c2) <= c1)); 
-			case LIR_cs:
-                return insImm((c2 != 0) && ((uint32_t(c1) + uint32_t(c2)) <= uint32_t(c1)));
 			case LIR_lt:
 				return insImm(c1 < c2);
 			case LIR_gt:
 				return insImm(c1 > c2);
 			case LIR_le:
 				return insImm(c1 <= c2);
 			case LIR_ge:
 				return insImm(c1 >= c2);
@@ -1006,48 +997,33 @@ namespace nanojit
         ArgSize sizes[MAXARGS];
         int32_t argc = ci->get_sizes(sizes);
 
 		if (AvmCore::config.soft_float) {
 			if (op == LIR_fcall)
 				op = LIR_callh;
 		}
 
-        // An example of what we're trying to serialize (for a 32-bit machine):
-        //
-        // byte
-        // ----
-        // N+0   [ arg operand #2 ----------------------
-        // N+4     arg operand #1 ----------------------
-        // N+8     arg operand #0 ---------------------- ]
-        // N+12  [ resv + code=LIR_call
-        // N+16    imm8a | imm8b | (pad16) -------------
-        // N+20    ci ----------------------------------
-        // N+24    (pad32) ----------------------------- ]
-        //
-        // In this example:
-        //    'argc' = 3
-
 		NanoAssert(argc <= (int)MAXARGS);
 
         // Lay the call parameters out (in reverse order).
         // Nb: this must be kept in sync with arg().
-        LInsp* newargs = (LInsp*)_buf->makeRoom(argc*sizeof(LInsp) + sizeof(LIns));  // args + call
+        LInsp* newargs = (LInsp*)_buf->makeRoom(argc*sizeof(LInsp) + sizeof(LInsC)); // args + call
         for (int32_t i = 0; i < argc; i++)
             newargs[argc - i - 1] = args[i];
 
         // Write the call instruction itself.
-        LInsp l = (LInsp)(uintptr_t(newargs) + argc*sizeof(LInsp));
+        LInsC* insC = (LInsC*)(uintptr_t(newargs) + argc*sizeof(LInsp));
+        LIns*  ins  = insC->getLIns();
 #ifndef NANOJIT_64BIT
-        l->setCall(op==LIR_callh ? LIR_call : op, argc, ci);
+        ins->initLInsC(op==LIR_callh ? LIR_call : op, argc, ci);
 #else
-        l->setCall(op, argc, ci);
+        ins->initLInsC(op, argc, ci);
 #endif
-        l->resv()->clear();
-        return l;
+        return ins;
 	}
 
     using namespace avmplus;
 
 	StackFilter::StackFilter(LirFilter *in, GC *gc, LirBuffer *lirbuf, LInsp sp) 
 		: LirFilter(in), gc(gc), lirbuf(lirbuf), sp(sp), top(0)
 	{}
 
@@ -1453,19 +1429,19 @@ namespace nanojit
                 NanoAssert(size_t(i->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
                 live.put(i,use);
             }
 		}
         void retire(LInsp i, GC *gc) {
             RetiredEntry *e = NJ_NEW(gc, RetiredEntry)(gc);
             e->i = i;
             for (int j=0, n=live.size(); j < n; j++) {
-                LInsp l = live.keyAt(j);
-                if (!l->isStore() && !l->isGuard())
-                    e->live.add(l);
+                LInsp ins = live.keyAt(j);
+                if (!ins->isStore() && !ins->isGuard())
+                    e->live.add(ins);
             }
             int size=0;
 		    if ((size = e->live.size()) > maxlive)
 			    maxlive = size;
 
             live.remove(i);
             retired.add(e);
 		}
@@ -1702,18 +1678,18 @@ namespace nanojit
 					sprintf(s, "%s ",formatRef(i->arg(j)));
 				}
 				s += strlen(s);
 				sprintf(s, ")");
 				break;
 			}
 
 			case LIR_param: { 
-				uint32_t arg = i->imm8();
-				if (!i->imm8b()) {
+                uint32_t arg = i->paramArg();
+                if (!i->paramKind()) {
 					if (arg < sizeof(Assembler::argRegs)/sizeof(Assembler::argRegs[0])) {
 						sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op],
 							arg, gpn(Assembler::argRegs[arg]));
 					} else {
 						sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], arg);
 					}
 				} else {
 					sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op],
@@ -1746,17 +1722,16 @@ namespace nanojit
             case LIR_callh:
 			case LIR_neg:
 			case LIR_fneg:
 			case LIR_i2f:
 			case LIR_u2f:
 			case LIR_qlo:
 			case LIR_qhi:
             case LIR_ov:
-            case LIR_cs:
 			case LIR_not: 
 		    case LIR_mod:
 				sprintf(s, "%s = %s %s", formatRef(i), lirNames[op], formatRef(i->oprnd1()));
 				break;
 
 			case LIR_x:
 			case LIR_xt:
 			case LIR_xf:
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -53,37 +53,35 @@ namespace nanojit
 	enum LOpcode
 #if defined(_MSC_VER) && _MSC_VER >= 1400
           : unsigned
 #endif
 	{
 		// flags; upper bits reserved
 		LIR64	= 0x40,			// result is double or quad
 		
-#define OPDEF(op, number, args) \
+#define OPDEF(op, number, args, repkind) \
         LIR_##op = (number),
-#define OPDEF64(op, number, args) \
+#define OPDEF64(op, number, args, repkind) \
         LIR_##op = ((number) | LIR64),
 #include "LIRopcode.tbl"
         LIR_sentinel
 #undef OPDEF
 #undef OPDEF64
 	};
 
 	#if defined NANOJIT_64BIT
 	#define LIR_ldp     LIR_ldq
-	#define LIR_stp     LIR_stq
     #define LIR_piadd   LIR_qiadd
     #define LIR_piand   LIR_qiand
     #define LIR_pilsh   LIR_qilsh
 	#define LIR_pcmov	LIR_qcmov
     #define LIR_pior    LIR_qior
 	#else
 	#define LIR_ldp     LIR_ld
-	#define LIR_stp     LIR_st
     #define LIR_piadd   LIR_add
     #define LIR_piand   LIR_and
     #define LIR_pilsh   LIR_lsh
 	#define LIR_pcmov	LIR_cmov
     #define LIR_pior    LIR_or
 	#endif
 
 	struct GuardRecord;
@@ -143,282 +141,543 @@ namespace nanojit
     inline bool isCseOpcode(LOpcode op) {
         op = LOpcode(op & ~LIR64);
         return op >= LIR_int && op <= LIR_uge;
     }
     inline bool isRetOpcode(LOpcode op) {
         return (op & ~LIR64) == LIR_ret;
     }
 
-	// Sun Studio requires explicitly declaring signed int bit-field
-	#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-	#define _sign_int signed int
-	#else
-	#define _sign_int int32_t
-	#endif
-
     // The opcode is not logically part of the Reservation, but we include it
     // in this struct to ensure that opcode plus the Reservation fits in a
     // single word.  Yuk.
     struct Reservation
     {
         uint32_t arIndex:16;    // index into stack frame.  displ is -4*arIndex
         Register reg:7;         // register UnknownReg implies not in register
         uint32_t used:1;        // when set, the reservation is active
-        LOpcode  code:8;
+        LOpcode  opcode:8;
 
 		inline void init() {
 			reg = UnknownReg;
 			arIndex = 0;
 			used = 1;
 		}
 
-		inline void clear()
-		{
+        inline void clear() {
 			used = 0;
 		}
 	};
 
-    // Low-level Instruction.  4 words per instruction -- it's important this
-    // doesn't change unintentionally, so it is checked in LIR.cpp by an
-    // assertion in initOpcodeAndClearResv().
-    // The first word is the same for all LIns kinds;  the last three differ.
+    //-----------------------------------------------------------------------
+    // Low-level instructions.  This is a bit complicated, because we have a
+    // variable-width representation to minimise space usage.
+    //
+    // - Instruction size is always an integral multiple of word size.
+    //
+    // - Every instruction has at least one word, holding the opcode and the
+    //   reservation info.  That word is in class LIns.
+    //
+    // - Beyond that, most instructions have 1, 2 or 3 extra words.  These
+    //   extra words are in classes LInsOp1, LInsOp2, etc (collectively called
+    //   "LInsXYZ" in what follows).  Each LInsXYZ class also contains a word,
+    //   accessible by the 'ins' member, which holds the LIns data;  its type
+    //   is void* (which is the same size as LIns) rather than LIns to avoid a
+    //   recursive dependency between LIns and LInsXYZ.
+    //
+    // - LIR is written forward, but read backwards.  When reading backwards,
+    //   the opcode must live at a predictable offset that isn't affected by
+    //   instruction width.  Therefore, the LIns word (which contains the
+    //   opcode) is always the *last* word in an instruction.
+    //
+    // - Each instruction is created by casting pre-allocated bytes from a
+    //   LirBuffer to the LInsXYZ type.  Therefore there are no constructors
+    //   for LIns or LInsXYZ.
+    //
+    // - The standard handle for an instruction is a LIns*.  This actually
+    //   points to the LIns word, ie. to the final word in the instruction.
+    //   This is a bit odd, but it allows the instruction's opcode to be
+    //   easily accessed.  Once you've looked at the opcode and know what kind
+    //   of instruction it is, if you want to access any of the other words,
+    //   you need to use toLInsXYZ(), which takes the LIns* and gives you an
+    //   LInsXYZ*, ie. the pointer to the actual start of the instruction's
+    //   bytes.  From there you can access the instruction-specific extra
+    //   words.
+    //
+    // - However, from outside class LIns, LInsXYZ isn't visible, nor is
+    //   toLInsXYZ() -- from outside LIns, all LIR instructions are handled
+    //   via LIns pointers and get/set methods are used for all LIns/LInsXYZ
+    //   accesses.  In fact, all data members in LInsXYZ are private and can
+    //   only be accessed by LIns, which is a friend class.  The only thing
+    //   anyone outside LIns can do with a LInsXYZ is call getLIns().
+    //
+    // - An example Op2 instruction and the likely pointers to it (each line
+    //   represents a word, and pointers to a line point to the start of the
+    //   word on that line):
+    //
+    //      [ oprnd_2         <-- LInsOp2* insOp2 == toLInsOp2(ins)
+    //        oprnd_1
+    //        opcode + resv ] <-- LIns* ins
+    //
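+    //   From outside class LIns, its operands are then read through the
+    //   accessors, e.g. (illustrative only):
+    //
+    //      LIns* lhs = ins->oprnd1();   // internally: toLInsOp2()->oprnd_1
+    //      LIns* rhs = ins->oprnd2();   // internally: toLInsOp2()->oprnd_2
+    //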
+    // - LIR_skip instructions are more complicated.  They allow an arbitrary
+    //   blob of data (the "payload") to be placed in the LIR stream.  The
+    //   size of the payload is always a multiple of the word size.  A skip
+    //   instruction's operand points to the previous instruction, which lets
+    //   the payload be skipped over when reading backwards.  Here's an
+    //   example of a skip instruction with a 3-word payload preceded by an
+    //   LInsOp1:
+    //
+    //      [ oprnd_1
+    //  +->   opcode + resv           ]
+    //  |   [ data
+    //  |     data
+    //  |     data
+    //  +---- prevLIns                  <-- LInsSk* insSk == toLInsSk(ins)
+    //        opcode==LIR_skip + resv ] <-- LIns* ins
+    //
+    //   Skips are also used to link code pages.  If the first instruction on
+    //   a page isn't a LIR_start, it will be a skip, and the skip's operand
+    //   will point to the last LIns on the previous page.  In this case there
+    //   isn't a payload as such;  in fact, the previous page might be at a
+    //   higher address, ie. the operand might point forward rather than
+    //   backward.
+    //
+    //   LInsSk has the same layout as LInsOp1, but we represent it as a
+    //   different class because there are some places where we treat
+    //   skips specially and so having it separate seems like a good idea.
+    //
+    // - Call instructions (LIR_call, LIR_fcall, LIR_calli, LIR_fcalli) are
+    //   also more complicated.  They are preceded by the arguments to the
+    //   call, which are laid out in reverse order.  For example, a call with
+    //   3 args will look like this:
+    //
+    //      [ arg #2
+    //        arg #1
+    //        arg #0
+    //        argc            <-- LInsC* insC == toLInsC(ins)
+    //        ci
+    //        opcode + resv ] <-- LIns* ins 
+    //
+    // - Various things about the size and layout of LIns and LInsXYZ are
+    //   statically checked in staticSanityCheck().  In particular, this is
+    //   worthwhile because there's nothing that guarantees that all the
+    //   LInsXYZ classes have a size that is a multiple of word size (but in
+    //   practice all sane compilers use a layout that results in this).  We
+    //   also check that every LInsXYZ is word-aligned in
+    //   LirBuffer::makeRoom();  this seems sensible to avoid potential
+    //   slowdowns due to misalignment.  It relies on pages themselves being
+    //   word-aligned, which is extremely likely.
+    //
+    // - There is an enum, LInsRepKind, with one member for each of the
+    //   LInsXYZ kinds.  Each opcode is categorised with its LInsRepKind value
+    //   in LIRopcode.tbl, and this is used in various places.
+    //-----------------------------------------------------------------------
+
+    enum LInsRepKind {
+        // LRK_XYZ corresponds to class LInsXYZ.
+        LRK_Op0,
+        LRK_Op1,
+        LRK_Op2,
+        LRK_Sti,
+        LRK_Sk,
+        LRK_C,
+        LRK_P,
+        LRK_I,
+        LRK_I64,
+        LRK_None    // this one is used for unused opcode numbers
+    };
+
+    // 0-operand form.  Used for LIR_start and LIR_label.
+    class LInsOp0
+    {
+    private:
+        friend class LIns;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // 1-operand form.  Used for LIR_ret, LIR_ov, unary arithmetic/logic ops,
+    // etc.
+    class LInsOp1
+    {
+    private:
+        friend class LIns;
+
+        // Nb: oprnd_1 position relative to 'ins' must match that in
+        // LIns{Op2,Sti}.  Checked in LirBufWriter::LirBufWriter().
+        LIns*       oprnd_1;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // 2-operand form.  Used for loads, guards, branches, comparisons, binary
+    // arithmetic/logic ops, etc.
+    class LInsOp2
+    {
+    private:
+        friend class LIns;
+
+        // Nb: oprnd_{1,2} position relative to 'ins' must match that in
+        // LIns{Op1,Sti}.  Checked in LirBufWriter::LirBufWriter().
+        LIns*       oprnd_2;
+
+        LIns*       oprnd_1;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // Used for LIR_sti and LIR_stqi.
+    class LInsSti
+    {
+    private:
+        friend class LIns;
+
+        int32_t     disp;
+
+        // Nb: oprnd_{1,2} position relative to 'ins' must match that in
+        // LIns{Op1,Op2}.  Checked in LIns::staticSanityCheck().
+        LIns*       oprnd_2;
+
+        LIns*       oprnd_1;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // Used for LIR_skip.
+    class LInsSk
+    {
+    private:
+        friend class LIns;
+
+        LIns*       prevLIns;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // Used for all variants of LIR_call.
+    class LInsC
+    {
+    private:
+        friend class LIns;
+
+        uintptr_t   argc:8;
+
+        const CallInfo* ci;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // Used for LIR_param.
+    class LInsP
+    {
+    private:
+        friend class LIns;
+
+        uintptr_t   arg:8;
+        uintptr_t   kind:8;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // Used for LIR_int and LIR_alloc.
+    class LInsI
+    {
+    private:
+        friend class LIns;
+
+        int32_t     imm32;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // Used for LIR_quad.
+    class LInsI64
+    {
+    private:
+        friend class LIns;
+
+        int32_t     imm64_0;
+
+        int32_t     imm64_1;
+
+        void*       ins;
+
+    public:
+        LIns* getLIns() { return (LIns*)&ins; };
+    };
+
+    // Used only as a placeholder for OPDEF macros for unused opcodes in
+    // LIRopcode.tbl.
+    class LInsNone
+    {
+    };
+
 	class LIns
 	{
-        // 2-operand form.  Used for most LIns kinds, including LIR_skip (for
-        // which oprnd_1 is the target).
-		struct u_type
-		{
-            // Nb: oprnd_1 and oprnd_2 layout must match that in sti_type
-            // because oprnd1() and oprnd2() are used for both.
-            LIns*       oprnd_1;
+    private:
+        // Last word: fields shared by all LIns kinds.  The reservation fields
+        // are read/written during assembly.
+        Reservation lastWord;
 
-            LIns*       oprnd_2;  
-		};
+        // LIns-to-LInsXYZ converters.
+        LInsOp0* toLInsOp0() const { return (LInsOp0*)( uintptr_t(this+1) - sizeof(LInsOp0) ); }
+        LInsOp1* toLInsOp1() const { return (LInsOp1*)( uintptr_t(this+1) - sizeof(LInsOp1) ); }
+        LInsOp2* toLInsOp2() const { return (LInsOp2*)( uintptr_t(this+1) - sizeof(LInsOp2) ); }
+        LInsSti* toLInsSti() const { return (LInsSti*)( uintptr_t(this+1) - sizeof(LInsSti) ); }
+        LInsSk*  toLInsSk()  const { return (LInsSk* )( uintptr_t(this+1) - sizeof(LInsSk ) ); }
+        LInsC*   toLInsC()   const { return (LInsC*  )( uintptr_t(this+1) - sizeof(LInsC  ) ); }
+        LInsP*   toLInsP()   const { return (LInsP*  )( uintptr_t(this+1) - sizeof(LInsP  ) ); }
+        LInsI*   toLInsI()   const { return (LInsI*  )( uintptr_t(this+1) - sizeof(LInsI  ) ); }
+        LInsI64* toLInsI64() const { return (LInsI64*)( uintptr_t(this+1) - sizeof(LInsI64) ); }
 
-        // Used for LIR_sti and LIR_stqi.
-        struct sti_type
+        // This is never called, but that's ok because it contains only static
+        // assertions.
+        void staticSanityCheck()
         {
-            // Nb: oprnd_1 and oprnd_2 layout must match that in u_type
-            // because oprnd1() and oprnd2() are used for both.
-            LIns*       oprnd_1;
-
-            LIns*       oprnd_2;  
-
-            int32_t     disp;
-        };
+            // LIns must be word-sized.
+            NanoStaticAssert(sizeof(LIns) == 1*sizeof(void*));
 
-        // Used for LIR_call and LIR_param.
-		struct c_type
-		{
-            uintptr_t   imm8a:8;    // call: 0 (not used);  param: arg
-            uintptr_t   imm8b:8;    // call: argc;  param: kind
+            // LInsXYZ have expected sizes too.
+            NanoStaticAssert(sizeof(LInsOp0) == 1*sizeof(void*));
+            NanoStaticAssert(sizeof(LInsOp1) == 2*sizeof(void*));
+            NanoStaticAssert(sizeof(LInsOp2) == 3*sizeof(void*));
+            NanoStaticAssert(sizeof(LInsSti) == 4*sizeof(void*));
+            NanoStaticAssert(sizeof(LInsSk)  == 2*sizeof(void*));
+            NanoStaticAssert(sizeof(LInsC)   == 3*sizeof(void*));
+            NanoStaticAssert(sizeof(LInsP)   == 2*sizeof(void*));
+            NanoStaticAssert(sizeof(LInsI)   == 2*sizeof(void*));
+        #if defined NANOJIT_64BIT
+            NanoStaticAssert(sizeof(LInsI64) == 2*sizeof(void*));
+        #else
+            NanoStaticAssert(sizeof(LInsI64) == 3*sizeof(void*));
+        #endif
 
-            const CallInfo* ci;     // call: callInfo;  param: NULL (not used)
-		};
+            // oprnd_1 must be in the same position in LIns{Op1,Op2,Sti}
+            // because oprnd1() is used for all of them.
+            NanoStaticAssert( (offsetof(LInsOp1, ins) - offsetof(LInsOp1, oprnd_1)) ==
+                              (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) );
+            NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) ==
+                              (offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_1)) );
+
+            // oprnd_2 must be in the same position in LIns{Op2,Sti}
+            // because oprnd2() is used for both of them.
+            NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_2)) ==
+                              (offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_2)) );
+        }
 
-        // Used for LIR_int.
-		struct i_type
-		{
-            int32_t     imm32;
-		};
-
-        // Used for LIR_quad.
-        struct i64_type
-		{
-            int32_t     imm64_0;
-            int32_t     imm64_1;
-		};
+    public:
+        void initLInsOp0(LOpcode opcode) {
+            lastWord.clear();
+            lastWord.opcode = opcode;
+            NanoAssert(isLInsOp0());
+        }
+        void initLInsOp1(LOpcode opcode, LIns* oprnd1) {
+            lastWord.clear();
+            lastWord.opcode = opcode;
+            toLInsOp1()->oprnd_1 = oprnd1;
+            NanoAssert(isLInsOp1());
+        }
+        void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2) {
+            lastWord.clear();
+            lastWord.opcode = opcode;
+            toLInsOp2()->oprnd_1 = oprnd1;
+            toLInsOp2()->oprnd_2 = oprnd2;
+            NanoAssert(isLInsOp2());
+        }
+        void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d) {
+            lastWord.clear();
+            lastWord.opcode = opcode;
+            toLInsSti()->oprnd_1 = val;
+            toLInsSti()->oprnd_2 = base;
+            toLInsSti()->disp = d;
+            NanoAssert(isLInsSti());
+        }
+        void initLInsSk(LIns* prevLIns) {
+            lastWord.clear();
+            lastWord.opcode = LIR_skip;
+            toLInsSk()->prevLIns = prevLIns;
+            NanoAssert(isLInsSk());
+        }
+        // Nb: this does NOT initialise the arguments.  That must be done
+        // separately.
+        void initLInsC(LOpcode opcode, int32_t argc, const CallInfo* ci) {
+            NanoAssert(isU8(argc));
+            lastWord.clear();
+            lastWord.opcode = opcode;
+            toLInsC()->argc = argc;
+            toLInsC()->ci = ci;
+            NanoAssert(isLInsC());
+        }
+        void initLInsP(int32_t arg, int32_t kind) {
+            lastWord.clear();
+            lastWord.opcode = LIR_param;
+            NanoAssert(isU8(arg) && isU8(kind));
+            toLInsP()->arg = arg;
+            toLInsP()->kind = kind;
+            NanoAssert(isLInsP());
+        }
+        void initLInsI(LOpcode opcode, int32_t imm32) {
+            lastWord.clear();
+            lastWord.opcode = opcode;
+            toLInsI()->imm32 = imm32;
+            NanoAssert(isLInsI());
+        }
+        void initLInsI64(LOpcode opcode, int64_t imm64) {
+            lastWord.clear();
+            lastWord.opcode = opcode;
+            toLInsI64()->imm64_0 = int32_t(imm64);
+            toLInsI64()->imm64_1 = int32_t(imm64 >> 32);
+            NanoAssert(isLInsI64());
+        }
 
-        #undef _sign_int
-		
-        // 1st word: fields shared by all LIns kinds.  The reservation fields
-        // are read/written during assembly.
-        Reservation firstWord;
-
-        // 2nd, 3rd and 4th words: differ depending on the LIns kind.
-		union
-		{
-            u_type      u;
-            c_type      c;
-            i_type      i;
-            i64_type    i64;
-            sti_type    sti;
-		};
-
-	public:
         LIns* oprnd1() const {
-            NanoAssert(isOp1() || isOp2() || isLoad() || isStore());
-            return u.oprnd_1;
+            NanoAssert(isLInsOp1() || isLInsOp2() || isStore());
+            return toLInsOp2()->oprnd_1;
         }
         LIns* oprnd2() const {
-            NanoAssert(isOp2() || isLoad() || isStore());
-            return u.oprnd_2;
+            NanoAssert(isLInsOp2() || isStore());
+            return toLInsOp2()->oprnd_2;
+        }
+
+        LIns* prevLIns() const {
+            NanoAssert(isop(LIR_skip));
+            return toLInsSk()->prevLIns;
         }
 
-        inline LOpcode opcode()   const { return firstWord.code; }
-        inline uint8_t imm8()     const { NanoAssert(isop(LIR_param)); return c.imm8a; }
-        inline uint8_t imm8b()    const { NanoAssert(isop(LIR_param)); return c.imm8b; }
-        inline int32_t imm32()    const { NanoAssert(isconst());  return i.imm32; }
-        inline int32_t imm64_0()  const { NanoAssert(isconstq()); return i64.imm64_0; }
-        inline int32_t imm64_1()  const { NanoAssert(isconstq()); return i64.imm64_1; }
-        uint64_t       imm64()    const;
-        double         imm64f()   const;
-        Reservation*   resv()           { return &firstWord; }
-        void*	       payload() const;
-        inline Page*   page()			{ return (Page*) alignTo(this,NJ_PAGE_SIZE); }
-        inline int32_t size() const {
+        inline LOpcode opcode()    const { return lastWord.opcode; }
+        inline uint8_t paramArg()  const { NanoAssert(isop(LIR_param)); return toLInsP()->arg; }
+        inline uint8_t paramKind() const { NanoAssert(isop(LIR_param)); return toLInsP()->kind; }
+        inline int32_t imm32()     const { NanoAssert(isconst());  return toLInsI()->imm32; }
+        inline int32_t imm64_0()   const { NanoAssert(isconstq()); return toLInsI64()->imm64_0; }
+        inline int32_t imm64_1()   const { NanoAssert(isconstq()); return toLInsI64()->imm64_1; }
+        uint64_t       imm64()     const;
+        double         imm64f()    const;
+        Reservation*   resv()            { return &lastWord; }
+        void*          payload()   const;
+        inline Page*   page()            { return (Page*) alignTo(this,NJ_PAGE_SIZE); }
+        inline int32_t size()      const {
             NanoAssert(isop(LIR_alloc));
-            return i.imm32<<2;
-        }
-        inline void setSize(int32_t bytes) {
-            NanoAssert(isop(LIR_alloc) && (bytes&3)==0 && isU16(bytes>>2));
-            i.imm32 = bytes>>2;
+            return toLInsI()->imm32 << 2;
         }
 
 		LIns* arg(uint32_t i);
 
         inline int32_t immdisp() const 
         {
             NanoAssert(isStore());
-            return sti.disp;
+            return toLInsSti()->disp;
         }
     
 		inline void* constvalp() const
 		{
         #ifdef AVMPLUS_64BIT
 		    return (void*)imm64();
 		#else
 		    return (void*)imm32();
         #endif      
 		}
 		
 		bool isCse() const;
-        bool isRet() const { return nanojit::isRetOpcode(firstWord.code); }
-		bool isop(LOpcode o) const { return firstWord.code == o; }
+        bool isRet() const { return nanojit::isRetOpcode(opcode()); }
+        bool isop(LOpcode o) const { return opcode() == o; }
         #if defined(_DEBUG)
-        bool isOp1() const;     // true for unary ops
-        bool isOp2() const;     // true for binary ops
+        // isLInsXYZ() returns true if the instruction has the LInsXYZ form.
+        // Note that there is some overlap with other predicates, e.g.
+        // isStore()==isLInsSti() and isCall()==isLInsC(), but that's ok;
+        // these ones are used only to check that opcodes are appropriate
+        // for instruction layouts, while the others are used for
+        // non-debugging purposes.
+        bool isLInsOp0() const;
+        bool isLInsOp1() const;
+        bool isLInsOp2() const;
+        bool isLInsSti() const;
+        bool isLInsSk()  const;
+        bool isLInsC()   const;
+        bool isLInsP()   const;
+        bool isLInsI()   const;
+        bool isLInsI64() const;
         #endif
 		bool isQuad() const;
 		bool isCond() const;
         bool isFloat() const;
 		bool isCmp() const;
         bool isCall() const { 
-            LOpcode op = LOpcode(firstWord.code & ~LIR64);
+            LOpcode op = LOpcode(opcode() & ~LIR64);
             return op == LIR_call;
         }
         bool isStore() const {
-            LOpcode op = LOpcode(firstWord.code & ~LIR64);
+            LOpcode op = LOpcode(opcode() & ~LIR64);
             return op == LIR_sti;
         }
         bool isLoad() const { 
-            LOpcode op = firstWord.code;
+            LOpcode op = opcode();
             return op == LIR_ldq  || op == LIR_ld || op == LIR_ldc || 
                    op == LIR_ldqc || op == LIR_ldcs || op == LIR_ldcb;
         }
         bool isGuard() const {
-            LOpcode op = firstWord.code;
+            LOpcode op = opcode();
             return op == LIR_x || op == LIR_xf || op == LIR_xt || 
                    op == LIR_loop || op == LIR_xbarrier || op == LIR_xtbl;
         }
 		// True if the instruction is a 32-bit or smaller constant integer.
-        bool isconst() const { return firstWord.code == LIR_int; }
+        bool isconst() const { return opcode() == LIR_int; }
 		// True if the instruction is a 32-bit or smaller constant integer and
 		// has the value val when treated as a 32-bit signed integer.
 		bool isconstval(int32_t val) const;
 		// True if the instruction is a constant quad value.
 		bool isconstq() const;
 		// True if the instruction is a constant pointer value.
 		bool isconstp() const;
 		bool isBranch() const {
 			return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j);
 		}
 
-        void setIns0(LOpcode op) {
-            firstWord.code = op;
-		}
-        void setIns1(LOpcode op, LIns* oprnd1) {
-            firstWord.code = op;
-            u.oprnd_1 = oprnd1;
-            NanoAssert(isOp1());
-        }
-        void setIns2(LOpcode op, LIns* oprnd1, LIns* oprnd2) {
-            firstWord.code = op;
-            u.oprnd_1 = oprnd1;
-            u.oprnd_2 = oprnd2;
-            NanoAssert(isOp2() || isLoad() || isGuard() || isBranch());
-        }
-        void setLoad(LOpcode op, LIns* base, LIns* d) {
-            setIns2(op, base, d);
-        }
-		void setGuard(LOpcode op, LIns* cond, LIns* data) {
-			setIns2(op, cond, data);
-		}
-		void setBranch(LOpcode op, LIns* cond, LIns* target) {
-			setIns2(op, cond, target);
-		}
-        void setStorei(LOpcode op, LIns* val, LIns* base, int32_t d) {
-            firstWord.code = op;
-            u.oprnd_1 = val;
-            u.oprnd_2 = base;
-            sti.disp = d;
-            NanoAssert(isStore());
-        }
-		void setImm(LOpcode op, int32_t imm32) {
-			firstWord.code = op;
-			i.imm32 = imm32;
-			NanoAssert(op == LIR_alloc || op == LIR_int);
-		}
-		void setAlloc(LOpcode op, int32_t size) {
-			setImm(op, size);
-		}
-		void setParam(LOpcode op, int32_t arg, int32_t kind)
-		{
-			firstWord.code = op;
-			NanoAssert(isU8(arg) && isU8(kind));
-			c.imm8a = arg;
-			c.imm8b = kind;
-			c.ci = NULL;
-			NanoAssert(op == LIR_param);
-		}
-		void setCall(LOpcode op, int32_t argc, const CallInfo* ci)
-		{
-			firstWord.code = op;
-			NanoAssert(isU8(argc));
-			c.imm8a = 0;
-			c.imm8b = argc;
-			c.ci = ci;
-			NanoAssert(op == LIR_call || op == LIR_fcall);
-		}
-		void setImmq(LOpcode op, int64_t imm64) {
-			firstWord.code = op;
-			i64.imm64_0 = int32_t(imm64);
-			i64.imm64_1 = int32_t(imm64>>32);
-			NanoAssert(op == LIR_quad);
-		}
-
 		void setTarget(LIns* t);
 		LIns* getTarget();
 
         GuardRecord *record();
 
 		inline uint32_t argc() const {
 			NanoAssert(isCall());
-			return c.imm8b;
+            return toLInsC()->argc;
 		}
 		const CallInfo *callInfo() const;
 	};
-	typedef LIns*		LInsp;
+
+    typedef LIns* LInsp;
 
 	LIns* FASTCALL callArgN(LInsp i, uint32_t n);
 	extern const uint8_t operandCount[];
 
 	class Fragmento;	// @todo remove this ; needed for minbuild for some reason?!?  Should not be compiling this code at all
-	class LirFilter;
 
 	// make it a GCObject so we can explicitly delete it early
 	class LirWriter : public avmplus::GCObject
 	{
 	public:
 		LirWriter *out;
 
 		virtual ~LirWriter() {}
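(Editorial aside on the hunk above: the new representation stores the shared word, `lastWord`, at the *end* of each instruction record, with the kind-specific fields in front of it; the `toLInsXYZ()` converters recover the full record by stepping back from one-past-the-`LIns`. Below is a standalone toy model of that trick, with simplified names and hypothetical opcode numbers, not the real nanojit types; it assumes only that the shared word is pointer-sized, as the static assertions in the patch require.)

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

struct Ins;  // the shared "last word", defined below

// Overlay for a 2-operand instruction: operands first, shared word last,
// mirroring LInsOp2 { oprnd_1; oprnd_2; ins; }.
struct InsOp2 {
    Ins*  oprnd_1;
    Ins*  oprnd_2;
    void* ins;                                   // the shared word lives here
    Ins*  getIns() { return (Ins*)&ins; }
};

// Overlay for a 32-bit immediate, mirroring LInsI { imm32; ins; }.
struct InsI {
    int32_t imm32;
    void*   ins;
    Ins*    getIns() { return (Ins*)&ins; }
};

struct Ins {
    uintptr_t opcode;                            // stands in for Reservation

    // 'this + 1' is one word past the shared word, i.e. the end of the whole
    // record; stepping back sizeof(Overlay) bytes lands on its first field.
    InsOp2* toInsOp2() { return (InsOp2*)(uintptr_t(this + 1) - sizeof(InsOp2)); }
    InsI*   toInsI()   { return (InsI*)  (uintptr_t(this + 1) - sizeof(InsI));   }

    Ins*    oprnd1() { return toInsOp2()->oprnd_1; }
    int32_t imm32()  { return toInsI()->imm32; }
};

static_assert(sizeof(Ins) == sizeof(void*), "shared word must be one word");

int main() {
    alignas(void*) unsigned char buf[64];        // stands in for a LIR page

    InsI* ci = (InsI*)buf;                       // emit "int 42"
    ci->imm32 = 42;
    Ins* k = ci->getIns();
    k->opcode = 1;                               // hypothetical "int" opcode

    InsOp2* a = (InsOp2*)(buf + sizeof(InsI));   // emit "add k, k"
    a->oprnd_1 = a->oprnd_2 = k;
    Ins* add = a->getIns();
    add->opcode = 2;                             // hypothetical "add" opcode

    // Round-trip through the converters, as oprnd1()/imm32() do in the patch.
    assert(add->oprnd1()->imm32() == 42);
    printf("ok: operand immediate = %d\n", add->oprnd1()->imm32());
    return 0;
}
```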
@@ -485,22 +744,22 @@ namespace nanojit
 
 
     // Each page has a header;  the rest of it holds code.
     #define NJ_PAGE_CODE_AREA_SZB       (NJ_PAGE_SIZE - sizeof(PageHeader))
 
     // The first instruction on a page is always a start instruction, or a
     // payload-less skip instruction linking to the previous page.  The
     // biggest possible instruction would take up the entire rest of the page.
-    #define NJ_MAX_LINS_SZB             (NJ_PAGE_CODE_AREA_SZB - sizeof(LIns))
+    #define NJ_MAX_LINS_SZB             (NJ_PAGE_CODE_AREA_SZB - sizeof(LInsSk))
 
     // The maximum skip payload size is determined by the maximum instruction
     // size.  We require that a skip's payload be adjacent to the skip LIns
     // itself.
-    #define NJ_MAX_SKIP_PAYLOAD_SZB     (NJ_MAX_LINS_SZB - sizeof(LIns))
+    #define NJ_MAX_SKIP_PAYLOAD_SZB     (NJ_MAX_LINS_SZB - sizeof(LInsSk))
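(Concretely, with purely illustrative numbers, a 32-bit build, 4 KB pages, and a one-word PageHeader, none of which is guaranteed by the source, the chain of definitions works out as follows; note both macros now subtract `sizeof(LInsSk)` because the leading instruction on a page is a payload-less skip.)

```cpp
// Illustrative arithmetic only; the real values are platform-dependent.
// sizeof(LInsSk) = 2 words = 8 bytes here (prevLIns + the shared last word).
//
//   NJ_PAGE_CODE_AREA_SZB   = 4096 - 4  = 4092   // page minus its header
//   NJ_MAX_LINS_SZB         = 4092 - 8  = 4084   // room left after the leading skip
//   NJ_MAX_SKIP_PAYLOAD_SZB = 4084 - 8  = 4076   // payload precedes its own skip LIns
```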
  
 
 #ifdef NJ_VERBOSE
 	extern const char* lirNames[];
 
 	/**
 	 * map address ranges to meaningful names.
 	 */
--- a/js/src/nanojit/LIRopcode.tbl
+++ b/js/src/nanojit/LIRopcode.tbl
@@ -39,222 +39,226 @@
  * ***** END LICENSE BLOCK ***** */
 
 /*
  * Definitions of LIR opcodes.  If you need to allocate an opcode, look
  * for a name of the form unused* and claim it.
  *
  * Includers must define OPDEF and OPDEF64 macros of the following forms:
  *
- * #define   OPDEF(op,val,operands) ...
- * #define OPDEF64(op,val,operands) ...
+ * #define   OPDEF(op,val,operands,repkind) ...
+ * #define OPDEF64(op,val,operands,repkind) ...
  *
  * Selected arguments can then be used within the macro expansions.
  *
  * Field        Description
- * op           Bytecode name, token-pasted after "LIR_" to form an LOpcode
- * val          Bytecode value, which is the LOpcode enumerator value
- * operands     Number of operands for this instruction
+ * op           Bytecode name, token-pasted after "LIR_" to form an LOpcode.
+ * val          Bytecode value, which is the LOpcode enumerator value.
+ * operands     Number of operands for this instruction, where an "operand" is
+ *              a LIns* argument.  Eg. LIR_sti has 3 fields, but the last is an
+ *              immediate, so it only has two operands.  Call instructions are
+ *              considered to have 0 operands -- the call args aren't counted.
+ *              The value is set to -1 for unused opcodes to make it obvious
+ *              that it needs changing if the opcode becomes used.
+ * repkind      Indicates how the instruction is represented in memory;  XYZ
+ *              corresponds to LInsXYZ and LRK_XYZ.
  *
  * This file is best viewed with 128 columns:
 12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
  */
 
-/*    op    val name        operands */
+/*    op    val operands   repkind */
 
 /* special operations (must be 0..N) */
-OPDEF(start,     0, 0)
-OPDEF(unused1,   1, 0)
-OPDEF(skip,      2, 0)
-OPDEF(unused3,   3, 0)
-OPDEF(unused4,   4, 0)
-
-OPDEF(unused5,   5, 2)
-OPDEF(unused6,   6, 2)
+OPDEF(start,     0, 0, Op0)     // start of a fragment
+OPDEF(unused1,   1,-1, None)
+OPDEF(skip,      2, 1, Sk)      // holds blobs ("payloads") of data;  also links pages
+OPDEF(unused3,   3,-1, None)
+OPDEF(unused4,   4,-1, None)
+OPDEF(unused5,   5,-1, None)
+OPDEF(unused6,   6,-1, None)
 
 /* non-pure operations */
-OPDEF(addp,      7, 2)
-OPDEF(param,     8, 0)
-OPDEF(unused9,   9, 2)
-OPDEF(ld,       10, 2) // 32-bit load
-OPDEF(alloc,    11, 0) // alloca some stack space
-OPDEF(sti,      12, 2) // 32-bit store
-OPDEF(ret,      13, 1)
-OPDEF(live,     14, 1) // extend live range of reference
-OPDEF(unused15, 15, 0) // indirect call
-OPDEF(call,     16, 0) // subroutine call returning a 32-bit value
+OPDEF(addp,      7, 2, Op2)     // integer addition for temporary pointer calculations
+OPDEF(param,     8, 0, P)       // load a parameter
+OPDEF(unused9,   9,-1, None)
+OPDEF(ld,       10, 2, Op2)     // 32-bit load
+OPDEF(alloc,    11, 0, I)       // alloca some stack space
+OPDEF(sti,      12, 2, Sti)     // 32-bit store
+OPDEF(ret,      13, 1, Op1)     // return a word-sized value
+OPDEF(live,     14, 1, Op1)     // extend live range of reference
+OPDEF(unused15, 15, 0, C)
+OPDEF(call,     16, 0, C)       // subroutine call returning a 32-bit value
 
 /* guards */
-OPDEF(loop,     17, 0) // loop fragment
-OPDEF(x,        18, 0) // exit always
+OPDEF(loop,     17, 0, Op2)     // loop fragment
+OPDEF(x,        18, 0, Op2)     // exit always
 
 /* branches */
-OPDEF(j,        19, 0) // jump always
-OPDEF(jt,       20, 1) // jump true
-OPDEF(jf,       21, 1) // jump false
-OPDEF(label,    22, 0) // a jump target
-OPDEF(ji,       23, 2) // jump indirect
+OPDEF(j,        19, 0, Op2)     // jump always
+OPDEF(jt,       20, 1, Op2)     // jump if true
+OPDEF(jf,       21, 1, Op2)     // jump if false
+OPDEF(label,    22, 0, Op0)     // a jump target (no machine code is emitted for this)
+OPDEF(ji,       23,-1, None)    // indirect jump (currently not implemented)
 
 /* operators */
 
 /*
  * NB: Opcodes LIR_int through LIR_uge must remain continuous to aid in
  *     common-subexpression-elimination detection code.
  */
 
-OPDEF(int,      24, 0) // constant 32-bit integer
-OPDEF(cmov,     25, 2) // conditional move (op1=cond, op2=cond(iftrue,iffalse))
+OPDEF(int,      24, 0, I)       // constant 32-bit integer
+OPDEF(cmov,     25, 2, Op2)     // conditional move (op1=cond, op2=LIR_2(iftrue,iffalse))
 #if defined(NANOJIT_64BIT)
-OPDEF(callh,    26, 0)
+OPDEF(callh,    26,-1, None)    // unused on 64-bit machines
 #else
-OPDEF(callh,    26, 1)
+OPDEF(callh,    26, 1, Op1)     // get the high 32 bits of a call returning a 64-bit value
 #endif
 
 /*
  * feq through fge must only be used on float arguments.  They return integers.
  * For all except feq, (op ^ 1) is the op which flips the
  * left and right sides of the comparison, so (lt ^ 1) == gt, or the operator
  * "<" is xored with 1 to get ">".  Similarly, (op ^ 3) is the complement of
  * op, so (lt ^ 3) == ge, or the complement of the operator "<" is ">=" xored
  * with 3.  NB: These opcodes must remain continuous so that comparison-opcode
  * detection works correctly.
  */
-OPDEF(feq,      27, 2) // floating-point equality [2 float inputs]
-OPDEF(flt,      28, 2) // floating-point less than: arg1 < arg2
-OPDEF(fgt,      29, 2) // floating-point greater than: arg1 > arg2
-OPDEF(fle,      30, 2) // arg1 <= arg2, both floating-point
-OPDEF(fge,      31, 2) // arg1 >= arg2, both floating-point
+OPDEF(feq,      27, 2, Op2)     // floating-point equality
+OPDEF(flt,      28, 2, Op2)     // floating-point less-than
+OPDEF(fgt,      29, 2, Op2)     // floating-point greater-than
+OPDEF(fle,      30, 2, Op2)     // floating-point less-than-or-equal
+OPDEF(fge,      31, 2, Op2)     // floating-point greater-than-or-equal
 
-OPDEF(ldcb,     32, 2) // non-volatile 8-bit load
-OPDEF(ldcs,     33, 2) // non-volatile 16-bit load
-OPDEF(ldc,      34, 2) // non-volatile 32-bit load
+OPDEF(ldcb,     32, 2, Op2)     // non-volatile  8-bit load
+OPDEF(ldcs,     33, 2, Op2)     // non-volatile 16-bit load
+OPDEF(ldc,      34, 2, Op2)     // non-volatile 32-bit load
 
-// neg through ush are all integer operations
-OPDEF(neg,      35, 1) // numeric negation [ 1 integer input / integer output ]
-OPDEF(add,      36, 2) // integer addition [ 2 operand integer intputs / integer output ]
-OPDEF(sub,      37, 2) // integer subtraction
-OPDEF(mul,      38, 2) // integer multiplication
-OPDEF(div,      39, 2)
-OPDEF(mod,      40, 1)
+OPDEF(neg,      35, 1, Op1)     // integer negation
+OPDEF(add,      36, 2, Op2)     // integer addition
+OPDEF(sub,      37, 2, Op2)     // integer subtraction
+OPDEF(mul,      38, 2, Op2)     // integer multiplication
+OPDEF(div,      39, 2, Op2)     // integer division
+OPDEF(mod,      40, 1, Op1)     // hack: get the modulus from a LIR_div result, for x86 only
 
-OPDEF(and,      41, 2)
-OPDEF(or,       42, 2)
-OPDEF(xor,      43, 2)
-OPDEF(not,      44, 1)
-OPDEF(lsh,      45, 2)
-OPDEF(rsh,      46, 2) // >>
-OPDEF(ush,      47, 2) // >>>
+OPDEF(and,      41, 2, Op2)     // 32-bit bitwise AND
+OPDEF(or,       42, 2, Op2)     // 32-bit bitwise OR
+OPDEF(xor,      43, 2, Op2)     // 32-bit bitwise XOR
+OPDEF(not,      44, 1, Op1)     // 32-bit bitwise NOT
+OPDEF(lsh,      45, 2, Op2)     // 32-bit left shift
+OPDEF(rsh,      46, 2, Op2)     // 32-bit right shift with sign-extend (>>)
+OPDEF(ush,      47, 2, Op2)     // 32-bit unsigned right shift (>>>)
 
 // conditional guards, op^1 to complement.  Only things that are
 // isCond() can be passed to these.
-OPDEF(xt,       48, 1) // exit if true   0x30 0011 0000
-OPDEF(xf,       49, 1) // exit if false  0x31 0011 0001
+OPDEF(xt,       48, 1, Op2)     // exit if true   (0x30 0011 0000)
+OPDEF(xf,       49, 1, Op2)     // exit if false  (0x31 0011 0001)
+
+OPDEF(qlo,      50, 1, Op1)     // get the low  32 bits of a 64-bit value
+OPDEF(qhi,      51, 1, Op1)     // get the high 32 bits of a 64-bit value
 
-// qlo and qhi take a single quad argument and return its low and high
-// 32 bits respectively as 32-bit integers.
-OPDEF(qlo,      50, 1)
-OPDEF(qhi,      51, 1)
+OPDEF(unused52, 52,-1, None)
 
-OPDEF(unused52, 52, 0)
+OPDEF(ov,       53, 1, Op1)     // test for overflow;  value must have just been computed
 
-OPDEF(ov,       53, 1)
-OPDEF(cs,       54, 1)
+OPDEF(unused54, 54,-1, None)
 
 // Integer (all sizes) relational operators.  (op ^ 1) is the op which flips the
 // left and right sides of the comparison, so (lt ^ 1) == gt, or the operator
 // "<" is xored with 1 to get ">".  Similarly, (op ^ 3) is the complement of
 // op, so (lt ^ 3) == ge, or the complement of the operator "<" is ">=" xored
 // with 3.  'u' prefix indicates the unsigned integer variant.
 // NB: These opcodes must remain continuous so that comparison-opcode detection
 // works correctly.
-OPDEF(eq,       55, 2) // integer equality
-OPDEF(lt,       56, 2) // 0x38 0011 1000
-OPDEF(gt,       57, 2) // 0x39 0011 1001
-OPDEF(le,       58, 2) // 0x3A 0011 1010
-OPDEF(ge,       59, 2) // 0x3B 0011 1011
-OPDEF(ult,      60, 2) // 0x3C 0011 1100
-OPDEF(ugt,      61, 2) // 0x3D 0011 1101
-OPDEF(ule,      62, 2) // 0x3E 0011 1110
-OPDEF(uge,      63, 2) // 0x3F 0011 1111
+OPDEF(eq,       55, 2, Op2)     //          integer equality
+OPDEF(lt,       56, 2, Op2)     //   signed integer less-than             (0x38 0011 1000)
+OPDEF(gt,       57, 2, Op2)     //   signed integer greater-than          (0x39 0011 1001)
+OPDEF(le,       58, 2, Op2)     //   signed integer less-than-or-equal    (0x3A 0011 1010)
+OPDEF(ge,       59, 2, Op2)     //   signed integer greater-than-or-equal (0x3B 0011 1011)
+OPDEF(ult,      60, 2, Op2)     // unsigned integer less-than             (0x3C 0011 1100)
+OPDEF(ugt,      61, 2, Op2)     // unsigned integer greater-than          (0x3D 0011 1101)
+OPDEF(ule,      62, 2, Op2)     // unsigned integer less-than-or-equal    (0x3E 0011 1110)
+OPDEF(uge,      63, 2, Op2)     // unsigned integer greater-than-or-equal (0x3F 0011 1111)
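(As a sanity check of the xor arithmetic described in the comment above, using the opcode values straight from this table:)

```cpp
#include <cassert>
enum { LT = 56, GT = 57, LE = 58, GE = 59 };   // values from the table above

int main() {
    assert((LT ^ 1) == GT);   // swap operands:   a < b   <=>  b > a
    assert((GT ^ 1) == LT);
    assert((LT ^ 3) == GE);   // complement:    !(a < b)  <=>  a >= b
    assert((LE ^ 3) == GT);   // and           !(a <= b)  <=>  a > b
    return 0;
}
```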
 
-OPDEF64(2,          0, 2) // wraps a pair of refs
-OPDEF64(file,       1, 2)
-OPDEF64(line,       2, 2)
-OPDEF64(xbarrier,   3, 1) // memory barrier (dummy guard)
-OPDEF64(xtbl,       4, 1) // exit via indirect jump
+OPDEF64(2,          0, 2, Op2)      // wraps a pair of refs, for LIR_cmov or LIR_qcmov
+OPDEF64(file,       1, 2, Op1)      // source filename for debug symbols
+OPDEF64(line,       2, 2, Op1)      // source line number for debug symbols 
+OPDEF64(xbarrier,   3, 1, Op2)      // memory barrier;  doesn't exit, but flushes all values to the stack
+OPDEF64(xtbl,       4, 1, Op2)      // exit via indirect jump
 
-OPDEF64(unused5_64,   5, 2)
-OPDEF64(unused6_64,   6, 2)
-OPDEF64(unused7_64,   7, 2)
-OPDEF64(unused8_64,   8, 2)
+OPDEF64(unused5_64, 5,-1, None)
+OPDEF64(unused6_64, 6,-1, None)
+OPDEF64(unused7_64, 7,-1, None)
+OPDEF64(unused8_64, 8,-1, None)
+OPDEF64(unused9_64, 9,-1, None)
 
-OPDEF64(unused9_64,   9, 2)
-OPDEF64(ldq, LIR_ld, 2) // quad load
+OPDEF64(ldq,    LIR_ld, 2, Op2)     // 64-bit (quad) load
 
-OPDEF64(unused11_64, 11, 2)
+OPDEF64(unused11_64, 11,-1, None)
 
-OPDEF64(stqi,   LIR_sti, 2) // quad store
-OPDEF64(fret,   LIR_ret, 1)
+OPDEF64(stqi,   LIR_sti, 2, Sti)    // 64-bit (quad) store
+OPDEF64(fret,   LIR_ret, 1, Op1)
 
-OPDEF64(unused14_64, 14, 2)
-OPDEF64(unused15_64, 15, 2)
+OPDEF64(unused14_64, 14,-1, None)
+OPDEF64(unused15_64, 15,-1, None)
 
-OPDEF64(fcall,       LIR_call, 0) // subroutine call returning quad
+OPDEF64(fcall,  LIR_call,  0, C)    // subroutine call returning 64-bit (quad) value
 
-OPDEF64(unused17_64, 17, 2)
-OPDEF64(unused18_64, 18, 2)
-OPDEF64(unused19_64, 19, 2)
-OPDEF64(unused20_64, 20, 2)
-OPDEF64(unused21_64, 21, 2)
-OPDEF64(unused22_64, 22, 2)
-OPDEF64(unused23_64, 23, 2)
+OPDEF64(unused17_64, 17,-1, None)
+OPDEF64(unused18_64, 18,-1, None)
+OPDEF64(unused19_64, 19,-1, None)
+OPDEF64(unused20_64, 20,-1, None)
+OPDEF64(unused21_64, 21,-1, None)
+OPDEF64(unused22_64, 22,-1, None)
+OPDEF64(unused23_64, 23,-1, None)
 
-// We strip of the 64bit flag and compare that the opcode is between LIR_int
+// We strip off the 64-bit flag and check that the opcode is between LIR_int
 // and LIR_uge to decide whether we can CSE the opcode. All opcodes below
 // this marker are subject to CSE.
 
-OPDEF64(quad,        LIR_int,  0) // quad constant value
-OPDEF64(qcmov,       LIR_cmov, 2)
-OPDEF64(unused26_64, 26,       2)
+OPDEF64(quad,   LIR_int,  0, I64)   // 64-bit (quad) constant value
+OPDEF64(qcmov,  LIR_cmov, 2, Op2)   // 64-bit conditional move
 
-OPDEF64(unused27_64, 27, 2)
-OPDEF64(unused28_64, 28, 2)
-OPDEF64(unused29_64, 29, 2)
-OPDEF64(unused30_64, 30, 2)
-OPDEF64(unused31_64, 31, 2)
-OPDEF64(unused32_64, 32, 2)
-OPDEF64(unused33_64, 33, 2)
+OPDEF64(unused26_64, 26,-1, None)
+OPDEF64(unused27_64, 27,-1, None)
+OPDEF64(unused28_64, 28,-1, None)
+OPDEF64(unused29_64, 29,-1, None)
+OPDEF64(unused30_64, 30,-1, None)
+OPDEF64(unused31_64, 31,-1, None)
+OPDEF64(unused32_64, 32,-1, None)
+OPDEF64(unused33_64, 33,-1, None)
 
-OPDEF64(ldqc,   LIR_ldc, 2)
+OPDEF64(ldqc,   LIR_ldc, 2, Op2)    // non-volatile 64-bit load
 
-/* floating-point arithmetic operations */
-OPDEF64(fneg,   LIR_neg, 1)
-OPDEF64(fadd,   LIR_add, 2)
-OPDEF64(fsub,   LIR_sub, 2)
-OPDEF64(fmul,   LIR_mul, 2)
-OPDEF64(fdiv,   LIR_div, 2)
-OPDEF64(fmod,   LIR_mod, 2)
+OPDEF64(fneg,   LIR_neg, 1, Op1)    // floating-point negation
+OPDEF64(fadd,   LIR_add, 2, Op2)    // floating-point addition
+OPDEF64(fsub,   LIR_sub, 2, Op2)    // floating-point subtraction
+OPDEF64(fmul,   LIR_mul, 2, Op2)    // floating-point multiplication
+OPDEF64(fdiv,   LIR_div, 2, Op2)    // floating-point division
+OPDEF64(fmod,   LIR_mod, 2, Op2)    // floating-point modulus(?)
 
-OPDEF64(qiand,  41,      2)
-OPDEF64(qiadd,  42,      2)
-OPDEF64(qior,   43,      2)
-OPDEF64(qilsh,  44,      2)
-OPDEF64(qjoin,  45,      2) // 1st arg is low 32 bits, 2nd arg is high 32 bits
+OPDEF64(qiand,  41,      2, Op2)    // 64-bit bitwise AND
+OPDEF64(qiadd,  42,      2, Op2)    // 64-bit integer addition
+OPDEF64(qior,   43,      2, Op2)    // 64-bit bitwise OR
 
-OPDEF64(i2f,    46,      1) // convert an integer to a float
-OPDEF64(u2f,    47,      1) // convert an unsigned integer to a float
+OPDEF64(qilsh,  44,      2, Op2)    // 64-bit left shift
+OPDEF64(qjoin,  45,      2, Op2)    // join two 32-bit values (1st arg is low bits, 2nd is high)
+
+OPDEF64(i2f,    46,      1, Op1)    // convert a signed 32-bit integer to a float
+OPDEF64(u2f,    47,      1, Op1)    // convert an unsigned 32-bit integer to a float
 
-OPDEF64(unused48_64, 48, 2)
-OPDEF64(unused49_64, 49, 2)
-OPDEF64(unused50_64, 50, 2)
-OPDEF64(unused51_64, 51, 2)
-OPDEF64(unused52_64, 52, 2)
-OPDEF64(unused53_64, 53, 2)
-OPDEF64(unused54_64, 54, 2)
-OPDEF64(unused55_64, 55, 2)
-OPDEF64(unused56_64, 56, 2)
-OPDEF64(unused57_64, 57, 2)
-OPDEF64(unused58_64, 58, 2)
-OPDEF64(unused59_64, 59, 2)
-OPDEF64(unused60_64, 60, 2)
-OPDEF64(unused61_64, 61, 2)
-OPDEF64(unused62_64, 62, 2)
-OPDEF64(unused63_64, 63, 2)
+OPDEF64(unused48_64, 48,-1, None)
+OPDEF64(unused49_64, 49,-1, None)
+OPDEF64(unused50_64, 50,-1, None)
+OPDEF64(unused51_64, 51,-1, None)
+OPDEF64(unused52_64, 52,-1, None)
+OPDEF64(unused53_64, 53,-1, None)
+OPDEF64(unused54_64, 54,-1, None)
+OPDEF64(unused55_64, 55,-1, None)
+OPDEF64(unused56_64, 56,-1, None)
+OPDEF64(unused57_64, 57,-1, None)
+OPDEF64(unused58_64, 58,-1, None)
+OPDEF64(unused59_64, 59,-1, None)
+OPDEF64(unused60_64, 60,-1, None)
+OPDEF64(unused61_64, 61,-1, None)
+OPDEF64(unused62_64, 62,-1, None)
+OPDEF64(unused63_64, 63,-1, None)
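(For reference, this is roughly how an includer consumes the widened four-argument macros. The `LRK_` enum mirrors the "repkind ... corresponds to LInsXYZ and LRK_XYZ" comment above; the 0x40 value for the LIR64 bit is an assumption for illustration, not copied from the patch.)

```cpp
// Sketch of a LIRopcode.tbl includer under the new 4-argument form.
enum LInsRepKind {
    LRK_Op0, LRK_Op1, LRK_Op2, LRK_Sti, LRK_Sk,
    LRK_C, LRK_P, LRK_I, LRK_I64, LRK_None
};

enum LOpcode {
#define OPDEF(op, number, operands, repkind)   LIR_##op = (number),
#define OPDEF64(op, number, operands, repkind) LIR_##op = ((number) | 0x40),  // 0x40 = assumed LIR64 bit
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
};

// A second expansion of the same table builds an opcode -> repkind map; the
// 64 OPDEF entries land at indices 0..63 and the OPDEF64 entries at 64..127
// (the table keeps every value, used or not, in order), so the array can be
// indexed directly by LOpcode.
static const LInsRepKind repKinds[] = {
#define OPDEF(op, number, operands, repkind)   LRK_##repkind,
#define OPDEF64(op, number, operands, repkind) LRK_##repkind,
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
};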
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -610,17 +610,17 @@ Assembler::hint(LIns* i, RegisterMask al
     uint32_t op = i->opcode();
     int prefer = ~0;
 
     if (op==LIR_call || op==LIR_fcall)
         prefer = rmask(R0);
     else if (op == LIR_callh)
         prefer = rmask(R1);
     else if (op == LIR_param)
-        prefer = rmask(imm2register(i->imm8()));
+        prefer = rmask(imm2register(i->paramArg()));
 
     if (_allocator.free & allow & prefer)
         allow &= prefer;
     return allow;
 }
 
 void
 Assembler::asm_qjoin(LIns *ins)
@@ -1559,17 +1559,16 @@ Assembler::asm_branch(bool branchOnFalse
         case LIR_flt:   cc = LO;    fp_cond = true;     break;
         case LIR_fle:   cc = LS;    fp_cond = true;     break;
         case LIR_fge:   cc = GE;    fp_cond = true;     break;
         case LIR_fgt:   cc = GT;    fp_cond = true;     break;
 
         // Standard signed and unsigned integer comparisons.
         case LIR_eq:    cc = EQ;    fp_cond = false;    break;
         case LIR_ov:    cc = VS;    fp_cond = false;    break;
-        case LIR_cs:    cc = CS;    fp_cond = false;    break;
         case LIR_lt:    cc = LT;    fp_cond = false;    break;
         case LIR_le:    cc = LE;    fp_cond = false;    break;
         case LIR_gt:    cc = GT;    fp_cond = false;    break;
         case LIR_ge:    cc = GE;    fp_cond = false;    break;
         case LIR_ult:   cc = LO;    fp_cond = false;    break;
         case LIR_ule:   cc = LS;    fp_cond = false;    break;
         case LIR_ugt:   cc = HI;    fp_cond = false;    break;
         case LIR_uge:   cc = HS;    fp_cond = false;    break;
@@ -1603,18 +1602,18 @@ Assembler::asm_branch(bool branchOnFalse
     return at;
 }
 
 void
 Assembler::asm_cmp(LIns *cond)
 {
     LOpcode condop = cond->opcode();
 
-    // LIR_ov and LIR_cs recycle the flags set by arithmetic ops
-    if ((condop == LIR_ov) || (condop == LIR_cs))
+    // LIR_ov recycles the flags set by arithmetic ops
+    if (condop == LIR_ov)
         return;
 
     LInsp lhs = cond->oprnd1();
     LInsp rhs = cond->oprnd2();
     Reservation *rA, *rB;
 
     // Not supported yet.
     NanoAssert(!lhs->isQuad() && !rhs->isQuad());
@@ -1695,17 +1694,16 @@ Assembler::asm_fcond(LInsp ins)
 void
 Assembler::asm_cond(LInsp ins)
 {
     Register r = prepResultReg(ins, AllowableFlagRegs);
     switch(ins->opcode())
     {
         case LIR_eq:    SET(r,EQ);      break;
         case LIR_ov:    SET(r,VS);      break;
-        case LIR_cs:    SET(r,CS);      break;
         case LIR_lt:    SET(r,LT);      break;
         case LIR_le:    SET(r,LE);      break;
         case LIR_gt:    SET(r,GT);      break;
         case LIR_ge:    SET(r,GE);      break;
         case LIR_ult:   SET(r,LO);      break;
         case LIR_ule:   SET(r,LS);      break;
         case LIR_ugt:   SET(r,HI);      break;
         case LIR_uge:   SET(r,HS);      break;
@@ -1877,17 +1875,16 @@ Assembler::asm_cmov(LInsp ins)
 
     // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
     // (This is true on Intel, is it true on all architectures?)
     const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
     switch (condval->opcode()) {
         // note that these are all opposites...
         case LIR_eq:    MOVNE(rr, iffalsereg);  break;
         case LIR_ov:    MOVVC(rr, iffalsereg);  break;
-        case LIR_cs:    MOVNC(rr, iffalsereg);  break;
         case LIR_lt:    MOVGE(rr, iffalsereg);  break;
         case LIR_le:    MOVGT(rr, iffalsereg);  break;
         case LIR_gt:    MOVLE(rr, iffalsereg);  break;
         case LIR_ge:    MOVLT(rr, iffalsereg);  break;
         case LIR_ult:   MOVCS(rr, iffalsereg);  break;
         case LIR_ule:   MOVHI(rr, iffalsereg);  break;
         case LIR_ugt:   MOVLS(rr, iffalsereg);  break;
         case LIR_uge:   MOVCC(rr, iffalsereg);  break;
@@ -1913,18 +1910,18 @@ Assembler::asm_qlo(LInsp ins)
     LIns *q = ins->oprnd1();
     int d = findMemFor(q);
     LD(rr, d, FP);
 }
 
 void
 Assembler::asm_param(LInsp ins)
 {
-    uint32_t a = ins->imm8();
-    uint32_t kind = ins->imm8b();
+    uint32_t a = ins->paramArg();
+    uint32_t kind = ins->paramKind();
     if (kind == 0) {
         // ordinary param
         AbiKind abi = _thisfrag->lirbuf->abi;
         uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
         if (a < abi_regcount) {
             // incoming arg in register
             prepResultReg(ins, rmask(argRegs[a]));
         } else {
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -498,17 +498,16 @@ enum {
 #define MOVGE(dr,sr) MOV_cond(GE, dr, sr)
 #define MOVLO(dr,sr) MOV_cond(LO, dr, sr) // Equivalent to MOVCC
 #define MOVCC(dr,sr) MOV_cond(CC, dr, sr) // Equivalent to MOVLO
 #define MOVLS(dr,sr) MOV_cond(LS, dr, sr)
 #define MOVHI(dr,sr) MOV_cond(HI, dr, sr)
 #define MOVHS(dr,sr) MOV_cond(HS, dr, sr) // Equivalent to MOVCS
 #define MOVCS(dr,sr) MOV_cond(CS, dr, sr) // Equivalent to MOVHS
 #define MOVVC(dr,sr) MOV_cond(VC, dr, sr) // overflow clear
-#define MOVNC(dr,sr) MOV_cond(CC, dr, sr) // carry clear
 
 // _d = [_b+off]
 #define LDR(_d,_b,_off)        asm_ldr_chk(_d,_b,_off,1)
 #define LDR_nochk(_d,_b,_off)  asm_ldr_chk(_d,_b,_off,0)
 
 // _d = #_imm
 #define LDi(_d,_imm) asm_ld_imm(_d,_imm)
 
@@ -661,18 +660,16 @@ enum {
 #define JL(t)   B_cond(LT,t)
 #define JNL(t)  B_cond(GE,t)
 #define JLE(t)  B_cond(LE,t)
 #define JNLE(t) B_cond(GT,t)
 #define JGE(t)  B_cond(GE,t)
 #define JNGE(t) B_cond(LT,t)
 #define JG(t)   B_cond(GT,t)
 #define JNG(t)  B_cond(LE,t)
-#define JC(t)   B_cond(CS,t)
-#define JNC(t)  B_cond(CC,t)
 #define JO(t)   B_cond(VS,t)
 #define JNO(t)  B_cond(VC,t)
 
 // used for testing result of an FP compare on x86; not used on arm.
 // JP = comparison  false
 #define JP(t)   do {NanoAssert(0); B_cond(NE,t); asm_output("jp 0x%08x",t); } while(0) 
 
 // JNP = comparison true
--- a/js/src/nanojit/NativeSparc.cpp
+++ b/js/src/nanojit/NativeSparc.cpp
@@ -486,18 +486,16 @@ namespace nanojit
 
         // produce the branch
         if (branchOnFalse)
             {
                 if (condop == LIR_eq)
                     BNE(0, tt);
                 else if (condop == LIR_ov)
                     BVC(0, tt);
-                else if (condop == LIR_cs)
-                    BCC(0, tt);
                 else if (condop == LIR_lt)
                     BGE(0, tt);
                 else if (condop == LIR_le)
                     BG(0, tt);
                 else if (condop == LIR_gt)
                     BLE(0, tt);
                 else if (condop == LIR_ge)
                     BL(0, tt);
@@ -511,18 +509,16 @@ namespace nanojit
                     BCS(0, tt);
             }
         else // op == LIR_xt
             {
                 if (condop == LIR_eq)
                     BE(0, tt);
                 else if (condop == LIR_ov)
                     BVS(0, tt);
-                else if (condop == LIR_cs)
-                    BCS(0, tt);
                 else if (condop == LIR_lt)
                     BL(0, tt);
                 else if (condop == LIR_le)
                     BLE(0, tt);
                 else if (condop == LIR_gt)
                     BG(0, tt);
                 else if (condop == LIR_ge)
                     BGE(0, tt);
@@ -539,18 +535,18 @@ namespace nanojit
         return at;
     }
 
     void Assembler::asm_cmp(LIns *cond)
     {
         underrunProtect(12);
         LOpcode condop = cond->opcode();
         
-        // LIR_ov and LIR_cs recycle the flags set by arithmetic ops
-        if ((condop == LIR_ov) || (condop == LIR_cs))
+        // LIR_ov recycles the flags set by arithmetic ops
+        if (condop == LIR_ov)
             return;
         
         LInsp lhs = cond->oprnd1();
         LInsp rhs = cond->oprnd2();
         Reservation *rA, *rB;
 
         NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));
 
@@ -586,17 +582,17 @@ namespace nanojit
         verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf("         jmp   SOT"); } );
         
         loopJumps.add(_nIns);
 
         assignSavedRegs();
 
         // restore first parameter, the only one we use
         LInsp state = _thisfrag->lirbuf->state;
-        findSpecificRegFor(state, argRegs[state->imm8()]); 
+        findSpecificRegFor(state, argRegs[state->paramArg()]); 
     }    
 
     void Assembler::asm_fcond(LInsp ins)
     {
         // only want certain regs 
         Register r = prepResultReg(ins, AllowableFlagRegs);
         asm_setcc(r, ins);
     }
@@ -607,18 +603,16 @@ namespace nanojit
         // only want certain regs 
         LOpcode op = ins->opcode();            
         Register r = prepResultReg(ins, AllowableFlagRegs);
 
         if (op == LIR_eq)
             MOVEI(1, 1, 0, 0, r);
         else if (op == LIR_ov)
             MOVVSI(1, 1, 0, 0, r);
-        else if (op == LIR_cs)
-            MOVCSI(1, 1, 0, 0, r);
         else if (op == LIR_lt)
             MOVLI(1, 1, 0, 0, r);
         else if (op == LIR_le)
             MOVLEI(1, 1, 0, 0, r);
         else if (op == LIR_gt)
             MOVGI(1, 1, 0, 0, r);
         else if (op == LIR_ge)
             MOVGEI(1, 1, 0, 0, r);
@@ -783,17 +777,16 @@ namespace nanojit
         // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
         // (This is true on Intel, is it true on all architectures?)
         const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
         if (op == LIR_cmov) {
             switch (condval->opcode()) {
                 // note that these are all opposites...
             case LIR_eq:  MOVNE (iffalsereg, 1, 0, 0, rr); break;
             case LIR_ov:  MOVVC (iffalsereg, 1, 0, 0, rr); break;
-            case LIR_cs:  MOVCC (iffalsereg, 1, 0, 0, rr); break;
             case LIR_lt:  MOVGE (iffalsereg, 1, 0, 0, rr); break;
             case LIR_le:  MOVG  (iffalsereg, 1, 0, 0, rr); break;
             case LIR_gt:  MOVLE (iffalsereg, 1, 0, 0, rr); break;
             case LIR_ge:  MOVL  (iffalsereg, 1, 0, 0, rr); break;
             case LIR_ult: MOVCC (iffalsereg, 1, 0, 0, rr); break;
             case LIR_ule: MOVGU (iffalsereg, 1, 0, 0, rr); break;
             case LIR_ugt: MOVLEU(iffalsereg, 1, 0, 0, rr); break;
             case LIR_uge: MOVCS (iffalsereg, 1, 0, 0, rr); break;
@@ -812,18 +805,18 @@ namespace nanojit
         Register rr = prepResultReg(ins, GpRegs);
         LIns *q = ins->oprnd1();
         int d = findMemFor(q);
         LDSW32(FP, d+4, rr);
     }
 
     void Assembler::asm_param(LInsp ins)
     {
-        uint32_t a = ins->imm8();
-        uint32_t kind = ins->imm8b();
+        uint32_t a = ins->paramArg();
+        uint32_t kind = ins->paramKind();
         //        prepResultReg(ins, rmask(argRegs[a]));
         if (kind == 0) {
             prepResultReg(ins, rmask(argRegs[a]));
         } else {
             prepResultReg(ins, rmask(savedRegs[a]));
         }
     }
 
--- a/js/src/nanojit/NativeSparc.h
+++ b/js/src/nanojit/NativeSparc.h
@@ -680,22 +680,16 @@ namespace nanojit
     } while (0)
 
 #define MOVFGEI(simm11, cc2, cc1, cc0, rd) \
     do { \
     asm_output("movge %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 0xb, cc1, cc0, simm11); \
     } while (0)
 
-#define MOVCSI(simm11, cc2, cc1, cc0, rd) \
-    do { \
-    asm_output("movcs %d, %s", simm11, gpn(rd)); \
-    Format_4_2I(rd, 0x2c, cc2, 5, cc1, cc0, simm11); \
-    } while (0)
-
 #define MOVLEUI(simm11, cc2, cc1, cc0, rd) \
     do { \
     asm_output("movleu %d, %s", simm11, gpn(rd)); \
     Format_4_2I(rd, 0x2c, cc2, 4, cc1, cc0, simm11); \
     } while (0)
 
 #define MOVGUI(simm11, cc2, cc1, cc0, rd) \
     do { \
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -338,18 +338,18 @@ namespace nanojit
         if (op == LIR_call) {
 			prefer &= rmask(retRegs[0]);
         }
         else if (op == LIR_fcall) {
             prefer &= rmask(FST0);
         }
         else if (op == LIR_param) {
             uint32_t max_regs = max_abi_regs[_thisfrag->lirbuf->abi];
-            if (i->imm8() < max_regs)
-    			prefer &= rmask(Register(i->imm8()));
+            if (i->paramArg() < max_regs)
+    			prefer &= rmask(Register(i->paramArg()));
         }
         else if (op == LIR_callh || (op == LIR_rsh && i->oprnd1()->opcode()==LIR_callh)) {
             prefer &= rmask(retRegs[1]);
         }
         else if (i->isCmp()) {
 			prefer &= AllowableFlagRegs;
         }
         else if (i->isconst()) {
@@ -652,18 +652,16 @@ namespace nanojit
 
 		// produce the branch
 		if (branchOnFalse)
 		{
 			if (condop == LIR_eq)
 				JNE(targ, isfar);
 			else if (condop == LIR_ov)
 				JNO(targ, isfar);
-			else if (condop == LIR_cs)
-				JNC(targ, isfar);
 			else if (condop == LIR_lt)
 				JNL(targ, isfar);
 			else if (condop == LIR_le)
 				JNLE(targ, isfar);
 			else if (condop == LIR_gt)
 				JNG(targ, isfar);
 			else if (condop == LIR_ge)
 				JNGE(targ, isfar);
@@ -677,18 +675,16 @@ namespace nanojit
 				JNAE(targ, isfar);
 		}
 		else // op == LIR_xt
 		{
 			if (condop == LIR_eq)
 				JE(targ, isfar);
 			else if (condop == LIR_ov)
 				JO(targ, isfar);
-			else if (condop == LIR_cs)
-				JC(targ, isfar);
 			else if (condop == LIR_lt)
 				JL(targ, isfar);
 			else if (condop == LIR_le)
 				JLE(targ, isfar);
 			else if (condop == LIR_gt)
 				JG(targ, isfar);
 			else if (condop == LIR_ge)
 				JGE(targ, isfar);
@@ -713,18 +709,18 @@ namespace nanojit
 		findSpecificRegFor(diff, EBX);
 		JMP(exit);
    	}
 
 	void Assembler::asm_cmp(LIns *cond)
 	{
         LOpcode condop = cond->opcode();
         
-        // LIR_ov and LIR_cs recycle the flags set by arithmetic ops
-        if ((condop == LIR_ov) || (condop == LIR_cs))
+        // LIR_ov recycles the flags set by arithmetic ops
+        if (condop == LIR_ov)
             return;
         
         LInsp lhs = cond->oprnd1();
 		LInsp rhs = cond->oprnd2();
 		Reservation *rA, *rB;
 
 		NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));
 
@@ -783,18 +779,16 @@ namespace nanojit
 		LOpcode op = ins->opcode();			
 		Register r = prepResultReg(ins, AllowableFlagRegs);
 		// SETcc only sets low 8 bits, so extend 
 		MOVZX8(r,r);
 		if (op == LIR_eq)
 			SETE(r);
 		else if (op == LIR_ov)
 			SETO(r);
-		else if (op == LIR_cs)
-			SETC(r);
 		else if (op == LIR_lt)
 			SETL(r);
 		else if (op == LIR_le)
 			SETLE(r);
 		else if (op == LIR_gt)
 			SETG(r);
 		else if (op == LIR_ge)
 			SETGE(r);
@@ -1081,17 +1075,16 @@ namespace nanojit
 		// (This is true on Intel, is it true on all architectures?)
 		const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
 		if (op == LIR_cmov) {
 			switch (condval->opcode())
 			{
 				// note that these are all opposites...
 				case LIR_eq:	MRNE(rr, iffalsereg);	break;
 				case LIR_ov:    MRNO(rr, iffalsereg);   break;
-				case LIR_cs:    MRNC(rr, iffalsereg);   break;
 				case LIR_lt:	MRGE(rr, iffalsereg);	break;
 				case LIR_le:	MRG(rr, iffalsereg);	break;
 				case LIR_gt:	MRLE(rr, iffalsereg);	break;
 				case LIR_ge:	MRL(rr, iffalsereg);	break;
 				case LIR_ult:	MRAE(rr, iffalsereg);	break;
 				case LIR_ule:	MRA(rr, iffalsereg);	break;
 				case LIR_ugt:	MRBE(rr, iffalsereg);	break;
 				case LIR_uge:	MRB(rr, iffalsereg);	break;
@@ -1109,18 +1102,18 @@ namespace nanojit
 		Register rr = prepResultReg(ins, GpRegs);
 		LIns *q = ins->oprnd1();
 		int d = findMemFor(q);
 		LD(rr, d+4, FP);
 	}
 
 	void Assembler::asm_param(LInsp ins)
 	{
-		uint32_t a = ins->imm8();
-		uint32_t kind = ins->imm8b();
+		uint32_t a = ins->paramArg();
+		uint32_t kind = ins->paramKind();
 		if (kind == 0) {
 			// ordinary param
 			AbiKind abi = _thisfrag->lirbuf->abi;
 			uint32_t abi_regcount = max_abi_regs[abi];
 			if (a < abi_regcount) {
 				// incoming arg in register
 				prepResultReg(ins, rmask(argRegs[a]));
 			} else {
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -388,29 +388,27 @@ namespace nanojit
 #define SETL(r)		do { count_alu(); ALU2(0x0f9C,(r),(r));			asm_output("setl  %s",gpn(r)); } while(0)
 #define SETLE(r)	do { count_alu(); ALU2(0x0f9E,(r),(r));			asm_output("setle %s",gpn(r)); } while(0)
 #define SETG(r)		do { count_alu(); ALU2(0x0f9F,(r),(r));			asm_output("setg  %s",gpn(r)); } while(0)
 #define SETGE(r)	do { count_alu(); ALU2(0x0f9D,(r),(r));			asm_output("setge %s",gpn(r)); } while(0)
 #define SETB(r)     do { count_alu(); ALU2(0x0f92,(r),(r));          asm_output("setb  %s",gpn(r)); } while(0)
 #define SETBE(r)    do { count_alu(); ALU2(0x0f96,(r),(r));          asm_output("setbe %s",gpn(r)); } while(0)
 #define SETA(r)     do { count_alu(); ALU2(0x0f97,(r),(r));          asm_output("seta  %s",gpn(r)); } while(0)
 #define SETAE(r)    do { count_alu(); ALU2(0x0f93,(r),(r));          asm_output("setae %s",gpn(r)); } while(0)
-#define SETC(r)     do { count_alu(); ALU2(0x0f90,(r),(r));          asm_output("setc  %s",gpn(r)); } while(0)
 #define SETO(r)     do { count_alu(); ALU2(0x0f92,(r),(r));          asm_output("seto  %s",gpn(r)); } while(0)
 
 #define MREQ(dr,sr)	do { count_alu(); ALU2(0x0f44,dr,sr); asm_output("cmove %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRNE(dr,sr)	do { count_alu(); ALU2(0x0f45,dr,sr); asm_output("cmovne %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRL(dr,sr)	do { count_alu(); ALU2(0x0f4C,dr,sr); asm_output("cmovl %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRLE(dr,sr)	do { count_alu(); ALU2(0x0f4E,dr,sr); asm_output("cmovle %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRG(dr,sr)	do { count_alu(); ALU2(0x0f4F,dr,sr); asm_output("cmovg %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRGE(dr,sr)	do { count_alu(); ALU2(0x0f4D,dr,sr); asm_output("cmovge %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRB(dr,sr)	do { count_alu(); ALU2(0x0f42,dr,sr); asm_output("cmovb %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRBE(dr,sr)	do { count_alu(); ALU2(0x0f46,dr,sr); asm_output("cmovbe %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRA(dr,sr)	do { count_alu(); ALU2(0x0f47,dr,sr); asm_output("cmova %s,%s", gpn(dr),gpn(sr)); } while(0)
-#define MRNC(dr,sr)	do { count_alu(); ALU2(0x0f43,dr,sr); asm_output("cmovnc %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRAE(dr,sr)	do { count_alu(); ALU2(0x0f43,dr,sr); asm_output("cmovae %s,%s", gpn(dr),gpn(sr)); } while(0)
 #define MRNO(dr,sr)	do { count_alu(); ALU2(0x0f41,dr,sr); asm_output("cmovno %s,%s", gpn(dr),gpn(sr)); } while(0)
 
 // these aren't currently used but left in for reference
 //#define LDEQ(r,d,b) do { ALU2m(0x0f44,r,d,b); asm_output("cmove %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
 //#define LDNEQ(r,d,b) do { ALU2m(0x0f45,r,d,b); asm_output("cmovne %s,%d(%s)", gpn(r),d,gpn(b)); } while(0)
 
 #define LD(reg,disp,base)	do { 	\
@@ -605,18 +603,16 @@ namespace nanojit
 #define JLE(t, isfar)	   JCC(0x0E, t, isfar, "jle")
 #define JNLE(t, isfar)   JCC(0x0F, t, isfar, "jnle")
 
 #define JG(t, isfar)	   JCC(0x0F, t, isfar, "jg")
 #define JNG(t, isfar)	   JCC(0x0E, t, isfar, "jng")
 #define JGE(t, isfar)	   JCC(0x0D, t, isfar, "jge")
 #define JNGE(t, isfar)   JCC(0x0C, t, isfar, "jnge")
 
-#define JC(t, isfar)     JCC(0x02, t, isfar, "jc")
-#define JNC(t, isfar)    JCC(0x03, t, isfar, "jnc")
 #define JO(t, isfar)     JCC(0x00, t, isfar, "jo")
 #define JNO(t, isfar)    JCC(0x01, t, isfar, "jno")
 
 // sse instructions 
 #define SSE(c,d,s)  \
 		underrunProtect(9);	\
 		MODRM((d),(s));	\
 		_nIns -= 3; \
--- a/js/src/xpconnect/src/xpccomponents.cpp
+++ b/js/src/xpconnect/src/xpccomponents.cpp
@@ -1527,20 +1527,20 @@ public:
     NS_DECL_NSICLASSINFO
 
 
 public:
     nsXPCComponents_ID();
     virtual ~nsXPCComponents_ID();
 
 private:
-    NS_METHOD CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
-                              JSContext * cx, JSObject * obj,
-                              PRUint32 argc, jsval * argv,
-                              jsval * vp, PRBool *_retval);
+    static nsresult CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
+                                    JSContext * cx, JSObject * obj,
+                                    PRUint32 argc, jsval * argv,
+                                    jsval * vp, PRBool *_retval);
 };
 
 /***************************************************************************/
 /* void getInterfaces (out PRUint32 count, [array, size_is (count), retval] 
                        out nsIIDPtr array); */
 NS_IMETHODIMP 
 nsXPCComponents_ID::GetInterfaces(PRUint32 *aCount, nsIID * **aArray)
 {
@@ -1670,17 +1670,18 @@ nsXPCComponents_ID::Call(nsIXPConnectWra
 
 /* PRBool construct (in nsIXPConnectWrappedNative wrapper, in JSContextPtr cx, in JSObjectPtr obj, in PRUint32 argc, in JSValPtr argv, in JSValPtr vp); */
 NS_IMETHODIMP
 nsXPCComponents_ID::Construct(nsIXPConnectWrappedNative *wrapper, JSContext * cx, JSObject * obj, PRUint32 argc, jsval * argv, jsval * vp, PRBool *_retval)
 {
     return CallOrConstruct(wrapper, cx, obj, argc, argv, vp, _retval);
 }
 
-NS_METHOD
+// static
+nsresult
 nsXPCComponents_ID::CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
                                     JSContext * cx, JSObject * obj,
                                     PRUint32 argc, jsval * argv,
                                     jsval * vp, PRBool *_retval)
 {
     // make sure we have at least one arg
 
     if(!argc)
@@ -1754,20 +1755,20 @@ public:
     NS_DECL_NSICLASSINFO
 
 
 public:
     nsXPCComponents_Exception();
     virtual ~nsXPCComponents_Exception();
 
 private:
-    NS_METHOD CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
-                              JSContext * cx, JSObject * obj,
-                              PRUint32 argc, jsval * argv,
-                              jsval * vp, PRBool *_retval);
+    static nsresult CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
+                                    JSContext * cx, JSObject * obj,
+                                    PRUint32 argc, jsval * argv,
+                                    jsval * vp, PRBool *_retval);
 };
 
 /***************************************************************************/
 /* void getInterfaces (out PRUint32 count, [array, size_is (count), retval] 
                        out nsIIDPtr array); */
 NS_IMETHODIMP 
 nsXPCComponents_Exception::GetInterfaces(PRUint32 *aCount, nsIID * **aArray)
 {
@@ -1897,17 +1898,18 @@ nsXPCComponents_Exception::Call(nsIXPCon
 
 /* PRBool construct (in nsIXPConnectWrappedNative wrapper, in JSContextPtr cx, in JSObjectPtr obj, in PRUint32 argc, in JSValPtr argv, in JSValPtr vp); */
 NS_IMETHODIMP
 nsXPCComponents_Exception::Construct(nsIXPConnectWrappedNative *wrapper, JSContext * cx, JSObject * obj, PRUint32 argc, jsval * argv, jsval * vp, PRBool *_retval)
 {
     return CallOrConstruct(wrapper, cx, obj, argc, argv, vp, _retval);
 }
 
-NS_METHOD
+// static
+nsresult
 nsXPCComponents_Exception::CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
                                            JSContext * cx, JSObject * obj,
                                            PRUint32 argc, jsval * argv,
                                            jsval * vp, PRBool *_retval)
 {
     XPCCallContext ccx(JS_CALLER, cx);
     if(!ccx.IsValid())
         return ThrowAndFail(NS_ERROR_XPC_UNEXPECTED, cx, _retval);
@@ -2043,20 +2045,20 @@ public:
 public:
     nsXPCConstructor(); // not implemented
     nsXPCConstructor(nsIJSCID* aClassID,
                      nsIJSIID* aInterfaceID,
                      const char* aInitializer);
     virtual ~nsXPCConstructor();
 
 private:
-    NS_METHOD CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
-                              JSContext * cx, JSObject * obj,
-                              PRUint32 argc, jsval * argv,
-                              jsval * vp, PRBool *_retval);
+    nsresult CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
+                             JSContext * cx, JSObject * obj,
+                             PRUint32 argc, jsval * argv,
+                             jsval * vp, PRBool *_retval);
 private:
     nsIJSCID* mClassID;
     nsIJSIID* mInterfaceID;
     char*     mInitializer;
 };
 
 /***************************************************************************/
 /* void getInterfaces (out PRUint32 count, [array, size_is (count), retval] 
@@ -2222,17 +2224,18 @@ nsXPCConstructor::Call(nsIXPConnectWrapp
 
 /* PRBool construct (in nsIXPConnectWrappedNative wrapper, in JSContextPtr cx, in JSObjectPtr obj, in PRUint32 argc, in JSValPtr argv, in JSValPtr vp); */
 NS_IMETHODIMP
 nsXPCConstructor::Construct(nsIXPConnectWrappedNative *wrapper, JSContext * cx, JSObject * obj, PRUint32 argc, jsval * argv, jsval * vp, PRBool *_retval)
 {
     return CallOrConstruct(wrapper, cx, obj, argc, argv, vp, _retval);
 }
 
-NS_METHOD
+// static
+nsresult
 nsXPCConstructor::CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
                                   JSContext * cx, JSObject * obj,
                                   PRUint32 argc, jsval * argv,
                                   jsval * vp, PRBool *_retval)
 {
     XPCCallContext ccx(JS_CALLER, cx);
     if(!ccx.IsValid())
         return ThrowAndFail(NS_ERROR_XPC_UNEXPECTED, cx, _retval);
@@ -2312,20 +2315,20 @@ public:
     NS_DECL_NSIXPCSCRIPTABLE
     NS_DECL_NSICLASSINFO
 
 public:
     nsXPCComponents_Constructor();
     virtual ~nsXPCComponents_Constructor();
 
 private:
-    NS_METHOD CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
-                              JSContext * cx, JSObject * obj,
-                              PRUint32 argc, jsval * argv,
-                              jsval * vp, PRBool *_retval);
+    static nsresult CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
+                                    JSContext * cx, JSObject * obj,
+                                    PRUint32 argc, jsval * argv,
+                                    jsval * vp, PRBool *_retval);
 };
 
 /***************************************************************************/
 /* void getInterfaces (out PRUint32 count, [array, size_is (count), retval] 
                        out nsIIDPtr array); */
 NS_IMETHODIMP 
 nsXPCComponents_Constructor::GetInterfaces(PRUint32 *aCount, nsIID * **aArray)
 {
@@ -2454,17 +2457,18 @@ nsXPCComponents_Constructor::Call(nsIXPC
 
 /* PRBool construct (in nsIXPConnectWrappedNative wrapper, in JSContextPtr cx, in JSObjectPtr obj, in PRUint32 argc, in JSValPtr argv, in JSValPtr vp); */
 NS_IMETHODIMP
 nsXPCComponents_Constructor::Construct(nsIXPConnectWrappedNative *wrapper, JSContext * cx, JSObject * obj, PRUint32 argc, jsval * argv, jsval * vp, PRBool *_retval)
 {
     return CallOrConstruct(wrapper, cx, obj, argc, argv, vp, _retval);
 }
 
-NS_METHOD
+// static
+nsresult
 nsXPCComponents_Constructor::CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
                                              JSContext * cx, JSObject * obj,
                                              PRUint32 argc, jsval * argv,
                                              jsval * vp, PRBool *_retval)
 {
     // make sure we have at least one arg
 
     if(!argc)
@@ -2653,23 +2657,20 @@ public:
     NS_DECL_NSIXPCCOMPONENTS_UTILS_SANDBOX
     NS_DECL_NSIXPCSCRIPTABLE
 
 public:
     nsXPCComponents_utils_Sandbox();
     virtual ~nsXPCComponents_utils_Sandbox();
 
 private:
-    // XXXjst: This method (and other CallOrConstruct()'s in this
-    // file) doesn't need to be virtual, could even be a static
-    // method!
-    NS_METHOD CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
-                              JSContext * cx, JSObject * obj,
-                              PRUint32 argc, jsval * argv,
-                              jsval * vp, PRBool *_retval);
+    static nsresult CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
+                                    JSContext * cx, JSObject * obj,
+                                    PRUint32 argc, jsval * argv,
+                                    jsval * vp, PRBool *_retval);
 };
 
 class nsXPCComponents_Utils :
             public nsIXPCComponents_Utils,
             public nsIXPCScriptable
 #ifdef XPC_USE_SECURITY_CHECKED_COMPONENT
           , public nsISecurityCheckedComponent
 #endif
@@ -3296,17 +3297,18 @@ nsXPCComponents_utils_Sandbox::Construct
                                          PRUint32 argc,
                                          jsval * argv,
                                          jsval * vp,
                                          PRBool *_retval)
 {
     return CallOrConstruct(wrapper, cx, obj, argc, argv, vp, _retval);
 }
 
-NS_IMETHODIMP
+// static
+nsresult
 nsXPCComponents_utils_Sandbox::CallOrConstruct(nsIXPConnectWrappedNative *wrapper,
                                                JSContext * cx, JSObject * obj,
                                                PRUint32 argc, jsval * argv,
                                                jsval * vp, PRBool *_retval)
 {
 #ifdef XPCONNECT_STANDALONE
     return NS_ERROR_NOT_AVAILABLE;
 #else /* XPCONNECT_STANDALONE */
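
The XPConnect hunks above make the same mechanical change three times over: each CallOrConstruct() reads only its arguments and never touches member state, so the virtual NS_METHOD declaration is tightened to a plain static nsresult, which also resolves the XXXjst comment removed above. A minimal standalone sketch of the pattern, using hypothetical names and plain C++ types in place of the XPCOM ones:

    #include <cstdio>

    class Dispatcher
    {
    public:
        // Call() and Construct() are thin entry points sharing one body,
        // just like the XPConnect classes in the patch.
        int Call(int argc)      { return CallOrConstruct(argc); }
        int Construct(int argc) { return CallOrConstruct(argc); }

    private:
        // static: depends only on its arguments, never on 'this', so it
        // needs no vtable slot and no implicit object parameter.
        static int CallOrConstruct(int argc) { return argc ? 0 : -1; }
    };

    int main()
    {
        Dispatcher d;
        std::printf("%d %d\n", d.Call(1), d.Construct(0));
        return 0;
    }
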
--- a/netwerk/base/src/nsSocketTransportService2.cpp
+++ b/netwerk/base/src/nsSocketTransportService2.cpp
@@ -555,17 +555,17 @@ nsSocketTransportService::AfterProcessNe
 NS_IMETHODIMP
 nsSocketTransportService::Run()
 {
     LOG(("STS thread init\n"));
 
     gSocketThread = PR_GetCurrentThread();
 
 #ifdef WINCE
-    SetThreadPriority(GetCurrentThread(), 116);
+    CeSetThreadPriority(GetCurrentThread(), 116);
 #endif
 
     // add thread event to poll list (mThreadEvent may be NULL)
     mPollList[0].fd = mThreadEvent;
     mPollList[0].in_flags = PR_POLL_READ;
     mPollList[0].out_flags = 0;
 
     nsIThread *thread = NS_GetCurrentThread();
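
The WINCE fix above swaps one thread-priority API for another: the desktop-style SetThreadPriority() accepts only the relative THREAD_PRIORITY_* constants and does not take a raw level like 116, while CeSetThreadPriority() takes an absolute Windows CE priority (0 is highest, 255 lowest). A hedged sketch, assuming the Windows CE SDK headers:

    #ifdef WINCE
    #include <windows.h>

    static void DropSocketThreadPriority()
    {
        // 116 is an absolute CE priority level; only the CE-specific
        // CeSetThreadPriority() understands the full 0-255 range.
        CeSetThreadPriority(GetCurrentThread(), 116);
    }
    #endif
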
--- a/parser/html/nsHtml5Portability.cpp
+++ b/parser/html/nsHtml5Portability.cpp
@@ -116,17 +116,17 @@ void
 nsHtml5Portability::releaseElement(nsIContent* element)
 {
   NS_IF_RELEASE(element);
 }
 
 PRBool
 nsHtml5Portability::localEqualsBuffer(nsIAtom* local, PRUnichar* buf, PRInt32 offset, PRInt32 length)
 {
-  return local->Equals(nsDependentString(buf + offset, buf + offset + length));
+  return local->Equals(nsDependentSubstring(buf + offset, buf + offset + length));
 }
 
 PRBool
 nsHtml5Portability::lowerCaseLiteralIsPrefixOfIgnoreAsciiCaseString(const char* lowerCaseLiteral, nsString* string)
 {
   if (!string) {
     return PR_FALSE;
   }
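
The localEqualsBuffer() fix matters because the characters being compared are a window into a larger parser buffer: nsDependentString assumes it is handed a null-terminated string, while nsDependentSubstring wraps an arbitrary [start, end) range without copying. A sketch of the corrected pattern, assuming the usual Mozilla string headers (the helper name here is hypothetical):

    #include "nsIAtom.h"
    #include "nsString.h"

    // Compare an atom against a slice of 'aBuf' that has no terminator.
    static PRBool AtomEqualsSlice(nsIAtom* aAtom, const PRUnichar* aBuf,
                                  PRInt32 aOffset, PRInt32 aLength)
    {
      const PRUnichar* start = aBuf + aOffset;
      // nsDependentSubstring neither copies nor reads past 'end'.
      return aAtom->Equals(nsDependentSubstring(start, start + aLength));
    }
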
--- a/widget/src/cocoa/nsMenuItemIconX.h
+++ b/widget/src/cocoa/nsMenuItemIconX.h
@@ -74,18 +74,24 @@ public:
 
   // GetIconURI fails if the item should not have any icon.
   nsresult GetIconURI(nsIURI** aIconURI);
 
   // LoadIcon will set a placeholder image and start a load request for the
   // icon.  The request may not complete until after LoadIcon returns.
   nsresult LoadIcon(nsIURI* aIconURI);
 
+  // Unless we take precautions, we may outlive the object that created us
+  // (mMenuObject, which owns our native menu item (mNativeMenuItem)).
+  // Destroy() should be called from mMenuObject's destructor to prevent
+  // this from happening.  See bug 499600.
+  void Destroy();
+
 protected:
   nsCOMPtr<nsIContent>  mContent;
   nsCOMPtr<imgIRequest> mIconRequest;
-  nsMenuObjectX*        mMenuObject;
+  nsMenuObjectX*        mMenuObject; // [weak]
   PRPackedBool          mLoadedIcon;
   PRPackedBool          mSetIcon;
-  NSMenuItem*           mNativeMenuItem;
+  NSMenuItem*           mNativeMenuItem; // [weak]
 };
 
 #endif // nsMenuItemIconX_h_
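
The [weak] annotations plus Destroy() form a manual weak-reference handshake: an in-flight imgIRequest can keep the icon object alive after its owner is gone, so the owner severs the raw back-pointers first and every late callback checks them before use. A standalone sketch of both sides of the handshake (hypothetical names, plain C++):

    #include <cstdio>

    class MenuItem;                   // stand-in for the owning object

    class IconLoader
    {
    public:
        explicit IconLoader(MenuItem* aOwner) : mOwner(aOwner) {}

        // Called from the owner's destructor: null the raw back-pointer
        // so late async callbacks cannot touch a dead object.
        void Destroy() { mOwner = nullptr; }

        // A completion callback that may arrive after Destroy().
        void OnLoadComplete()
        {
            if (!mOwner) {            // owner already gone; bail out
                std::puts("stale callback ignored");
                return;
            }
            std::puts("icon delivered");
        }

    private:
        MenuItem* mOwner;             // [weak] -- never deleted here
    };

    class MenuItem {};                // trivial stand-in owner

    int main()
    {
        MenuItem owner;
        IconLoader loader(&owner);
        loader.Destroy();             // what the destructors below do
        loader.OnLoadComplete();      // safely ignored
        return 0;
    }

The destructor hunks in nsMenuItemX.mm and nsMenuX.mm below are the owner side of this handshake.
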
--- a/widget/src/cocoa/nsMenuItemIconX.mm
+++ b/widget/src/cocoa/nsMenuItemIconX.mm
@@ -92,16 +92,29 @@ nsMenuItemIconX::nsMenuItemIconX(nsMenuO
 }
 
 nsMenuItemIconX::~nsMenuItemIconX()
 {
   if (mIconRequest)
     mIconRequest->CancelAndForgetObserver(NS_BINDING_ABORTED);
 }
 
+// Called from mMenuObject's destructor, to prevent us from outliving it
+// (as might otherwise happen if calls to our imgIDecoderObserver methods
+// are still outstanding).  mMenuObject owns our mNativeMenuItem.
+void nsMenuItemIconX::Destroy()
+{
+  if (mIconRequest) {
+    mIconRequest->CancelAndForgetObserver(NS_BINDING_ABORTED);
+    mIconRequest = nsnull;
+  }
+  mMenuObject = nsnull;
+  mNativeMenuItem = nil;
+}
+
 nsresult
 nsMenuItemIconX::SetupIcon()
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NSRESULT;
 
   // Still don't have one, then something is wrong, get out of here.
   if (!mNativeMenuItem) {
     NS_ERROR("No native menu item\n");
@@ -121,16 +134,19 @@ nsMenuItemIconX::SetupIcon()
   return LoadIcon(iconURI);
 
   NS_OBJC_END_TRY_ABORT_BLOCK_NSRESULT;
 }
 
 nsresult
 nsMenuItemIconX::GetIconURI(nsIURI** aIconURI)
 {
+  if (!mMenuObject)
+    return NS_ERROR_FAILURE;
+
   // Mac native menu items support having both a checkmark and an icon
   // simultaneously, but this is unheard of in the cross-platform toolkit,
   // seemingly because the win32 theme is unable to cope with both at once.
   // The downside is that it's possible to get a menu item marked with a
   // native checkmark and a checkmark for an icon.  Head off that possibility
   // by pretending that no icon exists if this is a checkable menu item.
   if (mMenuObject->MenuObjectType() == eMenuItemObjectType) {
     nsMenuItemX* menuItem = static_cast<nsMenuItemX*>(mMenuObject);
@@ -315,16 +331,18 @@ nsMenuItemIconX::OnStopFrame(imgIRequest
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK_NSRESULT;
 
   if (aRequest != mIconRequest) return NS_ERROR_FAILURE;
 
   // Only support one frame.
   if (mLoadedIcon)
     return NS_OK;
 
+  if (!mNativeMenuItem) return NS_ERROR_FAILURE;
+
   nsCOMPtr<gfxIImageFrame> frame = aFrame;
   nsCOMPtr<nsIImage> image = do_GetInterface(frame);
   if (!image) return NS_ERROR_FAILURE;
 
   nsresult rv = image->LockImagePixels(PR_FALSE);
   if (NS_FAILED(rv))
     return rv;
 
--- a/widget/src/cocoa/nsMenuItemX.mm
+++ b/widget/src/cocoa/nsMenuItemX.mm
@@ -67,16 +67,20 @@ nsMenuItemX::nsMenuItemX()
 
   MOZ_COUNT_CTOR(nsMenuItemX);
 }
 
 nsMenuItemX::~nsMenuItemX()
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK;
 
+  // Prevent the icon object from outliving us.
+  if (mIcon)
+    mIcon->Destroy();
+
   // autorelease the native menu item so that anything else happening to this
   // object happens before the native menu item actually dies
   [mNativeMenuItem autorelease];
 
   if (mContent)
     mMenuBar->UnregisterForContentChanges(mContent);
   if (mCommandContent)
     mMenuBar->UnregisterForContentChanges(mCommandContent);
--- a/widget/src/cocoa/nsMenuX.mm
+++ b/widget/src/cocoa/nsMenuX.mm
@@ -112,16 +112,20 @@ nsMenuX::nsMenuX()
 
   NS_OBJC_END_TRY_ABORT_BLOCK;
 }
 
 nsMenuX::~nsMenuX()
 {
   NS_OBJC_BEGIN_TRY_ABORT_BLOCK;
 
+  // Prevent the icon object from outliving us.
+  if (mIcon)
+    mIcon->Destroy();
+
   RemoveAll();
 
   [mNativeMenu setDelegate:nil];
   [mNativeMenu release];
   [mMenuDelegate release];
   // autorelease the native menu item so that anything else happening to this
   // object happens before the native menu item actually dies
   [mNativeMenuItem autorelease];
--- a/xpcom/glue/nsAutoLock.cpp
+++ b/xpcom/glue/nsAutoLock.cpp
@@ -445,17 +445,19 @@ void nsAutoMonitor::Exit()
 {
 #ifdef DEBUG
     if (!mAddr) {
         NS_ERROR("It is not legal to exit a null monitor");
         return;
     }
     (void) PR_SetThreadPrivate(LockStackTPI, mDown);
 #endif
-    PRStatus status = PR_ExitMonitor(mMonitor);
+    // Split 'status' init to avoid an "unused variable" compiler warning.
+    PRStatus status;
+    status = PR_ExitMonitor(mMonitor);
     NS_ASSERTION(status == PR_SUCCESS, "PR_ExitMonitor failed");
     mLockCount -= 1;
 }
 
 // XXX we don't worry about cached monitors being destroyed behind our back.
 // XXX current NSPR (mozilla/nsprpub/pr/src/threads/prcmon.c) never destroys
 // XXX a cached monitor! potential resource pig in conjunction with necko...
 
@@ -472,12 +474,14 @@ void nsAutoCMonitor::Enter()
     mLockCount += 1;
 }
 
 void nsAutoCMonitor::Exit()
 {
 #ifdef DEBUG
     (void) PR_SetThreadPrivate(LockStackTPI, mDown);
 #endif
-    PRStatus status = PR_CExitMonitor(mLockObject);
+    // Split 'status' init to avoid an "unused variable" compiler warning.
+    PRStatus status;
+    status = PR_CExitMonitor(mLockObject);
     NS_ASSERTION(status == PR_SUCCESS, "PR_CExitMonitor failed");
     mLockCount -= 1;
 }
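
Both nsAutoLock.cpp hunks above work around the same release-build wrinkle: NS_ASSERTION compiles away when DEBUG is off, leaving 'status' written but never read, and some compilers of this vintage flagged a variable initialized at its declaration and then unused while staying quiet about one assigned separately. A sketch of the effect with plain assert(), which NDEBUG strips the same way:

    #include <cassert>

    extern int ExitMonitor();   // stand-in for PR_ExitMonitor()

    void Exit()
    {
        // Written as two statements on purpose: the one-liner
        // 'int status = ExitMonitor();' drew an "unused variable"
        // warning from some compilers once assert() compiled away.
        int status;
        status = ExitMonitor();
        assert(status == 0);
    }
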
--- a/xpcom/reflect/xptinfo/tests/TestInterfaceInfo.cpp
+++ b/xpcom/reflect/xptinfo/tests/TestInterfaceInfo.cpp
@@ -92,57 +92,64 @@ int main (int argc, char **argv) {
     fprintf(stderr, "\ngetting info for name 'nsIBidirectionalEnumerator'\n");
     iim->GetInfoForName("nsIBidirectionalEnumerator", &info4);
 #ifdef DEBUG
 //    ((nsInterfaceInfo *)info4)->print(stderr);
 #endif
 
     fprintf(stderr, "\nparams work?\n");
     fprintf(stderr, "\ngetting info for name 'nsIServiceManager'\n");
-    iim->GetInfoForName("nsIServiceManager", &info5);
+    iim->GetInfoForName("nsIComponentManager", &info5);
 #ifdef DEBUG
 //    ((nsInterfaceInfo *)info5)->print(stderr);
 #endif
 
     // XXX: nsIServiceManager is no more; what do we test with?
     if (info5 == NULL) {
-        fprintf(stderr, "\nNo nsIServiceManager; cannot continue.\n");
+        fprintf(stderr, "\nNo nsIComponentManager; cannot continue.\n");
         return 1;
     }
 
     uint16 methodcount;
     info5->GetMethodCount(&methodcount);
     const nsXPTMethodInfo *mi;
     for (i = 0; i < methodcount; i++) {
         info5->GetMethodInfo(i, &mi);
         fprintf(stderr, "method %d, name %s\n", i, mi->GetName());
     }
 
-    // 7 is GetServiceWithListener, which has juicy params.
-    info5->GetMethodInfo(7, &mi);
-//    uint8 paramcount = mi->GetParamCount();
+    // 6 is createInstanceByContractID, which has juicy params.
+    info5->GetMethodInfo(6, &mi);
 
-    nsXPTParamInfo param2 = mi->GetParam(2);
-    // should be IID for nsIShutdownListener
+    const nsXPTParamInfo& param2 = mi->GetParam(1);
+    // should be IID for the service
     nsIID *nsISL;
-    info5->GetIIDForParam(7, &param2, &nsISL);
-//      const nsIID *nsISL = param2.GetInterfaceIID(info5);
-    fprintf(stderr, "iid assoc'd with param 2 of method 7 of GetServiceWithListener - %s\n", nsISL->ToString());
+    info5->GetIIDForParam(6, &param2, &nsISL);
+    fprintf(stderr, "iid assoc'd with param 1 of method 6 - createInstanceByContractID - %s\n", nsISL->ToString());
     // if we look up the name?
     char *nsISLname;
     iim->GetNameForIID(nsISL, &nsISLname);
     fprintf(stderr, "which is called %s\n", nsISLname);
 
-    fprintf(stderr, "\nhow about one defined in a different typelib\n");
-    nsXPTParamInfo param3 = mi->GetParam(3);
-    // should be IID for nsIShutdownListener
-    nsIID *nsISS;
-    info5->GetIIDForParam(7, &param3, &nsISS);
-//      const nsIID *nsISS = param3.GetInterfaceIID(info5);
-    fprintf(stderr, "iid assoc'd with param 3 of method 7 of GetServiceWithListener - %s\n", nsISS->ToString());
-    // if we look up the name?
-    char *nsISSname;
-    iim->GetNameForIID(nsISS, &nsISSname);
-    fprintf(stderr, "which is called %s\n", nsISSname);
+    fprintf(stderr, "\nNow check the last param\n");
+    const nsXPTParamInfo& param3 = mi->GetParam(3);
+
+    if (param3.GetType().TagPart() != nsXPTType::T_INTERFACE_IS) {
+        fprintf(stderr, "Param 3 is not of type T_INTERFACE_IS\n");
+        // Not returning an error, because this could legitimately change
+    }
+    // let's see which argument this refers to
+    uint8 argnum;
+    info5->GetInterfaceIsArgNumberForParam(6, &param3, &argnum);
+    fprintf(stderr, "param 3 refers to param %d of method 6 - createInstanceByContractID\n", (int)argnum);
+    // Get the type of the parameter referred to
+    const nsXPTParamInfo& arg_param = mi->GetParam(argnum);
+    const nsXPTType& arg_type = arg_param.GetType();
+    // Check to make sure it refers to the proper param
+    if (!arg_type.IsPointer() || arg_type.TagPart() != nsXPTType::T_IID) {
+        fprintf(stderr, "Param 3 of method 6 refers to a non-IID parameter\n");
+        // Not returning an error, because this could legitimately change
+    }
+
 
     return 0;
 }    
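
For orientation, the method the rewritten test exercises is, reproduced here from memory of nsIComponentManager.idl and therefore approximate:

    /*
     *   void createInstanceByContractID(in string aContractID,
     *                                   in nsISupports aDelegate,
     *                                   in nsIIDRef aIID,
     *                                   [retval, iid_is(aIID)]
     *                                   out nsQIResult result);
     *
     * The trailing 'result' param is typed T_INTERFACE_IS in the typelib:
     * its IID is not fixed at compile time but supplied at runtime by the
     * argument named in iid_is(), here aIID.  That dependency is exactly
     * what GetInterfaceIsArgNumberForParam() recovers above, and the
     * final check confirms the referenced argument really is an IID.
     */
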
 
--- a/xulrunner/setup/nsXULAppInstall.js
+++ b/xulrunner/setup/nsXULAppInstall.js
@@ -208,17 +208,17 @@ function createExtractor(aFile) {
 const AppInstall = {
 
   /* nsISupports */
   QueryInterface : function ai_QI(iid) {
     if (iid.equals(nsIXULAppInstall) ||
         iid.equals(nsISupports))
       return this;
 
-    throw Components.result.NS_ERROR_NO_INTERFACE;
+    throw Components.results.NS_ERROR_NO_INTERFACE;
   },
 
   /* nsIXULAppInstall */
   installApplication : function ai_IA(aAppFile, aDirectory, aLeafName) {
     var extractor = createExtractor(aAppFile);
     var iniParser = extractor.iniParser;
 
     var appName = iniParser.getString("App", "Name");