Merge tracemonkey to mozilla-central.
authorRobert Sayre <sayrer@gmail.com>
Tue, 24 Mar 2009 13:49:05 -0400
changeset 26572 f0bcbbeb355f05f555b190f346d43157bad610d8
parent 26508 b47f0de93a826631d91f8e9df9ec76ac00215a05 (current diff)
parent 26571 54823f3acc0eabdd00b03cb686d5eccc7286c7b2 (diff)
child 26573 4a34c6235bb72d440ea4de1f9773b3e92037335e
push id6115
push userrsayre@mozilla.com
push dateTue, 24 Mar 2009 17:50:03 +0000
treeherdermozilla-central@4a34c6235bb7 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
milestone1.9.2a1pre
Merge tracemonkey to mozilla-central.
js/src/Makefile.in
js/src/configure.in
js/src/jsapi.cpp
js/src/jsarray.cpp
js/src/jsbuiltins.h
js/src/jscntxt.cpp
js/src/jsdbgapi.cpp
js/src/jsfun.cpp
js/src/jsgc.cpp
js/src/jsinterp.cpp
js/src/jsnum.cpp
js/src/jsobj.cpp
js/src/jsparse.cpp
js/src/jsregexp.cpp
js/src/jsscript.cpp
js/src/jsstr.cpp
js/src/jstracer.cpp
js/src/nanojit/avmplus.h
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -229,16 +229,22 @@ CPPSRCS += \
 		Fragmento.cpp  \
 		LIR.cpp        \
 		RegAlloc.cpp   \
 		avmplus.cpp    \
 		Native$(NANOJIT_ARCH).cpp \
 		jsbuiltins.cpp         \
 		$(NULL)
 
+ifdef WINCE
+# don't need -c
+AS_DASH_C_FLAG =
+ASFILES += jswince.asm
+endif
+
 ifdef DEBUG
 CPPSRCS += TraceTreeDrawer.cpp
 endif
 endif # ENABLE_JIT
 
 ifdef HAVE_DTRACE
 INSTALLED_HEADERS += \
 		jsdtracef.h \
--- a/js/src/configure.in
+++ b/js/src/configure.in
@@ -1860,20 +1860,18 @@ case "$target" in
 
 *-wince*)
 
     MOZ_TOOLS_DIR=`echo $MOZ_TOOLS`
     AR_LIST="$AR -list"
     AR_EXTRACT="$AR -extract"
     AR_DELETE="$AR d"
     AR_FLAGS='-OUT:"$@"'
-
-    if test -z "$AS_BIN"; then
-        AS="$AS_BIN"
-    fi
+    AS="$AS_BIN"
+
     DSO_CFLAGS=
     DSO_PIC_CFLAGS=
     DLL_SUFFIX=.dll
     BIN_SUFFIX='.exe'
     if test -z "$RC"; then 
         RC=rc.exe  
     fi
     # certain versions of cygwin's makedepend barf on the 
@@ -3010,35 +3008,16 @@ if test ! "$GNU_CXX"; then
 	;;
      *)
 	AC_CHECK_LIB(C, demangle)
 	;;
      esac
 fi
 AC_CHECK_LIB(socket, socket)
 
-dnl Enable VFP support on ARM
-MOZ_ARG_DISABLE_BOOL(arm-vfp,
-[  --disable-arm-vfp       Disable ARM VFP instructions in JavaScript JIT],
-   MOZ_ARM_VFP=, MOZ_ARM_VFP=1, MOZ_ARM_VFP=1)
-if test "$MOZ_ARM_VFP"; then
-   AC_DEFINE(NJ_ARM_VFP)
-fi
-
-AC_MSG_CHECKING(for ARM SIMD support)
-AC_TRY_COMPILE([],
-               [asm("uqadd8 r1, r1, r2");],
-               result="yes", result="no")
-AC_MSG_RESULT("$result")
-if test "$result" = "yes"; then
-    AC_DEFINE(HAVE_ARM_SIMD)
-    HAVE_ARM_SIMD=1
-fi
-AC_SUBST(HAVE_ARM_SIMD)
-
 dnl ========================================================
 dnl = pthread support
 dnl = Start by checking whether the system support pthreads
 dnl ========================================================
 case "$target_os" in
 darwin*)
     USE_PTHREADS=1
     ;;
--- a/js/src/imacros.c.out
+++ b/js/src/imacros.c.out
@@ -274,16 +274,41 @@ static struct {
 /*30*/  JSOP_GOTO, 0, 5,
 /*33*/  JSOP_SWAP,
 /*34*/  JSOP_POP,
 /*35*/  JSOP_CALL, 0, 1,
 /*38*/  JSOP_STOP,
     },
 };
 static struct {
+    jsbytecode String[39];
+} new_imacros = {
+    {
+/* 0*/  JSOP_DUP,
+/* 1*/  JSOP_DUP,
+/* 2*/  JSOP_GETPROP, 0, COMMON_ATOM_INDEX(toString),
+/* 5*/  JSOP_IFPRIMTOP, 0, 13,
+/* 8*/  JSOP_SWAP,
+/* 9*/  JSOP_CALL, 0, 0,
+/*12*/  JSOP_IFPRIMTOP, 0, 21,
+/*15*/  JSOP_GOTO, 0, 4,
+/*18*/  JSOP_POP,
+/*19*/  JSOP_POP,
+/*20*/  JSOP_CALLPROP, 0, COMMON_ATOM_INDEX(valueOf),
+/*23*/  JSOP_STRING, 0, COMMON_TYPE_ATOM_INDEX(JSTYPE_STRING),
+/*26*/  JSOP_CALL, 0, 1,
+/*29*/  JSOP_PRIMTOP,
+/*30*/  JSOP_GOTO, 0, 5,
+/*33*/  JSOP_SWAP,
+/*34*/  JSOP_POP,
+/*35*/  JSOP_NEW, 0, 1,
+/*38*/  JSOP_STOP,
+    },
+};
+static struct {
     jsbytecode apply0[8];
     jsbytecode apply1[12];
     jsbytecode apply2[16];
     jsbytecode apply3[21];
     jsbytecode apply4[26];
     jsbytecode apply5[31];
     jsbytecode apply6[36];
     jsbytecode apply7[41];
@@ -636,16 +661,39 @@ static struct {
 /* 0*/  JSOP_SWAP,
 /* 1*/  JSOP_CALLBUILTIN, ((JSBUILTIN_GetElement) & 0xff00) >> 8, ((JSBUILTIN_GetElement) & 0xff),
 /* 4*/  JSOP_PICK, 2,
 /* 6*/  JSOP_CALL, 0, 1,
 /* 9*/  JSOP_STOP,
     },
 };
 static struct {
+    jsbytecode callprop[12];
+    jsbytecode callelem[12];
+} callelem_imacros = {
+    {
+/* 0*/  JSOP_SWAP,
+/* 1*/  JSOP_DUP,
+/* 2*/  JSOP_CALLBUILTIN, ((JSBUILTIN_GetProperty) & 0xff00) >> 8, ((JSBUILTIN_GetProperty) & 0xff),
+/* 5*/  JSOP_PICK, 3,
+/* 7*/  JSOP_CALL, 0, 1,
+/*10*/  JSOP_SWAP,
+/*11*/  JSOP_STOP,
+    },
+    {
+/* 0*/  JSOP_SWAP,
+/* 1*/  JSOP_DUP,
+/* 2*/  JSOP_CALLBUILTIN, ((JSBUILTIN_GetElement) & 0xff00) >> 8, ((JSBUILTIN_GetElement) & 0xff),
+/* 5*/  JSOP_PICK, 3,
+/* 7*/  JSOP_CALL, 0, 1,
+/*10*/  JSOP_SWAP,
+/*11*/  JSOP_STOP,
+    },
+};
+static struct {
     jsbytecode setprop[15];
     jsbytecode setelem[15];
 } setelem_imacros = {
     {
 /* 0*/  JSOP_DUP,
 /* 1*/  JSOP_PICK, 3,
 /* 3*/  JSOP_CALLBUILTIN, ((JSBUILTIN_SetProperty) & 0xff00) >> 8, ((JSBUILTIN_SetProperty) & 0xff),
 /* 6*/  JSOP_PICK, 4,
@@ -768,17 +816,17 @@ uint8 js_opcode2extra[JSOP_LIMIT] = {
     0,  /* JSOP_NULLTHIS */
     3,  /* JSOP_ITER */
     2,  /* JSOP_NEXTITER */
     0,  /* JSOP_ENDITER */
     8,  /* JSOP_APPLY */
     0,  /* JSOP_SWAP */
     0,  /* JSOP_OBJECT */
     0,  /* JSOP_POP */
-    0,  /* JSOP_NEW */
+    2,  /* JSOP_NEW */
     0,  /* JSOP_TRAP */
     0,  /* JSOP_GETARG */
     0,  /* JSOP_SETARG */
     0,  /* JSOP_GETLOCAL */
     0,  /* JSOP_SETLOCAL */
     0,  /* JSOP_UINT16 */
     0,  /* JSOP_NEWINIT */
     0,  /* JSOP_ENDINIT */
@@ -880,17 +928,17 @@ uint8 js_opcode2extra[JSOP_LIMIT] = {
     0,  /* JSOP_GETUPVAR */
     0,  /* JSOP_DELDESC */
     0,  /* JSOP_UINT24 */
     0,  /* JSOP_INDEXBASE */
     0,  /* JSOP_RESETBASE */
     0,  /* JSOP_RESETBASE0 */
     0,  /* JSOP_STARTXML */
     0,  /* JSOP_STARTXMLEXPR */
-    0,  /* JSOP_CALLELEM */
+    3,  /* JSOP_CALLELEM */
     0,  /* JSOP_STOP */
     0,  /* JSOP_GETXPROP */
     0,  /* JSOP_CALLXMLNAME */
     0,  /* JSOP_TYPEOFEXPR */
     0,  /* JSOP_ENTERBLOCK */
     0,  /* JSOP_LEAVEBLOCK */
     0,  /* JSOP_PICK */
     0,  /* JSOP_PRIMTOP */
@@ -946,10 +994,12 @@ uint8 js_opcode2extra[JSOP_LIMIT] = {
  || x == JSOP_NEG \
  || x == JSOP_POS \
  || x == JSOP_GETELEM \
  || x == JSOP_SETELEM \
  || x == JSOP_CALL \
  || x == JSOP_ITER \
  || x == JSOP_NEXTITER \
  || x == JSOP_APPLY \
+ || x == JSOP_NEW \
  || x == JSOP_INITELEM \
+ || x == JSOP_CALLELEM \
 )
--- a/js/src/imacros.jsasm
+++ b/js/src/imacros.jsasm
@@ -315,16 +315,42 @@ 2:      pop                             
 3:      swap                                # String this rval obj
         pop                                 # String this rval
 4:      call 1                              # str
         stop                                # str
     .end
 
 .end
 
+.igroup new JSOP_NEW
+
+    .imacro String                          # String this obj
+        dup                                 # String this obj obj
+        dup                                 # String this obj obj obj
+        getprop toString                    # String this obj obj toString
+        ifprimtop 1                         # String this obj obj toString
+        swap                                # String this obj toString obj
+        call 0                              # String this obj rval
+        ifprimtop 3                         # String this obj rval
+        goto 2
+1:      pop                                 # String this obj obj
+2:      pop                                 # String this obj
+        callprop valueOf                    # String this valueOf obj
+        string string                       # String this valueOf obj "string"
+        call 1                              # String this rval
+        primtop                             # String this rval
+        goto 4                              # String this rval
+3:      swap                                # String this rval obj
+        pop                                 # String this rval
+4:      new 1                               # strobj
+        stop                                # strobj
+    .end
+
+.end
+
 .igroup apply JSOP_APPLY
     .imacro apply0                          # apply fun this arr
         pick 3                              # fun this arr apply
         pop                                 # fun this arr
         pop                                 # fun this
         call 0                              #
         stop                                #
     .end                                    #
@@ -680,16 +706,40 @@ 4:      call 1                          
         callbuiltin (JSBUILTIN_GetElement)          # i fun obj
         pick 2                                      # fun obj i
         call 1                                      # propval
         stop
     .end
 
 .end
 
+.igroup callelem JSOP_CALLELEM
+
+    .imacro callprop                                # obj name
+        swap                                        # name obj
+        dup                                         # name obj obj
+        callbuiltin (JSBUILTIN_GetProperty)         # name obj fun obj
+        pick 3                                      # obj fun obj name
+        call 1                                      # obj propval
+        swap                                        # propval obj
+        stop
+    .end
+
+    .imacro callelem                                # obj i
+        swap                                        # i obj
+        dup                                         # i obj obj
+        callbuiltin (JSBUILTIN_GetElement)          # i obj fun obj
+        pick 3                                      # obj fun obj i
+        call 1                                      # obj propval
+        swap                                        # propval obj
+        stop
+    .end
+
+.end
+
 .igroup setelem JSOP_SETELEM
 
     .imacro setprop                                 # obj name val
         dup                                         # obj name val val
         pick 3                                      # name val val obj
         callbuiltin (JSBUILTIN_SetProperty)         # name val val fun obj
         pick 4                                      # val val fun obj name
         pick 4                                      # val fun obj name val
--- a/js/src/js.msg
+++ b/js/src/js.msg
@@ -150,17 +150,17 @@ MSG_DEF(JSMSG_WHITHER_WHENCE,          6
 MSG_DEF(JSMSG_BAD_SCRIPT_MAGIC,        68, 0, JSEXN_INTERNALERR, "bad script XDR magic number")
 MSG_DEF(JSMSG_PAREN_BEFORE_FORMAL,     69, 0, JSEXN_SYNTAXERR, "missing ( before formal parameters")
 MSG_DEF(JSMSG_MISSING_FORMAL,          70, 0, JSEXN_SYNTAXERR, "missing formal parameter")
 MSG_DEF(JSMSG_PAREN_AFTER_FORMAL,      71, 0, JSEXN_SYNTAXERR, "missing ) after formal parameters")
 MSG_DEF(JSMSG_CURLY_BEFORE_BODY,       72, 0, JSEXN_SYNTAXERR, "missing { before function body")
 MSG_DEF(JSMSG_CURLY_AFTER_BODY,        73, 0, JSEXN_SYNTAXERR, "missing } after function body")
 MSG_DEF(JSMSG_PAREN_BEFORE_COND,       74, 0, JSEXN_SYNTAXERR, "missing ( before condition")
 MSG_DEF(JSMSG_PAREN_AFTER_COND,        75, 0, JSEXN_SYNTAXERR, "missing ) after condition")
-MSG_DEF(JSMSG_UNUSED76,                76, 0, JSEXN_NONE, "unused76")
+MSG_DEF(JSMSG_DESTRUCT_DUP_ARG,        76, 0, JSEXN_SYNTAXERR, "duplicate argument is mixed with destructuring pattern")
 MSG_DEF(JSMSG_NAME_AFTER_DOT,          77, 0, JSEXN_SYNTAXERR, "missing name after . operator")
 MSG_DEF(JSMSG_BRACKET_IN_INDEX,        78, 0, JSEXN_SYNTAXERR, "missing ] in index expression")
 MSG_DEF(JSMSG_UNUSED79,                79, 0, JSEXN_NONE, "unused79")
 MSG_DEF(JSMSG_PAREN_BEFORE_SWITCH,     80, 0, JSEXN_SYNTAXERR, "missing ( before switch expression")
 MSG_DEF(JSMSG_PAREN_AFTER_SWITCH,      81, 0, JSEXN_SYNTAXERR, "missing ) after switch expression")
 MSG_DEF(JSMSG_CURLY_BEFORE_SWITCH,     82, 0, JSEXN_SYNTAXERR, "missing { before switch body")
 MSG_DEF(JSMSG_COLON_AFTER_CASE,        83, 0, JSEXN_SYNTAXERR, "missing : after case label")
 MSG_DEF(JSMSG_WHILE_AFTER_DO,          84, 0, JSEXN_SYNTAXERR, "missing while after do-loop body")
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -783,18 +783,16 @@ JS_NewRuntime(uint32 maxbytes)
         goto bad;
     if (!js_InitGC(rt, maxbytes))
         goto bad;
     if (!js_InitAtomState(rt))
         goto bad;
     if (!js_InitDeflatedStringCache(rt))
         goto bad;
 #ifdef JS_THREADSAFE
-    if (!js_InitThreadPrivateIndex(js_ThreadDestructorCB))
-        goto bad;
     rt->gcLock = JS_NEW_LOCK();
     if (!rt->gcLock)
         goto bad;
     rt->gcDone = JS_NEW_CONDVAR(rt->gcLock);
     if (!rt->gcDone)
         goto bad;
     rt->requestDone = JS_NEW_CONDVAR(rt->gcLock);
     if (!rt->requestDone)
@@ -813,20 +811,18 @@ JS_NewRuntime(uint32 maxbytes)
         goto bad;
     rt->titleSharingTodo = NO_TITLE_SHARING_TODO;
     rt->debuggerLock = JS_NEW_LOCK();
     if (!rt->debuggerLock)
         goto bad;
 #endif
     if (!js_InitPropertyTree(rt))
         goto bad;
-
-#if !defined JS_THREADSAFE && defined JS_TRACER
-    js_InitJIT(&rt->traceMonitor);
-#endif
+    if (!js_InitThreads(rt))
+        goto bad;
 
     return rt;
 
 bad:
     JS_DestroyRuntime(rt);
     return NULL;
 }
 
@@ -845,20 +841,17 @@ JS_DestroyRuntime(JSRuntime *rt)
             cxcount++;
         }
         fprintf(stderr,
 "JS API usage error: %u context%s left in runtime upon JS_DestroyRuntime.\n",
                 cxcount, (cxcount == 1) ? "" : "s");
     }
 #endif
 
-#if !defined JS_THREADSAFE && defined JS_TRACER
-    js_FinishJIT(&rt->traceMonitor);
-#endif
-
+    js_FinishThreads(rt);
     js_FreeRuntimeScriptState(rt);
     js_FinishAtomState(rt);
 
     /*
      * Free unit string storage only after all strings have been finalized, so
      * that js_FinalizeString can detect unit strings and avoid calling free
      * on their chars storage.
      */
@@ -880,35 +873,32 @@ JS_DestroyRuntime(JSRuntime *rt)
     if (rt->rtLock)
         JS_DESTROY_LOCK(rt->rtLock);
     if (rt->stateChange)
         JS_DESTROY_CONDVAR(rt->stateChange);
     if (rt->titleSharingDone)
         JS_DESTROY_CONDVAR(rt->titleSharingDone);
     if (rt->debuggerLock)
         JS_DESTROY_LOCK(rt->debuggerLock);
-#else
-    GSN_CACHE_CLEAR(&rt->gsnCache);
 #endif
     js_FinishPropertyTree(rt);
     free(rt);
 }
 
 JS_PUBLIC_API(void)
 JS_ShutDown(void)
 {
 #ifdef JS_OPMETER
     extern void js_DumpOpMeters();
 
     js_DumpOpMeters();
 #endif
 
     js_FinishDtoa();
 #ifdef JS_THREADSAFE
-    js_CleanupThreadPrivateData();  /* Fixes bug 464828. */
     js_CleanupLocks();
 #endif
     PRMJ_NowShutdown();
 }
 
 JS_PUBLIC_API(void *)
 JS_GetRuntimePrivate(JSRuntime *rt)
 {
@@ -922,25 +912,24 @@ JS_SetRuntimePrivate(JSRuntime *rt, void
 }
 
 JS_PUBLIC_API(void)
 JS_BeginRequest(JSContext *cx)
 {
 #ifdef JS_THREADSAFE
     JSRuntime *rt;
 
-    JS_ASSERT(cx->thread->id == js_CurrentThreadId());
+    JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
     if (!cx->requestDepth) {
         JS_ASSERT(cx->gcLocalFreeLists == &js_GCEmptyFreeListSet);
 
         /* Wait until the GC is finished. */
         rt = cx->runtime;
         JS_LOCK_GC(rt);
 
-        /* NB: we use cx->thread here, not js_GetCurrentThread(). */
         if (rt->gcThread != cx->thread) {
             while (rt->gcLevel > 0)
                 JS_AWAIT_GC_DONE(rt);
         }
 
         /* Indicate that a request is running. */
         rt->requestCount++;
         cx->requestDepth = 1;
@@ -957,16 +946,17 @@ JS_PUBLIC_API(void)
 JS_EndRequest(JSContext *cx)
 {
 #ifdef JS_THREADSAFE
     JSRuntime *rt;
     JSTitle *title, **todop;
     JSBool shared;
 
     CHECK_REQUEST(cx);
+    JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
     JS_ASSERT(cx->requestDepth > 0);
     JS_ASSERT(cx->outstandingRequests > 0);
     if (cx->requestDepth == 1) {
         /* Lock before clearing to interlock with ClaimScope, in jslock.c. */
         rt = cx->runtime;
         JS_LOCK_GC(rt);
         cx->requestDepth = 0;
         cx->outstandingRequests--;
@@ -1930,24 +1920,18 @@ JS_NewDoubleValue(JSContext *cx, jsdoubl
         return JS_FALSE;
     *rval = DOUBLE_TO_JSVAL(dp);
     return JS_TRUE;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval)
 {
-    jsint i;
-
     CHECK_REQUEST(cx);
-    if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i)) {
-        *rval = INT_TO_JSVAL(i);
-        return JS_TRUE;
-    }
-    return JS_NewDoubleValue(cx, d, rval);
+    return js_NewWeaklyRootedNumber(cx, d, rval);
 }
 
 #undef JS_AddRoot
 JS_PUBLIC_API(JSBool)
 JS_AddRoot(JSContext *cx, void *rp)
 {
     CHECK_REQUEST(cx);
     return js_AddRoot(cx, rp, NULL);
@@ -2846,40 +2830,26 @@ JS_GetPrototype(JSContext *cx, JSObject 
     return proto && proto->map ? proto : NULL;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_SetPrototype(JSContext *cx, JSObject *obj, JSObject *proto)
 {
     CHECK_REQUEST(cx);
     JS_ASSERT(obj != proto);
-#ifdef DEBUG
-    /*
-     * FIXME: bug 408416. The cycle-detection required for script-writeable
-     * __proto__ lives in js_SetProtoOrParent over in jsobj.c, also known as
-     * js_ObjectOps.setProto. This hook must detect cycles, to prevent scripts
-     * from ilooping SpiderMonkey trivially. But the overhead of detecting
-     * cycles is high enough, and the threat from JS-API-calling C++ code is
-     * low enough, that it's not worth burdening the non-DEBUG callers. Same
-     * goes for JS_SetParent, below.
-     */
-    if (obj->map->ops->setProto)
-        return obj->map->ops->setProto(cx, obj, JSSLOT_PROTO, proto);
-#else
     if (OBJ_IS_NATIVE(obj)) {
         JS_LOCK_OBJ(cx, obj);
         if (!js_GetMutableScope(cx, obj)) {
             JS_UNLOCK_OBJ(cx, obj);
             return JS_FALSE;
         }
         LOCKED_OBJ_SET_PROTO(obj, proto);
         JS_UNLOCK_OBJ(cx, obj);
         return JS_TRUE;
     }
-#endif
     OBJ_SET_PROTO(cx, obj, proto);
     return JS_TRUE;
 }
 
 JS_PUBLIC_API(JSObject *)
 JS_GetParent(JSContext *cx, JSObject *obj)
 {
     JSObject *parent;
@@ -2890,21 +2860,16 @@ JS_GetParent(JSContext *cx, JSObject *ob
     return parent && parent->map ? parent : NULL;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_SetParent(JSContext *cx, JSObject *obj, JSObject *parent)
 {
     CHECK_REQUEST(cx);
     JS_ASSERT(obj != parent);
-#ifdef DEBUG
-    /* FIXME: bug 408416, see JS_SetPrototype just above. */
-    if (obj->map->ops->setParent)
-        return obj->map->ops->setParent(cx, obj, JSSLOT_PARENT, parent);
-#endif
     OBJ_SET_PARENT(cx, obj, parent);
     return JS_TRUE;
 }
 
 JS_PUBLIC_API(JSObject *)
 JS_GetConstructor(JSContext *cx, JSObject *proto)
 {
     jsval cval;
@@ -5929,64 +5894,55 @@ JS_GetContextThread(JSContext *cx)
  * old owning thread id, or -1 if the operation failed.
  */
 JS_PUBLIC_API(jsword)
 JS_SetContextThread(JSContext *cx)
 {
 #ifdef JS_THREADSAFE
     JS_ASSERT(cx->requestDepth == 0);
     if (cx->thread) {
-        JS_ASSERT(cx->thread->id == js_CurrentThreadId());
+        JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
         return cx->thread->id;
     }
 
-    JSRuntime *rt = cx->runtime;
-    JSThread *thread = js_GetCurrentThread(rt);
-    if (!thread) {
+    if (!js_InitContextThread(cx)) {
         js_ReportOutOfMemory(cx);
         return -1;
     }
 
-    /*
-     * We must not race with a GC that accesses cx->thread for all threads,
-     * see bug 476934.
-     */
-    JS_LOCK_GC(rt);
-    js_WaitForGC(rt);
-    js_InitContextThread(cx, thread);
-    JS_UNLOCK_GC(rt);
+    /* Here the GC lock is still held after js_InitContextThread took it. */
+    JS_UNLOCK_GC(cx->runtime);
 #endif
     return 0;
 }
 
 JS_PUBLIC_API(jsword)
 JS_ClearContextThread(JSContext *cx)
 {
 #ifdef JS_THREADSAFE
     /*
      * This must be called outside a request and, if cx is associated with a
      * thread, this must be called only from that thread.  If not, this is a
      * harmless no-op.
      */
     JS_ASSERT(cx->requestDepth == 0);
     if (!cx->thread)
         return 0;
+    JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
     jsword old = cx->thread->id;
-    JS_ASSERT(old == js_CurrentThreadId());
 
     /*
      * We must not race with a GC that accesses cx->thread for all threads,
      * see bug 476934.
      */
     JSRuntime *rt = cx->runtime;
     JS_LOCK_GC(rt);
     js_WaitForGC(rt);
-    JS_REMOVE_AND_INIT_LINK(&cx->threadLinks);
-    cx->thread = NULL;
-    JS_UNLOCK_GC(cx->runtime);
+    js_ClearContextThread(cx);
+    JS_UNLOCK_GC(rt);
     return old;
 #else
     return 0;
 #endif
 }
 
 #ifdef JS_GC_ZEAL
 JS_PUBLIC_API(void)
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -1540,18 +1540,18 @@ struct JSFunctionSpec {
     {name, call, nargs, flags, extra}
 
 /*
  * "Fast native" initializer macro for a JSFunctionSpec array element. Use this
  * in preference to JS_FS if the native in question does not need its own stack
  * frame when activated.
  */
 #define JS_FN(name,fastcall,nargs,flags)                                      \
-    {name, (JSNative)(fastcall), nargs,                                       \
-     (flags) | JSFUN_FAST_NATIVE | JSFUN_STUB_GSOPS, 0}
+    JS_FS(name, (JSNative)(fastcall), nargs,                                  \
+          (flags) | JSFUN_FAST_NATIVE | JSFUN_STUB_GSOPS, 0)
 
 extern JS_PUBLIC_API(JSObject *)
 JS_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto,
              JSClass *clasp, JSNative constructor, uintN nargs,
              JSPropertySpec *ps, JSFunctionSpec *fs,
              JSPropertySpec *static_ps, JSFunctionSpec *static_fs);
 
 #ifdef JS_THREADSAFE
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -233,23 +233,19 @@ js_GetLengthProperty(JSContext *cx, JSOb
             ok = !JSVAL_IS_NULL(tvr.u.value);
         }
     }
     JS_POP_TEMP_ROOT(cx, &tvr);
     return ok;
 }
 
 static JSBool
-IndexToValue(JSContext *cx, jsuint index, jsval *vp)
+IndexToValue(JSContext *cx, jsdouble index, jsval *vp)
 {
-    if (index <= JSVAL_INT_MAX) {
-        *vp = INT_TO_JSVAL(index);
-        return JS_TRUE;
-    }
-    return JS_NewDoubleValue(cx, (jsdouble)index, vp);
+    return js_NewWeaklyRootedNumber(cx, index, vp);
 }
 
 JSBool JS_FASTCALL
 js_IndexToId(JSContext *cx, jsuint index, jsid *idp)
 {
     JSString *str;
 
     if (index <= JSVAL_INT_MAX) {
@@ -379,140 +375,173 @@ EnsureCapacity(JSContext *cx, JSObject *
             capacity = JS_ROUNDUP(capacity + 1, CAPACITY_CHUNK) - 1;  /* -1 for dslots[-1] */
         else if (capacity < ARRAY_CAPACITY_MIN)
             capacity = ARRAY_CAPACITY_MIN;
         return ResizeSlots(cx, obj, oldsize, capacity);
     }
     return JS_TRUE;
 }
 
+static bool
+ReallyBigIndexToId(JSContext* cx, jsdouble index, jsid* idp)
+{
+    JSAutoTempValueRooter dval(cx);
+    if (!js_NewDoubleInRootedValue(cx, index, dval.addr()) ||
+        !js_ValueToStringId(cx, dval.value(), idp)) {
+        return JS_FALSE;
+    }
+    return JS_TRUE;
+}
+
+static bool
+IndexToId(JSContext* cx, JSObject* obj, jsdouble index, JSBool* hole, jsid* idp,
+          JSBool createAtom = JS_FALSE)
+{
+    if (index <= JSVAL_INT_MAX) {
+        *idp = INT_TO_JSID(index);
+        return JS_TRUE;
+    }
+
+    if (index <= jsuint(-1)) {
+        if (!BigIndexToId(cx, obj, jsuint(index), createAtom, idp))
+            return JS_FALSE;
+        if (hole && JSVAL_IS_VOID(*idp))
+            *hole = JS_TRUE;
+        return JS_TRUE;
+    }
+
+    return ReallyBigIndexToId(cx, index, idp);
+}
+
 /*
  * If the property at the given index exists, get its value into location
  * pointed by vp and set *hole to false. Otherwise set *hole to true and *vp
  * to JSVAL_VOID. This function assumes that the location pointed by vp is
  * properly rooted and can be used as GC-protected storage for temporaries.
  */
 static JSBool
-GetArrayElement(JSContext *cx, JSObject *obj, jsuint index, JSBool *hole,
+GetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, JSBool *hole,
                 jsval *vp)
 {
-    jsid id;
-    JSObject *obj2;
-    JSProperty *prop;
-
+    JS_ASSERT(index >= 0);
     if (OBJ_IS_DENSE_ARRAY(cx, obj) && index < js_DenseArrayCapacity(obj) &&
-        (*vp = obj->dslots[index]) != JSVAL_HOLE) {
+        (*vp = obj->dslots[jsuint(index)]) != JSVAL_HOLE) {
         *hole = JS_FALSE;
         return JS_TRUE;
     }
 
-    if (index <= JSVAL_INT_MAX) {
-        id = INT_TO_JSID(index);
-    } else {
-        if (!BigIndexToId(cx, obj, index, JS_FALSE, &id))
-            return JS_FALSE;
-        if (JSVAL_IS_VOID(id)) {
-            *hole = JS_TRUE;
-            *vp = JSVAL_VOID;
-            return JS_TRUE;
-        }
+    JSAutoTempIdRooter idr(cx);
+
+    *hole = JS_FALSE;
+    if (!IndexToId(cx, obj, index, hole, idr.addr()))
+        return JS_FALSE;
+    if (*hole) {
+        *vp = JSVAL_VOID;
+        return JS_TRUE;
     }
 
-    if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+    JSObject *obj2;
+    JSProperty *prop;
+    if (!OBJ_LOOKUP_PROPERTY(cx, obj, idr.id(), &obj2, &prop))
         return JS_FALSE;
     if (!prop) {
         *hole = JS_TRUE;
         *vp = JSVAL_VOID;
     } else {
         OBJ_DROP_PROPERTY(cx, obj2, prop);
-        if (!OBJ_GET_PROPERTY(cx, obj, id, vp))
+        if (!OBJ_GET_PROPERTY(cx, obj, idr.id(), vp))
             return JS_FALSE;
         *hole = JS_FALSE;
     }
     return JS_TRUE;
 }
 
 /*
  * Set the value of the property at the given index to v assuming v is rooted.
  */
 static JSBool
-SetArrayElement(JSContext *cx, JSObject *obj, jsuint index, jsval v)
+SetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, jsval v)
 {
-    jsid id;
+    JS_ASSERT(index >= 0);
 
     if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
-        /* Predicted/prefeched code should favor the remains-dense case. */
-        if (!INDEX_TOO_SPARSE(obj, index)) {
-            if (!EnsureCapacity(cx, obj, index + 1))
-                return JS_FALSE;
-            if (index >= (uint32)obj->fslots[JSSLOT_ARRAY_LENGTH])
-                obj->fslots[JSSLOT_ARRAY_LENGTH] = index + 1;
-            if (obj->dslots[index] == JSVAL_HOLE)
-                obj->fslots[JSSLOT_ARRAY_COUNT]++;
-            obj->dslots[index] = v;
-            return JS_TRUE;
+        /* Predicted/prefetched code should favor the remains-dense case. */
+        if (index <= jsuint(-1)) {
+            jsuint idx = jsuint(index);
+            if (!INDEX_TOO_SPARSE(obj, idx)) {
+                JS_ASSERT(idx + 1 > idx);
+                if (!EnsureCapacity(cx, obj, idx + 1))
+                    return JS_FALSE;
+                if (index >= uint32(obj->fslots[JSSLOT_ARRAY_LENGTH]))
+                    obj->fslots[JSSLOT_ARRAY_LENGTH] = idx + 1;
+                if (obj->dslots[idx] == JSVAL_HOLE)
+                    obj->fslots[JSSLOT_ARRAY_COUNT]++;
+                obj->dslots[idx] = v;
+                return JS_TRUE;
+            }
         }
 
         if (!js_MakeArraySlow(cx, obj))
             return JS_FALSE;
     }
 
-    if (index <= JSVAL_INT_MAX) {
-        id = INT_TO_JSID(index);
-    } else {
-        if (!BigIndexToId(cx, obj, index, JS_TRUE, &id))
-            return JS_FALSE;
-        JS_ASSERT(!JSVAL_IS_VOID(id));
-    }
-    return OBJ_SET_PROPERTY(cx, obj, id, &v);
+    JSAutoTempIdRooter idr(cx);
+
+    if (!IndexToId(cx, obj, index, NULL, idr.addr(), JS_TRUE))
+        return JS_FALSE;
+    JS_ASSERT(!JSVAL_IS_VOID(idr.id()));
+
+    return OBJ_SET_PROPERTY(cx, obj, idr.id(), &v);
 }
 
 static JSBool
-DeleteArrayElement(JSContext *cx, JSObject *obj, jsuint index)
+DeleteArrayElement(JSContext *cx, JSObject *obj, jsdouble index)
 {
-    jsid id;
-    jsval junk;
-
+    JS_ASSERT(index >= 0);
     if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
-        if (index < js_DenseArrayCapacity(obj)) {
-            if (obj->dslots[index] != JSVAL_HOLE)
-                obj->fslots[JSSLOT_ARRAY_COUNT]--;
-            obj->dslots[index] = JSVAL_HOLE;
+        if (index <= jsuint(-1)) {
+            jsuint idx = jsuint(index);
+            if (!INDEX_TOO_SPARSE(obj, idx) && idx < js_DenseArrayCapacity(obj)) {
+                if (obj->dslots[idx] != JSVAL_HOLE)
+                    obj->fslots[JSSLOT_ARRAY_COUNT]--;
+                obj->dslots[idx] = JSVAL_HOLE;
+                return JS_TRUE;
+            }
         }
         return JS_TRUE;
     }
 
-    if (index <= JSVAL_INT_MAX) {
-        id = INT_TO_JSID(index);
-    } else {
-        if (!BigIndexToId(cx, obj, index, JS_FALSE, &id))
-            return JS_FALSE;
-        if (JSVAL_IS_VOID(id))
-            return JS_TRUE;
-    }
-    return OBJ_DELETE_PROPERTY(cx, obj, id, &junk);
+    JSAutoTempIdRooter idr(cx);
+
+    if (!IndexToId(cx, obj, index, NULL, idr.addr()))
+        return JS_FALSE;
+    if (JSVAL_IS_VOID(idr.id()))
+        return JS_TRUE;
+
+    jsval junk;
+    return OBJ_DELETE_PROPERTY(cx, obj, idr.id(), &junk);
 }
 
 /*
  * When hole is true, delete the property at the given index. Otherwise set
  * its value to v assuming v is rooted.
  */
 static JSBool
-SetOrDeleteArrayElement(JSContext *cx, JSObject *obj, jsuint index,
+SetOrDeleteArrayElement(JSContext *cx, JSObject *obj, jsdouble index,
                         JSBool hole, jsval v)
 {
     if (hole) {
         JS_ASSERT(JSVAL_IS_VOID(v));
         return DeleteArrayElement(cx, obj, index);
     }
     return SetArrayElement(cx, obj, index, v);
 }
 
 JSBool
-js_SetLengthProperty(JSContext *cx, JSObject *obj, jsuint length)
+js_SetLengthProperty(JSContext *cx, JSObject *obj, jsdouble length)
 {
     jsval v;
     jsid id;
 
     if (!IndexToValue(cx, length, &v))
         return JS_FALSE;
     id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
     return OBJ_SET_PROPERTY(cx, obj, id, &v);
@@ -1548,36 +1577,71 @@ array_toLocaleString(JSContext *cx, uint
     /*
      *  Passing comma here as the separator. Need a way to get a
      *  locale-specific version.
      */
     return array_join_sub(cx, obj, TO_LOCALE_STRING, NULL, vp);
 }
 
 static JSBool
-InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint end,
-                  jsval *vector)
+InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsval *vector)
 {
-    if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
-        if (!EnsureCapacity(cx, obj, end))
+    JS_ASSERT(count < MAXINDEX);
+    /*
+     * Optimize for dense arrays so long as adding the given set of elements
+     * wouldn't otherwise make the array slow.
+     */
+    if (OBJ_IS_DENSE_ARRAY(cx, obj) && start <= MAXINDEX - count &&
+        !INDEX_TOO_BIG(start + count)) {
+        jsuint newlen = start + count;
+        JS_ASSERT(jsdouble(start) + count == jsdouble(newlen));
+        if (!EnsureCapacity(cx, obj, newlen))
             return JS_FALSE;
 
-        if (end > (uint32)obj->fslots[JSSLOT_ARRAY_LENGTH])
-            obj->fslots[JSSLOT_ARRAY_LENGTH] = end;
-
-        memcpy(obj->dslots + start, vector, sizeof(jsval) * (end - start));
+        if (newlen > uint32(obj->fslots[JSSLOT_ARRAY_LENGTH]))
+            obj->fslots[JSSLOT_ARRAY_LENGTH] = newlen;
+
+        JS_ASSERT(count < size_t(-1) / sizeof(jsval));
+        memcpy(obj->dslots + start, vector, sizeof(jsval) * count);
+        JS_ASSERT_IF(count != 0, obj->dslots[newlen - 1] != JSVAL_HOLE);
         return JS_TRUE;
     }
 
-    while (start != end) {
+    jsval* end = vector + count;
+    while (vector != end && start < MAXINDEX) {
         if (!JS_CHECK_OPERATION_LIMIT(cx) ||
             !SetArrayElement(cx, obj, start++, *vector++)) {
             return JS_FALSE;
         }
     }
+
+    if (vector == end)
+        return JS_TRUE;
+
+    /* Finish out any remaining elements past the max array index. */
+    if (!ENSURE_SLOW_ARRAY(cx, obj))
+        return JS_FALSE;
+
+    JS_ASSERT(start == MAXINDEX);
+    jsval tmp[2] = {JSVAL_NULL, JSVAL_NULL};
+    jsdouble* dp = js_NewWeaklyRootedDouble(cx, MAXINDEX);
+    if (!dp)
+        return JS_FALSE;
+    tmp[0] = DOUBLE_TO_JSVAL(dp);
+    JSAutoTempValueRooter(cx, JS_ARRAY_LENGTH(tmp), tmp);
+    JSAutoTempIdRooter idr(cx);
+    do {
+        tmp[1] = *vector++;
+        if (!js_ValueToStringId(cx, tmp[0], idr.addr()) ||
+            !js_SetProperty(cx, obj, idr.id(), &tmp[1])) {
+            return JS_FALSE;
+        }
+        *dp += 1;
+    } while (vector != end);
+
     return JS_TRUE;
 }
 
 static JSBool
 InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, jsval *vector,
                 JSBool holey = JS_FALSE)
 {
     JS_ASSERT(OBJ_IS_ARRAY(cx, obj));
@@ -2166,25 +2230,25 @@ array_sort(JSContext *cx, uintN argc, js
 }
 
 /*
  * Perl-inspired push, pop, shift, unshift, and splice methods.
  */
 static JSBool
 array_push_slowly(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
 {
-    jsuint length, newlength;
+    jsuint length;
 
     if (!js_GetLengthProperty(cx, obj, &length))
         return JS_FALSE;
-    newlength = length + argc;
-    if (!InitArrayElements(cx, obj, length, newlength, argv))
+    if (!InitArrayElements(cx, obj, length, argc, argv))
         return JS_FALSE;
 
     /* Per ECMA-262, return the new array length. */
+    jsdouble newlength = length + jsdouble(argc);
     if (!IndexToValue(cx, newlength, rval))
         return JS_FALSE;
     return js_SetLengthProperty(cx, obj, newlength);
 }
 
 static JSBool
 array_push1_dense(JSContext* cx, JSObject* obj, jsval v, jsval *rval)
 {
@@ -2369,23 +2433,25 @@ array_shift(JSContext *cx, uintN argc, j
     return js_SetLengthProperty(cx, obj, length);
 }
 
 static JSBool
 array_unshift(JSContext *cx, uintN argc, jsval *vp)
 {
     JSObject *obj;
     jsval *argv;
-    jsuint length, last;
+    jsuint length;
     JSBool hole, ok;
     JSTempValueRooter tvr;
+    jsdouble last, newlen;
 
     obj = JS_THIS_OBJECT(cx, vp);
     if (!obj || !js_GetLengthProperty(cx, obj, &length))
         return JS_FALSE;
+    newlen = length;
     if (argc > 0) {
         /* Slide up the array to make room for argc at the bottom. */
         argv = JS_ARGV(cx, vp);
         if (length > 0) {
             last = length;
             ok = JS_TRUE;
             JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
             do {
@@ -2401,23 +2467,23 @@ array_unshift(JSContext *cx, uintN argc,
             if (!ok)
                 return JS_FALSE;
         }
 
         /* Copy from argv to the bottom of the array. */
         if (!InitArrayElements(cx, obj, 0, argc, argv))
             return JS_FALSE;
 
-        length += argc;
-        if (!js_SetLengthProperty(cx, obj, length))
+        newlen += argc;
+        if (!js_SetLengthProperty(cx, obj, newlen))
             return JS_FALSE;
     }
 
     /* Follow Perl by returning the new array length. */
-    return IndexToValue(cx, length, vp);
+    return IndexToValue(cx, newlen, vp);
 }
 
 static JSBool
 array_splice(JSContext *cx, uintN argc, jsval *vp)
 {
     jsval *argv;
     JSObject *obj;
     jsuint length, begin, end, count, delta, last;
@@ -2528,17 +2594,17 @@ array_splice(JSContext *cx, uintN argc, 
                                          tvr.u.value);
             if (!ok)
                 goto out;
         }
         length -= delta;
     }
 
     /* Copy from argv into the hole to complete the splice. */
-    ok = InitArrayElements(cx, obj, begin, begin + argc, argv);
+    ok = InitArrayElements(cx, obj, begin, argc, argv);
     if (!ok)
         goto out;
 
     /* Update length in case we deleted elements from the end. */
     ok = js_SetLengthProperty(cx, obj, length);
 
 out:
     JS_POP_TEMP_ROOT(cx, &tvr);
--- a/js/src/jsarray.h
+++ b/js/src/jsarray.h
@@ -117,17 +117,17 @@ js_SetDenseArrayCapacity(JSObject *obj, 
     JS_ASSERT(obj->dslots);
     obj->dslots[-1] = (jsval) capacity;
 }
 
 extern JSBool
 js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
 
 extern JSBool
-js_SetLengthProperty(JSContext *cx, JSObject *obj, jsuint length);
+js_SetLengthProperty(JSContext *cx, JSObject *obj, jsdouble length);
 
 extern JSBool
 js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
 
 extern JSBool JS_FASTCALL
 js_IndexToId(JSContext *cx, jsuint index, jsid *idp);
 
 /*
--- a/js/src/jsbuiltins.h
+++ b/js/src/jsbuiltins.h
@@ -422,16 +422,17 @@ JS_DECLARE_CALLINFO(js_Array_dense_setel
 JS_DECLARE_CALLINFO(js_FastNewArray)
 JS_DECLARE_CALLINFO(js_FastNewArrayWithLength)
 JS_DECLARE_CALLINFO(js_NewUninitializedArray)
 
 /* Defined in jsnum.cpp. */
 JS_DECLARE_CALLINFO(js_NumberToString)
 
 /* Defined in jsstr.cpp. */
+JS_DECLARE_CALLINFO(js_String_tn)
 JS_DECLARE_CALLINFO(js_CompareStrings)
 JS_DECLARE_CALLINFO(js_ConcatStrings)
 JS_DECLARE_CALLINFO(js_EqualStrings)
 JS_DECLARE_CALLINFO(js_String_getelem)
 JS_DECLARE_CALLINFO(js_String_p_charCodeAt)
 JS_DECLARE_CALLINFO(js_String_p_charCodeAt0)
 JS_DECLARE_CALLINFO(js_String_p_charCodeAt0_int)
 JS_DECLARE_CALLINFO(js_String_p_charCodeAt_int)
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -64,152 +64,242 @@
 #include "jspubtd.h"
 #include "jsscan.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstaticcheck.h"
 #include "jsstr.h"
 #include "jstracer.h"
 
-#ifdef JS_THREADSAFE
-#include "prtypes.h"
-
-/*
- * The index for JSThread info, returned by PR_NewThreadPrivateIndex.  The
- * index value is visible and shared by all threads, but the data associated
- * with it is private to each thread.
- */
-static PRUintn threadTPIndex;
-static JSBool  tpIndexInited = JS_FALSE;
+static void
+FreeContext(JSContext *cx);
 
-JS_BEGIN_EXTERN_C
-JSBool
-js_InitThreadPrivateIndex(void (*ptr)(void *))
+static void
+InitThreadData(JSThreadData *data)
 {
-    PRStatus status;
-
-    if (tpIndexInited)
-        return JS_TRUE;
-
-    status = PR_NewThreadPrivateIndex(&threadTPIndex, ptr);
+#ifdef DEBUG
+    /* The data must be already zeroed. */
+    for (size_t i = 0; i != sizeof(*data); ++i)
+        JS_ASSERT(reinterpret_cast<uint8*>(data)[i] == 0);
+#endif
+#ifdef JS_TRACER
+    js_InitJIT(&data->traceMonitor);
+#endif
+}
 
-    if (status == PR_SUCCESS)
-        tpIndexInited = JS_TRUE;
-    return status == PR_SUCCESS;
-}
-JS_END_EXTERN_C
+static void
+FinishThreadData(JSThreadData *data)
+{
+#ifdef DEBUG
+    /* All GC-related things must be already removed at this point. */
+    for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i)
+        JS_ASSERT(!data->scriptsToGC[i]);
+#endif
 
-JS_BEGIN_EXTERN_C
-JSBool
-js_CleanupThreadPrivateData()
-{
-    if (!tpIndexInited)
-        return JS_TRUE;
-    return PR_SetThreadPrivate(threadTPIndex, NULL) == PR_SUCCESS;
+    js_FinishGSNCache(&data->gsnCache);
+    js_FinishPropertyCache(&data->propertyCache);
+#if defined JS_TRACER
+    js_FinishJIT(&data->traceMonitor);
+#endif
 }
-JS_END_EXTERN_C
 
-/*
- * Callback function to delete a JSThread info when the thread that owns it
- * is destroyed.
- */
-void
-js_ThreadDestructorCB(void *ptr)
+static void
+PurgeThreadData(JSContext *cx, JSThreadData *data)
 {
-    JSThread *thread = (JSThread *)ptr;
-
-    if (!thread)
-        return;
+# ifdef JS_TRACER
+    JSTraceMonitor *tm = &data->traceMonitor;
+    tm->reservedDoublePoolPtr = tm->reservedDoublePool;
+    tm->needFlush = JS_TRUE;
 
     /*
-     * Check that this thread properly called either JS_DestroyContext or
-     * JS_ClearContextThread on each JSContext it created or used.
+     * We want to keep tm->reservedObjects after the GC. So, unless we are
+     * shutting down, we don't purge them here and rather mark them during
+     * the GC, see MarkReservedObjects in jsgc.cpp.
      */
+    if (cx->runtime->state == JSRTS_LANDING)
+        tm->reservedObjects = NULL;
+# endif
+
+    /* Destroy eval'ed scripts. */
+    js_DestroyScriptsToGC(cx, data);
+
+    js_PurgeGSNCache(&data->gsnCache);
+    js_PurgePropertyCache(cx, &data->propertyCache);
+}
+
+#ifdef JS_THREADSAFE
+
+static JSThread *
+NewThread(jsword id)
+{
+    JS_ASSERT(js_CurrentThreadId() == id);
+    JSThread *thread = (JSThread *) calloc(1, sizeof(JSThread));
+    if (!thread)
+        return NULL;
+    JS_INIT_CLIST(&thread->contextList);
+    thread->id = id;
+    InitThreadData(&thread->data);
+    return thread;
+}
+
+static void
+DestroyThread(JSThread *thread)
+{
+    /* The thread must have zero contexts. */
     JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList));
-    GSN_CACHE_CLEAR(&thread->gsnCache);
-#if defined JS_TRACER
-    js_FinishJIT(&thread->traceMonitor);
-#endif
+    FinishThreadData(&thread->data);
     free(thread);
 }
 
-/*
- * Get current thread-local JSThread info, creating one if it doesn't exist.
- * Each thread has a unique JSThread pointer.
- *
- * Since we are dealing with thread-local data, no lock is needed.
- *
- * Return a pointer to the thread local info, NULL if the system runs out
- * of memory, or it failed to set thread private data (neither case is very
- * likely; both are probably due to out-of-memory).  It is up to the caller
- * to report an error, if possible.
- */
-JSThread *
-js_GetCurrentThread(JSRuntime *rt)
+JSBool
+js_InitContextThread(JSContext *cx)
 {
-    JSThread *thread;
+    JS_ASSERT(!cx->thread);
+    jsword id = js_CurrentThreadId();
+    JSRuntime *rt = cx->runtime;
+    JS_LOCK_GC(rt);
 
-    thread = (JSThread *)PR_GetThreadPrivate(threadTPIndex);
-    if (!thread) {
-        thread = (JSThread *) malloc(sizeof(JSThread));
+    /*
+     * We must not race with a GC that accesses cx->thread for JSContext
+     * instances on all threads, see bug 476934.
+     */
+    js_WaitForGC(rt);
+    JSThreadsHashEntry *entry = (JSThreadsHashEntry *)
+                                JS_DHashTableOperate(&rt->threads,
+                                                     (const void *) id,
+                                                     JS_DHASH_LOOKUP);
+    JSThread *thread;
+    if (JS_DHASH_ENTRY_IS_BUSY(&entry->base)) {
+        thread = entry->thread;
+        JS_ASSERT(thread->id == id);
+    } else {
+        JS_UNLOCK_GC(rt);
+        thread = NewThread(id);
         if (!thread)
-            return NULL;
-#ifdef DEBUG
-        memset(thread, JS_FREE_PATTERN, sizeof(JSThread));
-#endif
-        if (PR_FAILURE == PR_SetThreadPrivate(threadTPIndex, thread)) {
-            free(thread);
-            return NULL;
+            return false;
+        JS_LOCK_GC(rt);
+        js_WaitForGC(rt);
+        entry = (JSThreadsHashEntry *)
+                JS_DHashTableOperate(&rt->threads, (const void *) id,
+                                     JS_DHASH_ADD);
+        if (!entry) {
+            JS_UNLOCK_GC(rt);
+            DestroyThread(thread);
+            return false;
         }
 
-        JS_INIT_CLIST(&thread->contextList);
-        thread->id = js_CurrentThreadId();
-        thread->gcMallocBytes = 0;
-#ifdef JS_TRACER
-        memset(&thread->traceMonitor, 0, sizeof(thread->traceMonitor));
-        js_InitJIT(&thread->traceMonitor);
-#endif
-        memset(thread->scriptsToGC, 0, sizeof thread->scriptsToGC);
-
-        /*
-         * js_InitContextThread initializes the remaining fields as necessary.
-         */
-    }
-    return thread;
-}
-
-/*
- * Sets current thread as owning thread of a context by assigning the
- * thread-private info to the context.
- */
-void
-js_InitContextThread(JSContext *cx, JSThread *thread)
-{
-    JS_ASSERT(CURRENT_THREAD_IS_ME(thread));
-    JS_ASSERT(!cx->thread);
-    JS_ASSERT(cx->requestDepth == 0);
-
-    /*
-     * Clear caches on each transition from 0 to 1 context active on the
-     * current thread. See bug 425828.
-     */
-    if (JS_CLIST_IS_EMPTY(&thread->contextList)) {
-        memset(&thread->gsnCache, 0, sizeof thread->gsnCache);
-        memset(&thread->propertyCache, 0, sizeof thread->propertyCache);
-#ifdef DEBUG
-        memset(&thread->evalCacheMeter, 0, sizeof thread->evalCacheMeter);
-#endif
+        /* Another thread cannot initialize entry->thread. */
+        JS_ASSERT(!entry->thread);
+        entry->thread = thread;
     }
 
     JS_APPEND_LINK(&cx->threadLinks, &thread->contextList);
     cx->thread = thread;
+    return true;
+}
+
+void
+js_ClearContextThread(JSContext *cx)
+{
+    JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
+    JS_REMOVE_AND_INIT_LINK(&cx->threadLinks);
+    cx->thread = NULL;
+}
+
+static JSBool
+thread_matchEntry(JSDHashTable *table,
+                  const JSDHashEntryHdr *hdr,
+                  const void *key)
+{
+    const JSThreadsHashEntry *entry = (const JSThreadsHashEntry *) hdr;
+
+    return entry->thread->id == (jsword) key;
+}
+
+static const JSDHashTableOps threads_ops = {
+    JS_DHashAllocTable,
+    JS_DHashFreeTable,
+    JS_DHashVoidPtrKeyStub,
+    thread_matchEntry,
+    JS_DHashMoveEntryStub,
+    JS_DHashClearEntryStub,
+    JS_DHashFinalizeStub,
+    NULL
+};
+
+static JSDHashOperator
+thread_destroyer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 /* index */,
+                 void * /* arg */)
+{
+    JSThreadsHashEntry *entry = (JSThreadsHashEntry *) hdr;
+    JSThread *thread = entry->thread;
+
+    JS_ASSERT(JS_CLIST_IS_EMPTY(&thread->contextList));
+    DestroyThread(thread);
+    return JS_DHASH_REMOVE;
+}
+
+static JSDHashOperator
+thread_purger(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 /* index */,
+              void *arg)
+{
+    JSContext* cx = (JSContext *) arg;
+    JSThread *thread = ((JSThreadsHashEntry *) hdr)->thread;
+
+    if (JS_CLIST_IS_EMPTY(&thread->contextList)) {
+        JS_ASSERT(cx->thread != thread);
+        js_DestroyScriptsToGC(cx, &thread->data);
+        DestroyThread(thread);
+        return JS_DHASH_REMOVE;
+    }
+    PurgeThreadData(cx, &thread->data);
+    return JS_DHASH_NEXT;
 }
 
 #endif /* JS_THREADSAFE */
 
+JSBool
+js_InitThreads(JSRuntime *rt)
+{
+#ifdef JS_THREADSAFE
+    if (!JS_DHashTableInit(&rt->threads, &threads_ops, NULL,
+                           sizeof(JSThreadsHashEntry), 4)) {
+        rt->threads.ops = NULL;
+        return false;
+    }
+#else
+    InitThreadData(&rt->threadData);
+#endif
+    return true;
+}
+
+void
+js_FinishThreads(JSRuntime *rt)
+{
+#ifdef JS_THREADSAFE
+    if (!rt->threads.ops)
+        return;
+    JS_DHashTableEnumerate(&rt->threads, thread_destroyer, NULL);
+    JS_DHashTableFinish(&rt->threads);
+    rt->threads.ops = NULL;
+#else
+    FinishThreadData(&rt->threadData);
+#endif
+}
+
+void
+js_PurgeThreads(JSContext *cx)
+{
+#ifdef JS_THREADSAFE
+    JS_DHashTableEnumerate(&cx->runtime->threads, thread_purger, cx);
+#else
+    PurgeThreadData(cx, &cx->runtime->threadData);
+#endif
+}
+
 /*
  * JSOPTION_XML and JSOPTION_ANONFUNFIX must be part of the JS version
  * associated with scripts, so in addition to storing them in cx->options we
  * duplicate them in cx->version (script->version, etc.) and ensure each bit
  * remains synchronized between the two through these two functions.
  */
 void
 js_SyncOptionsToVersion(JSContext* cx)
@@ -256,22 +346,16 @@ js_SetVersion(JSContext *cx, JSVersion v
 }
 
 JSContext *
 js_NewContext(JSRuntime *rt, size_t stackChunkSize)
 {
     JSContext *cx;
     JSBool ok, first;
     JSContextCallback cxCallback;
-#ifdef JS_THREADSAFE
-    JSThread *thread = js_GetCurrentThread(rt);
-
-    if (!thread)
-        return NULL;
-#endif
 
     /*
      * We need to initialize the new context fully before adding it to the
      * runtime list. After that it can be accessed from another thread via
      * js_ContextIterator.
      */
     cx = (JSContext *) calloc(1, sizeof *cx);
     if (!cx)
@@ -280,49 +364,63 @@ js_NewContext(JSRuntime *rt, size_t stac
     cx->runtime = rt;
     cx->debugHooks = &rt->globalDebugHooks;
 #if JS_STACK_GROWTH_DIRECTION > 0
     cx->stackLimit = (jsuword) -1;
 #endif
     cx->scriptStackQuota = JS_DEFAULT_SCRIPT_STACK_QUOTA;
 #ifdef JS_THREADSAFE
     cx->gcLocalFreeLists = (JSGCFreeListSet *) &js_GCEmptyFreeListSet;
-    js_InitContextThread(cx, thread);
 #endif
     JS_STATIC_ASSERT(JSVERSION_DEFAULT == 0);
     JS_ASSERT(cx->version == JSVERSION_DEFAULT);
     VOUCH_DOES_NOT_REQUIRE_STACK();
     JS_INIT_ARENA_POOL(&cx->stackPool, "stack", stackChunkSize, sizeof(jsval),
                        &cx->scriptStackQuota);
 
     JS_INIT_ARENA_POOL(&cx->tempPool, "temp",
                        1024,  /* FIXME: bug 421435 */
                        sizeof(jsdouble), &cx->scriptStackQuota);
 
     js_InitRegExpStatics(cx);
     JS_ASSERT(cx->resolveFlags == 0);
 
-    JS_LOCK_GC(rt);
+#ifdef JS_THREADSAFE
+    if (!js_InitContextThread(cx)) {
+        FreeContext(cx);
+        return NULL;
+    }
+#endif
+
+    /*
+     * Here the GC lock is still held after js_InitContextThread took it and
+     * the GC is not running on another thread.
+     */
     for (;;) {
-        /*
-         * Ensure that we don't race with the GC on other threads, bug 478336.
-         */
-        js_WaitForGC(rt);
         if (rt->state == JSRTS_UP) {
             JS_ASSERT(!JS_CLIST_IS_EMPTY(&rt->contextList));
             first = JS_FALSE;
             break;
         }
         if (rt->state == JSRTS_DOWN) {
             JS_ASSERT(JS_CLIST_IS_EMPTY(&rt->contextList));
             first = JS_TRUE;
             rt->state = JSRTS_LAUNCHING;
             break;
         }
         JS_WAIT_CONDVAR(rt->stateChange, JS_NO_TIMEOUT);
+
+        /*
+         * During the above wait after we are notified about the state change
+         * but before we wake up, another thread could enter the GC from
+         * js_DestroyContext, bug 478336. So we must wait here to ensure that
+         * when we exit the loop with the first flag set to true, that GC is
+         * finished.
+         */
+        js_WaitForGC(rt);
     }
     JS_APPEND_LINK(&cx->link, &rt->contextList);
     JS_UNLOCK_GC(rt);
 
     /*
      * If cx is the first context on this runtime, initialize well-known atoms,
      * keywords, numbers, and strings.  If one of these steps should fail, the
      * runtime will be left in a partially initialized state, with zeroes and
@@ -399,17 +497,17 @@ DumpEvalCacheMeter(JSContext *cx)
     struct {
         const char *name;
         ptrdiff_t  offset;
     } table[] = {
 #define frob(x) { #x, offsetof(JSEvalCacheMeter, x) }
         EVAL_CACHE_METER_LIST(frob)
 #undef frob
     };
-    JSEvalCacheMeter *ecm = &JS_CACHE_LOCUS(cx)->evalCacheMeter;
+    JSEvalCacheMeter *ecm = &JS_THREAD_DATA(cx)->evalCacheMeter;
 
     static AutoFile fp;
     if (!fp) {
         fp.open("/tmp/evalcache.stats", "w");
         if (!fp)
             return;
     }
 
@@ -434,19 +532,16 @@ DumpEvalCacheMeter(JSContext *cx)
 #endif
 
 void
 js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
 {
     JSRuntime *rt;
     JSContextCallback cxCallback;
     JSBool last;
-    JSArgumentFormatMap *map;
-    JSLocalRootStack *lrs;
-    JSLocalRootChunk *lrc;
 
 #ifdef JS_THREADSAFE
     JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
 #endif
     rt = cx->runtime;
 
     if (mode != JSDCM_NEW_FAILED) {
         cxCallback = rt->cxCallback;
@@ -473,83 +568,109 @@ js_DestroyContext(JSContext *cx, JSDestr
     if (cx->requestDepth == 0)
         js_WaitForGC(rt);
     js_RevokeGCLocalFreeLists(cx);
 #endif
     JS_REMOVE_LINK(&cx->link);
     last = (rt->contextList.next == &rt->contextList);
     if (last)
         rt->state = JSRTS_LANDING;
-    JS_UNLOCK_GC(rt);
+    if (last || mode == JSDCM_FORCE_GC || mode == JSDCM_MAYBE_GC
+#ifdef JS_THREADSAFE
+        || cx->requestDepth != 0
+#endif
+        ) {
+        JS_UNLOCK_GC(rt);
 
-    if (last) {
+        if (last) {
 #ifdef JS_THREADSAFE
-        /*
-         * If cx is not in a request already, begin one now so that we wait
-         * for any racing GC started on a not-last context to finish, before
-         * we plow ahead and unpin atoms.  Note that even though we begin a
-         * request here if necessary, we end all requests on cx below before
-         * forcing a final GC.  This lets any not-last context destruction
-         * racing in another thread try to force or maybe run the GC, but by
-         * that point, rt->state will not be JSRTS_UP, and that GC attempt
-         * will return early.
-         */
-        if (cx->requestDepth == 0)
-            JS_BeginRequest(cx);
+            /*
+             * If cx is not in a request already, begin one now so that we wait
+             * for any racing GC started on a not-last context to finish, before
+             * we plow ahead and unpin atoms.  Note that even though we begin a
+             * request here if necessary, we end all requests on cx below before
+             * forcing a final GC.  This lets any not-last context destruction
+             * racing in another thread try to force or maybe run the GC, but by
+             * that point, rt->state will not be JSRTS_UP, and that GC attempt
+             * will return early.
+             */
+            if (cx->requestDepth == 0)
+                JS_BeginRequest(cx);
 #endif
 
-        /* Unlock and clear GC things held by runtime pointers. */
-        js_FinishRuntimeNumberState(cx);
-        js_FinishRuntimeStringState(cx);
+            /* Unlock and clear GC things held by runtime pointers. */
+            js_FinishRuntimeNumberState(cx);
+            js_FinishRuntimeStringState(cx);
 
-        /* Unpin all common atoms before final GC. */
-        js_FinishCommonAtoms(cx);
+            /* Unpin all common atoms before final GC. */
+            js_FinishCommonAtoms(cx);
 
-        /* Clear debugging state to remove GC roots. */
-        JS_ClearAllTraps(cx);
-        JS_ClearAllWatchPoints(cx);
-    }
+            /* Clear debugging state to remove GC roots. */
+            JS_ClearAllTraps(cx);
+            JS_ClearAllWatchPoints(cx);
+        }
 
-    /* Remove more GC roots in regExpStatics, then collect garbage. */
-    JS_ClearRegExpRoots(cx);
+        /* Remove more GC roots in regExpStatics, then collect garbage. */
+        JS_ClearRegExpRoots(cx);
 
 #ifdef JS_THREADSAFE
-    /*
-     * Destroying a context implicitly calls JS_EndRequest().  Also, we must
-     * end our request here in case we are "last" -- in that event, another
-     * js_DestroyContext that was not last might be waiting in the GC for our
-     * request to end.  We'll let it run below, just before we do the truly
-     * final GC and then free atom state.
-     */
-    while (cx->requestDepth != 0)
-        JS_EndRequest(cx);
+        /*
+         * Destroying a context implicitly calls JS_EndRequest().  Also, we must
+         * end our request here in case we are "last" -- in that event, another
+         * js_DestroyContext that was not last might be waiting in the GC for our
+         * request to end.  We'll let it run below, just before we do the truly
+         * final GC and then free atom state.
+         */
+        while (cx->requestDepth != 0)
+            JS_EndRequest(cx);
 #endif
 
-    if (last) {
-        js_GC(cx, GC_LAST_CONTEXT);
-        DUMP_EVAL_CACHE_METER(cx);
+        if (last) {
+            js_GC(cx, GC_LAST_CONTEXT);
+            DUMP_EVAL_CACHE_METER(cx);
 
-        /*
-         * Free the script filename table if it exists and is empty. Do this
-         * after the last GC to avoid finalizers tripping on free memory.
-         */
-        if (rt->scriptFilenameTable && rt->scriptFilenameTable->nentries == 0)
-            js_FinishRuntimeScriptState(rt);
+            /*
+             * Free the script filename table if it exists and is empty. Do this
+             * after the last GC to avoid finalizers tripping on free memory.
+             */
+            if (rt->scriptFilenameTable &&
+                rt->scriptFilenameTable->nentries == 0) {
+                js_FinishRuntimeScriptState(rt);
+            }
 
-        /* Take the runtime down, now that it has no contexts or atoms. */
-        JS_LOCK_GC(rt);
-        rt->state = JSRTS_DOWN;
-        JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
-        JS_UNLOCK_GC(rt);
-    } else {
-        if (mode == JSDCM_FORCE_GC)
-            js_GC(cx, GC_NORMAL);
-        else if (mode == JSDCM_MAYBE_GC)
-            JS_MaybeGC(cx);
+            /* Take the runtime down, now that it has no contexts or atoms. */
+            JS_LOCK_GC(rt);
+            rt->state = JSRTS_DOWN;
+            JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
+        } else {
+            if (mode == JSDCM_FORCE_GC)
+                js_GC(cx, GC_NORMAL);
+            else if (mode == JSDCM_MAYBE_GC)
+                JS_MaybeGC(cx);
+            JS_LOCK_GC(rt);
+            js_WaitForGC(rt);
+        }
     }
+#ifdef JS_THREADSAFE
+    js_ClearContextThread(cx);
+#endif
+    JS_UNLOCK_GC(rt);
+    FreeContext(cx);
+}
+
+static void
+FreeContext(JSContext *cx)
+{
+    JSArgumentFormatMap *map;
+    JSLocalRootStack *lrs;
+    JSLocalRootChunk *lrc;
+
+#ifdef JS_THREADSAFE
+    JS_ASSERT(!cx->thread);
+#endif
 
     /* Free the stuff hanging off of cx. */
     js_FreeRegExpStatics(cx);
     VOUCH_DOES_NOT_REQUIRE_STACK();
     JS_FinishArenaPool(&cx->stackPool);
     JS_FinishArenaPool(&cx->tempPool);
 
     if (cx->lastMessage)
@@ -573,26 +694,16 @@ js_DestroyContext(JSContext *cx, JSDestr
     if (lrs) {
         while ((lrc = lrs->topChunk) != &lrs->firstChunk) {
             lrs->topChunk = lrc->down;
             JS_free(cx, lrc);
         }
         JS_free(cx, lrs);
     }
 
-#ifdef JS_THREADSAFE
-    /*
-     * Since cx is not on rt->contextList, it cannot be accessed by the GC
-     * running on another thread. Thus, compared with JS_ClearContextThread,
-     * we can safely unlink cx from from JSThread.contextList without taking
-     * the GC lock.
-     */
-    JS_REMOVE_LINK(&cx->threadLinks);
-#endif
-
     /* Finally, free cx itself. */
     free(cx);
 }
 
 JSBool
 js_ValidContextPointer(JSRuntime *rt, JSContext *cx)
 {
     JSCList *cl;
@@ -1499,8 +1610,38 @@ js_GetScriptedCaller(JSContext *cx, JSSt
         fp = js_GetTopStackFrame(cx);
     while (fp) {
         if (fp->script)
             return fp;
         fp = fp->down;
     }
     return NULL;
 }
+
+jsbytecode*
+js_GetCurrentBytecodePC(JSContext* cx)
+{
+    jsbytecode *pc, *imacpc;
+
+#ifdef JS_TRACER
+    if (JS_ON_TRACE(cx)) {
+        pc = cx->bailExit->pc;
+        imacpc = cx->bailExit->imacpc;
+    } else
+#endif
+    {
+        JS_ASSERT_NOT_ON_TRACE(cx);  /* for static analysis */
+        JSStackFrame* fp = cx->fp;
+        if (fp && fp->regs) {
+            pc = fp->regs->pc;
+            imacpc = fp->imacpc;
+        } else {
+            return NULL;
+        }
+    }
+
+    /*
+     * If we are inside GetProperty_tn or similar, return a pointer to the
+     * current instruction in the script, not the CALL instruction in the
+     * imacro, for the benefit of callers doing bytecode inspection.
+     */
+    return (*pc == JSOP_CALL && imacpc) ? imacpc : pc;
+}
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -67,35 +67,30 @@ JS_BEGIN_EXTERN_C
  */
 typedef struct JSGSNCache {
     jsbytecode      *code;
     JSDHashTable    table;
 #ifdef JS_GSNMETER
     uint32          hits;
     uint32          misses;
     uint32          fills;
-    uint32          clears;
+    uint32          purges;
 # define GSN_CACHE_METER(cache,cnt) (++(cache)->cnt)
 #else
 # define GSN_CACHE_METER(cache,cnt) /* nothing */
 #endif
 } JSGSNCache;
 
-#define GSN_CACHE_CLEAR(cache)                                                \
-    JS_BEGIN_MACRO                                                            \
-        (cache)->code = NULL;                                                 \
-        if ((cache)->table.ops) {                                             \
-            JS_DHashTableFinish(&(cache)->table);                             \
-            (cache)->table.ops = NULL;                                        \
-        }                                                                     \
-        GSN_CACHE_METER(cache, clears);                                       \
-    JS_END_MACRO
+#define js_FinishGSNCache(cache) js_PurgeGSNCache(cache)
+
+extern void
+js_PurgeGSNCache(JSGSNCache *cache);
 
 /* These helper macros take a cx as parameter and operate on its GSN cache. */
-#define JS_CLEAR_GSN_CACHE(cx)      GSN_CACHE_CLEAR(&JS_GSN_CACHE(cx))
+#define JS_PURGE_GSN_CACHE(cx)      js_PurgeGSNCache(&JS_GSN_CACHE(cx))
 #define JS_METER_GSN_CACHE(cx,cnt)  GSN_CACHE_METER(&JS_GSN_CACHE(cx), cnt)
 
 typedef struct InterpState InterpState;
 typedef struct VMSideExit VMSideExit;
 
 #ifdef __cplusplus
 namespace nanojit {
     class Fragment;
@@ -120,17 +115,17 @@ struct GlobalState {
     CLS(SlotList)           globalSlots;
 };
 
 /*
  * Trace monitor. Every JSThread (if JS_THREADSAFE) or JSRuntime (if not
  * JS_THREADSAFE) has an associated trace monitor that keeps track of loop
  * frequencies for all JavaScript code loaded into that runtime.
  */
-typedef struct JSTraceMonitor {
+struct JSTraceMonitor {
     /*
      * Flag set when running (or recording) JIT-compiled code. This prevents
      * both interpreter activation and last-ditch garbage collection when up
      * against our runtime's memory limits. This flag also suppresses calls to
      * JS_ReportOutOfMemory when failing due to runtime limits.
      *
      * !onTrace && !recorder: not on trace.
      * onTrace && recorder: recording a trace.
@@ -171,17 +166,17 @@ typedef struct JSTraceMonitor {
     /* Fragmento for the regular expression compiler. This is logically
      * a distinct compiler but needs to be managed in exactly the same
      * way as the real tracing Fragmento. */
     CLS(nanojit::LirBuffer) reLirBuf;
     CLS(nanojit::Fragmento) reFragmento;
 
     /* Keep a list of recorders we need to abort on cache flush. */
     CLS(TraceRecorder)      abortStack;
-} JSTraceMonitor;
+};
 
 typedef struct InterpStruct InterpStruct;
 
 #ifdef JS_TRACER
 # define JS_ON_TRACE(cx)            (JS_TRACE_MONITOR(cx).onTrace)
 #else
 # define JS_ON_TRACE(cx)            JS_FALSE
 #endif
@@ -201,21 +196,41 @@ typedef struct InterpStruct InterpStruct
 # define ID(x)                      x
 
 /* Have to typedef this for LiveConnect C code, which includes us. */
 typedef struct JSEvalCacheMeter {
     uint64 EVAL_CACHE_METER_LIST(ID);
 } JSEvalCacheMeter;
 
 # undef ID
-# define DECLARE_EVAL_CACHE_METER   JSEvalCacheMeter evalCacheMeter;
-#else
-# define DECLARE_EVAL_CACHE_METER   /* nothing */
 #endif
 
+struct JSThreadData {
+    /*
+     * The GSN cache is per thread since even multi-cx-per-thread embeddings
+     * do not interleave js_GetSrcNote calls.
+     */
+    JSGSNCache          gsnCache;
+
+    /* Property cache for faster call/get/set invocation. */
+    JSPropertyCache     propertyCache;
+
+#ifdef JS_TRACER
+    /* Trace-tree JIT recorder/interpreter state. */
+    JSTraceMonitor      traceMonitor;
+#endif
+
+    /* Lock-free hashed lists of scripts created by eval to garbage-collect. */
+    JSScript            *scriptsToGC[JS_EVAL_CACHE_SIZE];
+
+#ifdef JS_EVAL_CACHE_METERING
+    JSEvalCacheMeter    evalCacheMeter;
+#endif
+};
+
 #ifdef JS_THREADSAFE
 
 /*
  * Structure uniquely representing a thread.  It holds thread-private data
  * that can be accessed without a global lock.
  */
 struct JSThread {
     /* Linked list of all contexts active on this thread. */
@@ -225,49 +240,39 @@ struct JSThread {
     jsword              id;
 
     /*
      * Thread-local version of JSRuntime.gcMallocBytes to avoid taking
      * locks on each JS_malloc.
      */
     uint32              gcMallocBytes;
 
-    /*
-     * Store the GSN cache in struct JSThread, not struct JSContext, both to
-     * save space and to simplify cleanup in js_GC.  Any embedding (Firefox
-     * or another Gecko application) that uses many contexts per thread is
-     * unlikely to interleave js_GetSrcNote-intensive loops in the decompiler
-     * among two or more contexts running script in one thread.
-     */
-    JSGSNCache          gsnCache;
+    JSThreadData        data;
+};
 
-    /* Property cache for faster call/get/set invocation. */
-    JSPropertyCache     propertyCache;
+#define JS_THREAD_DATA(cx)      (&(cx)->thread->data)
 
-#ifdef JS_TRACER
-    /* Trace-tree JIT recorder/interpreter state. */
-    JSTraceMonitor      traceMonitor;
-#endif
-
-    /* Lock-free hashed lists of scripts created by eval to garbage-collect. */
-    JSScript            *scriptsToGC[JS_EVAL_CACHE_SIZE];
-
-    DECLARE_EVAL_CACHE_METER
+struct JSThreadsHashEntry {
+    JSDHashEntryHdr     base;
+    JSThread            *thread;
 };
 
-#define JS_CACHE_LOCUS(cx)      ((cx)->thread)
-
-extern void
-js_ThreadDestructorCB(void *ptr);
+/*
+ * The function takes the GC lock and does not release in successful return.
+ * On error (out of memory) the function releases the lock but delegates
+ * the error reporting to the caller.
+ */
+extern JSBool
+js_InitContextThread(JSContext *cx);
 
+/*
+ * On entrance the GC lock must be held and it will be held on exit.
+ */
 extern void
-js_InitContextThread(JSContext *cx, JSThread *thread);
-
-extern JSThread *
-js_GetCurrentThread(JSRuntime *rt);
+js_ClearContextThread(JSContext *cx);
 
 #endif /* JS_THREADSAFE */
 
 typedef enum JSDestroyContextMode {
     JSDCM_NO_GC,
     JSDCM_MAYBE_GC,
     JSDCM_FORCE_GC,
     JSDCM_NEW_FAILED
@@ -469,16 +474,18 @@ struct JSRuntime {
 
     /*
      * Lock serializing trapList and watchPointList accesses, and count of all
      * mutations to trapList and watchPointList made by debugger threads.  To
      * keep the code simple, we define debuggerMutations for the thread-unsafe
      * case too.
      */
     PRLock              *debuggerLock;
+
+    JSDHashTable        threads;
 #endif /* JS_THREADSAFE */
     uint32              debuggerMutations;
 
     /*
      * Security callbacks set on the runtime are used by each context unless
      * an override is set on the context.
      */
     JSSecurityCallbacks *securityCallbacks;
@@ -518,36 +525,19 @@ struct JSRuntime {
 
     /*
      * A helper list for the GC, so it can mark native iterator states. See
      * js_TraceNativeEnumerators for details.
      */
     JSNativeEnumerator  *nativeEnumerators;
 
 #ifndef JS_THREADSAFE
-    /*
-     * For thread-unsafe embeddings, the GSN cache lives in the runtime and
-     * not each context, since we expect it to be filled once when decompiling
-     * a longer script, then hit repeatedly as js_GetSrcNote is called during
-     * the decompiler activation that filled it.
-     */
-    JSGSNCache          gsnCache;
+    JSThreadData        threadData;
 
-    /* Property cache for faster call/get/set invocation. */
-    JSPropertyCache     propertyCache;
-
-    /* Trace-tree JIT recorder/interpreter state. */
-    JSTraceMonitor      traceMonitor;
-
-    /* Lock-free hashed lists of scripts created by eval to garbage-collect. */
-    JSScript            *scriptsToGC[JS_EVAL_CACHE_SIZE];
-
-    DECLARE_EVAL_CACHE_METER
-
-#define JS_CACHE_LOCUS(cx)      ((cx)->runtime)
+#define JS_THREAD_DATA(cx)      (&(cx)->runtime->threadData)
 #endif
 
     /*
      * Object shape (property cache structural type) identifier generator.
      *
      * Type 0 stands for the empty scope, and must not be regenerated due to
      * uint32 wrap-around. Since we use atomic pre-increment, the initial
      * value for the first typed non-empty scope will be 1.
@@ -647,23 +637,23 @@ struct JSRuntime {
 #endif
 
 #ifdef JS_GCMETER
     JSGCStats           gcStats;
 #endif
 };
 
 /* Common macros to access thread-local caches in JSThread or JSRuntime. */
-#define JS_GSN_CACHE(cx)        (JS_CACHE_LOCUS(cx)->gsnCache)
-#define JS_PROPERTY_CACHE(cx)   (JS_CACHE_LOCUS(cx)->propertyCache)
-#define JS_TRACE_MONITOR(cx)    (JS_CACHE_LOCUS(cx)->traceMonitor)
-#define JS_SCRIPTS_TO_GC(cx)    (JS_CACHE_LOCUS(cx)->scriptsToGC)
+#define JS_GSN_CACHE(cx)        (JS_THREAD_DATA(cx)->gsnCache)
+#define JS_PROPERTY_CACHE(cx)   (JS_THREAD_DATA(cx)->propertyCache)
+#define JS_TRACE_MONITOR(cx)    (JS_THREAD_DATA(cx)->traceMonitor)
+#define JS_SCRIPTS_TO_GC(cx)    (JS_THREAD_DATA(cx)->scriptsToGC)
 
 #ifdef JS_EVAL_CACHE_METERING
-# define EVAL_CACHE_METER(x)    (JS_CACHE_LOCUS(cx)->evalCacheMeter.x++)
+# define EVAL_CACHE_METER(x)    (JS_THREAD_DATA(cx)->evalCacheMeter.x++)
 #else
 # define EVAL_CACHE_METER(x)    ((void) 0)
 #endif
 #undef DECLARE_EVAL_CACHE_METER
 
 #ifdef DEBUG
 # define JS_RUNTIME_METER(rt, which)    JS_ATOMIC_INCREMENT(&(rt)->which)
 # define JS_RUNTIME_UNMETER(rt, which)  JS_ATOMIC_DECREMENT(&(rt)->which)
@@ -1137,32 +1127,24 @@ class JSAutoResolveFlags
 #define JSVERSION_ANONFUNFIX            0x2000  /* see jsapi.h, the comments
                                                    for JSOPTION_ANONFUNFIX */
 
 #define JSVERSION_NUMBER(cx)            ((JSVersion)((cx)->version &          \
                                                      JSVERSION_MASK))
 #define JS_HAS_XML_OPTION(cx)           ((cx)->version & JSVERSION_HAS_XML || \
                                          JSVERSION_NUMBER(cx) >= JSVERSION_1_6)
 
-/*
- * Initialize a library-wide thread private data index, and remember that it
- * has already been done, so that it happens only once ever.  Returns true on
- * success.
- */
 extern JSBool
-js_InitThreadPrivateIndex(void (*ptr)(void *));
+js_InitThreads(JSRuntime *rt);
 
-/*
- * Clean up thread-private data on the current thread. NSPR automatically
- * cleans up thread-private data for every thread except the main thread
- * (see bug 383977) on shutdown. Thus, this function should be called for 
- * exactly those threads that survive JS_ShutDown, including the main thread.
- */
-extern JSBool
-js_CleanupThreadPrivateData();
+extern void
+js_FinishThreads(JSRuntime *rt);
+
+extern void
+js_PurgeThreads(JSContext *cx);
 
 /*
  * Ensures the JSOPTION_XML and JSOPTION_ANONFUNFIX bits of cx->options are
  * reflected in cx->version, since each bit must travel with a script that has
  * it set.
  */
 extern void
 js_SyncOptionsToVersion(JSContext *cx);
@@ -1383,16 +1365,19 @@ extern JSErrorFormatString js_ErrorForma
  * is to be terminated.
  */
 extern JSBool
 js_InvokeOperationCallback(JSContext *cx);
 
 extern JSStackFrame *
 js_GetScriptedCaller(JSContext *cx, JSStackFrame *fp);
 
+extern jsbytecode*
+js_GetCurrentBytecodePC(JSContext* cx);
+
 #ifdef JS_TRACER
 /*
  * Reconstruct the JS stack and clear cx->onTrace. We must be currently
  * executing a _FAIL builtin from trace on cx. The machine code for the trace
  * remains on the C stack when js_DeepBail returns.
  *
  * Implemented in jstracer.cpp.
  */
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -119,17 +119,17 @@ js_UntrapScriptCode(JSContext *cx, JSScr
                 for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn))
                     continue;
                 nbytes += (sn - notes + 1) * sizeof *sn;
 
                 code = (jsbytecode *) JS_malloc(cx, nbytes);
                 if (!code)
                     break;
                 memcpy(code, script->code, nbytes);
-                JS_CLEAR_GSN_CACHE(cx);
+                JS_PURGE_GSN_CACHE(cx);
             }
             code[trap->pc - script->code] = trap->op;
         }
     }
     DBG_UNLOCK(rt);
     return code;
 }
 
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -2708,9 +2708,13 @@ js_FreezeLocalNames(JSContext *cx, JSFun
     n = fun->nargs + fun->u.i.nvars + fun->u.i.nupvars;
     if (2 <= n && n < MAX_ARRAY_LOCALS) {
         /* Shrink over-allocated array ignoring realloc failures. */
         array = (jsuword *) JS_realloc(cx, fun->u.i.names.array,
                                        n * sizeof *array);
         if (array)
             fun->u.i.names.array = array;
     }
+#ifdef DEBUG
+    if (n > MAX_ARRAY_LOCALS)
+        JS_DHashMarkTableImmutable(&fun->u.i.names.map->names);
+#endif
 }
--- a/js/src/jsfun.h
+++ b/js/src/jsfun.h
@@ -116,18 +116,18 @@ struct JSFunction {
 
 /*
  * Traceable native.  This expands to a JSFunctionSpec initializer (like JS_FN
  * in jsapi.h).  fastcall is a JSFastNative; trcinfo is a JSTraceableNative *.
  */
 #ifdef JS_TRACER
 /* MSVC demands the intermediate (void *) cast here. */
 # define JS_TN(name,fastcall,nargs,flags,trcinfo)                             \
-    {name, JS_DATA_TO_FUNC_PTR(JSNative, trcinfo), nargs,                     \
-     (flags) | JSFUN_FAST_NATIVE | JSFUN_STUB_GSOPS | JSFUN_TRACEABLE, 0}
+    JS_FN(name, JS_DATA_TO_FUNC_PTR(JSNative, trcinfo), nargs,                \
+          (flags) | JSFUN_FAST_NATIVE | JSFUN_STUB_GSOPS | JSFUN_TRACEABLE)
 #else
 # define JS_TN(name,fastcall,nargs,flags,trcinfo)                             \
     JS_FN(name, fastcall, nargs, flags)
 #endif
 
 extern JSClass js_ArgumentsClass;
 extern JS_FRIEND_DATA(JSClass) js_CallClass;
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -3105,34 +3105,44 @@ js_TraceContext(JSTracer *trc, JSContext
     }
 
     if (acx->sharpObjectMap.depth > 0)
         js_TraceSharpMap(trc, &acx->sharpObjectMap);
 
     js_TraceRegExpStatics(trc, acx);
 }
 
-void
-js_TraceTraceMonitor(JSTracer *trc, JSTraceMonitor *tm)
+#ifdef JS_TRACER
+
+static void
+MarkReservedObjects(JSTraceMonitor *tm)
 {
-    if (IS_GC_MARKING_TRACER(trc)) {
-        tm->reservedDoublePoolPtr = tm->reservedDoublePool;
-
-        tm->needFlush = JS_TRUE;
-
-        /* Keep the reserved objects. */
-        for (JSObject *obj = tm->reservedObjects; obj; obj = JSVAL_TO_OBJECT(obj->fslots[0])) {
-            uint8 *flagp = GetGCThingFlags(obj);
-            JS_ASSERT((*flagp & GCF_TYPEMASK) == GCX_OBJECT);
-            JS_ASSERT(*flagp != GCF_FINAL);
-            *flagp |= GCF_MARK;
-        }
+    /* Keep the reserved objects. */
+    for (JSObject *obj = tm->reservedObjects; obj; obj = JSVAL_TO_OBJECT(obj->fslots[0])) {
+        uint8 *flagp = GetGCThingFlags(obj);
+        JS_ASSERT((*flagp & GCF_TYPEMASK) == GCX_OBJECT);
+        JS_ASSERT(*flagp != GCF_FINAL);
+        *flagp |= GCF_MARK;
     }
 }
 
+#ifdef JS_THREADSAFE
+static JSDHashOperator
+reserved_objects_marker(JSDHashTable *table, JSDHashEntryHdr *hdr,
+                        uint32, void *)
+{
+    JSThread *thread = ((JSThreadsHashEntry *) hdr)->thread;
+
+    MarkReservedObjects(&thread->data.traceMonitor);
+    return JS_DHASH_NEXT;
+}
+#endif
+
+#endif
+
 JS_REQUIRES_STACK void
 js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
 {
     JSRuntime *rt = trc->context->runtime;
     JSContext *iter, *acx;
 
     JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
     if (rt->gcLocksHash)
@@ -3149,26 +3159,25 @@ js_TraceRuntime(JSTracer *trc, JSBool al
         rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
 
 #ifdef JS_TRACER
     for (int i = 0; i < JSBUILTIN_LIMIT; i++) {
         if (rt->builtinFunctions[i])
             JS_CALL_OBJECT_TRACER(trc, rt->builtinFunctions[i], "builtin function");
     }
 
+    /* Mark the reserved objects unless we are shutting down. */
+    if (IS_GC_MARKING_TRACER(trc) && rt->state != JSRTS_LANDING) {
 #ifdef JS_THREADSAFE
-    /* Trace the loop table(s) which can contain pointers to code objects. */
-   while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
-       if (!acx->thread)
-           continue;
-       js_TraceTraceMonitor(trc, &acx->thread->traceMonitor);
-   }
+        JS_DHashTableEnumerate(&rt->threads, reserved_objects_marker, NULL);
 #else
-   js_TraceTraceMonitor(trc, &rt->traceMonitor);
+        MarkReservedObjects(&rt->threadData.traceMonitor);
 #endif
+    }
+
 #endif
 }
 
 static void
 ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
 {
     JSObject *obj, *pobj;
     uint32 slot;
@@ -3237,25 +3246,28 @@ ProcessSetSlotRequest(JSContext *cx, JSS
         }
     }
 
     /* Finally, do the deed. */
     STOBJ_SET_SLOT(obj, slot, OBJECT_TO_JSVAL(pobj));
     STOBJ_SET_DELEGATE(pobj);
 }
 
-static void
-DestroyScriptsToGC(JSContext *cx, JSScript **listp)
+void
+js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data)
 {
-    JSScript *script;
-
-    while ((script = *listp) != NULL) {
-        *listp = script->u.nextToGC;
-        script->u.nextToGC = NULL;
-        js_DestroyScript(cx, script);
+    JSScript **listp, *script;
+
+    for (size_t i = 0; i != JS_ARRAY_LENGTH(data->scriptsToGC); ++i) {
+        listp = &data->scriptsToGC[i];
+        while ((script = *listp) != NULL) {
+            *listp = script->u.nextToGC;
+            script->u.nextToGC = NULL;
+            js_DestroyScript(cx, script);
+        }
     }
 }
 
 /*
  * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
  * rt->gcLock already held, so the lock should be kept on return.
  */
 void
@@ -3277,17 +3289,24 @@ js_GC(JSContext *cx, JSGCInvocationKind 
     JSContext *acx, *iter;
 #endif
 #ifdef JS_GCMETER
     uint32 nlivearenas, nkilledarenas, nthings;
 #endif
 
     JS_ASSERT_IF(gckind == GC_LAST_DITCH, !JS_ON_TRACE(cx));
     rt = cx->runtime;
+
 #ifdef JS_THREADSAFE
+    /*
+     * We allow js_GC calls outside a request but the context must be bound
+     * to the current thread.
+     */
+    JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
+
     /* Avoid deadlock. */
     JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
 #endif
 
     if (gckind & GC_KEEP_ATOMS) {
         /*
          * The set slot request and last ditch GC kinds preserve all atoms and
          * weak roots.
@@ -3353,45 +3372,33 @@ js_GC(JSContext *cx, JSGCInvocationKind 
         if (!(gckind & GC_LOCK_HELD))
             JS_UNLOCK_GC(rt);
         return;
     }
 
     /*
      * If we're in one or more requests (possibly on more than one context)
      * running on the current thread, indicate, temporarily, that all these
-     * requests are inactive.  If cx->thread is NULL, then cx is not using
-     * the request model, and does not contribute to rt->requestCount.
+     * requests are inactive.
      */
     requestDebit = 0;
-    if (cx->thread) {
+    {
         JSCList *head, *link;
 
         /*
          * Check all contexts on cx->thread->contextList for active requests,
          * counting each such context against requestDebit.
          */
         head = &cx->thread->contextList;
         for (link = head->next; link != head; link = link->next) {
             acx = CX_FROM_THREAD_LINKS(link);
             JS_ASSERT(acx->thread == cx->thread);
             if (acx->requestDepth)
                 requestDebit++;
         }
-    } else {
-        /*
-         * We assert, but check anyway, in case someone is misusing the API.
-         * Avoiding the loop over all of rt's contexts is a win in the event
-         * that the GC runs only on request-less contexts with null threads,
-         * in a special thread such as might be used by the UI/DOM/Layout
-         * "mozilla" or "main" thread in Mozilla-the-browser.
-         */
-        JS_ASSERT(cx->requestDepth == 0);
-        if (cx->requestDepth)
-            requestDebit = 1;
     }
     if (requestDebit) {
         JS_ASSERT(requestDebit <= rt->requestCount);
         rt->requestCount -= requestDebit;
         if (rt->requestCount == 0)
             JS_NOTIFY_REQUEST_DONE(rt);
     }
 
@@ -3491,53 +3498,20 @@ js_GC(JSContext *cx, JSGCInvocationKind 
     rt->gcMallocBytes = 0;
 
 #ifdef JS_DUMP_SCOPE_METERS
   { extern void js_DumpScopeMeters(JSRuntime *rt);
     js_DumpScopeMeters(rt);
   }
 #endif
 
-    /* Clear property and JIT oracle caches (only for cx->thread if JS_THREADSAFE). */
-    js_FlushPropertyCache(cx);
 #ifdef JS_TRACER
-    js_FlushJITOracle(cx);
+    js_PurgeJITOracle();
 #endif
-
-    /* Destroy eval'ed scripts. */
-    for (i = 0; i < JS_ARRAY_LENGTH(JS_SCRIPTS_TO_GC(cx)); i++)
-        DestroyScriptsToGC(cx, &JS_SCRIPTS_TO_GC(cx)[i]);
-
-#ifdef JS_THREADSAFE
-    /*
-     * Clear thread-based caches. To avoid redundant clearing we unroll the
-     * current thread's step.
-     *
-     * In case a JSScript wrapped within an object was finalized, we null
-     * acx->thread->gsnCache.script and finish the cache's hashtable. Note
-     * that js_DestroyScript, called from script_finalize, will have already
-     * cleared cx->thread->gsnCache above during finalization, so we don't
-     * have to here.
-     */
-    iter = NULL;
-    while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
-        if (!acx->thread || acx->thread == cx->thread)
-            continue;
-        GSN_CACHE_CLEAR(&acx->thread->gsnCache);
-        js_FlushPropertyCache(acx);
-#ifdef JS_TRACER
-        js_FlushJITOracle(acx);
-#endif
-        for (i = 0; i < JS_ARRAY_LENGTH(acx->thread->scriptsToGC); i++)
-            DestroyScriptsToGC(cx, &acx->thread->scriptsToGC[i]);
-    }
-#else
-    /* The thread-unsafe case just has to clear the runtime's GSN cache. */
-    GSN_CACHE_CLEAR(&rt->gsnCache);
-#endif
+    js_PurgeThreads(cx);
 
   restart:
     rt->gcNumber++;
     JS_ASSERT(!rt->gcUntracedArenaStackTop);
     JS_ASSERT(rt->gcTraceLaterCount == 0);
 
     /* Reset the property cache's type id generator so we can compress ids. */
     rt->shapeGen = 0;
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -335,16 +335,19 @@ struct JSGCFreeListSet {
     JSGCFreeListSet     *link;
 };
 
 extern const JSGCFreeListSet js_GCEmptyFreeListSet;
 
 extern void
 js_RevokeGCLocalFreeLists(JSContext *cx);
 
+extern void
+js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data);
+
 struct JSWeakRoots {
     /* Most recently created things by type, members of the GC's root set. */
     void            *newborn[GCX_NTYPES];
 
     /* Atom root for the last-looked-up atom on this context. */
     jsval           lastAtom;
 
     /* Root for the result of the most recent js_InternalInvoke call. */
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -165,17 +165,23 @@ js_FillPropertyCache(JSContext *cx, JSOb
     if (protoIndex != 0) {
         JSObject *tmp;
 
         JS_ASSERT(pobj != obj);
         protoIndex = 1;
         tmp = obj;
         for (;;) {
             tmp = OBJ_GET_PROTO(cx, tmp);
-            if (!tmp) {
+
+            /*
+             * We cannot cache properties coming from native objects behind
+             * non-native ones on the prototype chain. The non-natives can
+             * mutate in arbitrary way without changing any shapes.
+             */
+            if (!tmp || !OBJ_IS_NATIVE(tmp)) {
                 PCMETER(cache->noprotos++);
                 *entryp = NULL;
                 return;
             }
             if (tmp == pobj)
                 break;
             ++protoIndex;
         }
@@ -421,21 +427,18 @@ js_FullTestPropertyCache(JSContext *cx, 
     JS_END_MACRO
 #else
 #define ASSERT_CACHE_IS_EMPTY(cache) ((void)0)
 #endif
 
 JS_STATIC_ASSERT(PCVAL_NULL == 0);
 
 void
-js_FlushPropertyCache(JSContext *cx)
+js_PurgePropertyCache(JSContext *cx, JSPropertyCache *cache)
 {
-    JSPropertyCache *cache;
-
-    cache = &JS_PROPERTY_CACHE(cx);
     if (cache->empty) {
         ASSERT_CACHE_IS_EMPTY(cache);
         return;
     }
 
     memset(cache->table, 0, sizeof cache->table);
     cache->empty = JS_TRUE;
 
@@ -493,17 +496,17 @@ js_FlushPropertyCache(JSContext *cx)
     }
   }
 #endif
 
     PCMETER(cache->flushes++);
 }
 
 void
-js_FlushPropertyCacheForScript(JSContext *cx, JSScript *script)
+js_PurgePropertyCacheForScript(JSContext *cx, JSScript *script)
 {
     JSPropertyCache *cache;
     JSPropCacheEntry *entry;
 
     cache = &JS_PROPERTY_CACHE(cx);
     for (entry = cache->table; entry < cache->table + PROPERTY_CACHE_SIZE;
          entry++) {
         if (JS_UPTRDIFF(entry->kpc, script->code) < script->length) {
@@ -559,16 +562,19 @@ AllocateAfterSP(JSContext *cx, jsval *sp
     return JS_TRUE;
 }
 
 JS_STATIC_INTERPRET JS_REQUIRES_STACK jsval *
 js_AllocRawStack(JSContext *cx, uintN nslots, void **markp)
 {
     jsval *sp;
 
+    JS_ASSERT(nslots != 0);
+    js_LeaveTrace(cx);
+
     if (!cx->stackPool.first.next) {
         int64 *timestamp;
 
         JS_ARENA_ALLOCATE_CAST(timestamp, int64 *,
                                &cx->stackPool, sizeof *timestamp);
         if (!timestamp) {
             js_ReportOutOfScriptQuota(cx);
             return NULL;
@@ -5081,17 +5087,16 @@ js_Interpret(JSContext *cx)
             ok = js_Invoke(cx, argc, vp, 0);
             regs.sp = vp + 1;
             CHECK_INTERRUPT_HANDLER();
             if (!ok)
                 goto error;
             if (!cx->rval2set) {
                 op2 = js_GetOpcode(cx, script, regs.pc + JSOP_SETCALL_LENGTH);
                 if (op2 != JSOP_DELELEM) {
-                    JS_ASSERT(!(js_CodeSpec[op2].format & JOF_DEL));
                     JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
                                          JSMSG_BAD_LEFTSIDE_OF_ASS);
                     goto error;
                 }
 
                 /*
                  * Store true as the result of the emulated delete of a
                  * non-existent property. NB: We don't METER_OP_PAIR here;
--- a/js/src/jsinterp.h
+++ b/js/src/jsinterp.h
@@ -385,21 +385,24 @@ js_FillPropertyCache(JSContext *cx, JSOb
             PCMETER(cache_->misses++);                                        \
     } while (0)
 
 extern JS_REQUIRES_STACK JSAtom *
 js_FullTestPropertyCache(JSContext *cx, jsbytecode *pc,
                          JSObject **objp, JSObject **pobjp,
                          JSPropCacheEntry **entryp);
 
-extern void
-js_FlushPropertyCache(JSContext *cx);
+/* The property cache does not need a destructor. */
+#define js_FinishPropertyCache(cache) ((void) 0)
 
 extern void
-js_FlushPropertyCacheForScript(JSContext *cx, JSScript *script);
+js_PurgePropertyCache(JSContext *cx, JSPropertyCache *cache);
+
+extern void
+js_PurgePropertyCacheForScript(JSContext *cx, JSScript *script);
 
 extern void
 js_DisablePropertyCache(JSContext *cx);
 
 extern void
 js_EnablePropertyCache(JSContext *cx);
 
 /*
--- a/js/src/jsnum.cpp
+++ b/js/src/jsnum.cpp
@@ -782,16 +782,27 @@ js_NewNumberInRootedValue(JSContext *cx,
 
     if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i)) {
         *vp = INT_TO_JSVAL(i);
         return JS_TRUE;
     }
     return js_NewDoubleInRootedValue(cx, d, vp);
 }
 
+JSBool
+js_NewWeaklyRootedNumber(JSContext *cx, jsdouble d, jsval *rval)
+{
+    jsint i;
+    if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i)) {
+        *rval = INT_TO_JSVAL(i);
+        return JS_TRUE;
+    }
+    return JS_NewDoubleValue(cx, d, rval);
+}
+
 /*
  * Convert a number to C string. The buf must be large enough to accommodate
  * the result, including '-' and '\0', if base == 10 or d is an integer that
  * fits in 32 bits. The caller must free the resulting pointer if it does not
  * point into buf.
  */
 static char *
 NumberToCString(JSContext *cx, jsdouble d, jsint base, char *buf, size_t bufSize)
--- a/js/src/jsnum.h
+++ b/js/src/jsnum.h
@@ -173,16 +173,23 @@ extern const char js_parseFloat_str[];
 extern const char js_parseInt_str[];
 
 /*
  * vp must be a root.
  */
 extern JSBool
 js_NewNumberInRootedValue(JSContext *cx, jsdouble d, jsval *vp);
 
+/*
+ * Create a weakly rooted integer or double jsval as appropriate for the given
+ * jsdouble.
+ */
+extern JSBool
+js_NewWeaklyRootedNumber(JSContext *cx, jsdouble d, jsval *vp);
+
 /* Convert a number to a GC'ed string. */
 extern JSString * JS_FASTCALL
 js_NumberToString(JSContext *cx, jsdouble d);
 
 /*
  * Convert a value to a number. On exit JSVAL_IS_NULL(*vp) iff there was an
  * error. If on exit JSVAL_IS_NUMBER(*vp), then *vp holds the jsval that
  * matches the result. Otherwise *vp is JSVAL_TRUE indicating that the jsval
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -4198,45 +4198,16 @@ js_NativeSet(JSContext *cx, JSObject *ob
          SCOPE_GET_PROPERTY(scope, sprop->id) == sprop)) {
   set_slot:
         LOCKED_OBJ_WRITE_BARRIER(cx, obj, slot, *vp);
     }
 
     return JS_TRUE;
 }
 
-static jsbytecode*
-js_GetCurrentBytecodePC(JSContext* cx)
-{
-    jsbytecode *pc, *imacpc;
-
-#ifdef JS_TRACER
-    if (JS_ON_TRACE(cx)) {
-        pc = cx->bailExit->pc;
-        imacpc = cx->bailExit->imacpc;
-    } else
-#endif
-    {
-        JS_ASSERT_NOT_ON_TRACE(cx);  /* for static analysis */
-        if (cx->fp && cx->fp->regs) {
-            pc = cx->fp->regs->pc;
-            imacpc = cx->fp->imacpc;
-        } else {
-            return NULL;
-        }
-    }
-
-    /*
-     * If we are inside GetProperty_tn or similar, return a pointer to the
-     * current instruction in the script, not the CALL instruction in the
-     * imacro, for the benefit of callers doing bytecode inspection.
-     */
-    return (*pc == JSOP_CALL && imacpc) ? imacpc : pc;
-}
-
 JSBool
 js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, jsval *vp,
                      JSPropCacheEntry **entryp)
 {
     JSObject *aobj, *obj2;
     uint32 shape;
     int protoIndex;
     JSProperty *prop;
--- a/js/src/jsparse.cpp
+++ b/js/src/jsparse.cpp
@@ -992,39 +992,16 @@ struct BindData {
     union {
         struct {
             uintN           overflow;
         } let;
     } u;
 };
 
 static JSBool
-BindArg(JSContext *cx, JSAtom *atom, JSTreeContext *tc)
-{
-    const char *name;
-
-    /*
-     * Check for a duplicate parameter name, a "feature" required by ECMA-262.
-     */
-    JS_ASSERT(tc->flags & TCF_IN_FUNCTION);
-    if (js_LookupLocal(cx, tc->u.fun, atom, NULL) != JSLOCAL_NONE) {
-        name = js_AtomToPrintableString(cx, atom);
-        if (!name ||
-            !js_ReportCompileErrorNumber(cx, TS(tc->parseContext), NULL,
-                                         JSREPORT_WARNING | JSREPORT_STRICT,
-                                         JSMSG_DUPLICATE_FORMAL,
-                                         name)) {
-            return JS_FALSE;
-        }
-    }
-
-    return js_AddLocal(cx, tc->u.fun, atom, JSLOCAL_ARG);
-}
-
-static JSBool
 BindLocalVariable(JSContext *cx, JSFunction *fun, JSAtom *atom,
                   JSLocalKind localKind)
 {
     JS_ASSERT(localKind == JSLOCAL_VAR || localKind == JSLOCAL_CONST);
 
     /*
      * Don't bind a variable with the hidden name 'arguments', per ECMA-262.
      * Instead 'var arguments' always restates the predefined property of the
@@ -1045,41 +1022,32 @@ static JSParseNode *
 DestructuringExpr(JSContext *cx, BindData *data, JSTreeContext *tc,
                   JSTokenType tt);
 
 static JSBool
 BindDestructuringArg(JSContext *cx, BindData *data, JSAtom *atom,
                      JSTreeContext *tc)
 {
     JSAtomListElement *ale;
-    const char *name;
 
     JS_ASSERT(tc->flags & TCF_IN_FUNCTION);
     ATOM_LIST_SEARCH(ale, &tc->decls, atom);
     if (!ale) {
         ale = js_IndexAtom(cx, atom, &tc->decls);
         if (!ale)
             return JS_FALSE;
         ALE_SET_JSOP(ale, data->op);
     }
 
     if (js_LookupLocal(cx, tc->u.fun, atom, NULL) != JSLOCAL_NONE) {
-        name = js_AtomToPrintableString(cx, atom);
-        if (!name ||
-            !js_ReportCompileErrorNumber(cx, TS(tc->parseContext), data->pn,
-                                         JSREPORT_WARNING | JSREPORT_STRICT,
-                                         JSMSG_DUPLICATE_FORMAL,
-                                         name)) {
-            return JS_FALSE;
-        }
-    } else {
-        if (!BindLocalVariable(cx, tc->u.fun, atom, JSLOCAL_VAR))
-            return JS_FALSE;
+        js_ReportCompileErrorNumber(cx, TS(tc->parseContext), NULL,
+                                    JSREPORT_ERROR, JSMSG_DESTRUCT_DUP_ARG);
+        return JS_FALSE;
     }
-    return JS_TRUE;
+    return BindLocalVariable(cx, tc->u.fun, atom, JSLOCAL_VAR);
 }
 #endif /* JS_HAS_DESTRUCTURING */
 
 static JSFunction *
 NewCompilerFunction(JSContext *cx, JSTreeContext *tc, JSAtom *atom,
                     uintN lambda)
 {
     JSObject *parent;
@@ -1126,16 +1094,17 @@ FunctionDef(JSContext *cx, JSTokenStream
     JSTokenType tt;
     JSAtom *funAtom;
     JSParsedObjectBox *funpob;
     JSAtomListElement *ale;
     JSFunction *fun;
     JSTreeContext funtc;
 #if JS_HAS_DESTRUCTURING
     JSParseNode *item, *list = NULL;
+    bool destructuringArg = false, duplicatedArg = false;
 #endif
 
     /* Make a TOK_FUNCTION node. */
 #if JS_HAS_GETTER_SETTER
     op = CURRENT_TOKEN(ts).t_op;
 #endif
     pn = NewParseNode(cx, ts, PN_FUNC, tc);
     if (!pn)
@@ -1250,16 +1219,21 @@ FunctionDef(JSContext *cx, JSTokenStream
 #if JS_HAS_DESTRUCTURING
               case TOK_LB:
               case TOK_LC:
               {
                 BindData data;
                 JSParseNode *lhs, *rhs;
                 jsint slot;
 
+                /* See comment below in the TOK_NAME case. */
+                if (duplicatedArg)
+                    goto report_dup_and_destructuring;
+                destructuringArg = true;
+
                 /*
                  * A destructuring formal parameter turns into one or more
                  * local variables initialized from properties of a single
                  * anonymous positional parameter, so here we must tweak our
                  * binder and its data.
                  */
                 data.pn = NULL;
                 data.op = JSOP_DEFVAR;
@@ -1300,24 +1274,58 @@ FunctionDef(JSContext *cx, JSTokenStream
                     PN_INIT_LIST(list);
                 }
                 PN_APPEND(list, item);
                 break;
               }
 #endif /* JS_HAS_DESTRUCTURING */
 
               case TOK_NAME:
-                if (!BindArg(cx, CURRENT_TOKEN(ts).t_atom, &funtc))
+              {
+                /*
+                 * Check for a duplicate parameter name, a "feature" that
+                 * ECMA-262 requires. Still if any argument is a destructuring
+                 * pattern, we will report error either now or later, when we
+                 * find the first pattern.
+                 */
+                JSAtom *atom = CURRENT_TOKEN(ts).t_atom;
+                if (js_LookupLocal(cx, fun, atom, NULL) != JSLOCAL_NONE) {
+#if JS_HAS_DESTRUCTURING
+                    if (destructuringArg)
+                        goto report_dup_and_destructuring;
+                    duplicatedArg = true;
+#endif
+                    const char *name = js_AtomToPrintableString(cx, atom);
+                    if (!name ||
+                        !js_ReportCompileErrorNumber(cx, TS(tc->parseContext),
+                                                     NULL,
+                                                     JSREPORT_WARNING |
+                                                     JSREPORT_STRICT,
+                                                     JSMSG_DUPLICATE_FORMAL,
+                                                     name)) {
+                        return NULL;
+                    }
+                }
+                if (!js_AddLocal(cx, fun, atom, JSLOCAL_ARG))
                     return NULL;
                 break;
+              }
 
               default:
                 js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR,
                                             JSMSG_MISSING_FORMAL);
                 return NULL;
+
+#if JS_HAS_DESTRUCTURING
+              report_dup_and_destructuring:
+                js_ReportCompileErrorNumber(cx, TS(tc->parseContext), NULL,
+                                            JSREPORT_ERROR,
+                                            JSMSG_DESTRUCT_DUP_ARG);
+                return NULL;
+#endif
             }
         } while (js_MatchToken(cx, ts, TOK_COMMA));
 
         MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FORMAL);
     }
 
 #if JS_HAS_EXPR_CLOSURES
     ts->flags |= TSF_OPERAND;
--- a/js/src/jsprvtd.h
+++ b/js/src/jsprvtd.h
@@ -96,20 +96,22 @@ typedef struct JSGenerator          JSGe
 typedef struct JSNativeEnumerator   JSNativeEnumerator;
 typedef struct JSParseContext       JSParseContext;
 typedef struct JSParsedObjectBox    JSParsedObjectBox;
 typedef struct JSParseNode          JSParseNode;
 typedef struct JSPropCacheEntry     JSPropCacheEntry;
 typedef struct JSSharpObjectMap     JSSharpObjectMap;
 typedef struct JSTempValueRooter    JSTempValueRooter;
 typedef struct JSThread             JSThread;
+typedef struct JSThreadData         JSThreadData;
 typedef struct JSToken              JSToken;
 typedef struct JSTokenPos           JSTokenPos;
 typedef struct JSTokenPtr           JSTokenPtr;
 typedef struct JSTokenStream        JSTokenStream;
+typedef struct JSTraceMonitor       JSTraceMonitor;
 typedef struct JSTreeContext        JSTreeContext;
 typedef struct JSTryNote            JSTryNote;
 typedef struct JSWeakRoots          JSWeakRoots;
 
 /* Friend "Advanced API" typedefs. */
 typedef struct JSAtom               JSAtom;
 typedef struct JSAtomList           JSAtomList;
 typedef struct JSAtomListElement    JSAtomListElement;
--- a/js/src/jsregexp.cpp
+++ b/js/src/jsregexp.cpp
@@ -1262,17 +1262,24 @@ ParseTerm(CompilerState *state)
                     /* Treat this as flat. termStart - 1 is the \. */
                     c = '\\';
                     goto asFlat;
                 }
 
                 /* Treat this as an octal escape. */
                 goto doOctal;
             }
-            JS_ASSERT(1 <= num && num <= 0x10000);
+
+            /*
+             * When FindParenCount calls the regex parser recursively (to find
+             * the number of backrefs) num can be arbitrary and the maximum
+             * supported number of backrefs does not bound it.
+             */
+            JS_ASSERT_IF(!(state->flags & JSREG_FIND_PAREN_COUNT),
+                         1 <= num && num <= 0x10000);
             state->result = NewRENode(state, REOP_BACKREF);
             if (!state->result)
                 return JS_FALSE;
             state->result->u.parenIndex = num - 1;
             state->progLength
                 += 1 + GetCompactIndexWidth(state->result->u.parenIndex);
             break;
           /* Control escape */
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -1574,23 +1574,23 @@ js_DestroyScript(JSContext *cx, JSScript
 {
     js_CallDestroyScriptHook(cx, script);
     JS_ClearScriptTraps(cx, script);
 
     if (script->principals)
         JSPRINCIPALS_DROP(cx, script->principals);
 
     if (JS_GSN_CACHE(cx).code == script->code)
-        JS_CLEAR_GSN_CACHE(cx);
+        JS_PURGE_GSN_CACHE(cx);
 
     /*
      * The GC flushes all property caches, so no need to purge just the
      * entries for this script.
      *
-     * JS_THREADSAFE note: js_FlushPropertyCacheForScript flushes only the
+     * JS_THREADSAFE note: js_PurgePropertyCacheForScript purges only the
      * current thread's property cache, so a script not owned by a function
      * or object, which hands off lifetime management for that script to the
      * GC, must be used by only one thread over its lifetime.
      *
      * This should be an API-compatible change, since a script is never safe
      * against premature GC if shared among threads without a rooted object
      * wrapping it to protect the script's mapped atoms against GC. We use
      * script->owner to enforce this requirement via assertions.
@@ -1601,19 +1601,19 @@ js_DestroyScript(JSContext *cx, JSScript
 
     if (!cx->runtime->gcRunning) {
         JSStackFrame *fp = js_GetTopStackFrame(cx);
 
         if (!(fp && (fp->flags & JSFRAME_EVAL))) {
 #ifdef CHECK_SCRIPT_OWNER
             JS_ASSERT(script->owner == cx->thread);
 #endif
-            js_FlushPropertyCacheForScript(cx, script);
+            js_PurgePropertyCacheForScript(cx, script);
 #ifdef JS_TRACER
-            js_FlushScriptFragments(cx, script);
+            js_PurgeScriptFragments(cx, script);
 #endif
         }
     }
 
     JS_free(cx, script);
 }
 
 void
@@ -1672,16 +1672,27 @@ js_TraceScript(JSTracer *trc, JSScript *
 typedef struct GSNCacheEntry {
     JSDHashEntryHdr     hdr;
     jsbytecode          *pc;
     jssrcnote           *sn;
 } GSNCacheEntry;
 
 #define GSN_CACHE_THRESHOLD     100
 
+void
+js_PurgeGSNCache(JSGSNCache *cache)
+{
+    cache->code = NULL;
+    if (cache->table.ops) {
+        JS_DHashTableFinish(&cache->table);
+        cache->table.ops = NULL;
+    }
+    GSN_CACHE_METER(cache, purges);
+}
+
 jssrcnote *
 js_GetSrcNoteCached(JSContext *cx, JSScript *script, jsbytecode *pc)
 {
     ptrdiff_t target, offset;
     GSNCacheEntry *entry;
     jssrcnote *sn, *result;
     uintN nsrcnotes;
 
@@ -1709,17 +1720,17 @@ js_GetSrcNoteCached(JSContext *cx, JSScr
         if (offset == target && SN_IS_GETTABLE(sn)) {
             result = sn;
             break;
         }
     }
 
     if (JS_GSN_CACHE(cx).code != script->code &&
         script->length >= GSN_CACHE_THRESHOLD) {
-        JS_CLEAR_GSN_CACHE(cx);
+        JS_PURGE_GSN_CACHE(cx);
         nsrcnotes = 0;
         for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn);
              sn = SN_NEXT(sn)) {
             if (SN_IS_GETTABLE(sn))
                 ++nsrcnotes;
         }
         if (!JS_DHashTableInit(&JS_GSN_CACHE(cx).table, JS_DHashGetStubOps(),
                                NULL, sizeof(GSNCacheEntry),
--- a/js/src/jsstr.cpp
+++ b/js/src/jsstr.cpp
@@ -162,26 +162,16 @@ js_ConcatStrings(JSContext *cx, JSString
         left->u.chars = ls = s;
         ldep = left;
     }
 
     js_strncpy(s + ln, rs, rn);
     n = ln + rn;
     s[n] = 0;
 
-#ifdef JS_TRACER
-    /*
-     * Lame hack to avoid trying to deep-bail (@js_ReportAllocationOverflow)
-     * when called directly from trace.  Instead, retry from the interpreter.
-     * See bug 477351.
-     */
-    if (n > JSSTRING_LENGTH_MASK && JS_ON_TRACE(cx) && !cx->bailExit)
-        return NULL;
-#endif
-
     str = js_NewString(cx, s, n);
     if (!str) {
         /* Out of memory: clean up any space we (re-)allocated. */
         if (!ldep) {
             JS_free(cx, s);
         } else {
             s = (jschar *) JS_realloc(cx, ls, (ln + 1) * sizeof(jschar));
             if (s)
@@ -603,31 +593,17 @@ str_enumerate(JSContext *cx, JSObject *o
 static JSBool
 str_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
             JSObject **objp)
 {
     jsval v;
     JSString *str, *str1;
     jsint slot;
 
-    if (flags & JSRESOLVE_ASSIGNING)
-        return JS_TRUE;
-
-    if (id == ATOM_KEY(cx->runtime->atomState.lengthAtom)) {
-        v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
-        str = JSVAL_TO_STRING(v);
-        if (!OBJ_DEFINE_PROPERTY(cx, obj, id, INT_TO_JSVAL(17), NULL, NULL,
-                                 JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED, NULL)) {
-            return JS_FALSE;
-        }
-        *objp = obj;
-        return JS_TRUE;
-    }
-
-    if (!JSVAL_IS_INT(id))
+    if (!JSVAL_IS_INT(id) || (flags & JSRESOLVE_ASSIGNING))
         return JS_TRUE;
 
     v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
     JS_ASSERT(JSVAL_IS_STRING(v));
     str = JSVAL_TO_STRING(v);
 
     slot = JSVAL_TO_INT(id);
     if ((size_t)slot < JSSTRING_LENGTH(str)) {
@@ -804,34 +780,16 @@ static JSString* FASTCALL
 String_p_toString(JSContext* cx, JSObject* obj)
 {
     if (!JS_InstanceOf(cx, obj, &js_StringClass, NULL))
         return NULL;
     jsval v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
     JS_ASSERT(JSVAL_IS_STRING(v));
     return JSVAL_TO_STRING(v);
 }
-
-static JSString* FASTCALL
-String_p_substring(JSContext* cx, JSString* str, int32 begin, int32 end)
-{
-    JS_ASSERT(JS_ON_TRACE(cx));
-
-    size_t length = JSSTRING_LENGTH(str);
-    return SubstringTail(cx, str, length, begin, end);
-}
-
-static JSString* FASTCALL
-String_p_substring_1(JSContext* cx, JSString* str, int32 begin)
-{
-    JS_ASSERT(JS_ON_TRACE(cx));
-
-    size_t length = JSSTRING_LENGTH(str);
-    return SubstringTail(cx, str, length, begin, length);
-}
 #endif
 
 JSString* JS_FASTCALL
 js_toLowerCase(JSContext *cx, JSString *str)
 {
     size_t i, n;
     jschar *s, *news;
 
@@ -1493,49 +1451,19 @@ StringMatchHelper(JSContext *cx, uintN a
         *vp = *mdata.arrayval;
     JS_POP_TEMP_ROOT(cx, &tvr);
     return ok;
 }
 
 static JSBool
 str_match(JSContext *cx, uintN argc, jsval *vp)
 {
-    JSStackFrame *fp;
-
-    for (fp = js_GetTopStackFrame(cx); fp && !fp->regs; fp = fp->down)
-        JS_ASSERT(!fp->script);
-    return StringMatchHelper(cx, argc, vp, fp ? fp->regs->pc : NULL);
+    return StringMatchHelper(cx, argc, vp, js_GetCurrentBytecodePC(cx));
 }
 
-#ifdef JS_TRACER
-static jsval FASTCALL
-String_p_match(JSContext* cx, JSString* str, jsbytecode *pc, JSObject* regexp)
-{
-    jsval vp[3] = { JSVAL_NULL, STRING_TO_JSVAL(str), OBJECT_TO_JSVAL(regexp) };
-    JSAutoTempValueRooter tvr(cx, 3, vp);
-    if (!StringMatchHelper(cx, 1, vp, pc)) {
-        cx->builtinStatus |= JSBUILTIN_ERROR;
-        return JSVAL_VOID;
-    }
-    return vp[0];
-}
-
-static jsval FASTCALL
-String_p_match_obj(JSContext* cx, JSObject* str, jsbytecode *pc, JSObject* regexp)
-{
-    jsval vp[3] = { JSVAL_NULL, OBJECT_TO_JSVAL(str), OBJECT_TO_JSVAL(regexp) };
-    JSAutoTempValueRooter tvr(cx, 3, vp);
-    if (!StringMatchHelper(cx, 1, vp, pc)) {
-        cx->builtinStatus |= JSBUILTIN_ERROR;
-        return JSVAL_VOID;
-    }
-    return vp[0];
-}
-#endif
-
 static JSBool
 str_search(JSContext *cx, uintN argc, jsval *vp)
 {
     GlobData data;
 
     data.flags = MODE_SEARCH;
     data.optarg = 1;
     return match_or_replace(cx, NULL, NULL, &data, argc, vp);
@@ -1607,34 +1535,32 @@ interpret_dollar(JSContext *cx, jschar *
       case '`':
         return &res->leftContext;
       case '\'':
         return &res->rightContext;
     }
     return NULL;
 }
 
-static JSBool
+static JS_REQUIRES_STACK JSBool
 find_replen(JSContext *cx, ReplaceData *rdata, size_t *sizep)
 {
     JSString *repstr;
     size_t replen, skip;
     jschar *dp, *ep;
     JSSubString *sub;
     JSObject *lambda;
 
     lambda = rdata->lambda;
     if (lambda) {
         uintN argc, i, j, m, n, p;
         jsval *invokevp, *sp;
         void *mark;
         JSBool ok;
 
-        JS_ASSERT_NOT_ON_TRACE(cx);
-
         /*
          * Save the regExpStatics from the current regexp, since they may be
          * clobbered by a RegExp usage in the lambda function.  Note that all
          * members of JSRegExpStatics are JSSubStrings, so not GC roots, save
          * input, which is rooted otherwise via vp[1] in str_replace.
          */
         JSRegExpStatics save = cx->regExpStatics;
         JSBool freeMoreParens = JS_FALSE;
@@ -1773,17 +1699,17 @@ replace_destroy(JSContext *cx, GlobData 
 {
     ReplaceData *rdata;
 
     rdata = (ReplaceData *)data;
     JS_free(cx, rdata->chars);
     rdata->chars = NULL;
 }
 
-static JSBool
+static JS_REQUIRES_STACK JSBool
 replace_glob(JSContext *cx, jsint count, GlobData *data)
 {
     ReplaceData *rdata;
     JSString *str;
     size_t leftoff, leftlen, replen, growth;
     const jschar *left;
     jschar *chars;
 
@@ -1809,17 +1735,17 @@ replace_glob(JSContext *cx, jsint count,
     chars += rdata->index;
     rdata->index += growth;
     js_strncpy(chars, left, leftlen);
     chars += leftlen;
     do_replace(cx, rdata, chars);
     return JS_TRUE;
 }
 
-static JSBool
+static JS_REQUIRES_STACK JSBool
 str_replace(JSContext *cx, uintN argc, jsval *vp)
 {
     JSObject *lambda;
     JSString *repstr;
 
     if (argc >= 2 && JS_TypeOfValue(cx, vp[3]) == JSTYPE_FUNCTION) {
         lambda = JSVAL_TO_OBJECT(vp[3]);
         repstr = NULL;
@@ -1828,61 +1754,17 @@ str_replace(JSContext *cx, uintN argc, j
         repstr = ArgToRootedString(cx, argc, vp, 1);
         if (!repstr)
             return JS_FALSE;
     }
 
     return js_StringReplaceHelper(cx, argc, lambda, repstr, vp);
 }
 
-#ifdef JS_TRACER
-static JSString* FASTCALL
-String_p_replace_str(JSContext* cx, JSString* str, JSObject* regexp, JSString* repstr)
-{
-    /* Make sure we will not call regexp.toString() later. This is not a _FAIL builtin. */
-    if (OBJ_GET_CLASS(cx, regexp) != &js_RegExpClass)
-        return NULL;
-
-    jsval vp[4] = {
-        JSVAL_NULL, STRING_TO_JSVAL(str), OBJECT_TO_JSVAL(regexp), STRING_TO_JSVAL(repstr)
-    };
-    if (!js_StringReplaceHelper(cx, 2, NULL, repstr, vp))
-        return NULL;
-    JS_ASSERT(JSVAL_IS_STRING(vp[0]));
-    return JSVAL_TO_STRING(vp[0]);
-}
-
-static JSString* FASTCALL
-String_p_replace_str2(JSContext* cx, JSString* str, JSString* patstr, JSString* repstr)
-{
-    jsval vp[4] = {
-        JSVAL_NULL, STRING_TO_JSVAL(str), STRING_TO_JSVAL(patstr), STRING_TO_JSVAL(repstr)
-    };
-    if (!js_StringReplaceHelper(cx, 2, NULL, repstr, vp))
-        return NULL;
-    JS_ASSERT(JSVAL_IS_STRING(vp[0]));
-    return JSVAL_TO_STRING(vp[0]);
-}
-
-static JSString* FASTCALL
-String_p_replace_str3(JSContext* cx, JSString* str, JSString* patstr, JSString* repstr,
-                      JSString* flagstr)
-{
-    jsval vp[5] = {
-        JSVAL_NULL, STRING_TO_JSVAL(str), STRING_TO_JSVAL(patstr), STRING_TO_JSVAL(repstr),
-        STRING_TO_JSVAL(flagstr)
-    };
-    if (!js_StringReplaceHelper(cx, 3, NULL, repstr, vp))
-        return NULL;
-    JS_ASSERT(JSVAL_IS_STRING(vp[0]));
-    return JSVAL_TO_STRING(vp[0]);
-}
-#endif
-
-JSBool
+JSBool JS_REQUIRES_STACK
 js_StringReplaceHelper(JSContext *cx, uintN argc, JSObject *lambda,
                        JSString *repstr, jsval *vp)
 {
     ReplaceData rdata;
     JSBool ok;
     size_t leftlen, rightlen, length;
     jschar *chars;
     JSString *str;
@@ -2173,29 +2055,16 @@ str_split(JSContext *cx, uintN argc, jsv
             }
             i = j + sep->length;
         }
         ok = (j != -2);
     }
     return ok;
 }
 
-#ifdef JS_TRACER
-static JSObject* FASTCALL
-String_p_split(JSContext* cx, JSString* str, JSString* sepstr)
-{
-    // FIXME: Avoid building and then parsing this array.
-    jsval vp[4] = { JSVAL_NULL, STRING_TO_JSVAL(str), STRING_TO_JSVAL(sepstr), JSVAL_VOID };
-    if (!str_split(cx, 2, vp))
-        return NULL;
-    JS_ASSERT(JSVAL_IS_OBJECT(vp[0]));
-    return JSVAL_TO_OBJECT(vp[0]);
-}
-#endif
-
 #if JS_HAS_PERL_SUBSTR
 static JSBool
 str_substr(JSContext *cx, uintN argc, jsval *vp)
 {
     JSString *str;
     jsdouble d;
     jsdouble length, begin, end;
 
@@ -2264,49 +2133,16 @@ str_concat(JSContext *cx, uintN argc, js
         if (!str)
             return JS_FALSE;
         *vp = STRING_TO_JSVAL(str);
     }
 
     return JS_TRUE;
 }
 
-#ifdef JS_TRACER
-static JSString* FASTCALL
-String_p_concat_1int(JSContext* cx, JSString* str, int32 i)
-{
-    // FIXME: should be able to use stack buffer and avoid istr...
-    JSString* istr = js_NumberToString(cx, i);
-    if (!istr)
-        return NULL;
-    return js_ConcatStrings(cx, str, istr);
-}
-
-static JSString* FASTCALL
-String_p_concat_2str(JSContext* cx, JSString* str, JSString* a, JSString* b)
-{
-    str = js_ConcatStrings(cx, str, a);
-    if (str)
-        return js_ConcatStrings(cx, str, b);
-    return NULL;
-}
-
-static JSString* FASTCALL
-String_p_concat_3str(JSContext* cx, JSString* str, JSString* a, JSString* b, JSString* c)
-{
-    str = js_ConcatStrings(cx, str, a);
-    if (str) {
-        str = js_ConcatStrings(cx, str, b);
-        if (str)
-            return js_ConcatStrings(cx, str, c);
-    }
-    return NULL;
-}
-#endif
-
 static JSBool
 str_slice(JSContext *cx, uintN argc, jsval *vp)
 {
     jsval t, v;
     JSString *str;
 
     t = vp[1];
     v = vp[2];
@@ -2547,76 +2383,57 @@ js_String_getelem(JSContext* cx, JSStrin
 }
 #endif
 
 JS_DEFINE_CALLINFO_2(extern, BOOL,   js_EqualStrings, STRING, STRING,                       1, 1)
 JS_DEFINE_CALLINFO_2(extern, INT32,  js_CompareStrings, STRING, STRING,                     1, 1)
 
 JS_DEFINE_TRCINFO_1(str_toString,
     (2, (extern, STRING_RETRY,      String_p_toString, CONTEXT, THIS,                        1, 1)))
-JS_DEFINE_TRCINFO_2(str_substring,
-    (4, (static, STRING_RETRY,      String_p_substring, CONTEXT, THIS_STRING, INT32, INT32,   1, 1)),
-    (3, (static, STRING_RETRY,      String_p_substring_1, CONTEXT, THIS_STRING, INT32,        1, 1)))
 JS_DEFINE_TRCINFO_1(str_charAt,
     (3, (extern, STRING_RETRY,      js_String_getelem, CONTEXT, THIS_STRING, INT32,           1, 1)))
 JS_DEFINE_TRCINFO_2(str_charCodeAt,
     (1, (extern, DOUBLE,            js_String_p_charCodeAt0, THIS_STRING,                     1, 1)),
     (2, (extern, DOUBLE,            js_String_p_charCodeAt, THIS_STRING, DOUBLE,              1, 1)))
-JS_DEFINE_TRCINFO_4(str_concat,
-    (3, (static, STRING_RETRY,      String_p_concat_1int, CONTEXT, THIS_STRING, INT32,        1, 1)),
-    (3, (extern, STRING_RETRY,      js_ConcatStrings, CONTEXT, THIS_STRING, STRING,           1, 1)),
-    (4, (static, STRING_RETRY,      String_p_concat_2str, CONTEXT, THIS_STRING, STRING, STRING, 1, 1)),
-    (5, (static, STRING_RETRY,      String_p_concat_3str, CONTEXT, THIS_STRING, STRING, STRING, STRING, 1, 1)))
-JS_DEFINE_TRCINFO_2(str_match,
-    (4, (static, JSVAL_FAIL,        String_p_match, CONTEXT, THIS_STRING, PC, REGEXP,         1, 1)),
-    (4, (static, JSVAL_FAIL,        String_p_match_obj, CONTEXT, THIS, PC, REGEXP,            1, 1)))
-JS_DEFINE_TRCINFO_3(str_replace,
-    (4, (static, STRING_RETRY,      String_p_replace_str, CONTEXT, THIS_STRING, REGEXP, STRING, 1, 1)),
-    (4, (static, STRING_RETRY,      String_p_replace_str2, CONTEXT, THIS_STRING, STRING, STRING, 1, 1)),
-    (5, (static, STRING_RETRY,      String_p_replace_str3, CONTEXT, THIS_STRING, STRING, STRING, STRING, 1, 1)))
-JS_DEFINE_TRCINFO_1(str_split,
-    (3, (static, OBJECT_RETRY,      String_p_split, CONTEXT, THIS_STRING, STRING,             0, 0)))
-JS_DEFINE_TRCINFO_1(str_toLowerCase,
-    (2, (extern, STRING_RETRY,      js_toLowerCase, CONTEXT, THIS_STRING,                     1, 1)))
-JS_DEFINE_TRCINFO_1(str_toUpperCase,
-    (2, (extern, STRING_RETRY,      js_toUpperCase, CONTEXT, THIS_STRING,                     1, 1)))
+JS_DEFINE_TRCINFO_1(str_concat,
+    (3, (extern, STRING_RETRY,      js_ConcatStrings, CONTEXT, THIS_STRING, STRING,           1, 1)))
 
 #define GENERIC           JSFUN_GENERIC_NATIVE
 #define PRIMITIVE         JSFUN_THISP_PRIMITIVE
 #define GENERIC_PRIMITIVE (GENERIC | PRIMITIVE)
 
 static JSFunctionSpec string_methods[] = {
 #if JS_HAS_TOSOURCE
     JS_FN("quote",             str_quote,             0,GENERIC_PRIMITIVE),
     JS_FN(js_toSource_str,     str_toSource,          0,JSFUN_THISP_STRING),
 #endif
 
     /* Java-like methods. */
     JS_TN(js_toString_str,     str_toString,          0,JSFUN_THISP_STRING, str_toString_trcinfo),
     JS_FN(js_valueOf_str,      str_toString,          0,JSFUN_THISP_STRING),
     JS_FN(js_toJSON_str,       str_toString,          0,JSFUN_THISP_STRING),
-    JS_TN("substring",         str_substring,         2,GENERIC_PRIMITIVE, str_substring_trcinfo),
-    JS_TN("toLowerCase",       str_toLowerCase,       0,GENERIC_PRIMITIVE, str_toLowerCase_trcinfo),
-    JS_TN("toUpperCase",       str_toUpperCase,       0,GENERIC_PRIMITIVE, str_toUpperCase_trcinfo),
+    JS_FN("substring",         str_substring,         2,GENERIC_PRIMITIVE),
+    JS_FN("toLowerCase",       str_toLowerCase,       0,GENERIC_PRIMITIVE),
+    JS_FN("toUpperCase",       str_toUpperCase,       0,GENERIC_PRIMITIVE),
     JS_TN("charAt",            str_charAt,            1,GENERIC_PRIMITIVE, str_charAt_trcinfo),
     JS_TN("charCodeAt",        str_charCodeAt,        1,GENERIC_PRIMITIVE, str_charCodeAt_trcinfo),
     JS_FN("indexOf",           str_indexOf,           1,GENERIC_PRIMITIVE),
     JS_FN("lastIndexOf",       str_lastIndexOf,       1,GENERIC_PRIMITIVE),
     JS_FN("trim",              str_trim,              0,GENERIC_PRIMITIVE),
     JS_FN("trimLeft",          str_trimLeft,          0,GENERIC_PRIMITIVE),
     JS_FN("trimRight",         str_trimRight,         0,GENERIC_PRIMITIVE),
     JS_FN("toLocaleLowerCase", str_toLocaleLowerCase, 0,GENERIC_PRIMITIVE),
     JS_FN("toLocaleUpperCase", str_toLocaleUpperCase, 0,GENERIC_PRIMITIVE),
     JS_FN("localeCompare",     str_localeCompare,     1,GENERIC_PRIMITIVE),
 
     /* Perl-ish methods (search is actually Python-esque). */
-    JS_TN("match",             str_match,             1,GENERIC_PRIMITIVE, str_match_trcinfo),
+    JS_FN("match",             str_match,             1,GENERIC_PRIMITIVE),
     JS_FN("search",            str_search,            1,GENERIC_PRIMITIVE),
-    JS_TN("replace",           str_replace,           2,GENERIC_PRIMITIVE, str_replace_trcinfo),
-    JS_TN("split",             str_split,             2,GENERIC_PRIMITIVE, str_split_trcinfo),
+    JS_FN("replace",           str_replace,           2,GENERIC_PRIMITIVE),
+    JS_FN("split",             str_split,             2,GENERIC_PRIMITIVE),
 #if JS_HAS_PERL_SUBSTR
     JS_FN("substr",            str_substr,            2,GENERIC_PRIMITIVE),
 #endif
 
     /* Python-esque sequence methods. */
     JS_TN("concat",            str_concat,            1,GENERIC_PRIMITIVE, str_concat_trcinfo),
     JS_FN("slice",             str_slice,             2,GENERIC_PRIMITIVE),
 
@@ -2652,20 +2469,38 @@ js_String(JSContext *cx, JSObject *obj, 
         argv[0] = STRING_TO_JSVAL(str);
     } else {
         str = cx->runtime->emptyString;
     }
     if (!JS_IsConstructing(cx)) {
         *rval = STRING_TO_JSVAL(str);
         return JS_TRUE;
     }
-    STOBJ_SET_SLOT(obj, JSSLOT_PRIVATE, STRING_TO_JSVAL(str));
+    obj->fslots[JSSLOT_PRIVATE] = STRING_TO_JSVAL(str);
     return JS_TRUE;
 }
 
+#ifdef JS_TRACER
+
+JSObject* FASTCALL
+js_String_tn(JSContext* cx, JSObject* proto, JSString* str)
+{
+    JS_ASSERT(JS_ON_TRACE(cx));
+    JSObject* obj = js_NewNativeObject(cx, &js_StringClass, proto, JSSLOT_PRIVATE + 1);
+    if (!obj)
+        return NULL;
+
+    obj->fslots[JSSLOT_PRIVATE] = STRING_TO_JSVAL(str);
+    return obj;
+}
+
+JS_DEFINE_CALLINFO_3(extern, OBJECT, js_String_tn, CONTEXT, CALLEE_PROTOTYPE, STRING, 0, 0)
+
+#endif /* !JS_TRACER */
+
 static JSBool
 str_fromCharCode(JSContext *cx, uintN argc, jsval *vp)
 {
     jsval *argv;
     uintN i;
     uint16 code;
     jschar *chars;
     JSString *str;
@@ -2867,18 +2702,24 @@ js_InitStringClass(JSContext *cx, JSObje
     if (!JS_DefineFunctions(cx, obj, string_functions))
         return NULL;
 
     proto = JS_InitClass(cx, obj, NULL, &js_StringClass, js_String, 1,
                          NULL, string_methods,
                          NULL, string_static_methods);
     if (!proto)
         return NULL;
-    STOBJ_SET_SLOT(proto, JSSLOT_PRIVATE,
-                   STRING_TO_JSVAL(cx->runtime->emptyString));
+    proto->fslots[JSSLOT_PRIVATE] = STRING_TO_JSVAL(cx->runtime->emptyString);
+    if (!js_DefineNativeProperty(cx, proto, ATOM_TO_JSID(cx->runtime->atomState.lengthAtom),
+                                 JSVAL_VOID, NULL, NULL,
+                                 JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED, 0, 0,
+                                 NULL)) {
+        return JS_FALSE;
+    }
+
     return proto;
 }
 
 JSString *
 js_NewString(JSContext *cx, jschar *chars, size_t length)
 {
     JSString *str;
 
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -128,17 +128,17 @@ static const char tagChar[]  = "OIDISIBI
 #define MAX_SKIP_BYTES (NJ_PAGE_SIZE - LIR_FAR_SLOTS)
 
 /* Max memory needed to rebuild the interpreter stack when falling off trace. */
 #define MAX_INTERP_STACK_BYTES                                                \
     (MAX_NATIVE_STACK_SLOTS * sizeof(jsval) +                                 \
      MAX_CALL_STACK_ENTRIES * sizeof(JSInlineFrame))
 
 /* Max number of branches per tree. */
-#define MAX_BRANCHES 16
+#define MAX_BRANCHES 32
 
 #ifdef JS_JIT_SPEW
 #define debug_only_a(x) if (js_verboseAbort || js_verboseDebug ) { x; }
 #define ABORT_TRACE(msg)   do { debug_only_a(fprintf(stdout, "abort: %d: %s\n", __LINE__, msg);)  return false; } while (0)
 #else
 #define debug_only_a(x)
 #define ABORT_TRACE(msg)   return false
 #endif
@@ -235,20 +235,17 @@ static avmplus::AvmCore s_core = avmplus
 static avmplus::AvmCore* core = &s_core;
 
 #ifdef JS_JIT_SPEW
 void
 js_DumpPeerStability(JSTraceMonitor* tm, const void* ip, uint32 globalShape);
 #endif
 
 /* We really need a better way to configure the JIT. Shaver, where is my fancy JIT object? */
-static bool nesting_enabled = true;
-#if defined(NANOJIT_IA32)
-static bool did_we_check_sse2 = false;
-#endif
+static bool did_we_check_processor_features = false;
 
 #ifdef JS_JIT_SPEW
 bool js_verboseDebug = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "verbose");
 bool js_verboseStats = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "stats");
 bool js_verboseAbort = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "abort");
 #endif
 
 /* The entire VM shares one oracle. Collisions and concurrent updates are tolerated and worst
@@ -565,63 +562,60 @@ js_AttemptCompilation(JSTraceMonitor* tm
         JS_ASSERT(*(jsbytecode*)f->ip == JSOP_NOP || *(jsbytecode*)f->ip == JSOP_LOOP);
         *(jsbytecode*)f->ip = JSOP_LOOP;
         --f->recordAttempts;
         f->hits() = HOTLOOP;
         f = f->peer;
     }
 }
 
-#if defined(NJ_SOFTFLOAT)
 JS_DEFINE_CALLINFO_1(static, DOUBLE,    i2f, INT32,                 1, 1)
 JS_DEFINE_CALLINFO_1(static, DOUBLE,    u2f, UINT32,                1, 1)
-#endif
 
 static bool isi2f(LInsp i)
 {
     if (i->isop(LIR_i2f))
         return true;
 
-#if defined(NJ_SOFTFLOAT)
-    if (i->isop(LIR_qjoin) &&
+    if (nanojit::AvmCore::config.soft_float &&
+        i->isop(LIR_qjoin) &&
         i->oprnd1()->isop(LIR_call) &&
         i->oprnd2()->isop(LIR_callh))
     {
         if (i->oprnd1()->callInfo() == &i2f_ci)
             return true;
     }
-#endif
 
     return false;
 }
 
 static bool isu2f(LInsp i)
 {
     if (i->isop(LIR_u2f))
         return true;
 
-#if defined(NJ_SOFTFLOAT)
-    if (i->isop(LIR_qjoin) &&
+    if (nanojit::AvmCore::config.soft_float &&
+        i->isop(LIR_qjoin) &&
         i->oprnd1()->isop(LIR_call) &&
         i->oprnd2()->isop(LIR_callh))
     {
         if (i->oprnd1()->callInfo() == &u2f_ci)
             return true;
     }
-#endif
 
     return false;
 }
 
 static LInsp iu2fArg(LInsp i)
 {
-#if defined(NJ_SOFTFLOAT)
-    if (i->isop(LIR_qjoin))
+    if (nanojit::AvmCore::config.soft_float &&
+        i->isop(LIR_qjoin))
+    {
         return i->oprnd1()->arg(0);
-#endif
+    }
 
     return i->oprnd1();
 }
 
 
 static LIns* demote(LirWriter *out, LInsp i)
 {
     if (i->isCall())
@@ -670,18 +664,17 @@ static bool overflowSafe(LIns* i)
 {
     LIns* c;
     return (i->isop(LIR_and) && ((c = i->oprnd2())->isconst()) &&
             ((c->constval() & 0xc0000000) == 0)) ||
            (i->isop(LIR_rsh) && ((c = i->oprnd2())->isconst()) &&
             ((c->constval() > 0)));
 }
 
-#if defined(NJ_SOFTFLOAT)
-/* soft float */
+/* soft float support */
 
 JS_DEFINE_CALLINFO_1(static, DOUBLE,    fneg, DOUBLE,               1, 1)
 JS_DEFINE_CALLINFO_2(static, INT32,     fcmpeq, DOUBLE, DOUBLE,     1, 1)
 JS_DEFINE_CALLINFO_2(static, INT32,     fcmplt, DOUBLE, DOUBLE,     1, 1)
 JS_DEFINE_CALLINFO_2(static, INT32,     fcmple, DOUBLE, DOUBLE,     1, 1)
 JS_DEFINE_CALLINFO_2(static, INT32,     fcmpgt, DOUBLE, DOUBLE,     1, 1)
 JS_DEFINE_CALLINFO_2(static, INT32,     fcmpge, DOUBLE, DOUBLE,     1, 1)
 JS_DEFINE_CALLINFO_2(static, DOUBLE,    fmul, DOUBLE, DOUBLE,       1, 1)
@@ -825,18 +818,16 @@ public:
         // to do a quadCall ( qjoin(call,callh) )
         if ((ci->_argtypes & 3) == ARGSIZE_F)
             return quadCall(ci, args);
 
         return out->insCall(ci, args);
     }
 };
 
-#endif // NJ_SOFTFLOAT
-
 class FuncFilter: public LirWriter
 {
 public:
     FuncFilter(LirWriter* out):
         LirWriter(out)
     {
     }
 
@@ -954,17 +945,17 @@ public:
                         : out->insCall(&js_DoubleToInt32_ci, &idx);
                     LIns* args2[] = { idx, callArgN(s0, 0) };
                     return out->insCall(&js_String_p_charCodeAt_int_ci, args2);
                 }
             }
         } else if (ci == &js_BoxDouble_ci) {
             JS_ASSERT(s0->isQuad());
             if (isi2f(s0)) {
-                LIns* args2[] = { s0->oprnd1(), args[1] };
+                LIns* args2[] = { iu2fArg(s0), args[1] };
                 return out->insCall(&js_BoxInt32_ci, args2);
             }
             if (s0->isCall() && s0->callInfo() == &js_UnboxDouble_ci)
                 return callArgN(s0, 0);
         }
         return out->insCall(ci, args);
     }
 };
@@ -1208,42 +1199,48 @@ js_TrashTree(JSContext* cx, Fragment* f)
 
 JS_REQUIRES_STACK
 TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
         TreeInfo* ti, unsigned stackSlots, unsigned ngslots, uint8* typeMap,
         VMSideExit* innermostNestedGuard, jsbytecode* outer)
 {
     JS_ASSERT(!_fragment->vmprivate && ti);
 
+    /* Reset the fragment state we care about in case we got a recycled fragment. */
+    _fragment->lastIns = NULL;
+
     this->cx = cx;
     this->traceMonitor = &JS_TRACE_MONITOR(cx);
     this->globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
     this->lexicalBlock = cx->fp->blockChain;
     this->anchor = _anchor;
     this->fragment = _fragment;
     this->lirbuf = _fragment->lirbuf;
     this->treeInfo = ti;
     this->callDepth = _anchor ? _anchor->calldepth : 0;
     this->atoms = FrameAtomBase(cx, cx->fp);
     this->deepAborted = false;
     this->trashSelf = false;
     this->global_dslots = this->globalObj->dslots;
-    this->terminate = false;
+    this->loop = true; /* default assumption is we are compiling a loop */
     this->wasRootFragment = _fragment == _fragment->root;
     this->outer = outer;
-    
+    this->generatedTraceableNative = new JSTraceableNative();
+    JS_ASSERT(generatedTraceableNative);
+
     debug_only_v(printf("recording starting from %s:%u@%u\n",
                         ti->treeFileName, ti->treeLineNumber, ti->treePCOffset);)
     debug_only_v(printf("globalObj=%p, shape=%d\n", (void*)this->globalObj, OBJ_SHAPE(this->globalObj));)
 
     lir = lir_buf_writer = new (&gc) LirBufWriter(lirbuf);
     debug_only_v(lir = verbose_filter = new (&gc) VerboseWriter(&gc, lir, lirbuf->names);)
-#ifdef NJ_SOFTFLOAT
-    lir = float_filter = new (&gc) SoftFloatFilter(lir);
-#endif
+    if (nanojit::AvmCore::config.soft_float)
+        lir = float_filter = new (&gc) SoftFloatFilter(lir);
+    else
+        float_filter = 0;
     lir = cse_filter = new (&gc) CseFilter(lir, &gc);
     lir = expr_filter = new (&gc) ExprFilter(lir);
     lir = func_filter = new (&gc) FuncFilter(lir);
     lir->ins0(LIR_start);
 
     if (!nanojit::AvmCore::config.tree_opt || fragment->root == fragment)
         lirbuf->state = addName(lir->insParam(0, 0), "state");
 
@@ -1318,20 +1315,19 @@ TraceRecorder::~TraceRecorder()
         delete treeInfo;
     }
 #ifdef DEBUG
     delete verbose_filter;
 #endif
     delete cse_filter;
     delete expr_filter;
     delete func_filter;
-#ifdef NJ_SOFTFLOAT
     delete float_filter;
-#endif
     delete lir_buf_writer;
+    delete generatedTraceableNative;
 }
 
 void TraceRecorder::removeFragmentoReferences()
 {
     fragment = NULL;
 }
 
 /* Add debug information to a LIR instruction as we emit it. */
@@ -1964,65 +1960,16 @@ TraceRecorder::checkForGlobalObjectReall
             tracker.set(src++, NULL);
         }
         for (jsuint n = 0; n < length; ++n)
             tracker.set(dst++, map[n]);
         global_dslots = globalObj->dslots;
     }
 }
 
-/* Determine whether the current branch instruction terminates the loop. */
-static bool
-js_IsLoopExit(jsbytecode* pc, jsbytecode* header)
-{
-    switch (*pc) {
-      case JSOP_LT:
-      case JSOP_GT:
-      case JSOP_LE:
-      case JSOP_GE:
-      case JSOP_NE:
-      case JSOP_EQ:
-        /* These ops try to dispatch a JSOP_IFEQ or JSOP_IFNE that follows. */
-        JS_ASSERT(js_CodeSpec[*pc].length == 1);
-        pc++;
-        break;
-
-      default:
-        for (;;) {
-            if (*pc == JSOP_AND || *pc == JSOP_OR)
-                pc += GET_JUMP_OFFSET(pc);
-            else if (*pc == JSOP_ANDX || *pc == JSOP_ORX)
-                pc += GET_JUMPX_OFFSET(pc);
-            else
-                break;
-        }
-    }
-
-    switch (*pc) {
-      case JSOP_IFEQ:
-      case JSOP_IFNE:
-        /*
-         * Forward jumps are usually intra-branch, but for-in loops jump to the
-         * trailing enditer to clean up, so check for that case here.
-         */
-        if (pc[GET_JUMP_OFFSET(pc)] == JSOP_ENDITER)
-            return true;
-        return pc + GET_JUMP_OFFSET(pc) == header;
-
-      case JSOP_IFEQX:
-      case JSOP_IFNEX:
-        if (pc[GET_JUMPX_OFFSET(pc)] == JSOP_ENDITER)
-            return true;
-        return pc + GET_JUMPX_OFFSET(pc) == header;
-
-      default:;
-    }
-    return false;
-}
-
 /* Determine whether the current branch is a loop edge (taken or not taken). */
 static JS_REQUIRES_STACK bool
 js_IsLoopEdge(jsbytecode* pc, jsbytecode* header)
 {
     switch (*pc) {
       case JSOP_IFEQ:
       case JSOP_IFNE:
         return ((pc + GET_JUMP_OFFSET(pc)) == header);
@@ -2101,18 +2048,16 @@ TraceRecorder::determineSlotType(jsval* 
 }
 
 JS_REQUIRES_STACK LIns*
 TraceRecorder::snapshot(ExitType exitType)
 {
     JSStackFrame* fp = cx->fp;
     JSFrameRegs* regs = fp->regs;
     jsbytecode* pc = regs->pc;
-    if (exitType == BRANCH_EXIT && js_IsLoopExit(pc, (jsbytecode*)fragment->root->ip))
-        exitType = LOOP_EXIT;
 
     /* Check for a return-value opcode that needs to restart at the next instruction. */
     const JSCodeSpec& cs = js_CodeSpec[*pc];
 
     /* WARNING: don't return before restoring the original pc if (resumeAfter). */
     bool resumeAfter = (pendingTraceableNative &&
                         JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS);
     if (resumeAfter) {
@@ -2491,23 +2436,16 @@ checktype_fail_2:
         return true;
     } else {
         demote = false;
     }
 
     return false;
 }
 
-/* Check whether the current pc location is the loop header of the loop this recorder records. */
-JS_REQUIRES_STACK bool
-TraceRecorder::isLoopHeader(JSContext* cx) const
-{
-    return cx->fp->regs->pc == fragment->root->ip;
-}
-
 /* Compile the current fragment. */
 JS_REQUIRES_STACK void
 TraceRecorder::compile(JSTraceMonitor* tm)
 {
     Fragmento* fragmento = tm->fragmento;
     if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
         debug_only_v(printf("Blacklist: excessive stack use.\n"));
         js_Blacklist(fragment->root);
@@ -2568,55 +2506,63 @@ js_JoinPeersIfCompatible(Fragmento* frag
 
     stableTree->dependentTrees.addUnique(exit->from->root);
     ((TreeInfo*)exit->from->root->vmprivate)->linkedTrees.addUnique(stableFrag);
 
     return true;
 }
 
 /* Complete and compile a trace and link it to the existing tree if appropriate. */
-JS_REQUIRES_STACK bool
+JS_REQUIRES_STACK void
 TraceRecorder::closeLoop(JSTraceMonitor* tm, bool& demote)
 {
+    /*
+     * We should have arrived back at the loop header, and hence we don't want to be in an imacro
+     * here and the opcode should be either JSOP_LOOP, or in case this loop was blacklisted in the
+     * meantime JSOP_NOP.
+     */
+    JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc);
+
     bool stable;
     LIns* exitIns;
     Fragment* peer;
     VMSideExit* exit;
     Fragment* peer_root;
     Fragmento* fragmento = tm->fragmento;
 
     exitIns = snapshot(UNSTABLE_LOOP_EXIT);
     exit = (VMSideExit*)((GuardRecord*)exitIns->payload())->exit;
 
     if (callDepth != 0) {
         debug_only_v(printf("Blacklisted: stack depth mismatch, possible recursion.\n");)
         js_Blacklist(fragment->root);
         trashSelf = true;
-        return false;
+        return;
     }
 
     JS_ASSERT(exit->numStackSlots == treeInfo->nStackTypes);
 
     peer_root = getLoop(traceMonitor, fragment->root->ip, treeInfo->globalShape);
     JS_ASSERT(peer_root != NULL);
+
     stable = deduceTypeStability(peer_root, &peer, demote);
 
-    #if DEBUG
+#if DEBUG
     if (!stable)
         AUDIT(unstableLoopVariable);
-    #endif
+#endif
 
     if (trashSelf) {
         debug_only_v(printf("Trashing tree from type instability.\n");)
-        return false;
+        return;
     }
 
     if (stable && demote) {
         JS_ASSERT(fragment->kind == LoopTrace);
-        return false;
+        return;
     }
 
     if (!stable) {
         fragment->lastIns = lir->insGuard(LIR_x, lir->insImm(1), exitIns);
 
         /*
          * If we didn't find a type stable peer, we compile the loop anyway and
          * hope it becomes stable later.
@@ -2629,43 +2575,32 @@ TraceRecorder::closeLoop(JSTraceMonitor*
              */
             debug_only_v(printf("Trace has unstable loop variable with no stable peer, "
                                 "compiling anyway.\n");)
             UnstableExit* uexit = new UnstableExit;
             uexit->fragment = fragment;
             uexit->exit = exit;
             uexit->next = treeInfo->unstableExits;
             treeInfo->unstableExits = uexit;
-
-            /*
-             * If we walked out of a loop, this exit is wrong. We need to back
-             * up to the if operation.
-             */
-            if (walkedOutOfLoop()) {
-                exit->pc = terminate_pc;
-                exit->imacpc = terminate_imacpc;
-            }
         } else {
             JS_ASSERT(peer->code());
             exit->target = peer;
             debug_only_v(printf("Joining type-unstable trace to target fragment %p.\n", (void*)peer);)
             stable = true;
             ((TreeInfo*)peer->vmprivate)->dependentTrees.addUnique(fragment->root);
             treeInfo->linkedTrees.addUnique(peer);
         }
-
-        compile(tm);
     } else {
         exit->target = fragment->root;
         fragment->lastIns = lir->insGuard(LIR_loop, lir->insImm(1), exitIns);
-        compile(tm);
-    }
+    }
+    compile(tm);
 
     if (fragmento->assm()->error() != nanojit::None)
-        return false;
+        return;
 
     joinEdgesToEntry(fragmento, peer_root);
 
     debug_only_v(printf("updating specializations on dependent and linked trees\n"))
     if (fragment->root->vmprivate)
         specializeTreesToMissingGlobals(cx, (TreeInfo*)fragment->root->vmprivate);
 
     /* 
@@ -2674,17 +2609,16 @@ TraceRecorder::closeLoop(JSTraceMonitor*
      */
     if (outer)
         js_AttemptCompilation(tm, globalObj, outer);
     
     debug_only_v(printf("recording completed at %s:%u@%u via closeLoop\n",
                         cx->fp->script->filename,
                         js_FramePCToLineNumber(cx, cx->fp),
                         FramePCOffset(cx->fp));)
-    return true;
 }
 
 JS_REQUIRES_STACK void
 TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, Fragment* peer_root)
 {
     if (fragment->kind == LoopTrace) {
         TreeInfo* ti;
         Fragment* peer;
@@ -2880,65 +2814,98 @@ TraceRecorder::trackCfgMerges(jsbytecode
         } else if (SN_TYPE(sn) == SRC_IF_ELSE)
             cfgMerges.add(pc + js_GetSrcNoteOffset(sn, 0));
     }
 }
 
 /* Invert the direction of the guard if this is a loop edge that is not
    taken (thin loop). */
 JS_REQUIRES_STACK void
-TraceRecorder::flipIf(jsbytecode* pc, bool& cond)
-{
+TraceRecorder::emitIf(jsbytecode* pc, bool cond, LIns* x)
+{
+    ExitType exitType;
     if (js_IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) {
-        switch (*pc) {
-          case JSOP_IFEQ:
-          case JSOP_IFEQX:
-            if (!cond)
-                return;
-            break;
-          case JSOP_IFNE:
-          case JSOP_IFNEX:
-            if (cond)
-                return;
-            break;
-          default:
-            JS_NOT_REACHED("flipIf");
+        exitType = LOOP_EXIT;
+
+        /*
+         * If we are about to walk out of the loop, generate code for the inverse loop
+         * condition, pretending we recorded the case that stays on trace.
+         */
+        if ((*pc == JSOP_IFEQ || *pc == JSOP_IFEQX) == cond) {
+            JS_ASSERT(*pc == JSOP_IFNE || *pc == JSOP_IFNEX || *pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
+            debug_only_v(printf("Walking out of the loop, terminating it anyway.\n");)
+            cond = !cond;
         }
-        /* We are about to walk out of the loop, so terminate it with
-           an inverse loop condition. */
-        debug_only_v(printf("Walking out of the loop, terminating it anyway.\n");)
-        cond = !cond;
-        terminate = true;
-        /* If when we get to closeLoop the tree is decided to be type unstable, we need to
-           reverse this logic because the loop won't be closed after all.  Store the real
-           value of the IP the interpreter expects, so we can use it in our final LIR_x.
+
+        /*
+         * Conditional guards do not have to be emitted if the condition is constant. We
+         * make a note whether the loop condition is true or false here, so we later know
+         * whether to emit a loop edge or a loop end.
          */
-        if (*pc == JSOP_IFEQX || *pc == JSOP_IFNEX)
-            pc += GET_JUMPX_OFFSET(pc);
-        else
-            pc += GET_JUMP_OFFSET(pc);
-        terminate_pc = pc;
-        terminate_imacpc = cx->fp->imacpc;
-    }
+        if (x->isconst()) {
+            loop = (x->constval() == cond);
+            return;
+        }
+    } else {
+        exitType = BRANCH_EXIT;
+    }
+    if (!x->isconst())
+        guard(cond, x, exitType);
 }
 
 /* Emit code for a fused IFEQ/IFNE. */
 JS_REQUIRES_STACK void
 TraceRecorder::fuseIf(jsbytecode* pc, bool cond, LIns* x)
 {
-    if (x->isconst()) // no need to guard if condition is constant
-        return;
-    if (*pc == JSOP_IFEQ) {
-        flipIf(pc, cond);
-        guard(cond, x, BRANCH_EXIT);
-        trackCfgMerges(pc);
-    } else if (*pc == JSOP_IFNE) {
-        flipIf(pc, cond);
-        guard(cond, x, BRANCH_EXIT);
-    }
+    if (*pc == JSOP_IFEQ || *pc == JSOP_IFNE) {
+        emitIf(pc, cond, x);
+        if (*pc == JSOP_IFEQ)
+            trackCfgMerges(pc);
+    }
+}
+
+/* Check whether we have reached the end of the trace. */
+JS_REQUIRES_STACK bool
+TraceRecorder::checkTraceEnd(jsbytecode *pc)
+{
+    if (js_IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) {
+        /*
+         * If we compile a loop, the trace should have a zero stack balance at the loop
+         * edge. Currently we are parked on a comparison op or IFNE/IFEQ, so advance
+         * pc to the loop header and adjust the stack pointer and pretend we have
+         * reached the loop header.
+         */
+        if (loop) {
+            JS_ASSERT(!cx->fp->imacpc && (pc == cx->fp->regs->pc || pc == cx->fp->regs->pc + 1));
+            bool fused = pc != cx->fp->regs->pc;
+            JSFrameRegs orig = *cx->fp->regs;
+
+            cx->fp->regs->pc = (jsbytecode*)fragment->root->ip;
+            cx->fp->regs->sp -= fused ? 2 : 1;
+
+            bool demote = false;
+            closeLoop(traceMonitor, demote);
+
+            *cx->fp->regs = orig;
+
+            /*
+             * If compiling this loop generated new oracle information which will likely
+             * lead to a different compilation result, immediately trigger another
+             * compiler run. This is guaranteed to converge since the oracle only
+             * accumulates adverse information but never drops it (except when we
+             * flush it during garbage collection.)
+             */
+            if (demote)
+                js_AttemptCompilation(traceMonitor, globalObj, outer);
+        } else {
+            endLoop(traceMonitor);
+        }
+        return false;
+    }
+    return true;
 }
 
 bool
 TraceRecorder::hasMethod(JSObject* obj, jsid id)
 {
     if (!obj)
         return false;
 
@@ -3558,172 +3525,137 @@ js_AttemptToExtendTree(JSContext* cx, VM
     }
     return false;
 }
 
 static JS_REQUIRES_STACK VMSideExit*
 js_ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
                VMSideExit** innermostNestedGuardp);
 
-static JS_REQUIRES_STACK bool
-js_CloseLoop(JSContext* cx)
-{
-    JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-    Fragmento* fragmento = tm->fragmento;
-    TraceRecorder* r = tm->recorder;
-    JS_ASSERT(fragmento && r);
-    bool walkedOutOfLoop = r->walkedOutOfLoop();
-
-    if (fragmento->assm()->error()) {
-        js_AbortRecording(cx, "Error during recording");
-        return false;
-    }
-
-    bool demote = false;
-    Fragment* f = r->getFragment();
-    TreeInfo* ti = r->getTreeInfo();
-    uint32 globalShape = ti->globalShape;
-    SlotList* globalSlots = ti->globalSlots;
-    r->closeLoop(tm, demote);
-
-    /*
-     * If js_DeleteRecorder flushed the code cache, we can't rely on f any more.
-     */
-    if (!js_DeleteRecorder(cx))
-        return false;
-
-    /*
-     * If we just walked out of a thin loop, we can't immediately start the
-     * compiler again here since we didn't return to the loop header.
-     */
-    if (demote && !walkedOutOfLoop)
-        return js_RecordTree(cx, tm, f, NULL, globalShape, globalSlots);
-    return false;
-}
-
 JS_REQUIRES_STACK bool
 js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
 {
 #ifdef JS_THREADSAFE
     if (OBJ_SCOPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain))->title.ownercx != cx) {
         js_AbortRecording(cx, "Global object not owned by this context");
         return false; /* we stay away from shared global objects */
     }
 #endif
+
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
     TreeInfo* ti = r->getTreeInfo();
+
     /* Process deep abort requests. */
     if (r->wasDeepAborted()) {
         js_AbortRecording(cx, "deep abort requested");
         return false;
     }
-    /* If we hit our own loop header, close the loop and compile the trace. */
-    if (r->isLoopHeader(cx))
-        return js_CloseLoop(cx);
-    /* does this branch go to an inner loop? */
+
+    JS_ASSERT(r->getFragment() && !r->getFragment()->lastIns);
+
+    /* Does this branch go to an inner loop? */
     Fragment* f = getLoop(&JS_TRACE_MONITOR(cx), cx->fp->regs->pc, ti->globalShape);
-    if (nesting_enabled && f) {
-
-        /* Cannot handle treecalls with callDepth > 0 and argc > nargs, see bug 480244. */
-        if (r->getCallDepth() > 0 && 
-            cx->fp->argc > cx->fp->fun->nargs) {
-            js_AbortRecording(cx, "Can't call inner tree with extra args in pending frame");
-            return false;
-        }
-
-        /* Make sure inner tree call will not run into an out-of-memory condition. */
-        if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) &&
-            !js_ReplenishReservedPool(cx, tm)) {
-            js_AbortRecording(cx, "Couldn't call inner tree (out of memory)");
-            return false;
-        }
-
-        /* Make sure the shape of the global object still matches (this might flush
-           the JIT cache). */
-        JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
-        uint32 globalShape = -1;
-        SlotList* globalSlots = NULL;
-        if (!js_CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) {
-            js_AbortRecording(cx, "Couldn't call inner tree (prep failed)");
-            return false;
-        }
-
-        debug_only_v(printf("Looking for type-compatible peer (%s:%d@%d)\n",
-                            cx->fp->script->filename,
-                            js_FramePCToLineNumber(cx, cx->fp),
-                            FramePCOffset(cx->fp));)
-
-        /* Find an acceptable peer, make sure our types fit. */
-        Fragment* empty;
-        bool success = false;
-
-        f = r->findNestedCompatiblePeer(f, &empty);
-        if (f && f->code())
-            success = r->adjustCallerTypes(f);
-
-        if (!success) {
-            AUDIT(noCompatInnerTrees);
-
-            jsbytecode* outer = (jsbytecode*)tm->recorder->getFragment()->root->ip;
-            js_AbortRecording(cx, "No compatible inner tree");
-
-            f = empty;
+    if (!f) {
+        /* Not an inner loop we can call, abort trace. */
+        AUDIT(returnToDifferentLoopHeader);
+        JS_ASSERT(!cx->fp->imacpc);
+        debug_only_v(printf("loop edge to %d, header %d\n",
+                            cx->fp->regs->pc - cx->fp->script->code,
+                            (jsbytecode*)r->getFragment()->root->ip - cx->fp->script->code));
+        js_AbortRecording(cx, "Loop edge does not return to header");
+        return false;
+    }
+
+    /* Cannot handle treecalls with callDepth > 0 and argc > nargs, see bug 480244. */
+    if (r->getCallDepth() > 0 && cx->fp->argc > cx->fp->fun->nargs) {
+        js_AbortRecording(cx, "Can't call inner tree with extra args in pending frame");
+        return false;
+    }
+
+    /* Make sure inner tree call will not run into an out-of-memory condition. */
+    if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) &&
+        !js_ReplenishReservedPool(cx, tm)) {
+        js_AbortRecording(cx, "Couldn't call inner tree (out of memory)");
+        return false;
+    }
+
+    /* Make sure the shape of the global object still matches (this might flush
+       the JIT cache). */
+    JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
+    uint32 globalShape = -1;
+    SlotList* globalSlots = NULL;
+    if (!js_CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) {
+        js_AbortRecording(cx, "Couldn't call inner tree (prep failed)");
+        return false;
+    }
+
+    debug_only_v(printf("Looking for type-compatible peer (%s:%d@%d)\n",
+                        cx->fp->script->filename,
+                        js_FramePCToLineNumber(cx, cx->fp),
+                        FramePCOffset(cx->fp));)
+
+    /* Find an acceptable peer, make sure our types fit. */
+    Fragment* empty;
+    bool success = false;
+
+    f = r->findNestedCompatiblePeer(f, &empty);
+    if (f && f->code())
+        success = r->adjustCallerTypes(f);
+
+    if (!success) {
+        AUDIT(noCompatInnerTrees);
+
+        jsbytecode* outer = (jsbytecode*)tm->recorder->getFragment()->root->ip;
+        js_AbortRecording(cx, "No compatible inner tree");
+
+        f = empty;
+        if (!f) {
+            f = getAnchor(tm, cx->fp->regs->pc, globalShape);
             if (!f) {
-                f = getAnchor(tm, cx->fp->regs->pc, globalShape);
-                if (!f) {
-                    js_FlushJITCache(cx);
-                    return false;
-                }
-            }
-            return js_RecordTree(cx, tm, f, outer, globalShape, globalSlots);
-        }
-
-        r->prepareTreeCall(f);
-        VMSideExit* innermostNestedGuard = NULL;
-        VMSideExit* lr = js_ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard);
-        if (!lr || r->wasDeepAborted()) {
-            if (!lr)
-                js_AbortRecording(cx, "Couldn't call inner tree");
-            return false;
-        }
-        jsbytecode* outer = (jsbytecode*)tm->recorder->getFragment()->root->ip;
-        switch (lr->exitType) {
-          case LOOP_EXIT:
-            /* If the inner tree exited on an unknown loop exit, grow the tree around it. */
-            if (innermostNestedGuard) {
-                js_AbortRecording(cx, "Inner tree took different side exit, abort current "
-                                      "recording and grow nesting tree");
-                return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, outer);
+                js_FlushJITCache(cx);
+                return false;
             }
-            /* emit a call to the inner tree and continue recording the outer tree trace */
-            r->emitTreeCall(f, lr);
-            return true;
-        case UNSTABLE_LOOP_EXIT:
-            /* abort recording so the inner loop can become type stable. */
-            js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording");
-            return js_AttemptToStabilizeTree(cx, lr, outer);
-        case BRANCH_EXIT:
-            /* abort recording the outer tree, extend the inner tree */
-            js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
-            return js_AttemptToExtendTree(cx, lr, NULL, outer);
-        default:
-            debug_only_v(printf("exit_type=%d\n", lr->exitType);)
+        }
+        return js_RecordTree(cx, tm, f, outer, globalShape, globalSlots);
+    }
+
+    r->prepareTreeCall(f);
+    VMSideExit* innermostNestedGuard = NULL;
+    VMSideExit* lr = js_ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard);
+    if (!lr || r->wasDeepAborted()) {
+        if (!lr)
+            js_AbortRecording(cx, "Couldn't call inner tree");
+        return false;
+    }
+
+    jsbytecode* outer = (jsbytecode*)tm->recorder->getFragment()->root->ip;
+    switch (lr->exitType) {
+      case LOOP_EXIT:
+        /* If the inner tree exited on an unknown loop exit, grow the tree around it. */
+        if (innermostNestedGuard) {
+            js_AbortRecording(cx, "Inner tree took different side exit, abort current "
+                              "recording and grow nesting tree");
+            return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, outer);
+        }
+        /* emit a call to the inner tree and continue recording the outer tree trace */
+        r->emitTreeCall(f, lr);
+        return true;
+      case UNSTABLE_LOOP_EXIT:
+        /* abort recording so the inner loop can become type stable. */
+        js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording");
+        return js_AttemptToStabilizeTree(cx, lr, outer);
+      case BRANCH_EXIT:
+        /* abort recording the outer tree, extend the inner tree */
+        js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
+        return js_AttemptToExtendTree(cx, lr, NULL, outer);
+      default:
+        debug_only_v(printf("exit_type=%d\n", lr->exitType);)
             js_AbortRecording(cx, "Inner tree not suitable for calling");
-            return false;
-        }
-    }
-
-    /* not returning to our own loop header, not an inner loop we can call, abort trace */
-    AUDIT(returnToDifferentLoopHeader);
-    JS_ASSERT(!cx->fp->imacpc);
-    debug_only_v(printf("loop edge to %d, header %d\n",
-                 cx->fp->regs->pc - cx->fp->script->code,
-                 (jsbytecode*)r->getFragment()->root->ip - cx->fp->script->code));
-    js_AbortRecording(cx, "Loop edge does not return to header");
-    return false;
+        return false;
+    }
 }
 
 static bool
 js_IsEntryTypeCompatible(jsval* vp, uint8* m)
 {
     unsigned tag = JSVAL_TAG(*vp);
 
     debug_only_v(printf("%c/%c ", tagChar[tag], typeChar[*m]);)
@@ -4342,113 +4274,101 @@ js_MonitorLoopEdge(JSContext* cx, uintN&
         /* No, this was an unusual exit (i.e. out of memory/GC), so just resume interpretation. */
         return false;
     }
 }
 
 JS_REQUIRES_STACK JSMonitorRecordingStatus
 TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
 {
-    if (tr->lirbuf->outOMem()) {
-        js_AbortRecording(cx, "no more LIR memory");
-        js_FlushJITCache(cx);
-        return JSMRS_STOP;
-    }
-
     /* Process deepAbort() requests now. */
     if (tr->wasDeepAborted()) {
         js_AbortRecording(cx, "deep abort requested");
         return JSMRS_STOP;
     }
 
-    if (tr->walkedOutOfLoop()) {
-        if (!js_CloseLoop(cx))
-            return JSMRS_STOP;
-    } else {
-        /* Clear one-shot state used to communicate between record_JSOP_CALL and post-
-           opcode-case-guts record hook (record_FastNativeCallComplete). */
-        tr->pendingTraceableNative = NULL;
-
-        jsbytecode* pc = cx->fp->regs->pc;
-
-        /* If we hit a break, end the loop and generate an always taken loop exit guard.
-           For other downward gotos (like if/else) continue recording. */
-        if (*pc == JSOP_GOTO || *pc == JSOP_GOTOX) {
-            jssrcnote* sn = js_GetSrcNote(cx->fp->script, pc);
-            if (sn && SN_TYPE(sn) == SRC_BREAK) {
-                AUDIT(breakLoopExits);
-                tr->endLoop(&JS_TRACE_MONITOR(cx));
-                js_DeleteRecorder(cx);
-                return JSMRS_STOP; /* done recording */
-            }
-        }
-
-        /* An explicit return from callDepth 0 should end the loop, not abort it. */
-        if (*pc == JSOP_RETURN && tr->callDepth == 0) {
-            AUDIT(returnLoopExits);
-            tr->endLoop(&JS_TRACE_MONITOR(cx));
-            js_DeleteRecorder(cx);
-            return JSMRS_STOP; /* done recording */
-        }
-
-#ifdef NANOJIT_IA32
-        /* Handle tableswitches specially -- prepare a jump table if needed. */
-        if (*pc == JSOP_TABLESWITCH || *pc == JSOP_TABLESWITCHX) {
-            LIns* guardIns = tr->tableswitch();
-            if (guardIns) {
-                tr->fragment->lastIns = guardIns;
-                tr->compile(&JS_TRACE_MONITOR(cx));
-                js_DeleteRecorder(cx);
-                return JSMRS_STOP;
-            }
-        }
-#endif
-    }
+    JS_ASSERT(!tr->fragment->lastIns);
+
+    /*
+     * Clear one-shot state used to communicate between record_JSOP_CALL and post-
+     * opcode-case-guts record hook (record_FastNativeCallComplete).
+     */
+    tr->pendingTraceableNative = NULL;
+
+    debug_only_v(js_Disassemble1(cx, cx->fp->script, cx->fp->regs->pc,
+                                 (cx->fp->imacpc)
+                                 ? 0
+                                 : cx->fp->regs->pc - cx->fp->script->code,
+                                 !cx->fp->imacpc, stdout);)
 
     /* If op is not a break or a return from a loop, continue recording and follow the
        trace. We check for imacro-calling bytecodes inside each switch case to resolve
        the if (JSOP_IS_IMACOP(x)) conditions at compile time. */
 
     bool flag;
+#ifdef DEBUG
+    bool wasInImacro = (cx->fp->imacpc != NULL);
+#endif
     switch (op) {
-      default: goto abort_recording;
+      default: goto stop_recording;
 # define OPDEF(x,val,name,token,length,nuses,ndefs,prec,format)               \
       case x:                                                                 \
-        debug_only_v(                                                         \
-            js_Disassemble1(cx, cx->fp->script, cx->fp->regs->pc,             \
-                            (cx->fp->imacpc)                                  \
-                            ? 0                                               \
-                            : cx->fp->regs->pc - cx->fp->script->code,        \
-                            !cx->fp->imacpc, stdout);)                        \
         flag = tr->record_##x();                                              \
         if (JSOP_IS_IMACOP(x))                                                \
             goto imacro;                                                      \
         break;
 # include "jsopcode.tbl"
 # undef OPDEF
     }
 
+    JS_ASSERT_IF(!wasInImacro, cx->fp->imacpc == NULL);
+
+    /* Process deepAbort() requests now. */
+    if (tr->wasDeepAborted()) {
+        js_AbortRecording(cx, "deep abort requested");
+        return JSMRS_STOP;
+    }
+
+    if (JS_TRACE_MONITOR(cx).fragmento->assm()->error()) {
+        js_AbortRecording(cx, "error during recording");
+        return JSMRS_STOP;
+    }
+
+    if (tr->lirbuf->outOMem()) {
+        js_AbortRecording(cx, "no more LIR memory");
+        js_FlushJITCache(cx);
+        return JSMRS_STOP;
+    }
+
     if (flag)
         return JSMRS_CONTINUE;
-    goto abort_recording;
+
+    goto stop_recording;
 
   imacro:
     /* We save macro-generated code size also via bool TraceRecorder::record_JSOP_*
        return type, instead of a three-state: OK, ABORTED, IMACRO_STARTED. But the
        price of this is the JSFRAME_IMACRO_START frame flag. We need one more bit
        to detect that TraceRecorder::call_imacro was invoked by the record_JSOP_
        method. */
     if (flag)
         return JSMRS_CONTINUE;
     if (cx->fp->flags & JSFRAME_IMACRO_START) {
         cx->fp->flags &= ~JSFRAME_IMACRO_START;
         return JSMRS_IMACRO;
     }
 
-  abort_recording:
+  stop_recording:
+    /* If we recorded the end of the trace, destroy the recorder now. */
+    if (tr->fragment->lastIns) {
+        js_DeleteRecorder(cx);
+        return JSMRS_STOP;
+    }
+
+    /* Looks like we encountered an error condition. Abort recording. */
     js_AbortRecording(cx, js_CodeName[op]);
     return JSMRS_STOP;
 }
 
 JS_REQUIRES_STACK void
 js_AbortRecording(JSContext* cx, const char* reason)
 {
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
@@ -4524,26 +4444,149 @@ js_CheckForSSE2()
         : /* We have no inputs */
         : "%eax", "%ecx"
        );
 #endif
     return (features & (1<<26)) != 0;
 }
 #endif
 
+#if defined(NANOJIT_ARM)
+
+#if defined(_MSC_VER) && defined(WINCE)
+
+// these come in from jswince.asm
+extern "C" int js_arm_try_armv6t2_op();
+extern "C" int js_arm_try_vfp_op();
+
+static bool
+js_arm_check_armv6t2() {
+    bool ret = false;
+    __try {
+        js_arm_try_armv6t2_op();
+        ret = true;
+    } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+        ret = false;
+    }
+    return ret;
+}
+
+static bool
+js_arm_check_vfp() {
+    bool ret = false;
+    __try {
+        js_arm_try_vfp_op();
+        ret = true;
+    } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
+        ret = false;
+    }
+    return ret;
+}
+
+#elif defined(__GNUC__) && defined(AVMPLUS_LINUX)
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <string.h>
+#include <elf.h>
+
+static bool arm_has_v7 = false;
+static bool arm_has_v6 = false;
+static bool arm_has_vfp = false;
+static bool arm_has_neon = false;
+static bool arm_has_iwmmxt = false;
+static bool arm_tests_initialized = false;
+
+static void
+arm_read_auxv() {
+    int fd;
+    Elf32_auxv_t aux;
+
+    fd = open("/proc/self/auxv", O_RDONLY);
+    if (fd > 0) {
+        while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
+            if (aux.a_type == AT_HWCAP) {
+                uint32_t hwcap = aux.a_un.a_val;
+                if (getenv("ARM_FORCE_HWCAP"))
+                    hwcap = strtoul(getenv("ARM_FORCE_HWCAP"), NULL, 0);
+                // hardcode these values to avoid depending on specific versions
+                // of the hwcap header, e.g. HWCAP_NEON
+                arm_has_vfp = (hwcap & 64) != 0;
+                arm_has_iwmmxt = (hwcap & 512) != 0;
+                // this flag is only present on kernel 2.6.29
+                arm_has_neon = (hwcap & 4096) != 0;
+            } else if (aux.a_type == AT_PLATFORM) {
+                const char *plat = (const char*) aux.a_un.a_val;
+                if (getenv("ARM_FORCE_PLATFORM"))
+                    plat = getenv("ARM_FORCE_PLATFORM");
+                if (strncmp(plat, "v7l", 3) == 0) {
+                    arm_has_v7 = true;
+                    arm_has_v6 = true;
+                } else if (strncmp(plat, "v6l", 3) == 0) {
+                    arm_has_v6 = true;
+                }
+            }
+        }
+        close (fd);
+
+        // if we don't have 2.6.29, we have to do this hack; set
+        // the env var to trust HWCAP.
+        if (!getenv("ARM_TRUST_HWCAP") && arm_has_v7)
+            arm_has_neon = true;
+    }
+
+    arm_tests_initialized = true;
+}
+
+static bool
+js_arm_check_armv6t2() {
+    if (!arm_tests_initialized)
+        arm_read_auxv();
+
+    return arm_has_v7;
+}
+
+static bool
+js_arm_check_vfp() {
+    if (!arm_tests_initialized)
+        arm_read_auxv();
+
+    return arm_has_vfp;
+}
+
+#else
+#warning Not sure how to check for armv6t2 and vfp on your platform, assuming neither present.
+static bool
+js_arm_check_armv6t2() { return false; }
+static bool
+js_arm_check_vfp() { return false; }
+#endif
+
+#endif /* NANOJIT_ARM */
+
 void
 js_InitJIT(JSTraceMonitor *tm)
 {
+    if (!did_we_check_processor_features) {
 #if defined NANOJIT_IA32
-    if (!did_we_check_sse2) {
         avmplus::AvmCore::config.use_cmov =
         avmplus::AvmCore::config.sse2 = js_CheckForSSE2();
-        did_we_check_sse2 = true;
-    }
 #endif
+#if defined NANOJIT_ARM
+        avmplus::AvmCore::config.vfp = js_arm_check_vfp();
+        avmplus::AvmCore::config.soft_float = !avmplus::AvmCore::config.vfp;
+        avmplus::AvmCore::config.v6t2 = js_arm_check_armv6t2();
+#endif
+        did_we_check_processor_features = true;
+    }
+
     if (!tm->fragmento) {
         JS_ASSERT(!tm->reservedDoublePool);
         Fragmento* fragmento = new (&gc) Fragmento(core, 24);
         verbose_only(fragmento->labels = new (&gc) LabelMap(core, NULL);)
         tm->fragmento = fragmento;
         tm->lirbuf = new (&gc) LirBuffer(fragmento, NULL);
 #ifdef DEBUG
         tm->lirbuf->names = new (&gc) LirNameMap(&gc, NULL, tm->fragmento->labels);
@@ -4636,29 +4679,27 @@ TraceRecorder::popAbortStack()
 
     JS_ASSERT(tm->abortStack == this);
 
     tm->abortStack = nextRecorderToAbort;
     nextRecorderToAbort = NULL;
 }
 
 void
-js_FlushJITOracle(JSContext* cx)
-{
-    if (!TRACING_ENABLED(cx))
-        return;
+js_PurgeJITOracle()
+{
     oracle.clear();
 }
 
 JS_REQUIRES_STACK void
-js_FlushScriptFragments(JSContext* cx, JSScript* script)
+js_PurgeScriptFragments(JSContext* cx, JSScript* script)
 {
     if (!TRACING_ENABLED(cx))
         return;
-    debug_only_v(printf("Flushing fragments for JSScript %p.\n", (void*)script);)
+    debug_only_v(printf("Purging fragments for JSScript %p.\n", (void*)script);)
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
     for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
         for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
             /* Disable future use of any script-associated VMFragment.*/
             if (JS_UPTRDIFF((*f)->ip, script->code) < script->length) {
                 debug_only_v(printf("Disconnecting VMFragment %p "
                                     "with ip %p, in range [%p,%p).\n",
                                     (void*)(*f), (*f)->ip, script->code,
@@ -4986,25 +5027,21 @@ TraceRecorder::call_imacro(jsbytecode* i
 JS_REQUIRES_STACK bool
 TraceRecorder::ifop()
 {
     jsval& v = stackval(-1);
     LIns* v_ins = get(&v);
     bool cond;
     LIns* x;
 
-    /* No need to guard if condition is constant. */
-    if (v_ins->isconst() || v_ins->isconstq())
-        return true;
-
-    /* No need to guard if type strictly determines true or false value. */
-    if (JSVAL_TAG(v) == JSVAL_OBJECT)
-        return true;
-
-    if (JSVAL_TAG(v) == JSVAL_BOOLEAN) {
+    /* Objects always evaluate to true since we specialize the Null type on trace. */
+    if (JSVAL_TAG(v) == JSVAL_OBJECT) {
+        cond = true;
+        x = lir->insImm(1);
+    } else if (JSVAL_TAG(v) == JSVAL_BOOLEAN) {
         /* Test for boolean is true, negate later if we are testing for false. */
         cond = JSVAL_TO_PSEUDO_BOOLEAN(v) == JS_TRUE;
         x = lir->ins2i(LIR_eq, v_ins, 1);
     } else if (isNumber(v)) {
         jsdouble d = asNumber(v);
         cond = !JSDOUBLE_IS_NaN(d) && d;
         x = lir->ins2(LIR_and,
                       lir->ins2(LIR_feq, v_ins, v_ins),
@@ -5015,19 +5052,20 @@ TraceRecorder::ifop()
                       lir->insLoad(LIR_ldp,
                                    v_ins,
                                    (int)offsetof(JSString, length)),
                       INS_CONSTPTR(reinterpret_cast<void *>(JSSTRING_LENGTH_MASK)));
     } else {
         JS_NOT_REACHED("ifop");
         return false;
     }
-    flipIf(cx->fp->regs->pc, cond);
-    guard(cond, x, BRANCH_EXIT);
-    return true;
+
+    jsbytecode* pc = cx->fp->regs->pc;
+    emitIf(pc, cond, x);
+    return checkTraceEnd(pc);
 }
 
 #ifdef NANOJIT_IA32
 /* Record LIR for a tableswitch or tableswitchx op. We record LIR only the
    "first" time we hit the op. Later, when we start traces after exiting that
    trace, we just patch. */
 JS_REQUIRES_STACK LIns*
 TraceRecorder::tableswitch()
@@ -5380,31 +5418,41 @@ TraceRecorder::equalityHelper(jsval l, j
     /* If the operands aren't numbers, compare them as integers. */
     LOpcode op = fp ? LIR_feq : LIR_eq;
     LIns* x = lir->ins2(op, l_ins, r_ins);
     if (negate) {
         x = lir->ins_eq0(x);
         cond = !cond;
     }
 
+    jsbytecode* pc = cx->fp->regs->pc;
+
     /*
      * Don't guard if the same path is always taken.  If it isn't, we have to
      * fuse comparisons and the following branch, because the interpreter does
      * that.
      */
-    if (tryBranchAfterCond && !x->isconst())
-        fuseIf(cx->fp->regs->pc + 1, cond, x);
+    if (tryBranchAfterCond)
+        fuseIf(pc + 1, cond, x);
+
+    /*
+     * There is no need to write out the result of this comparison if the trace
+     * ends on this operation.
+     */
+    if ((pc[1] == JSOP_IFNE || pc[1] == JSOP_IFEQ) && !checkTraceEnd(pc + 1))
+        return false;
 
     /*
      * We update the stack after the guard. This is safe since the guard bails
      * out at the comparison and the interpreter will therefore re-execute the
      * comparison. This way the value of the condition doesn't have to be
      * calculated and saved on the stack in most cases.
      */
     set(&rval, x);
+
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond)
 {
     jsval& r = stackval(-1);
     jsval& l = stackval(-2);
@@ -5500,31 +5548,41 @@ TraceRecorder::relational(LOpcode op, bo
   do_comparison:
     /* If the result is not a number or it's not a quad, we must use an integer compare. */
     if (!fp) {
         JS_ASSERT(op >= LIR_feq && op <= LIR_fge);
         op = LOpcode(op + (LIR_eq - LIR_feq));
     }
     x = lir->ins2(op, l_ins, r_ins);
 
+    jsbytecode* pc = cx->fp->regs->pc;
+
     /*
      * Don't guard if the same path is always taken.  If it isn't, we have to
      * fuse comparisons and the following branch, because the interpreter does
      * that.
      */
-    if (tryBranchAfterCond && !x->isconst())
-        fuseIf(cx->fp->regs->pc + 1, cond, x);
+    if (tryBranchAfterCond)
+        fuseIf(pc + 1, cond, x);
+
+    /*
+     * There is no need to write out the result of this comparison if the trace
+     * ends on this operation.
+     */
+    if ((pc[1] == JSOP_IFNE || pc[1] == JSOP_IFEQ) && !checkTraceEnd(pc + 1))
+        return false;
 
     /*
      * We update the stack after the guard. This is safe since the guard bails
      * out at the comparison and the interpreter will therefore re-execute the
      * comparison. This way the value of the condition doesn't have to be
      * calculated and saved on the stack in most cases.
      */
     set(&l, x);
+
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::unary(LOpcode op)
 {
     jsval& v = stackval(-1);
     bool intop = !(op & LIR64);
@@ -6161,32 +6219,52 @@ JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_LEAVEWITH()
 {
     return false;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_RETURN()
 {
+    /* A return from callDepth 0 terminates the current loop. */
+    if (callDepth == 0) {
+        AUDIT(returnLoopExits);
+        endLoop(traceMonitor);
+        return false;
+    }
+
+    /* If we inlined this function call, make the return value available to the caller code. */
     jsval& rval = stackval(-1);
     JSStackFrame *fp = cx->fp;
     if ((cx->fp->flags & JSFRAME_CONSTRUCTING) && JSVAL_IS_PRIMITIVE(rval)) {
         JS_ASSERT(OBJECT_TO_JSVAL(fp->thisp) == fp->argv[-1]);
         rval_ins = get(&fp->argv[-1]);
     } else {
         rval_ins = get(&rval);
     }
     debug_only_v(printf("returning from %s\n", js_AtomToPrintableString(cx, cx->fp->fun->atom));)
     clearFrameSlotsFromCache();
+
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_GOTO()
 {
+    /*
+     * If we hit a break, end the loop and generate an always taken loop exit guard.
+     * For other downward gotos (like if/else) continue recording.
+     */
+    jssrcnote* sn = js_GetSrcNote(cx->fp->script, cx->fp->regs->pc);
+
+    if (sn && SN_TYPE(sn) == SRC_BREAK) {
+        AUDIT(breakLoopExits);
+        endLoop(traceMonitor);
+        return false;
+    }
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_IFEQ()
 {
     trackCfgMerges(cx->fp->regs->pc);
     return ifop();
@@ -6595,81 +6673,105 @@ TraceRecorder::newArray(JSObject *ctor, 
             box_jsval(argv[i], elt_ins);
             stobj_set_dslot(arr_ins, i, dslots_ins, elt_ins, "set_array_elt");
         }
     }
     set(rval, arr_ins);
     return true;
 }
 
-JS_REQUIRES_STACK bool
-TraceRecorder::functionCall(bool constructing, uintN argc)
-{
+bool
+TraceRecorder::newString(JSObject* ctor, jsval& arg, jsval* rval)
+{
+    if (!JSVAL_IS_PRIMITIVE(arg))
+        return call_imacro(new_imacros.String);
+
+    LIns* proto_ins;
+    if (!getClassPrototype(ctor, proto_ins))
+        return false;
+
+    LIns* args[] = { stringify(arg), proto_ins, cx_ins };
+    LIns* obj_ins = lir->insCall(&js_String_tn_ci, args);
+    guard(false, lir->ins_eq0(obj_ins), OOM_EXIT);
+
+    set(rval, obj_ins);
+    return true;
+}
+
+JS_REQUIRES_STACK bool
+TraceRecorder::emitNativeCall(JSTraceableNative* known, uintN argc, LIns* args[])
+{
+    bool constructing = known->flags & JSTN_CONSTRUCTOR;
+
+    if (JSTN_ERRTYPE(known) == FAIL_STATUS) {
+        // This needs to capture the pre-call state of the stack. So do not set
+        // pendingTraceableNative before taking this snapshot.
+        JS_ASSERT(!pendingTraceableNative);
+
+        // Take snapshot for deep LeaveTree and store it in cx->bailExit.
+        LIns* rec_ins = snapshot(DEEP_BAIL_EXIT);
+        GuardRecord* rec = (GuardRecord *) rec_ins->payload();
+        JS_ASSERT(rec->exit);
+        lir->insStorei(INS_CONSTPTR(rec->exit), cx_ins, offsetof(JSContext, bailExit));
+
+        // Tell nanojit not to discard or defer stack writes before this call.
+        lir->insGuard(LIR_xbarrier, rec_ins, rec_ins);
+    }
+
+    LIns* res_ins = lir->insCall(known->builtin, args);
+    if (!constructing)
+        rval_ins = res_ins;
+    switch (JSTN_ERRTYPE(known)) {
+      case FAIL_NULL:
+        guard(false, lir->ins_eq0(res_ins), OOM_EXIT);
+        break;
+      case FAIL_NEG:
+        res_ins = lir->ins1(LIR_i2f, res_ins);
+        guard(false, lir->ins2(LIR_flt, res_ins, lir->insImmq(0)), OOM_EXIT);
+        break;
+      case FAIL_VOID:
+        guard(false, lir->ins2i(LIR_eq, res_ins, JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)), OOM_EXIT);
+        break;
+      case FAIL_COOKIE:
+        guard(false, lir->ins2(LIR_eq, res_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT);
+        break;
+      default:;
+    }
+
+    set(&stackval(0 - (2 + argc)), res_ins);
+
+    if (!constructing) {
+        /*
+         * The return value will be processed by FastNativeCallComplete since
+         * we have to know the actual return value type for calls that return
+         * jsval (like Array_p_pop).
+         */
+        pendingTraceableNative = known;
+    }
+
+    return true;
+}
+
+/*
+ * Check whether we have a specialized implementation for this fast native invocation.
+ */
+JS_REQUIRES_STACK bool
+TraceRecorder::callTraceableNative(JSFunction* fun, uintN argc, bool constructing)
+{
+    JSTraceableNative* known = FUN_TRCINFO(fun);
+    JS_ASSERT(known && (JSFastNative)fun->u.n.native == known->native);
+
     JSStackFrame* fp = cx->fp;
     jsbytecode *pc = fp->regs->pc;
 
     jsval& fval = stackval(0 - (2 + argc));
-    JS_ASSERT(&fval >= StackBase(fp));
-
-    if (!VALUE_IS_FUNCTION(cx, fval))
-        ABORT_TRACE("callee is not a function");
-
-    jsval& tval = stackval(0 - (argc + 1));
+    jsval& tval = stackval(0 - (1 + argc));
+
     LIns* this_ins = get(&tval);
 
-    /*
-     * If this is NULL, this is a shapeless call. If we observe a shapeless call
-     * at recording time, the call at this point will always be shapeless so we
-     * can make the decision based on recording-time introspection of this.
-     */
-    if (tval == JSVAL_NULL && !guardCallee(fval))
-        return false;
-
-    /*
-     * Require that the callee be a function object, to avoid guarding on its
-     * class here. We know if the callee and this were pushed by JSOP_CALLNAME
-     * or JSOP_CALLPROP that callee is a *particular* function, since these hit
-     * the property cache and guard on the object (this) in which the callee
-     * was found. So it's sufficient to test here that the particular function
-     * is interpreted, not guard on that condition.
-     *
-     * Bytecode sequences that push shapeless callees must guard on the callee
-     * class being Function and the function being interpreted.
-     */
-    JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(fval));
-
-    if (FUN_INTERPRETED(fun)) {
-        if (constructing) {
-            LIns* args[] = { get(&fval), cx_ins };
-            LIns* tv_ins = lir->insCall(&js_NewInstance_ci, args);
-            guard(false, lir->ins_eq0(tv_ins), OOM_EXIT);
-            set(&tval, tv_ins);
-        }
-        return interpretedFunctionCall(fval, fun, argc, constructing);
-    }
-
-    if (FUN_SLOW_NATIVE(fun)) {
-        JSNative native = fun->u.n.native;
-        if (native == js_Array)
-            return newArray(FUN_OBJECT(fun), argc, &tval + 1, &fval);
-        if (native == js_String && argc == 1 && !constructing) {
-            jsval& v = stackval(0 - argc);
-            if (!JSVAL_IS_PRIMITIVE(v))
-                return call_imacro(call_imacros.String);
-            set(&fval, stringify(v));
-            return true;
-        }
-    }
-
-    if (!(fun->flags & JSFUN_TRACEABLE))
-        ABORT_TRACE("untraceable native");
-
-    JSTraceableNative* known = FUN_TRCINFO(fun);
-    JS_ASSERT(known && (JSFastNative)fun->u.n.native == known->native);
-
     LIns* args[5];
     do {
         if (((known->flags & JSTN_CONSTRUCTOR) != 0) != constructing)
             continue;
 
         uintN knownargc = strlen(known->argtypes);
         if (argc != knownargc)
             continue;
@@ -6749,75 +6851,134 @@ TraceRecorder::functionCall(bool constru
             }
             argp--;
         }
         goto success;
 
 next_specialization:;
     } while ((known++)->flags & JSTN_MORE);
 
-    if (!constructing)
-        ABORT_TRACE("unknown native");
-    if (!(fun->flags & JSFUN_TRACEABLE) && FUN_CLASP(fun))
-        ABORT_TRACE("can't trace native constructor");
-    ABORT_TRACE("can't trace unknown constructor");
+    return false;
 
 success:
 #if defined _DEBUG
     JS_ASSERT(args[0] != (LIns *)0xcdcdcdcd);
 #endif
 
-    if (JSTN_ERRTYPE(known) == FAIL_STATUS) {
-        // This needs to capture the pre-call state of the stack. So do not set
-        // pendingTraceableNative before taking this snapshot.
-        JS_ASSERT(!pendingTraceableNative);
-
-        // Take snapshot for deep LeaveTree and store it in cx->bailExit.
-        LIns* rec_ins = snapshot(DEEP_BAIL_EXIT);
-        GuardRecord* rec = (GuardRecord *) rec_ins->payload();
-        JS_ASSERT(rec->exit);
-        lir->insStorei(INS_CONSTPTR(rec->exit), cx_ins, offsetof(JSContext, bailExit));
-
-        // Tell nanojit not to discard or defer stack writes before this call.
-        lir->insGuard(LIR_xbarrier, rec_ins, rec_ins);
-    }
-
-    LIns* res_ins = lir->insCall(known->builtin, args);
-    if (!constructing)
-        rval_ins = res_ins;
-    switch (JSTN_ERRTYPE(known)) {
-      case FAIL_NULL:
-        guard(false, lir->ins_eq0(res_ins), OOM_EXIT);
-        break;
-      case FAIL_NEG:
-      {
-        res_ins = lir->ins1(LIR_i2f, res_ins);
-        guard(false, lir->ins2(LIR_flt, res_ins, lir->insImmq(0)), OOM_EXIT);
-        break;
-      }
-      case FAIL_VOID:
-        guard(false, lir->ins2i(LIR_eq, res_ins, JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID)), OOM_EXIT);
-        break;
-      case FAIL_COOKIE:
-        guard(false, lir->ins2(LIR_eq, res_ins, INS_CONST(JSVAL_ERROR_COOKIE)), OOM_EXIT);
-        break;
-      default:;
-    }
-    set(&fval, res_ins);
-
-    if (!constructing) {
-        /*
-         * The return value will be processed by FastNativeCallComplete since
-         * we have to know the actual return value type for calls that return
-         * jsval (like Array_p_pop).
-         */
-        pendingTraceableNative = known;
-    }
-
-    return true;
+    return emitNativeCall(known, argc, args);
+}
+
+bool
+TraceRecorder::callNative(JSFunction* fun, uintN argc, bool constructing)
+{
+    if (fun->flags & JSFUN_TRACEABLE) {
+        if (callTraceableNative(fun, argc, constructing))
+            return true;
+    }
+
+    if (!(fun->flags & JSFUN_FAST_NATIVE))
+        ABORT_TRACE("untraceable slow native");
+
+    if (constructing)
+        ABORT_TRACE("untraceable fast native constructor");
+
+    jsval* vp = &stackval(0 - (2 + argc));
+    invokevp_ins = lir->insAlloc((2 + argc) * sizeof(jsval));
+
+    /*
+     * For a very long argument list we might run out of LIR space, so better check while
+     * looping over the argument list.
+     */
+    for (jsint n = 0; n < jsint(2 + argc) && !lirbuf->outOMem(); ++n) {
+        LIns* i = get(&vp[n]);
+        box_jsval(vp[n], i);
+        lir->insStorei(i, invokevp_ins, n * sizeof(jsval));
+    }
+
+    LIns* args[] = { invokevp_ins, lir->insImm(argc), cx_ins };
+
+    CallInfo* ci = (CallInfo*) lir->skip(sizeof(struct CallInfo))->payload();
+    ci->_address = uintptr_t(fun->u.n.native);
+    ci->_argtypes = ARGSIZE_LO | ARGSIZE_LO << 2 | ARGSIZE_LO << 4 | ARGSIZE_LO << 6;
+    ci->_cse = ci->_fold = 0;
+    ci->_abi = ABI_CDECL;
+#ifdef DEBUG
+    ci->_name = "JSFastNative";
+#endif
+
+    // Generate a JSTraceableNative structure on the fly.
+    generatedTraceableNative->builtin = ci;
+    generatedTraceableNative->native = (JSFastNative)fun->u.n.native;
+    generatedTraceableNative->flags = FAIL_STATUS | JSTN_UNBOX_AFTER;
+    generatedTraceableNative->prefix = generatedTraceableNative->argtypes = NULL;
+
+    // argc is the original argc here. It is used to calculate where to place the return value.
+    return emitNativeCall(generatedTraceableNative, argc, args);
+}
+
+JS_REQUIRES_STACK bool
+TraceRecorder::functionCall(bool constructing, uintN argc)
+{
+    jsval& fval = stackval(0 - (2 + argc));
+    JS_ASSERT(&fval >= StackBase(cx->fp));
+
+    if (!VALUE_IS_FUNCTION(cx, fval))
+        ABORT_TRACE("callee is not a function");
+
+    jsval& tval = stackval(0 - (1 + argc));
+
+    /*
+     * If callee is not constant, it's a shapeless call and we have to guard
+     * explicitly that we will get this callee again at runtime.
+     */
+    if (!get(&fval)->isconst() && !guardCallee(fval))
+        return false;
+
+    /*
+     * Require that the callee be a function object, to avoid guarding on its
+     * class here. We know if the callee and this were pushed by JSOP_CALLNAME
+     * or JSOP_CALLPROP that callee is a *particular* function, since these hit
+     * the property cache and guard on the object (this) in which the callee
+     * was found. So it's sufficient to test here that the particular function
+     * is interpreted, not guard on that condition.
+     *
+     * Bytecode sequences that push shapeless callees must guard on the callee
+     * class being Function and the function being interpreted.
+     */
+    JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(fval));
+
+    if (FUN_INTERPRETED(fun)) {
+        if (constructing) {
+            LIns* args[] = { get(&fval), cx_ins };
+            LIns* tv_ins = lir->insCall(&js_NewInstance_ci, args);
+            guard(false, lir->ins_eq0(tv_ins), OOM_EXIT);
+            set(&tval, tv_ins);
+        }
+        return interpretedFunctionCall(fval, fun, argc, constructing);
+    }
+
+    if (FUN_SLOW_NATIVE(fun)) {
+        JSNative native = fun->u.n.native;
+        if (native == js_Array)
+            return newArray(JSVAL_TO_OBJECT(fval), argc, &tval + 1, &fval);
+        if (native == js_String) {
+            if (argc != 1)
+                ABORT_TRACE("can't trace String when not called with a single argument");
+
+            jsval& v = stackval(0 - argc);
+            if (constructing)
+                return newString(JSVAL_TO_OBJECT(fval), v, &fval);
+            if (!JSVAL_IS_PRIMITIVE(v))
+                return call_imacro(call_imacros.String);
+            set(&fval, stringify(v));
+            return true;
+        }
+    }
+
+    return callNative(fun, argc, constructing);
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_NEW()
 {
     return functionCall(true, GET_ARGC(cx->fp->regs->pc));
 }
 
@@ -7139,24 +7300,28 @@ GetElement_tn(JSContext* cx, jsbytecode 
 JS_DEFINE_TRCINFO_1(GetProperty,
     (4, (static, JSVAL_FAIL,    GetProperty_tn, CONTEXT, PC, THIS, STRING,      0, 0)))
 JS_DEFINE_TRCINFO_1(GetElement,
     (4, (extern, JSVAL_FAIL,    GetElement_tn,  CONTEXT, PC, THIS, INT32,       0, 0)))
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_GETELEM()
 {
+    bool call = *cx->fp->regs->pc == JSOP_CALLELEM;
+
     jsval& idx = stackval(-1);
     jsval& lval = stackval(-2);
 
     LIns* obj_ins = get(&lval);
     LIns* idx_ins = get(&idx);
 
     // Special case for array-like access of strings.
     if (JSVAL_IS_STRING(lval) && isInt32(idx)) {
+        if (call)
+            ABORT_TRACE("JSOP_CALLELEM on a string");
         int i = asInt32(idx);
         if (size_t(i) >= JSSTRING_LENGTH(JSVAL_TO_STRING(lval)))
             ABORT_TRACE("Invalid string index in JSOP_GETELEM");
         idx_ins = makeNumberInt32(idx_ins);
         LIns* args[] = { idx_ins, obj_ins, cx_ins };
         LIns* unitstr_ins = lir->insCall(&js_String_getelem_ci, args);
         guard(false, lir->ins_eq0(unitstr_ins), MISMATCH_EXIT);
         set(&lval, unitstr_ins);
@@ -7182,33 +7347,35 @@ TraceRecorder::record_JSOP_GETELEM()
         // Store the interned string to the stack to save the interpreter from redoing this work.
         idx = ID_TO_VALUE(id);
 
         // The object is not guaranteed to be a dense array at this point, so it might be the
         // global object, which we have to guard against.
         if (!guardNotGlobalObject(obj, obj_ins))
             return false;
 
-        return call_imacro(getelem_imacros.getprop);
+        return call_imacro(call ? callelem_imacros.callprop : getelem_imacros.getprop);
     }
 
     // Invalid dense array index or not a dense array.
     if (JSVAL_TO_INT(idx) < 0 || !OBJ_IS_DENSE_ARRAY(cx, obj)) {
         if (!guardNotGlobalObject(obj, obj_ins))
             return false;
 
-        return call_imacro(getelem_imacros.getelem);
+        return call_imacro(call ? callelem_imacros.callelem : getelem_imacros.getelem);
     }
 
     // Fast path for dense arrays accessed with a non-negative integer index.
     jsval* vp;
     LIns* addr_ins;
     if (!elem(lval, idx, vp, v_ins, addr_ins))
         return false;
     set(&lval, v_ins);
+    if (call)
+        set(&idx, obj_ins);
     return true;
 }
 
 /* Functions used by JSOP_SETELEM */
 
 static JSBool
 SetProperty(JSContext *cx, uintN argc, jsval *vp)
 {
@@ -7577,16 +7744,22 @@ TraceRecorder::record_JSOP_APPLY()
     return call_imacro(call_imacro_table[argc]);
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_FastNativeCallComplete()
 {
     JS_ASSERT(pendingTraceableNative);
 
+    JS_ASSERT(*cx->fp->regs->pc == JSOP_CALL ||
+              *cx->fp->regs->pc == JSOP_APPLY);
+
+    jsval& v = stackval(-1);
+    LIns* v_ins = get(&v);
+
     /* At this point the generated code has already called the native function
        and we can no longer fail back to the original pc location (JSOP_CALL)
        because that would cause the interpreter to re-execute the native
        function, which might have side effects.
 
        Instead, snapshot(), which is invoked from unbox_jsval() below, will see
        that we are currently parked on a traceable native's JSOP_CALL
        instruction, and it will advance the pc to restore by the length of the
@@ -7595,28 +7768,51 @@ TraceRecorder::record_FastNativeCallComp
        boxed value which doesn't need to be boxed if the type guard generated
        by unbox_jsval() fails. */
 
     if (JSTN_ERRTYPE(pendingTraceableNative) == FAIL_STATUS) {
 #ifdef DEBUG
         // Keep cx->bailExit null when it's invalid.
         lir->insStorei(INS_CONSTPTR(NULL), cx_ins, (int) offsetof(JSContext, bailExit));
 #endif
+        LIns* status = lir->insLoad(LIR_ld, cx_ins, (int) offsetof(JSContext, builtinStatus));
+        if (pendingTraceableNative == generatedTraceableNative) {
+            LIns* ok_ins = v_ins;
+
+            /*
+             * If we run a generic traceable native, the return value is in the argument
+             * vector. The actual return value of the fast native is a JSBool
+             * indicating the error status.
+             */
+            v_ins = lir->insLoad(LIR_ld, invokevp_ins, 0);
+            set(&v, v_ins);
+
+            /*
+             * If this is a generic traceable native invocation, propagate the boolean return
+             * value of the fast native into builtinStatus. If the return value (v_ins)
+             * is true, status' == status. Otherwise status' = status | JSBUILTIN_ERROR.
+             * We calculate (rval&1)^1, which is 1 if rval is JS_FALSE (error), and then
+             * shift that by 1 which is JSBUILTIN_ERROR.
+             */
+            JS_STATIC_ASSERT((1 - JS_TRUE) << 1 == 0);
+            JS_STATIC_ASSERT((1 - JS_FALSE) << 1 == JSBUILTIN_ERROR);
+            status = lir->ins2(LIR_or,
+                               status,
+                               lir->ins2i(LIR_lsh,
+                                          lir->ins2i(LIR_xor,
+                                                     lir->ins2i(LIR_and, ok_ins, 1),
+                                                     1),
+                                          1));
+            lir->insStorei(status, cx_ins, (int) offsetof(JSContext, builtinStatus));
+        }
         guard(true,
-              lir->ins_eq0(
-                  lir->insLoad(LIR_ld, cx_ins, (int) offsetof(JSContext, builtinStatus))),
+              lir->ins_eq0(status),
               STATUS_EXIT);
     }
 
-    JS_ASSERT(*cx->fp->regs->pc == JSOP_CALL ||
-              *cx->fp->regs->pc == JSOP_APPLY);
-
-    jsval& v = stackval(-1);
-    LIns* v_ins = get(&v);
-
     bool ok = true;
     if (pendingTraceableNative->flags & JSTN_UNBOX_AFTER) {
         unbox_jsval(v, v_ins);
         set(&v, v_ins);
     } else if (JSTN_ERRTYPE(pendingTraceableNative) == FAIL_NEG) {
         /* Already added i2f in functionCall. */
         JS_ASSERT(JSVAL_IS_NUMBER(v));
     } else {
@@ -7942,17 +8138,23 @@ TraceRecorder::record_JSOP_AND()
 {
     return ifop();
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_TABLESWITCH()
 {
 #ifdef NANOJIT_IA32
-    return true;
+    /* Handle tableswitches specially -- prepare a jump table if needed. */
+    LIns* guardIns = tableswitch();
+    if (guardIns) {
+        fragment->lastIns = guardIns;
+        compile(&JS_TRACE_MONITOR(cx));
+    }
+    return false;
 #else
     return switchop();
 #endif
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_LOOKUPSWITCH()
 {
@@ -8530,17 +8732,17 @@ JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_DEFLOCALFUN()
 {
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_GOTOX()
 {
-    return true;
+    return record_JSOP_GOTO();
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_IFEQX()
 {
     return record_JSOP_IFEQ();
 }
 
@@ -8579,21 +8781,17 @@ JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_DEFAULTX()
 {
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_TABLESWITCHX()
 {
-#ifdef NANOJIT_IA32
-    return true;
-#else
-    return switchop();
-#endif
+    return record_JSOP_TABLESWITCH();
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_LOOKUPSWITCHX()
 {
     return switchop();
 }
 
@@ -8981,17 +9179,17 @@ TraceRecorder::record_JSOP_RESETBASE0()
 {
     atoms = cx->fp->script->atomMap.vector;
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_CALLELEM()
 {
-    return false;
+    return record_JSOP_GETELEM();
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_STOP()
 {
     JSStackFrame *fp = cx->fp;
 
     if (fp->imacpc) {
@@ -9379,18 +9577,20 @@ TraceRecorder::record_JSOP_LENGTH()
         return true;
     }
 
     JSObject* obj = JSVAL_TO_OBJECT(l);
     LIns* obj_ins = get(&l);
     LIns* v_ins;
     if (OBJ_IS_ARRAY(cx, obj)) {
         if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
-            if (!guardDenseArray(obj, obj_ins, BRANCH_EXIT))
+            if (!guardDenseArray(obj, obj_ins, BRANCH_EXIT)) {
                 JS_NOT_REACHED("OBJ_IS_DENSE_ARRAY but not?!?");
+                return false;
+            }
         } else {
             if (!guardClass(obj, obj_ins, &js_SlowArrayClass, snapshot(BRANCH_EXIT)))
                 ABORT_TRACE("can't trace length property access on non-array");
         }
         v_ins = lir->ins1(LIR_i2f, stobj_get_fslot(obj_ins, JSSLOT_ARRAY_LENGTH));
     } else {
         if (!OBJ_IS_NATIVE(obj))
             ABORT_TRACE("can't trace length property access on non-array, non-native object");
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -150,17 +150,17 @@ public:
     bool            has(const void* v) const;
     nanojit::LIns*  get(const void* v) const;
     void            set(const void* v, nanojit::LIns* ins);
     void            clear();
 };
 
 #ifdef JS_JIT_SPEW
 extern bool js_verboseDebug;
-#define debug_only_v(x) if (js_verboseDebug) { x; }
+#define debug_only_v(x) if (js_verboseDebug) { x; fflush(stdout); }
 #else
 #define debug_only_v(x)
 #endif
 
 /*
  * The oracle keeps track of hit counts for program counter locations, as
  * well as slots that should not be demoted to int because we know them to
  * overflow or they result in type-unstable traces. We are using simple
@@ -392,37 +392,35 @@ class TraceRecorder : public avmplus::GC
     TreeInfo*               treeInfo;
     nanojit::LirBuffer*     lirbuf;
     nanojit::LirWriter*     lir;
     nanojit::LirBufWriter*  lir_buf_writer;
     nanojit::LirWriter*     verbose_filter;
     nanojit::LirWriter*     cse_filter;
     nanojit::LirWriter*     expr_filter;
     nanojit::LirWriter*     func_filter;
-#ifdef NJ_SOFTFLOAT
     nanojit::LirWriter*     float_filter;
-#endif
     nanojit::LIns*          cx_ins;
     nanojit::LIns*          eos_ins;
     nanojit::LIns*          eor_ins;
     nanojit::LIns*          globalObj_ins;
     nanojit::LIns*          rval_ins;
     nanojit::LIns*          inner_sp_ins;
+    nanojit::LIns*          invokevp_ins;
     bool                    deepAborted;
     bool                    trashSelf;
     Queue<nanojit::Fragment*> whichTreesToTrash;
     Queue<jsbytecode*>      cfgMerges;
     jsval*                  global_dslots;
+    JSTraceableNative*      generatedTraceableNative;
     JSTraceableNative*      pendingTraceableNative;
-    bool                    terminate;
-    jsbytecode*             terminate_pc;
-    jsbytecode*             terminate_imacpc;
     TraceRecorder*          nextRecorderToAbort;
     bool                    wasRootFragment;
     jsbytecode*             outer;
+    bool                    loop;
 
     bool isGlobal(jsval* p) const;
     ptrdiff_t nativeGlobalOffset(jsval* p) const;
     JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(jsval* p) const;
     JS_REQUIRES_STACK void import(nanojit::LIns* base, ptrdiff_t offset, jsval* p, uint8& t,
                                   const char *prefix, uintN index, JSStackFrame *fp);
     JS_REQUIRES_STACK void import(TreeInfo* treeInfo, nanojit::LIns* sp, unsigned stackSlots,
                                   unsigned callDepth, unsigned ngslots, uint8* typeMap);
@@ -535,23 +533,29 @@ class TraceRecorder : public avmplus::GC
     JS_REQUIRES_STACK bool guardDenseArrayIndex(JSObject* obj, jsint idx, nanojit::LIns* obj_ins,
                                                 nanojit::LIns* dslots_ins, nanojit::LIns* idx_ins,
                                                 ExitType exitType);
     JS_REQUIRES_STACK bool guardNotGlobalObject(JSObject* obj, nanojit::LIns* obj_ins);
     void clearFrameSlotsFromCache();
     JS_REQUIRES_STACK bool guardCallee(jsval& callee);
     JS_REQUIRES_STACK bool getClassPrototype(JSObject* ctor, nanojit::LIns*& proto_ins);
     JS_REQUIRES_STACK bool newArray(JSObject* ctor, uint32 argc, jsval* argv, jsval* vp);
+    JS_REQUIRES_STACK bool newString(JSObject* ctor, jsval& arg, jsval* rval);
     JS_REQUIRES_STACK bool interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc,
                                                    bool constructing);
+    JS_REQUIRES_STACK bool emitNativeCall(JSTraceableNative* known, uintN argc,
+                                          nanojit::LIns* args[]);
+    JS_REQUIRES_STACK bool callTraceableNative(JSFunction* fun, uintN argc, bool constructing);
+    JS_REQUIRES_STACK bool callNative(JSFunction* fun, uintN argc, bool constructing);
     JS_REQUIRES_STACK bool functionCall(bool constructing, uintN argc);
 
     JS_REQUIRES_STACK void trackCfgMerges(jsbytecode* pc);
-    JS_REQUIRES_STACK void flipIf(jsbytecode* pc, bool& cond);
+    JS_REQUIRES_STACK void emitIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
     JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
+    JS_REQUIRES_STACK bool checkTraceEnd(jsbytecode* pc);
 
     bool hasMethod(JSObject* obj, jsid id);
     JS_REQUIRES_STACK bool hasIteratorMethod(JSObject* obj);
 
 public:
     JS_REQUIRES_STACK
     TraceRecorder(JSContext* cx, VMSideExit*, nanojit::Fragment*, TreeInfo*,
                   unsigned stackSlots, unsigned ngslots, uint8* typeMap,
@@ -559,19 +563,18 @@ public:
     ~TraceRecorder();
 
     static JS_REQUIRES_STACK JSMonitorRecordingStatus monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op);
 
     JS_REQUIRES_STACK uint8 determineSlotType(jsval* vp);
     JS_REQUIRES_STACK nanojit::LIns* snapshot(ExitType exitType);
     nanojit::Fragment* getFragment() const { return fragment; }
     TreeInfo* getTreeInfo() const { return treeInfo; }
-    JS_REQUIRES_STACK bool isLoopHeader(JSContext* cx) const;
     JS_REQUIRES_STACK void compile(JSTraceMonitor* tm);
-    JS_REQUIRES_STACK bool closeLoop(JSTraceMonitor* tm, bool& demote);
+    JS_REQUIRES_STACK void closeLoop(JSTraceMonitor* tm, bool& demote);
     JS_REQUIRES_STACK void endLoop(JSTraceMonitor* tm);
     JS_REQUIRES_STACK void joinEdgesToEntry(nanojit::Fragmento* fragmento,
                                             nanojit::Fragment* peer_root);
     void blacklist() { fragment->blacklist(); }
     JS_REQUIRES_STACK bool adjustCallerTypes(nanojit::Fragment* f);
     JS_REQUIRES_STACK nanojit::Fragment* findNestedCompatiblePeer(nanojit::Fragment* f,
                                                                   nanojit::Fragment** empty);
     JS_REQUIRES_STACK void prepareTreeCall(nanojit::Fragment* inner);
@@ -585,17 +588,16 @@ public:
     JS_REQUIRES_STACK bool record_LeaveFrame();
     JS_REQUIRES_STACK bool record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop);
     JS_REQUIRES_STACK bool record_SetPropMiss(JSPropCacheEntry* entry);
     JS_REQUIRES_STACK bool record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
     JS_REQUIRES_STACK bool record_FastNativeCallComplete();
 
     void deepAbort() { deepAborted = true; }
     bool wasDeepAborted() { return deepAborted; }
-    bool walkedOutOfLoop() { return terminate; }
     TreeInfo* getTreeInfo() { return treeInfo; }
 
 #define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format)               \
     JS_REQUIRES_STACK bool record_##op();
 # include "jsopcode.tbl"
 #undef OPDEF
 };
 #define TRACING_ENABLED(cx)       JS_HAS_OPTION(cx, JSOPTION_JIT)
@@ -632,23 +634,23 @@ js_AbortRecording(JSContext* cx, const c
 
 extern void
 js_InitJIT(JSTraceMonitor *tm);
 
 extern void
 js_FinishJIT(JSTraceMonitor *tm);
 
 extern void
-js_FlushScriptFragments(JSContext* cx, JSScript* script);
+js_PurgeScriptFragments(JSContext* cx, JSScript* script);
 
 extern void
 js_FlushJITCache(JSContext* cx);
 
 extern void
-js_FlushJITOracle(JSContext* cx);
+js_PurgeJITOracle();
 
 extern JSObject *
 js_GetBuiltinFunction(JSContext *cx, uintN index);
 
 #else  /* !JS_TRACER */
 
 #define TRACE_0(x)              ((void)0)
 #define TRACE_1(x,a)            ((void)0)
new file mode 100644
--- /dev/null
+++ b/js/src/jswince.asm
@@ -0,0 +1,44 @@
+    INCLUDE kxarm.h
+
+    area js_msvc, code, readonly
+
+    MACRO
+    FUNC_HEADER $Name
+FuncName    SETS    VBar:CC:"$Name":CC:VBar
+PrologName  SETS    VBar:CC:"$Name":CC:"_Prolog":CC:VBar
+FuncEndName SETS    VBar:CC:"$Name":CC:"_end":CC:VBar
+
+    AREA |.pdata|,ALIGN=2,PDATA
+    DCD	$FuncName
+    DCD	(($PrologName-$FuncName)/4) :OR: ((($FuncEndName-$FuncName)/4):SHL:8) :OR: 0x40000000
+    AREA $AreaName,CODE,READONLY
+    ALIGN	2
+    GLOBAL	$FuncName
+    EXPORT	$FuncName
+$FuncName
+    ROUT
+$PrologName
+    MEND
+
+    export  js_arm_try_armv6t2_op
+
+    ;; I'm not smart enough to figure out which flags to pass to armasm to get it
+    ;; to understand movt and fmdrr/vmov; the disassembler figures them out just fine!
+
+    FUNC_HEADER js_arm_try_armv6t2_op
+    ;; movt r0,#0xFFFF
+    DCD 0xE34F0FFF
+    mov pc,lr
+    ENTRY_END
+    endp
+
+    export  js_arm_try_vfp_op
+
+    FUNC_HEADER js_arm_try_vfp_op
+    ;; fmdrr d0, r0, r1
+    DCD 0xEC410B10
+    mov pc,lr
+    ENTRY_END
+    endp
+
+    end
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -50,17 +50,17 @@ extern "C" void __clear_cache(char *BEG,
 #endif
 
 #ifdef AVMPLUS_SPARC
 extern  "C"	void sync_instruction_memory(caddr_t v, u_int len);
 #endif
 
 namespace nanojit
 {
-
+	int UseSoftfloat = 0;
 
 	class DeadCodeFilter: public LirFilter
 	{
 		const CallInfo *functions;
 
 	    bool ignoreInstruction(LInsp ins)
 	    {
             LOpcode op = ins->opcode();
@@ -597,17 +597,21 @@ namespace nanojit
 			// the already-allocated register isn't in the allowed mask;
 			// we need to grab a new one and then copy over the old
 			// contents to the new.
 			resv->reg = UnknownReg;
 			_allocator.retire(r);
 			Register s = resv->reg = registerAlloc(prefer);
 			_allocator.addActive(s, i);
             if ((rmask(r) & GpRegs) && (rmask(s) & GpRegs)) {
+#ifdef NANOJIT_ARM
+				MOV(r, s);
+#else
     			MR(r, s);
+#endif
             } 
             else {
 				asm_nongp_copy(r, s);
 			}
 			return s;
 		}
 	}
 
@@ -1233,17 +1237,16 @@ namespace nanojit
 				case LIR_lsh:
 				case LIR_rsh:
 				case LIR_ush:
 				{
                     countlir_alu();
 					asm_arith(ins);
 					break;
 				}
-#ifndef NJ_SOFTFLOAT
 				case LIR_fneg:
 				{
                     countlir_fpu();
 					asm_fneg(ins);
 					break;
 				}
 				case LIR_fadd:
 				case LIR_fsub:
@@ -1261,17 +1264,16 @@ namespace nanojit
 					break;
 				}
 				case LIR_u2f:
 				{
                     countlir_fpu();
 					asm_u2f(ins);
 					break;
 				}
-#endif // NJ_SOFTFLOAT
 				case LIR_st:
 				case LIR_sti:
 				{
                     countlir_st();
                     asm_store32(ins->oprnd1(), ins->immdisp(), ins->oprnd2());
                     break;
 				}
 				case LIR_stq:
@@ -1412,28 +1414,26 @@ namespace nanojit
 				{
                     countlir_loop();
 					asm_loop(ins, loopJumps);
 			        assignSavedRegs();
 			        assignParamRegs();
 					break;
 				}
 
-#ifndef NJ_SOFTFLOAT
 				case LIR_feq:
 				case LIR_fle:
 				case LIR_flt:
 				case LIR_fgt:
 				case LIR_fge:
 				{
                     countlir_fpu();
 					asm_fcond(ins);
 					break;
 				}
-#endif
 				case LIR_eq:
                 case LIR_ov:
                 case LIR_cs:
 				case LIR_le:
 				case LIR_lt:
 				case LIR_gt:
 				case LIR_ge:
 				case LIR_ult:
@@ -1441,37 +1441,33 @@ namespace nanojit
 				case LIR_ugt:
 				case LIR_uge:
 				{
                     countlir_alu();
 					asm_cond(ins);
 					break;
 				}
 				
-#ifndef NJ_SOFTFLOAT
 				case LIR_fcall:
 				case LIR_fcalli:
-#endif
 #if defined NANOJIT_64BIT
 				case LIR_callh:
 #endif
 				case LIR_call:
 				case LIR_calli:
 				{
                     countlir_call();
                     Register rr = UnknownReg;
-#ifndef NJ_SOFTFLOAT
                     if ((op&LIR64))
                     {
                         // fcall or fcalli
 						Reservation* rR = getresv(ins);
 						rr = asm_prep_fcall(rR, ins);
                     }
                     else
-#endif
                     {
                         rr = retRegs[0];
 						prepResultReg(ins, rmask(rr));
                     }
 
 					// do this after we've handled the call result, so we dont
 					// force the call result to be spilled unnecessarily.
 
@@ -1942,23 +1938,22 @@ namespace nanojit
 
     uint32_t CallInfo::get_sizes(ArgSize* sizes) const
     {
 		uint32_t argt = _argtypes;
 		uint32_t argc = 0;
 		for (uint32_t i = 0; i < MAXARGS; i++) {
 			argt >>= 2;
 			ArgSize a = ArgSize(argt&3);
-#ifdef NJ_SOFTFLOAT
-			if (a == ARGSIZE_F) {
+			if (AvmCore::config.soft_float && a == ARGSIZE_F) {
                 sizes[argc++] = ARGSIZE_LO;
                 sizes[argc++] = ARGSIZE_LO;
                 continue;
             }
-#endif
+
             if (a != ARGSIZE_NONE) {
                 sizes[argc++] = a;
             } else {
                 break;
             }
 		}
         if (isIndirect()) {
             // add one more arg for indirect call address
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -1097,36 +1097,37 @@ namespace nanojit
 
 		uint32_t argt = ci->_argtypes;
         LOpcode op = (ci->isIndirect() ? k_callimap : k_callmap)[argt & 3];
         NanoAssert(op != LIR_skip); // LIR_skip here is just an error condition
 
         ArgSize sizes[2*MAXARGS];
         int32_t argc = ci->get_sizes(sizes);
 
-#ifdef NJ_SOFTFLOAT
-		if (op == LIR_fcall)
-			op = LIR_callh;
-		LInsp args2[MAXARGS*2]; // arm could require 2 args per double
-		int32_t j = 0;
-        int32_t i = 0;
-        while (j < argc) {
-			argt >>= 2;
-			ArgSize a = ArgSize(argt&3);
-			if (a == ARGSIZE_F) {
-				LInsp q = args[i++];
-				args2[j++] = ins1(LIR_qhi, q);
-				args2[j++] = ins1(LIR_qlo, q);
-			} else {
-				args2[j++] = args[i++];
+		if (AvmCore::config.soft_float) {
+			if (op == LIR_fcall)
+				op = LIR_callh;
+			LInsp args2[MAXARGS*2]; // arm could require 2 args per double
+			int32_t j = 0;
+			int32_t i = 0;
+			while (j < argc) {
+				argt >>= 2;
+				ArgSize a = ArgSize(argt&3);
+				if (a == ARGSIZE_F) {
+					LInsp q = args[i++];
+					args2[j++] = ins1(LIR_qhi, q);
+					args2[j++] = ins1(LIR_qlo, q);
+				} else {
+					args2[j++] = args[i++];
+				}
 			}
+			args = args2;
+			NanoAssert(j == argc);
 		}
-		args = args2;
-        NanoAssert(j == argc);
-#endif
+
 		//
 		// An example of the what we're trying to serialize:
 		//
 		// byte                                             word
 		// ----                                             ----
 		//    N  [ arg tramp #0 ------------------------ ]  K
 		//  N+4  [ arg tramp #1 ------------------------ ]  K+1
 		//  N+8  [ arg tramp #2 ------------------------ ]  K+2
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -43,80 +43,77 @@
 #include "portapi_nanojit.h"
 #endif
 
 #ifdef UNDER_CE
 #include <cmnintrin.h>
 #endif
 
 #if defined(AVMPLUS_LINUX)
+#include <signal.h>
+#include <setjmp.h>
 #include <asm/unistd.h>
 extern "C" void __clear_cache(char *BEG, char *END);
 #endif
 
 #ifdef FEATURE_NANOJIT
 
 namespace nanojit
 {
 
 #ifdef NJ_VERBOSE
-const char* regNames[] = {"r0","r1","r2","r3","r4","r5","r6","r7","r8","r9","r10","FP","IP","SP","LR","PC",
+const char* regNames[] = {"r0","r1","r2","r3","r4","r5","r6","r7","r8","r9","r10","fp","ip","sp","lr","pc",
                           "d0","d1","d2","d3","d4","d5","d6","d7","s14"};
+const char* condNames[] = {"eq","ne","cs","cc","mi","pl","vs","vc","hi","ls","ge","lt","gt","le",""/*al*/,"nv"};
+const char* shiftNames[] = { "lsl", "lsl", "lsr", "lsr", "asr", "asr", "ror", "ror" };
 #endif
 
 const Register Assembler::argRegs[] = { R0, R1, R2, R3 };
 const Register Assembler::retRegs[] = { R0, R1 };
 const Register Assembler::savedRegs[] = { R4, R5, R6, R7, R8, R9, R10 };
 
-const char *ccName(ConditionCode cc)
-{
-    const char *ccNames[] = { "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
-                              "hi", "ls", "ge", "lt", "gt", "le", "al", "nv" };
-    return ccNames[(int)cc];
-}
-
 void
 Assembler::nInit(AvmCore*)
 {
 }
 
 NIns*
 Assembler::genPrologue()
 {
     /**
      * Prologue
      */
 
     // NJ_RESV_OFFSET is space at the top of the stack for us
     // to use for parameter passing (8 bytes at the moment)
     uint32_t stackNeeded = STACK_GRANULARITY * _activation.highwatermark + NJ_STACK_OFFSET;
+    uint32_t savingCount = 2;
 
     uint32_t savingMask = rmask(FP) | rmask(LR);
-    uint32_t savingCount = 2;
 
     if (!_thisfrag->lirbuf->explicitSavedRegs) {
         for (int i = 0; i < NumSavedRegs; ++i)
             savingMask |= rmask(savedRegs[i]);
         savingCount += NumSavedRegs;
     }
 
     // so for alignment purposes we've pushed return addr and fp
     uint32_t stackPushed = STACK_GRANULARITY * savingCount;
     uint32_t aligned = alignUp(stackNeeded + stackPushed, NJ_ALIGN_STACK);
     int32_t amt = aligned - stackPushed;
 
     // Make room on stack for what we are doing
     if (amt)
-        SUBi(SP, amt);
+        SUBi(SP, SP, amt);
 
     verbose_only( verbose_outputf("         %p:",_nIns); )
     verbose_only( verbose_output("         patch entry"); )
     NIns *patchEntry = _nIns;
 
-    MR(FP, SP);
+    MOV(FP, SP);
     PUSH_mask(savingMask);
     return patchEntry;
 }
 
 void
 Assembler::nFragExit(LInsp guard)
 {
     SideExit* exit = guard->record()->exit;
@@ -134,17 +131,17 @@ Assembler::nFragExit(LInsp guard)
         // patching.
         JMP_far(_epilogue);
 
         // stick the jmp pointer to the start of the sequence
         lr->jmp = _nIns;
     }
 
     // pop the stack frame first
-    MR(SP, FP);
+    MOV(SP, FP);
 
 #ifdef NJ_VERBOSE
     if (_frago->core()->config.show_stats) {
         // load R1 with Fragment *fromFrag, target fragment
         // will make use of this when calling fragenter().
         int fromfrag = int((Fragment*)_thisfrag);
         LDi(argRegs[1], fromfrag);
     }
@@ -165,20 +162,20 @@ Assembler::genEpilogue()
     RegisterMask savingMask = rmask(FP) | rmask(LR);
 
     if (!_thisfrag->lirbuf->explicitSavedRegs)
         for (int i = 0; i < NumSavedRegs; ++i)
             savingMask |= rmask(savedRegs[i]);
 
     POP_mask(savingMask); // regs
 
-    MR(SP,FP);
+    MOV(SP,FP);
 
     // this is needed if we jump here from nFragExit
-    MR(R0,R2); // return LinkRecord*
+    MOV(R0,R2); // return LinkRecord*
 
     return _nIns;
 }
 
 /* ARM EABI (used by gcc/linux) calling conventions differ from Windows CE; use these
  * as the default.
  *
  * - double arg following an initial dword arg use r0 for the int arg
@@ -196,19 +193,18 @@ Assembler::asm_call(LInsp ins)
 {
     const CallInfo* call = ins->callInfo();
     Reservation *callRes = getresv(ins);
 
     uint32_t atypes = call->_argtypes;
     uint32_t roffset = 0;
 
     // skip return type
-#ifdef NJ_ARM_VFP
     ArgSize rsize = (ArgSize)(atypes & 3);
-#endif
+
     atypes >>= 2;
 
     bool arg0IsInt32FollowedByFloat = false;
 #ifndef UNDER_CE
     // we need to detect if we have arg0 as LO followed by arg1 as F;
     // in that case, we need to skip using r1 -- the F needs to be
     // loaded in r2/r3, at least according to the ARM EABI and gcc 4.2's
     // generated code.
@@ -219,18 +215,17 @@ Assembler::asm_call(LInsp ins)
         {
             arg0IsInt32FollowedByFloat = true;
             break;
         }
         atypes >>= 2;
     }
 #endif
 
-#ifdef NJ_ARM_VFP
-    if (rsize == ARGSIZE_F) {
+    if (AvmCore::config.vfp && rsize == ARGSIZE_F) {
         NanoAssert(ins->opcode() == LIR_fcall);
         NanoAssert(callRes);
 
         //fprintf (stderr, "call ins: %p callRes: %p reg: %d ar: %d\n", ins, callRes, callRes->reg, callRes->arIndex);
 
         Register rr = callRes->reg;
         int d = disp(callRes);
         freeRsrcOf(ins, rr != UnknownReg);
@@ -239,100 +234,83 @@ Assembler::asm_call(LInsp ins)
             NanoAssert(IsFpReg(rr));
             FMDRR(rr,R0,R1);
         } else {
             NanoAssert(d);
             STR(R0, FP, d+0);
             STR(R1, FP, d+4);
         }
     }
-#endif
 
     BL((NIns*)(call->_address));
 
     ArgSize sizes[10];
     uint32_t argc = call->get_sizes(sizes);
     for(uint32_t i = 0; i < argc; i++) {
         uint32_t j = argc - i - 1;
         ArgSize sz = sizes[j];
         LInsp arg = ins->arg(j);
         // pre-assign registers R0-R3 for arguments (if they fit)
 
         Register r = (i + roffset) < 4 ? argRegs[i+roffset] : UnknownReg;
-#ifdef NJ_ARM_VFP
         if (sz == ARGSIZE_F) {
+            Register rlo = UnknownReg;
+            Register rhi = UnknownReg;
+
 #ifdef UNDER_CE
             if (r >= R0 && r <= R2) {
-                // we can use up r0/r1, r1/r2, r2/r3 without anything special
+                rlo = r;
+                rhi = nextreg(r);
                 roffset++;
-                FMRRD(r, nextreg(r), sr);
             } else if (r == R3) {
-                // to use R3 gets complicated; we need to move the high dword
-                // into R3, and the low dword on the stack.
-                STR_preindex(Scratch, SP, -4);
-                FMRDL(Scratch, sr);
-                FMRDH(r, sr);
-            } else {
-                asm_pusharg(arg);
+                rlo = r;
+                rhi = UnknownReg;
             }
 #else
             if (r == R0 || r == R2) {
+                rlo = r;
+                rhi = nextreg(r);
                 roffset++;
             } else if (r == R1) {
-                r = R2;
-                roffset++;
-            } else {
-                r = UnknownReg;
-            }
-
-            // XXX move this into asm_farg
-            Register sr = findRegFor(arg, FpRegs);
-
-            if (r != UnknownReg) {
-                // stick it into our scratch fp reg, and then copy into the base reg
-                //fprintf (stderr, "FMRRD: %d %d <- %d\n", r, nextreg(r), sr);
-                FMRRD(r, nextreg(r), sr);
-            } else {
-                asm_pusharg(arg);
+                rlo = R2;
+                rhi = nextreg(r);
+                roffset += 2;
             }
 #endif
+
+            asm_arm_farg(arg, rlo, rhi);
         } else {
             asm_arg(sz, arg, r);
         }
-#else
-        NanoAssert(sz == ARGSIZE_LO || sz == ARGSIZE_Q);
-        asm_arg(sz, arg, r);
-#endif
 
         // Under CE, arg0IsInt32FollowedByFloat will always be false
         if (i == 0 && arg0IsInt32FollowedByFloat)
             roffset = 1;
     }
 }
 
 void
 Assembler::nMarkExecute(Page* page, int flags)
 {
-	NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
+    NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
 #ifdef UNDER_CE
-	static const DWORD kProtFlags[4] = 
-	{
-		PAGE_READONLY,			// 0
-		PAGE_READWRITE,			// PAGE_WRITE
-		PAGE_EXECUTE_READ,		// PAGE_EXEC
-		PAGE_EXECUTE_READWRITE	// PAGE_EXEC|PAGE_WRITE
-	};
-	DWORD prot = kProtFlags[flags & (PAGE_WRITE|PAGE_EXEC)];
+    static const DWORD kProtFlags[4] = {
+        PAGE_READONLY,          // 0
+        PAGE_READWRITE,         // PAGE_WRITE
+        PAGE_EXECUTE_READ,      // PAGE_EXEC
+        PAGE_EXECUTE_READWRITE  // PAGE_EXEC|PAGE_WRITE
+    };
+    DWORD prot = kProtFlags[flags & (PAGE_WRITE|PAGE_EXEC)];
     DWORD dwOld;
     BOOL res = VirtualProtect(page, NJ_PAGE_SIZE, prot, &dwOld);
-	if (!res)
-	{
-		// todo: we can't abort or assert here, we have to fail gracefully.
-		NanoAssertMsg(false, "FATAL ERROR: VirtualProtect() failed\n");
-	}
+    if (!res)
+    {
+        // todo: we can't abort or assert here, we have to fail gracefully.
+        NanoAssertMsg(false, "FATAL ERROR: VirtualProtect() failed\n");
+    }
 #endif
 #ifdef AVMPLUS_PORTING_API
     NanoJIT_PortAPI_MarkExecutable(page, (void*)((char*)page+NJ_PAGE_SIZE), flags);
     // todo, must add error-handling to the portapi
 #endif
 }
 
 Register
@@ -366,19 +344,18 @@ Assembler::nRegisterResetAll(RegAlloc& a
 {
     // add scratch registers to our free list for the allocator
     a.clear();
     a.used = 0;
     a.free =
         rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) |
         rmask(R5) | rmask(R6) | rmask(R7) | rmask(R8) | rmask(R9) |
         rmask(R10);
-#ifdef NJ_ARM_VFP
-    a.free |= FpRegs;
-#endif
+    if (AvmCore::config.vfp)
+        a.free |= FpRegs;
 
     debug_only(a.managed = a.free);
 }
 
 NIns*
 Assembler::nPatchBranch(NIns* at, NIns* target)
 {
     // Patch the jump in a loop, as emitted by JMP_far.
@@ -467,28 +444,31 @@ Assembler::asm_store32(LIns *value, int 
     Register ra = rA->reg;
     Register rb = rB->reg;
     STR(ra, rb, dr);
 }
 
 void
 Assembler::asm_restore(LInsp i, Reservation *resv, Register r)
 {
-    (void)resv;
-    int d = findMemFor(i);
+    if (i->isop(LIR_alloc)) {
+        asm_add_imm(r, FP, disp(resv));
+    } else {
+        int d = findMemFor(i);
 
-    if (IsFpReg(r)) {
-        if (isS8(d >> 2)) {
-            FLDD(r, FP, d);
+        if (IsFpReg(r)) {
+            if (isS8(d >> 2)) {
+                FLDD(r, FP, d);
+            } else {
+                FLDD(r, IP, 0);
+                ADDi(IP, FP, d);
+            }
         } else {
-            FLDD(r, Scratch, 0);
-            arm_ADDi(Scratch, FP, d);
+            LDR(r, FP, d);
         }
-    } else {
-        LDR(r, FP, d);
     }
 
     verbose_only(
         if (_verbose)
             outputf("        restore %s",_thisfrag->lirbuf->names->formatRef(i));
     )
 }
 
@@ -497,18 +477,18 @@ Assembler::asm_spill(Register rr, int d,
 {
     (void) pop;
     (void) quad;
     if (d) {
         if (IsFpReg(rr)) {
             if (isS8(d >> 2)) {
                 FSTD(rr, FP, d);
             } else {
-                FSTD(rr, Scratch, 0);
-                arm_ADDi(Scratch, FP, d);
+                FSTD(rr, IP, 0);
+                ADDi(IP, FP, d);
             }
         } else {
             STR(rr, FP, d);
         }
     }
 }
 
 void
@@ -520,96 +500,99 @@ Assembler::asm_load64(LInsp ins)
     int offset = ins->oprnd2()->constval();
 
     Reservation *resv = getresv(ins);
     Register rr = resv->reg;
     int d = disp(resv);
 
     freeRsrcOf(ins, false);
 
-#ifdef NJ_ARM_VFP
-    Register rb = findRegFor(base, GpRegs);
+    if (AvmCore::config.vfp) {
+        Register rb = findRegFor(base, GpRegs);
 
-    NanoAssert(rb != UnknownReg);
-    NanoAssert(rr == UnknownReg || IsFpReg(rr));
+        NanoAssert(rb != UnknownReg);
+        NanoAssert(rr == UnknownReg || IsFpReg(rr));
 
-    if (rr != UnknownReg) {
-        if (!isS8(offset >> 2) || (offset&3) != 0) {
-            FLDD(rr,Scratch,0);
-            arm_ADDi(Scratch, rb, offset);
+        if (rr != UnknownReg) {
+            if (!isS8(offset >> 2) || (offset&3) != 0) {
+                FLDD(rr,IP,0);
+                ADDi(IP, rb, offset);
+            } else {
+                FLDD(rr,rb,offset);
+            }
         } else {
-            FLDD(rr,rb,offset);
+            asm_mmq(FP, d, rb, offset);
         }
+
+        // *(FP+dr) <- *(rb+db)
     } else {
+        NanoAssert(resv->reg == UnknownReg && d != 0);
+        Register rb = findRegFor(base, GpRegs);
         asm_mmq(FP, d, rb, offset);
     }
 
-    // *(FP+dr) <- *(rb+db)
-#else
-    NanoAssert(resv->reg == UnknownReg && d != 0);
-    Register rb = findRegFor(base, GpRegs);
-    asm_mmq(FP, d, rb, offset);
-#endif
-
     //asm_output(">>> load64");
 }
 
 void
 Assembler::asm_store64(LInsp value, int dr, LInsp base)
 {
     //asm_output("<<< store64 (dr: %d)", dr);
 
-#ifdef NJ_ARM_VFP
-    //Reservation *valResv = getresv(value);
-    Register rb = findRegFor(base, GpRegs);
+    if (AvmCore::config.vfp) {
+        //Reservation *valResv = getresv(value);
+        Register rb = findRegFor(base, GpRegs);
+
+        if (value->isconstq()) {
+            const int32_t* p = (const int32_t*) (value-2);
+
+            underrunProtect(LD32_size*2 + 8);
 
-    if (value->isconstq()) {
-        const int32_t* p = (const int32_t*) (value-2);
+            // XXX use another reg, get rid of dependency
+            STR(IP, rb, dr);
+            LD32_nochk(IP, p[0]);
+            STR(IP, rb, dr+4);
+            LD32_nochk(IP, p[1]);
+
+            return;
+        }
+
+        Register rv = findRegFor(value, FpRegs);
+
+        NanoAssert(rb != UnknownReg);
+        NanoAssert(rv != UnknownReg);
 
-        STR(Scratch, rb, dr);
-        LD32_nochk(Scratch, p[0]);
-        STR(Scratch, rb, dr+4);
-        LD32_nochk(Scratch, p[1]);
+        Register baseReg = rb;
+        intptr_t baseOffset = dr;
+
+        if (!isS8(dr)) {
+            baseReg = IP;
+            baseOffset = 0;
+        }
+
+        FSTD(rv, baseReg, baseOffset);
 
-        return;
+        if (!isS8(dr)) {
+            ADDi(IP, rb, dr);
+        }
+
+        // if it's a constant, make sure our baseReg/baseOffset location
+        // has the right value
+        if (value->isconstq()) {
+            const int32_t* p = (const int32_t*) (value-2);
+
+            underrunProtect(4*4);
+            asm_quad_nochk(rv, p);
+        }
+    } else {
+        int da = findMemFor(value);
+        Register rb = findRegFor(base, GpRegs);
+        asm_mmq(rb, dr, FP, da);
     }
 
-    Register rv = findRegFor(value, FpRegs);
-
-    NanoAssert(rb != UnknownReg);
-    NanoAssert(rv != UnknownReg);
-
-    Register baseReg = rb;
-    intptr_t baseOffset = dr;
-
-    if (!isS8(dr)) {
-        baseReg = Scratch;
-        baseOffset = 0;
-    }
-
-    FSTD(rv, baseReg, baseOffset);
-
-    if (!isS8(dr)) {
-        arm_ADDi(Scratch, rb, dr);
-    }
-
-    // if it's a constant, make sure our baseReg/baseOffset location
-    // has the right value
-    if (value->isconstq()) {
-        const int32_t* p = (const int32_t*) (value-2);
-
-        underrunProtect(12);
-
-        asm_quad_nochk(rv, p);
-    }
-#else
-    int da = findMemFor(value);
-    Register rb = findRegFor(base, GpRegs);
-    asm_mmq(rb, dr, FP, da);
-#endif
     //asm_output(">>> store64");
 }
 
 // stick a quad into register rr, where p points to the two
 // 32-bit parts of the quad, optinally also storing at FP+d
 void
 Assembler::asm_quad_nochk(Register rr, const int32_t* p)
 {
@@ -640,51 +623,32 @@ Assembler::asm_quad(LInsp ins)
     Reservation *res = getresv(ins);
     int d = disp(res);
     Register rr = res->reg;
 
     NanoAssert(d || rr != UnknownReg);
 
     const int32_t* p = (const int32_t*) (ins-2);
 
-#ifdef NJ_ARM_VFP
     freeRsrcOf(ins, false);
 
-    if (rr == UnknownReg) {
-        underrunProtect(12);
-
-        // asm_mmq might spill a reg, so don't call it;
-        // instead do the equivalent directly.
-        //asm_mmq(FP, d, PC, -16);
-
-        STR(Scratch, FP, d+4);
-        LDR(Scratch, PC, -20);
-        STR(Scratch, FP, d);
-        LDR(Scratch, PC, -16);
-
-        *(--_nIns) = (NIns) p[1];
-        *(--_nIns) = (NIns) p[0];
-        JMP_nochk(_nIns+2);
-    } else {
+    if (AvmCore::config.vfp &&
+        rr != UnknownReg)
+    {
         if (d)
             FSTD(rr, FP, d);
 
-        underrunProtect(16);
+        underrunProtect(4*4);
         asm_quad_nochk(rr, p);
+    } else {
+        STR(IP, FP, d+4);
+        asm_ld_imm(IP, p[1]);
+        STR(IP, FP, d);
+        asm_ld_imm(IP, p[0]);
     }
-#else
-    freeRsrcOf(ins, false);
-    if (d) {
-        underrunProtect(LD32_size * 2 + 8);
-        STR(Scratch, FP, d+4);
-        LD32_nochk(Scratch, p[1]);
-        STR(Scratch, FP, d);
-        LD32_nochk(Scratch, p[0]);
-    }
-#endif
 
     //asm_output("<<< asm_quad");
 }
 
 void
 Assembler::asm_nongp_copy(Register r, Register s)
 {
     if ((rmask(r) & FpRegs) && (rmask(s) & FpRegs)) {
@@ -712,56 +676,56 @@ Assembler::asm_binop_rhs_reg(LInsp)
 void
 Assembler::asm_mmq(Register rd, int dd, Register rs, int ds)
 {
     // value is either a 64bit struct or maybe a float
     // that isn't live in an FPU reg.  Either way, don't
     // put it in an FPU reg just to load & store it.
 
     // Don't use this with PC-relative loads; the registerAlloc might
-    // end up spilling a reg (and this the offset could end up being
+    // end up spilling a reg (and thus the offset could end up being
     // bogus)!
     NanoAssert(rs != PC);
 
     // use both IP and a second scratch reg
     Register t = registerAlloc(GpRegs & ~(rmask(rd)|rmask(rs)));
     _allocator.addFree(t);
 
     // XXX maybe figure out if we can use LDRD/STRD -- hard to
     // ensure right register allocation
-    STR(Scratch, rd, dd+4);
+    STR(IP, rd, dd+4);
     STR(t, rd, dd);
-    LDR(Scratch, rs, ds+4);
+    LDR(IP, rs, ds+4);
     LDR(t, rs, ds);
 }
 
 void
 Assembler::asm_pusharg(LInsp arg)
 {
     Reservation* argRes = getresv(arg);
     bool quad = arg->isQuad();
 
     if (argRes && argRes->reg != UnknownReg) {
         if (!quad) {
             STR_preindex(argRes->reg, SP, -4);
         } else {
             FSTD(argRes->reg, SP, 0);
-            SUBi(SP, 8);
+            SUBi(SP, SP, 8);
         }
     } else {
         int d = findMemFor(arg);
 
         if (!quad) {
-            STR_preindex(Scratch, SP, -4);
-            LDR(Scratch, FP, d);
+            STR_preindex(IP, SP, -4);
+            LDR(IP, FP, d);
         } else {
-            STR_preindex(Scratch, SP, -4);
-            LDR(Scratch, FP, d+4);
-            STR_preindex(Scratch, SP, -4);
-            LDR(Scratch, FP, d);
+            STR_preindex(IP, SP, -4);
+            LDR(IP, FP, d+4);
+            STR_preindex(IP, SP, -4);
+            LDR(IP, FP, d);
         }
     }
 }
 
 void
 Assembler::nativePageReset()
 {
     _nSlot = 0;
@@ -783,20 +747,23 @@ Assembler::nativePageSetup()
         _nExitIns--;
 
         // constpool starts at top of page and goes down,
         // code starts at bottom of page and moves up
         _nSlot = pageDataStart(_nIns); //(int*)(&((Page*)pageTop(_nIns))->lir[0]);
     }
 }
 
+// Note: underrunProtect should not touch any registers, even IP; it
+// might need to allocate a new page in the middle of an IP-using
+// sequence.
 void
 Assembler::underrunProtect(int bytes)
 {
-	NanoAssertMsg(bytes<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small"); 
+    NanoAssertMsg(bytes<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small"); 
     intptr_t u = bytes + sizeof(PageHeader)/sizeof(NIns) + 8;
     if ( (samepage(_nIns,_nSlot) && (((intptr_t)_nIns-u) <= intptr_t(_nSlot+1))) ||
          (!samepage((intptr_t)_nIns-u,_nIns)) )
     {
         NIns* target = _nIns;
 
         _nIns = pageAlloc(_inExit);
 
@@ -878,17 +845,26 @@ Assembler::BL(NIns* addr)
         asm_output("bl %p (32-bit)", addr);
     }
 }
 
 void
 Assembler::LD32_nochk(Register r, int32_t imm)
 {
     if (imm == 0) {
-        XOR(r, r);
+        EOR(r, r, r);
+        return;
+    }
+
+    if (AvmCore::config.v6t2) {
+        // We can just emit a movw/movt pair
+        // the movt is only necessary if the high 16 bits are nonzero
+        if (((imm >> 16) & 0xFFFF) != 0)
+            MOVT(r, (imm >> 16) & 0xFFFF);
+        MOVW(r, imm & 0xFFFF);
         return;
     }
 
     // We should always reach the const pool, since it's on the same page (<4096);
     // if we can't, someone didn't underrunProtect enough.
 
     *(++_nSlot) = (int)imm;
 
@@ -898,16 +874,54 @@ Assembler::LD32_nochk(Register r, int32_
 
     NanoAssert(isS12(offset) && (offset < 0));
 
     asm_output("  (%d(PC) = 0x%x)", offset, imm);
 
     LDR_nochk(r,PC,offset);
 }
 
+void
+Assembler::asm_ldr_chk(Register d, Register b, int32_t off, bool chk)
+{
+    if (IsFpReg(d)) {
+        FLDD_chk(d,b,off,chk);
+        return;
+    }
+
+    if (off > -4096 && off < 4096) {
+        if (chk) underrunProtect(4);
+        *(--_nIns) = (NIns)( COND_AL | ((off < 0 ? 0x51 : 0x59)<<20) | (b<<16) | (d<<12) | ((off < 0 ? -off : off)&0xFFF) );
+    } else {
+        if (chk) underrunProtect(4+LD32_size);
+        NanoAssert(b != IP);
+        *(--_nIns) = (NIns)( COND_AL | (0x79<<20) | (b<<16) | (d<<12) | IP );
+        LD32_nochk(IP, off);
+    }
+
+    asm_output("ldr %s, [%s, #%d]",gpn(d),gpn(b),(off));
+}
+
+void
+Assembler::asm_ld_imm(Register d, int32_t imm)
+{
+    if (imm == 0) {
+        EOR(d, d, d);
+    } else if (isS8(imm) || isU8(imm)) {
+        underrunProtect(4);
+        if (imm < 0)
+            *(--_nIns) = (NIns)( COND_AL | 0x3E<<20 | d<<12 | (imm^0xFFFFFFFF)&0xFF );
+        else
+            *(--_nIns) = (NIns)( COND_AL | 0x3B<<20 | d<<12 | imm&0xFF );
+        asm_output("ld  %s,0x%x",gpn(d), imm);
+    } else {
+        underrunProtect(LD32_size);
+        LD32_nochk(d, imm);
+    }
+}
 
 // Branch to target address _t with condition _c, doing underrun
 // checks (_chk == 1) or skipping them (_chk == 0).
 //
 // If the jump fits in a relative jump (+/-32MB), emit that.
 // If the jump is unconditional, emit the dest address inline in
 // the instruction stream and load it into pc.
 // If the jump has a condition, but no one's mucked with _nIns and our _nSlot
@@ -917,22 +931,27 @@ Assembler::LD32_nochk(Register r, int32_
 // and emit a jump to jump over it in case the condition fails.
 //
 // NB: JMP_nochk depends on this not calling samepage() when _c == AL
 void
 Assembler::B_cond_chk(ConditionCode _c, NIns* _t, bool _chk)
 {
     int32_t offs = PC_OFFSET_FROM(_t,_nIns-1);
     //fprintf(stderr, "B_cond_chk target: 0x%08x offset: %d @0x%08x\n", _t, offs, _nIns-1);
+
+    // optimistically check if this will fit in 24 bits
     if (isS24(offs>>2)) {
         if (_chk) underrunProtect(4);
+        // recalculate the offset, because underrunProtect may have
+        // moved _nIns to a new page
         offs = PC_OFFSET_FROM(_t,_nIns-1);
     }
 
     if (isS24(offs>>2)) {
+        // the underrunProtect for this was done above
         *(--_nIns) = (NIns)( ((_c)<<28) | (0xA<<24) | (((offs)>>2) & 0xFFFFFF) );
     } else if (_c == AL) {
         if(_chk) underrunProtect(8);
         *(--_nIns) = (NIns)(_t);
         *(--_nIns) = (NIns)( COND_AL | (0x51<<20) | (PC<<16) | (PC<<12) | 0x4 );
     } else if (samepage(_nIns,_nSlot)) {
         if(_chk) underrunProtect(8);
         *(++_nSlot) = (NIns)(_t);
@@ -941,23 +960,22 @@ Assembler::B_cond_chk(ConditionCode _c, 
         *(--_nIns) = (NIns)( ((_c)<<28) | (0x51<<20) | (PC<<16) | (PC<<12) | ((-offs) & 0xFFFFFF) );
     } else {
         if(_chk) underrunProtect(12);
         *(--_nIns) = (NIns)(_t);
         *(--_nIns) = (NIns)( COND_AL | (0xA<<24) | ((-4)>>2) & 0xFFFFFF );
         *(--_nIns) = (NIns)( ((_c)<<28) | (0x51<<20) | (PC<<16) | (PC<<12) | 0x0 );
     }
 
-    asm_output("%s %p", _c == AL ? "jmp" : "b(cnd)", (void*)(_t));
+    asm_output("b%s %p", condNames[_c], (void*)(_t));
 }
 
 void
-Assembler::asm_add_imm(Register rd, Register rn, int32_t imm)
+Assembler::asm_add_imm(Register rd, Register rn, int32_t imm, int stat)
 {
-
     int rot = 16;
     uint32_t immval;
     bool pos;
 
     if (imm >= 0) {
         immval = (uint32_t) imm;
         pos = true;
     } else {
@@ -968,32 +986,63 @@ Assembler::asm_add_imm(Register rd, Regi
     while (immval && ((immval & 0x3) == 0)) {
         immval >>= 2;
         rot--;
     }
 
     rot &= 0xf;
 
     if (immval < 256) {
-        underrunProtect(4);
-        if (pos)
-            *(--_nIns) = (NIns)( COND_AL | OP_IMM | OP_STAT | (1<<23) | (rn<<16) | (rd<<12) | (rot << 8) | immval );
-        else
-            *(--_nIns) = (NIns)( COND_AL | OP_IMM | OP_STAT | (1<<22) | (rn<<16) | (rd<<12) | (rot << 8) | immval );
-        asm_output("add %s,%s,%d",gpn(rd),gpn(rn),imm);
-    } else {
+        if (pos) {
+            ALUi_rot(AL, add, stat, rd, rn, immval, rot);
+        } else {
+            ALUi_rot(AL, sub, stat, rd, rn, immval, rot);
+        }
+   } else {
         // add scratch to rn, after loading the value into scratch.
-
-        // make sure someone isn't trying to use Scratch as an operand
-        NanoAssert(rn != Scratch);
+        // make sure someone isn't trying to use IP as an operand
+        NanoAssert(rn != IP);
+        ALUr(AL, add, stat, rd, rn, IP);
+        asm_ld_imm(IP, imm);
+    }
+}
 
-        *(--_nIns) = (NIns)( COND_AL | OP_STAT | (1<<23) | (rn<<16) | (rd<<12) | (Scratch));
-        asm_output("add %s,%s,%s",gpn(rd),gpn(rn),gpn(Scratch));
-
-        LD32_nochk(Scratch, imm);
+void
+Assembler::asm_sub_imm(Register rd, Register rn, int32_t imm, int stat)
+{
+    if (imm > -256 && imm < 256) {
+        if (imm >= 0)
+            ALUi(AL, sub, stat, rd, rn, imm);
+        else
+            ALUi(AL, add, stat, rd, rn, -imm);
+    } else if (imm >= 0) {
+        if (imm <= 510) {
+            /* between 0 and 510, inclusive */
+            int rem = imm - 255;
+            NanoAssert(rem < 256);
+            ALUi(AL, sub, stat, rd, rn, rem & 0xff);
+            ALUi(AL, sub, stat, rd, rn, 0xff);
+        } else {
+            /* more than 510 */
+            NanoAssert(rn != IP);
+            ALUr(AL, sub, stat, rd, rn, IP);
+            asm_ld_imm(IP, imm);
+        }
+    } else {
+        if (imm >= -510) {
+            /* between -510 and -1, inclusive */
+            int rem = -imm - 255;
+            ALUi(AL, add, stat, rd, rn, rem & 0xff);
+            ALUi(AL, add, stat, rd, rn, 0xff);
+        } else {
+            /* less than -510 */
+            NanoAssert(rn != IP);
+            ALUr(AL, add, stat, rd, rn, IP);
+            asm_ld_imm(IP, -imm);
+        }
     }
 }
 
 /*
  * VFP
  */
 
 void
@@ -1107,24 +1156,26 @@ Assembler::asm_branch(bool branchOnFalse
 
         if (branchOnFalse) {
             switch (condop) {
                 case LIR_feq: cc = NE; break;
                 case LIR_flt: cc = PL; break;
                 case LIR_fgt: cc = LE; break;
                 case LIR_fle: cc = HI; break;
                 case LIR_fge: cc = LT; break;
+                default: NanoAssert(0); break;
             }
         } else {
             switch (condop) {
                 case LIR_feq: cc = EQ; break;
                 case LIR_flt: cc = MI; break;
                 case LIR_fgt: cc = GT; break;
                 case LIR_fle: cc = LS; break;
                 case LIR_fge: cc = GE; break;
+                default: NanoAssert(0); break;
             }
         }
 
         B_cond(cc, targ);
         asm_output("b(%d) 0x%08x", cc, (unsigned int) targ);
 
         NIns *at = _nIns;
         asm_fcmp(cond);
@@ -1203,55 +1254,77 @@ Assembler::asm_cmp(LIns *cond)
 
     // ready to issue the compare
     if (rhs->isconst()) {
         int c = rhs->constval();
         if (c == 0 && cond->isop(LIR_eq)) {
             Register r = findRegFor(lhs, GpRegs);
             TEST(r,r);
             // No 64-bit immediates so fall-back to below
-        }
-        else if (!rhs->isQuad()) {
+        } else if (!rhs->isQuad()) {
             Register r = getBaseReg(lhs, c, GpRegs);
-            CMPi(r, c);
+            asm_cmpi(r, c);
+        } else {
+            NanoAssert(0);
         }
     } else {
         findRegFor2(GpRegs, lhs, rA, rhs, rB);
         Register ra = rA->reg;
         Register rb = rB->reg;
         CMP(ra, rb);
     }
 }
 
 void
+Assembler::asm_cmpi(Register r, int32_t imm)
+{
+    if (imm < 0) {
+        if (imm > -256) {
+            ALUi(AL, cmn, 1, 0, r, -imm);
+        } else {
+            CMP(r, IP);
+            asm_ld_imm(IP, imm);
+        }
+    } else {
+        if (imm < 256) {
+            ALUi(AL, cmp, 1, 0, r, imm);
+        } else {
+            CMP(r, IP);
+            asm_ld_imm(IP, imm);
+        }
+    }
+}
+
+void
 Assembler::asm_loop(LInsp ins, NInsList& loopJumps)
 {
     // XXX asm_loop should be in Assembler.cpp!
 
     JMP_far(0);
     loopJumps.add(_nIns);
 
     // If the target we are looping to is in a different fragment, we have to restore
     // SP since we will target fragEntry and not loopEntry.
     if (ins->record()->exit->target != _thisfrag)
-        MR(SP,FP);
+        MOV(SP,FP);
 }
 
 void
 Assembler::asm_fcond(LInsp ins)
 {
     // only want certain regs
     Register r = prepResultReg(ins, AllowableFlagRegs);
 
     switch (ins->opcode()) {
         case LIR_feq: SET(r,EQ,NE); break;
         case LIR_flt: SET(r,MI,PL); break;
         case LIR_fgt: SET(r,GT,LE); break;
         case LIR_fle: SET(r,LS,HI); break;
         case LIR_fge: SET(r,GE,LT); break;
+        default: NanoAssert(0); break;
     }
 
     asm_fcmp(ins);
 }
 
 void
 Assembler::asm_cond(LInsp ins)
 {
@@ -1318,88 +1391,84 @@ Assembler::asm_arith(LInsp ins)
 
     Register rr = prepResultReg(ins, allow);
     Reservation* rA = getresv(lhs);
     Register ra;
     // if this is last use of lhs in reg, we can re-use result reg
     if (rA == 0 || (ra = rA->reg) == UnknownReg)
         ra = findSpecificRegFor(lhs, rr);
     // else, rA already has a register assigned.
+    NanoAssert(ra != UnknownReg);
 
     if (forceReg) {
         if (lhs == rhs)
             rb = ra;
 
         if (op == LIR_add || op == LIR_addp)
-            ADD(rr, rb);
+            ADDs(rr, ra, rb, 1);
         else if (op == LIR_sub)
-            SUB(rr, rb);
+            SUB(rr, ra, rb);
         else if (op == LIR_mul)
             MUL(rr, rb);
         else if (op == LIR_and)
-            AND(rr, rb);
+            AND(rr, ra, rb);
         else if (op == LIR_or)
-            OR(rr, rb);
+            ORR(rr, ra, rb);
         else if (op == LIR_xor)
-            XOR(rr, rb);
+            EOR(rr, ra, rb);
         else if (op == LIR_lsh)
-            SHL(rr, rb);
+            SHL(rr, ra, rb);
         else if (op == LIR_rsh)
-            SAR(rr, rb);
+            SAR(rr, ra, rb);
         else if (op == LIR_ush)
-            SHR(rr, rb);
+            SHR(rr, ra, rb);
         else
             NanoAssertMsg(0, "Unsupported");
     } else {
         int c = rhs->constval();
         if (op == LIR_add || op == LIR_addp)
-            ADDi(rr, c);
+            ADDi(rr, ra, c);
         else if (op == LIR_sub)
-                    SUBi(rr, c);
+            SUBi(rr, ra, c);
         else if (op == LIR_and)
-            ANDi(rr, c);
+            ANDi(rr, ra, c);
         else if (op == LIR_or)
-            ORi(rr, c);
+            ORRi(rr, ra, c);
         else if (op == LIR_xor)
-            XORi(rr, c);
+            EORi(rr, ra, c);
         else if (op == LIR_lsh)
-            SHLi(rr, c);
+            SHLi(rr, ra, c);
         else if (op == LIR_rsh)
-            SARi(rr, c);
+            SARi(rr, ra, c);
         else if (op == LIR_ush)
-            SHRi(rr, c);
+            SHRi(rr, ra, c);
         else
             NanoAssertMsg(0, "Unsupported");
     }
-
-    if (rr != ra)
-        MR(rr,ra);
 }
 
 void
 Assembler::asm_neg_not(LInsp ins)
 {
     LOpcode op = ins->opcode();
     Register rr = prepResultReg(ins, GpRegs);
 
     LIns* lhs = ins->oprnd1();
     Reservation *rA = getresv(lhs);
     // if this is last use of lhs in reg, we can re-use result reg
     Register ra;
     if (rA == 0 || (ra=rA->reg) == UnknownReg)
         ra = findSpecificRegFor(lhs, rr);
     // else, rA already has a register assigned.
+    NanoAssert(ra != UnknownReg);
 
     if (op == LIR_not)
-        NOT(rr);
+        MVN(rr, ra);
     else
-        NEG(rr);
-
-    if ( rr != ra )
-        MR(rr,ra);
+        RSBS(rr, ra);
 }
 
 void
 Assembler::asm_ld(LInsp ins)
 {
     LOpcode op = ins->opcode();
     LIns* base = ins->oprnd1();
     LIns* disp = ins->oprnd2();
@@ -1426,51 +1495,47 @@ Assembler::asm_ld(LInsp ins)
     }
 
     NanoAssertMsg(0, "Unsupported instruction in asm_ld");
 }
 
 void
 Assembler::asm_cmov(LInsp ins)
 {
-    LOpcode op = ins->opcode();
+    NanoAssert(ins->opcode() == LIR_cmov);
     LIns* condval = ins->oprnd1();
     NanoAssert(condval->isCmp());
 
     LIns* values = ins->oprnd2();
 
     NanoAssert(values->opcode() == LIR_2);
     LIns* iftrue = values->oprnd1();
     LIns* iffalse = values->oprnd2();
 
-    NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
+    NanoAssert(!iftrue->isQuad() && !iffalse->isQuad());
 
     const Register rr = prepResultReg(ins, GpRegs);
 
     // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
     // (This is true on Intel, is it true on all architectures?)
     const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
-    if (op == LIR_cmov) {
-        switch (condval->opcode()) {
-            // note that these are all opposites...
-            case LIR_eq:    MRNE(rr, iffalsereg);   break;
-            case LIR_ov:    MRNO(rr, iffalsereg);   break;
-            case LIR_cs:    MRNC(rr, iffalsereg);   break;
-            case LIR_lt:    MRGE(rr, iffalsereg);   break;
-            case LIR_le:    MRG(rr, iffalsereg);    break;
-            case LIR_gt:    MRLE(rr, iffalsereg);   break;
-            case LIR_ge:    MRL(rr, iffalsereg);    break;
-            case LIR_ult:   MRAE(rr, iffalsereg);   break;
-            case LIR_ule:   MRA(rr, iffalsereg);    break;
-            case LIR_ugt:   MRBE(rr, iffalsereg);   break;
-            case LIR_uge:   MRB(rr, iffalsereg);    break;
-            default: debug_only( NanoAssert(0) );   break;
-        }
-    } else if (op == LIR_qcmov) {
-        NanoAssert(0);
+    switch (condval->opcode()) {
+        // note that these are all opposites...
+        case LIR_eq:    MOVNE(rr, iffalsereg);  break;
+        case LIR_ov:    MOVVC(rr, iffalsereg);  break;
+        case LIR_cs:    MOVNC(rr, iffalsereg);  break;
+        case LIR_lt:    MOVGE(rr, iffalsereg);  break;
+        case LIR_le:    MOVGT(rr, iffalsereg);  break;
+        case LIR_gt:    MOVLE(rr, iffalsereg);  break;
+        case LIR_ge:    MOVLT(rr, iffalsereg);  break;
+        case LIR_ult:   MOVCS(rr, iffalsereg);  break;
+        case LIR_ule:   MOVHI(rr, iffalsereg);  break;
+        case LIR_ugt:   MOVLS(rr, iffalsereg);  break;
+        case LIR_uge:   MOVCC(rr, iffalsereg);  break;
+        default: debug_only( NanoAssert(0) );   break;
     }
     /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
     asm_cmp(condval);
 }
 
 void
 Assembler::asm_qhi(LInsp ins)
 {
@@ -1533,85 +1598,32 @@ Assembler::asm_param(LInsp ins)
 }
 
 void
 Assembler::asm_short(LInsp ins)
 {
     Register rr = prepResultReg(ins, GpRegs);
     int32_t val = ins->imm16();
     if (val == 0)
-        XOR(rr,rr);
+        EOR(rr,rr,rr);
     else
         LDi(rr, val);
 }
 
 void
 Assembler::asm_int(LInsp ins)
 {
     Register rr = prepResultReg(ins, GpRegs);
     int32_t val = ins->imm32();
     if (val == 0)
-        XOR(rr,rr);
+        EOR(rr,rr,rr);
     else
         LDi(rr, val);
 }
 
-#if 0
-void
-Assembler::asm_quad(LInsp ins)
-{
-    Reservation *rR = getresv(ins);
-    Register rr = rR->reg;
-    if (rr != UnknownReg)
-    {
-        // @todo -- add special-cases for 0 and 1
-        _allocator.retire(rr);
-        rR->reg = UnknownReg;
-        NanoAssert((rmask(rr) & FpRegs) != 0);
-
-        const double d = ins->constvalf();
-        const uint64_t q = ins->constvalq();
-        if (rmask(rr) & XmmRegs) {
-            if (q == 0.0) {
-                // test (int64)0 since -0.0 == 0.0
-                SSE_XORPDr(rr, rr);
-            } else if (d == 1.0) {
-                // 1.0 is extremely frequent and worth special-casing!
-                static const double k_ONE = 1.0;
-                LDSDm(rr, &k_ONE);
-            } else {
-                findMemFor(ins);
-                const int d = disp(rR);
-                SSE_LDQ(rr, d, FP);
-            }
-        } else {
-            if (q == 0.0) {
-                // test (int64)0 since -0.0 == 0.0
-                FLDZ();
-            } else if (d == 1.0) {
-                FLD1();
-            } else {
-                findMemFor(ins);
-                int d = disp(rR);
-                FLDQ(d,FP);
-            }
-        }
-    }
-
-    // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
-    int d = disp(rR);
-    freeRsrcOf(ins, false);
-    if (d) {
-        const int32_t* p = (const int32_t*) (ins-2);
-        STi(FP,d+4,p[1]);
-        STi(FP,d,p[0]);
-    }
-}
-#endif
-
 void
 Assembler::asm_arg(ArgSize sz, LInsp p, Register r)
 {
     if (sz == ARGSIZE_Q) {
         // ref arg - use lea
         if (r != UnknownReg) {
             // arg in specific reg
             int da = findMemFor(p);
@@ -1626,33 +1638,95 @@ Assembler::asm_arg(ArgSize sz, LInsp p, 
                 LDi(r, p->constval());
             } else {
                 Reservation* rA = getresv(p);
                 if (rA) {
                     if (rA->reg == UnknownReg) {
                         // load it into the arg reg
                         int d = findMemFor(p);
                         if (p->isop(LIR_alloc)) {
-                            LEA(r, d, FP);
+                            asm_add_imm(r, FP, d);
                         } else {
                             LD(r, d, FP);
                         }
                     } else {
                         // it must be in a saved reg
-                        MR(r, rA->reg);
+                        MOV(r, rA->reg);
                     }
                 } else {
                     // this is the last use, so fine to assign it
                     // to the scratch reg, it's dead after this point.
                     findSpecificRegFor(p, r);
                 }
             }
         } else {
             asm_pusharg(p);
         }
     } else {
         NanoAssert(sz == ARGSIZE_F);
         asm_farg(p);
     }
 }
 
+void
+Assembler::asm_arm_farg(LInsp arg, Register rlo, Register rhi)
+{
+    if (AvmCore::config.vfp) {
+        Register sr = findRegFor(arg, FpRegs);
+
+        if (rlo != UnknownReg && rhi != UnknownReg) {
+            NanoAssert(sr != UnknownReg);
+            FMRRD(rlo, rhi, sr);
+        } else if (rlo != UnknownReg && rhi == UnknownReg) {
+            NanoAssert(sr != UnknownReg);
+            STR_preindex(IP, SP, -4);
+            FMRDL(IP, sr);
+            FMRDH(rhi, sr);
+        } else {
+            asm_pusharg(arg);
+        }
+
+        return;
+    }
+
+    NanoAssert(arg->opcode() == LIR_qjoin || arg->opcode() == LIR_quad);
+
+    if (rlo != UnknownReg && rhi != UnknownReg) {
+        if (arg->opcode() == LIR_qjoin) {
+            LIns* lo = arg->oprnd1();
+            LIns* hi = arg->oprnd2();
+
+            findSpecificRegFor(lo, rlo);
+            findSpecificRegFor(hi, rhi);
+        } else {
+            // LIR_quad
+            const int32_t* p = (const int32_t*) (arg-2);
+
+            asm_ld_imm(rhi, p[1]);
+            asm_ld_imm(rlo, p[0]);
+        }
+    } else if (rlo != UnknownReg && rhi == UnknownReg) {
+        if (arg->opcode() == LIR_qjoin) {
+            LIns* lo = arg->oprnd1();
+            LIns* hi = arg->oprnd2();
+
+            int d = findMemFor(hi);
+
+            findSpecificRegFor(lo, rlo);
+
+            STR_preindex(IP, SP, -4);
+            LDR(IP, FP, d);
+        } else {
+            // LIR_quad
+            const int32_t* p = (const int32_t*) (arg-2);
+
+            STR_preindex(IP, SP, -4);
+            asm_ld_imm(IP, p[1]);
+            asm_ld_imm(rlo, p[0]);
+        }
+    } else {
+        asm_pusharg(arg);
+    }
 }
+
+}
+
 #endif /* FEATURE_NANOJIT */
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -53,35 +53,18 @@
 #define count_imt()
 #endif
 
 namespace nanojit
 {
 
 const int NJ_LOG2_PAGE_SIZE = 12;       // 4K
 
-// If NJ_ARM_VFP is defined, then VFP is assumed to
-// be present.  If it's not defined, then softfloat
-// is used, and NJ_SOFTFLOAT is defined.
-// When nanojit is used as part of Mozilla's JavaScript engine, this is
-// #defined or left #undefined by js/src/configure.in.
-//#define NJ_ARM_VFP
-
-#ifdef NJ_ARM_VFP
-
-// only d0-d7; we'll use d7 as s14-s15 for i2f/u2f/etc.
+// only d0-d6 are actually used; we'll use d7 as s14-s15 for i2f/u2f/etc.
 #define NJ_VFP_MAX_REGISTERS            8
-
-#else
-
-#define NJ_VFP_MAX_REGISTERS            0
-#define NJ_SOFTFLOAT
-
-#endif
-
 #define NJ_MAX_REGISTERS                (11 + NJ_VFP_MAX_REGISTERS)
 #define NJ_MAX_STACK_ENTRY              256
 #define NJ_MAX_PARAMETERS               16
 #define NJ_ALIGN_STACK                  8
 #define NJ_STACK_OFFSET                 0
 
 #define NJ_CONSTANT_POOLS
 const int NJ_MAX_CPOOL_OFFSET = 4096;
@@ -119,22 +102,17 @@ typedef enum {
     D5 = 21,
     D6 = 22,
     D7 = 23,
 
     FirstFloatReg = 16,
     LastFloatReg = 22,
         
     FirstReg = 0,
-#ifdef NJ_ARM_VFP
     LastReg = 23,
-#else
-    LastReg = 10,
-#endif
-    Scratch = IP,
     UnknownReg = 31,
 
     // special value referring to S14
     FpSingleScratch = 24
 } Register;
 
 /* ARM condition codes */
 typedef enum {
@@ -151,18 +129,16 @@ typedef enum {
     GE = 0xA, // Greater or Equal
     LT = 0xB, // Less Than
     GT = 0xC, // Greater Than
     LE = 0xD, // Less or Equal
     AL = 0xE, // ALways
     NV = 0xF  // NeVer
 } ConditionCode;
 
-const char *ccName(ConditionCode cc);
-
 typedef int RegisterMask;
 typedef struct _FragInfo {
     RegisterMask    needRestoring;
     NIns*           epilogue;
 } FragInfo;
 
 // D0-D6 are not saved; D7-D15 are, but we don't use those,
 // so we don't have to worry about saving/restoring them
@@ -170,32 +146,34 @@ static const RegisterMask SavedFpRegs = 
 static const RegisterMask SavedRegs = 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
 static const int NumSavedRegs = 7;
 
 static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6; // no D7; S14-S15 are used for i2f/u2f.
 static const RegisterMask GpRegs = 0x07FF;
 static const RegisterMask AllowableFlagRegs = 1<<R0 | 1<<R1 | 1<<R2 | 1<<R3 | 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
 
 #define IsFpReg(_r)     ((rmask(_r) & (FpRegs | (1<<D7))) != 0)
-#define IsGpReg(_r)     ((rmask(_r) & (GpRegs | (1<<Scratch))) != 0)
+#define IsGpReg(_r)     ((rmask(_r) & (GpRegs | (1<<IP))) != 0)
 #define FpRegNum(_fpr)  ((_fpr) - FirstFloatReg)
 
 #define firstreg()      R0
 #define nextreg(r)      ((Register)((int)(r)+1))
 #if 0
 static Register nextreg(Register r) {
     if (r == R10)
         return D0;
     return (Register)(r+1);
 }
 #endif
 // only good for normal regs
 #define imm2register(c) (Register)(c-1)
 
 verbose_only( extern const char* regNames[]; )
+verbose_only( extern const char* condNames[]; )
+verbose_only( extern const char* shiftNames[]; )
 
 // abstract to platform specific calls
 #define nExtractPlatformFlags(x)    0
 
 #define DECLARE_PLATFORM_STATS()
 
 #define DECLARE_PLATFORM_REGALLOC()
 
@@ -204,17 +182,22 @@ verbose_only( extern const char* regName
     void LD32_nochk(Register r, int32_t imm);                           \
     void BL(NIns*);                                                     \
     void JMP_far(NIns*);                                                \
     void B_cond_chk(ConditionCode, NIns*, bool);                        \
     void underrunProtect(int bytes);                                    \
     void nativePageReset();                                             \
     void nativePageSetup();                                             \
     void asm_quad_nochk(Register, const int32_t*);                      \
-    void asm_add_imm(Register, Register, int32_t);                      \
+    void asm_add_imm(Register, Register, int32_t, int stat = 0);        \
+    void asm_sub_imm(Register, Register, int32_t, int stat = 0);        \
+    void asm_cmpi(Register, int32_t imm);                               \
+    void asm_ldr_chk(Register d, Register b, int32_t off, bool chk);    \
+    void asm_ld_imm(Register d, int32_t imm);                           \
+    void asm_arm_farg(LInsp arg, Register rlo, Register rhi); \
     int* _nSlot;                                                        \
     int* _nExitSlot;
 
 
 #define asm_farg(i) NanoAssert(false)
 
 //printf("jmp_l_n count=%d, nins=%X, %X = %X\n", (_c), nins, _nIns, ((intptr_t)(nins+(_c))-(intptr_t)_nIns - 4) );
 
@@ -254,289 +237,264 @@ typedef enum {
     (x) = (dictwordp*)_nIns; }
 
 // BX 
 #define BX(_r)  do {                                                    \
         underrunProtect(4);                                             \
         *(--_nIns) = (NIns)( COND_AL | (0x12<<20) | (0xFFF<<8) | (1<<4) | (_r)); \
         asm_output("bx LR"); } while(0)
 
-// _l = _r OR _l
-#define OR(_l,_r)       do {                                            \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0xC<<21) | (_r<<16) | (_l<<12) | (_l) ); \
-        asm_output("or %s,%s",gpn(_l),gpn(_r)); } while(0)
+/*
+ * ALU operations
+ */
 
-// _r = _r OR _imm
-#define ORi(_r,_imm)    do {                                            \
-        NanoAssert(isU8((_imm)));                                       \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | OP_IMM | (0xC<<21) | (_r<<16) | (_r<<12) | ((_imm)&0xFF) ); \
-        asm_output("or %s,%d",gpn(_r), (_imm)); } while(0)
+enum {
+    ARM_and = 0,
+    ARM_eor = 1,
+    ARM_sub = 2,
+    ARM_rsb = 3,
+    ARM_add = 4,
+    ARM_adc = 5,
+    ARM_sbc = 6,
+    ARM_rsc = 7,
+    ARM_tst = 8,
+    ARM_teq = 9,
+    ARM_cmp = 10,
+    ARM_cmn = 11,
+    ARM_orr = 12,
+    ARM_mov = 13,
+    ARM_bic = 14,
+    ARM_mvn = 15
+};
 
-// _l = _r AND _l
-#define AND(_l,_r) do {                                                 \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | ((_r)<<16) | ((_l)<<12) | (_l)); \
-        asm_output("and %s,%s",gpn(_l),gpn(_r)); } while(0)
+// ALU operation with register and 8-bit immediate arguments
+//  S   - bit, 0 or 1, whether the CPSR register is updated
+//  rd  - destination register
+//  rl  - first (left) operand register
+//  imm - immediate (max 8 bits)
+#define ALUi(cond, op, S, rd, rl, imm) do {\
+        underrunProtect(4);\
+        NanoAssert(isU8(imm));\
+        *(--_nIns) = (NIns) ((cond)<<28 | OP_IMM | (ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (imm));\
+        if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
+            asm_output("%s%s%s %s, #0x%X", #op, condNames[cond], (S)?"s":"", gpn(rd), (imm));\
+        else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
+            NanoAssert(S==1);\
+            asm_output("%s%s %s, #0x%X", #op, condNames[cond], gpn(rl), (imm));\
+        } else\
+            asm_output("%s%s%s %s, %s, #0x%X", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), (imm));\
+    } while (0)
 
-// _r = _r AND _imm
-#define ANDi(_r,_imm) do {                                              \
-        if (isU8((_imm))) {                                             \
-            underrunProtect(4);                                         \
-            *(--_nIns) = (NIns)( COND_AL | OP_IMM | ((_r)<<16) | ((_r)<<12) | ((_imm)&0xFF) ); \
-            asm_output("and %s,%d",gpn(_r),(_imm));}                   \
-        else if ((_imm)<0 && (_imm)>-256) {                             \
-            underrunProtect(8);                                         \
-            *(--_nIns) = (NIns)( COND_AL | ((_r)<<16) | ((_r)<<12) | (Scratch) ); \
-            asm_output("and %s,%s",gpn(_r),gpn(Scratch));              \
-            *(--_nIns) = (NIns)( COND_AL | (0x3E<<20) | ((Scratch)<<12) | (((_imm)^0xFFFFFFFF)&0xFF) ); \
-            asm_output("mvn %s,%d",gpn(Scratch),(_imm));}              \
-        else NanoAssert(0);                                             \
+// ALU operation with register and rotated 8-bit immediate arguments
+//  S   - bit, 0 or 1, whether the CPSR register is updated
+//  rd  - destination register
+//  rl  - first (left) operand register
+//  imm - immediate (max 8 bits)
+//  rot - rotation to apply to imm
+#define ALUi_rot(cond, op, S, rd, rl, imm, rot) do {\
+        underrunProtect(4);\
+        NanoAssert(isU8(imm));\
+        *(--_nIns) = (NIns) ((cond)<<28 | OP_IMM | (ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (rot)<<8 | (imm));\
+        if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
+            asm_output("%s%s%s %s, #0x%X, %d", #op, condNames[cond], (S)?"s":"", gpn(rd), (imm), (rot)*2);\
+        else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
+            NanoAssert(S==1);\
+            asm_output("%s%s %s, #0x%X, %d", #op, condNames[cond], gpn(rl), (imm), (rot)*2);\
+        } else\
+            asm_output("%s%s%s %s, %s, #0x%X, %d", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), (imm), (rot)*2);\
     } while (0)
 
 
-// _l = _l XOR _r
-#define XOR(_l,_r)  do {                                                \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (1<<21) | ((_r)<<16) | ((_l)<<12) | (_l)); \
-        asm_output("eor %s,%s",gpn(_l),gpn(_r)); } while(0)
-
-// _r = _r XOR _imm
-#define XORi(_r,_imm)   do {                                            \
-        NanoAssert(isU8((_imm)));                                       \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | OP_IMM | (1<<21) | ((_r)<<16) | ((_r)<<12) | ((_imm)&0xFF) ); \
-        asm_output("eor %s,%d",gpn(_r),(_imm)); } while(0)
+// ALU operation with two register arguments
+//  S   - bit, 0 or 1, whether the CPSR register is updated
+//  rd  - destination register
+//  rl  - first (left) operand register
+//  rr  - second (right) operand register
+#define ALUr(cond, op, S, rd, rl, rr) do {\
+        underrunProtect(4);\
+        *(--_nIns) = (NIns) ((cond)<<28 |(ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (rr));\
+        if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
+            asm_output("%s%s%s %s, %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rr));\
+        else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
+            NanoAssert(S==1);\
+            asm_output("%s%s  %s, %s", #op, condNames[cond], gpn(rl), gpn(rr));\
+        } else\
+            asm_output("%s%s%s %s, %s, %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), gpn(rr));\
+    } while (0)
 
-// _d = _n + _m
-#define arm_ADD(_d,_n,_m) do {                                          \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | OP_STAT | (1<<23) | ((_n)<<16) | ((_d)<<12) | (_m)); \
-        asm_output("add %s,%s+%s",gpn(_d),gpn(_n),gpn(_m)); } while(0)
-
-// _l = _l + _r
-#define ADD(_l,_r)   arm_ADD(_l,_l,_r)
-
-// Note that this sometimes converts negative immediate values to a to a sub.
-// _d = _r + _imm
-#define arm_ADDi(_d,_n,_imm)   asm_add_imm(_d,_n,_imm)
-#define ADDi(_r,_imm)  arm_ADDi(_r,_r,_imm)
-
-// _l = _l - _r
-#define SUB(_l,_r)  do {                                                \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (1<<22) | ((_l)<<16) | ((_l)<<12) | (_r)); \
-        asm_output("sub %s,%s",gpn(_l),gpn(_r)); } while(0)
+// ALU operation with two register arguments, with rr operated on by a shift and shift immediate
+//  S   - bit, 0 or 1, whether the CPSR register is updated
+//  rd  - destination register
+//  rl  - first (left) operand register
+//  rr  - second (right) operand register
+//  sh  - a ShiftOperator
+//  imm - immediate argument to shift operator, 5 bits (0..31)
+#define ALUr_shi(cond, op, S, rd, rl, rr, sh, imm) do {\
+        underrunProtect(4);\
+        NanoAssert((imm)>=0 && (imm)<32);\
+        *(--_nIns) = (NIns) ((cond)<<28 |(ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (imm)<<7 | (sh)<<4 | (rr));\
+        if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
+            asm_output("%s%s%s %s, %s, %s #%d", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rr), shiftNames[sh], (imm));\
+        else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
+            NanoAssert(S==1);\
+            asm_output("%s%s  %s, %s, %s #%d", #op, condNames[cond], gpn(rl), gpn(rr), shiftNames[sh], (imm));\
+        } else\
+            asm_output("%s%s%s %s, %s, %s, %s #%d", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), gpn(rr), shiftNames[sh], (imm));\
+    } while (0)
 
-// _r = _r - _imm
-#define SUBi(_r,_imm)  do {                                             \
-        if ((_imm)>-256 && (_imm)<256) {                                \
-            underrunProtect(4);                                         \
-            if ((_imm)>=0)  *(--_nIns) = (NIns)( COND_AL | OP_IMM | (1<<22) | ((_r)<<16) | ((_r)<<12) | ((_imm)&0xFF) ); \
-            else            *(--_nIns) = (NIns)( COND_AL | OP_IMM | (1<<23) | ((_r)<<16) | ((_r)<<12) | ((-(_imm))&0xFF) ); \
-        } else {                                                        \
-            if ((_imm)>=0) {                                            \
-                if ((_imm)<=510) {                                      \
-                    underrunProtect(8);                                 \
-                    int rem = (_imm) - 255;                             \
-                    NanoAssert(rem<256);                                \
-                    *(--_nIns) = (NIns)( COND_AL | OP_IMM | (1<<22) | ((_r)<<16) | ((_r)<<12) | (rem&0xFF) ); \
-                    *(--_nIns) = (NIns)( COND_AL | OP_IMM | (1<<22) | ((_r)<<16) | ((_r)<<12) | (0xFF) ); \
-                } else {                                                \
-                    underrunProtect(4+LD32_size);                       \
-                    *(--_nIns) = (NIns)( COND_AL | (1<<22) | ((_r)<<16) | ((_r)<<12) | (Scratch)); \
-                    LD32_nochk(Scratch, _imm);                          \
-                }                                                       \
-            } else {                                                    \
-                if ((_imm)>=-510) {                                     \
-                    underrunProtect(8);                                 \
-                    int rem = -(_imm) - 255;                            \
-                    *(--_nIns) = (NIns)( COND_AL | OP_IMM | (1<<23) | ((_r)<<16) | ((_r)<<12) | ((rem)&0xFF) ); \
-                    *(--_nIns) = (NIns)( COND_AL | OP_IMM | (1<<23) | ((_r)<<16) | ((_r)<<12) | (0xFF) ); \
-                } else {                                                \
-                    underrunProtect(4+LD32_size);                       \
-                    *(--_nIns) = (NIns)( COND_AL | (1<<23) | ((_r)<<16) | ((_r)<<12) | (Scratch)); \
-                    LD32_nochk(Scratch, -(_imm)); \
-                }                                                       \
-            }                                                           \
-        }                                                               \
-        asm_output("sub %s,%d",gpn(_r),(_imm));                        \
+// ALU operation with two register arguments, with rr operated on by a shift and shift register
+//  S   - bit, 0 or 1, whether the CPSR register is updated
+//  rd  - destination register
+//  rl  - first (left) operand register
+//  rr  - second (right) operand register
+//  sh  - a ShiftOperator
+//  rs  - shift operand register
+#define ALUr_shr(cond, op, S, rd, rl, rr, sh, rs) do {\
+        underrunProtect(4);\
+        *(--_nIns) = (NIns) ((cond)<<28 |(ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (rs)<<8 | (sh)<<4 | (rr));\
+        if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
+            asm_output("%s%s%s %s, %s, %s %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rr), shiftNames[sh], gpn(rs));\
+        else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
+            NanoAssert(S==1);\
+            asm_output("%s%s  %s, %s, %s %s", #op, condNames[cond], gpn(rl), gpn(rr), shiftNames[sh], gpn(rs));\
+        } else\
+            asm_output("%s%s%s %s, %s, %s, %s %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), gpn(rr), shiftNames[sh], gpn(rs));\
     } while (0)
 
+// _d = _l OR _r
+#define ORR(_d,_l,_r) ALUr(AL, orr, 0, _d, _l, _r)
+
+// _d = _l OR _imm
+#define ORRi(_d,_l,_imm) ALUi(AL, orr, 0, _d, _l, _imm)
+
+// _d = _l AND _r
+#define AND(_d,_l,_r) ALUr(AL, and, 0, _d, _l, _r)
+
+// _d = _l AND _imm
+#define ANDi(_d,_l,_imm) ALUi(AL, and, 0, _d, _l, _imm)
+
+// _d = _l ^ _r
+#define EOR(_d,_l,_r) ALUr(AL, eor, 0, _d, _l, _r)
+
+// _d = _l ^ _imm
+#define EORi(_d,_l,_imm) ALUi(AL, eor, 0, _d, _l, _imm)
+
+// _d = _l + _r; update flags
+#define ADD(_d,_l,_r) ALUr(AL, add, 1, _d, _l, _r)
+
+// _d = _l + _r; update flags if _stat == 1
+#define ADDs(_d,_l,_r,_stat) ALUr(AL, add, _stat, _d, _l, _r)
+
+// _d = _l + _imm; update flags
+#define ADDi(_d,_l,_imm) asm_add_imm(_d, _l, _imm, 1)
+
+// _d = _l + _imm; update flags if _stat == 1
+#define ADDis(_d,_l,_imm,_stat) asm_add_imm(_d, _l, _imm, _stat)
+
+// _d = _l - _r; update flags
+#define SUB(_d,_l,_r) ALUr(AL, sub, 1, _d, _l, _r)
+
+// _d = _l - _imm; update flags
+#define SUBi(_d,_l,_imm)  asm_sub_imm(_d, _l, _imm, 1)
+
 // _l = _l * _r
 #define MUL(_l,_r)  do {                                                \
         underrunProtect(4);                                             \
         *(--_nIns) = (NIns)( COND_AL | (_l)<<16 | (_l)<<8 | 0x90 | (_r) ); \
         asm_output("mul %s,%s",gpn(_l),gpn(_r)); } while(0)
 
+// _d = 0 - _r
+#define RSBS(_d,_r) ALUi(AL, rsb, 1, _d, _r, 0)
 
-// RSBS
-// _r = -_r
-#define NEG(_r) do {                                                    \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL |  (0x27<<20) | ((_r)<<16) | ((_r)<<12) ); \
-        asm_output("neg %s",gpn(_r)); } while(0)
-
-// MVNS
-// _r = !_r
-#define NOT(_r) do {                                                    \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL |  (0x1F<<20) | ((_r)<<12) |  (_r) ); \
-        asm_output("mvn %s",gpn(_r)); } while(0)
+// _d = ~_r (one's complement)
+#define MVN(_d,_r) ALUr(AL, mvn, 0, _d, 0, _r)
 
-// MOVS _r, _r, LSR <_s>
-// _r = _r >> _s
-#define SHR(_r,_s) do {                                                 \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x1B<<20) | ((_r)<<12) | ((_s)<<8) | (LSR_reg<<4) | (_r) ); \
-        asm_output("shr %s,%s",gpn(_r),gpn(_s)); } while(0)
+// MOVS _d, _r, LSR <_s>
+// _d = _r >> _s
+#define SHR(_d,_r,_s) ALUr_shr(AL, mov, 1, _d, 0, _r, LSR_reg, _s)
 
-// MOVS _r, _r, LSR #_imm
-// _r = _r >> _imm
-#define SHRi(_r,_imm) do {                                              \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x1B<<20) | ((_r)<<12) | ((_imm)<<7) | (LSR_imm<<4) | (_r) ); \
-        asm_output("shr %s,%d",gpn(_r),_imm); } while(0)
+// MOVS _d, _r, LSR #_imm
+// _d = _r >> _imm
+#define SHRi(_d,_r,_imm)  ALUr_shi(AL, mov, 1, _d, 0, _r, LSR_imm, _imm)
 
-// MOVS _r, _r, ASR <_s>
-// _r = _r >> _s
-#define SAR(_r,_s) do {                                                 \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x1B<<20) | ((_r)<<12) | ((_s)<<8) | (ASR_reg<<4) | (_r) ); \
-        asm_output("asr %s,%s",gpn(_r),gpn(_s)); } while(0)
-
+// MOVS _d, _r, ASR <_s>
+// _d = _r >> _s
+#define SAR(_d,_r,_s) ALUr_shr(AL, mov, 1, _d, 0, _r, ASR_reg, _s)
 
 // MOVS _r, _r, ASR #_imm
-// _r = _r >> _imm
-#define SARi(_r,_imm) do {                                              \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x1B<<20) | ((_r)<<12) | ((_imm)<<7) | (ASR_imm<<4) | (_r) ); \
-        asm_output("asr %s,%d",gpn(_r),_imm); } while(0)
+// _d = _r >> _imm
+#define SARi(_d,_r,_imm) ALUr_shi(AL, mov, 1, _d, 0, _r, ASR_imm, _imm)
 
-// MOVS _r, _r, LSL <_s>
-// _r = _r << _s
-#define SHL(_r,_s) do {                                                 \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x1B<<20) | ((_r)<<12) | ((_s)<<8) | (LSL_reg<<4) | (_r) ); \
-        asm_output("lsl %s,%s",gpn(_r),gpn(_s)); } while(0)
+// MOVS _d, _r, LSL <_s>
+// _d = _r << _s
+#define SHL(_d, _r, _s) ALUr_shr(AL, mov, 1, _d, 0, _r, LSL_reg, _s)
 
-// MOVS _r, _r, LSL #_imm
-// _r = _r << _imm
-#define SHLi(_r,_imm) do {                                              \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x1B<<20) | ((_r)<<12) | ((_imm)<<7) | (LSL_imm<<4) | (_r) ); \
-        asm_output("lsl %s,%d",gpn(_r),(_imm)); } while(0)
+// MOVS _d, _r, LSL #_imm
+// _d = _r << _imm
+#define SHLi(_d, _r, _imm) ALUr_shi(AL, mov, 1, _d, 0, _r, LSL_imm, _imm)
                     
 // TST
-#define TEST(_d,_s) do {                                                \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x11<<20) | ((_d)<<16) | (_s) ); \
-        asm_output("test %s,%s",gpn(_d),gpn(_s)); } while(0)
-
-#define TSTi(_d,_imm) do {                                              \
-        underrunProtect(4);                                             \
-        NanoAssert(((_imm) & 0xff) == (_imm));                          \
-        *(--_nIns) = (NIns)( COND_AL | OP_IMM | (0x11<<20) | ((_d) << 16) | (0xF<<12) | ((_imm) & 0xff) ); \
-        asm_output("tst %s,#0x%x", gpn(_d), _imm);                     \
-    } while (0);
+#define TEST(_l,_r)     ALUr(AL, tst, 1, 0, _l, _r)
+#define TSTi(_d,_imm)   ALUi(AL, tst, 1, 0, _d, _imm)
 
 // CMP
-#define CMP(_l,_r)  do {                                                \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x015<<20) | ((_l)<<16) | (_r) ); \
-        asm_output("cmp %s,%s",gpn(_l),gpn(_r)); } while(0)
-
-// CMP (or CMN)
-#define CMPi(_r,_imm)  do {                                             \
-        if (_imm<0) {                                                   \
-            if ((_imm)>-256) {                                          \
-                underrunProtect(4);                                     \
-                *(--_nIns) = (NIns)( COND_AL | (0x37<<20) | ((_r)<<16) | (-(_imm)) ); \
-            } else {                                                      \
-                underrunProtect(4+LD32_size);                           \
-                *(--_nIns) = (NIns)( COND_AL | (0x17<<20) | ((_r)<<16) | (Scratch) ); \
-                LD32_nochk(Scratch, (_imm));                            \
-            }                                                           \
-        } else {                                                        \
-            if ((_imm)<256) {                                           \
-                underrunProtect(4);                                     \
-                *(--_nIns) = (NIns)( COND_AL | (0x035<<20) | ((_r)<<16) | ((_imm)&0xFF) ); \
-            } else {                                                    \
-                underrunProtect(4+LD32_size);                           \
-                *(--_nIns) = (NIns)( COND_AL | (0x015<<20) | ((_r)<<16) | (Scratch) ); \
-                LD32_nochk(Scratch, (_imm));                            \
-            }                                                           \
-        }                                                               \
-        asm_output("cmp %s,0x%x",gpn(_r),(_imm));                      \
-    } while(0)
+#define CMP(_l,_r)  ALUr(AL, cmp, 1, 0, _l, _r)
 
 // MOV
-#define MR(_d,_s)  do {                                                 \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0xD<<21) | ((_d)<<12) | (_s) ); \
-        asm_output("mov %s,%s",gpn(_d),gpn(_s)); } while (0)
+
+#define MOV_cond(_cond,_d,_s) ALUr(_cond, mov, 0, _d, 0, _s)
 
+#define MOV(dr,sr)   MOV_cond(AL, dr, sr)
+#define MOVEQ(dr,sr) MOV_cond(EQ, dr, sr)
+#define MOVNE(dr,sr) MOV_cond(NE, dr, sr)
+#define MOVLT(dr,sr) MOV_cond(LT, dr, sr)
+#define MOVLE(dr,sr) MOV_cond(LE, dr, sr)
+#define MOVGT(dr,sr) MOV_cond(GT, dr, sr)
+#define MOVGE(dr,sr) MOV_cond(GE, dr, sr)
+#define MOVCC(dr,sr) MOV_cond(CC, dr, sr)
+#define MOVLS(dr,sr) MOV_cond(LS, dr, sr)
+#define MOVHI(dr,sr) MOV_cond(HI, dr, sr)
+#define MOVCS(dr,sr) MOV_cond(CS, dr, sr)
+#define MOVVC(dr,sr) MOV_cond(VC, dr, sr) // overflow clear
+#define MOVNC(dr,sr) MOV_cond(CC, dr, sr) // carry clear
 
-#define MR_cond(_d,_s,_cond,_nm)  do {                                  \
-        underrunProtect(4);                                             \
-        *(--_nIns) = (NIns)( ((_cond)<<28) | (0xD<<21) | ((_d)<<12) | (_s) ); \
-        asm_output(_nm " %s,%s",gpn(_d),gpn(_s)); } while (0)
+// _d = [_b+off]
+#define LDR(_d,_b,_off)        asm_ldr_chk(_d,_b,_off,1)
+#define LDR_nochk(_d,_b,_off)  asm_ldr_chk(_d,_b,_off,0)
 
-#define MREQ(dr,sr) MR_cond(dr, sr, EQ, "moveq")
-#define MRNE(dr,sr) MR_cond(dr, sr, NE, "movne")
-#define MRL(dr,sr)  MR_cond(dr, sr, LT, "movlt")
-#define MRLE(dr,sr) MR_cond(dr, sr, LE, "movle")
-#define MRG(dr,sr)  MR_cond(dr, sr, GT, "movgt")
-#define MRGE(dr,sr) MR_cond(dr, sr, GE, "movge")
-#define MRB(dr,sr)  MR_cond(dr, sr, CC, "movcc")
-#define MRBE(dr,sr) MR_cond(dr, sr, LS, "movls")
-#define MRA(dr,sr)  MR_cond(dr, sr, HI, "movcs")
-#define MRAE(dr,sr) MR_cond(dr, sr, CS, "movhi")
-#define MRNO(dr,sr) MR_cond(dr, sr, VC, "movvc") // overflow clear
-#define MRNC(dr,sr) MR_cond(dr, sr, CC, "movcc") // carry clear
+// _d = #_imm
+#define LDi(_d,_imm) asm_ld_imm(_d,_imm)
+
+// MOVW and MOVT are ARMv6T2 or newer only
 
-#define LDR_chk(_d,_b,_off,_chk) do {                                   \
-        if (IsFpReg(_d)) {                                              \
-            FLDD_chk(_d,_b,_off,_chk);                                  \
-        } else if ((_off) > -4096 && (_off) < 4096) {                   \
-            if (_chk) underrunProtect(4);                               \
-            *(--_nIns) = (NIns)( COND_AL | (((_off) < 0 ? 0x51 : 0x59)<<20) | ((_b)<<16) | ((_d)<<12) | (((_off) < 0 ? -(_off) : (_off))&0xFFF) ); \
-        } else {                                                        \
-            if (_chk) underrunProtect(4+LD32_size);                     \
-            NanoAssert((_b) != IP);                                     \
-            *(--_nIns) = (NIns)( COND_AL | (0x79<<20) | ((_b)<<16) | ((_d)<<12) | Scratch ); \
-            LD32_nochk(Scratch, _off);                                  \
-        }                                                               \
-        asm_output("ldr %s, [%s, #%d]",gpn(_d),gpn(_b),(_off));        \
-    } while(0)
+// MOVW -- writes _imm into _d, zero-extends.
+#define MOVW_cond(_cond,_d,_imm) do {                                   \
+        NanoAssert(isU16(_imm) || isS16(_imm));                         \
+        underrunProtect(4);                                             \
+        *(--_nIns) = (NIns)( (_cond)<<28 | 3<<24 | 0<<20 | (((_imm)>>12)&0xf)<<16 | (_d)<<12 | (_imm)&0xfff ); \
+        asm_output("movw%s %s, #0x%x", condNames[_cond], gpn(_d), (_imm)); \
+    } while (0)
+
+#define MOVW(_d,_imm) MOVW_cond(AL, _d, _imm)
 
-#define LDR(_d,_b,_off)        LDR_chk(_d,_b,_off,1)
-#define LDR_nochk(_d,_b,_off)  LDR_chk(_d,_b,_off,0)
+// MOVT -- writes _imm into top halfword of _d, does not affect bottom halfword
+#define MOVT_cond(_cond,_d,_imm) do {                                   \
+        NanoAssert(isU16(_imm) || isS16(_imm));                         \
+        underrunProtect(4);                                             \
+        *(--_nIns) = (NIns)( (_cond)<<28 | 3<<24 | 4<<20 | (((_imm)>>12)&0xf)<<16 | (_d)<<12 | (_imm)&0xfff ); \
+        asm_output("movt%s %s, #0x%x", condNames[_cond], gpn(_d), (_imm)); \
+    } while (0)
+
+#define MOVT(_d,_imm) MOVT_cond(AL, _d, _imm)
 
 // i386 compat, for Assembler.cpp
-#define LD(reg,offset,base)    LDR_chk(reg,base,offset,1)
+#define MR(d,s) MOV(d,s)
+#define LD(reg,offset,base)    asm_ldr_chk(reg,base,offset,1)
 #define ST(base,offset,reg)    STR(reg,base,offset)
 
-#define LDi(_d,_imm) do {                                               \
-        if ((_imm) == 0) {                                              \
-            XOR(_d,_d);                                                 \
-        } else if (isS8((_imm)) || isU8((_imm))) {                      \
-            underrunProtect(4);                                         \
-            if ((_imm)<0)   *(--_nIns) = (NIns)( COND_AL | (0x3E<<20) | ((_d)<<12) | (((_imm)^0xFFFFFFFF)&0xFF) ); \
-            else            *(--_nIns) = (NIns)( COND_AL | (0x3B<<20) | ((_d)<<12) | ((_imm)&0xFF) ); \
-            asm_output("ld  %s,0x%x",gpn((_d)),(_imm));                \
-        } else {                                                        \
-            underrunProtect(LD32_size);                                 \
-            LD32_nochk(_d, (_imm));                                     \
-            asm_output("ld  %s,0x%x",gpn((_d)),(_imm));                \
-        }                                                               \
-    } while(0)
-
-
 // load 8-bit, zero extend (aka LDRB) note, only 5-bit offsets (!) are
 // supported for this, but that's all we need at the moment.
 // (LDRB/LDRH actually allow a 12-bit offset in ARM mode but
 // constraining to 5-bit gives us advantage for Thumb)
 #define LDRB(_d,_off,_b) do {                                           \
         NanoAssert((_off)>=0&&(_off)<=31);                              \
         underrunProtect(4);                                             \
         *(--_nIns) = (NIns)( COND_AL | (0x5D<<20) | ((_b)<<16) | ((_d)<<12) | ((_off)&0xfff)  ); \
@@ -572,21 +530,25 @@ typedef enum {
 #define STR_postindex(_d,_n,_off) do {                                  \
         NanoAssert(!IsFpReg(_d) && isS12(_off));                        \
         underrunProtect(4);                                             \
         if ((_off)<0)   *(--_nIns) = (NIns)( COND_AL | (0x40<<20) | ((_n)<<16) | ((_d)<<12) | ((-(_off))&0xFFF) ); \
         else            *(--_nIns) = (NIns)( COND_AL | (0x48<<20) | ((_n)<<16) | ((_d)<<12) | ((_off)&0xFFF) ); \
         asm_output("str %s, [%s], %d", gpn(_d), gpn(_n), (_off));      \
     } while(0)
 
-
+// There isn't really a LEA on ARM; this basically computes _r = _b + #_d, either as a
+//    ADD _r, _b, #_d   (if _d < 256)
+// or, if (_d <= 1020), as a pair:
+//    MOV _r, #(_d>>2)
+//    ADD _r, _b, _r << 2
 #define LEA(_r,_d,_b) do {                                              \
         NanoAssert((_d)<=1020);                                         \
         NanoAssert(((_d)&3)==0);                                        \
-        if (_b!=SP) NanoAssert(0);                                      \
+        NanoAssert((_b) == FP);                                         \
         if ((_d)<256) {                                                 \
             underrunProtect(4);                                         \
             *(--_nIns) = (NIns)( COND_AL | (0x28<<20) | ((_b)<<16) | ((_r)<<12) | ((_d)&0xFF) ); \
         } else {                                                        \
             underrunProtect(8);                                         \
             *(--_nIns) = (NIns)( COND_AL | (0x4<<21) | ((_b)<<16) | ((_r)<<12) | (2<<7)| (_r) ); \
             *(--_nIns) = (NIns)( COND_AL | (0x3B<<20) | ((_r)<<12) | (((_d)>>2)&0xFF) ); \
         }                                                               \
@@ -620,18 +582,18 @@ typedef enum {
         *(--_nIns) = (NIns)( COND_AL | (0x92<<20) | (SP<<16) | (_mask) ); \
         asm_output("push %x", (_mask));} while (0)
 
 // this form of PUSH takes a base + offset
 // we need to load into scratch reg, then push onto stack
 #define PUSHm(_off,_b)  do {                                            \
         NanoAssert( (int)(_off)>0 );                                    \
         underrunProtect(8);                                             \
-        *(--_nIns) = (NIns)( COND_AL | (0x92<<20) | (SP<<16) | (1<<(Scratch)) ); \
-        *(--_nIns) = (NIns)( COND_AL | (0x59<<20) | ((_b)<<16) | ((Scratch)<<12) | ((_off)&0xFFF) ); \
+        *(--_nIns) = (NIns)( COND_AL | (0x92<<20) | (SP<<16) | (1<<(IP)) ); \
+        *(--_nIns) = (NIns)( COND_AL | (0x59<<20) | ((_b)<<16) | ((IP)<<12) | ((_off)&0xFFF) ); \
         asm_output("push %d(%s)",(_off),gpn(_b)); } while (0)
 
 #define POPr(_r) do {                                                   \
         underrunProtect(4);                                             \
         *(--_nIns) = (NIns)( COND_AL | (0x8B<<20) | (SP<<16) | (1<<(_r)) ); \
         asm_output("pop %s",gpn(_r));} while (0)
 
 #define POP_mask(_mask) do {                                            \
@@ -649,55 +611,55 @@ typedef enum {
 
 // NB: don't use COND_AL here, we shift the condition into place!
 #define JMP(_t)                                 \
     B_cond_chk(AL,_t,1)
 
 #define JMP_nochk(_t)                           \
     B_cond_chk(AL,_t,0)
 
-#define JA(t)   do {B_cond(HI,t); asm_output("ja 0x%08x",(unsigned int)t); } while(0)
-#define JNA(t)  do {B_cond(LS,t); asm_output("jna 0x%08x",(unsigned int)t); } while(0)
-#define JB(t)   do {B_cond(CC,t); asm_output("jb 0x%08x",(unsigned int)t); } while(0)
-#define JNB(t)  do {B_cond(CS,t); asm_output("jnb 0x%08x",(unsigned int)t); } while(0)
-#define JE(t)   do {B_cond(EQ,t); asm_output("je 0x%08x",(unsigned int)t); } while(0)
-#define JNE(t)  do {B_cond(NE,t); asm_output("jne 0x%08x",(unsigned int)t); } while(0)                     
-#define JBE(t)  do {B_cond(LS,t); asm_output("jbe 0x%08x",(unsigned int)t); } while(0)
-#define JNBE(t) do {B_cond(HI,t); asm_output("jnbe 0x%08x",(unsigned int)t); } while(0)
-#define JAE(t)  do {B_cond(CS,t); asm_output("jae 0x%08x",(unsigned int)t); } while(0)
-#define JNAE(t) do {B_cond(CC,t); asm_output("jnae 0x%08x",(unsigned int)t); } while(0)
-#define JL(t)   do {B_cond(LT,t); asm_output("jl 0x%08x",(unsigned int)t); } while(0)  
-#define JNL(t)  do {B_cond(GE,t); asm_output("jnl 0x%08x",(unsigned int)t); } while(0)
-#define JLE(t)  do {B_cond(LE,t); asm_output("jle 0x%08x",(unsigned int)t); } while(0)
-#define JNLE(t) do {B_cond(GT,t); asm_output("jnle 0x%08x",(unsigned int)t); } while(0)
-#define JGE(t)  do {B_cond(GE,t); asm_output("jge 0x%08x",(unsigned int)t); } while(0)
-#define JNGE(t) do {B_cond(LT,t); asm_output("jnge 0x%08x",(unsigned int)t); } while(0)
-#define JG(t)   do {B_cond(GT,t); asm_output("jg 0x%08x",(unsigned int)t); } while(0)  
-#define JNG(t)  do {B_cond(LE,t); asm_output("jng 0x%08x",(unsigned int)t); } while(0)
-#define JC(t)   do {B_cond(CS,t); asm_output("bcs 0x%08x",(unsigned int)t); } while(0)
-#define JNC(t)  do {B_cond(CC,t); asm_output("bcc 0x%08x",(unsigned int)t); } while(0)
-#define JO(t)   do {B_cond(VS,t); asm_output("bvs 0x%08x",(unsigned int)t); } while(0)
-#define JNO(t)  do {B_cond(VC,t); asm_output("bvc 0x%08x",(unsigned int)t); } while(0)
+#define JA(t)   B_cond(HI,t)
+#define JNA(t)  B_cond(LS,t)
+#define JB(t)   B_cond(CC,t)
+#define JNB(t)  B_cond(CS,t)
+#define JE(t)   B_cond(EQ,t)
+#define JNE(t)  B_cond(NE,t)
+#define JBE(t)  B_cond(LS,t)
+#define JNBE(t) B_cond(HI,t)
+#define JAE(t)  B_cond(CS,t)
+#define JNAE(t) B_cond(CC,t)
+#define JL(t)   B_cond(LT,t)
+#define JNL(t)  B_cond(GE,t)
+#define JLE(t)  B_cond(LE,t)
+#define JNLE(t) B_cond(GT,t)
+#define JGE(t)  B_cond(GE,t)
+#define JNGE(t) B_cond(LT,t)
+#define JG(t)   B_cond(GT,t)
+#define JNG(t)  B_cond(LE,t)
+#define JC(t)   B_cond(CS,t)
+#define JNC(t)  B_cond(CC,t)
+#define JO(t)   B_cond(VS,t)
+#define JNO(t)  B_cond(VC,t)
 
 // used for testing result of an FP compare on x86; not used on arm.
 // JP = comparison  false
 #define JP(t)   do {NanoAssert(0); B_cond(NE,t); asm_output("jp 0x%08x",t); } while(0) 
 
 // JNP = comparison true
 #define JNP(t)  do {NanoAssert(0); B_cond(EQ,t); asm_output("jnp 0x%08x",t); } while(0)
 
 
 // MOV(EQ) _r, #1 
 // EOR(NE) _r, _r
 #define SET(_r,_cond,_opp) do {                                         \
     underrunProtect(8);                                                 \
     *(--_nIns) = (NIns)( (_opp<<28) | (1<<21) | ((_r)<<16) | ((_r)<<12) | (_r) ); \
     *(--_nIns) = (NIns)( (_cond<<28) | (0x3A<<20) | ((_r)<<12) | (1) ); \
-    asm_output("mov%s %s, #1", ccName(_cond), gpn(r), gpn(r));          \
-    asm_output("eor%s %s, %s", ccName(_opp), gpn(r), gpn(r));           \
+    asm_output("mov%s %s, #1", condNames[_cond], gpn(r), gpn(r));       \
+    asm_output("eor%s %s, %s", condNames[_opp], gpn(r), gpn(r));        \
     } while (0)
 
 
 #define SETE(r)     SET(r,EQ,NE)
 #define SETL(r)     SET(r,LT,GE)
 #define SETLE(r)    SET(r,LE,GT)
 #define SETG(r)     SET(r,GT,LE)
 #define SETGE(r)    SET(r,GE,LT)
@@ -758,22 +720,16 @@ typedef enum {
 
 #define LDMIA(_b, _mask) do {                                           \
         underrunProtect(4);                                             \
         NanoAssert(((_mask)&rmask(_b))==0 && isU8(_mask));              \
         *(--_nIns) = (NIns)(COND_AL | (0x8B<<20) | ((_b)<<16) | (_mask)&0xFF); \
         asm_output("ldmia %s!,{0x%x}", gpn(_b), (_mask)); \
     } while (0)
 
-#define MRS(_d) do {                            \
-        underrunProtect(4);                     \
-        *(--_nIns) = (NIns)(COND_AL | (0x10<<20) | (0xF<<16) | ((_d)<<12)); \
-        asm_output("msr %s", gpn(_d));                                 \
-    } while (0)
-
 /*
  * VFP
  */
 
 #define FMDRR(_Dm,_Rd,_Rn) do {                                         \
         underrunProtect(4);                                             \
         NanoAssert(IsFpReg(_Dm) && IsGpReg(_Rd) && IsGpReg(_Rn));       \
         *(--_nIns) = (NIns)( COND_AL | (0xC4<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
@@ -784,21 +740,28 @@ typedef enum {
         underrunProtect(4);                                             \
         NanoAssert(IsGpReg(_Rd) && IsGpReg(_Rn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xC5<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmrrd %s,%s,%s", gpn(_Rd), gpn(_Rn), gpn(_Dm));    \
     } while (0)
 
 #define FMRDH(_Rd,_Dn) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dm));                       \
+        NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
         asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn));                  \
     } while (0)
 
+#define FMRDL(_Rd,_Dn) do {                                             \
+        underrunProtect(4);                                             \
+        NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn));                       \
+        *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
+        asm_output("fmrdl %s,%s", gpn(_Rd), gpn(_Dn));                  \
+    } while (0)
+
 #define FSTD(_Dd,_Rn,_offs) do {                                        \
         underrunProtect(4);                                             \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2));         \
         NanoAssert(IsFpReg(_Dd) && !IsFpReg(_Rn));                      \
         int negflag = 1<<23;                                            \
         intptr_t offs = (_offs);                                        \
         if (_offs < 0) {                                                \
             negflag = 0<<23;                                            \
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -829,22 +829,22 @@ namespace nanojit
 #endif
     }
 
 	NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ, bool isfar)
 	{
 		NIns* at = 0;
 		LOpcode condop = cond->opcode();
 		NanoAssert(cond->isCond());
-#ifndef NJ_SOFTFLOAT
+
 		if (condop >= LIR_feq && condop <= LIR_fge)
 		{
 			return asm_jmpcc(branchOnFalse, cond, targ);
 		}
-#endif
+
 		// produce the branch
 		if (branchOnFalse)
 		{
 			if (condop == LIR_eq)
 				JNE(targ, isfar);
 			else if (condop == LIR_ov)
 				JNO(targ, isfar);
 			else if (condop == LIR_cs)
@@ -968,23 +968,21 @@ namespace nanojit
 	        MR(SP,FP);
 	}	
 
 	void Assembler::asm_fcond(LInsp ins)
 	{
 		// only want certain regs 
 		Register r = prepResultReg(ins, AllowableFlagRegs);
 		asm_setcc(r, ins);
-#ifdef NJ_ARM_VFP
-		SETE(r);
-#else
+
 		// SETcc only sets low 8 bits, so extend 
 		MOVZX8(r,r);
 		SETNP(r);
-#endif
+
 		asm_fcmp(ins);
 	}
 				
 	void Assembler::asm_cond(LInsp ins)
 	{
 		// only want certain regs 
 		LOpcode op = ins->opcode();			
 		Register r = prepResultReg(ins, AllowableFlagRegs);
--- a/js/src/nanojit/avmplus.h
+++ b/js/src/nanojit/avmplus.h
@@ -328,31 +328,52 @@ namespace avmplus {
             verbose_exits = 1;
             verbose_live = 1;
             show_stats = 1;
 #endif
 #if defined (AVMPLUS_AMD64)
             sse2 = true;
             use_cmov = true;
 #endif
-            tree_opt = 0;
         }
         
         uint32_t tree_opt:1;
         uint32_t quiet_opt:1;
         uint32_t verbose:1;
         uint32_t verbose_addrs:1;
         uint32_t verbose_live:1;
         uint32_t verbose_exits:1;
         uint32_t show_stats:1;
 
-        #if defined (AVMPLUS_IA32) || defined(AVMPLUS_AMD64)
-		bool sse2;
-		bool use_cmov;
-		#endif
+#if defined (AVMPLUS_IA32) || defined(AVMPLUS_AMD64)
+        bool sse2;
+        bool use_cmov;
+#endif
+
+#if defined (AVMPLUS_ARM)
+        // whether to generate VFP instructions
+# if defined (NJ_FORCE_SOFTFLOAT)
+        static const bool vfp = false;
+# else
+        bool vfp;
+# endif
+
+        // whether to generate ARMv6t2 instructions (MOVT/MOVW)
+# if defined (NJ_FORCE_NO_ARM_V6T2)
+        static const bool v6t2 = false;
+# else
+        bool v6t2;
+# endif
+#endif
+
+#if defined (NJ_FORCE_SOFTFLOAT)
+        static const bool soft_float = true;
+#else
+        bool soft_float;
+#endif
     };
 
     static const int kstrconst_emptyString = 0;
 
     class AvmInterpreter
     {
         class Labels {
         public:
--- a/js/src/trace-test.js
+++ b/js/src/trace-test.js
@@ -4554,16 +4554,19 @@ testStringLengthNoTinyId.expected = "t['
 test(testStringLengthNoTinyId);
 
 function testLengthInString()
 {
   var s = new String();
   var res = "length" in s;
   for (var i = 0; i < 5; i++)
     res = res && ("length" in s);
+  res = res && s.hasOwnProperty("length");
+  for (var i = 0; i < 5; i++)
+    res = res && s.hasOwnProperty("length");
   return res;
 }
 testLengthInString.expected = true;
 test(testLengthInString);
 
 function testSlowArrayLength()
 {
   var counter = 0;
@@ -4650,16 +4653,99 @@ test(testNEWINIT);
 function testNEWINIT_DOUBLE()
 {
     for (var z = 0; z < 2; ++z) { ({ 0.1: null })}
     return "ok";
 }
 testNEWINIT_DOUBLE.expected = "ok";
 test(testNEWINIT_DOUBLE);
 
+function testIntOverflow() {
+    // int32_max - 7
+    var ival = 2147483647 - 7;
+    for (var i = 0; i < 30; i++) {
+        ival += 2;
+    }
+    return (ival < 2147483647);
+}
+testIntOverflow.expected = false;
+testIntOverflow.jitstats = {
+    recorderStarted: 2,
+    recorderAborted: 0,
+    traceCompleted: 2,
+    traceTriggered: 2,
+};
+test(testIntOverflow);
+
+function testIntUnderflow() {
+    // int32_min + 8
+    var ival = -2147483648 + 8;
+    for (var i = 0; i < 30; i++) {
+        ival -= 2;
+    }
+    return (ival > -2147483648);
+}
+testIntUnderflow.expected = false;
+testIntUnderflow.jitstats = {
+    recorderStarted: 2,
+    recorderAborted: 0,
+    traceCompleted: 2,
+    traceTriggered: 2,
+};
+test(testIntUnderflow);
+
+function testCALLELEM()
+{
+    function f() {
+        return 5;
+    }
+
+    function g() {
+        return 7;
+    }
+
+    var x = [f,f,f,f,g];
+    var y = 0;
+    for (var i = 0; i < 5; ++i)
+        y = x[i]();
+    return y;
+}
+testCALLELEM.expected = 7;
+test(testCALLELEM);
+
+function testNewString()
+{
+  var o = { toString: function() { return "string"; } };
+  var r = [];
+  for (var i = 0; i < 5; i++)
+    r.push(typeof new String(o));
+  for (var i = 0; i < 5; i++)
+    r.push(typeof new String(3));
+  for (var i = 0; i < 5; i++)
+    r.push(typeof new String(2.5));
+  for (var i = 0; i < 5; i++)
+    r.push(typeof new String("string"));
+  for (var i = 0; i < 5; i++)
+    r.push(typeof new String(null));
+  for (var i = 0; i < 5; i++)
+    r.push(typeof new String(true));
+  for (var i = 0; i < 5; i++)
+    r.push(typeof new String(undefined));
+  return r.length === 35 && r.every(function(v) { return v === "object"; });
+}
+testNewString.expected = true;
+testNewString.jitstats = {
+  recorderStarted:  7,
+  recorderAborted: 0,
+  traceCompleted: 7,
+  sideExitIntoInterpreter: 7
+};
+test(testNewString);
+
+
 /*****************************************************************************
  *                                                                           *
  *  _____ _   _  _____ ______ _____ _______                                  *
  * |_   _| \ | |/ ____|  ____|  __ \__   __|                                 *
  *   | | |  \| | (___ | |__  | |__) | | |                                    *
  *   | | | . ` |\___ \|  __| |  _  /  | |                                    *
  *  _| |_| |\  |____) | |____| | \ \  | |                                    *
  * |_____|_| \_|_____/|______|_|  \_\ |_|                                    *
--- a/js/src/xpconnect/src/xpcjsruntime.cpp
+++ b/js/src/xpconnect/src/xpcjsruntime.cpp
@@ -1085,33 +1085,16 @@ XPCJSRuntime::XPCJSRuntime(nsXPConnect* 
                          sizeof(JSDHashEntryStub), 128);
 #endif
 
     DOM_InitInterfaces();
 
     // these jsids filled in later when we have a JSContext to work with.
     mStrIDs[0] = 0;
 
-    // Call XPCPerThreadData::GetData to initialize
-    // XPCPerThreadData::gTLSIndex before initializing
-    // JSRuntime::threadTPIndex in JS_NewRuntime.
-    //
-    // XPConnect uses a thread local storage (XPCPerThreadData) indexed by
-    // XPCPerThreadData::gTLSIndex, and SpiderMonkey GC uses a thread local
-    // storage indexed by JSRuntime::threadTPIndex.
-    //
-    // The destructor for XPCPerThreadData::gTLSIndex may access
-    // thread local storage indexed by JSRuntime::threadTPIndex.
-    // Thus, the destructor for JSRuntime::threadTPIndex must be called
-    // later than the one for XPCPerThreadData::gTLSIndex.
-    //
-    // We rely on the implementation of NSPR that calls destructors at
-    // the same order of calling PR_NewThreadPrivateIndex.
-    XPCPerThreadData::GetData(nsnull);
-
     mJSRuntime = JS_NewRuntime(32L * 1024L * 1024L); // pref ?
     if(mJSRuntime)
     {
         // Unconstrain the runtime's threshold on nominal heap size, to avoid
         // triggering GC too often if operating continuously near an arbitrary
         // finite threshold (0xffffffff is infinity for uint32 parameters).
         // This leaves the maximum-JS_malloc-bytes threshold still in effect
         // to cause period, and we hope hygienic, last-ditch GCs from within
new file mode 100644
--- /dev/null
+++ b/js/tests/ecma_3/Array/15.5.4.8-01.js
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Peter Seliger
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = '15.5.4.8-01.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 480096;
+var summary = 'Array.lastIndexOf';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  expect = '-12';
+  actual = 0;
+  actual += Array.lastIndexOf([2, 3,, 4, 5, 6]);
+  actual += [2, 3,, 4, 5, 6].lastIndexOf();
+  actual += Array.prototype.lastIndexOf.call([2, 3,, 4, 5, 6]);
+  actual += Array.prototype.lastIndexOf.apply([2, 3,, 4, 5, 6], [, -4]);
+  actual += Array.prototype.lastIndexOf.apply([2, 3,, 4, 5, 6], [undefined, -4]);
+  actual += Array.prototype.lastIndexOf.apply([2, 3,, 4, 5, 6], [undefined, -5]);
+  actual += Array.lastIndexOf([2, 3,, 4, 5, 6], undefined);
+  actual += Array.lastIndexOf([2, 3,, 4, 5, 6], undefined, 1);
+  actual += Array.lastIndexOf([2, 3,, 4, 5, 6], undefined, 2);
+  actual += Array.lastIndexOf([2, 3,, 4, 5, 6], undefined);
+  actual += Array.lastIndexOf([2, 3,, 4, 5, 6], undefined, 1);
+  actual += Array.lastIndexOf([2, 3,, 4, 5, 6], undefined, 2);
+
+  actual = String(actual);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100755
--- /dev/null
+++ b/js/tests/ecma_3/Regress/regress-469937.js
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2007
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-469937.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 469937;
+var summary = 'Properties without DontEnum are sometimes not enumerated';
+var actual = false;
+var expect = true;
+
+printBugNumber(BUGNUMBER);
+printStatus (summary);
+ 
+(function(){ 
+    var o = { }
+    o.PageLeft = 1;
+    o.Rect2 = 6;
+    delete o.Rect2;
+    for (var p in o);
+    o.Rect3 = 7;
+    found = false;
+    for (var p in o) if (p == 'Rect3') found = true;
+    actual = found;    
+})();
+
+reportCompare(expect, actual, summary);
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/Regress/regress-457065-03.js
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-457065-03.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 457065;
+var summary = 'Do not assert: !fp->callee || fp->thisp == JSVAL_TO_OBJECT(fp->argv[-1])';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  (function() {
+    new function (){ for (var x = 0; x < 3; ++x){} };
+  })();
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/Regress/regress-477733.js
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-477733.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 477733;
+var summary = 'TM: Do not assert: !(fp->flags & JSFRAME_POP_BLOCKS)';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  function g() {
+    [];
+  }
+
+  try {
+    d.d.d;
+  } catch(e) {
+    void (function(){});
+  }
+
+  for (var o in [1, 2, 3]) {
+    g();
+  }
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/Regress/regress-480244.js
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Graydon Hoare
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-480244.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 480244;
+var summary = 'Do not assert: isInt32(*p)';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  function outer() {
+    var v = 10.234;
+    for (var i = 0; i < 0xff; ++i) {
+      inner(v);
+    }
+  }
+
+  var g = 0;
+  var h = 0;
+
+  function inner() {
+    var v = 10;
+    for (var k = 0; k < 0xff; ++k) {
+      g++;
+      if (g & 0xff == 0xff)
+        h++;
+    }
+    return h;
+  }
+
+  outer();
+  print("g=" + g + " h=" + h);
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/Regress/regress-483103.js
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-483103.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 483103;
+var summary = 'TM: Do not assert: p->isQuad()';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  var t = new String("");
+  for (var j = 0; j < 3; ++j) {
+    var e = t["-1"];
+  }
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/extensions/regress-479487.js
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Igor Bukanov
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-479487.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 479487;
+var summary = 'js_Array_dense_setelem can call arbitrary JS code';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  Array.prototype[1] = 2;
+
+  Array.prototype.__defineSetter__(32, function() { print("Hello from arbitrary JS");});
+  Array.prototype.__defineGetter__(32, function() { return 11; });
+
+  function f()
+  {
+    var a = [];
+    for (var i = 0; i != 10; ++i) {
+      a[1 << i] = 9999;
+    }
+    return a;
+  }
+
+  f();
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/extensions/regress-479551.js
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jeff Walden
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-479551.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 479551;
+var summary = 'Do not assert: (cx)->requestDepth || (cx)->thread == (cx)->runtime->gcThread';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  if (typeof shapeOf != 'function')
+  {
+    print(expect = actual = 'Test skipped: requires shell');
+  }
+  else
+  {
+    jit(true);
+
+    var o = {a:3, b:2};
+    shapeOf(o);
+    var p = {};
+    p.a =3;
+    p.b=2;
+    shapeOf(p);
+
+    jit(false);
+
+  }
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/extensions/regress-480579.js
@@ -0,0 +1,69 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jason Orendorff
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-480579.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 480579;
+var summary = 'Do not assert: pobj_ == obj2';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  expect = '12';
+
+  a = {x: 1};
+  b = {__proto__: a};
+  c = {__proto__: b};
+  for (i = 0; i < 2; i++) {
+    print(actual += c.x);
+    b.x = 2;
+  }
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_5/extensions/regress-481516.js
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jason Orendorff
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-481516.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 481516;
+var summary = 'TM: pobj_ == obj2';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  expect = '1111222';
+
+  a = {x: 1};
+  b = {__proto__: a};
+  c = {__proto__: b};
+  objs = [{__proto__: a}, {__proto__: a}, {__proto__: a}, b, {__proto__: a},
+          {__proto__: a}];
+  for (i = 0; i < 6; i++) {
+    print(actual += ""+c.x);
+    objs[i].x = 2;
+  }
+  print(actual += c.x);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_6/Regress/regress-476655.js
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-476655.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 476655;
+var summary = 'TM: Do not assert: count <= (size_t) (fp->regs->sp - StackBase(fp) - depth)';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  try
+  {
+    eval(
+      "(function (){ for (var y in this) {} })();" +
+      "[''.watch(\"\", function(){}) for each (x in ['', '', eval, '', '']) if " +
+      "(x)].map(Function)"
+      );
+  }
+  catch(ex)
+  {
+  }
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
new file mode 100644
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/extensions/regress-479252.js
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Igor Bukanov
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-479252.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 479252;
+var summary = 'Avoid watchdog ticks when idle in shell';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  if (typeof sleep != 'function' || typeof scatter != 'function' ||
+      typeof timeout != 'function')
+  {
+    print(expect = actual = 'Test skipped: requires multithreads and timeout.');
+  }
+  else
+  {
+    expectExitCode(6);
+
+    function f() { sleep(100); }
+    timeout(1.0);
+    scatter([f,f,f,f,f]);
+  }
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/extensions/regress-479381.js
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jason Orendorff
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-479381.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 479381;
+var summary = 'Do not crash @ js_FinalizeStringRT with multi-threads.';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  if (typeof gczeal != 'function' || typeof scatter != 'function')
+  {
+    print(expect = actual = 'Test skipped: requires multithreads');
+  }
+  else
+  {
+    expect = actual = 'No Crash';
+
+    gczeal(2);
+
+    function f() {
+      var s;
+      for (var i = 0; i < 9999; i++)
+        s = 'a' + String(i)[3] + 'b';
+      return s;
+    }
+
+    print(scatter([f, f, f, f]));
+  }
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-457065-01.js
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-457065-01.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 457065;
+var summary = 'Do not assert: !fp->callee || fp->thisp == JSVAL_TO_OBJECT(fp->argv[-1])';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  var e = eval;
+  for (var a in this) { }
+  (function() { eval("this; for (let b in [0,1,2]) { }"); })();
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-457065-02.js
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-457065-02.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 457065;
+var summary = 'Do not assert: !fp->callee || fp->thisp == JSVAL_TO_OBJECT(fp->argv[-1])';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  (function(){ eval('this'); (function(){ for(let y in [0,1,2]) 6;})(); })();
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-471373.js
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-471373.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 471373;
+var summary = 'TM: do not assert: (size_t)(regs.pc - script->code) < script->length';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  if (typeof window == 'undefined')
+  {
+    expectExitCode(5);
+  }
+
+  jit(true);
+
+  function g() {
+    var x = <x/>;
+    for (var b = 0; b < 2; ++b) {
+      yield x;
+      for (var c = 0; c < 10;++c) {
+        x.r = x;
+      }
+    }
+  }
+  for (let y in g()) { }
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-471660.js
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-471660.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 471660;
+var summary = 'TM: Do not assert: !(fp->flags & JSFRAME_POP_BLOCKS)';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  y = <x/>;
+
+  for (var w = 0; w < 5; ++w) {
+
+    let (y) { do break ; while (true); }
+    for each (let x in [{}, function(){}]) {y}
+
+  }
+
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-472528-01.js
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *                 Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-472528-01.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 472528;
+var summary = 'Do not assert: !js_IsActiveWithOrBlock(cx, fp->scopeChain, 0)';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  try
+  {
+    for (var i = 0; i < 4; ++i) {
+      for (let j = 0; j < 2; ++j) { }
+      let (x) (function(){});
+    }
+  }
+  catch(ex)
+  {
+  }
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-472528-02.js
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *                 Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-472528-02.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 472528;
+var summary = 'Do not assert: !fp->blockChain';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  try
+  {
+    for (let i = 0; i < 4; ++i) {
+      for (let j = 0; j < 2; ++j) { }
+      let (x) (function(){});
+    }
+  }
+  catch(ex)
+  {
+  }
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-472703.js
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-472703.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 472703;
+var summary = 'Do not assert: regs.sp[-1] == OBJECT_TO_JSVAL(fp->scopeChain)';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  try
+  {
+    eval(
+      'for (var z = 0; z < 2; ++z) { with({}) for(let y in [1, null]); let(x)' +
+      '(function(){})(); }'
+      );
+  }
+  catch(ex)
+  {
+  }
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-476655.js
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-476655.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 476655;
+var summary = 'Do not assert: depth <= (size_t) (fp->regs->sp - StackBase(fp))';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  jit(true);
+
+  try
+  {
+    eval(
+      "for(let y in ['', '']) try {for(let y in ['', '']) ( /x/g ); } finally {" +
+      "with({}){} } this.zzz.zzz"
+
+      );
+  }
+  catch(ex)
+  {
+  }
+  jit(false);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8/regress/regress-483749.js
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Gary Kwong
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-483749.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 483749;
+var summary = 'Do not assert: !js_IsActiveWithOrBlock(cx, fp->scopeChain, 0)';
+var actual = '';
+var expect = '';
+
+printBugNumber(BUGNUMBER);
+printStatus (summary);
+
+jit(true);
+
+for each (let x in ['']) {
+  for (var b = 0; b < 5; ++b) {
+    if (b % 5 == 3) {
+      with([]) this;
+    }
+  }
+}
+
+jit(false);
+
+reportCompare(expect, actual, summary);
new file mode 100644
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8_1/regress/regress-452498-006.js
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Andreas Gal
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-452498-006.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 452498;
+var summary = 'TM: upvar2 regression tests';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+// ------- Comment #6 From Andreas Gal :gal
+
+  function foo() {
+    var x = 4;
+    var f = (function() { return x++; });
+    var g = (function() { return x++; });
+    return [f,g];
+  }
+
+  var bar = foo();
+
+  expect = '9';
+  actual = 0;
+
+  bar[0]();
+  bar[1]();
+
+  actual = String(expect);
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8_1/regress/regress-452498-027.js
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Brendan Eich
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-452498-027.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 452498;
+var summary = 'TM: upvar2 regression tests';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+  expect = '5';
+
+// ------- Comment #27 From Brendan Eich
+
+  function f(x){function g(y)x+y;return g}
+  g = f(2);
+
+  actual = String(g(3));
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
+
+
+
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8_1/regress/regress-452498-030.js
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Mike Shaver
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+var gTestfile = 'regress-452498-030.js';
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 452498;
+var summary = 'TM: upvar2 regression tests';
+var actual = '';
+var expect = '';
+
+
+//-----------------------------------------------------------------------------
+test();
+//-----------------------------------------------------------------------------
+
+function test()
+{
+  enterFunc ('test');
+  printBugNumber(BUGNUMBER);
+  printStatus (summary);
+
+// ------- Comment #30 From Mike Shaver
+
+  function f() { var i = 0; var i = 5; }
+  f();
+
+  reportCompare(expect, actual, summary);
+
+  exitFunc ('test');
+}
new file mode 100644
--- /dev/null
+++ b/js/tests/js1_8_1/regress/regress-452498-038.js
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is JavaScript Engine testing utilities.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s): Jesse Ruderman
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisio