Merge mc to tm
author      Robert Sayre <sayrer@gmail.com>
date        Wed, 11 Feb 2009 23:05:46 -0500
changeset   25101  6758dedadadcf1c6743fa8bbc57830a01e6e8bae
parent      24931  213429e61488cdc628d5fb2a7be2883ad87530ae
parent      25100  975b36c50d33c8de608e798fcde43043db10bf68
child       25102  e6238b9ea42a2d67aa5aae281a0b9daaac25002e
push id     unknown
push user   unknown
push date   unknown
milestone   1.9.2a1pre
files:
dom/src/base/nsJSEnvironment.cpp
js/src/jsinterp.cpp
js/src/xpconnect/src/xpcprivate.h
--- a/dom/src/base/nsJSEnvironment.cpp
+++ b/dom/src/base/nsJSEnvironment.cpp
@@ -164,29 +164,38 @@ static PRLogModuleInfo* gJSDiagnostics;
 #define NS_COLLECTED_OBJECTS_LIMIT  5000
 // CC will be called if GC has been called at least this number of times and
 // there are at least NS_MIN_SUSPECT_CHANGES new suspected objects.
 #define NS_MAX_GC_COUNT             5
 #define NS_MIN_SUSPECT_CHANGES      10
 // CC will be called if there are at least NS_MAX_SUSPECT_CHANGES new suspected
 // objects.
 #define NS_MAX_SUSPECT_CHANGES      100
+// Regular GC runs once JS gcBytes has grown by a certain factor (16x by
+// default) from its value after the previous GC. NS_GC_ACCEL_TIME_1
+// seconds after the last GC, the factor drops, making GC more likely;
+// after NS_GC_ACCEL_TIME_2 seconds it drops again. This makes sure GC
+// happens eventually even when the growth-factor heuristic alone fails
+// to detect memory pressure.
+#define NS_GC_ACCEL_TIME_1          60
+#define NS_GC_ACCEL_TIME_2          600
 
 // if you add statics here, add them to the list in nsJSRuntime::Startup
 
 static PRUint32 sDelayedCCollectCount;
 static PRUint32 sCCollectCount;
 static PRBool sUserIsActive;
 static PRTime sPreviousCCTime;
 static PRUint32 sCollectedObjectsCounts;
 static PRUint32 sSavedGCCount;
 static PRUint32 sCCSuspectChanges;
 static PRUint32 sCCSuspectedCount;
 static nsITimer *sGCTimer;
 static PRBool sReadyForGC;
+static PRTime sPreviousGCTime;
 
 // The number of currently pending document loads. This count isn't
 // guaranteed to always reflect reality and can't easily as we don't
 // have an easy place to know when a load ends or is interrupted in
 // all cases. This counter also gets reset if we end up GC'ing while
 // we're waiting for a slow page to load. IOW, this count may be 0
 // even when there are pending loads.
 static PRUint32 sPendingLoadCount;
@@ -854,22 +863,31 @@ PrintWinCodebase(nsGlobalWindow *win)
 }
 #endif
 
 static void
 MaybeGC(JSContext *cx)
 {
   size_t bytes = cx->runtime->gcBytes;
   size_t lastBytes = cx->runtime->gcLastBytes;
-
-  if ((bytes > 8192 && bytes / 16 > lastBytes)
+  PRTime now = PR_Now();
+
+  PRInt32 factor = 16;
+  if (sPreviousGCTime) {
+    PRInt64 usec = now - sPreviousGCTime;
+    if (usec >= PRInt64(NS_GC_ACCEL_TIME_1 * PR_USEC_PER_SEC))
+      factor = usec < PRInt64(NS_GC_ACCEL_TIME_2 * PR_USEC_PER_SEC) ? 4 : 1;
+  }
+  
+  if ((bytes > 8192 && bytes > lastBytes * factor)
 #ifdef DEBUG
       || cx->runtime->gcZeal > 0
 #endif
       ) {
+    sPreviousGCTime = now;
     JS_GC(cx);
   }
 }
 
 static already_AddRefed<nsIPrompt>
 GetPromptFromContext(nsJSContext* ctx)
 {
   nsCOMPtr<nsPIDOMWindow> win(do_QueryInterface(ctx->GetGlobalObject()));
@@ -3697,16 +3715,17 @@ nsJSRuntime::ParseVersion(const nsString
 void
 nsJSRuntime::Startup()
 {
   // initialize all our statics, so that we can restart XPCOM
   sDelayedCCollectCount = 0;
   sCCollectCount = 0;
   sUserIsActive = PR_FALSE;
   sPreviousCCTime = 0;
+  sPreviousGCTime = 0;
   sCollectedObjectsCounts = 0;
   sSavedGCCount = 0;
   sCCSuspectChanges = 0;
   sCCSuspectedCount = 0;
   sGCTimer = nsnull;
   sReadyForGC = PR_FALSE;
   sLoadInProgressGCTimer = PR_FALSE;
   sPendingLoadCount = 0;
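The MaybeGC change above makes the gcBytes growth threshold time-dependent. A minimal standalone sketch of the heuristic (the helper name and the plain-integer microsecond clock are illustrative; the tree keeps this inline in MaybeGC and leaves the factor at 16 until sPreviousGCTime is first set):

    // Sketch: growth factor that cx->runtime->gcBytes must exceed, relative
    // to gcLastBytes, before MaybeGC triggers a GC. The longer it has been
    // since the last GC, the smaller the factor, so GC eventually happens
    // even when allocation growth alone never trips the 16x threshold.
    #include <stdint.h>

    static const int64_t kUsecPerSec   = 1000000;              // PR_USEC_PER_SEC
    static const int64_t kAccelTime1Us = 60  * kUsecPerSec;    // NS_GC_ACCEL_TIME_1
    static const int64_t kAccelTime2Us = 600 * kUsecPerSec;    // NS_GC_ACCEL_TIME_2

    static int32_t GCGrowthFactor(int64_t usecSinceLastGC)     // hypothetical helper
    {
        if (usecSinceLastGC >= kAccelTime2Us)
            return 1;    // 10+ minutes: collect as soon as gcBytes > gcLastBytes
        if (usecSinceLastGC >= kAccelTime1Us)
            return 4;    // 1-10 minutes: require 4x growth
        return 16;       // recent GC: require 16x growth, as before this patch
    }

    // MaybeGC then collects when bytes > 8192 && bytes > lastBytes * factor.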
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -1535,33 +1535,33 @@ InitArrayObject(JSContext *cx, JSObject 
     }
     return JS_TRUE;
 }
 
 #ifdef JS_TRACER
 static JSString* FASTCALL
 Array_p_join(JSContext* cx, JSObject* obj, JSString *str)
 {
-    jsval v;
-    if (!array_join_sub(cx, obj, TO_STRING, str, &v)) {
+    JSAutoTempValueRooter tvr(cx);
+    if (!array_join_sub(cx, obj, TO_STRING, str, tvr.addr())) {
         cx->builtinStatus |= JSBUILTIN_ERROR;
         return NULL;
     }
-    JS_ASSERT(JSVAL_IS_STRING(v));
-    return JSVAL_TO_STRING(v);
+    return JSVAL_TO_STRING(tvr.value());
 }
 
 static JSString* FASTCALL
 Array_p_toString(JSContext* cx, JSObject* obj)
 {
-    jsval v;
-    if (!array_join_sub(cx, obj, TO_STRING, NULL, &v))
+    JSAutoTempValueRooter tvr(cx);
+    if (!array_join_sub(cx, obj, TO_STRING, NULL, tvr.addr())) {
+        cx->builtinStatus |= JSBUILTIN_ERROR;
         return NULL;
-    JS_ASSERT(JSVAL_IS_STRING(v));
-    return JSVAL_TO_STRING(v);
+    }
+    return JSVAL_TO_STRING(tvr.value());
 }
 #endif
 
 /*
  * Perl-inspired join, reverse, and sort.
  */
 static JSBool
 array_join(JSContext *cx, uintN argc, jsval *vp)
@@ -2158,20 +2158,21 @@ js_ArrayCompPush(JSContext *cx, JSObject
     obj->dslots[length] = v;
     return JS_TRUE;
 }
 
 #ifdef JS_TRACER
 static jsval FASTCALL
 Array_p_push1(JSContext* cx, JSObject* obj, jsval v)
 {
+    JSAutoTempValueRooter tvr(cx, v);
     if (OBJ_IS_DENSE_ARRAY(cx, obj)
-        ? array_push1_dense(cx, obj, v, &v)
-        : array_push_slowly(cx, obj, 1, &v, &v)) {
-        return v;
+        ? array_push1_dense(cx, obj, v, tvr.addr())
+        : array_push_slowly(cx, obj, 1, tvr.addr(), tvr.addr())) {
+        return tvr.value();
     }
     cx->builtinStatus |= JSBUILTIN_ERROR;
     return JSVAL_VOID;
 }
 #endif
 
 static JSBool
 array_push(JSContext *cx, uintN argc, jsval *vp)
@@ -2229,21 +2230,21 @@ array_pop_dense(JSContext *cx, JSObject*
     obj->fslots[JSSLOT_ARRAY_LENGTH] = index;
     return JS_TRUE;
 }
 
 #ifdef JS_TRACER
 static jsval FASTCALL
 Array_p_pop(JSContext* cx, JSObject* obj)
 {
-    jsval v;
+    JSAutoTempValueRooter tvr(cx);
     if (OBJ_IS_DENSE_ARRAY(cx, obj)
-        ? array_pop_dense(cx, obj, &v)
-        : array_pop_slowly(cx, obj, &v)) {
-        return v;
+        ? array_pop_dense(cx, obj, tvr.addr())
+        : array_pop_slowly(cx, obj, tvr.addr())) {
+        return tvr.value();
     }
     cx->builtinStatus |= JSBUILTIN_ERROR;
     return JSVAL_VOID;
 }
 #endif
 
 static JSBool
 array_pop(JSContext *cx, uintN argc, jsval *vp)
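Every jsarray.cpp hunk above applies the same fix: traced builtins previously returned results through a raw jsval local, which a last-ditch GC inside the callee could leave pointing at collected memory. Array_p_toString also gains the JSBUILTIN_ERROR flag it was missing on failure. The pattern, as a sketch (Compute is a hypothetical callee standing in for array_join_sub and friends):

    static JSBool Compute(JSContext* cx, JSObject* obj, jsval* rval); // hypothetical

    static jsval FASTCALL
    Builtin_tn(JSContext* cx, JSObject* obj)
    {
        JSAutoTempValueRooter tvr(cx);            // rooted slot, starts as JSVAL_NULL
        if (!Compute(cx, obj, tvr.addr())) {      // callee writes through a rooted jsval*
            cx->builtinStatus |= JSBUILTIN_ERROR; // tell the trace to bail
            return JSVAL_VOID;
        }
        return tvr.value();                       // safe: rooted until tvr's destructor
    }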
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -1045,41 +1045,64 @@ FrameAtomBase(JSContext *cx, JSStackFram
 /* FIXME(bug 332648): Move this into a public header. */
 class JSAutoTempValueRooter
 {
   public:
     JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec)
         : mContext(cx) {
         JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr);
     }
-    JSAutoTempValueRooter(JSContext *cx, jsval v)
+    explicit JSAutoTempValueRooter(JSContext *cx, jsval v = JSVAL_NULL)
         : mContext(cx) {
         JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr);
     }
     JSAutoTempValueRooter(JSContext *cx, JSString *str)
         : mContext(cx) {
         JS_PUSH_TEMP_ROOT_STRING(mContext, str, &mTvr);
     }
 
     ~JSAutoTempValueRooter() {
         JS_POP_TEMP_ROOT(mContext, &mTvr);
     }
 
+    jsval value() { return mTvr.u.value; }
+    jsval * addr() { return &mTvr.u.value; }
+
   protected:
     JSContext *mContext;
 
   private:
 #ifndef AIX
     static void *operator new(size_t);
     static void operator delete(void *, size_t);
 #endif
 
     JSTempValueRooter mTvr;
 };
 
+class JSAutoTempIdRooter
+{
+public:
+    explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0))
+        : mContext(cx) {
+        JS_PUSH_SINGLE_TEMP_ROOT(mContext, ID_TO_VALUE(id), &mTvr);
+    }
+
+    ~JSAutoTempIdRooter() {
+        JS_POP_TEMP_ROOT(mContext, &mTvr);
+    }
+
+    jsid id() { return (jsid) mTvr.u.value; }
+    jsid * addr() { return (jsid *) &mTvr.u.value; }
+
+private:
+    JSContext *mContext;
+    JSTempValueRooter mTvr;
+};
+
 class JSAutoResolveFlags
 {
   public:
     JSAutoResolveFlags(JSContext *cx, uintN flags)
         : mContext(cx), mSaved(cx->resolveFlags) {
         cx->resolveFlags = flags;
     }
 
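JSAutoTempIdRooter, added above, reuses the single-value JSTempValueRooter slot: a jsid is representable as a jsval here, so ID_TO_VALUE and the casts move between the two. A usage sketch matching the GetProperty_tn change later in this patch (the wrapper function is hypothetical):

    // Sketch: keep a jsid rooted across calls that can trigger GC.
    static JSBool
    GetByName(JSContext* cx, JSObject* obj, JSString* name, jsval* vp) // hypothetical
    {
        JSAutoTempIdRooter idr(cx);     // rooted jsid, initialized to INT_TO_JSID(0)
        if (!js_ValueToStringId(cx, STRING_TO_JSVAL(name), idr.addr()))
            return JS_FALSE;            // idr.addr() is a rooted jsid out-param
        return OBJ_GET_PROPERTY(cx, obj, idr.id(), vp);
    }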
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -4924,26 +4924,16 @@ js_Interpret(JSContext *cx)
                     newifp->frame.imacpc = NULL;
                     newifp->frame.slots = newsp;
 
                     /* Push void to initialize local variables. */
                     nvars = fun->u.i.nvars;
                     while (nvars--)
                         *newsp++ = JSVAL_VOID;
 
-                    /* Call the debugger hook if present. */
-                    hook = cx->debugHooks->callHook;
-                    if (hook) {
-                        newifp->hookData = hook(cx, &newifp->frame, JS_TRUE, 0,
-                                                cx->debugHooks->callHookData);
-                        CHECK_INTERRUPT_HANDLER();
-                    } else {
-                        newifp->hookData = NULL;
-                    }
-
                     /* Scope with a call object parented by callee's parent. */
                     if (JSFUN_HEAVYWEIGHT_TEST(fun->flags) &&
                         !js_GetCallObject(cx, &newifp->frame, parent)) {
                         goto bad_inline_call;
                     }
 
                     /* Switch version if currentVersion wasn't overridden. */
                     newifp->callerVersion = (JSVersion) cx->version;
@@ -4956,16 +4946,26 @@ js_Interpret(JSContext *cx)
                     /* Push the frame and set interpreter registers. */
                     newifp->callerRegs = regs;
                     fp->regs = &newifp->callerRegs;
                     regs.sp = newsp;
                     regs.pc = script->code;
                     newifp->frame.regs = &regs;
                     cx->fp = fp = &newifp->frame;
 
+                    /* Call the debugger hook if present. */
+                    hook = cx->debugHooks->callHook;
+                    if (hook) {
+                        newifp->hookData = hook(cx, &newifp->frame, JS_TRUE, 0,
+                                                cx->debugHooks->callHookData);
+                        CHECK_INTERRUPT_HANDLER();
+                    } else {
+                        newifp->hookData = NULL;
+                    }
+
                     TRACE_0(EnterFrame);
 
                     inlineCallCount++;
                     JS_RUNTIME_METER(rt, inlineCalls);
 
 #ifdef INCLUDE_MOZILLA_DTRACE
                     /* DTrace function entry, inlines */
                     if (JAVASCRIPT_FUNCTION_ENTRY_ENABLED())
--- a/js/src/jslock.cpp
+++ b/js/src/jslock.cpp
@@ -480,17 +480,17 @@ js_NudgeOtherContexts(JSContext *cx)
  */
 void
 js_NudgeThread(JSContext *cx, JSThread *thread)
 {
     JSRuntime *rt = cx->runtime;
     JSContext *acx = NULL;
     
     while ((acx = js_NextActiveContext(rt, acx)) != NULL) {
-        if (cx != acx && cx->thread == thread)
+        if (cx != acx && acx->thread == thread)
             JS_TriggerOperationCallback(acx);
     }
 }
 
 /*
  * Given a title with apparently non-null ownercx different from cx, try to
  * set ownercx to cx, claiming exclusive (single-threaded) ownership of title.
  * If we claim ownership, return true.  Otherwise, we wait for ownercx to be
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -1,10 +1,10 @@
 /* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sw=4 et tw=78:
+ * vim: set ts=8 sw=4 et tw=79:
  *
  * ***** BEGIN LICENSE BLOCK *****
  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
  *
  * The contents of this file are subject to the Mozilla Public License Version
  * 1.1 (the "License"); you may not use this file except in compliance with
  * the License. You may obtain a copy of the License at
  * http://www.mozilla.org/MPL/
@@ -1943,17 +1943,17 @@ const char js_propertyIsEnumerable_str[]
 #if JS_HAS_GETTER_SETTER
 const char js_defineGetter_str[] = "__defineGetter__";
 const char js_defineSetter_str[] = "__defineSetter__";
 const char js_lookupGetter_str[] = "__lookupGetter__";
 const char js_lookupSetter_str[] = "__lookupSetter__";
 #endif
 
 JS_DEFINE_TRCINFO_1(obj_valueOf,
-    (3, (static, JSVAL,      Object_p_valueOf,              CONTEXT, THIS, STRING,  0, 0)))
+    (3, (static, JSVAL,     Object_p_valueOf,               CONTEXT, THIS, STRING,  0, 0)))
 JS_DEFINE_TRCINFO_1(obj_hasOwnProperty,
     (3, (static, BOOL_FAIL, Object_p_hasOwnProperty,        CONTEXT, THIS, STRING,  0, 0)))
 JS_DEFINE_TRCINFO_1(obj_propertyIsEnumerable,
     (3, (static, BOOL_FAIL, Object_p_propertyIsEnumerable,  CONTEXT, THIS, STRING,  0, 0)))
 
 static JSFunctionSpec object_methods[] = {
 #if JS_HAS_TOSOURCE
     JS_FN(js_toSource_str,             obj_toSource,                0,0),
@@ -3896,19 +3896,25 @@ js_NativeSet(JSContext *cx, JSObject *ob
         /* If sprop has a stub setter, keep scope locked and just store *vp. */
         if (SPROP_HAS_STUB_SETTER(sprop))
             goto set_slot;
     } else {
         /*
          * Allow API consumers to create shared properties with stub setters.
          * Such properties lack value storage, so setting them is like writing
          * to /dev/null.
+         *
+         * But we can't short-circuit if there's a scripted getter or setter
+         * since we might need to throw. In that case, we let SPROP_SET
+         * decide whether to throw an exception. See bug 478047.
          */
-        if (SPROP_HAS_STUB_SETTER(sprop))
+        if (!(sprop->attrs & JSPROP_GETTER) && SPROP_HAS_STUB_SETTER(sprop)) {
+            JS_ASSERT(!(sprop->attrs & JSPROP_SETTER));
             return JS_TRUE;
+        }
     }
 
     sample = cx->runtime->propertyRemovals;
     JS_UNLOCK_SCOPE(cx, scope);
     JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr);
     ok = SPROP_SET(cx, sprop, obj, obj, vp);
     JS_POP_TEMP_ROOT(cx, &tvr);
     if (!ok)
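The js_NativeSet condition above reads as a predicate: the write-to-/dev/null shortcut for shared properties is only safe when no scripted getter is attached, because assignment to a getter-only property may need to throw, and SPROP_SET is where that decision lives. As a sketch (the helper name is illustrative):

    // Sketch of the tightened fast-path test from the hunk above.
    static inline bool
    CanIgnoreSharedSet(JSScopeProperty* sprop)   // hypothetical helper
    {
        // Stub setter and no scripted getter: the set is a no-op.
        // With a getter present, fall through to SPROP_SET so it can
        // throw if required (bug 478047).
        return !(sprop->attrs & JSPROP_GETTER) && SPROP_HAS_STUB_SETTER(sprop);
    }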
--- a/js/src/jsstr.cpp
+++ b/js/src/jsstr.cpp
@@ -1448,27 +1448,29 @@ str_match(JSContext *cx, uintN argc, jsv
     return StringMatchHelper(cx, argc, vp, fp ? fp->regs->pc : NULL);
 }
 
 #ifdef JS_TRACER
 static jsval FASTCALL
 String_p_match(JSContext* cx, JSString* str, jsbytecode *pc, JSObject* regexp)
 {
     jsval vp[3] = { JSVAL_NULL, STRING_TO_JSVAL(str), OBJECT_TO_JSVAL(regexp) };
+    JSAutoTempValueRooter tvr(cx, 3, vp);
     if (!StringMatchHelper(cx, 1, vp, pc)) {
         cx->builtinStatus |= JSBUILTIN_ERROR;
         return JSVAL_VOID;
     }
     return vp[0];
 }
 
 static jsval FASTCALL
 String_p_match_obj(JSContext* cx, JSObject* str, jsbytecode *pc, JSObject* regexp)
 {
     jsval vp[3] = { JSVAL_NULL, OBJECT_TO_JSVAL(str), OBJECT_TO_JSVAL(regexp) };
+    JSAutoTempValueRooter tvr(cx, 3, vp);
     if (!StringMatchHelper(cx, 1, vp, pc)) {
         cx->builtinStatus |= JSBUILTIN_ERROR;
         return JSVAL_VOID;
     }
     return vp[0];
 }
 #endif
 
@@ -2483,17 +2485,17 @@ js_String_getelem(JSContext* cx, JSStrin
     return js_GetUnitString(cx, str, (size_t)i);
 }
 #endif
 
 JS_DEFINE_CALLINFO_2(extern, BOOL,   js_EqualStrings, STRING, STRING,                       1, 1)
 JS_DEFINE_CALLINFO_2(extern, INT32,  js_CompareStrings, STRING, STRING,                     1, 1)
 
 JS_DEFINE_TRCINFO_1(str_toString,
-    (2, (extern, STRING_FAIL,       String_p_toString, CONTEXT, THIS,                        1, 1)))
+    (2, (extern, STRING_RETRY,      String_p_toString, CONTEXT, THIS,                        1, 1)))
 JS_DEFINE_TRCINFO_2(str_substring,
     (4, (static, STRING_RETRY,      String_p_substring, CONTEXT, THIS_STRING, INT32, INT32,   1, 1)),
     (3, (static, STRING_RETRY,      String_p_substring_1, CONTEXT, THIS_STRING, INT32,        1, 1)))
 JS_DEFINE_TRCINFO_1(str_charAt,
     (3, (extern, STRING_RETRY,      js_String_getelem, CONTEXT, THIS_STRING, INT32,           1, 1)))
 JS_DEFINE_TRCINFO_1(str_charCodeAt,
     (2, (extern, INT32_RETRY,       js_String_p_charCodeAt, THIS_STRING, INT32,               1, 1)))
 JS_DEFINE_TRCINFO_4(str_concat,
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -343,16 +343,28 @@ static inline bool isInt32(jsval v)
 {
     if (!isNumber(v))
         return false;
     jsdouble d = asNumber(v);
     jsint i;
     return JSDOUBLE_IS_INT(d, i);
 }
 
+static inline bool asInt32(jsval v, jsint& rv)
+{
+    if (!isNumber(v))
+        return false;
+    if (JSVAL_IS_INT(v)) {
+        rv = JSVAL_TO_INT(v);
+        return true;
+    }
+    jsdouble d = asNumber(v);
+    return JSDOUBLE_IS_INT(d, rv);
+}
+
 /* Return JSVAL_DOUBLE for all numbers (int and double) and the tag otherwise. */
 static inline uint8 getPromotedType(jsval v)
 {
     return JSVAL_IS_INT(v) ? JSVAL_DOUBLE : JSVAL_IS_NULL(v) ? JSVAL_TNULL : uint8(JSVAL_TAG(v));
 }
 
 /* Return JSVAL_INT for all whole numbers that fit into signed 32-bit and the tag otherwise. */
 static inline uint8 getCoercedType(jsval v)
@@ -2493,30 +2505,37 @@ JS_REQUIRES_STACK void
 TraceRecorder::compile(JSTraceMonitor* tm)
 {
     Fragmento* fragmento = tm->fragmento;
     if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
         debug_only_v(printf("Trace rejected: excessive stack use.\n"));
         js_BlacklistPC(tm, fragment, treeInfo->globalShape);
         return;
     }
-    ++treeInfo->branchCount;
+    if (anchor && anchor->exitType != CASE_EXIT)
+        ++treeInfo->branchCount;
     if (lirbuf->outOMem()) {
         fragmento->assm()->setError(nanojit::OutOMem);
         return;
     }
     ::compile(fragmento->assm(), fragment);
     if (fragmento->assm()->error() == nanojit::OutOMem)
         return;
     if (fragmento->assm()->error() != nanojit::None) {
         js_BlacklistPC(tm, fragment, treeInfo->globalShape);
         return;
     }
-    if (anchor)
-        fragmento->assm()->patch(anchor);
+    if (anchor) {
+#ifdef NANOJIT_IA32
+        if (anchor->exitType == CASE_EXIT)
+            fragmento->assm()->patch(anchor, anchor->switchInfo);
+        else
+#endif
+            fragmento->assm()->patch(anchor);
+    }
     JS_ASSERT(fragment->code());
     JS_ASSERT(!fragment->vmprivate);
     if (fragment == fragment->root)
         fragment->vmprivate = treeInfo;
     /* :TODO: windows support */
 #if defined DEBUG && !defined WIN32
     const char* filename = cx->fp->script->filename;
     char* label = (char*)malloc((filename ? strlen(filename) : 7) + 16);
@@ -4273,16 +4292,17 @@ monitor_loop:
 
     /* If we exit on a branch, or on a tree call guard, try to grow the inner tree (in case
        of a branch exit), or the tree nested around the tree we exited from (in case of the
        tree call guard). */
     switch (lr->exitType) {
       case UNSTABLE_LOOP_EXIT:
         return js_AttemptToStabilizeTree(cx, lr, NULL);
       case BRANCH_EXIT:
+      case CASE_EXIT:
         return js_AttemptToExtendTree(cx, lr, NULL, NULL);
       case LOOP_EXIT:
         if (innermostNestedGuard)
             return js_AttemptToExtendTree(cx, innermostNestedGuard, lr, NULL);
         return false;
       default:
         /* No, this was an unusual exit (i.e. out of memory/GC), so just resume interpretation. */
         return false;
@@ -4328,16 +4348,28 @@ TraceRecorder::monitorRecording(JSContex
 
         /* An explicit return from callDepth 0 should end the loop, not abort it. */
         if (*pc == JSOP_RETURN && tr->callDepth == 0) {
             AUDIT(returnLoopExits);
             tr->endLoop(&JS_TRACE_MONITOR(cx));
             js_DeleteRecorder(cx);
             return JSMRS_STOP; /* done recording */
         }
+#ifdef NANOJIT_IA32
+        /* Handle tableswitches specially--prepare a jump table if needed. */
+        if (*pc == JSOP_TABLESWITCH || *pc == JSOP_TABLESWITCHX) {
+            LIns* guardIns = tr->tableswitch();
+            if (guardIns) {
+                tr->fragment->lastIns = guardIns;
+                tr->compile(&JS_TRACE_MONITOR(cx));
+                js_DeleteRecorder(cx);
+                return JSMRS_STOP;
+            }
+        }
+#endif
     }
 
     /* If it's not a break or a return from a loop, continue recording and follow the trace. */
 
     /* We check for imacro-calling bytecodes inside the switch cases to resolve
        the "if" condition at the compile time. */
     bool flag;
     switch (op) {
@@ -4588,22 +4620,31 @@ js_FlushJITOracle(JSContext* cx)
     oracle.clear();
 }
 
 extern JS_REQUIRES_STACK void
 js_FlushScriptFragments(JSContext* cx, JSScript* script)
 {
     if (!TRACING_ENABLED(cx))
         return;
-    debug_only_v(printf("Flushing fragments for script %p.\n", script);)
+    debug_only_v(printf("Flushing fragments for JSScript %p.\n", script);)
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
     for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
-        VMFragment *f = tm->vmfragments[i];
-        if (f && JS_UPTRDIFF(f->ip, script->code) < script->length)
-            tm->vmfragments[i] = NULL;
+        for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
+            /* Disable future use of any script-associated VMFragment. */
+            if (JS_UPTRDIFF((*f)->ip, script->code) < script->length) {
+                debug_only_v(printf("Disconnecting VMFragment %p "
+                                    "with ip %p, in range [%p,%p).\n",
+                                    *f, (*f)->ip, script->code,
+                                    script->code + script->length));
+                *f = (*f)->next;
+            } else {
+                f = &((*f)->next);
+            }
+        }
     }
 }
 
 extern JS_REQUIRES_STACK void
 js_FlushJITCache(JSContext* cx)
 {
     if (!TRACING_ENABLED(cx))
         return;
@@ -4951,16 +4992,74 @@ TraceRecorder::ifop()
     if (!x->isCond()) {
         x = lir->ins_eq0(x);
         expected = !expected;
     }
     guard(expected, x, BRANCH_EXIT);
     return true;
 }
 
+#ifdef NANOJIT_IA32
+/* Record LIR for a tableswitch or tableswitchx op. We record LIR only
+ * the "first" time we hit the op. Later, when we start traces after
+ * exiting that trace, we just patch. */
+JS_REQUIRES_STACK LIns*
+TraceRecorder::tableswitch()
+{
+    jsval& v = stackval(-1);
+    LIns* v_ins = get(&v);
+    /* no need to guard if condition is constant */
+    if (v_ins->isconst() || v_ins->isconstq())
+        return NULL;
+
+    jsbytecode* pc = cx->fp->regs->pc;
+    /* Starting a new trace after exiting a trace via switch. */
+    if (anchor && (anchor->exitType == CASE_EXIT ||
+                   anchor->exitType == DEFAULT_EXIT) && fragment->ip == pc)
+        return NULL;
+
+    /* Decode jsop. */
+    jsint low, high;
+    if (*pc == JSOP_TABLESWITCH) {
+        pc += JUMP_OFFSET_LEN;
+        low = GET_JUMP_OFFSET(pc);
+        pc += JUMP_OFFSET_LEN;
+        high = GET_JUMP_OFFSET(pc);
+    } else {
+        pc += JUMPX_OFFSET_LEN;
+        low = GET_JUMPX_OFFSET(pc);
+        pc += JUMPX_OFFSET_LEN;
+        high = GET_JUMPX_OFFSET(pc);
+    }
+
+    /* Really large tables won't fit in a page. This is a conservative
+     * check. If it matters in practice we need to go off-page. */
+    if ((high + 1 - low) * sizeof(intptr_t*) + 128 > (unsigned) LARGEST_UNDERRUN_PROT) {
+        // This throws away the return value of switchop but it seems
+        // ok because switchop always returns true.
+        (void) switchop();
+        return NULL;
+    }
+
+    /* Generate switch LIR. */
+    LIns* si_ins = lir_buf_writer->skip(sizeof(SwitchInfo));
+    SwitchInfo* si = (SwitchInfo*) si_ins->payload();
+    si->count = high + 1 - low;
+    si->table = 0;
+    si->index = (uint32) -1;
+    LIns* diff = lir->ins2(LIR_sub, f2i(v_ins), lir->insImm(low));
+    LIns* cmp = lir->ins2(LIR_ult, diff, lir->insImm(si->count));
+    lir->insGuard(LIR_xf, cmp, snapshot(DEFAULT_EXIT));
+    lir->insStore(diff, lir->insImmPtr(&si->index), lir->insImm(0));
+    LIns* exit = snapshot(CASE_EXIT);
+    ((GuardRecord*) exit->payload())->exit->switchInfo = si;
+    return lir->insGuard(LIR_xtbl, diff, exit);
+}
+#endif
+
 JS_REQUIRES_STACK bool
 TraceRecorder::switchop()
 {
     jsval& v = stackval(-1);
     LIns* v_ins = get(&v);
     /* no need to guard if condition is constant */
     if (v_ins->isconst() || v_ins->isconstq())
         return true;
@@ -7003,27 +7102,27 @@ GetProperty(JSContext *cx, uintN argc, j
         return JS_FALSE;
     argv[0] = ID_TO_VALUE(id);
     return OBJ_GET_PROPERTY(cx, JS_THIS_OBJECT(cx, vp), id, &JS_RVAL(cx, vp));
 }
 
 static jsval FASTCALL
 GetProperty_tn(JSContext *cx, jsbytecode *pc, JSObject *obj, JSString *name)
 {
-    jsid id;
-    jsval v;
+    JSAutoTempIdRooter idr(cx);
+    JSAutoTempValueRooter tvr(cx);
 
     BEGIN_PC_HINT(pc);
-        if (!js_ValueToStringId(cx, STRING_TO_JSVAL(name), &id) ||
-            !OBJ_GET_PROPERTY(cx, obj, id, &v)) {
+        if (!js_ValueToStringId(cx, STRING_TO_JSVAL(name), idr.addr()) ||
+            !OBJ_GET_PROPERTY(cx, obj, idr.id(), tvr.addr())) {
             cx->builtinStatus |= JSBUILTIN_ERROR;
-            v = JSVAL_ERROR_COOKIE;
+            *tvr.addr() = JSVAL_ERROR_COOKIE;
         }
     END_PC_HINT();
-    return v;
+    return tvr.value();
 }
 
 static JSBool
 GetElement(JSContext *cx, uintN argc, jsval *vp)
 {
     jsval *argv;
     jsid id;
 
@@ -7034,30 +7133,30 @@ GetElement(JSContext *cx, uintN argc, js
         return JS_FALSE;
     argv[0] = ID_TO_VALUE(id);
     return OBJ_GET_PROPERTY(cx, JS_THIS_OBJECT(cx, vp), id, &JS_RVAL(cx, vp));
 }
 
 static jsval FASTCALL
 GetElement_tn(JSContext* cx, jsbytecode *pc, JSObject* obj, int32 index)
 {
-    jsval v;
-    jsid id;
-
-    if (!js_Int32ToId(cx, index, &id)) {
+    JSAutoTempValueRooter tvr(cx);
+    JSAutoTempIdRooter idr(cx);
+
+    if (!js_Int32ToId(cx, index, idr.addr())) {
         cx->builtinStatus |= JSBUILTIN_ERROR;
         return JSVAL_ERROR_COOKIE;
     }
     BEGIN_PC_HINT(pc);
-        if (!OBJ_GET_PROPERTY(cx, obj, id, &v)) {
+        if (!OBJ_GET_PROPERTY(cx, obj, idr.id(), tvr.addr())) {
             cx->builtinStatus |= JSBUILTIN_ERROR;
-            v = JSVAL_ERROR_COOKIE;
+            *tvr.addr() = JSVAL_ERROR_COOKIE;
         }
     END_PC_HINT();
-    return v;
+    return tvr.value();
 }
 
 JS_DEFINE_TRCINFO_1(GetProperty,
     (4, (static, JSVAL_FAIL,    GetProperty_tn, CONTEXT, PC, THIS, STRING,      0, 0)))
 JS_DEFINE_TRCINFO_1(GetElement,
     (4, (extern, JSVAL_FAIL,    GetElement_tn,  CONTEXT, PC, THIS, INT32,       0, 0)))
 
 JS_REQUIRES_STACK bool
@@ -7149,20 +7248,21 @@ SetProperty(JSContext *cx, uintN argc, j
         return JS_FALSE;
     JS_SET_RVAL(cx, vp, JSVAL_VOID);
     return JS_TRUE;
 }
 
 static int32 FASTCALL
 SetProperty_tn(JSContext* cx, JSObject* obj, JSString* idstr, jsval v)
 {
-    jsid id;
-
-    if (!js_ValueToStringId(cx, STRING_TO_JSVAL(idstr), &id) ||
-        !OBJ_SET_PROPERTY(cx, obj, id, &v)) {
+    JSAutoTempValueRooter tvr(cx, v);
+    JSAutoTempIdRooter idr(cx);
+
+    if (!js_ValueToStringId(cx, STRING_TO_JSVAL(idstr), idr.addr()) ||
+        !OBJ_SET_PROPERTY(cx, obj, idr.id(), tvr.addr())) {
         cx->builtinStatus |= JSBUILTIN_ERROR;
     }
     return JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID);
 }
 
 static JSBool
 SetElement(JSContext *cx, uintN argc, jsval *vp)
 {
@@ -7179,20 +7279,23 @@ SetElement(JSContext *cx, uintN argc, js
         return JS_FALSE;
     JS_SET_RVAL(cx, vp, JSVAL_VOID);
     return JS_TRUE;
 }
 
 static int32 FASTCALL
 SetElement_tn(JSContext* cx, JSObject* obj, int32 index, jsval v)
 {
-    jsid id;
-
-    if (!js_Int32ToId(cx, index, &id) || !OBJ_SET_PROPERTY(cx, obj, id, &v))
+    JSAutoTempIdRooter idr(cx);
+    JSAutoTempValueRooter tvr(cx, v);
+
+    if (!js_Int32ToId(cx, index, idr.addr()) ||
+        !OBJ_SET_PROPERTY(cx, obj, idr.id(), tvr.addr())) {
         cx->builtinStatus |= JSBUILTIN_ERROR;
+    }
     return JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID);
 }
 
 JS_DEFINE_TRCINFO_1(SetProperty,
     (4, (extern, BOOL_FAIL,     SetProperty_tn, CONTEXT, THIS, STRING, JSVAL,   0, 0)))
 JS_DEFINE_TRCINFO_1(SetElement,
     (4, (extern, BOOL_FAIL,     SetElement_tn,  CONTEXT, THIS, INT32, JSVAL,    0, 0)))
 
@@ -7590,30 +7693,16 @@ TraceRecorder::prop(JSObject* obj, LIns*
         stack(-cs.nuses, v_ins);
         slot = SPROP_INVALID_SLOT;
         return true;
     }
 
     /* Insist if setting on obj being the directly addressed object. */
     uint32 setflags = (cs.format & (JOF_SET | JOF_INCDEC | JOF_FOR));
     LIns* dslots_ins = NULL;
-    if (obj2 != obj) {
-        if (setflags)
-            ABORT_TRACE("JOF_SET opcode hit prototype chain");
-
-        /*
-         * We're getting a proto-property. Walk up the prototype chain emitting
-         * proto slot loads, updating obj as we go, leaving obj set to obj2 with
-         * obj_ins the last proto-load.
-         */
-        while (obj != obj2) {
-            obj_ins = stobj_get_slot(obj_ins, JSSLOT_PROTO, dslots_ins);
-            obj = STOBJ_GET_PROTO(obj);
-        }
-    }
 
     /* Don't trace getter or setter calls, our caller wants a direct slot. */
     if (PCVAL_IS_SPROP(pcval)) {
         JSScopeProperty* sprop = PCVAL_TO_SPROP(pcval);
 
         if (setflags && !SPROP_HAS_STUB_SETTER(sprop))
             ABORT_TRACE("non-stub setter");
         if (setflags && (sprop->attrs & JSPROP_READONLY))
@@ -7640,16 +7729,31 @@ TraceRecorder::prop(JSObject* obj, LIns*
             ABORT_TRACE("no valid slot");
         slot = sprop->slot;
     } else {
         if (!PCVAL_IS_SLOT(pcval))
             ABORT_TRACE("PCE is not a slot");
         slot = PCVAL_TO_SLOT(pcval);
     }
 
+    if (obj2 != obj) {
+        if (setflags)
+            ABORT_TRACE("JOF_SET opcode hit prototype chain");
+
+        /*
+         * We're getting a proto-property. Walk up the prototype chain emitting
+         * proto slot loads, updating obj as we go, leaving obj set to obj2 with
+         * obj_ins the last proto-load.
+         */
+        while (obj != obj2) {
+            obj_ins = stobj_get_slot(obj_ins, JSSLOT_PROTO, dslots_ins);
+            obj = STOBJ_GET_PROTO(obj);
+        }
+    }
+
     v_ins = stobj_get_slot(obj_ins, slot, dslots_ins);
     unbox_jsval(STOBJ_GET_SLOT(obj, slot), v_ins);
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::elem(jsval& oval, jsval& idx, jsval*& vp, LIns*& v_ins, LIns*& addr_ins)
 {
@@ -7814,17 +7918,21 @@ JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_AND()
 {
     return ifop();
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_TABLESWITCH()
 {
+#ifdef NANOJIT_IA32
+    return true;
+#else
     return switchop();
+#endif
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_LOOKUPSWITCH()
 {
     return switchop();
 }
 
@@ -8455,17 +8563,21 @@ JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_DEFAULTX()
 {
     return true;
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_TABLESWITCHX()
 {
+#ifdef NANOJIT_IA32
+    return true;
+#else
     return switchop();
+#endif
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_LOOKUPSWITCHX()
 {
     return switchop();
 }
 
@@ -8922,16 +9034,19 @@ JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_TYPEOFEXPR()
 {
     return record_JSOP_TYPEOF();
 }
 
 JS_REQUIRES_STACK bool
 TraceRecorder::record_JSOP_ENTERBLOCK()
 {
+    if (cx->fp->flags & JSFRAME_POP_BLOCKS)
+        ABORT_TRACE("can't trace after js_GetScopeChain");
+
     JSScript* script = cx->fp->script;
     JSFrameRegs& regs = *cx->fp->regs;
     JSObject* obj;
     JS_GET_SCRIPT_OBJECT(script, GET_FULL_INDEX(0), obj);
 
     LIns* void_ins = INS_CONST(JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID));
     for (int i = 0, n = OBJ_BLOCK_COUNT(cx, obj); i < n; i++)
         stack(i, void_ins);
@@ -9126,27 +9241,27 @@ static JSBool
 CallIteratorNext(JSContext *cx, uintN argc, jsval *vp)
 {
     return js_CallIteratorNext(cx, JS_THIS_OBJECT(cx, vp), &JS_RVAL(cx, vp));
 }
 
 static jsval FASTCALL
 CallIteratorNext_tn(JSContext* cx, jsbytecode* pc, JSObject* iterobj)
 {
-    jsval v;
+    JSAutoTempValueRooter tvr(cx);
 
     BEGIN_PC_HINT(pc);
-        bool ok = js_CallIteratorNext(cx, iterobj, &v);
+        bool ok = js_CallIteratorNext(cx, iterobj, tvr.addr());
     END_PC_HINT();
 
     if (!ok) {
         cx->builtinStatus |= JSBUILTIN_ERROR;
         return JSVAL_ERROR_COOKIE;
     }
-    return v;
+    return tvr.value();
 }
 
 JS_DEFINE_TRCINFO_1(ObjectToIterator,
     (4, (static, OBJECT_FAIL, ObjectToIterator_tn, CONTEXT, PC, THIS, INT32, 0, 0)))
 JS_DEFINE_TRCINFO_1(CallIteratorNext,
     (3, (static, JSVAL_FAIL,  CallIteratorNext_tn, CONTEXT, PC, THIS,        0, 0)))
 
 static const struct BuiltinFunctionInfo {
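Taken together, the jstracer.cpp pieces above implement switch dispatch on IA32 as a patchable jump table: tableswitch() allocates a SwitchInfo in the LIR buffer, emits a DEFAULT_EXIT guard for out-of-range values, stores diff = value - low into si->index, and ends the trace with a LIR_xtbl guard whose CASE_EXIT carries the SwitchInfo. Each table entry initially exits to the interpreter; when a case's trace is compiled, only the slot that was just taken gets redirected. A sketch of that patch step (mirroring Assembler::patch(SideExit*, SwitchInfo*) in the nanojit hunks below; names are illustrative):

    #include <stdint.h>

    struct SwitchInfoSketch {       // mirrors nanojit::SwitchInfo
        void**   table;             // count entries, all pointing at the epilogue at first
        uint32_t count;             // high + 1 - low
        uint32_t index;             // diff stored by the trace on each execution
    };

    static void
    PatchTakenCase(SwitchInfoSketch* si, void* fragEntry)
    {
        // si->index still holds the offset of the case that just exited,
        // so only that entry is redirected to the newly compiled fragment;
        // untaken cases keep exiting until they are recorded themselves.
        si->table[si->index] = fragEntry;
    }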
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -202,16 +202,18 @@ public:
     JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx,
                                                      SlotList& slots,
                                                      unsigned stackSlots);
     bool matches(TypeMap& other) const;
 };
 
 enum ExitType {
     BRANCH_EXIT,
+    CASE_EXIT,          // Exit at a tableswitch via a numbered case
+    DEFAULT_EXIT,       // Exit at a tableswitch via default
     LOOP_EXIT,
     NESTED_EXIT,
     MISMATCH_EXIT,
     OOM_EXIT,
     OVERFLOW_EXIT,
     UNSTABLE_LOOP_EXIT,
     TIMEOUT_EXIT,
     DEEP_BAIL_EXIT,
@@ -435,16 +437,19 @@ class TraceRecorder : public avmplus::GC
     nanojit::LIns* f2i(nanojit::LIns* f);
     JS_REQUIRES_STACK nanojit::LIns* makeNumberInt32(nanojit::LIns* f);
     JS_REQUIRES_STACK nanojit::LIns* stringify(jsval& v);
 
     JS_REQUIRES_STACK bool call_imacro(jsbytecode* imacro);
 
     JS_REQUIRES_STACK bool ifop();
     JS_REQUIRES_STACK bool switchop();
+#ifdef NANOJIT_IA32
+    JS_REQUIRES_STACK nanojit::LIns* tableswitch();
+#endif
     JS_REQUIRES_STACK bool inc(jsval& v, jsint incr, bool pre = true);
     JS_REQUIRES_STACK bool inc(jsval& v, nanojit::LIns*& v_ins, jsint incr, bool pre = true);
     JS_REQUIRES_STACK bool incProp(jsint incr, bool pre = true);
     JS_REQUIRES_STACK bool incElem(jsint incr, bool pre = true);
     JS_REQUIRES_STACK bool incName(jsint incr, bool pre = true);
 
     JS_REQUIRES_STACK void strictEquality(bool equal, bool cmpCase);
     JS_REQUIRES_STACK bool equality(bool negate, bool tryBranchAfterCond);
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -674,16 +674,27 @@ namespace nanojit
         GuardRecord *rec = exit->guards;
         AvmAssert(rec);
         while (rec) {
             patch(rec);
             rec = rec->next;
         }
     }
 
+#ifdef NANOJIT_IA32
+    void Assembler::patch(SideExit* exit, SwitchInfo* si)
+    {
+		for (GuardRecord* lr = exit->guards; lr; lr = lr->next) {
+			Fragment *frag = lr->exit->target;
+			NanoAssert(frag->fragEntry != 0);
+			si->table[si->index] = frag->fragEntry;
+		}
+    }
+#endif
+
     NIns* Assembler::asm_exit(LInsp guard)
     {
 		SideExit *exit = guard->record()->exit;
 		NIns* at = 0;
 		if (!_branchStateMap->get(exit))
 		{
 			at = asm_leave_trace(guard);
 		}
@@ -1021,18 +1032,21 @@ namespace nanojit
 #define countlir_xcc()
 #define countlir_x()
 #define countlir_loop()
 #define countlir_call()
 #endif
 
 	void Assembler::gen(LirFilter* reader,  NInsList& loopJumps)
 	{
-		// trace must start with LIR_x or LIR_loop
-		NanoAssert(reader->pos()->isop(LIR_x) || reader->pos()->isop(LIR_loop));
+		// trace must end with LIR_x, LIR_loop, LIR_ret, or LIR_xtbl
+		NanoAssert(reader->pos()->isop(LIR_x) ||
+		           reader->pos()->isop(LIR_loop) ||
+		           reader->pos()->isop(LIR_ret) ||
+				   reader->pos()->isop(LIR_xtbl));
 		 
 		for (LInsp ins = reader->read(); ins != 0 && !error(); ins = reader->read())
 		{
 			LOpcode op = ins->opcode();			
 			switch(op)
 			{
 				default:
 					NanoAssertMsgf(false, "unsupported LIR instruction: %d (~0x40: %d)", op, op&~LIR64);
@@ -1334,16 +1348,27 @@ namespace nanojit
                         label->addr = _nIns;
                     }
 					verbose_only( if (_verbose) { outputAddr=true; asm_output("[%s]", _thisfrag->lirbuf->names->formatRef(ins)); } )
 					break;
 				}
 				case LIR_xbarrier: {
 					break;
 				}
+#ifdef NANOJIT_IA32
+				case LIR_xtbl: {
+                    NIns* exit = asm_exit(ins); // does intersectRegisterState()
+					asm_switch(ins, exit);
+					break;
+				}
+#else
+ 			    case LIR_xtbl:
+					NanoAssertMsg(0, "Not supported for this architecture");
+					break;
+#endif
                 case LIR_xt:
 				case LIR_xf:
 				{
                     countlir_xcc();
 					// we only support cmp with guard right now, also assume it is 'close' and only emit the branch
                     NIns* exit = asm_exit(ins); // does intersectRegisterState()
 					LIns* cond = ins->oprnd1();
 					asm_branch(op == LIR_xf, cond, exit, false);
@@ -1435,16 +1460,38 @@ namespace nanojit
 				return;
 
 			// check that all is well (don't check in exit paths since its more complicated)
 			debug_only( pageValidate(); )
 			debug_only( resourceConsistencyCheck();  )
 		}
 	}
 
+	/*
+	 * Write a jump table for the given SwitchInfo and store the table
+	 * address in the SwitchInfo. Every entry will initially point to
+	 * target.
+	 */
+	void Assembler::emitJumpTable(SwitchInfo* si, NIns* target)
+	{
+		underrunProtect(si->count * sizeof(NIns*) + 20);
+		// Align for platform. The branch should be optimized away and is
+		// required to select the compatible int type.
+		if (sizeof(NIns*) == 8) {
+			_nIns = (NIns*) (uint64(_nIns) & ~7);
+		} else if (sizeof(NIns*) == 4) {
+		    _nIns = (NIns*) (uint32(_nIns) & ~3);
+		}
+		for (uint32_t i = 0; i < si->count; ++i) {
+			_nIns = (NIns*) (((uint8*) _nIns) - sizeof(NIns*));
+			*(NIns**) _nIns = target;
+		}
+		si->table = (NIns**) _nIns;
+	}
+
     void Assembler::assignSavedRegs()
     {
         // restore saved regs
 		releaseRegisters();
         LirBuffer *b = _thisfrag->lirbuf;
         for (int i=0, n = NumSavedRegs; i < n; i++) {
             LIns *p = b->savedRegs[i];
             if (p)
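emitJumpTable above grows the table downward from the emission cursor after rounding the cursor down to pointer alignment; underrunProtect(si->count * sizeof(NIns*) + 20) reserves the entries plus alignment slack. The same rounding, written once with uintptr_t instead of the patch's size branch (a sketch, not the tree's code):

    #include <stdint.h>

    // Round a downward-growing code/data cursor down to a sizeof(void*)
    // boundary so each jump-table entry is naturally aligned.
    static uint8_t* AlignDownForPointers(uint8_t* p)
    {
        return (uint8_t*)((uintptr_t)p & ~(uintptr_t)(sizeof(void*) - 1));
    }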
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -186,16 +186,19 @@ namespace nanojit
 
 			void		assemble(Fragment* frag, NInsList& loopJumps);
 			void		endAssembly(Fragment* frag, NInsList& loopJumps);
 			void		beginAssembly(Fragment *frag, RegAllocMap* map);
 			void		copyRegisters(RegAlloc* copyTo);
 			void		releaseRegisters();
             void        patch(GuardRecord *lr);
             void        patch(SideExit *exit);
+#ifdef NANOJIT_IA32
+			void        patch(SideExit* exit, SwitchInfo* si);
+#endif
 			AssmError   error()	{ return _err; }
 			void		setError(AssmError e) { _err = e; }
 			void		setCallTable(const CallInfo *functions);
 			void		pageReset();
 			int32_t		codeBytes();
 			Page*		handoverPages(bool exitPages=false);
 
 			debug_only ( void		pageValidate(); )
@@ -310,16 +313,18 @@ namespace nanojit
 			void		asm_i2f(LInsp ins);
 			void		asm_u2f(LInsp ins);
 			Register	asm_prep_fcall(Reservation *rR, LInsp ins);
 			void		asm_nongp_copy(Register r, Register s);
 			void		asm_call(LInsp);
             void        asm_arg(ArgSize, LInsp, Register);
 			Register	asm_binop_rhs_reg(LInsp ins);
 			NIns*		asm_branch(bool branchOnFalse, LInsp cond, NIns* targ, bool isfar);
+			void        asm_switch(LIns* ins, NIns* target);
+			void        emitJumpTable(SwitchInfo* si, NIns* target);
             void        assignSavedRegs();
             void        reserveSavedRegs();
             void        assignParamRegs();
             void        handleLoopCarriedExprs();
 			
 			// flag values for nMarkExecute
 			enum 
 			{
--- a/js/src/nanojit/Fragmento.cpp
+++ b/js/src/nanojit/Fragmento.cpp
@@ -53,21 +53,28 @@ namespace nanojit
 		if (in > 30) return 30;	// 1GB should be enough for anyone
 		return in;
 	}
 
 	/**
 	 * This is the main control center for creating and managing fragments.
 	 */
 	Fragmento::Fragmento(AvmCore* core, uint32_t cacheSizeLog2) 
-		: _frags(core->GetGC()),
-           _freePages(core->GetGC(), 1024),
+		:
+#ifdef NJ_VERBOSE
+		  enterCounts(NULL),
+		  mergeCounts(NULL),
+		  labels(NULL),
+#endif
+		  _frags(core->GetGC()),
+		  _freePages(core->GetGC(), 1024),
 		  _allocList(core->GetGC()),
-			_max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
-			_pagesGrowth(1)
+		  _gcHeap(NULL),
+		  _max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
+		  _pagesGrowth(1)
 	{
 #ifdef _DEBUG
 		{
 			// XXX These belong somewhere else, but I can't find the
 			//     right location right now.
 			NanoStaticAssert((LIR_lt ^ 3) == LIR_ge);
 			NanoStaticAssert((LIR_le ^ 3) == LIR_gt);
 			NanoStaticAssert((LIR_ult ^ 3) == LIR_uge);
@@ -90,16 +97,18 @@ namespace nanojit
 		_allocList.set_meminfo_name("Fragmento._allocList");
 #endif
 		NanoAssert(_max_pages > _pagesGrowth); // shrink growth if needed 
 		_core = core;
 		GC *gc = core->GetGC();
 		_assm = NJ_NEW(gc, nanojit::Assembler)(this);
 		verbose_only( enterCounts = NJ_NEW(gc, BlockHist)(gc); )
 		verbose_only( mergeCounts = NJ_NEW(gc, BlockHist)(gc); )
+
+		memset(&_stats, 0, sizeof(_stats));
 	}
 
 	Fragmento::~Fragmento()
 	{
         AllocEntry *entry;
 
 		clearFrags();
         _frags.clear();		
@@ -511,19 +520,56 @@ namespace nanojit
 		drawTraceTrees(this, this->_frags, this->_core, fileName);
 	}
 #endif
 #endif // NJ_VERBOSE
 
 	//
 	// Fragment
 	//
-	Fragment::Fragment(const void* _ip) : ip(_ip)
+	Fragment::Fragment(const void* _ip)
+		:
+#ifdef NJ_VERBOSE
+		  _called(0),
+		  _native(0),
+		  _exitNative(0),
+		  _lir(0),
+		  _lirbytes(0),
+		  _token(NULL),
+		  traceTicks(0),
+		  interpTicks(0),
+		  eot_target(NULL),
+		  sid(0),
+		  compileNbr(0),
+#endif
+		  treeBranches(NULL),
+		  branches(NULL),
+		  nextbranch(NULL),
+		  anchor(NULL),
+		  root(NULL),
+		  parent(NULL),
+		  first(NULL),
+		  peer(NULL),
+		  lirbuf(NULL),
+		  lastIns(NULL),
+		  spawnedFrom(NULL),
+		  kind(LoopTrace),
+		  ip(_ip),
+		  guardCount(0),
+		  xjumpCount(0),
+		  recordAttempts(0),
+		  blacklistLevel(0),
+		  fragEntry(NULL),
+		  loopEntry(NULL),
+		  vmprivate(NULL),
+		  _code(NULL),
+		  _links(NULL),
+		  _hits(0),
+		  _pages(NULL)
 	{
-        // Fragment is a gc object which is zero'd by the GC, no need to clear fields
     }
 
 	Fragment::~Fragment()
 	{
 		NanoAssert(_pages == 0);
     }
 
     void Fragment::resetHits()
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -83,17 +83,23 @@ namespace nanojit
 	#undef counter_value
 	#define counter_value(x)		x
 #endif /* NJ_PROFILE */
 
 	//static int32_t buffer_count = 0;
 	
 	// LCompressedBuffer
 	LirBuffer::LirBuffer(Fragmento* frago, const CallInfo* functions)
-		: _frago(frago), _functions(functions), abi(ABI_FASTCALL), _pages(frago->core()->GetGC())
+		: _frago(frago),
+#ifdef NJ_VERBOSE
+		  names(NULL),
+#endif
+		  _functions(functions), abi(ABI_FASTCALL),
+		  state(NULL), param1(NULL), sp(NULL), rp(NULL),
+		  _pages(frago->core()->GetGC())
 	{
 		rewind();
 	}
 
 	LirBuffer::~LirBuffer()
 	{
 		clear();
 		verbose_only(if (names) NJ_DELETE(names);)
@@ -1878,16 +1884,17 @@ namespace nanojit
 			case LIR_not: 
 				sprintf(s, "%s = %s %s", formatRef(i), lirNames[op], formatRef(i->oprnd1()));
 				break;
 
 			case LIR_x:
 			case LIR_xt:
 			case LIR_xf:
 			case LIR_xbarrier:
+			case LIR_xtbl:
 				formatGuard(i, s);
 				break;
 
 			case LIR_add:
 			case LIR_addp:
 			case LIR_sub: 
 		 	case LIR_mul: 
 			case LIR_fadd:
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -134,18 +134,31 @@ namespace nanojit
             return _count_args(_ARGSIZE_MASK_ANY) + isIndirect();
         }
 		inline uint32_t FASTCALL count_iargs() const {
             return _count_args(_ARGSIZE_MASK_INT);
         }
 		// fargs = args - iargs
 	};
 
+	/*
+	 * Record for extra data used to compile switches as jump tables.
+	 */
+	struct SwitchInfo
+	{
+		NIns**      table;       // Jump table; a jump address is NIns*
+		uint32_t    count;       // Number of table entries
+		// Index value at last execution of the switch. The index value
+		// is the offset into the jump table. Thus it is computed as 
+		// (switch expression) - (lowest case value).
+		uint32_t    index;
+	};
+
     inline bool isGuard(LOpcode op) {
-        return op == LIR_x || op == LIR_xf || op == LIR_xt || op == LIR_loop || op == LIR_xbarrier;
+        return op == LIR_x || op == LIR_xf || op == LIR_xt || op == LIR_loop || op == LIR_xbarrier || op == LIR_xtbl;
     }
 
     inline bool isCall(LOpcode op) {
         op = LOpcode(op & ~LIR64);
         return op == LIR_call || op == LIR_calli;
     }
 
     inline bool isStore(LOpcode op) {
--- a/js/src/nanojit/LIRopcode.tbl
+++ b/js/src/nanojit/LIRopcode.tbl
@@ -172,18 +172,18 @@ OPDEF(ult,      60, 2) // 0x3C 0011 1100
 OPDEF(ugt,      61, 2) // 0x3D 0011 1101
 OPDEF(ule,      62, 2) // 0x3E 0011 1110
 OPDEF(uge,      63, 2) // 0x3F 0011 1111
 
 OPDEF64(2,          0, 2) // wraps a pair of refs
 OPDEF64(file,       1, 2)
 OPDEF64(line,       2, 2)
 OPDEF64(xbarrier,   3, 1) // memory barrier (dummy guard)
+OPDEF64(xtbl,       4, 1) // exit via indirect jump
 
-OPDEF64(unused4_64,   4, 2)
 OPDEF64(unused5_64,   5, 2)
 OPDEF64(unused6_64,   6, 2)
 OPDEF64(unused7_64,   7, 2)
 OPDEF64(unused8_64,   8, 2)
 OPDEF64(unused9_64,   9, 2)
 OPDEF64(unused10_64, 10, 2)
 
 OPDEF64(stq, LIR_st, 2) // quad store
--- a/js/src/nanojit/Native.h
+++ b/js/src/nanojit/Native.h
@@ -57,29 +57,31 @@
 #error "unknown nanojit architecture"
 #endif
 
 namespace nanojit {
 	const uint32_t NJ_PAGE_SIZE = 1 << NJ_LOG2_PAGE_SIZE;
 	
     class Fragment;
     struct SideExit;
+	struct SwitchInfo;
     
     struct GuardRecord 
     {
         void* jmp;
         GuardRecord* next;
         SideExit* exit;
     };
     
     struct SideExit
     {
         GuardRecord* guards;
         Fragment* from;
         Fragment* target;
+		SwitchInfo* switchInfo;
         
         void addGuard(GuardRecord* lr) 
         {
             lr->next = guards;
             guards = lr;
         }
     };
 }
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -132,73 +132,54 @@ namespace nanojit
         PUSHr(FP); // Save caller's FP.
 
 		if (!_thisfrag->lirbuf->explicitSavedRegs) {
 			PUSHr(FP); // dummy
 			for (int i = 0; i < NumSavedRegs; ++i)
 				PUSHr(savedRegs[i]);
 		}
 
-        // align the entry point
-        asm_align_code();
-
 		return fragEntry;
 	}
 
-    void Assembler::asm_align_code() {
-        static uint8_t nop[][9] = {
-                {0x90},
-                {0x66,0x90},
-                {0x0f,0x1f,0x00},
-                {0x0f,0x1f,0x40,0x00},
-                {0x0f,0x1f,0x44,0x00,0x00},
-                {0x66,0x0f,0x1f,0x44,0x00,0x00},
-                {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00},
-                {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00},
-                {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00},
-        };
-        unsigned n;
-        while((n = uintptr_t(_nIns) & 15) != 0) {
-            if (n > 9)
-                n = 9;
-            underrunProtect(n);
-            _nIns -= n;
-            memcpy(_nIns, nop[n-1], n);
-            asm_output("nop%d", n);
-        }
-    }
-
 	void Assembler::nFragExit(LInsp guard)
 	{
 		SideExit *exit = guard->record()->exit;
 		bool trees = _frago->core()->config.tree_opt;
         Fragment *frag = exit->target;
         GuardRecord *lr = 0;
 		bool destKnown = (frag && frag->fragEntry);
-		if (destKnown && !trees)
-		{
-			// already exists, emit jump now.  no patching required.
-			JMP(frag->fragEntry);
-            lr = 0;
-		}
-		else
-		{
-			// target doesn't exit yet.  emit jump to epilog, and set up to patch later.
+		// Generate jump to epilog and initialize lr.
+		// If the guard is LIR_xtbl, use a jump table with epilog in every entry
+		if (guard->isop(LIR_xtbl)) {
 			lr = guard->record();
-#if defined NANOJIT_AMD64
-            /* 8 bytes for address, 4 for imm32, 2 for jmp */
-            underrunProtect(14);
-            _nIns -= 8;
-            *(intptr_t *)_nIns = intptr_t(_epilogue);
-            lr->jmp = _nIns;
-            JMPm_nochk(0);
-#else
-            JMP_long(_epilogue);
-            lr->jmp = _nIns;
-#endif
+			Register r = EBX;
+			SwitchInfo* si = guard->record()->exit->switchInfo;
+			emitJumpTable(si, _epilogue);
+			JMP_indirect(r);
+			LEAmi4(r, si->table, r);
+		} else {
+			// If the guard already exists, use a simple jump.
+			if (destKnown && !trees) {
+				JMP(frag->fragEntry);
+				lr = 0;
+			} else {  // target doesn't exist. Use 0 jump offset and patch later
+				lr = guard->record();
+	#if defined NANOJIT_AMD64
+				/* 8 bytes for address, 4 for imm32, 2 for jmp */
+				underrunProtect(14);
+				_nIns -= 8;
+				*(intptr_t *)_nIns = intptr_t(_epilogue);
+				lr->jmp = _nIns;
+				JMPm_nochk(0);
+	#else
+				JMP_long(_epilogue);
+				lr->jmp = _nIns;
+	#endif
+			}
 		}
 		// first restore ESP from EBP, undoing SUBi(SP,amt) from genPrologue
         MR(SP,FP);
 
 		// return value is GuardRecord*
 	#if defined NANOJIT_IA32
         LDi(EAX, int(lr));
 	#elif defined NANOJIT_AMD64
@@ -907,16 +888,23 @@ namespace nanojit
 			else //if (condop == LIR_uge)
 				JAE(targ, isfar);
 		}
 		at = _nIns;
 		asm_cmp(cond);
 		return at;
 	}
 
+	void Assembler::asm_switch(LIns* ins, NIns* exit)
+	{
+		LIns* diff = ins->oprnd1();
+		findSpecificRegFor(diff, EBX);
+		JMP(exit);
+   	}
+
 	void Assembler::asm_cmp(LIns *cond)
 	{
         LOpcode condop = cond->opcode();
         
         // LIR_ov and LIR_cs recycle the flags set by arithmetic ops
         if ((condop == LIR_ov) || (condop == LIR_cs))
             return;
         
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -96,17 +96,17 @@ namespace nanojit
 	#define NJ_MAX_STACK_ENTRY 256
 	#define NJ_MAX_PARAMETERS 1
 
         // Preserve a 16-byte stack alignment, to support the use of
         // SSE instructions like MOVDQA (if not by Tamarin itself,
         // then by the C functions it calls).
 	const int NJ_ALIGN_STACK = 16;
 
-	const int32_t LARGEST_UNDERRUN_PROT = 32;  // largest value passed to underrunProtect
+	const int32_t LARGEST_UNDERRUN_PROT = 3200;  // largest value passed to underrunProtect
 	
 	typedef uint8_t NIns;
 
 	// These are used as register numbers in various parts of the code
 	typedef enum
 	{
 		// general purpose 32bit regs
 		EAX = 0, // return value, scratch
@@ -176,18 +176,17 @@ namespace nanojit
 
 	#define DECLARE_PLATFORM_ASSEMBLER()	\
         const static Register argRegs[2], retRegs[2]; \
 		bool x87Dirty;						\
 		bool pad[3];\
 		void nativePageReset();\
 		void nativePageSetup();\
         void underrunProtect(int);\
-        void asm_farg(LInsp);\
-        void asm_align_code();
+        void asm_farg(LInsp);
 		
 	#define swapptrs()  { NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins; }
 		
 #define IMM32(i)	\
 	_nIns -= 4;		\
 	*((int32_t*)_nIns) = (int32_t)(i)
 
 #define MODRMs(r,d,b,l,i) \
@@ -372,16 +371,19 @@ namespace nanojit
 #define ADDmi(d,b,i) do { count_alust(); ALUmi(0x05, d, b, i); asm_output("add %d(%s), %d", d, gpn(b), i); } while(0)
 
 #define TEST(d,s)	do { count_alu(); ALU(0x85,d,s);				asm_output("test %s,%s",gpn(d),gpn(s)); } while(0)
 #define CMP(l,r)	do { count_alu(); ALU(0x3b, (l),(r));			asm_output("cmp %s,%s",gpn(l),gpn(r)); } while(0)
 #define CMPi(r,i)	do { count_alu(); ALUi(0x3d,r,i);				asm_output("cmp %s,%d",gpn(r),i); } while(0)
 
 #define MR(d,s)		do { count_mov(); ALU(0x8b,d,s);				asm_output("mov %s,%s",gpn(d),gpn(s)); } while(0)
 #define LEA(r,d,b)	do { count_alu(); ALUm(0x8d, r,d,b);			asm_output("lea %s,%d(%s)",gpn(r),d,gpn(b)); } while(0)
+// lea %r, d(%i*4)
+// This addressing mode is not supported by the MODRMSIB macro.
+#define LEAmi4(r,d,i) do { count_alu(); IMM32(d); *(--_nIns) = (2<<6)|(i<<3)|5; *(--_nIns) = (0<<6)|(r<<3)|4; *(--_nIns) = 0x8d;                    asm_output("lea %s, %p(%s*4)", gpn(r), d, gpn(i)); } while(0)
 
 #define SETE(r)		do { count_alu(); ALU2(0x0f94,(r),(r));			asm_output("sete %s",gpn(r)); } while(0)
 #define SETNP(r)	do { count_alu(); ALU2(0x0f9B,(r),(r));			asm_output("setnp %s",gpn(r)); } while(0)
 #define SETL(r)		do { count_alu(); ALU2(0x0f9C,(r),(r));			asm_output("setl %s",gpn(r)); } while(0)
 #define SETLE(r)	do { count_alu(); ALU2(0x0f9E,(r),(r));			asm_output("setle %s",gpn(r)); } while(0)
 #define SETG(r)		do { count_alu(); ALU2(0x0f9F,(r),(r));			asm_output("setg %s",gpn(r)); } while(0)
 #define SETGE(r)	do { count_alu(); ALU2(0x0f9D,(r),(r));			asm_output("setge %s",gpn(r)); } while(0)
 #define SETB(r)     do { count_alu(); ALU2(0x0f92,(r),(r));          asm_output("setb %s",gpn(r)); } while(0)
@@ -569,16 +571,22 @@ namespace nanojit
 
 // this should only be used when you can guarantee there is enough room on the page
 #define JMP_long_nochk_offset(o) do {\
 		verbose_only( NIns* next = _nIns; (void)next; ) \
  		IMM32((o)); \
  		*(--_nIns) = JMP32; \
 		asm_output("jmp %p",(next+(o))); } while(0)
 
+#define JMP_indirect(r) do { \
+        underrunProtect(2);  \
+        MODRMm(4, 0, r);     \
+        *(--_nIns) = 0xff;   \
+        asm_output("jmp *(%s)", gpn(r)); } while (0)
+
 #define JE(t, isfar)	   JCC(0x04, t, isfar, "je")
 #define JNE(t, isfar)	   JCC(0x05, t, isfar, "jne")
 #define JP(t, isfar)	   JCC(0x0A, t, isfar, "jp")
 #define JNP(t, isfar)	   JCC(0x0B, t, isfar, "jnp")
 
 #define JB(t, isfar)	   JCC(0x02, t, isfar, "jb")
 #define JNB(t, isfar)	   JCC(0x03, t, isfar, "jnb")
 #define JBE(t, isfar)	   JCC(0x06, t, isfar, "jbe")
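Reading the Nativei386 hunks together: asm_switch pins the computed diff into EBX and jumps to the exit stub, and nFragExit emits that stub backwards (emitJumpTable, then JMP_indirect, then LEAmi4), so in execution order the stub is lea ebx, [ebx*4 + si->table] followed by jmp [ebx], with the table itself sitting just past the jump as data. In C terms the dispatch amounts to the following sketch (names are illustrative):

    #include <stdint.h>

    typedef void (*CaseEntry)();   // one jump-table slot; NIns* in the tree

    static void
    DispatchSketch(CaseEntry* table, uint32_t diff)
    {
        // LEAmi4(EBX, table, EBX): ebx = (uint8_t*)table + diff * 4
        // JMP_indirect(EBX):       jump through the slot ebx points at
        table[diff]();
    }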
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -41,16 +41,17 @@
 /*
  * JS shell.
  */
 #include "jsstddef.h"
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <signal.h>
 #include <locale.h>
 #include "jstypes.h"
 #include "jsarena.h"
 #include "jsutil.h"
 #include "jsprf.h"
 #include "jsapi.h"
 #include "jsarray.h"
 #include "jsatom.h"
@@ -2800,21 +2801,16 @@ ShapeOf(JSContext *cx, uintN argc, jsval
     jsval v = JS_ARGV(cx, vp)[0];
     if (!JSVAL_IS_OBJECT(v)) {
         JS_ReportError(cx, "shapeOf: object expected");
         return JS_FALSE;
     }
     return JS_NewNumberValue(cx, ShapeOf_tn(JSVAL_TO_OBJECT(v)), vp);
 }
 
-static void
-Callback(JSRuntime *rt)
-{
-}
-
 #ifdef JS_THREADSAFE
 
 static JSBool
 Sleep_fn(JSContext *cx, uintN argc, jsval *vp)
 {
     jsdouble t_secs;
     PRUint32 t_ticks;
     jsrefcount rc;
--- a/js/src/trace-test.js
+++ b/js/src/trace-test.js
@@ -4335,16 +4335,26 @@ function testGeneratorDeepBail() {
     for (let i = 0; i < iterables.length; i++)
         for each (let j in iterables[i])
                      total += j;
     return total;
 }
 testGeneratorDeepBail.expected = 3;
 test(testGeneratorDeepBail);
 
+function testRegexpGet() {
+    var re = /hi/;
+    var a = [];
+    for (let i = 0; i < 5; ++i)
+        a.push(re.source);
+    return a.toString();
+}
+testRegexpGet.expected = "hi,hi,hi,hi,hi";
+test(testRegexpGet);
+
 /*****************************************************************************
  *                                                                           *
  *  _____ _   _  _____ ______ _____ _______                                  *
  * |_   _| \ | |/ ____|  ____|  __ \__   __|                                 *
  *   | | |  \| | (___ | |__  | |__) | | |                                    *
  *   | | | . ` |\___ \|  __| |  _  /  | |                                    *
  *  _| |_| |\  |____) | |____| | \ \  | |                                    *
  * |_____|_| \_|_____/|______|_|  \_\ |_|                                    *
@@ -4382,20 +4392,16 @@ load("mandelbrot-results.js");
 
   // Control of iteration numbers and sizing.  We'll do
   // scaler * colorNames.length iterations or so before deciding that we
   // don't escape.
   const scaler = 5;
   const numRows = 600;
   const numCols = 600;
 
-  // For now, avoid hitting memory pressure
-  gcparam("maxBytes", 1300000000);
-  gcparam("maxMallocBytes", 1300000000);
-
   const colorNames = [
     "black",
     "green",
     "blue",
     "red",
     "purple",
     "orange",
     "cyan",