Merge mozilla-central and tracemonkey. (a=blockers)
author: Chris Leary <cdleary@mozilla.com>
Thu, 20 Jan 2011 20:47:21 -0800
changeset 61068 aa618e93942e2491f5f19d14b70d13b12fcaad13
parent 61042 c103cd525dee0ff62e37446faa7cebcf9be06f81 (current diff)
parent 61067 64274de90e2d279d923b89199d761d197b1feec8 (diff)
child 61069 b7ea3d9683bad08798a619a99f13297c586fd88f
child 61204 245900601090df69057da0f37a93be6fd8683b4d
push id: unknown
push user: unknown
push date: unknown
reviewers: blockers
milestone: 2.0b10pre
js/src/methodjit/Compiler.cpp
js/src/xpconnect/loader/mozJSComponentLoader.cpp
--- a/content/events/src/nsEventListenerService.cpp
+++ b/content/events/src/nsEventListenerService.cpp
@@ -179,19 +179,19 @@ nsEventListenerInfo::GetDebugObject(nsIS
         // Extra block to finish the auto request before calling pop
         JSAutoRequest ar(cx);
 
         jsval v = JSVAL_NULL;
         if (GetJSVal(&v)) {
           nsCOMPtr<jsdIValue> jsdValue;
           jsd->WrapJSValue(v, getter_AddRefs(jsdValue));
           *aRetVal = jsdValue.forget().get();
-          return NS_OK;
         }
       }
+      stack->Pop(&cx);
     }
   }
 #endif
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
--- a/js/src/assembler/assembler/MacroAssemblerCodeRef.h
+++ b/js/src/assembler/assembler/MacroAssemblerCodeRef.h
@@ -141,17 +141,19 @@ public:
     }
 
     explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
         : m_value(ra.value())
     {
         ASSERT_VALID_CODE_POINTER(m_value);
     }
 
-    void* executableAddress() const { return m_value; }
+    void* executableAddress() const {
+        return m_value;
+    }
 #if WTF_CPU_ARM_THUMB2
     // To use this pointer as a data address remove the decoration.
     void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
 #else
     void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; }
 #endif
 
     bool operator!()
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug599854.js
@@ -0,0 +1,19 @@
+function assertEqArray(actual, expected) {
+    if (actual.length != expected.length) {
+        throw new Error(
+            "array lengths not equal: got " +
+            uneval(actual) + ", expected " + uneval(expected));
+    }
+
+    for (var i = 0; i < actual.length; ++i) {
+        if (actual[i] != expected[i]) {
+        throw new Error(
+            "arrays not equal at element " + i + ": got " +
+            uneval(actual) + ", expected " + uneval(expected));
+        }
+    }
+}
+
+assertEqArray(/(?:(?:(")(c)")?)*/.exec('"c"'), [ '"c"', '"', "c" ]);
+assertEqArray(/(?:(?:a*?(")(c)")?)*/.exec('"c"'), [ '"c"', '"', "c" ]);
+assertEqArray(/<script\s*(?![^>]*type=['"]?(?:dojo\/|text\/html\b))(?:[^>]*?(?:src=(['"]?)([^>]*?)\1[^>]*)?)*>([\s\S]*?)<\/script>/gi.exec('<script type="text/javascript" src="..."></script>'), ['<script type="text/javascript" src="..."></script>', '"', "...", ""]);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug606882-1.js
@@ -0,0 +1,3 @@
+// don't crash
+
+"ABC".match("A+(?:X?(?:|(?:))(?:(?:B)?C+w?w?)?)*");
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug606882-2.js
@@ -0,0 +1,16 @@
+// don't crash
+var book = 'Ps';
+var pattern =   "(?:"
++                   "(?:"
++                       "(?:"
++                           "(?:-|)"
++                           "\\s?"
++                       ")"
++                       "|"
++                   ")"
++                   " ?"
++                   "\\d+"
++                   "\\w?"
++               ")*";
+var re = new RegExp(pattern);
+'8:5-8'.match(re);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/testMethodWriteBarrier4.js
@@ -0,0 +1,12 @@
+var z = 0;
+function f() {
+    this.b = function() {};
+    this.b = undefined;
+    if (z++ > HOTLOOP)
+        this.b();
+}
+
+try {
+    for (var i = 0; i < HOTLOOP + 2; i++)
+        new f();
+} catch (exc) {}
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -5702,16 +5702,42 @@ JS_SetErrorReporter(JSContext *cx, JSErr
     older = cx->errorReporter;
     cx->errorReporter = er;
     return older;
 }
 
 /************************************************************************/
 
 /*
+ * Dates.
+ */
+JS_PUBLIC_API(JSObject *)
+JS_NewDateObject(JSContext *cx, int year, int mon, int mday, int hour, int min, int sec)
+{
+    CHECK_REQUEST(cx);
+    return js_NewDateObject(cx, year, mon, mday, hour, min, sec);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewDateObjectMsec(JSContext *cx, jsdouble msec)
+{
+    CHECK_REQUEST(cx);
+    return js_NewDateObjectMsec(cx, msec);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ObjectIsDate(JSContext *cx, JSObject *obj)
+{
+    JS_ASSERT(obj);
+    return obj->isDate();
+}
+
+/************************************************************************/
+
+/*
  * Regular Expressions.
  */
 JS_PUBLIC_API(JSObject *)
 JS_NewRegExpObject(JSContext *cx, JSObject *obj, char *bytes, size_t length, uintN flags)
 {
     CHECK_REQUEST(cx);
     jschar *chars = js_InflateString(cx, bytes, &length);
     if (!chars)
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -3592,16 +3592,34 @@ struct JSErrorReport {
                                               JSREPORT_STRICT_MODE_ERROR) != 0)
 
 extern JS_PUBLIC_API(JSErrorReporter)
 JS_SetErrorReporter(JSContext *cx, JSErrorReporter er);
 
 /************************************************************************/
 
 /*
+ * Dates.
+ */
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewDateObject(JSContext *cx, int year, int mon, int mday, int hour, int min, int sec);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewDateObjectMsec(JSContext *cx, jsdouble msec);
+
+/*
+ * Infallible predicate to test whether obj is a date object.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ObjectIsDate(JSContext *cx, JSObject *obj);
+
+/************************************************************************/
+
+/*
  * Regular Expressions.
  */
 #define JSREG_FOLD      0x01    /* fold uppercase to lowercase */
 #define JSREG_GLOB      0x02    /* global exec, creates array of matches */
 #define JSREG_MULTILINE 0x04    /* treat ^ and $ as begin and end of line */
 #define JSREG_STICKY    0x08    /* only match starting at lastIndex */
 #define JSREG_FLAT      0x10    /* parse as a flat regexp */
 #define JSREG_NOCOMPILE 0x20    /* do not try to compile to native code */
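
A short embedder-side sketch of the Date entry points declared above (assumes
an initialized JSContext *cx and a global object |global| held inside a
request; illustration only, not part of this patch):

    // Create a Date object and test it with the new public APIs.
    JSObject *date = JS_NewDateObject(cx, 2011, 0, 20, 20, 47, 21);
    if (date && JS_ObjectIsDate(cx, date)) {
        jsval v = OBJECT_TO_JSVAL(date);
        JS_SetProperty(cx, global, "when", &v);   // expose it to script
    }
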
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -131,27 +131,23 @@ static const size_t MAX_NATIVE_STACK_SLO
 static const size_t MAX_CALL_STACK_ENTRIES = 500;
 static const size_t MAX_GLOBAL_SLOTS = 4096;
 static const size_t GLOBAL_SLOTS_BUFFER_SIZE = MAX_GLOBAL_SLOTS + 1;
 static const size_t MAX_SLOW_NATIVE_EXTRA_SLOTS = 16;
 
 /* Forward declarations of tracer types. */
 class VMAllocator;
 class FrameInfoCache;
-struct REHashFn;
-struct REHashKey;
 struct FrameInfo;
 struct VMSideExit;
 struct TreeFragment;
 struct TracerState;
 template<typename T> class Queue;
 typedef Queue<uint16> SlotList;
 class TypeMap;
-struct REFragment;
-typedef nanojit::HashMap<REHashKey, REFragment*, REHashFn> REHashMap;
 class LoopProfile;
 
 #if defined(JS_JIT_SPEW) || defined(DEBUG)
 struct FragPI;
 typedef nanojit::HashMap<uint32, FragPI, nanojit::DefaultHash<uint32> > FragStatsMap;
 #endif
 
 namespace mjit {
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -234,36 +234,31 @@ struct TraceMonitor {
 
     /*
      * If nonzero, do not flush the JIT cache after a deep bail. That would
      * free JITted code pages that we will later return to. Instead, set the
      * needFlush flag so that it can be flushed later.
      */
     JSBool                  needFlush;
 
-    /*
-     * Fragment map for the regular expression compiler.
-     */
-    REHashMap*              reFragments;
-
     // Cached temporary typemap to avoid realloc'ing every time we create one.
     // This must be used in only one place at a given time. It must be cleared
     // before use.
     TypeMap*                cachedTempTypeMap;
 
     /* Scripts with recorded fragments. */
     TracedScriptSet         tracedScripts;
 
 #ifdef DEBUG
     /* Fields needed for fragment/guard profiling. */
     nanojit::Seq<nanojit::Fragment*>* branches;
     uint32                  lastFragID;
     /*
-     * profAlloc has a lifetime which spans exactly from js_InitJIT to
-     * js_FinishJIT.
+     * profAlloc has a lifetime which spans exactly from InitJIT to
+     * FinishJIT.
      */
     VMAllocator*            profAlloc;
     FragStatsMap*           profTab;
 #endif
 
     bool ontrace() const {
         return !!tracecx;
     }
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -136,16 +136,19 @@ PurgeCallICs(JSContext *cx, JSScript *st
             script->jitCtor->nukeScriptDependentICs();
     }
 }
 #endif
 
 JS_FRIEND_API(JSBool)
 js_SetDebugMode(JSContext *cx, JSBool debug)
 {
+    if (!cx->compartment)
+        return JS_TRUE;
+
     cx->compartment->debugMode = debug;
 #ifdef JS_METHODJIT
     for (JSScript *script = (JSScript *)cx->compartment->scripts.next;
          &script->links != &cx->compartment->scripts;
          script = (JSScript *)script->links.next) {
         if (script->debugMode != !!debug &&
             script->hasJITCode() &&
             !IsScriptLive(cx, script)) {
@@ -1919,23 +1922,24 @@ JS_StopProfiling()
     Probes::stopProfiling();
 }
 
 #ifdef MOZ_PROFILING
 
 static JSBool
 StartProfiling(JSContext *cx, uintN argc, jsval *vp)
 {
-    JS_SET_RVAL(cx, vp, BOOLEAN_TO_JSVAL(Probes::startProfiling()));
+    JS_SET_RVAL(cx, vp, BOOLEAN_TO_JSVAL(JS_StartProfiling()));
     return true;
 }
 
 static JSBool
 StopProfiling(JSContext *cx, uintN argc, jsval *vp)
 {
+    JS_StopProfiling();
     JS_SET_RVAL(cx, vp, JSVAL_VOID);
     return true;
 }
 
 #ifdef MOZ_SHARK
 
 static JSBool
 IgnoreAndReturnTrue(JSContext *cx, uintN argc, jsval *vp)
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -2157,67 +2157,63 @@ GCHelperThread::doSweep()
 #endif /* JS_THREADSAFE */
 
 static void
 SweepCrossCompartmentWrappers(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime;
     /*
      * Figure out how much JIT code should be released from inactive compartments.
-     * If multiple eighth-lifes have passed, compound the release interval linearly;
+     * If multiple eighth-lives have passed, compound the release interval linearly;
      * if enough time has passed, all inactive JIT code will be released.
      */
     uint32 releaseInterval = 0;
     int64 now = PRMJ_Now();
     if (now >= rt->gcJitReleaseTime) {
         releaseInterval = 8;
         while (now >= rt->gcJitReleaseTime) {
             if (--releaseInterval == 1)
                 rt->gcJitReleaseTime = now;
             rt->gcJitReleaseTime += JIT_SCRIPT_EIGHTH_LIFETIME;
         }
     }
 
     /* Remove dead wrappers from the compartment map. */
-    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c) {
+    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c)
         (*c)->sweep(cx, releaseInterval);
-    }
-    
 }
 
 static void
 SweepCompartments(JSContext *cx, JSGCInvocationKind gckind)
 {
     JSRuntime *rt = cx->runtime;
     JSCompartmentCallback callback = rt->compartmentCallback;
-    JSCompartment **read = rt->compartments.begin();
+
+    /* Skip the atomsCompartment. */
+    JSCompartment **read = rt->compartments.begin() + 1;
     JSCompartment **end = rt->compartments.end();
     JSCompartment **write = read;
-
-    /* Delete atomsCompartment only during runtime shutdown */
-    rt->atomsCompartment->marked = true;
+    JS_ASSERT(rt->compartments.length() >= 1);
+    JS_ASSERT(*rt->compartments.begin() == rt->atomsCompartment);
 
     while (read < end) {
-        JSCompartment *compartment = (*read++);
-        if (compartment->marked) {
-            compartment->marked = false;
-            *write++ = compartment;
-        } else {
+        JSCompartment *compartment = *read++;
+
+        /* Unmarked compartments that still contain marked objects don't get deleted, unless a LAST_CONTEXT GC is performed. */
+        if ((!compartment->marked && compartment->arenaListsAreEmpty()) || gckind == GC_LAST_CONTEXT) {
             JS_ASSERT(compartment->freeLists.isEmpty());
-            if (compartment->arenaListsAreEmpty() || gckind == GC_LAST_CONTEXT) {
-                if (callback)
-                    (void) callback(cx, compartment, JSCOMPARTMENT_DESTROY);
-                if (compartment->principals)
-                    JSPRINCIPALS_DROP(cx, compartment->principals);
-                js_delete(compartment);
-            } else {
-                compartment->marked = false;
-                *write++ = compartment;
-            }
+            if (callback)
+                (void) callback(cx, compartment, JSCOMPARTMENT_DESTROY);
+            if (compartment->principals)
+                JSPRINCIPALS_DROP(cx, compartment->principals);
+            js_delete(compartment);
+            continue;
         }
+        compartment->marked = false;
+        *write++ = compartment;
     }
     rt->compartments.resize(write - rt->compartments.begin());
 }
 
 /*
  * Common cache invalidation and so forth that must be done before GC. Even if
  * GCUntilDone calls GC several times, this work needs to be done only once.
  */
@@ -2349,17 +2345,16 @@ MarkAndSweepCompartment(JSContext *cx, J
 #endif
 
     /*
      * We finalize iterators before other objects so the iterator can use the
      * object which properties it enumerates over to finalize the enumeration
      * state. We finalize objects before other GC things to ensure that
      * object's finalizer can access them even if they will be freed.
      */
-
     comp->sweep(cx, 0);
 
     comp->finalizeObjectArenaLists(cx);
     TIMESTAMP(sweepObjectEnd);
 
     comp->finalizeStringArenaLists(cx);
     TIMESTAMP(sweepStringEnd);
 
@@ -2459,24 +2454,23 @@ MarkAndSweep(JSContext *cx, JSGCInvocati
     SweepCrossCompartmentWrappers(cx);
 
     /*
      * We finalize iterators before other objects so the iterator can use the
      * object which properties it enumerates over to finalize the enumeration
      * state. We finalize objects before other GC things to ensure that
      * object's finalizer can access them even if they will be freed.
      */
-
-    for (JSCompartment **comp = rt->compartments.begin(); comp != rt->compartments.end(); comp++)
-        (*comp)->finalizeObjectArenaLists(cx);
+    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); c++)
+        (*c)->finalizeObjectArenaLists(cx);
 
     TIMESTAMP(sweepObjectEnd);
 
-    for (JSCompartment **comp = rt->compartments.begin(); comp != rt->compartments.end(); comp++)
-        (*comp)->finalizeStringArenaLists(cx);
+    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); c++)
+        (*c)->finalizeStringArenaLists(cx);
 
     TIMESTAMP(sweepStringEnd);
 
     SweepCompartments(cx, gckind);
 
     /*
      * Sweep the runtime's property trees after finalizing objects, in case any
      * had watchpoints referencing tree nodes.
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -2175,43 +2175,41 @@ JS_STATIC_ASSERT(JSOP_INCNAME_LENGTH == 
  * Inline fast paths for iteration. js_IteratorMore and js_IteratorNext handle
  * all cases, but we inline the most frequently taken paths here.
  */
 static inline bool
 IteratorMore(JSContext *cx, JSObject *iterobj, bool *cond, Value *rval)
 {
     if (iterobj->getClass() == &js_IteratorClass) {
         NativeIterator *ni = (NativeIterator *) iterobj->getPrivate();
-        *cond = (ni->props_cursor < ni->props_end);
-    } else {
-        if (!js_IteratorMore(cx, iterobj, rval))
-            return false;
-        *cond = rval->isTrue();
-    }
+        if (ni->isKeyIter()) {
+            *cond = (ni->props_cursor < ni->props_end);
+            return true;
+        }
+    }
+    if (!js_IteratorMore(cx, iterobj, rval))
+        return false;
+    *cond = rval->isTrue();
     return true;
 }
 
 static inline bool
 IteratorNext(JSContext *cx, JSObject *iterobj, Value *rval)
 {
     if (iterobj->getClass() == &js_IteratorClass) {
         NativeIterator *ni = (NativeIterator *) iterobj->getPrivate();
-        JS_ASSERT(ni->props_cursor < ni->props_end);
         if (ni->isKeyIter()) {
-            jsid id = *ni->currentKey();
+            JS_ASSERT(ni->props_cursor < ni->props_end);
+            jsid id = *ni->current();
             if (JSID_IS_ATOM(id)) {
                 rval->setString(JSID_TO_STRING(id));
-                ni->incKeyCursor();
+                ni->incCursor();
                 return true;
             }
             /* Take the slow path if we have to stringify a numeric property name. */
-        } else {
-            *rval = *ni->currentValue();
-            ni->incValueCursor();
-            return true;
         }
     }
     return js_IteratorNext(cx, iterobj, rval);
 }
 
 static inline bool
 ScriptPrologue(JSContext *cx, JSStackFrame *fp)
 {
--- a/js/src/jsiter.cpp
+++ b/js/src/jsiter.cpp
@@ -113,20 +113,17 @@ Class js_IteratorClass = {
         iterator_iterator,
         NULL        /* unused */
     }
 };
 
 void
 NativeIterator::mark(JSTracer *trc)
 {
-    if (isKeyIter())
-        MarkIdRange(trc, beginKey(), endKey(), "props");
-    else
-        MarkValueRange(trc, beginValue(), endValue(), "props");
+    MarkIdRange(trc, begin(), end(), "props");
     if (obj)
         MarkObject(trc, *obj, "obj");
 }
 
 static void
 iterator_finalize(JSContext *cx, JSObject *obj)
 {
     JS_ASSERT(obj->getClass() == &js_IteratorClass);
@@ -167,56 +164,20 @@ NewKeyValuePair(JSContext *cx, jsid id, 
 
     JSObject *aobj = NewDenseCopiedArray(cx, 2, vec);
     if (!aobj)
         return false;
     rval->setObject(*aobj);
     return true;
 }
 
-struct KeyEnumeration
-{
-    typedef AutoIdVector ResultVector;
-
-    static JS_ALWAYS_INLINE bool
-    append(JSContext *, AutoIdVector &keys, JSObject *, jsid id, uintN flags)
-    {
-        JS_ASSERT((flags & JSITER_FOREACH) == 0);
-        return keys.append(id);
-    }
-};
-
-struct ValueEnumeration
-{
-    typedef AutoValueVector ResultVector;
-
-    static JS_ALWAYS_INLINE bool
-    append(JSContext *cx, AutoValueVector &vals, JSObject *obj, jsid id, uintN flags)
-    {
-        JS_ASSERT(flags & JSITER_FOREACH);
-
-        if (!vals.growBy(1))
-            return false;
-
-        /* Do the lookup on the original object instead of the prototype. */
-        Value *vp = vals.end() - 1;
-        if (!obj->getProperty(cx, id, vp))
-            return false;
-        if ((flags & JSITER_KEYVALUE) && !NewKeyValuePair(cx, id, *vp, vp))
-            return false;
-
-        return true;
-    }
-};
-
-template <class EnumPolicy>
 static inline bool
 Enumerate(JSContext *cx, JSObject *obj, JSObject *pobj, jsid id,
           bool enumerable, bool sharedPermanent, uintN flags, IdSet& ht,
-          typename EnumPolicy::ResultVector *props)
+          AutoIdVector *props)
 {
     IdSet::AddPtr p = ht.lookupForAdd(id);
     JS_ASSERT_IF(obj == pobj && !obj->isProxy(), !p);
 
     /* If we've already seen this, we definitely won't add it. */
     if (JS_UNLIKELY(!!p))
         return true;
 
@@ -239,73 +200,70 @@ Enumerate(JSContext *cx, JSObject *obj, 
          */
         if (!pobj->getProto() && id == ATOM_TO_JSID(cx->runtime->atomState.protoAtom))
             return true;
         if (pobj != obj && !(sharedPermanent && pobj->getClass() == obj->getClass()))
             return true;
     }
 
     if (enumerable || (flags & JSITER_HIDDEN))
-        return EnumPolicy::append(cx, *props, obj, id, flags);
+        return props->append(id);
 
     return true;
 }
 
-template <class EnumPolicy>
 static bool
 EnumerateNativeProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uintN flags, IdSet &ht,
-                          typename EnumPolicy::ResultVector *props)
+                          AutoIdVector *props)
 {
     size_t initialLength = props->length();
 
     /* Collect all unique properties from this object's scope. */
     for (Shape::Range r = pobj->lastProperty()->all(); !r.empty(); r.popFront()) {
         const Shape &shape = r.front();
 
         if (!JSID_IS_DEFAULT_XML_NAMESPACE(shape.id) &&
             !shape.isAlias() &&
-            !Enumerate<EnumPolicy>(cx, obj, pobj, shape.id, shape.enumerable(),
-                                   shape.isSharedPermanent(), flags, ht, props))
+            !Enumerate(cx, obj, pobj, shape.id, shape.enumerable(),
+                       shape.isSharedPermanent(), flags, ht, props))
         {
             return false;
         }
     }
 
     Reverse(props->begin() + initialLength, props->end());
     return true;
 }
 
-template <class EnumPolicy>
 static bool
 EnumerateDenseArrayProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uintN flags,
-                              IdSet &ht, typename EnumPolicy::ResultVector *props)
+                              IdSet &ht, AutoIdVector *props)
 {
-    if (!Enumerate<EnumPolicy>(cx, obj, pobj, ATOM_TO_JSID(cx->runtime->atomState.lengthAtom), false, true,
-                               flags, ht, props)) {
+    if (!Enumerate(cx, obj, pobj, ATOM_TO_JSID(cx->runtime->atomState.lengthAtom), false, true,
+                   flags, ht, props)) {
         return false;
     }
 
     if (pobj->getArrayLength() > 0) {
         size_t capacity = pobj->getDenseArrayCapacity();
         Value *vp = pobj->getDenseArrayElements();
         for (size_t i = 0; i < capacity; ++i, ++vp) {
             if (!vp->isMagic(JS_ARRAY_HOLE)) {
                 /* Dense arrays never get so large that i would not fit into an integer id. */
-                if (!Enumerate<EnumPolicy>(cx, obj, pobj, INT_TO_JSID(i), true, false, flags, ht, props))
+                if (!Enumerate(cx, obj, pobj, INT_TO_JSID(i), true, false, flags, ht, props))
                     return false;
             }
         }
     }
 
     return true;
 }
 
-template <class EnumPolicy>
 static bool
-Snapshot(JSContext *cx, JSObject *obj, uintN flags, typename EnumPolicy::ResultVector *props)
+Snapshot(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector *props)
 {
     /*
      * FIXME: Bug 575997 - We won't need to initialize this hash table if
      *        (flags & JSITER_OWNONLY) when we eliminate inheritance of
      *        shared-permanent properties as own properties.
      */
     IdSet ht(cx);
     if (!ht.init(32))
@@ -314,53 +272,53 @@ Snapshot(JSContext *cx, JSObject *obj, u
     JSObject *pobj = obj;
     do {
         Class *clasp = pobj->getClass();
         if (pobj->isNative() &&
             !pobj->getOps()->enumerate &&
             !(clasp->flags & JSCLASS_NEW_ENUMERATE)) {
             if (!clasp->enumerate(cx, pobj))
                 return false;
-            if (!EnumerateNativeProperties<EnumPolicy>(cx, obj, pobj, flags, ht, props))
+            if (!EnumerateNativeProperties(cx, obj, pobj, flags, ht, props))
                 return false;
         } else if (pobj->isDenseArray()) {
-            if (!EnumerateDenseArrayProperties<EnumPolicy>(cx, obj, pobj, flags, ht, props))
+            if (!EnumerateDenseArrayProperties(cx, obj, pobj, flags, ht, props))
                 return false;
         } else {
             if (pobj->isProxy()) {
                 AutoIdVector proxyProps(cx);
                 if (flags & JSITER_OWNONLY) {
                     if (!JSProxy::keys(cx, pobj, proxyProps))
                         return false;
                 } else {
                     if (!JSProxy::enumerate(cx, pobj, proxyProps))
                         return false;
                 }
                 for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
-                    if (!Enumerate<EnumPolicy>(cx, obj, pobj, proxyProps[n], true, false, flags, ht, props))
+                    if (!Enumerate(cx, obj, pobj, proxyProps[n], true, false, flags, ht, props))
                         return false;
                 }
                 /* Proxy objects enumerate the prototype on their own, so we are done here. */
                 break;
             }
             Value state;
             JSIterateOp op = (flags & JSITER_HIDDEN) ? JSENUMERATE_INIT_ALL : JSENUMERATE_INIT;
             if (!pobj->enumerate(cx, op, &state, NULL))
                 return false;
             if (state.isMagic(JS_NATIVE_ENUMERATE)) {
-                if (!EnumerateNativeProperties<EnumPolicy>(cx, obj, pobj, flags, ht, props))
+                if (!EnumerateNativeProperties(cx, obj, pobj, flags, ht, props))
                     return false;
             } else {
                 while (true) {
                     jsid id;
                     if (!pobj->enumerate(cx, JSENUMERATE_NEXT, &state, &id))
                         return false;
                     if (state.isNull())
                         break;
-                    if (!Enumerate<EnumPolicy>(cx, obj, pobj, id, true, false, flags, ht, props))
+                    if (!Enumerate(cx, obj, pobj, id, true, false, flags, ht, props))
                         return false;
                 }
             }
         }
 
         if (JS_UNLIKELY(pobj->isXML()))
             break;
     } while ((pobj = pobj->getProto()) != NULL);
@@ -385,17 +343,17 @@ VectorToIdArray(JSContext *cx, AutoIdVec
     memcpy(ida->vector, props.begin(), idsz);
     *idap = ida;
     return true;
 }
 
 JS_FRIEND_API(bool)
 GetPropertyNames(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector *props)
 {
-    return Snapshot<KeyEnumeration>(cx, obj, flags & (JSITER_OWNONLY | JSITER_HIDDEN), props);
+    return Snapshot(cx, obj, flags & (JSITER_OWNONLY | JSITER_HIDDEN), props);
 }
 
 }
 
 static inline bool
 GetCustomIterator(JSContext *cx, JSObject *obj, uintN flags, Value *vp)
 {
     /* Check whether we have a valid __iterator__ method. */
@@ -467,45 +425,30 @@ NewIteratorObject(JSContext *cx, uintN f
         obj->setMap(cx->runtime->emptyEnumeratorShape);
         return obj;
     }
 
     return NewBuiltinClassInstance(cx, &js_IteratorClass);
 }
 
 NativeIterator *
-NativeIterator::allocateKeyIterator(JSContext *cx, uint32 slength, const AutoIdVector &props)
+NativeIterator::allocateIterator(JSContext *cx, uint32 slength, const AutoIdVector &props)
 {
     size_t plength = props.length();
     NativeIterator *ni = (NativeIterator *)
         cx->malloc(sizeof(NativeIterator) + plength * sizeof(jsid) + slength * sizeof(uint32));
     if (!ni)
         return NULL;
     ni->props_array = ni->props_cursor = (jsid *) (ni + 1);
     ni->props_end = (jsid *)ni->props_array + plength;
     if (plength)
         memcpy(ni->props_array, props.begin(), plength * sizeof(jsid));
     return ni;
 }
 
-NativeIterator *
-NativeIterator::allocateValueIterator(JSContext *cx, const AutoValueVector &props)
-{
-    size_t plength = props.length();
-    NativeIterator *ni = (NativeIterator *)
-        cx->malloc(sizeof(NativeIterator) + plength * sizeof(Value));
-    if (!ni)
-        return NULL;
-    ni->props_array = ni->props_cursor = (Value *) (ni + 1);
-    ni->props_end = (Value *)ni->props_array + plength;
-    if (plength)
-        memcpy(ni->props_array, props.begin(), plength * sizeof(Value));
-    return ni;
-}
-
 inline void
 NativeIterator::init(JSObject *obj, uintN flags, uint32 slength, uint32 key)
 {
     this->obj = obj;
     this->flags = flags;
     this->shapes_array = (uint32 *) this->props_end;
     this->shapes_length = slength;
     this->shapes_key = key;
@@ -529,17 +472,17 @@ VectorToKeyIterator(JSContext *cx, JSObj
                     uint32 slength, uint32 key, Value *vp)
 {
     JS_ASSERT(!(flags & JSITER_FOREACH));
 
     JSObject *iterobj = NewIteratorObject(cx, flags);
     if (!iterobj)
         return false;
 
-    NativeIterator *ni = NativeIterator::allocateKeyIterator(cx, slength, keys);
+    NativeIterator *ni = NativeIterator::allocateIterator(cx, slength, keys);
     if (!ni)
         return false;
     ni->init(obj, flags, slength, key);
 
     if (slength) {
         /*
          * Fill in the shape array from scratch.  We can't use the array that was
          * computed for the cache lookup earlier, as constructing iterobj could
@@ -567,26 +510,26 @@ namespace js {
 
 bool
 VectorToKeyIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector &props, Value *vp)
 {
     return VectorToKeyIterator(cx, obj, flags, props, 0, 0, vp);
 }
 
 bool
-VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, AutoValueVector &vals,
+VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector &keys,
                       Value *vp)
 {
     JS_ASSERT(flags & JSITER_FOREACH);
 
     JSObject *iterobj = NewIteratorObject(cx, flags);
     if (!iterobj)
         return false;
 
-    NativeIterator *ni = NativeIterator::allocateValueIterator(cx, vals);
+    NativeIterator *ni = NativeIterator::allocateIterator(cx, 0, keys);
     if (!ni)
         return false;
     ni->init(obj, flags, 0, 0);
 
     iterobj->setNativeIterator(ni);
     vp->setObject(*iterobj);
 
     RegisterEnumerator(cx, iterobj, ni);
@@ -594,30 +537,17 @@ VectorToValueIterator(JSContext *cx, JSO
 }
 
 bool
 EnumeratedIdVectorToIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector &props, Value *vp)
 {
     if (!(flags & JSITER_FOREACH))
         return VectorToKeyIterator(cx, obj, flags, props, vp);
 
-    /* For for-each iteration, we need to look up the value of each id. */
-
-    size_t plength = props.length();
-
-    AutoValueVector vals(cx);
-    if (!vals.reserve(plength))
-        return NULL;
-
-    for (size_t i = 0; i < plength; ++i) {
-        if (!ValueEnumeration::append(cx, vals, obj, props[i], flags))
-            return false;
-    }
-
-    return VectorToValueIterator(cx, obj, flags, vals, vp);
+    return VectorToValueIterator(cx, obj, flags, props, vp);
 }
 
 typedef Vector<uint32, 8> ShapeVector;
 
 static inline void
 UpdateNativeIterator(NativeIterator *ni, JSObject *obj)
 {
     // Update the object for which the native iterator is associated, so
@@ -713,26 +643,25 @@ GetIterator(JSContext *cx, JSObject *obj
         if (!GetCustomIterator(cx, obj, flags, vp))
             return false;
         if (!vp->isUndefined())
             return true;
     }
 
     /* NB: for (var p in null) succeeds by iterating over no properties. */
 
+    AutoIdVector keys(cx);
     if (flags & JSITER_FOREACH) {
-        AutoValueVector vals(cx);
-        if (JS_LIKELY(obj != NULL) && !Snapshot<ValueEnumeration>(cx, obj, flags, &vals))
+        if (JS_LIKELY(obj != NULL) && !Snapshot(cx, obj, flags, &keys))
             return false;
         JS_ASSERT(shapes.empty());
-        if (!VectorToValueIterator(cx, obj, flags, vals, vp))
+        if (!VectorToValueIterator(cx, obj, flags, keys, vp))
             return false;
     } else {
-        AutoIdVector keys(cx);
-        if (JS_LIKELY(obj != NULL) && !Snapshot<KeyEnumeration>(cx, obj, flags, &keys))
+        if (JS_LIKELY(obj != NULL) && !Snapshot(cx, obj, flags, &keys))
             return false;
         if (!VectorToKeyIterator(cx, obj, flags, keys, shapes.length(), key, vp))
             return false;
     }
 
     JSObject *iterobj = &vp->toObject();
 
     /* Cache the iterator object if possible. */
@@ -901,18 +830,18 @@ SuppressDeletedPropertyHelper(JSContext 
 {
     JSObject *iterobj = cx->enumerators;
     while (iterobj) {
       again:
         NativeIterator *ni = iterobj->getNativeIterator();
         /* This only works for identified surpressed keys, not values. */
         if (ni->isKeyIter() && ni->obj == obj && ni->props_cursor < ni->props_end) {
             /* Check whether id is still to come. */
-            jsid *props_cursor = ni->currentKey();
-            jsid *props_end = ni->endKey();
+            jsid *props_cursor = ni->current();
+            jsid *props_end = ni->end();
             for (jsid *idp = props_cursor; idp < props_end; ++idp) {
                 if (predicate(*idp)) {
                     /*
                      * Check whether another property along the prototype chain
                      * became visible as a result of this deletion.
                      */
                     if (obj->getProto()) {
                         AutoObjectRooter proto(cx, obj->getProto());
@@ -940,20 +869,20 @@ SuppressDeletedPropertyHelper(JSContext 
                         goto again;
 
                     /*
                      * No property along the prototype chain stepped in to take the
                      * property's place, so go ahead and delete id from the list.
                      * If it is the next property to be enumerated, just skip it.
                      */
                     if (idp == props_cursor) {
-                        ni->incKeyCursor();
+                        ni->incCursor();
                     } else {
                         memmove(idp, idp + 1, (props_end - (idp + 1)) * sizeof(jsid));
-                        ni->props_end = ni->endKey() - 1;
+                        ni->props_end = ni->end() - 1;
                     }
                     if (predicate.matchesAtMostOne())
                         break;
                 }
             }
         }
         iterobj = ni->next;
     }
@@ -992,45 +921,56 @@ js_SuppressDeletedIndexProperties(JSCont
 {
     return SuppressDeletedPropertyHelper(cx, obj, IndexRangePredicate(begin, end));
 }
 
 JSBool
 js_IteratorMore(JSContext *cx, JSObject *iterobj, Value *rval)
 {
     /* Fast path for native iterators */
+    NativeIterator *ni = NULL;
     if (iterobj->getClass() == &js_IteratorClass) {
-        /*
-         * Implement next directly as all the methods of native iterator are
-         * read-only and permanent.
-         */
-        NativeIterator *ni = iterobj->getNativeIterator();
-        rval->setBoolean(ni->props_cursor < ni->props_end);
-        return true;
+        /* Key iterators are handled by fast-paths. */
+        ni = iterobj->getNativeIterator();
+        bool more = ni->props_cursor < ni->props_end;
+        if (ni->isKeyIter() || !more) {
+            rval->setBoolean(more);
+            return true;
+        }
     }
 
     /* We might still have a pending value. */
     if (!cx->iterValue.isMagic(JS_NO_ITER_VALUE)) {
         rval->setBoolean(true);
         return true;
     }
 
     /* Fetch and cache the next value from the iterator. */
-    jsid id = ATOM_TO_JSID(cx->runtime->atomState.nextAtom);
-    if (!js_GetMethod(cx, iterobj, id, JSGET_METHOD_BARRIER, rval))
-        return false;
-    if (!ExternalInvoke(cx, iterobj, *rval, 0, NULL, rval)) {
-        /* Check for StopIteration. */
-        if (!cx->isExceptionPending() || !js_ValueIsStopIteration(cx->getPendingException()))
+    if (!ni) {
+        jsid id = ATOM_TO_JSID(cx->runtime->atomState.nextAtom);
+        if (!js_GetMethod(cx, iterobj, id, JSGET_METHOD_BARRIER, rval))
             return false;
+        if (!ExternalInvoke(cx, iterobj, *rval, 0, NULL, rval)) {
+            /* Check for StopIteration. */
+            if (!cx->isExceptionPending() || !js_ValueIsStopIteration(cx->getPendingException()))
+                return false;
 
-        cx->clearPendingException();
-        cx->iterValue.setMagic(JS_NO_ITER_VALUE);
-        rval->setBoolean(false);
-        return true;
+            cx->clearPendingException();
+            cx->iterValue.setMagic(JS_NO_ITER_VALUE);
+            rval->setBoolean(false);
+            return true;
+        }
+    } else {
+        JS_ASSERT(!ni->isKeyIter());
+        jsid id = *ni->current();
+        ni->incCursor();
+        if (!ni->obj->getProperty(cx, id, rval))
+            return false;
+        if ((ni->flags & JSITER_KEYVALUE) && !NewKeyValuePair(cx, id, *rval, rval))
+            return false;
     }
 
     /* Cache the value returned by iterobj.next() so js_IteratorNext() can find it. */
     JS_ASSERT(!rval->isMagic(JS_NO_ITER_VALUE));
     cx->iterValue = *rval;
     rval->setBoolean(true);
     return true;
 }
@@ -1040,40 +980,37 @@ js_IteratorNext(JSContext *cx, JSObject 
 {
     /* Fast path for native iterators */
     if (iterobj->getClass() == &js_IteratorClass) {
         /*
          * Implement next directly as all the methods of the native iterator are
          * read-only and permanent.
          */
         NativeIterator *ni = iterobj->getNativeIterator();
-        JS_ASSERT(ni->props_cursor < ni->props_end);
         if (ni->isKeyIter()) {
-            *rval = IdToValue(*ni->currentKey());
-            ni->incKeyCursor();
-        } else {
-            *rval = *ni->currentValue();
-            ni->incValueCursor();
-        }
+            JS_ASSERT(ni->props_cursor < ni->props_end);
+            *rval = IdToValue(*ni->current());
+            ni->incCursor();
 
-        if (rval->isString() || !ni->isKeyIter())
-            return true;
+            if (rval->isString())
+                return true;
 
-        JSString *str;
-        jsint i;
-        if (rval->isInt32() && (jsuint(i = rval->toInt32()) < INT_STRING_LIMIT)) {
-            str = JSString::intString(i);
-        } else {
-            str = js_ValueToString(cx, *rval);
-            if (!str)
-                return false;
+            JSString *str;
+            jsint i;
+            if (rval->isInt32() && (jsuint(i = rval->toInt32()) < INT_STRING_LIMIT)) {
+                str = JSString::intString(i);
+            } else {
+                str = js_ValueToString(cx, *rval);
+                if (!str)
+                    return false;
+            }
+
+            rval->setString(str);
+            return true;
         }
-
-        rval->setString(str);
-        return true;
     }
 
     JS_ASSERT(!cx->iterValue.isMagic(JS_NO_ITER_VALUE));
     *rval = cx->iterValue;
     cx->iterValue.setMagic(JS_NO_ITER_VALUE);
 
     return true;
 }
--- a/js/src/jsiter.h
+++ b/js/src/jsiter.h
@@ -64,79 +64,50 @@
  * Not serialized by XDR.
  */
 #define JSITER_ACTIVE     0x1000
 
 namespace js {
 
 struct NativeIterator {
     JSObject  *obj;
-    void      *props_array;
-    void      *props_cursor;
-    void      *props_end;
+    jsid      *props_array;
+    jsid      *props_cursor;
+    jsid      *props_end;
     uint32    *shapes_array;
     uint32    shapes_length;
     uint32    shapes_key;
     uint32    flags;
     JSObject  *next;  /* Forms cx->enumerators list, garbage otherwise. */
 
     bool isKeyIter() const { return (flags & JSITER_FOREACH) == 0; }
 
-    inline jsid *beginKey() const {
-        JS_ASSERT(isKeyIter());
-        return (jsid *)props_array;
+    inline jsid *begin() const {
+        return props_array;
     }
 
-    inline jsid *endKey() const {
-        JS_ASSERT(isKeyIter());
-        return (jsid *)props_end;
+    inline jsid *end() const {
+        return props_end;
     }
 
     size_t numKeys() const {
-        return endKey() - beginKey();
-    }
-
-    jsid *currentKey() const {
-        JS_ASSERT(isKeyIter());
-        return reinterpret_cast<jsid *>(props_cursor);
-    }
-
-    void incKeyCursor() {
-        JS_ASSERT(isKeyIter());
-        props_cursor = reinterpret_cast<jsid *>(props_cursor) + 1;
-    }
-
-    inline js::Value *beginValue() const {
-        JS_ASSERT(!isKeyIter());
-        return (js::Value *)props_array;
+        return end() - begin();
     }
 
-    inline js::Value *endValue() const {
-        JS_ASSERT(!isKeyIter());
-        return (js::Value *)props_end;
-    }
-
-    size_t numValues() const {
-        return endValue() - beginValue();
+    jsid *current() const {
+        JS_ASSERT(props_cursor < props_end);
+        return props_cursor;
     }
 
-    js::Value *currentValue() const {
-        JS_ASSERT(!isKeyIter());
-        return reinterpret_cast<js::Value *>(props_cursor);
+    void incCursor() {
+        props_cursor = props_cursor + 1;
     }
 
-    void incValueCursor() {
-        JS_ASSERT(!isKeyIter());
-        props_cursor = reinterpret_cast<js::Value *>(props_cursor) + 1;
-    }
-
-    static NativeIterator *allocateKeyIterator(JSContext *cx, uint32 slength,
-                                               const js::AutoIdVector &props);
-    static NativeIterator *allocateValueIterator(JSContext *cx,
-                                                 const js::AutoValueVector &props);
+    static NativeIterator *allocateIterator(JSContext *cx, uint32 slength,
+                                            const js::AutoIdVector &props);
     void init(JSObject *obj, uintN flags, uint32 slength, uint32 key);
 
     void mark(JSTracer *trc);
 };
 
 bool
 VectorToIdArray(JSContext *cx, js::AutoIdVector &props, JSIdArray **idap);
 
@@ -145,17 +116,17 @@ GetPropertyNames(JSContext *cx, JSObject
 
 bool
 GetIterator(JSContext *cx, JSObject *obj, uintN flags, js::Value *vp);
 
 bool
 VectorToKeyIterator(JSContext *cx, JSObject *obj, uintN flags, js::AutoIdVector &props, js::Value *vp);
 
 bool
-VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, js::AutoValueVector &props, js::Value *vp);
+VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, js::AutoIdVector &props, js::Value *vp);
 
 /*
  * Creates either a key or value iterator, depending on flags. For a value
  * iterator, performs value-lookup to convert the given list of jsids.
  */
 bool
 EnumeratedIdVectorToIterator(JSContext *cx, JSObject *obj, uintN flags, js::AutoIdVector &props, js::Value *vp);
 
--- a/js/src/jsparse.cpp
+++ b/js/src/jsparse.cpp
@@ -144,25 +144,37 @@ JSParseNode::become(JSParseNode *pn2)
             pnup = &(*pnup)->pn_link;
         *pnup = this;
         pn_link = pn2->pn_link;
         pn_used = true;
         pn2->pn_link = NULL;
         pn2->pn_used = false;
     }
 
-    /* If this is a function node fix up the pn_funbox->node back-pointer. */
-    if (PN_TYPE(pn2) == TOK_FUNCTION && pn2->pn_arity == PN_FUNC)
-        pn2->pn_funbox->node = this;
-
     pn_type = pn2->pn_type;
     pn_op = pn2->pn_op;
     pn_arity = pn2->pn_arity;
     pn_parens = pn2->pn_parens;
     pn_u = pn2->pn_u;
+
+    /*
+     * If any pointers are pointing to pn2, change them to point to this
+     * instead, since pn2 will be cleared and probably recycled.
+     */
+    if (PN_TYPE(this) == TOK_FUNCTION && pn_arity == PN_FUNC) {
+        /* Function node: fix up the pn_funbox->node back-pointer. */
+        JS_ASSERT(pn_funbox->node == pn2);
+        pn_funbox->node = this;
+    } else if (pn_arity == PN_LIST && !pn_head) {
+        /* Empty list: fix up the pn_tail pointer. */
+        JS_ASSERT(pn_count == 0);
+        JS_ASSERT(pn_tail == &pn2->pn_head);
+        pn_tail = &pn_head;
+    }
+
     pn2->clear();
 }
 
 void
 JSParseNode::clear()
 {
     pn_type = TOK_EOF;
     pn_op = JSOP_NOP;
@@ -3496,17 +3508,17 @@ Parser::statements()
 }
 
 JSParseNode *
 Parser::condition()
 {
     JSParseNode *pn;
 
     MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_COND);
-    pn = parenExpr(NULL, NULL);
+    pn = parenExpr();
     if (!pn)
         return NULL;
     MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_COND);
 
     /* Check for (a = b) and warn about possible (a == b) mistype. */
     if (pn->pn_type == TOK_ASSIGN &&
         pn->pn_op == JSOP_NOP &&
         !pn->pn_parens &&
@@ -5029,17 +5041,17 @@ Parser::switchStatement()
     JSBool seenDefault = JS_FALSE;
 
     JSParseNode *pn = BinaryNode::create(tc);
     if (!pn)
         return NULL;
     MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_SWITCH);
 
     /* pn1 points to the switch's discriminant. */
-    JSParseNode *pn1 = parenExpr(NULL, NULL);
+    JSParseNode *pn1 = parenExpr();
     if (!pn1)
         return NULL;
 
     MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_SWITCH);
     MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_SWITCH);
 
     /*
      * NB: we must push stmtInfo before calling GenerateBlockIdForStmtNode
@@ -5648,17 +5660,17 @@ Parser::withStatement()
         reportErrorNumber(NULL, JSREPORT_ERROR, JSMSG_STRICT_CODE_WITH);
         return NULL;
     }
 
     JSParseNode *pn = BinaryNode::create(tc);
     if (!pn)
         return NULL;
     MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_WITH);
-    JSParseNode *pn2 = parenExpr(NULL, NULL);
+    JSParseNode *pn2 = parenExpr();
     if (!pn2)
         return NULL;
     MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_WITH);
     pn->pn_left = pn2;
 
     JSParseNode *oldWith = tc->innermostWith;
     tc->innermostWith = pn;
 
@@ -6934,38 +6946,45 @@ AdjustBlockId(JSParseNode *pn, uintN adj
 bool
 CompExprTransplanter::transplant(JSParseNode *pn)
 {
     if (!pn)
         return true;
 
     switch (pn->pn_arity) {
       case PN_LIST:
-        for (JSParseNode *pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next)
-            transplant(pn2);
+        for (JSParseNode *pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+            if (!transplant(pn2))
+                return false;
+        }
         if (pn->pn_pos >= root->pn_pos)
             AdjustBlockId(pn, adjust, tc);
         break;
 
       case PN_TERNARY:
-        transplant(pn->pn_kid1);
-        transplant(pn->pn_kid2);
-        transplant(pn->pn_kid3);
+        if (!transplant(pn->pn_kid1) ||
+            !transplant(pn->pn_kid2) ||
+            !transplant(pn->pn_kid3))
+            return false;
         break;
 
       case PN_BINARY:
-        transplant(pn->pn_left);
+        if (!transplant(pn->pn_left))
+            return false;
 
         /* Binary TOK_COLON nodes can have left == right. See bug 492714. */
-        if (pn->pn_right != pn->pn_left)
-            transplant(pn->pn_right);
+        if (pn->pn_right != pn->pn_left) {
+            if (!transplant(pn->pn_right))
+                return false;
+        }
         break;
 
       case PN_UNARY:
-        transplant(pn->pn_kid);
+        if (!transplant(pn->pn_kid))
+            return false;
         break;
 
       case PN_FUNC:
       {
         /*
          * Only the first level of transplant recursion through functions needs
          * to reparent the funbox, since all descendant functions are correctly
          * linked under the top-most funbox. But every visit to this case needs
@@ -6990,17 +7009,18 @@ CompExprTransplanter::transplant(JSParse
             funbox->siblings = parent->kids;
             parent->kids = funbox;
             funbox->level = tc->staticLevel;
         }
         /* FALL THROUGH */
       }
 
       case PN_NAME:
-        transplant(pn->maybeExpr());
+        if (!transplant(pn->maybeExpr()))
+            return false;
         if (pn->pn_arity == PN_FUNC)
             --funcLevel;
 
         if (pn->pn_defn) {
             if (genexp && !BumpStaticLevel(pn, tc))
                 return false;
         } else if (pn->pn_used) {
             JS_ASSERT(pn->pn_op != JSOP_NOP);
@@ -7028,31 +7048,32 @@ CompExprTransplanter::transplant(JSParse
 #ifdef DEBUG
             JSStmtInfo *stmt = js_LexicalLookup(tc, atom, NULL);
             JS_ASSERT(!stmt || stmt != tc->topStmt);
 #endif
             if (genexp && PN_OP(dn) != JSOP_CALLEE) {
                 JS_ASSERT(!tc->decls.lookup(atom));
 
                 if (dn->pn_pos < root->pn_pos || dn->isPlaceholder()) {
-                    JSAtomListElement *ale = tc->lexdeps.add(tc->parser, dn->pn_atom);
+                    JSAtomListElement *ale = tc->lexdeps.add(tc->parser, atom);
                     if (!ale)
                         return false;
 
                     if (dn->pn_pos >= root->pn_pos) {
                         tc->parent->lexdeps.remove(tc->parser, atom);
                     } else {
-                        JSDefinition *dn2 = (JSDefinition *)NameNode::create(dn->pn_atom, tc);
+                        JSDefinition *dn2 = (JSDefinition *)NameNode::create(atom, tc);
                         if (!dn2)
                             return false;
 
-                        dn2->pn_type = dn->pn_type;
-                        dn2->pn_pos = root->pn_pos;
+                        dn2->pn_type = TOK_NAME;
+                        dn2->pn_op = JSOP_NOP;
                         dn2->pn_defn = true;
                         dn2->pn_dflags |= PND_PLACEHOLDER;
+                        dn2->pn_pos = root->pn_pos;
 
                         JSParseNode **pnup = &dn->dn_uses;
                         JSParseNode *pnu;
                         while ((pnu = *pnup) != NULL && pnu->pn_pos >= root->pn_pos) {
                             pnu->pn_lexdef = dn2;
                             dn2->pn_dflags |= pnu->pn_dflags & PND_USE2DEF_FLAGS;
                             pnup = &pnu->pn_link;
                         }
@@ -7068,17 +7089,18 @@ CompExprTransplanter::transplant(JSParse
             }
         }
 
         if (pn->pn_pos >= root->pn_pos)
             AdjustBlockId(pn, adjust, tc);
         break;
 
       case PN_NAMESET:
-        transplant(pn->pn_tree);
+        if (!transplant(pn->pn_tree))
+            return false;
         break;
     }
     return true;
 }
 
 /*
  * Starting from a |for| keyword after the first array initialiser element or
  * an expression in an open parenthesis, parse the tail of the comprehension
@@ -7275,30 +7297,32 @@ Parser::comprehensionTail(JSParseNode *k
 #if JS_HAS_GENERATOR_EXPRS
 
 /*
  * Starting from a |for| keyword after an expression, parse the comprehension
  * tail completing this generator expression. Wrap the expression at kid in a
  * generator function that is immediately called to evaluate to the generator
  * iterator that is the value of this generator expression.
  *
- * Callers pass a blank unary node via pn, which generatorExpr fills in as the
- * yield expression, which ComprehensionTail in turn wraps in a TOK_SEMI-type
- * expression-statement node that constitutes the body of the |for| loop(s) in
- * the generator function.
+ * |kid| must be the expression before the |for| keyword; we return an
+ * application of a generator function that includes the |for| loops and
+ * |if| guards, with |kid| as the operand of a |yield| expression as the
+ * innermost loop body.
  *
  * Note how unlike Python, we do not evaluate the expression to the right of
  * the first |in| in the chain of |for| heads. Instead, a generator expression
  * is merely sugar for a generator function expression and its application.
  */
 JSParseNode *
-Parser::generatorExpr(JSParseNode *pn, JSParseNode *kid)
-{
-    /* Initialize pn, connecting it to kid. */
-    JS_ASSERT(pn->pn_arity == PN_UNARY);
+Parser::generatorExpr(JSParseNode *kid)
+{
+    /* Create a |yield| node for |kid|. */
+    JSParseNode *pn = UnaryNode::create(tc);
+    if (!pn)
+        return NULL;
     pn->pn_type = TOK_YIELD;
     pn->pn_op = JSOP_YIELD;
     pn->pn_parens = true;
     pn->pn_pos = kid->pn_pos;
     pn->pn_kid = kid;
     pn->pn_hidden = true;
 
     /* Make a new node for the desugared generator function. */
@@ -7390,20 +7414,17 @@ Parser::argumentList(JSParseNode *listNo
             !argNode->pn_parens &&
             tokenStream.peekToken() == TOK_COMMA) {
             reportErrorNumber(argNode, JSREPORT_ERROR, JSMSG_BAD_GENERATOR_SYNTAX, js_yield_str);
             return JS_FALSE;
         }
 #endif
 #if JS_HAS_GENERATOR_EXPRS
         if (tokenStream.matchToken(TOK_FOR)) {
-            JSParseNode *pn = UnaryNode::create(tc);
-            if (!pn)
-                return JS_FALSE;
-            argNode = generatorExpr(pn, argNode);
+            argNode = generatorExpr(argNode);
             if (!argNode)
                 return JS_FALSE;
             if (listNode->pn_count > 1 ||
                 tokenStream.peekToken() == TOK_COMMA) {
                 reportErrorNumber(argNode, JSREPORT_ERROR, JSMSG_BAD_GENERATOR_SYNTAX,
                                   js_generator_str);
                 return JS_FALSE;
             }
@@ -8708,17 +8729,17 @@ Parser::primaryExpr(TokenKind tt, JSBool
         pn->pn_num = (jsint) tokenStream.currentToken().t_dval;
         break;
 #endif /* JS_HAS_SHARP_VARS */
 
       case TOK_LP:
       {
         JSBool genexp;
 
-        pn = parenExpr(NULL, &genexp);
+        pn = parenExpr(&genexp);
         if (!pn)
             return NULL;
         pn->pn_parens = true;
         if (!genexp)
             MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_IN_PAREN);
         break;
       }
 
@@ -8949,17 +8970,17 @@ Parser::primaryExpr(TokenKind tt, JSBool
       default:
         reportErrorNumber(NULL, JSREPORT_ERROR, JSMSG_SYNTAX_ERROR);
         return NULL;
     }
     return pn;
 }
 
 JSParseNode *
-Parser::parenExpr(JSParseNode *pn1, JSBool *genexp)
+Parser::parenExpr(JSBool *genexp)
 {
     TokenPtr begin;
     JSParseNode *pn;
 
     JS_ASSERT(tokenStream.currentToken().type == TOK_LP);
     begin = tokenStream.currentToken().pos.begin;
 
     if (genexp)
@@ -8974,22 +8995,17 @@ Parser::parenExpr(JSParseNode *pn1, JSBo
             reportErrorNumber(pn, JSREPORT_ERROR, JSMSG_BAD_GENERATOR_SYNTAX, js_yield_str);
             return NULL;
         }
         if (pn->pn_type == TOK_COMMA && !pn->pn_parens) {
             reportErrorNumber(pn->last(), JSREPORT_ERROR, JSMSG_BAD_GENERATOR_SYNTAX,
                               js_generator_str);
             return NULL;
         }
-        if (!pn1) {
-            pn1 = UnaryNode::create(tc);
-            if (!pn1)
-                return NULL;
-        }
-        pn = generatorExpr(pn1, pn);
+        pn = generatorExpr(pn);
         if (!pn)
             return NULL;
         pn->pn_pos.begin = begin;
         if (genexp) {
             if (tokenStream.getToken() != TOK_RP) {
                 reportErrorNumber(NULL, JSREPORT_ERROR, JSMSG_BAD_GENERATOR_SYNTAX,
                                   js_generator_str);
                 return NULL;
--- a/js/src/jsparse.h
+++ b/js/src/jsparse.h
@@ -1146,32 +1146,32 @@ private:
     JSParseNode *eqExpr();
     JSParseNode *relExpr();
     JSParseNode *shiftExpr();
     JSParseNode *addExpr();
     JSParseNode *mulExpr();
     JSParseNode *unaryExpr();
     JSParseNode *memberExpr(JSBool allowCallSyntax);
     JSParseNode *primaryExpr(js::TokenKind tt, JSBool afterDot);
-    JSParseNode *parenExpr(JSParseNode *pn1, JSBool *genexp);
+    JSParseNode *parenExpr(JSBool *genexp = NULL);
 
     /*
      * Additional JS parsers.
      */
     bool recognizeDirectivePrologue(JSParseNode *pn, bool *isDirectivePrologueMember);
 
     enum FunctionType { GETTER, SETTER, GENERAL };
     bool functionArguments(JSTreeContext &funtc, JSFunctionBox *funbox, JSParseNode **list);
     JSParseNode *functionBody();
     JSParseNode *functionDef(JSAtom *name, FunctionType type, uintN lambda);
 
     JSParseNode *condition();
     JSParseNode *comprehensionTail(JSParseNode *kid, uintN blockid,
                                    js::TokenKind type = js::TOK_SEMI, JSOp op = JSOP_NOP);
-    JSParseNode *generatorExpr(JSParseNode *pn, JSParseNode *kid);
+    JSParseNode *generatorExpr(JSParseNode *kid);
     JSBool argumentList(JSParseNode *listNode);
     JSParseNode *bracketedExpr();
     JSParseNode *letBlock(JSBool statement);
     JSParseNode *returnOrYield(bool useAssignExpr);
     JSParseNode *destructuringExpr(BindData *data, js::TokenKind tt);
 
 #if JS_HAS_XML_SUPPORT
     JSParseNode *endBracketedExpr();
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -128,45 +128,103 @@ getExitName(ExitType type)
 }
 #endif /* DEBUG */
 
 namespace nanojit {
 using namespace js;
 using namespace js::gc;
 using namespace js::tjit;
 
+/*
+ * This macro is just like JS_NOT_REACHED but it exists in non-debug builds
+ * too.  Its presence indicates shortcomings in jstracer's handling of some
+ * OOM situations:
+ * - OOM failures in constructors, which lack a return value to pass back a
+ *   failure code (though it can and should be done indirectly).
+ * - OOM failures in the "infallible" allocators used for Nanojit.
+ *
+ * FIXME: bug 624590 is open to fix these problems.
+ */
+#define OUT_OF_MEMORY_ABORT(msg)    JS_Assert(msg, __FILE__, __LINE__);
+
 /* Implement embedder-specific nanojit members. */
 
+/* 
+ * Nanojit requires infallible allocations most of the time.  We satisfy this
+ * by reserving some space in each allocator which is used as a fallback if
+ * js_calloc() fails.  Ideallly this reserve space should be big enough to
+ * allow for all infallible requests made to the allocator until the next OOM
+ * check occurs, but it turns out that's impossible to guarantee (though it
+ * should be unlikely).  So we abort if the reserve runs out;  this is better
+ * than allowing memory errors to occur.
+ *
+ * The space calculations are as follows... between OOM checks, each
+ * VMAllocator can do (ie. has been seen to do) the following maximum
+ * allocations on 64-bits:
+ *
+ * - dataAlloc: 31 minimum-sized chunks (MIN_CHUNK_SZB) in assm->compile()
+ *   (though arbitrarily more could occur due to LabelStateMap additions done
+ *   when handling labels):  62,248 bytes.  This one is the most likely to
+ *   overflow.
+ *
+ * - traceAlloc: 1 minimum-sized chunk:  2,008 bytes.
+ *
+ * - tempAlloc: 1 LIR code chunk (CHUNK_SZB) and 5 minimum-sized chunks for
+ *   sundry small allocations:  18,048 bytes.
+ *
+ * The reserve sizes are chosen by exceeding this by a reasonable amount.
+ * Reserves for 32-bits are slightly more than half, because most of the
+ * allocated space is used to hold pointers.
+ *
+ * FIXME: Bug 624590 is open to get rid of all this.
+ */
+static const size_t DataReserveSize  = 12500 * sizeof(uintptr_t);
+static const size_t TraceReserveSize =  5000 * sizeof(uintptr_t);
+static const size_t TempReserveSize  =  1000 * sizeof(uintptr_t);
+
 void*
-nanojit::Allocator::allocChunk(size_t nbytes)
+nanojit::Allocator::allocChunk(size_t nbytes, bool fallible)
 {
     VMAllocator *vma = (VMAllocator*)this;
-    JS_ASSERT(!vma->outOfMemory());
+    /*
+     * Nb: it's conceivable that request 1 might fail (in which case
+     * mOutOfMemory will be set) and then request 2 succeeds.  The subsequent
+     * OOM check will still fail, which is what we want, and the success of
+     * request 2 makes it less likely that the reserve space will overflow.
+     */
     void *p = js_calloc(nbytes);
-    if (!p) {
-        JS_ASSERT(nbytes < sizeof(vma->mReserve));
+    if (p) {
+        vma->mSize += nbytes;
+    } else {
         vma->mOutOfMemory = true;
-        p = (void*) &vma->mReserve[0];
-    }
-    vma->mSize += nbytes;
+        if (!fallible) {
+            p = (void *)vma->mReserveCurr;
+            vma->mReserveCurr += nbytes;
+            if (vma->mReserveCurr > vma->mReserveLimit)
+                OUT_OF_MEMORY_ABORT("nanojit::Allocator::allocChunk: out of memory");
+            memset(p, 0, nbytes);
+            vma->mSize += nbytes;
+        }
+    }
     return p;
 }
 
 void
 nanojit::Allocator::freeChunk(void *p) {
     VMAllocator *vma = (VMAllocator*)this;
-    if (p != &vma->mReserve[0])
+    if (p < vma->mReserve || uintptr_t(p) >= vma->mReserveLimit)
         js_free(p);
 }
 
 void
 nanojit::Allocator::postReset() {
     VMAllocator *vma = (VMAllocator*)this;
     vma->mOutOfMemory = false;
     vma->mSize = 0;
+    vma->mReserveCurr = uintptr_t(vma->mReserve);
 }
 
 int
 StackFilter::getTop(LIns* guard)
 {
     VMSideExit* e = (VMSideExit*)guard->record()->exit;
     return e->sp_adj;
 }
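
The reserve machinery above boils down to a simple pattern: each allocator owns a pre-allocated reserve buffer, infallible requests fall back to bump-allocating out of that reserve when js_calloc() fails, and the process aborts only if the reserve itself is exhausted before the next OOM check.  A minimal standalone sketch of that pattern follows; ReserveAllocator and its members are illustrative names, not part of this patch.

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    class ReserveAllocator {
        char*     mReserve;        // pre-allocated fallback buffer
        uintptr_t mReserveCurr;    // bump pointer into the reserve
        uintptr_t mReserveLimit;   // one past the end of the reserve
        bool      mOutOfMemory;    // checked by the client at its next OOM check

      public:
        ReserveAllocator(char* reserve, size_t reserveSize)
          : mReserve(reserve),
            mReserveCurr(uintptr_t(reserve)),
            mReserveLimit(uintptr_t(reserve + reserveSize)),
            mOutOfMemory(false)
        {}

        ~ReserveAllocator() { free(mReserve); }

        void* allocChunk(size_t nbytes, bool fallible) {
            void* p = calloc(1, nbytes);
            if (p)
                return p;
            mOutOfMemory = true;           // client flushes/aborts at its next check
            if (fallible)
                return NULL;               // fallible callers handle NULL themselves
            p = (void*) mReserveCurr;      // infallible: hand out reserve space
            mReserveCurr += nbytes;
            if (mReserveCurr > mReserveLimit)
                abort();                   // reserve exhausted; nothing left to do
            memset(p, 0, nbytes);
            return p;
        }

        bool outOfMemory() const { return mOutOfMemory; }
    };

Usage mirrors InitJIT() below: malloc a reserve sized per the constants above, hand it to the allocator's constructor, and let the destructor free it.
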
@@ -496,22 +554,16 @@ InitJITStatsClass(JSContext *cx, JSObjec
 #define AUDIT(x) (jitstats.x++)
 #else
 #define AUDIT(x) ((void)0)
 #endif /* JS_JIT_SPEW */
 
 static avmplus::AvmCore s_core = avmplus::AvmCore();
 static avmplus::AvmCore* core = &s_core;
 
-static void OutOfMemoryAbort()
-{
-    JS_NOT_REACHED("out of memory");
-    abort();
-}
-
 #ifdef JS_JIT_SPEW
 static void
 DumpPeerStability(TraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc);
 #endif
 
 /*
  * We really need a better way to configure the JIT. Shaver, where is
  * my fancy JIT object?
@@ -621,18 +673,18 @@ InitJITLogController()
 
 /* ------------------ Frag-level profiling support ------------------ */
 
 #ifdef JS_JIT_SPEW
 
 /*
  * All the allocations done by this profile data-collection and
  * display machinery, are done in TraceMonitor::profAlloc.  That is
- * emptied out at the end of js_FinishJIT.  It has a lifetime from
- * js_InitJIT to js_FinishJIT, which exactly matches the span
+ * emptied out at the end of FinishJIT.  It has a lifetime from
+ * InitJIT to FinishJIT, which exactly matches the span
  * js_FragProfiling_init to js_FragProfiling_showResults.
  */
 template<class T>
 static
 Seq<T>* reverseInPlace(Seq<T>* seq)
 {
     Seq<T>* prev = NULL;
     Seq<T>* curr = seq;
@@ -1394,17 +1446,17 @@ class FrameInfoCache
         return *p;
     }
 };
 
 FrameInfoCache::FrameInfoCache(VMAllocator *allocator)
   : allocator(allocator)
 {
     if (!set.init())
-        OutOfMemoryAbort();
+        OUT_OF_MEMORY_ABORT("FrameInfoCache::FrameInfoCache(): out of memory");
 }
 
 #define PC_HASH_COUNT 1024
 
 static void
 Blacklist(jsbytecode* pc)
 {
     AUDIT(blacklisted);
@@ -2280,17 +2332,17 @@ TraceRecorder::TraceRecorder(JSContext* 
     verbose_only( fragment->loopLabel = NULL; )
 
     /*
      * Don't change fragment->profFragID, though.  Once the identity of the
      * Fragment is set up (for profiling purposes), we can't change it.
      */
 
     if (!guardedShapeTable.init())
-        abort();
+        OUT_OF_MEMORY_ABORT("TraceRecorder::TraceRecorder: out of memory");
 
 #ifdef JS_JIT_SPEW
     debug_only_print0(LC_TMMinimal, "\n");
     debug_only_printf(LC_TMMinimal, "Recording starting from %s:%u@%u (FragID=%06u)\n",
                       tree->treeFileName, tree->treeLineNumber, tree->treePCOffset,
                       fragment->profFragID);
 
     debug_only_printf(LC_TMTracer, "globalObj=%p, shape=%d\n",
@@ -2460,17 +2512,16 @@ TraceRecorder::finishSuccessfully()
 }
 
 /* This function aborts a recorder and any pending outer recorders. */
 JS_REQUIRES_STACK TraceRecorder::AbortResult
 TraceRecorder::finishAbort(const char* reason)
 {
     JS_ASSERT(!traceMonitor->profile);
     JS_ASSERT(traceMonitor->recorder == this);
-    JS_ASSERT(!fragment->code());
 
     AUDIT(recorderAborted);
 #ifdef DEBUG
     debug_only_printf(LC_TMMinimal | LC_TMAbort,
                       "Abort recording of tree %s:%d@%d at %s:%d@%d: %s.\n",
                       tree->treeFileName,
                       tree->treeLineNumber,
                       tree->treePCOffset,
@@ -2777,28 +2828,26 @@ TraceMonitor::flush()
     frameCache->reset();
     dataAlloc->reset();
     traceAlloc->reset();
     codeAlloc->reset();
     tempAlloc->reset();
     oracle->clear();
     loopProfiles->clear();
 
-    Allocator& alloc = *dataAlloc;
-
     for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
         globalStates[i].globalShape = -1;
-        globalStates[i].globalSlots = new (alloc) SlotList(&alloc);
-    }
-
-    assembler = new (alloc) Assembler(*codeAlloc, alloc, alloc, core, &LogController, avmplus::AvmCore::config);
+        globalStates[i].globalSlots = new (*dataAlloc) SlotList(dataAlloc);
+    }
+
+    assembler = new (*dataAlloc) Assembler(*codeAlloc, *dataAlloc, *dataAlloc, core,
+                                           &LogController, avmplus::AvmCore::config);
     verbose_only( branches = NULL; )
 
     PodArrayZero(vmfragments);
-    reFragments = new (alloc) REHashMap(alloc);
     tracedScripts.clear();
 
     needFlush = JS_FALSE;
 }
 
 inline bool
 IsShapeAboutToBeFinalized(JSContext *cx, const js::Shape *shape)
 {
@@ -4513,20 +4562,20 @@ TraceRecorder::compile()
         sprintf(label, "%s:%u", filename ? filename : "<stdin>",
                 js_FramePCToLineNumber(cx, cx->fp()));
         lirbuf->printer->addrNameMap->addAddrRange(fragment, sizeof(Fragment), 0, label);
         js_free(label);
     }
 #endif
 
     Assembler *assm = traceMonitor->assembler;
-    JS_ASSERT(assm->error() == nanojit::None);
+    JS_ASSERT(!assm->error());
     assm->compile(fragment, tempAlloc(), /*optimize*/true verbose_only(, lirbuf->printer));
 
-    if (assm->error() != nanojit::None) {
+    if (assm->error()) {
         assm->setError(nanojit::None);
         debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n");
         Blacklist((jsbytecode*)tree->ip);
         return ARECORD_STOP;
     }
 
     if (outOfMemory())
         return ARECORD_STOP;
@@ -5702,17 +5751,18 @@ RecordTree(JSContext* cx, TreeFragment* 
         Backoff(cx, (jsbytecode*) localRootIP);
         return false;
     }
 
     AUDIT(recorderStarted);
 
     if (tm->outOfMemory() ||
         OverfullJITCache(cx, tm) ||
-        !tm->tracedScripts.put(cx->fp()->script())) {
+        !tm->tracedScripts.put(cx->fp()->script()))
+    {
         if (!OverfullJITCache(cx, tm))
             js_ReportOutOfMemory(cx);
         Backoff(cx, (jsbytecode*) f->root->ip);
         ResetJIT(cx, FR_OOM);
         debug_only_print0(LC_TMTracer,
                           "Out of memory recording new tree, flushing cache.\n");
         return false;
     }
@@ -7627,17 +7677,17 @@ InitJIT(TraceMonitor *tm)
     tm->profAlloc = NULL;
     /* Set up debug logging. */
     if (!did_we_set_up_debug_logging) {
         InitJITLogController();
         did_we_set_up_debug_logging = true;
     }
     /* Set up fragprofiling, if required. */
     if (LogController.lcbits & LC_FragProfile) {
-        tm->profAlloc = js_new<VMAllocator>();
+        tm->profAlloc = js_new<VMAllocator>((char*)NULL, 0); /* no reserve needed in debug builds */
         JS_ASSERT(tm->profAlloc);
         tm->profTab = new (*tm->profAlloc) FragStatsMap(*tm->profAlloc);
     }
     tm->lastFragID = 0;
 #else
     PodZero(&LogController);
 #endif
 
@@ -7679,19 +7729,23 @@ InitJIT(TraceMonitor *tm)
         return false;
 
     CHECK_ALLOC(tm->loopProfiles, js_new<LoopProfileMap>());
     if (!tm->loopProfiles->init(PC_HASH_COUNT))
         return false;
 
     tm->flushEpoch = 0;
     
-    CHECK_ALLOC(tm->dataAlloc, js_new<VMAllocator>());
-    CHECK_ALLOC(tm->traceAlloc, js_new<VMAllocator>());
-    CHECK_ALLOC(tm->tempAlloc, js_new<VMAllocator>());
+    char *dataReserve, *traceReserve, *tempReserve;
+    CHECK_ALLOC(dataReserve, (char *)js_malloc(DataReserveSize));
+    CHECK_ALLOC(traceReserve, (char *)js_malloc(TraceReserveSize));
+    CHECK_ALLOC(tempReserve, (char *)js_malloc(TempReserveSize));
+    CHECK_ALLOC(tm->dataAlloc, js_new<VMAllocator>(dataReserve, DataReserveSize));
+    CHECK_ALLOC(tm->traceAlloc, js_new<VMAllocator>(traceReserve, TraceReserveSize));
+    CHECK_ALLOC(tm->tempAlloc, js_new<VMAllocator>(tempReserve, TempReserveSize));
     CHECK_ALLOC(tm->codeAlloc, js_new<CodeAlloc>());
     CHECK_ALLOC(tm->frameCache, js_new<FrameInfoCache>(tm->dataAlloc));
     CHECK_ALLOC(tm->storage, js_new<TraceNativeStorage>());
     CHECK_ALLOC(tm->cachedTempTypeMap, js_new<TypeMap>((Allocator*)NULL));
     tm->flush();
     verbose_only( tm->branches = NULL; )
 
 #if !defined XP_WIN
@@ -7777,22 +7831,16 @@ FinishJIT(TraceMonitor *tm)
         }
         for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
             for (TreeFragment *f = tm->vmfragments[i]; f; f = f->next) {
                 JS_ASSERT(f->root == f);
                 for (TreeFragment *p = f; p; p = p->peer)
                     FragProfiling_FragFinalizer(p, tm);
             }
         }
-        REHashMap::Iter iter(*(tm->reFragments));
-        while (iter.next()) {
-            VMFragment* frag = (VMFragment*)iter.value();
-            FragProfiling_FragFinalizer(frag, tm);
-        }
-
         FragProfiling_showResults(tm);
         js_delete(tm->profAlloc);
 
     } else {
         NanoAssert(!tm->profTab);
         NanoAssert(!tm->profAlloc);
     }
 #endif
@@ -7886,17 +7934,17 @@ OverfullJITCache(JSContext *cx, TraceMon
      *     cache, in pages, has been exceeded.
      *
      * Condition 1 doesn't happen very often, but we're obliged to try to
      * safely shut down and signal the rest of spidermonkey when it
      * does. Condition 2 happens quite regularly.
      *
      * Presently, the code in this file doesn't check the outOfMemory condition
      * often enough, and frequently misuses the unchecked results of
-     * lirbuffer insertions on the asssumption that it will notice the
+     * lirbuffer insertions on the assumption that it will notice the
      * outOfMemory flag "soon enough" when it returns to the monitorRecording
      * function. This turns out to be a false assumption if we use outOfMemory
      * to signal condition 2: we regularly provoke "passing our intended
      * size" and regularly fail to notice it in time to prevent writing
      * over the end of an artificially self-limited LIR buffer.
      *
      * To mitigate, though not completely solve, this problem, we're
      * modeling the two forms of memory exhaustion *separately* for the
@@ -7904,21 +7952,17 @@ OverfullJITCache(JSContext *cx, TraceMon
      * nanojit, and condition 2 is being handled independently *here*. So
      * we construct our allocators to use all available memory they like,
      * and only report outOfMemory to us when there is literally no OS memory
      * left. Merely purging our cache when we hit our highwater mark is
      * handled by the (few) callers of this function.
      *
      */
     jsuint maxsz = JS_THREAD_DATA(cx)->maxCodeCacheBytes;
-    VMAllocator *dataAlloc = tm->dataAlloc;
-    VMAllocator *traceAlloc = tm->traceAlloc;
-    CodeAlloc *codeAlloc = tm->codeAlloc;
-
-    return (codeAlloc->size() + dataAlloc->size() + traceAlloc->size() > maxsz);
+    return (tm->codeAlloc->size() + tm->dataAlloc->size() + tm->traceAlloc->size() > maxsz);
 }
 
 JS_FORCES_STACK JS_FRIEND_API(void)
 DeepBail(JSContext *cx)
 {
     JS_ASSERT(JS_ON_TRACE(cx));
 
     /*
@@ -14724,39 +14768,48 @@ TraceRecorder::record_JSOP_MOREITER()
         RETURN_STOP_A("for-in on a primitive value");
 
     RETURN_IF_XML_A(iterobj_val);
 
     JSObject* iterobj = &iterobj_val.toObject();
     LIns* iterobj_ins = get(&iterobj_val);
     LIns* cond_ins;
 
-    /* JSOP_FOR* already guards on this, but in certain rare cases we might record misformed loop traces. */
+    /*
+     * JSOP_FOR* already guards on this, but in certain rare cases we might
+     * record misformed loop traces. Note that it's not necessary to guard on
+     * ni->flags (nor do we in unboxNextValue), because the different
+     * iteration type will guarantee a different entry typemap.
+     */
     if (iterobj->hasClass(&js_IteratorClass)) {
         guardClass(iterobj_ins, &js_IteratorClass, snapshot(BRANCH_EXIT), LOAD_NORMAL);
 
-        LIns *ni_ins = w.ldpObjPrivate(iterobj_ins);
-        LIns *cursor_ins = w.ldpIterCursor(ni_ins);
-        LIns *end_ins = w.ldpIterEnd(ni_ins);
-
-        cond_ins = w.ltp(cursor_ins, end_ins);
+        NativeIterator *ni = (NativeIterator *) iterobj->getPrivate();
+        if (ni->isKeyIter()) {
+            LIns *ni_ins = w.ldpObjPrivate(iterobj_ins);
+            LIns *cursor_ins = w.ldpIterCursor(ni_ins);
+            LIns *end_ins = w.ldpIterEnd(ni_ins);
+
+            cond_ins = w.ltp(cursor_ins, end_ins);
+            stack(0, cond_ins);
+            return ARECORD_CONTINUE;
+        }
     } else {
         guardNotClass(iterobj_ins, &js_IteratorClass, snapshot(BRANCH_EXIT), LOAD_NORMAL);
-
-        enterDeepBailCall();
-
-        LIns* vp_ins = w.allocp(sizeof(Value));
-        LIns* args[] = { vp_ins, iterobj_ins, cx_ins };
-        pendingGuardCondition = w.call(&IteratorMore_ci, args);
-
-        leaveDeepBailCall();
-
-        cond_ins = is_boxed_true(AllocSlotsAddress(vp_ins));
-    }
-
+    }
+
+    enterDeepBailCall();
+
+    LIns* vp_ins = w.allocp(sizeof(Value));
+    LIns* args[] = { vp_ins, iterobj_ins, cx_ins };
+    pendingGuardCondition = w.call(&IteratorMore_ci, args);
+
+    leaveDeepBailCall();
+
+    cond_ins = is_boxed_true(AllocSlotsAddress(vp_ins));
     stack(0, cond_ins);
 
     return ARECORD_CONTINUE;
 }
 
 static JSBool FASTCALL
 CloseIterator(JSContext *cx, JSObject *iterobj)
 {
@@ -14814,19 +14867,19 @@ TraceRecorder::unboxNextValue(LIns* &v_i
         guardClass(iterobj_ins, &js_IteratorClass, snapshot(BRANCH_EXIT), LOAD_NORMAL);
         NativeIterator *ni = (NativeIterator *) iterobj->getPrivate();
 
         LIns *ni_ins = w.ldpObjPrivate(iterobj_ins);
         LIns *cursor_ins = w.ldpIterCursor(ni_ins);
 
         /* Emit code to stringify the id if necessary. */
         Address cursorAddr = IterPropsAddress(cursor_ins);
-        if (!(((NativeIterator *) iterobj->getPrivate())->flags & JSITER_FOREACH)) {
+        if (ni->isKeyIter()) {
             /* Read the next id from the iterator. */
-            jsid id = *ni->currentKey();
+            jsid id = *ni->current();
             LIns *id_ins = w.name(w.ldp(cursorAddr), "id");
 
             /*
              * Most iterations over object properties never have to actually deal with
              * any numeric properties, so we guard here instead of branching.
              */
             guard(JSID_IS_STRING(id), is_string_id(id_ins), BRANCH_EXIT);
 
@@ -14845,33 +14898,27 @@ TraceRecorder::unboxNextValue(LIns* &v_i
                 RETURN_STOP_A("iterated over a property with an XML id");
 #else
                 JS_NEVER_REACHED("unboxNextValue");
 #endif
             }
 
             /* Increment the cursor by one jsid and store it back. */
             cursor_ins = w.addp(cursor_ins, w.nameImmw(sizeof(jsid)));
-        } else {
-            /* Read the next value from the iterator. */
-            Value v = *ni->currentValue();
-            v_ins = unbox_value(v, cursorAddr, snapshot(BRANCH_EXIT));
-
-            /* Increment the cursor by one Value and store it back. */
-            cursor_ins = w.addp(cursor_ins, w.nameImmw(sizeof(Value)));
-        }
-
-        w.stpIterCursor(cursor_ins, ni_ins);
+            w.stpIterCursor(cursor_ins, ni_ins);
+            return ARECORD_CONTINUE;
+        }
     } else {
         guardNotClass(iterobj_ins, &js_IteratorClass, snapshot(BRANCH_EXIT), LOAD_NORMAL);
-
-        Address iterValueAddr = CxAddress(iterValue);
-        v_ins = unbox_value(cx->iterValue, iterValueAddr, snapshot(BRANCH_EXIT));
-        storeMagic(JS_NO_ITER_VALUE, iterValueAddr);
-    }
+    }
+
+
+    Address iterValueAddr = CxAddress(iterValue);
+    v_ins = unbox_value(cx->iterValue, iterValueAddr, snapshot(BRANCH_EXIT));
+    storeMagic(JS_NO_ITER_VALUE, iterValueAddr);
 
     return ARECORD_CONTINUE;
 }
 
 JS_REQUIRES_STACK AbortableRecordingStatus
 TraceRecorder::record_JSOP_FORNAME()
 {
     Value* vp;
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -406,19 +406,25 @@ struct VMSideExit : public nanojit::Side
         return fromFrag()->root;
     }
 };
 
 class VMAllocator : public nanojit::Allocator
 {
 
 public:
-    VMAllocator() : mOutOfMemory(false), mSize(0)
+    VMAllocator(char* reserve, size_t reserveSize)
+      : mOutOfMemory(false), mSize(0), mReserve(reserve),
+        mReserveCurr(uintptr_t(reserve)), mReserveLimit(uintptr_t(reserve + reserveSize))
     {}
 
+    ~VMAllocator() {
+        js_free(mReserve);
+    }
+
     size_t size() {
         return mSize;
     }
 
     bool outOfMemory() {
         return mOutOfMemory;
     }
 
@@ -459,53 +465,20 @@ public:
         current_limit = m.saved_limit;
         mSize = m.saved_size;
         memset(current_top, 0, current_limit - current_top);
     }
 
     bool mOutOfMemory;
     size_t mSize;
 
-    /*
-     * FIXME: Area the LIR spills into if we encounter an OOM mid-way
-     * through compilation; we must check mOutOfMemory before we run out
-     * of mReserve, otherwise we're in undefined territory. This area
-     * used to be one page, now 16 to be "safer". This is a temporary
-     * and quite unsatisfactory approach to handling OOM in Nanojit.
-     */
-    uintptr_t mReserve[0x10000];
-};
-
-struct REHashKey {
-    size_t re_length;
-    uint16 re_flags;
-    const jschar* re_chars;
-
-    REHashKey(size_t re_length, uint16 re_flags, const jschar *re_chars)
-        : re_length(re_length)
-        , re_flags(re_flags)
-        , re_chars(re_chars)
-    {}
-
-    bool operator==(const REHashKey& other) const
-    {
-        return ((this->re_length == other.re_length) &&
-                (this->re_flags == other.re_flags) &&
-                !memcmp(this->re_chars, other.re_chars,
-                        this->re_length * sizeof(jschar)));
-    }
-};
-
-struct REHashFn {
-    static size_t hash(const REHashKey& k) {
-        return
-            k.re_length +
-            k.re_flags +
-            nanojit::murmurhash(k.re_chars, k.re_length * sizeof(jschar));
-    }
+    /* See nanojit::Allocator::allocChunk() for details on these. */
+    char* mReserve;
+    uintptr_t mReserveCurr;
+    uintptr_t mReserveLimit;
 };
 
 struct FrameInfo {
     JSObject*       block;      // caller block chain head
     jsbytecode*     pc;         // caller fp->regs->pc
     jsbytecode*     imacpc;     // caller fp->imacpc
     uint32          spdist;     // distance from fp->slots to fp->regs->sp at JSOP_CALL
 
--- a/js/src/jswrapper.cpp
+++ b/js/src/jswrapper.cpp
@@ -547,51 +547,36 @@ Reify(JSContext *cx, JSCompartment *orig
     if (!origin->wrap(cx, &obj))
         return false;
 
     /*
      * Wrap the elements in the iterator's snapshot.
      * N.B. the order of closing/creating iterators is important due to the
      * implicit cx->enumerators state.
      */
-
-    if (ni->isKeyIter()) {
-        size_t length = ni->numKeys();
-        AutoIdVector keys(cx);
-        if (length > 0) {
-            if (!keys.resize(length))
-                return false;
-            for (size_t i = 0; i < length; ++i) {
-                keys[i] = ni->beginKey()[i];
-                if (!origin->wrapId(cx, &keys[i]))
-                    return false;
-            }
-        }
-
-        close.clear();
-        return js_CloseIterator(cx, iterObj) &&
-               VectorToKeyIterator(cx, obj, ni->flags, keys, vp);
-    }
-
-    size_t length = ni->numValues();
-    AutoValueVector vals(cx);
+    size_t length = ni->numKeys();
+    bool isKeyIter = ni->isKeyIter();
+    AutoIdVector keys(cx);
     if (length > 0) {
-        if (!vals.resize(length))
+        if (!keys.resize(length))
             return false;
         for (size_t i = 0; i < length; ++i) {
-            vals[i] = ni->beginValue()[i];
-            if (!origin->wrap(cx, &vals[i]))
+            keys[i] = ni->begin()[i];
+            if (!origin->wrapId(cx, &keys[i]))
                 return false;
         }
-
     }
 
     close.clear();
-    return js_CloseIterator(cx, iterObj) &&
-           VectorToValueIterator(cx, obj, ni->flags, vals, vp);
+    if (!js_CloseIterator(cx, iterObj))
+        return false;
+
+    if (isKeyIter)
+        return VectorToKeyIterator(cx, obj, ni->flags, keys, vp);
+    return VectorToValueIterator(cx, obj, ni->flags, keys, vp);
 }
 
 bool
 JSCrossCompartmentWrapper::iterate(JSContext *cx, JSObject *wrapper, uintN flags, Value *vp)
 {
     PIERCE(cx, wrapper, GET,
            NOTHING,
            JSWrapper::iterate(cx, wrapper, flags, vp),
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -59,17 +59,17 @@
 #include "nanojit/nanojit.h"
 
 using namespace nanojit;
 using namespace std;
 
 /* Allocator SPI implementation. */
 
 void*
-nanojit::Allocator::allocChunk(size_t nbytes)
+nanojit::Allocator::allocChunk(size_t nbytes, bool /*fallible*/)
 {
     void *p = malloc(nbytes);
     if (!p)
         exit(1);
     return p;
 }
 
 void
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -1494,22 +1494,24 @@ mjit::Compiler::generateMethod()
             jsop_stricteq(op);
           END_CASE(JSOP_STRICTEQ)
 
           BEGIN_CASE(JSOP_STRICTNE)
             jsop_stricteq(op);
           END_CASE(JSOP_STRICTNE)
 
           BEGIN_CASE(JSOP_ITER)
-            iter(PC[1]);
+            if (!iter(PC[1]))
+                return Compile_Error;
           END_CASE(JSOP_ITER)
 
           BEGIN_CASE(JSOP_MOREITER)
-            /* This MUST be fused with IFNE or IFNEX. */
-            iterMore();
+            /* At the byte level, this is always fused with IFNE or IFNEX. */
+            if (!iterMore())
+                return Compile_Error;
             break;
           END_CASE(JSOP_MOREITER)
 
           BEGIN_CASE(JSOP_ENDITER)
             iterEnd();
           END_CASE(JSOP_ENDITER)
 
           BEGIN_CASE(JSOP_POP)
@@ -3959,32 +3961,32 @@ mjit::Compiler::jsop_propinc(JSOp op, Vo
         frame.pop();
         frame.pushSynced();
     }
 
     PC += JSOP_PROPINC_LENGTH;
     return true;
 }
 
-void
+bool
 mjit::Compiler::iter(uintN flags)
 {
     FrameEntry *fe = frame.peek(-1);
 
     /*
      * Stub the call if this is not a simple 'for in' loop or if the iterated
      * value is known to not be an object.
      */
     if ((flags != JSITER_ENUMERATE) || fe->isNotType(JSVAL_TYPE_OBJECT)) {
         prepareStubCall(Uses(1));
         masm.move(Imm32(flags), Registers::ArgReg1);
         INLINE_STUBCALL(stubs::Iter);
         frame.pop();
         frame.pushSynced();
-        return;
+        return true;
     }
 
     if (!fe->isTypeKnown()) {
         Jump notObject = frame.testObject(Assembler::NotEqual, fe);
         stubcc.linkExit(notObject, Uses(1));
     }
 
     RegisterID reg = frame.tempRegForData(fe);
@@ -4060,16 +4062,18 @@ mjit::Compiler::iter(uintN flags)
     stubcc.masm.move(Imm32(flags), Registers::ArgReg1);
     OOL_STUBCALL(stubs::Iter);
 
     /* Push the iterator object. */
     frame.pop();
     frame.pushTypedPayload(JSVAL_TYPE_OBJECT, ioreg);
 
     stubcc.rejoin(Changes(1));
+
+    return true;
 }
 
 /*
  * This big nasty function emits a fast-path for native iterators, producing
  * a temporary value on the stack for FORLOCAL,ARG,GLOBAL,etc ops to use.
  */
 void
 mjit::Compiler::iterNext()
@@ -4087,17 +4091,17 @@ mjit::Compiler::iterNext()
     stubcc.linkExit(notFast, Uses(1));
 
     /* Get private from iter obj. */
     masm.loadObjPrivate(reg, T1);
 
     RegisterID T3 = frame.allocReg();
     RegisterID T4 = frame.allocReg();
 
-    /* Test if for-each. */
+    /* Test for a value iterator, which could come through an Iterator object. */
     masm.load32(Address(T1, offsetof(NativeIterator, flags)), T3);
     notFast = masm.branchTest32(Assembler::NonZero, T3, Imm32(JSITER_FOREACH));
     stubcc.linkExit(notFast, Uses(1));
 
     RegisterID T2 = frame.allocReg();
 
     /* Get cursor. */
     masm.loadPtr(Address(T1, offsetof(NativeIterator, props_cursor)), T2);
@@ -4124,30 +4128,35 @@ mjit::Compiler::iterNext()
 
     /* Join with the stub call. */
     stubcc.rejoin(Changes(1));
 }
 
 bool
 mjit::Compiler::iterMore()
 {
-    FrameEntry *fe= frame.peek(-1);
+    FrameEntry *fe = frame.peek(-1);
     RegisterID reg = frame.tempRegForData(fe);
 
     frame.pinReg(reg);
     RegisterID T1 = frame.allocReg();
     frame.unpinReg(reg);
 
     /* Test clasp */
     Jump notFast = masm.testObjClass(Assembler::NotEqual, reg, &js_IteratorClass);
     stubcc.linkExitForBranch(notFast);
 
     /* Get private from iter obj. */
     masm.loadObjPrivate(reg, T1);
 
+    /* Test that the iterator supports fast iteration. */
+    notFast = masm.branchTest32(Assembler::NonZero, Address(T1, offsetof(NativeIterator, flags)),
+                                Imm32(JSITER_FOREACH));
+    stubcc.linkExitForBranch(notFast);
+
     /* Get props_cursor, test */
     RegisterID T2 = frame.allocReg();
     frame.syncAndForgetEverything();
     masm.loadPtr(Address(T1, offsetof(NativeIterator, props_cursor)), T2);
     masm.loadPtr(Address(T1, offsetof(NativeIterator, props_end)), T1);
     Jump jFast = masm.branchPtr(Assembler::LessThan, T2, T1);
 
     jsbytecode *target = &PC[JSOP_MOREITER_LENGTH];
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -391,17 +391,17 @@ class Compiler : public BaseCompiler
     uint32 fullAtomIndex(jsbytecode *pc);
     bool jumpInScript(Jump j, jsbytecode *pc);
     bool compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const Value &rhs);
     bool canUseApplyTricks();
 
     /* Emitting helpers. */
     void restoreFrameRegs(Assembler &masm);
     bool emitStubCmpOp(BoolStub stub, jsbytecode *target, JSOp fused);
-    void iter(uintN flags);
+    bool iter(uintN flags);
     void iterNext();
     bool iterMore();
     void iterEnd();
     MaybeJump loadDouble(FrameEntry *fe, FPRegisterID fpReg);
 #ifdef JS_POLYIC
     void passICAddress(BaseICInfo *ic);
 #endif
 #ifdef JS_MONOIC
--- a/js/src/methodjit/InvokeHelpers.cpp
+++ b/js/src/methodjit/InvokeHelpers.cpp
@@ -915,19 +915,19 @@ UpdateTraceHintSingle(Repatcher &repatch
      */
     repatcher.relink(jump, target);
 
     JaegerSpew(JSpew_PICs, "relinking trace hint %p to %p\n",
                jump.executableAddress(), target.executableAddress());
 }
 
 static void
-DisableTraceHint(VMFrame &f, ic::TraceICInfo &ic)
+DisableTraceHint(JITScript *jit, ic::TraceICInfo &ic)
 {
-    Repatcher repatcher(f.jit());
+    Repatcher repatcher(jit);
     UpdateTraceHintSingle(repatcher, ic.traceHint, ic.jumpTarget);
 
     if (ic.hasSlowTraceHint)
         UpdateTraceHintSingle(repatcher, ic.slowTraceHint, ic.jumpTarget);
 }
 
 static void
 ResetTraceHintAt(JSScript *script, js::mjit::JITScript *jit,
@@ -1016,17 +1016,17 @@ RunTracer(VMFrame &f)
 #endif
     tpa = MonitorTracePoint(f.cx, inlineCallCount, &blacklist, traceData, traceEpoch,
                             loopCounter, hits);
     JS_ASSERT(!TRACE_RECORDER(cx));
 
 #if JS_MONOIC
     ic.loopCounterStart = *loopCounter;
     if (blacklist)
-        DisableTraceHint(f, ic);
+        DisableTraceHint(entryFrame->jit(), ic);
 #endif
 
     // Even though ExecuteTree() bypasses the interpreter, it should propagate
     // error failures correctly.
     JS_ASSERT_IF(cx->isExceptionPending(), tpa == TPA_Error);
 
 	f.fp() = cx->fp();
     JS_ASSERT(f.fp() == cx->fp());
--- a/js/src/nanojit-import-rev
+++ b/js/src/nanojit-import-rev
@@ -1,1 +1,1 @@
-4ca71b4e30e696851c0a7a934a0e73426cf8c2c7
+f6016c7c7cd415a26dad9cf39d34141b8b482d43
--- a/js/src/nanojit/Allocator.cpp
+++ b/js/src/nanojit/Allocator.cpp
@@ -63,34 +63,41 @@ namespace nanojit
             c = prev;
         }
         current_chunk = NULL;
         current_top = NULL;
         current_limit = NULL;
         postReset();
     }
 
-    void* Allocator::allocSlow(size_t nbytes)
+    void* Allocator::allocSlow(size_t nbytes, bool fallible)
     {
         NanoAssert((nbytes & 7) == 0);
-        fill(nbytes);
-        NanoAssert(current_top + nbytes <= current_limit);
-        void* p = current_top;
-        current_top += nbytes;
-        return p;
+        if (fill(nbytes, fallible)) {
+            NanoAssert(current_top + nbytes <= current_limit);
+            void* p = current_top;
+            current_top += nbytes;
+            return p;
+        }
+        return NULL;
     }
 
-    void Allocator::fill(size_t nbytes)
+    bool Allocator::fill(size_t nbytes, bool fallible)
     {
-        const size_t minChunk = 2000;
-        if (nbytes < minChunk)
-            nbytes = minChunk;
+        if (nbytes < MIN_CHUNK_SZB)
+            nbytes = MIN_CHUNK_SZB;
         size_t chunkbytes = sizeof(Chunk) + nbytes - sizeof(int64_t);
-        void* mem = allocChunk(chunkbytes);
-        Chunk* chunk = (Chunk*) mem;
-        chunk->prev = current_chunk;
-        current_chunk = chunk;
-        current_top = (char*)chunk->data;
-        current_limit = (char*)mem + chunkbytes;
+        void* mem = allocChunk(chunkbytes, fallible);
+        if (mem) {
+            Chunk* chunk = (Chunk*) mem;
+            chunk->prev = current_chunk;
+            current_chunk = chunk;
+            current_top = (char*)chunk->data;
+            current_limit = (char*)mem + chunkbytes;
+            return true;
+        } else {
+            NanoAssert(fallible);
+            return false;
+        }
     }
 }
 
 #endif // FEATURE_NANOJIT
--- a/js/src/nanojit/Allocator.h
+++ b/js/src/nanojit/Allocator.h
@@ -41,55 +41,78 @@
 #define __nanojit_Allocator__
 
 namespace nanojit
 {
     /**
      * Allocator is a bump-pointer allocator with an SPI for getting more
      * memory from embedder-implemented allocator, such as malloc()/free().
      *
-     * allocations never return NULL.  The implementation of allocChunk()
+     * alloc() never returns NULL.  The implementation of allocChunk()
      * is expected to perform a longjmp or exception when an allocation can't
-     * proceed.
+     * proceed.  fallibleAlloc() (and fallibleAllocChunk()) may return NULL.
+     * They should be used for large allocations whose failure can be handled
+     * without too much difficulty.
      */
     class Allocator {
     public:
         Allocator();
         ~Allocator();
+
+        // Usable space in the minimum chunk size;  there are also a few bytes
+        // used for administration.
+        static const size_t MIN_CHUNK_SZB = 2000;
+
         void reset();
 
         /** alloc memory, never return null. */
         void* alloc(size_t nbytes) {
+            void* p;
             nbytes = (nbytes + 7) & ~7; // round up
             if (current_top + nbytes <= current_limit) {
-                void *p = current_top;
+                p = current_top;
                 current_top += nbytes;
-                return p;
+            } else {
+                p = allocSlow(nbytes, /* fallible = */false);
+                NanoAssert(p);
             }
-            return allocSlow(nbytes);
+            return p;
+        }
+
+        /** alloc memory, maybe return null. */
+        void* fallibleAlloc(size_t nbytes) {
+            void* p;
+            nbytes = (nbytes + 7) & ~7; // round up
+            if (current_top + nbytes <= current_limit) {
+                p = current_top;
+                current_top += nbytes;
+            } else {
+                p = allocSlow(nbytes, /* fallible = */true);
+            }
+            return p;
         }
 
     protected:
-        void* allocSlow(size_t nbytes);
-        void fill(size_t minbytes);
+        void* allocSlow(size_t nbytes, bool fallible = false);
+        bool fill(size_t minbytes, bool fallible);
 
         class Chunk {
         public:
             Chunk* prev;
             int64_t data[1]; // int64_t forces 8-byte alignment.
         };
 
         Chunk* current_chunk;
         char* current_top;
         char* current_limit;
 
         // allocator SPI
 
         /** allocate another block from a host provided allocator */
-        void* allocChunk(size_t nbytes);
+        void* allocChunk(size_t nbytes, bool fallible);
 
         /** free back to the same allocator */
         void freeChunk(void*);
 
         /** hook for post-reset action. */
         void postReset();
     };
 }
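
To make the alloc()/fallibleAlloc() split above concrete, here is a hedged usage sketch: small must-succeed allocations go through alloc(), large recoverable ones through fallibleAlloc() with an explicit NULL check.  buildTable() is a hypothetical caller, not an API added by this patch.

    // Hypothetical caller illustrating the intended division of labour between
    // the infallible and fallible entry points of nanojit::Allocator.
    static int32_t** buildTable(nanojit::Allocator& alloc, size_t nEntries)
    {
        // Largish and recoverable: use fallibleAlloc() and check for NULL.
        int32_t** table = (int32_t**) alloc.fallibleAlloc(nEntries * sizeof(int32_t*));
        if (!table)
            return NULL;                  // caller degrades to a slower path

        // Small and must-succeed: alloc() never returns NULL; on OOM the
        // embedder's allocChunk() falls back to its reserve (or aborts).
        for (size_t i = 0; i < nEntries; i++)
            table[i] = (int32_t*) alloc.alloc(sizeof(int32_t));
        return table;
    }
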
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -2056,19 +2056,19 @@ namespace nanojit
         : LirWriter(out),
           EMB_NUM_USED_ACCS(embNumUsedAccs),
           CSE_NUM_USED_ACCS(EMB_NUM_USED_ACCS + 2),
           CSE_ACC_CONST(    EMB_NUM_USED_ACCS + 0),
           CSE_ACC_MULTIPLE( EMB_NUM_USED_ACCS + 1),
           storesSinceLastLoad(ACCSET_NONE),
           alloc(alloc),
           knownCmpValues(alloc),
-          suspended(false)
+          suspended(false),
+          initOOM(false)
     {
-
         m_findNL[NLImmISmall] = &CseFilter::findImmISmall;
         m_findNL[NLImmILarge] = &CseFilter::findImmILarge;
         m_findNL[NLImmQ]      = PTR_SIZE(NULL, &CseFilter::findImmQ);
         m_findNL[NLImmD]      = &CseFilter::findImmD;
         m_findNL[NL1]         = &CseFilter::find1;
         m_findNL[NL2]         = &CseFilter::find2;
         m_findNL[NL3]         = &CseFilter::find3;
         m_findNL[NLCall]      = &CseFilter::findCall;
@@ -2077,25 +2077,36 @@ namespace nanojit
         m_capNL[NLImmILarge]  = 64;
         m_capNL[NLImmQ]       = PTR_SIZE(0, 16);
         m_capNL[NLImmD]       = 16;
         m_capNL[NL1]          = 256;
         m_capNL[NL2]          = 512;
         m_capNL[NL3]          = 16;
         m_capNL[NLCall]       = 64;
 
+        // The largish allocations are fallible; the small ones are
+        // infallible.  See the comment on initOOM's declaration for why.
+
         for (NLKind nlkind = NLFirst; nlkind <= NLLast; nlkind = nextNLKind(nlkind)) {
-            m_listNL[nlkind] = new (alloc) LIns*[m_capNL[nlkind]];
+            m_listNL[nlkind] = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capNL[nlkind]);
+            if (!m_listNL[nlkind]) {
+                initOOM = true;
+                return;
+            }
             m_usedNL[nlkind] = 1; // Force memset in clearAll().
         }
 
         // Note that this allocates the CONST and MULTIPLE tables as well.
         for (CseAcc a = 0; a < CSE_NUM_USED_ACCS; a++) {
             m_capL[a] = 16;
-            m_listL[a] = new (alloc) LIns*[m_capL[a]];
+            m_listL[a] = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capL[a]);
+            if (!m_listL[a]) {
+                initOOM = true;
+                return;
+            }
             m_usedL[a] = 1; // Force memset(0) in first clearAll().
         }
 
         clearAll();
     }
 
     // Inlined/separated version of SuperFastHash.
     // This content is copyrighted by Paul Hsieh.
@@ -2205,81 +2216,111 @@ namespace nanojit
 
     inline uint32_t CseFilter::hashCall(const CallInfo *ci, uint32_t argc, LIns* args[]) {
         uint32_t hash = hashptr(0, ci);
         for (int32_t j=argc-1; j >= 0; j--)
             hash = hashptr(hash,args[j]);
         return hashfinish(hash);
     }
 
-    void CseFilter::growNL(NLKind nlkind)
+    bool CseFilter::growNL(NLKind nlkind)
     {
         NanoAssert(nlkind != NLImmISmall);
         const uint32_t oldcap = m_capNL[nlkind];
         m_capNL[nlkind] <<= 1;
-        LIns** oldlist = m_listNL[nlkind];
-        m_listNL[nlkind] = new (alloc) LIns*[m_capNL[nlkind]];
-        VMPI_memset(m_listNL[nlkind], 0, m_capNL[nlkind] * sizeof(LIns*));
-        find_t find = m_findNL[nlkind];
-        for (uint32_t i = 0; i < oldcap; i++) {
-            LIns* ins = oldlist[i];
-            if (!ins) continue;
-            uint32_t j = (this->*find)(ins);
-            NanoAssert(!m_listNL[nlkind][j]);
-            m_listNL[nlkind][j] = ins;
+        // We make this allocation fallible because it's potentially large and
+        // easy to recover from.  If it fails, we won't add any more
+        // instructions to the table and some CSE opportunities may be missed.
+        LIns** tmp = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capNL[nlkind]);
+        if (tmp) {
+            LIns** oldlist = m_listNL[nlkind];
+            m_listNL[nlkind] = tmp;
+            VMPI_memset(m_listNL[nlkind], 0, m_capNL[nlkind] * sizeof(LIns*));
+            find_t find = m_findNL[nlkind];
+            for (uint32_t i = 0; i < oldcap; i++) {
+                LIns* ins = oldlist[i];
+                if (!ins) continue;
+                uint32_t j = (this->*find)(ins);
+                NanoAssert(!m_listNL[nlkind][j]);
+                m_listNL[nlkind][j] = ins;
+            }
+            return true;
+        } else {
+            m_capNL[nlkind] = oldcap;
+            return false;
         }
     }
 
-    void CseFilter::growL(CseAcc cseAcc)
+    bool CseFilter::growL(CseAcc cseAcc)
     {
         const uint32_t oldcap = m_capL[cseAcc];
         m_capL[cseAcc] <<= 1;
-        LIns** oldlist = m_listL[cseAcc];
-        m_listL[cseAcc] = new (alloc) LIns*[m_capL[cseAcc]];
-        VMPI_memset(m_listL[cseAcc], 0, m_capL[cseAcc] * sizeof(LIns*));
-        find_t find = &CseFilter::findLoad;
-        for (uint32_t i = 0; i < oldcap; i++) {
-            LIns* ins = oldlist[i];
-            if (!ins) continue;
-            uint32_t j = (this->*find)(ins);
-            NanoAssert(!m_listL[cseAcc][j]);
-            m_listL[cseAcc][j] = ins;
+        LIns** tmp = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capL[cseAcc]);
+        if (tmp) {
+            LIns** oldlist = m_listL[cseAcc];
+            m_listL[cseAcc] = tmp;
+            VMPI_memset(m_listL[cseAcc], 0, m_capL[cseAcc] * sizeof(LIns*));
+            find_t find = &CseFilter::findLoad;
+            for (uint32_t i = 0; i < oldcap; i++) {
+                LIns* ins = oldlist[i];
+                if (!ins) continue;
+                uint32_t j = (this->*find)(ins);
+                NanoAssert(!m_listL[cseAcc][j]);
+                m_listL[cseAcc][j] = ins;
+            }
+            return true;
+        } else {
+            m_capL[cseAcc] = oldcap;
+            return false;
         }
     }
 
     void CseFilter::addNLImmISmall(LIns* ins, uint32_t k)
     {
+        NanoAssert(!initOOM);
         if (suspended) return;
         NLKind nlkind = NLImmISmall;
         NanoAssert(k < m_capNL[nlkind]);
         NanoAssert(!m_listNL[nlkind][k]);
         m_usedNL[nlkind]++;
         m_listNL[nlkind][k] = ins;
     }
 
     void CseFilter::addNL(NLKind nlkind, LIns* ins, uint32_t k)
     {
+        NanoAssert(!initOOM);
         if (suspended) return;
         NanoAssert(!m_listNL[nlkind][k]);
         m_usedNL[nlkind]++;
         m_listNL[nlkind][k] = ins;
         if ((m_usedNL[nlkind] * 4) >= (m_capNL[nlkind] * 3)) {  // load factor of 0.75
-            growNL(nlkind);
+            bool ok = growNL(nlkind);
+            if (!ok) {
+                // OOM: undo the insertion.
+                m_usedNL[nlkind]--;
+                m_listNL[nlkind][k] = NULL;
+            }
         }
     }
 
     void CseFilter::addL(LIns* ins, uint32_t k)
     {
+        NanoAssert(!initOOM);
         if (suspended) return;
         CseAcc cseAcc = miniAccSetToCseAcc(ins->miniAccSet(), ins->loadQual());
         NanoAssert(!m_listL[cseAcc][k]);
         m_usedL[cseAcc]++;
         m_listL[cseAcc][k] = ins;
         if ((m_usedL[cseAcc] * 4) >= (m_capL[cseAcc] * 3)) {  // load factor of 0.75
-            growL(cseAcc);
+            bool ok = growL(cseAcc);
+            if (!ok) {
+                // OOM: undo the insertion.
+                m_usedL[cseAcc]--;
+                m_listL[cseAcc][k] = NULL;
+            }
         }
     }
 
     inline LIns* CseFilter::findImmISmall(int32_t a, uint32_t &k)
     {
         // This one is a direct array lookup rather than a hashtable lookup.
         NLKind nlkind = NLImmISmall;
         k = a;
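
The add/grow changes above all follow one pattern: insert first, attempt a fallible grow when the load factor is reached, and undo the insertion if the grow fails, so the filter merely stops caching instead of crashing.  A condensed sketch of that pattern, with find(), table, used, and cap standing in for the m_findNL/m_listNL/m_usedNL/m_capNL members (illustrative names only, not part of the patch):

    typedef uint32_t (*FindFn)(LIns** table, uint32_t cap, LIns* ins);

    static void addWithFallibleGrow(nanojit::Allocator& alloc, FindFn find,
                                    LIns**& table, uint32_t& used, uint32_t& cap,
                                    LIns* ins, uint32_t k)
    {
        table[k] = ins;
        used++;
        if ((used * 4) < (cap * 3))       // still under the 0.75 load factor
            return;

        // Growing is fallible: the new table is largish, and losing it only
        // costs some CSE opportunities, not correctness.
        LIns** bigger = (LIns**) alloc.fallibleAlloc(sizeof(LIns*) * cap * 2);
        if (!bigger) {
            used--;                       // OOM: undo the insertion and keep
            table[k] = NULL;              // the old, smaller table
            return;
        }

        VMPI_memset(bigger, 0, sizeof(LIns*) * cap * 2);
        for (uint32_t i = 0; i < cap; i++) {
            if (LIns* old = table[i])
                bigger[find(bigger, cap * 2, old)] = old;   // rehash into new table
        }
        table = bigger;
        cap *= 2;
    }
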
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -2049,31 +2049,43 @@ namespace nanojit
 #endif
         uint32_t findImmD(LIns* ins);
         uint32_t find1(LIns* ins);
         uint32_t find2(LIns* ins);
         uint32_t find3(LIns* ins);
         uint32_t findCall(LIns* ins);
         uint32_t findLoad(LIns* ins);
 
-        void growNL(NLKind kind);
-        void growL(CseAcc cseAcc);
+        // These return false if they failed to grow due to OOM.
+        bool growNL(NLKind kind);
+        bool growL(CseAcc cseAcc);
 
         void addNLImmISmall(LIns* ins, uint32_t k);
         // 'k' is the index found by findXYZ().
         void addNL(NLKind kind, LIns* ins, uint32_t k);
         void addL(LIns* ins, uint32_t k);
 
         void clearAll();            // clears all tables
         void clearNL(NLKind);       // clears one non-load table
         void clearL(CseAcc);        // clears one load table
 
     public:
         CseFilter(LirWriter *out, uint8_t embNumUsedAccs, Allocator&);
 
+        // CseFilter does some largish fallible allocations at start-up.  If
+        // they fail, the constructor sets this field to 'true'.  It should be
+        // checked after creation, and if set the CseFilter cannot be used.
+        // (But the check can be skipped if allocChunk() always succeeds.)
+        //
+        // FIXME: This fallibility is a sop to TraceMonkey's implementation of
+        // infallible malloc -- by avoiding some largish infallible
+        // allocations, it reduces the size of the reserve space needed.
+        // Bug 624590 is open to fix this.
+        bool initOOM;
+
         LIns* insImmI(int32_t imm);
 #ifdef NANOJIT_64BIT
         LIns* insImmQ(uint64_t q);
 #endif
         LIns* insImmD(double d);
         LIns* ins0(LOpcode v);
         LIns* ins1(LOpcode v, LIns*);
         LIns* ins2(LOpcode v, LIns*, LIns*);
@@ -2111,23 +2123,23 @@ namespace nanojit
                 uint32_t lir;    // # instructions
             }
             _stats;
 
             AbiKind abi;
             LIns *state, *param1, *sp, *rp;
             LIns* savedRegs[NumSavedRegs+1]; // Allocate an extra element in case NumSavedRegs == 0
 
-        protected:
-            friend class LirBufWriter;
-
             /** Each chunk is just a raw area of LIns instances, with no header
                 and no more than 8-byte alignment.  The chunk size is somewhat arbitrary. */
             static const size_t CHUNK_SZB = 8000;
 
+        protected:
+            friend class LirBufWriter;
+
             /** Get CHUNK_SZB more memory for LIR instructions. */
             void        chunkAlloc();
             void        moveToNewChunk(uintptr_t addrOfLastLInsOnCurrentChunk);
 
             Allocator&  _allocator;
             uintptr_t   _unused;   // next unused instruction slot in the current LIR chunk
             uintptr_t   _limit;    // one past the last usable byte of the current LIR chunk
     };
--- a/js/src/tests/js1_8_5/regress/jstests.list
+++ b/js/src/tests/js1_8_5/regress/jstests.list
@@ -25,16 +25,17 @@ script regress-563210.js
 script regress-563221.js
 script regress-566549.js
 script regress-566914.js
 script regress-567152.js
 script regress-569306.js
 script regress-569464.js
 script regress-571014.js
 script regress-573875.js
+script regress-576847.js
 script regress-577648-1.js
 script regress-577648-2.js
 script regress-583429.js
 script regress-584355.js
 script regress-586482-1.js
 script regress-586482-2.js
 script regress-586482-3.js
 script regress-586482-4.js
@@ -75,8 +76,9 @@ fails-if(!xulRuntime.shell) script regre
 script regress-619003-1.js
 script regress-619003-2.js
 script regress-620376-1.js
 script regress-620376-2.js
 script regress-621814.js
 script regress-620750.js
 script regress-624199.js
 script regress-624547.js
+script regress-626436.js
new file mode 100644
--- /dev/null
+++ b/js/src/tests/js1_8_5/regress/regress-576847.js
@@ -0,0 +1,19 @@
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+/* Don't crash. */
+try {
+    eval("function f(){}(((f)for(x in function(){}))())");
+    var threwTypeError = false;
+} catch (x) {
+    var threwTypeError = x instanceof TypeError;
+}
+assertEq(threwTypeError, true);
+
+/* Properly bind f. */
+assertEq(eval("function f() {}; var i = (f for (f in [1])); uneval([n for (n in i)])"),
+         '["0"]');
+
+reportCompare(true, true);
new file mode 100644
--- /dev/null
+++ b/js/src/tests/js1_8_5/regress/regress-626436.js
@@ -0,0 +1,7 @@
+// Any copyright is dedicated to the Public Domain.
+// http://creativecommons.org/licenses/publicdomain/
+// Contributors: Christian Holler <decoder@own-hero.net>, Jesse Ruderman <jruderman@gmail.com>
+
+(1 ? 2 : delete(0 ? 0 : {})).x;
+
+reportCompare(0, 0, 'ok');
--- a/js/src/tracejit/Writer.cpp
+++ b/js/src/tracejit/Writer.cpp
@@ -103,18 +103,21 @@ Writer::init(LogControl *logc_)
     ValidateWriter *validate2;
     lir = validate2 =
         new (alloc) ValidateWriter(lir, lirbuf->printer, "end of writer pipeline");
 #endif
 #ifdef JS_JIT_SPEW
     if (logc->lcbits & LC_TMRecorder)
        lir = new (alloc) VerboseWriter(*alloc, lir, lirbuf->printer, logc);
 #endif
-    if (avmplus::AvmCore::config.cseopt)
-        lir = cse = new (alloc) CseFilter(lir, TM_NUM_USED_ACCS, *alloc);
+    if (avmplus::AvmCore::config.cseopt) {
+        cse = new (alloc) CseFilter(lir, TM_NUM_USED_ACCS, *alloc);
+        if (!cse->initOOM)
+            lir = cse;      // Skip CseFilter if we OOM'd when creating it.
+    }
     lir = new (alloc) ExprFilter(lir);
     lir = new (alloc) FuncFilter(lir);
 #ifdef DEBUG
     ValidateWriter *validate1 =
         new (alloc) ValidateWriter(lir, lirbuf->printer, "start of writer pipeline");
     lir = validate1;
 #endif
 }
--- a/js/src/xpconnect/loader/mozJSComponentLoader.cpp
+++ b/js/src/xpconnect/loader/mozJSComponentLoader.cpp
@@ -1495,17 +1495,17 @@ mozJSComponentLoader::ImportInto(const n
     *_retval = mod->global;
 
     jsval symbols;
     if (targetObj) {
         JSCLContextHelper cxhelper(this);
 
         JSAutoEnterCompartment ac;
         if (!ac.enter(mContext, mod->global))
-            return NULL;
+            return NS_ERROR_FAILURE;
 
         if (!JS_GetProperty(mContext, mod->global,
                             "EXPORTED_SYMBOLS", &symbols)) {
             return ReportOnCaller(cxhelper, ERROR_NOT_PRESENT,
                                   PromiseFlatCString(aLocation).get());
         }
 
         JSObject *symbolsObj = nsnull;
--- a/js/src/xpconnect/public/nsAutoJSValHolder.h
+++ b/js/src/xpconnect/public/nsAutoJSValHolder.h
@@ -51,17 +51,16 @@
  */
 class nsAutoJSValHolder
 {
 public:
 
   nsAutoJSValHolder()
     : mRt(NULL)
     , mVal(JSVAL_NULL)
-    , mGCThing(NULL)
     , mHeld(JS_FALSE)
   {
     // nothing to do
   }
 
   /**
    * Always release on destruction.
    */
@@ -73,46 +72,45 @@ public:
    * Hold by rooting on the context's runtime.
    */
   JSBool Hold(JSContext* aCx) {
     return Hold(JS_GetRuntime(aCx));
   }
 
   /**
    * Hold by rooting on the runtime.
-   * Note that mGCThing may be JSVAL_NULL, which is not a problem.
+   * Note that mVal may be JSVAL_NULL, which is not a problem.
    */
   JSBool Hold(JSRuntime* aRt) {
     if (!mHeld) {
-      if (js_AddGCThingRootRT(aRt, &mGCThing, "nsAutoJSValHolder")) {
+      if (js_AddRootRT(aRt, &mVal, "nsAutoJSValHolder")) {
         mRt = aRt;
         mHeld = JS_TRUE;
       } else {
         Release(); // out of memory
       }
     }
     return mHeld;
   }
 
   /**
-   * Manually release, nullifying mVal, mGCThing, and mRt, but returning
+   * Manually release, nullifying mVal and mRt, but returning
    * the original jsval.
    */
   jsval Release() {
     NS_ASSERTION(!mHeld || mRt, "Bad!");
 
     jsval oldval = mVal;
 
     if (mHeld) {
-      js_RemoveRoot(mRt, &mGCThing); // infallible
+      js_RemoveRoot(mRt, &mVal); // infallible
       mHeld = JS_FALSE;
     }
 
     mVal = JSVAL_NULL;
-    mGCThing = NULL;
     mRt = NULL;
 
     return oldval;
   }
 
   /**
    * Determine if Hold has been called.
    */
@@ -149,22 +147,18 @@ public:
 
   nsAutoJSValHolder &operator=(jsval aOther) {
 #ifdef DEBUG
     if (JSVAL_IS_OBJECT(aOther) && JSVAL_TO_OBJECT(aOther)) {
       NS_ASSERTION(mHeld, "Not rooted!");
     }
 #endif
     mVal = aOther;
-    mGCThing = JSVAL_IS_GCTHING(aOther)
-             ? JSVAL_TO_GCTHING(aOther)
-             : NULL;
     return *this;
   }
 
 private:
   JSRuntime* mRt;
   jsval mVal;
-  void* mGCThing;
   JSBool mHeld;
 };
 
 #endif /* __NSAUTOJSVALHOLDER_H__ */
--- a/js/src/xpconnect/src/XPCWrapper.cpp
+++ b/js/src/xpconnect/src/XPCWrapper.cpp
@@ -122,17 +122,17 @@ AttachNewConstructorObject(XPCCallContex
 
 }
 
 namespace XPCWrapper {
 
 JSObject *
 Unwrap(JSContext *cx, JSObject *wrapper)
 {
-  if (wrapper->isProxy()) {
+  if (wrapper->isWrapper()) {
     if (xpc::AccessCheck::isScriptAccessOnly(cx, wrapper))
       return nsnull;
     return wrapper->unwrap();
   }
 
   return nsnull;
 }
 
--- a/js/src/xpconnect/wrappers/WrapperFactory.cpp
+++ b/js/src/xpconnect/wrappers/WrapperFactory.cpp
@@ -78,16 +78,18 @@ GetCurrentOuter(JSContext *cx, JSObject 
     }
 
     return obj;
 }
 
 JSObject *
 WrapperFactory::WaiveXray(JSContext *cx, JSObject *obj)
 {
+    obj = obj->unwrap();
+
     // We have to make sure that if we're wrapping an outer window, that
     // the .wrappedJSObject also wraps the outer window.
     obj = GetCurrentOuter(cx, obj);
 
     {
         // See if we already have a waiver wrapper for this object.
         CompartmentPrivate *priv =
             (CompartmentPrivate *)JS_GetCompartmentPrivate(cx, obj->compartment());
@@ -96,17 +98,19 @@ WrapperFactory::WaiveXray(JSContext *cx,
             wobj = priv->waiverWrapperMap->Find(obj);
 
         // No wrapper yet, make one.
         if (!wobj) {
             JSObject *proto = obj->getProto();
             if (proto && !(proto = WaiveXray(cx, proto)))
                 return nsnull;
 
-            js::SwitchToCompartment sc(cx, obj->compartment());
+            JSAutoEnterCompartment ac;
+            if (!ac.enter(cx, obj))
+                return nsnull;
             wobj = JSWrapper::New(cx, obj, proto, obj->getGlobal(), &WaiveXrayWrapperWrapper);
             if (!wobj)
                 return nsnull;
 
             // Add the new wrapper so we find it next time.
             if (priv) {
                 if (!priv->waiverWrapperMap) {
                     priv->waiverWrapperMap = JSObject2JSObjectMap::newMap(XPC_WRAPPER_MAP_SIZE);
@@ -123,17 +127,20 @@ WrapperFactory::WaiveXray(JSContext *cx,
 
     return obj;
 }
 
 JSObject *
 WrapperFactory::DoubleWrap(JSContext *cx, JSObject *obj, uintN flags)
 {
     if (flags & WrapperFactory::WAIVE_XRAY_WRAPPER_FLAG) {
-        js::SwitchToCompartment sc(cx, obj->compartment());
+        JSAutoEnterCompartment ac;
+        if (!ac.enter(cx, obj))
+            return nsnull;
+
         return WaiveXray(cx, obj);
     }
     return obj;
 }
 
 JSObject *
 WrapperFactory::PrepareForWrapping(JSContext *cx, JSObject *scope, JSObject *obj, uintN flags)
 {
@@ -340,17 +347,17 @@ WrapperFactory::WrapLocationObject(JSCon
 }
 
 bool
 WrapperFactory::WaiveXrayAndWrap(JSContext *cx, jsval *vp)
 {
     if (JSVAL_IS_PRIMITIVE(*vp))
         return JS_WrapValue(cx, vp);
 
-    JSObject *obj = JSVAL_TO_OBJECT(*vp)->unwrap();
+    JSObject *obj = JSVAL_TO_OBJECT(*vp);
 
     obj = WaiveXray(cx, obj);
     if (!obj)
         return false;
 
     *vp = OBJECT_TO_JSVAL(obj);
     return JS_WrapValue(cx, vp);
 }
--- a/js/src/yarr/pcre/pcre_exec.cpp
+++ b/js/src/yarr/pcre/pcre_exec.cpp
@@ -718,16 +718,20 @@ RECURSE:
                 /* For a non-repeating ket, just continue at this level. This also
                  happens for a repeating ket if no characters were matched in the group.
                  This is the forcible breaking of infinite loops as implemented in Perl
                  5.005. If there is an options reset, it will get obeyed in the normal
                  course of events. */
                 
                 if (*stack.currentFrame->args.instructionPtr == OP_KET || stack.currentFrame->args.subjectPtr == stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
                     DPRINTF(("non-repeating ket or empty match\n"));
+                    if (stack.currentFrame->args.subjectPtr == stack.currentFrame->locals.subjectPtrAtStartOfInstruction && stack.currentFrame->args.groupMatched) {
+                        DPRINTF(("empty string while group already matched; bailing"));
+                        RRETURN_NO_MATCH;
+                    }
                     stack.currentFrame->args.instructionPtr += 1 + LINK_SIZE;
                     NEXT_OPCODE;
                 }
                 
                 /* The repeating kets try the rest of the pattern or restart from the
                  preceding bracket, in the appropriate order. */
                 
                 stack.currentFrame->extractBrackets(LOCALS(instructionPtrAtStartOfOnce));
@@ -1261,17 +1265,17 @@ RECURSE:
                     }
                     
                     if (min == stack.currentFrame->locals.max)
                         NEXT_OPCODE;
                     
                     if (minimize) {
                         stack.currentFrame->locals.repeatOthercase = othercase;
                         for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
-                            RECURSIVE_MATCH(28, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
+                            RECURSIVE_MATCH(28, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
                             if (isMatch)
                                 RRETURN;
                             if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
                                 RRETURN;
                             if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc && *stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.repeatOthercase)
                                 RRETURN;
                             ++stack.currentFrame->args.subjectPtr;
                         }
@@ -1303,17 +1307,17 @@ RECURSE:
                         stack.currentFrame->args.subjectPtr += 2;
                     }
                     
                     if (min == stack.currentFrame->locals.max)
                         NEXT_OPCODE;
                     
                     if (minimize) {
                         for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
-                            RECURSIVE_MATCH(30, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
+                            RECURSIVE_MATCH(30, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
                             if (isMatch)
                                 RRETURN;
                             if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
                                 RRETURN;
                             if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc)
                                 RRETURN;
                             stack.currentFrame->args.subjectPtr += 2;
                         }
@@ -1323,17 +1327,17 @@ RECURSE:
                         for (int i = min; i < stack.currentFrame->locals.max; i++) {
                             if (stack.currentFrame->args.subjectPtr > md.endSubject - 2)
                                 break;
                             if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc)
                                 break;
                             stack.currentFrame->args.subjectPtr += 2;
                         }
                         while (stack.currentFrame->args.subjectPtr >= stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
-                            RECURSIVE_MATCH(31, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
+                            RECURSIVE_MATCH(31, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
                             if (isMatch)
                                 RRETURN;
                             stack.currentFrame->args.subjectPtr -= 2;
                         }
                         RRETURN_NO_MATCH;
                     }
                     /* Control never reaches here */
                 }
@@ -1419,17 +1423,17 @@ RECURSE:
                             RRETURN_NO_MATCH;
                     }
                     
                     if (min == stack.currentFrame->locals.max)
                         NEXT_OPCODE;      
                     
                     if (minimize) {
                         for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
-                            RECURSIVE_MATCH(38, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
+                            RECURSIVE_MATCH(38, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
                             if (isMatch)
                                 RRETURN;
                             int d = *stack.currentFrame->args.subjectPtr++;
                             if (d < 128)
                                 d = toLowerCase(d);
                             if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject || stack.currentFrame->locals.fc == d)
                                 RRETURN;
                         }
@@ -1447,17 +1451,17 @@ RECURSE:
                             int d = *stack.currentFrame->args.subjectPtr;
                             if (d < 128)
                                 d = toLowerCase(d);
                             if (stack.currentFrame->locals.fc == d)
                                 break;
                             ++stack.currentFrame->args.subjectPtr;
                         }
                         for (;;) {
-                            RECURSIVE_MATCH(40, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
+                            RECURSIVE_MATCH(40, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
                             if (isMatch)
                                 RRETURN;
                             if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
                                 break;        /* Stop if tried at original pos */
                         }
                         
                         RRETURN;
                     }
@@ -1473,17 +1477,17 @@ RECURSE:
                             RRETURN_NO_MATCH;
                     }
 
                     if (min == stack.currentFrame->locals.max)
                         NEXT_OPCODE;
                     
                     if (minimize) {
                         for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
-                            RECURSIVE_MATCH(42, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
+                            RECURSIVE_MATCH(42, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
                             if (isMatch)
                                 RRETURN;
                             int d = *stack.currentFrame->args.subjectPtr++;
                             if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject || stack.currentFrame->locals.fc == d)
                                 RRETURN;
                         }
                         /* Control never reaches here */
                     }
@@ -1497,17 +1501,17 @@ RECURSE:
                             if (stack.currentFrame->args.subjectPtr >= md.endSubject)
                                 break;
                             int d = *stack.currentFrame->args.subjectPtr;
                             if (stack.currentFrame->locals.fc == d)
                                 break;
                             ++stack.currentFrame->args.subjectPtr;
                         }
                         for (;;) {
-                            RECURSIVE_MATCH(44, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
+                            RECURSIVE_MATCH(44, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
                             if (isMatch)
                                 RRETURN;
                             if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
                                 break;        /* Stop if tried at original pos */
                         }
 
                         RRETURN;
                     }
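The pcre_exec.cpp changes above thread the new groupMatched flag through every RECURSIVE_MATCH call so that, once a quantified group has matched, a further iteration that consumes no input bails out instead of looping. A minimal sketch of the behaviour being guarded, using an example pattern of my own (not taken from the patch's tests):

// Illustrative only: a trailing non-capturing greedy group whose body can
// match the empty string must terminate once an iteration matches "".
var m = /(?:a|b?)*/.exec("aab");
if (m[0] !== "aab")
    throw new Error("quantifier should stop after the empty iteration, got " + m[0]);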
--- a/js/src/yarr/yarr/RegexCompiler.cpp
+++ b/js/src/yarr/yarr/RegexCompiler.cpp
@@ -526,24 +526,27 @@ public:
                     alternative->m_hasFixedSize = false;
                 } else
                     currentInputPosition += term.quantityCount;
                 break;
 
             case PatternTerm::TypeParenthesesSubpattern:
                 // Note: for fixed once parentheses we will ensure at least the minimum is available; others are on their own.
                 term.frameLocation = currentCallFrameSize;
-                if ((term.quantityCount == 1) && !term.parentheses.isCopy) {
-                    if (term.quantityType == QuantifierFixedCount) {
-                        currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
+                if (term.quantityCount == 1 && !term.parentheses.isCopy) {
+                    if (term.quantityType != QuantifierFixedCount)
+                        currentCallFrameSize += RegexStackSpaceForBackTrackInfoParenthesesOnce;
+                    currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
+                    // If quantity is fixed, then pre-check its minimum size.
+                    if (term.quantityType == QuantifierFixedCount)
                         currentInputPosition += term.parentheses.disjunction->m_minimumSize;
-                    } else {
-                        currentCallFrameSize += RegexStackSpaceForBackTrackInfoParenthesesOnce;
-                        currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
-                    }
+                    term.inputPosition = currentInputPosition;
+                } else if (term.parentheses.isTerminal) {
+                    currentCallFrameSize += RegexStackSpaceForBackTrackInfoParenthesesTerminal;
+                    currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
                     term.inputPosition = currentInputPosition;
                 } else {
                     term.inputPosition = currentInputPosition;
                     setupDisjunctionOffsets(term.parentheses.disjunction, 0, currentInputPosition);
                     currentCallFrameSize += RegexStackSpaceForBackTrackInfoParentheses;
                 }
                 // Fixed count of 1 could be accepted, if they have a fixed size *AND* if all alternatives are of the same length.
                 alternative->m_hasFixedSize = false;
@@ -587,16 +590,43 @@ public:
         return maximumCallFrameSize;
     }
 
     void setupOffsets()
     {
         setupDisjunctionOffsets(m_pattern.m_body, 0, 0);
     }
 
+    // This optimization identifies sets of parentheses into which we will never need to backtrack.
+    // In these cases we do not need to store state from prior iterations.
+    // We can presently avoid backtracking when all of the following hold:
+    //   * the parens are at the end of the regular expression (the last term in any of the alternatives of the main body disjunction),
+    //   * the parens are non-capturing and quantified as unbounded greedy (*),
+    //   * the parens do not contain any capturing subpatterns.
+    void checkForTerminalParentheses()
+    {
+        // This check is much too crude; should be just checking whether the candidate
+        // node contains nested capturing subpatterns, not the whole expression!
+        if (m_pattern.m_numSubpatterns)
+            return;
+
+        js::Vector<PatternAlternative*, 0, js::SystemAllocPolicy>& alternatives = m_pattern.m_body->m_alternatives;
+        for (unsigned i = 0; i < alternatives.length(); ++i) {
+            js::Vector<PatternTerm, 0, js::SystemAllocPolicy>& terms = alternatives[i]->m_terms;
+            if (terms.length()) {
+                PatternTerm& term = terms.back();
+                if (term.type == PatternTerm::TypeParenthesesSubpattern
+                    && term.quantityType == QuantifierGreedy
+                    && term.quantityCount == UINT_MAX
+                    && !term.capture())
+                    term.parentheses.isTerminal = true;
+            }
+        }
+    }
+
 private:
     RegexPattern& m_pattern;
     PatternAlternative* m_alternative;
     CharacterClassConstructor m_characterClassConstructor;
     bool m_invertCharacterClass;
 };
 
 
@@ -619,15 +649,16 @@ int compileRegex(const UString& patternS
         int error =
 #endif
             parse(constructor, patternString, numSubpatterns);
 
         JS_ASSERT(!error);
         JS_ASSERT(numSubpatterns == pattern.m_numSubpatterns);
     }
 
+    constructor.checkForTerminalParentheses();
     constructor.setupOffsets();
 
     return 0;
 }
 
 
 } }
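checkForTerminalParentheses() above is deliberately conservative: the whole pattern must contain no capturing subpatterns, and only the last term of a main-body alternative is tagged when it is a non-capturing, unbounded greedy group. Terminal parens then reserve only RegexStackSpaceForBackTrackInfoParenthesesTerminal (one slot) in setupDisjunctionOffsets rather than the general four-slot BackTrackInfoParentheses case. A hedged sketch of which patterns the new flag does and does not cover (example patterns are mine, not from the patch):

// Eligible: the trailing (?:...)* is non-capturing, greedy and unbounded,
// and there are no capturing parens anywhere in the pattern.
var terminal = /^ab(?:cd)*/;
if (!terminal.test("abcdcd"))
    throw new Error("expected a match");

// Not eligible under the current crude check: any capturing group in the
// pattern keeps isTerminal false, even though it is outside the trailing parens.
var nonTerminal = /(x)y(?:cd)*/;
if (nonTerminal.exec("xycd")[1] !== "x")
    throw new Error("expected group 1 to capture 'x'");

// Observable behaviour is identical either way; only the amount of
// backtracking state the matcher keeps differs.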
--- a/js/src/yarr/yarr/RegexJIT.cpp
+++ b/js/src/yarr/yarr/RegexJIT.cpp
@@ -912,22 +912,17 @@ class RegexGenerator : private MacroAsse
 
     void generateParenthesesSingle(TermGenerationState& state)
     {
         const RegisterID indexTemporary = regT0;
         PatternTerm& term = state.term();
         PatternDisjunction* disjunction = term.parentheses.disjunction;
         ASSERT(term.quantityCount == 1);
 
-        if (term.parentheses.isCopy) {
-            m_shouldFallBack = true;
-            return;
-        }
-
-        unsigned preCheckedCount = ((term.quantityCount == 1) && (term.quantityType == QuantifierFixedCount)) ? disjunction->m_minimumSize : 0;
+        unsigned preCheckedCount = (term.quantityType == QuantifierFixedCount) ? disjunction->m_minimumSize : 0;
 
         unsigned parenthesesFrameLocation = term.frameLocation;
         unsigned alternativeFrameLocation = parenthesesFrameLocation;
         if (term.quantityType != QuantifierFixedCount)
             alternativeFrameLocation += RegexStackSpaceForBackTrackInfoParenthesesOnce;
 
         // optimized case - no capture & no quantifier can be handled in a light-weight manner.
         if (!term.invertOrCapture && (term.quantityType == QuantifierFixedCount)) {
@@ -936,22 +931,22 @@ class RegexGenerator : private MacroAsse
             // this expects that any backtracks back out of the parentheses will be in the
             // parenthesesState's backTrackJumps vector, and that if they need backtracking
             // they will have set an entry point on the parenthesesState's backtrackLabel.
             state.propagateBacktrackingFrom(parenthesesState, this);
         } else {
             Jump nonGreedySkipParentheses;
             Label nonGreedyTryParentheses;
             if (term.quantityType == QuantifierGreedy)
-                storeToFrame(Imm32(1), parenthesesFrameLocation);
+                storeToFrame(index, parenthesesFrameLocation);
             else if (term.quantityType == QuantifierNonGreedy) {
-                storeToFrame(Imm32(0), parenthesesFrameLocation);
+                storeToFrame(Imm32(-1), parenthesesFrameLocation);
                 nonGreedySkipParentheses = jump();
                 nonGreedyTryParentheses = label();
-                storeToFrame(Imm32(1), parenthesesFrameLocation);
+                storeToFrame(index, parenthesesFrameLocation);
             }
 
             // store the match start index
             if (term.invertOrCapture) {
                 int inputOffset = state.inputOffset() - preCheckedCount;
                 if (inputOffset) {
                     move(index, indexTemporary);
                     add32(Imm32(inputOffset), indexTemporary);
@@ -959,89 +954,73 @@ class RegexGenerator : private MacroAsse
                 } else
                     store32(index, Address(output, (term.parentheses.subpatternId << 1) * sizeof(int)));
             }
 
             // generate the body of the parentheses
             TermGenerationState parenthesesState(disjunction, state.checkedTotal);
             generateParenthesesDisjunction(state.term(), parenthesesState, alternativeFrameLocation);
 
-            // store the match end index
-            if (term.invertOrCapture) {
-                int inputOffset = state.inputOffset();
-                if (inputOffset) {
-                    move(index, indexTemporary);
-                    add32(Imm32(state.inputOffset()), indexTemporary);
-                    store32(indexTemporary, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
-                } else
-                    store32(index, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
-            }
-            Jump success = jump();
+            Jump success = (term.quantityType == QuantifierFixedCount) ?
+                jump() :
+                branch32(NotEqual, index, Address(stackPointerRegister, (parenthesesFrameLocation * sizeof(void*))));
 
             // A failure AFTER the parens jumps here
             Label backtrackFromAfterParens(this);
 
             if (term.quantityType == QuantifierGreedy) {
-                // If this is zero we have now tested with both with and without the parens.
+                // If this is -1 we have now tested both with and without the parens.
                 loadFromFrame(parenthesesFrameLocation, indexTemporary);
-                state.jumpToBacktrack(branchTest32(Zero, indexTemporary), this);
+                state.jumpToBacktrack(branch32(Equal, indexTemporary, Imm32(-1)), this);
             } else if (term.quantityType == QuantifierNonGreedy) {
-                // If this is zero we have now tested with both with and without the parens.
+                // If this is -1 we have now tested without the parens; now test with them.
                 loadFromFrame(parenthesesFrameLocation, indexTemporary);
-                branchTest32(Zero, indexTemporary).linkTo(nonGreedyTryParentheses, this);
+                branch32(Equal, indexTemporary, Imm32(-1)).linkTo(nonGreedyTryParentheses, this);
             }
 
             parenthesesState.plantJumpToBacktrackIfExists(this);
             // A failure WITHIN the parens jumps here
             parenthesesState.linkAlternativeBacktracks(this);
             if (term.invertOrCapture) {
                 store32(Imm32(-1), Address(output, (term.parentheses.subpatternId << 1) * sizeof(int)));
 #if 0
                 store32(Imm32(-1), Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
 #endif
             }
 
             if (term.quantityType == QuantifierGreedy)
-                storeToFrame(Imm32(0), parenthesesFrameLocation);
+                storeToFrame(Imm32(-1), parenthesesFrameLocation);
             else
                 state.jumpToBacktrack(jump(), this);
 
             state.setBacktrackGenerated(backtrackFromAfterParens);
             if (term.quantityType == QuantifierNonGreedy)
                 nonGreedySkipParentheses.link(this);
             success.link(this);
+
+            // store the match end index
+            if (term.invertOrCapture) {
+                int inputOffset = state.inputOffset();
+                if (inputOffset) {
+                    move(index, indexTemporary);
+                    add32(Imm32(state.inputOffset()), indexTemporary);
+                    store32(indexTemporary, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
+                } else
+                    store32(index, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
+            }
         }
     }
 
     void generateParenthesesGreedyNoBacktrack(TermGenerationState& state)
     {
         PatternTerm& parenthesesTerm = state.term();
         PatternDisjunction* disjunction = parenthesesTerm.parentheses.disjunction;
         ASSERT(parenthesesTerm.type == PatternTerm::TypeParenthesesSubpattern);
         ASSERT(parenthesesTerm.quantityCount != 1); // Handled by generateParenthesesSingle.
 
-        // Capturing not yet implemented!
-        if (parenthesesTerm.invertOrCapture) {
-            m_shouldFallBack = true;
-            return;
-        }
-
-        // Quantification limit not yet implemented!
-        if (parenthesesTerm.quantityCount != 0xffffffff) {
-            m_shouldFallBack = true;
-            return;
-        }
-
-        // Need to reset nested subpatterns between iterations...
-        // for the minute this crude check rejects all patterns with any subpatterns!
-        if (m_pattern.m_numSubpatterns) {
-            m_shouldFallBack = true;
-            return;
-        }
-
         TermGenerationState parenthesesState(disjunction, state.checkedTotal);
 
         Label matchAgain(this);
 
         storeToFrame(index, parenthesesTerm.frameLocation); // Save the current index to check for zero len matches later.
 
         for (parenthesesState.resetAlternative(); parenthesesState.alternativeValid(); parenthesesState.nextAlternative()) {
 
@@ -1053,17 +1032,21 @@ class RegexGenerator : private MacroAsse
                 parenthesesState.addBacktrackJump(jumpIfNoAvailableInput(countToCheck));
                 parenthesesState.checkedTotal += countToCheck;
             }
 
             for (parenthesesState.resetTerm(); parenthesesState.termValid(); parenthesesState.nextTerm())
                 generateTerm(parenthesesState);
 
             // If we get here, we matched! If the index advanced then try to match more since limit isn't supported yet.
-            branch32(GreaterThan, index, Address(stackPointerRegister, (parenthesesTerm.frameLocation * sizeof(void*))), matchAgain);
+            branch32(NotEqual, index, Address(stackPointerRegister, (parenthesesTerm.frameLocation * sizeof(void*))), matchAgain);
+
+            // If we get here we matched, but the match was "": we cannot accept this alternative as is, so we either backtrack
+            // or fall through to try the next alternative if no backtrack is available.
+            parenthesesState.plantJumpToBacktrackIfExists(this);
 
             parenthesesState.linkAlternativeBacktracks(this);
             // We get here if the alternative fails to match - fall through to the next iteration, or out of the loop.
 
             if (countToCheck) {
                 sub32(Imm32(countToCheck), index);
                 parenthesesState.checkedTotal -= countToCheck;
             }
@@ -1186,27 +1169,22 @@ class RegexGenerator : private MacroAsse
         case PatternTerm::TypeBackReference:
             m_shouldFallBack = true;
             break;
 
         case PatternTerm::TypeForwardReference:
             break;
 
         case PatternTerm::TypeParenthesesSubpattern:
-            if (term.quantityCount == 1) {
+            if (term.quantityCount == 1 && !term.parentheses.isCopy)
                 generateParenthesesSingle(state);
-                break;
-            } else if (state.isLastTerm() && state.isMainDisjunction()) { // Is this is the last term of the main disjunction?
-                // If this has a greedy quantifier, then it will never need to backtrack!
-                if (term.quantityType == QuantifierGreedy) {
-                    generateParenthesesGreedyNoBacktrack(state);
-                    break;
-                }
-            }
-            m_shouldFallBack = true;
+            else if (term.parentheses.isTerminal)
+                generateParenthesesGreedyNoBacktrack(state);
+            else
+                m_shouldFallBack = true;
             break;
 
         case PatternTerm::TypeParentheticalAssertion:
             generateParentheticalAssertion(state);
             break;
         }
     }
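In RegexJIT.cpp above, generateParenthesesSingle() now saves the input index in the frame slot and uses -1 as a sentinel instead of the old 0/1 flag, and it only stores the capture's end index once the parentheses have actually succeeded; generateParenthesesGreedyNoBacktrack() treats a zero-length alternative match as unacceptable and falls through to the next alternative. A small sketch of that zero-length fall-through, again with an example pattern of my own rather than one from the patch:

// Illustrative only: in a terminal greedy group, an alternative that matches
// "" cannot be accepted as-is; the matcher moves on to the next alternative.
var m = /(?:x?|A)*/.exec("AB");
if (m[0] !== "A")
    throw new Error("expected the empty first alternative to fall through to 'A', got " + m[0]);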
 
--- a/js/src/yarr/yarr/RegexJIT.h
+++ b/js/src/yarr/yarr/RegexJIT.h
@@ -69,17 +69,18 @@ public:
     JSRegExp* getFallback() { return m_fallback; }
     void setFallback(JSRegExp* fallback) { m_fallback = fallback; }
 
     bool operator!() { return (!m_ref.m_code.executableAddress() && !m_fallback); }
     void set(MacroAssembler::CodeRef ref) { m_ref = ref; }
 
     int execute(const UChar* input, unsigned start, unsigned length, int* output)
     {
-        return JS_EXTENSION((reinterpret_cast<RegexJITCode>(m_ref.m_code.executableAddress()))(input, start, length, output));
+        void *code = m_ref.m_code.executableAddress();
+        return JS_EXTENSION((reinterpret_cast<RegexJITCode>(code))(input, start, length, output));
     }
 
 private:
     MacroAssembler::CodeRef m_ref;
     JSRegExp* m_fallback;
 };
 
 void jitCompileRegex(ExecutableAllocator &allocator, RegexCodeBlock& jitObject, const UString& pattern, unsigned& numSubpatterns, int& error, bool &fellBack, bool ignoreCase = false, bool multiline = false
--- a/js/src/yarr/yarr/RegexPattern.h
+++ b/js/src/yarr/yarr/RegexPattern.h
@@ -34,16 +34,17 @@
 namespace JSC { namespace Yarr {
 
 #define RegexStackSpaceForBackTrackInfoPatternCharacter 1 // Only for !fixed quantifiers.
 #define RegexStackSpaceForBackTrackInfoCharacterClass 1 // Only for !fixed quantifiers.
 #define RegexStackSpaceForBackTrackInfoBackReference 2
 #define RegexStackSpaceForBackTrackInfoAlternative 1 // One per alternative.
 #define RegexStackSpaceForBackTrackInfoParentheticalAssertion 1
 #define RegexStackSpaceForBackTrackInfoParenthesesOnce 1 // Only for !fixed quantifiers.
+#define RegexStackSpaceForBackTrackInfoParenthesesTerminal 1
 #define RegexStackSpaceForBackTrackInfoParentheses 4
 
 struct PatternDisjunction;
 
 struct CharacterRange {
     UChar begin;
     UChar end;
 
@@ -132,16 +133,17 @@ struct PatternTerm {
         UChar patternCharacter;
         CharacterClass* characterClass;
         unsigned subpatternId;
         struct {
             PatternDisjunction* disjunction;
             unsigned subpatternId;
             unsigned lastSubpatternId;
             bool isCopy;
+            bool isTerminal;
         } parentheses;
     };
     QuantifierType quantityType;
     unsigned quantityCount;
     int inputPosition;
     unsigned frameLocation;
 
     PatternTerm(UChar ch)
@@ -163,16 +165,17 @@ struct PatternTerm {
 
     PatternTerm(Type type, unsigned subpatternId, PatternDisjunction* disjunction, bool invertOrCapture)
         : type(type)
         , invertOrCapture(invertOrCapture)
     {
         parentheses.disjunction = disjunction;
         parentheses.subpatternId = subpatternId;
         parentheses.isCopy = false;
+        parentheses.isTerminal = false;
         quantityType = QuantifierFixedCount;
         quantityCount = 1;
     }
     
     PatternTerm(Type type, bool invert = false)
         : type(type)
         , invertOrCapture(invert)
     {