Merge tracemonkey to mozilla-central. a=blockers
author Robert Sayre <sayrer@gmail.com>
Mon, 06 Dec 2010 16:10:01 -0500
changeset 58726 25af9b539829ef7c04ba38515492a947c0c5aed9
parent 58675 b308f615ba23a60f75adeb1a77cf37c5a80ba472 (current diff)
parent 58725 0d6d19884ccd0ba5214856f41d026c70f843143f (diff)
child 58727 2f96714fd6d2b46a79a9453bca2b45ab9288c121
push id 1
push user shaver@mozilla.com
push date Tue, 04 Jan 2011 17:58:04 +0000
reviewers blockers
milestone 2.0b8pre
--- a/js/src/assembler/assembler/ARMAssembler.h
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -927,16 +927,23 @@ namespace JSC {
             m_buffer.ensureSpace(space);
         }
 
         int sizeOfConstantPool()
         {
             return m_buffer.sizeOfConstantPool();
         }
 
+#ifdef DEBUG
+        void allowPoolFlush(bool allowFlush)
+        {
+            m_buffer.allowPoolFlush(allowFlush);
+        }
+#endif
+
         JmpDst label()
         {
             JmpDst label(m_buffer.size());
             js::JaegerSpew(js::JSpew_Insns, IPFX "#label     ((%d))\n", MAYBE_PAD, label.m_offset);
             return label;
         }
 
         JmpDst align(int alignment)
--- a/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -32,16 +32,17 @@
 #define AssemblerBufferWithConstantPool_h
 
 #include "assembler/wtf/Platform.h"
 
 #if ENABLE_ASSEMBLER
 
 #include "AssemblerBuffer.h"
 #include "assembler/wtf/SegmentedVector.h"
+#include "assembler/wtf/Assertions.h"
 
 #define ASSEMBLER_HAS_CONSTANT_POOL 1
 
 namespace JSC {
 
 /*
     On a constant pool 4 or 8 bytes data can be stored. The values can be
     constants or addresses. The addresses should be 32 or 64 bits. The constants
@@ -98,16 +99,19 @@ public:
         UnusedEntry
     };
 
     AssemblerBufferWithConstantPool()
         : AssemblerBuffer()
         , m_numConsts(0)
         , m_maxDistance(maxPoolSize)
         , m_lastConstDelta(0)
+#ifdef DEBUG
+        , m_allowFlush(true)
+#endif
     {
         m_pool = static_cast<uint32_t*>(malloc(maxPoolSize));
         m_mask = static_cast<char*>(malloc(maxPoolSize / sizeof(uint32_t)));
     }
 
     ~AssemblerBufferWithConstantPool()
     {
         free(m_mask);
@@ -230,16 +234,25 @@ public:
         return m_pool;
     }
 
     int sizeOfConstantPool()
     {
         return m_numConsts;
     }
 
+#ifdef DEBUG
+    // Guard constant pool flushes to ensure that they don't occur during
+    // regions where offsets into the code have to be maintained (such as PICs).
+    void allowPoolFlush(bool allowFlush)
+    {
+        m_allowFlush = allowFlush;
+    }
+#endif
+
 private:
     void correctDeltas(int insnSize)
     {
         m_maxDistance -= insnSize;
         m_lastConstDelta -= insnSize;
         if (m_lastConstDelta < 0)
             m_lastConstDelta = 0;
     }
@@ -249,16 +262,17 @@ private:
         correctDeltas(insnSize);
 
         m_maxDistance -= m_lastConstDelta;
         m_lastConstDelta = constSize;
     }
 
     void flushConstantPool(bool useBarrier = true)
     {
+        ASSERT(m_allowFlush);
         if (m_numConsts == 0)
             return;
         int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
 
         if (alignPool)
             alignPool = sizeof(uint64_t) - alignPool;
 
         // Callback to protect the constant pool from execution
@@ -308,15 +322,19 @@ private:
 
     uint32_t* m_pool;
     char* m_mask;
     LoadOffsets m_loadOffsets;
 
     int m_numConsts;
     int m_maxDistance;
     int m_lastConstDelta;
+
+#ifdef DEBUG
+    bool    m_allowFlush;
+#endif
 };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
 
 #endif // AssemblerBufferWithConstantPool_h
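
The debug-only guard above is meant to be used in pairs around regions whose
code offsets must stay fixed. A minimal usage sketch, assuming a macro
assembler object masm that exposes the new method (the surrounding emission
calls are illustrative, not part of this patch):

    #ifdef DEBUG
    masm.allowPoolFlush(false);  // entering an offset-sensitive region (e.g. a PIC)
    #endif
    // ... emit code whose offsets are recorded for later patching; a
    // constant pool flush here would shift those offsets, and now
    // asserts inside flushConstantPool() ...
    #ifdef DEBUG
    masm.allowPoolFlush(true);   // region closed; pool flushes are safe again
    #endif
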
--- a/js/src/assembler/assembler/MacroAssemblerARM.h
+++ b/js/src/assembler/assembler/MacroAssemblerARM.h
@@ -1073,16 +1073,23 @@ public:
         m_assembler.ensureSpace(space);
     }
 
     void forceFlushConstantPool()
     {
         m_assembler.forceFlushConstantPool();
     }
 
+#ifdef DEBUG
+    void allowPoolFlush(bool allowFlush)
+    {
+        m_assembler.allowPoolFlush(allowFlush);
+    }
+#endif
+
 protected:
     ARMAssembler::Condition ARMCondition(Condition cond)
     {
         return static_cast<ARMAssembler::Condition>(cond);
     }
 
     void ensureSpace(int insnSpace, int constSpace)
     {
--- a/js/src/assembler/assembler/MacroAssemblerX86_64.h
+++ b/js/src/assembler/assembler/MacroAssemblerX86_64.h
@@ -375,16 +375,22 @@ public:
 
     void setPtr(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
     {
         m_assembler.cmpq_rr(right, left);
         m_assembler.setCC_r(x86Condition(cond), dest);
         m_assembler.movzbl_rr(dest, dest);
     }
 
+    void setPtr(Condition cond, RegisterID left, ImmPtr right, RegisterID dest)
+    {
+        move(right, scratchRegister);
+        setPtr(cond, left, scratchRegister, dest);
+    }
+
     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
     {
         m_assembler.cmpq_rr(right, left);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
     Jump branchPtr(Condition cond, RegisterID left, Imm32 right)
     {
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug605015.js
@@ -0,0 +1,9 @@
+// |jit-test| error: TypeError 
+// don't assert
+
+print(this.watch("x",
+function() {
+  Object.defineProperty(this, "x", ({
+    get: (Int8Array)
+  }))
+}))(x = /x/)
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug614915.js
@@ -0,0 +1,2 @@
+var s = [undefined, undefined].sort();
+assertEq(s.length, 2);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/testDenseToSlowArray.js
@@ -0,0 +1,185 @@
+// test dense -> slow array transitions during trace recording and on trace
+// for various array functions and property accessors
+
+function test_set_elem() {
+
+    function f() {
+        var bag = [];
+        for (var i = 0; i != 100; ++i) {
+            var a = [0];
+            a[100*100] = i;
+            bag.push(a);
+        }
+        
+        for (var i = 0; i != 100; ++i) {
+            var a = [0];
+            a[200 + i] = i;
+            bag.push(a);
+        }
+        return bag;
+    }
+    
+    var bag = f();
+    
+    for (var i = 0; i != 100; ++i) {
+        var a = bag[i];
+        assertEq(a.length, 100 * 100 + 1);
+        assertEq(a[100*100], i);
+        assertEq(a[0], 0);
+        assertEq(1 + i in a, false);
+    }
+    
+    for (var i = 0; i != 100; ++i) {
+        var a = bag[100 + i];
+        assertEq(a.length, 200 + i + 1);
+        assertEq(a[200 + i], i);
+        assertEq(a[0], 0);
+        assertEq(1 + i in a, false);
+    }
+}
+
+function test_reverse() {
+
+    function prepare_arrays() {
+        var bag = [];
+        var base_index = 245;
+        for (var i = 0; i != 50; ++i) {
+            var a = [1, 2, 3, 4, 5];
+            a.length = i + base_index;
+            bag.push(a);
+        }
+        return bag;
+    }
+
+    function test(bag) {
+        for (var i = 0; i != bag.length; ++i) {
+            var a = bag[i];
+            a.reverse();
+            a[0] = 1;
+        }
+    }
+
+    var bag = prepare_arrays();
+    test(bag);
+    for (var i = 0; i != bag.length; ++i) {
+        var a = bag[i];
+        assertEq(a[0], 1);
+        for (var j = 1; j <= 5; ++j) {
+            assertEq(a[a.length - j], j);
+        }
+        for (var j = 1; j < a.length - 5; ++j) {
+            assertEq(j in a, false);
+        }
+    }
+    
+}
+
+function test_push() {
+
+    function prepare_arrays() {
+        var bag = [];
+        var base_index = 245;
+        for (var i = 0; i != 50; ++i) {
+            var a = [0];
+            a.length = i + base_index;
+            bag.push(a);
+        }
+        return bag;
+    }
+
+    function test(bag) {
+        for (var i = 0; i != bag.length; ++i) {
+            var a = bag[i];
+            a.push(2);
+            a[0] = 1;
+        }
+    }
+
+    var bag = prepare_arrays();
+    test(bag);
+    for (var i = 0; i != bag.length; ++i) {
+        var a = bag[i];
+        assertEq(a[0], 1); 
+        assertEq(a[a.length - 1], 2);
+        for (var j = 1; j < a.length - 1; ++j) {
+            assertEq(j in a, false);
+        }
+    }
+}
+
+function test_unshift() {
+
+    function prepare_arrays() {
+        var bag = [];
+        var base_index = 245;
+        for (var i = 0; i != 50; ++i) {
+            var a = [0];
+            a.length = i + base_index;
+            bag.push(a);
+        }
+        return bag;
+    }
+
+    function test(bag) {
+        for (var i = 0; i != bag.length; ++i) {
+            var a = bag[i];
+            a.unshift(1);
+            a[2] = 2;
+        }
+    }
+
+    var bag = prepare_arrays();
+    test(bag);
+    for (var i = 0; i != bag.length; ++i) {
+        var a = bag[i];
+        assertEq(a[0], 1); 
+        assertEq(a[1], 0); 
+        assertEq(a[2], 2); 
+        for (var j = 3; j < a.length; ++j) {
+            assertEq(j in a, false);
+        }
+    }
+}
+
+function test_splice() {
+
+    function prepare_arrays() {
+        var bag = [];
+        var base_index = 245;
+        for (var i = 0; i != 50; ++i) {
+            var a = [1, 2];
+            a.length = i + base_index;
+            bag.push(a);
+        }
+        return bag;
+    }
+
+    function test(bag) {
+        for (var i = 0; i != bag.length; ++i) {
+            var a = bag[i];
+            a.splice(1, 0, "a", "b", "c");
+            a[2] = 100;
+        }
+    }
+
+    var bag = prepare_arrays();
+    test(bag);
+    for (var i = 0; i != bag.length; ++i) {
+        var a = bag[i];
+        assertEq(a[0], 1); 
+        assertEq(a[1], "a"); 
+        assertEq(a[2], 100); 
+        assertEq(a[3], "c"); 
+        assertEq(a[4], 2); 
+        for (var j = 5; j < a.length; ++j) {
+            assertEq(j in a, false);
+        }
+    }
+}
+
+test_set_elem();
+test_reverse();
+test_push();
+test_unshift();
+test_splice();
+
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug604381.js
@@ -0,0 +1,14 @@
+// vim: set ts=4 sw=4 tw=99 et:
+
+function F() {
+    var T = { };
+    try {
+        throw 12;
+    } catch (e) {
+        T.x = 5;
+        return T;
+    }
+}
+
+assertEq((new F()).x, 5);
+
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug615440.js
@@ -0,0 +1,5 @@
+Array.prototype.__proto__ = null;
+for (var r = 0; r < 3; ++r) [][0] = 1;
+
+// Don't crash.
+
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug616508.js
@@ -0,0 +1,9 @@
+// |jit-test| error: ReferenceError
+// vim: set ts=4 sw=4 tw=99 et:
+try {
+    (function () {
+        __proto__ = Uint32Array()
+    }())
+} catch (e) {}(function () {
+    length, ([eval()] ? x : 7)
+})()
--- a/js/src/jsapi-tests/Makefile.in
+++ b/js/src/jsapi-tests/Makefile.in
@@ -44,16 +44,17 @@ VPATH		= @srcdir@
 
 include $(DEPTH)/config/autoconf.mk
 
 PROGRAM         = jsapi-tests$(BIN_SUFFIX)
 
 CPPSRCS = \
   tests.cpp \
   selfTest.cpp \
+  testBug604087.cpp \
   testClassGetter.cpp \
   testCloneScript.cpp \
   testConservativeGC.cpp \
   testContexts.cpp \
   testDebugger.cpp \
   testDeepFreeze.cpp \
   testDefineGetterSetterNonEnumerable.cpp \
   testDefineProperty.cpp \
@@ -64,17 +65,17 @@ CPPSRCS = \
   testIntString.cpp \
   testLookup.cpp \
   testNewObject.cpp \
   testOps.cpp \
   testPropCache.cpp \
   testSameValue.cpp \
   testScriptObject.cpp \
   testSetPropertyWithNativeGetterStubSetter.cpp \
-  testBug604087.cpp \
+  testThreadGC.cpp \
   testThreads.cpp \
   testTrap.cpp \
   testUTF8.cpp \
   testVersion.cpp \
   testXDR.cpp \
   $(NULL)
 
 DEFINES         += -DEXPORT_JS_API
--- a/js/src/jsapi-tests/testDeepFreeze.cpp
+++ b/js/src/jsapi-tests/testDeepFreeze.cpp
@@ -1,14 +1,54 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sw=4 et tw=99:
  */
 
 #include "tests.h"
 
 BEGIN_TEST(testDeepFreeze_bug535703)
 {
-    JSObject *obj = JS_NewObject(cx, NULL, NULL, NULL);
-    CHECK(obj);
-    JS_DeepFreezeObject(cx, obj);  // don't crash
+    jsval v;
+    EVAL("var x = {}; x;", &v);
+    CHECK(JS_DeepFreezeObject(cx, JSVAL_TO_OBJECT(v)));  // don't crash
+    EVAL("Object.isFrozen(x)", &v);
+    CHECK_SAME(v, JSVAL_TRUE);
     return true;
 }
 END_TEST(testDeepFreeze_bug535703)
+
+BEGIN_TEST(testDeepFreeze_deep)
+{
+    jsval a, o;
+    EXEC("var a = {}, o = a;\n"
+         "for (var i = 0; i < 10000; i++)\n"
+         "    a = {x: a, y: a};\n");
+    EVAL("a", &a);
+    EVAL("o", &o);
+
+    CHECK(JS_DeepFreezeObject(cx, JSVAL_TO_OBJECT(a)));
+
+    jsval b;
+    EVAL("Object.isFrozen(a)", &b);
+    CHECK_SAME(b, JSVAL_TRUE);
+    EVAL("Object.isFrozen(o)", &b);
+    CHECK_SAME(b, JSVAL_TRUE);
+    return true;
+}
+END_TEST(testDeepFreeze_deep)
+
+BEGIN_TEST(testDeepFreeze_loop)
+{
+    jsval x, y;
+    EXEC("var x = [], y = {x: x}; y.y = y; x.push(x, y);");
+    EVAL("x", &x);
+    EVAL("y", &y);
+
+    CHECK(JS_DeepFreezeObject(cx, JSVAL_TO_OBJECT(x)));
+
+    jsval b;
+    EVAL("Object.isFrozen(x)", &b);
+    CHECK_SAME(b, JSVAL_TRUE);
+    EVAL("Object.isFrozen(y)", &b);
+    CHECK_SAME(b, JSVAL_TRUE);
+    return true;
+}
+END_TEST(testDeepFreeze_loop)
new file mode 100644
--- /dev/null
+++ b/js/src/jsapi-tests/testThreadGC.cpp
@@ -0,0 +1,195 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=99:
+ */
+
+#ifdef JS_THREADSAFE
+
+#include "tests.h"
+#include "prthread.h"
+
+#include "jscntxt.h"
+
+/*
+ * We test that if a GC callback cancels the GC on a child thread, the GC
+ * can still proceed on the main thread even if the child thread continues
+ * to run uninterrupted.
+ */
+
+struct SharedData {
+    enum ChildState {
+        CHILD_STARTING,
+        CHILD_RUNNING,
+        CHILD_DONE,
+        CHILD_ERROR
+    };
+
+    JSRuntime   *const runtime;
+    PRThread    *const mainThread;
+    PRLock      *const lock;
+    PRCondVar   *const signal;
+    ChildState  childState;
+    bool        childShouldStop;
+    JSContext   *childContext;
+
+    SharedData(JSRuntime *rt, bool *ok)
+      : runtime(rt),
+        mainThread(PR_GetCurrentThread()),
+        lock(PR_NewLock()),
+        signal(lock ? PR_NewCondVar(lock) : NULL),
+        childState(CHILD_STARTING),
+        childShouldStop(false),
+        childContext(NULL)
+    {
+        JS_ASSERT(!*ok);
+        *ok = !!signal;
+    }
+
+    ~SharedData() {
+        if (signal)
+            PR_DestroyCondVar(signal);
+        if (lock)
+            PR_DestroyLock(lock);
+    }
+};
+
+static SharedData *shared;
+
+static JSBool
+CancelNonMainThreadGCCallback(JSContext *cx, JSGCStatus status)
+{
+    return status != JSGC_BEGIN || PR_GetCurrentThread() == shared->mainThread;
+}
+
+static JSBool
+StopChildOperationCallback(JSContext *cx)
+{
+    bool shouldStop;
+    PR_Lock(shared->lock);
+    shouldStop = shared->childShouldStop;
+    PR_Unlock(shared->lock);
+    return !shouldStop;
+}
+
+static JSBool
+NotifyMainThreadAboutBusyLoop(JSContext *cx, uintN argc, jsval *vp)
+{
+    PR_Lock(shared->lock);
+    JS_ASSERT(shared->childState == SharedData::CHILD_STARTING);
+    shared->childState = SharedData::CHILD_RUNNING;
+    shared->childContext = cx;
+    PR_NotifyCondVar(shared->signal);
+    PR_Unlock(shared->lock);
+
+    return true;
+}
+
+static void
+ChildThreadMain(void *arg)
+{
+    JS_ASSERT(!arg);
+    bool error = true;
+    JSContext *cx = JS_NewContext(shared->runtime, 8192);
+    if (cx) {
+        JS_SetOperationCallback(cx, StopChildOperationCallback);
+        JSAutoRequest ar(cx);
+        JSObject *global = JS_NewCompartmentAndGlobalObject(cx, JSAPITest::basicGlobalClass(),
+                                                            NULL);
+        if (global) {
+            JS_SetGlobalObject(cx, global);
+            if (JS_InitStandardClasses(cx, global) &&
+                JS_DefineFunction(cx, global, "notify", NotifyMainThreadAboutBusyLoop, 0, 0)) {
+
+                jsval rval;
+                static const char code[] = "var i = 0; notify(); for (var i = 0; ; ++i);";
+                JSBool ok = JS_EvaluateScript(cx, global, code, strlen(code),
+                                              __FILE__, __LINE__, &rval);
+                if (!ok && !JS_IsExceptionPending(cx)) {
+                    /* Evaluate should only return via the callback cancellation. */
+                    error = false;
+                }
+            }
+        }
+    }
+
+    PR_Lock(shared->lock);
+    shared->childState = error ? SharedData::CHILD_ERROR : SharedData::CHILD_DONE;
+    shared->childContext = NULL;
+    PR_NotifyCondVar(shared->signal);
+    PR_Unlock(shared->lock);
+
+    if (cx)
+        JS_DestroyContextNoGC(cx);
+}
+
+BEGIN_TEST(testThreadGC_bug590533)
+    {
+        /*
+         * Test the child thread busy running while the current thread calls
+         * the GC both with JSRuntime->gcIsNeeded set and unset.
+         */
+        bool ok = TestChildThread(true);
+        CHECK(ok);
+        ok = TestChildThread(false);
+        CHECK(ok);
+        return ok;
+    }
+
+    bool TestChildThread(bool setGCIsNeeded)
+    {
+        bool ok = false;
+        shared = new SharedData(rt, &ok);
+        CHECK(ok);
+
+        JSGCCallback oldGCCallback = JS_SetGCCallback(cx, CancelNonMainThreadGCCallback);
+
+        PRThread *thread =
+            PR_CreateThread(PR_USER_THREAD, ChildThreadMain, NULL,
+                            PR_PRIORITY_NORMAL, PR_LOCAL_THREAD, PR_JOINABLE_THREAD, 0);
+        if (!thread)
+            return false;
+
+        PR_Lock(shared->lock);
+        while (shared->childState == SharedData::CHILD_STARTING)
+            PR_WaitCondVar(shared->signal, PR_INTERVAL_NO_TIMEOUT);
+        JS_ASSERT(shared->childState != SharedData::CHILD_DONE);
+        ok = (shared->childState == SharedData::CHILD_RUNNING);
+        PR_Unlock(shared->lock);
+
+        CHECK(ok);
+
+        if (setGCIsNeeded) {
+            /*
+             * Use JS internal API to set the GC trigger flag after we know
+             * that the child is in a request and is about to run an infinite
+             * loop. Then run the GC with JSRuntime->gcIsNeeded flag set.
+             */
+            js::AutoLockGC lock(rt);
+            js::TriggerGC(rt);
+        }
+
+        JS_GC(cx);
+
+        PR_Lock(shared->lock);
+        shared->childShouldStop = true;
+        while (shared->childState == SharedData::CHILD_RUNNING) {
+            JS_TriggerOperationCallback(shared->childContext);
+            PR_WaitCondVar(shared->signal, PR_INTERVAL_NO_TIMEOUT);
+        }
+        JS_ASSERT(shared->childState != SharedData::CHILD_STARTING);
+        ok = (shared->childState == SharedData::CHILD_DONE);
+        PR_Unlock(shared->lock);
+
+        JS_SetGCCallback(cx, oldGCCallback);
+
+        PR_JoinThread(thread);
+
+        delete shared;
+        shared = NULL;
+
+        return true;
+    }
+
+
+END_TEST(testThreadGC_bug590533)
+
+#endif
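
The test above exercises two JSAPI hooks in tandem: a GC callback that vetoes
the GC on non-main threads, and an operation callback that stops the child's
busy loop. A condensed sketch of the main-thread protocol, using the names
defined in the test:

    // Veto JSGC_BEGIN on every thread except ours.
    JSGCCallback old = JS_SetGCCallback(cx, CancelNonMainThreadGCCallback);
    JS_GC(cx);                                          // proceeds despite the busy child
    shared->childShouldStop = true;                     // taken under shared->lock in the test
    JS_TriggerOperationCallback(shared->childContext);  // child's callback returns false
    JS_SetGCCallback(cx, old);                          // restore the previous hook
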
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -3044,17 +3044,17 @@ JS_FreezeObject(JSContext *cx, JSObject 
 
 JS_PUBLIC_API(JSBool)
 JS_DeepFreezeObject(JSContext *cx, JSObject *obj)
 {
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, obj);
 
     /* Assume that non-extensible objects are already deep-frozen, to avoid divergence. */
-    if (obj->isExtensible())
+    if (!obj->isExtensible())
         return true;
 
     if (!obj->freeze(cx))
         return false;
 
     /* Walk slots in obj and if any value is a non-null object, seal it. */
     for (uint32 i = 0, n = obj->slotSpan(); i < n; ++i) {
         const Value &v = obj->getSlot(i);
@@ -3949,34 +3949,27 @@ JS_NewPropertyIterator(JSContext *cx, JS
     iterobj->getSlotRef(JSSLOT_ITER_INDEX).setInt32(index);
     return iterobj;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp)
 {
     jsint i;
-    JSObject *obj;
     const Shape *shape;
     JSIdArray *ida;
 
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, iterobj);
     i = iterobj->getSlot(JSSLOT_ITER_INDEX).toInt32();
     if (i < 0) {
         /* Native case: private data is a property tree node pointer. */
-        obj = iterobj->getParent();
-        JS_ASSERT(obj->isNative());
+        JS_ASSERT(iterobj->getParent()->isNative());
         shape = (Shape *) iterobj->getPrivate();
 
-        /*
-         * If the next property mapped by obj in the property tree ancestor
-         * line is not enumerable, or it's an alias, skip it and keep on trying
-         * to find an enumerable property that is still in obj.
-         */
         while (shape->previous() && (!shape->enumerable() || shape->isAlias()))
             shape = shape->previous();
 
         if (!shape->previous()) {
             JS_ASSERT(JSID_IS_EMPTY(shape->id));
             *idp = JSID_VOID;
         } else {
             iterobj->setPrivate(const_cast<Shape *>(shape->previous()));
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -57,19 +57,19 @@
  *
  * NB: the capacity and length of a dense array are entirely unrelated!  The
  * length may be greater than, less than, or equal to the capacity.  See
  * array_length_setter for an explanation of how the first, most surprising
  * case may occur.
  *
  * Arrays are converted to use js_SlowArrayClass when any of these conditions
  * are met:
- *  - the load factor (COUNT / capacity) is less than 0.25, and there are
- *    more than MIN_SPARSE_INDEX slots total
- *  - a property is set that is not indexed (and not "length"); or
+ *  - there are more than MIN_SPARSE_INDEX slots total
+ *  - the load factor (COUNT / capacity) is less than 0.25
+ *  - a property is set that is not indexed (and not "length")
  *  - a property is defined that has non-default property attributes.
  *
  * Dense arrays do not track property creation order, so unlike other native
  * objects and slow arrays, enumerating an array does not necessarily visit the
  * properties in the order they were created.  We could instead maintain the
  * scope to track property enumeration order, but still use the fast slot
  * access.  That would have the same memory cost as just using a
  * js_SlowArrayClass, but have the same performance characteristics as a dense
@@ -110,40 +110,16 @@
 
 using namespace js;
 using namespace js::gc;
 
 /* 2^32 - 1 as a number and a string */
 #define MAXINDEX 4294967295u
 #define MAXSTR   "4294967295"
 
-/*
- * Use the limit on number of object slots for sanity and consistency (see the
- * assertion in JSObject::makeDenseArraySlow).
- */
-static inline bool
-INDEX_TOO_BIG(jsuint index)
-{
-    return index >= JSObject::NSLOTS_LIMIT;
-}
-
-static inline  bool
-INDEX_TOO_SPARSE(JSObject *array, jsuint index)
-{
-    /* Small arrays with less than 256 elements are dense, no matter what. */
-    if (index < 256)
-        return false;
-
-    /*
-     * Otherwise if the index becomes too large or is more than 256 past
-     * the current capacity, we have to slowify.
-     */
-    return INDEX_TOO_BIG(index) || (index > array->getDenseArrayCapacity() + 256);
-}
-
 static inline bool
 ENSURE_SLOW_ARRAY(JSContext *cx, JSObject *obj)
 {
     return obj->getClass() == &js_SlowArrayClass ||
            obj->makeDenseArraySlow(cx);
 }
 
 /*
@@ -305,16 +281,44 @@ BigIndexToId(JSContext *cx, JSObject *ob
         if (!atom)
             return JS_FALSE;
     }
 
     *idp = ATOM_TO_JSID(atom);
     return JS_TRUE;
 }
 
+bool
+JSObject::willBeSparseDenseArray(uintN requiredCapacity, uintN newElementsHint)
+{
+    JS_ASSERT(isDenseArray());
+    JS_ASSERT(requiredCapacity > MIN_SPARSE_INDEX);
+
+    uintN cap = numSlots();
+    JS_ASSERT(requiredCapacity >= cap);
+
+    if (requiredCapacity >= JSObject::NSLOTS_LIMIT)
+        return true;
+    
+    uintN minimalDenseCount = requiredCapacity / 4;
+    if (newElementsHint >= minimalDenseCount)
+        return false;
+    minimalDenseCount -= newElementsHint;
+
+    if (minimalDenseCount > cap)
+        return true;
+    
+    Value *elems = getDenseArrayElements();
+    for (uintN i = 0; i < cap; i++) {
+        if (!elems[i].isMagic(JS_ARRAY_HOLE) && !--minimalDenseCount)
+            return false;
+    }
+    return true;
+}
+
 static bool
 ReallyBigIndexToId(JSContext* cx, jsdouble index, jsid* idp)
 {
     return js_ValueToStringId(cx, DoubleValue(index), idp);
 }
 
 static bool
 IndexToId(JSContext* cx, JSObject* obj, jsdouble index, JSBool* hole, jsid* idp,
@@ -434,29 +438,33 @@ GetElements(JSContext *cx, JSObject *aob
  */
 static JSBool
 SetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, const Value &v)
 {
     JS_ASSERT(index >= 0);
 
     if (obj->isDenseArray()) {
         /* Predicted/prefetched code should favor the remains-dense case. */
-        if (index <= jsuint(-1)) {
+        JSObject::EnsureDenseResult result = JSObject::ED_SPARSE;
+        do {
+            if (index > jsuint(-1))
+                break;
             jsuint idx = jsuint(index);
-            if (!INDEX_TOO_SPARSE(obj, idx)) {
-                JS_ASSERT(idx + 1 > idx);
-                if (!obj->ensureDenseArrayElements(cx, idx + 1))
-                    return JS_FALSE;
-                if (idx >= obj->getArrayLength())
-                    obj->setArrayLength(idx + 1);
-                obj->setDenseArrayElement(idx, v);
-                return JS_TRUE;
-            }
-        }
-
+            result = obj->ensureDenseArrayElements(cx, idx, 1);
+            if (result != JSObject::ED_OK)
+                break;
+            if (idx >= obj->getArrayLength())
+                obj->setArrayLength(idx + 1);
+            obj->setDenseArrayElement(idx, v);
+            return true;
+        } while (false);
+
+        if (result == JSObject::ED_FAILED)
+            return false;
+        JS_ASSERT(result == JSObject::ED_SPARSE);
         if (!obj->makeDenseArraySlow(cx))
             return JS_FALSE;
     }
 
     AutoIdRooter idr(cx);
 
     if (!IndexToId(cx, obj, index, NULL, idr.addr(), JS_TRUE))
         return JS_FALSE;
@@ -469,23 +477,17 @@ SetArrayElement(JSContext *cx, JSObject 
 #ifdef JS_TRACER
 JSBool JS_FASTCALL
 js_EnsureDenseArrayCapacity(JSContext *cx, JSObject *obj, jsint i)
 {
 #ifdef DEBUG
     Class *origObjClasp = obj->clasp; 
 #endif
     jsuint u = jsuint(i);
-    jsuint capacity = obj->getDenseArrayCapacity();
-    if (u < capacity)
-        return true;
-    if (INDEX_TOO_SPARSE(obj, u))
-        return false;
-
-    JSBool ret = obj->ensureDenseArrayElements(cx, u + 1);
+    JSBool ret = (obj->ensureDenseArrayElements(cx, u, 1) == JSObject::ED_OK);
 
     /* Partially check the CallInfo's storeAccSet is correct. */
     JS_ASSERT(obj->clasp == origObjClasp);
     return ret;
 }
 /* This function and its callees do not touch any object's .clasp field. */
 JS_DEFINE_CALLINFO_3(extern, BOOL, js_EnsureDenseArrayCapacity, CONTEXT, OBJECT, INT32,
                      0, nanojit::ACCSET_STORE_ANY & ~tjit::ACCSET_OBJ_CLASP)
@@ -796,30 +798,39 @@ array_setProperty(JSContext *cx, JSObjec
     uint32 i;
 
     if (JSID_IS_ATOM(id, cx->runtime->atomState.lengthAtom))
         return array_length_setter(cx, obj, id, vp, strict);
 
     if (!obj->isDenseArray())
         return js_SetProperty(cx, obj, id, vp, strict);
 
-    if (!js_IdIsIndex(id, &i) || js_PrototypeHasIndexedProperties(cx, obj) ||
-        INDEX_TOO_SPARSE(obj, i)) {
-        if (!obj->makeDenseArraySlow(cx))
-            return false;
-        return js_SetProperty(cx, obj, id, vp, strict);
-    }
-
-    if (!obj->ensureDenseArrayElements(cx, i + 1))
+    do {
+        if (!js_IdIsIndex(id, &i))
+            break;
+        if (js_PrototypeHasIndexedProperties(cx, obj))
+            break;
+
+        JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, i, 1);
+        if (result != JSObject::ED_OK) {
+            if (result == JSObject::ED_FAILED)
+                return false;
+            JS_ASSERT(result == JSObject::ED_SPARSE);
+            break;
+        }
+
+        if (i >= obj->getArrayLength())
+            obj->setArrayLength(i + 1);
+        obj->setDenseArrayElement(i, *vp);
+        return true;
+    } while (false);
+
+    if (!obj->makeDenseArraySlow(cx))
         return false;
-
-    if (i >= obj->getArrayLength())
-        obj->setArrayLength(i + 1);
-    obj->setDenseArrayElement(i, *vp);
-    return true;
+    return js_SetProperty(cx, obj, id, vp, strict);
 }
 
 static JSBool
 slowarray_setProperty(JSContext *cx, JSObject *obj, jsid id, Value *vp, JSBool strict)
 {
     JS_ASSERT(obj->isSlowArray());
 
     if (JSID_IS_ATOM(id, cx->runtime->atomState.lengthAtom))
@@ -856,17 +867,17 @@ array_defineProperty(JSContext *cx, JSOb
 {
     uint32 i = 0;       // init to shut GCC up
     JSBool isIndex;
 
     if (JSID_IS_ATOM(id, cx->runtime->atomState.lengthAtom))
         return JS_TRUE;
 
     isIndex = js_IdIsIndex(id, &i);
-    if (!isIndex || attrs != JSPROP_ENUMERATE || !obj->isDenseArray() || INDEX_TOO_SPARSE(obj, i)) {
+    if (!isIndex || attrs != JSPROP_ENUMERATE) {
         if (!ENSURE_SLOW_ARRAY(cx, obj))
             return JS_FALSE;
         return js_DefineProperty(cx, obj, id, value, getter, setter, attrs);
     }
 
     Value tmp = *value;
     return array_setProperty(cx, obj, id, &tmp, false);
 }
@@ -910,30 +921,19 @@ array_deleteProperty(JSContext *cx, JSOb
     return JS_TRUE;
 }
 
 static void
 array_trace(JSTracer *trc, JSObject *obj)
 {
     JS_ASSERT(obj->isDenseArray());
 
-    size_t holes = 0;
     uint32 capacity = obj->getDenseArrayCapacity();
-    for (uint32 i = 0; i < capacity; i++) {
-        Value v = obj->getDenseArrayElement(i);
-        if (v.isMagic(JS_ARRAY_HOLE))
-            ++holes;
-        else
-            MarkValue(trc, obj->getDenseArrayElement(i), "dense_array_elems");
-    }
-
-    if (IS_GC_MARKING_TRACER(trc) && holes > MIN_SPARSE_INDEX && holes > capacity / 4 * 3) {
-        /* This might fail, in which case we don't slowify it. */
-        static_cast<GCMarker *>(trc)->arraysToSlowify.append(obj);
-    }
+    for (uint32 i = 0; i < capacity; i++)
+        MarkValue(trc, obj->getDenseArrayElement(i), "dense_array_elems");
 }
 
 static JSBool
 array_fix(JSContext *cx, JSObject *obj, bool *success, AutoIdVector *props)
 {
     JS_ASSERT(obj->isDenseArray());
 
     /*
@@ -1374,32 +1374,38 @@ static JSBool
 InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, Value *vector)
 {
     JS_ASSERT(count < MAXINDEX);
 
     /*
      * Optimize for dense arrays so long as adding the given set of elements
      * wouldn't otherwise make the array slow.
      */
-    if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
-        start <= MAXINDEX - count && !INDEX_TOO_BIG(start + count)) {
-
+    do {
+        if (!obj->isDenseArray())
+            break;
+        if (js_PrototypeHasIndexedProperties(cx, obj))
+            break;
+
+        JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, start, count);
+        if (result != JSObject::ED_OK) {
+            if (result == JSObject::ED_FAILED)
+                return false;
+            JS_ASSERT(result == JSObject::ED_SPARSE);
+            break;
+        }
         jsuint newlen = start + count;
-        JS_ASSERT(jsdouble(start) + count == jsdouble(newlen));
-        if (!obj->ensureDenseArrayElements(cx, newlen))
-            return JS_FALSE;
-
         if (newlen > obj->getArrayLength())
             obj->setArrayLength(newlen);
 
         JS_ASSERT(count < uint32(-1) / sizeof(Value));
         memcpy(obj->getDenseArrayElements() + start, vector, sizeof(jsval) * count);
         JS_ASSERT_IF(count != 0, !obj->getDenseArrayElement(newlen - 1).isMagic(JS_ARRAY_HOLE));
-        return JS_TRUE;
-    }
+        return true;
+    } while (false);
 
     Value* end = vector + count;
     while (vector != end && start < MAXINDEX) {
         if (!JS_CHECK_OPERATION_LIMIT(cx) ||
             !SetArrayElement(cx, obj, start++, *vector++)) {
             return JS_FALSE;
         }
     }
@@ -1431,17 +1437,19 @@ static JSBool
 InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, const Value *vector)
 {
     JS_ASSERT(obj->isArray());
 
     JS_ASSERT(obj->isDenseArray());
     obj->setArrayLength(length);
     if (!vector || !length)
         return true;
-    if (!obj->ensureDenseArrayElements(cx, length))
+
+    /* Use ensureSlots to bypass the sparse-array checks in ensureDenseArrayElements. */
+    if (!obj->ensureSlots(cx, length))
         return false;
     memcpy(obj->getDenseArrayElements(), vector, length * sizeof(Value));
     return true;
 }
 
 /*
  * Perl-inspired join, reverse, and sort.
  */
@@ -1465,47 +1473,57 @@ static JSBool
 array_reverse(JSContext *cx, uintN argc, Value *vp)
 {
     jsuint len;
     JSObject *obj = ComputeThisFromVp(cx, vp);
     if (!obj || !js_GetLengthProperty(cx, obj, &len))
         return JS_FALSE;
     vp->setObject(*obj);
 
-    if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj)) {
+    do {
+        if (!obj->isDenseArray())
+            break;
+        if (js_PrototypeHasIndexedProperties(cx, obj))
+            break;
+        
         /* An empty array or an array with no elements is already reversed. */
         if (len == 0 || obj->getDenseArrayCapacity() == 0)
             return JS_TRUE;
 
         /*
          * It's actually surprisingly complicated to reverse an array due to the
          * orthogonality of array length and array capacity while handling
          * leading and trailing holes correctly.  Reversing seems less likely to
          * be a common operation than other array mass-mutation methods, so for
          * now just take a probably-small memory hit (in the absence of too many
          * holes in the array at its start) and ensure that the capacity is
          * sufficient to hold all the elements in the array if it were full.
          */
-        if (!obj->ensureDenseArrayElements(cx, len))
-            return JS_FALSE;
+        JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, len, 0);
+        if (result != JSObject::ED_OK) {
+            if (result == JSObject::ED_FAILED)
+                return false;
+            JS_ASSERT(result == JSObject::ED_SPARSE);
+            break;
+        }
 
         uint32 lo = 0, hi = len - 1;
         for (; lo < hi; lo++, hi--) {
             Value tmp = obj->getDenseArrayElement(lo);
             obj->setDenseArrayElement(lo, obj->getDenseArrayElement(hi));
             obj->setDenseArrayElement(hi, tmp);
         }
 
         /*
          * Per ECMA-262, don't update the length of the array, even if the new
          * array has trailing holes (and thus the original array began with
          * holes).
          */
         return JS_TRUE;
-    }
+    } while (false);
 
     AutoValueRooter tvr(cx);
     for (jsuint i = 0, half = len / 2; i < half; i++) {
         JSBool hole, hole2;
         if (!JS_CHECK_OPERATION_LIMIT(cx) ||
             !GetElement(cx, obj, i, &hole, tvr.addr()) ||
             !GetElement(cx, obj, len - i - 1, &hole2, vp) ||
             !SetOrDeleteArrayElement(cx, obj, len - i - 1, hole, tvr.value()) ||
@@ -1838,18 +1856,20 @@ js::array_sort(JSContext *cx, uintN argc
                 continue;
             }
 
             allStrings = allStrings && vec[newlen].isString();
 
             ++newlen;
         }
 
-        if (newlen == 0)
+        if (newlen == 0) {
+            vp->setObject(*obj);
             return true; /* The array has only holes and undefs. */
+        }
 
         /*
          * The first newlen elements of vec are copied from the array object
          * (above). The remaining newlen positions are used as GC-rooted scratch
          * space for mergesort. We must clear the space before including it to
          * the root set covered by tvr.count.
          */
         Value *mergesort_tmp = vec + newlen;
@@ -1998,49 +2018,60 @@ array_push_slowly(JSContext *cx, JSObjec
     rval->setNumber(newlength);
     return js_SetLengthProperty(cx, obj, newlength);
 }
 
 static JSBool
 array_push1_dense(JSContext* cx, JSObject* obj, const Value &v, Value *rval)
 {
     uint32 length = obj->getArrayLength();
-    if (INDEX_TOO_SPARSE(obj, length)) {
-        if (!obj->makeDenseArraySlow(cx))
-            return JS_FALSE;
-        Value tmp = v;
-        return array_push_slowly(cx, obj, 1, &tmp, rval);
-    }
-
-    if (!obj->ensureDenseArrayElements(cx, length + 1))
-        return JS_FALSE;
-    obj->setArrayLength(length + 1);
-
-    JS_ASSERT(obj->getDenseArrayElement(length).isMagic(JS_ARRAY_HOLE));
-    obj->setDenseArrayElement(length, v);
-    rval->setNumber(obj->getArrayLength());
-    return JS_TRUE;
+    do {
+        JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, length, 1);
+        if (result != JSObject::ED_OK) {
+            if (result == JSObject::ED_FAILED)
+                return false;
+            JS_ASSERT(result == JSObject::ED_SPARSE);
+            break;
+        }
+
+        obj->setArrayLength(length + 1);
+
+        JS_ASSERT(obj->getDenseArrayElement(length).isMagic(JS_ARRAY_HOLE));
+        obj->setDenseArrayElement(length, v);
+        rval->setNumber(obj->getArrayLength());
+        return true;
+    } while (false);
+
+    if (!obj->makeDenseArraySlow(cx))
+        return false;
+    Value tmp = v;
+    return array_push_slowly(cx, obj, 1, &tmp, rval);
 }
 
 JS_ALWAYS_INLINE JSBool
 ArrayCompPushImpl(JSContext *cx, JSObject *obj, const Value &v)
 {
     JS_ASSERT(obj->isDenseArray());
     uint32_t length = obj->getArrayLength();
     JS_ASSERT(length <= obj->getDenseArrayCapacity());
 
     if (length == obj->getDenseArrayCapacity()) {
         if (length > JS_ARGS_LENGTH_MAX) {
             JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
                                    JSMSG_ARRAY_INIT_TOO_BIG);
             return JS_FALSE;
         }
 
-        if (!obj->ensureDenseArrayElements(cx, length + 1))
-            return JS_FALSE;
+        /*
+         * Array comprehension cannot add holes to the array and never leaks
+         * the array before it is fully initialized. So we can use ensureSlots
+         * instead of ensureDenseArrayElements.
+         */
+        if (!obj->ensureSlots(cx, length + 1))
+            return false;
     }
     obj->setArrayLength(length + 1);
     obj->setDenseArrayElement(length, v);
     return JS_TRUE;
 }
 
 JSBool
 js_ArrayCompPush(JSContext *cx, JSObject *obj, const Value &vp)
@@ -2188,26 +2219,37 @@ array_unshift(JSContext *cx, uintN argc,
     JSObject *obj = ComputeThisFromVp(cx, vp);
     if (!obj || !js_GetLengthProperty(cx, obj, &length))
         return JS_FALSE;
     newlen = length;
     if (argc > 0) {
         /* Slide up the array to make room for argc at the bottom. */
         argv = JS_ARGV(cx, vp);
         if (length > 0) {
-            if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
-                !INDEX_TOO_SPARSE(obj, unsigned(newlen + argc))) {
-                JS_ASSERT(newlen + argc == length + argc);
-                if (!obj->ensureDenseArrayElements(cx, length + argc))
-                    return JS_FALSE;
+            bool optimized = false;
+            do {
+                if (!obj->isDenseArray())
+                    break;
+                if (js_PrototypeHasIndexedProperties(cx, obj))
+                    break;
+                JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, length, argc);
+                if (result != JSObject::ED_OK) {
+                    if (result == JSObject::ED_FAILED)
+                        return false;
+                    JS_ASSERT(result == JSObject::ED_SPARSE);
+                    break;
+                }
                 Value *elems = obj->getDenseArrayElements();
                 memmove(elems + argc, elems, length * sizeof(jsval));
                 for (uint32 i = 0; i < argc; i++)
                     obj->setDenseArrayElement(i, MagicValue(JS_ARRAY_HOLE));
-            } else {
+                optimized = true;
+            } while (false);
+
+            if (!optimized) {
                 last = length;
                 jsdouble upperIndex = last + argc;
                 AutoValueRooter tvr(cx);
                 do {
                     --last, --upperIndex;
                     if (!JS_CHECK_OPERATION_LIMIT(cx) ||
                         !GetElement(cx, obj, last, &hole, tvr.addr()) ||
                         !SetOrDeleteArrayElement(cx, obj, upperIndex, hole, tvr.value())) {
@@ -2317,31 +2359,45 @@ array_splice(JSContext *cx, uintN argc, 
                 return JS_FALSE;
         }
     }
 
     /* Find the direction (up or down) to copy and make way for argv. */
     if (argc > count) {
         delta = (jsuint)argc - count;
         last = length;
-        if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
-            length <= obj->getDenseArrayCapacity() &&
-            (length == 0 || !obj->getDenseArrayElement(length - 1).isMagic(JS_ARRAY_HOLE))) {
-            if (!obj->ensureDenseArrayElements(cx, length + delta))
-                return JS_FALSE;
-
+        bool optimized = false;
+        do {
+            if (!obj->isDenseArray())
+                break;
+            if (js_PrototypeHasIndexedProperties(cx, obj))
+                break;
+            if (length > obj->getDenseArrayCapacity())
+                break;
+            if (length != 0 && obj->getDenseArrayElement(length - 1).isMagic(JS_ARRAY_HOLE))
+                break;
+            JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, length, delta);
+            if (result != JSObject::ED_OK) {
+                if (result == JSObject::ED_FAILED)
+                    return false;
+                JS_ASSERT(result == JSObject::ED_SPARSE);
+                break;
+            }
             Value *arraybeg = obj->getDenseArrayElements();
             Value *srcbeg = arraybeg + last - 1;
             Value *srcend = arraybeg + end - 1;
             Value *dstbeg = srcbeg + delta;
             for (Value *src = srcbeg, *dst = dstbeg; src > srcend; --src, --dst)
                 *dst = *src;
 
             obj->setArrayLength(obj->getArrayLength() + delta);
-        } else {
+            optimized = true;
+        } while (false);
+
+        if (!optimized) {
             /* (uint) end could be 0, so we can't use a vanilla >= test. */
             while (last-- > end) {
                 if (!JS_CHECK_OPERATION_LIMIT(cx) ||
                     !GetElement(cx, obj, last, &hole, tvr.addr()) ||
                     !SetOrDeleteArrayElement(cx, obj, last + delta, hole, tvr.value())) {
                     return JS_FALSE;
                 }
             }
@@ -2972,17 +3028,19 @@ JS_DEFINE_CALLINFO_3(extern, OBJECT, js_
 #endif
 
 JSObject* JS_FASTCALL
 js_NewPreallocatedArray(JSContext* cx, JSObject* proto, int32 len)
 {
     JSObject *obj = js_NewEmptyArray(cx, proto, len);
     if (!obj)
         return NULL;
-    if (!obj->ensureDenseArrayElements(cx, len))
+
+    /* Use ensureSlots to bypass the sparse-array checks in ensureDenseArrayElements. */
+    if (!obj->ensureSlots(cx, len))
         return NULL;
     return obj;
 }
 #ifdef JS_TRACER
 JS_DEFINE_CALLINFO_3(extern, OBJECT, js_NewPreallocatedArray, CONTEXT, OBJECT, INT32,
                      0, nanojit::ACCSET_STORE_ANY)
 #endif
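
The willBeSparseDenseArray heuristic added above asks whether, after growing
to requiredCapacity, fewer than a quarter of the slots would hold non-hole
values. A worked example with assumed numbers (not from the patch):

    // requiredCapacity = 1024  ->  minimalDenseCount = 1024 / 4 = 256
    // newElementsHint  = 1     ->  255 existing non-holes still required
    // numSlots()       = 200   ->  255 can never be reached: return true (go sparse)
    // numSlots()       = 600   ->  scan the slots; return false (stay dense)
    //                              as soon as the 255th non-hole is found
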
 
--- a/js/src/jsarray.h
+++ b/js/src/jsarray.h
@@ -41,16 +41,56 @@
 #define jsarray_h___
 /*
  * JS Array interface.
  */
 #include "jsprvtd.h"
 #include "jspubtd.h"
 #include "jsobj.h"
 
+/* Small arrays are dense, no matter what. */
+const uintN MIN_SPARSE_INDEX = 256;
+
+inline JSObject::EnsureDenseResult
+JSObject::ensureDenseArrayElements(JSContext *cx, uintN index, uintN extra)
+{
+    JS_ASSERT(isDenseArray());
+    uintN currentCapacity = numSlots();
+
+    uintN requiredCapacity;
+    if (extra == 1) {
+        /* Optimize for the common case. */
+        if (index < currentCapacity)
+            return ED_OK;
+        requiredCapacity = index + 1;
+        if (requiredCapacity == 0) {
+            /* Overflow. */
+            return ED_SPARSE;
+        }
+    } else {
+        requiredCapacity = index + extra;
+        if (requiredCapacity < index) {
+            /* Overflow. */
+            return ED_SPARSE;
+        }
+        if (requiredCapacity <= currentCapacity)
+            return ED_OK;
+    }
+
+    /*
+     * The extra argument also serves as a hint about the number of non-hole
+     * elements to be inserted.
+     */
+    if (requiredCapacity > MIN_SPARSE_INDEX &&
+        willBeSparseDenseArray(requiredCapacity, extra)) {
+        return ED_SPARSE;
+    }
+    return growSlots(cx, requiredCapacity) ? ED_OK : ED_FAILED;
+}
+
 extern JSBool
 js_StringIsIndex(JSString *str, jsuint *indexp);
 
 inline JSBool
 js_IdIsIndex(jsid id, jsuint *indexp)
 {
     if (JSID_IS_INT(id)) {
         jsint i;
@@ -139,19 +179,16 @@ js_InitContextBusyArrayTable(JSContext *
 
 extern JSObject *
 js_NewArrayObject(JSContext *cx, jsuint length, const js::Value *vector);
 
 /* Create an array object that starts out already made slow/sparse. */
 extern JSObject *
 js_NewSlowArrayObject(JSContext *cx);
 
-/* Minimum size at which a dense array can be made sparse. */
-const uint32 MIN_SPARSE_INDEX = 256;
-
 extern JSBool
 js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
 
 extern JSBool
 js_SetLengthProperty(JSContext *cx, JSObject *obj, jsdouble length);
 
 extern JSBool
 js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
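
Callers in jsarray.cpp consume the three-way EnsureDenseResult with the same
break-out pattern; a minimal sketch of that idiom (cx, obj, idx, and v assumed
in scope):

    JSObject::EnsureDenseResult result = obj->ensureDenseArrayElements(cx, idx, 1);
    if (result == JSObject::ED_FAILED)
        return false;                      // OOM: error already reported
    if (result == JSObject::ED_SPARSE) {
        if (!obj->makeDenseArraySlow(cx))  // too sparse: switch representations
            return false;
        // ... continue on the slow-array path (js_SetProperty etc.) ...
    } else {
        // ED_OK: slot storage now covers idx; write the element directly.
        obj->setDenseArrayElement(idx, v);
    }
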
--- a/js/src/jsclone.cpp
+++ b/js/src/jsclone.cpp
@@ -74,16 +74,19 @@ enum StructuredDataType {
     SCTAG_BOOLEAN,
     SCTAG_INDEX,
     SCTAG_STRING,
     SCTAG_DATE_OBJECT,
     SCTAG_REGEXP_OBJECT,
     SCTAG_ARRAY_OBJECT,
     SCTAG_OBJECT_OBJECT,
     SCTAG_ARRAY_BUFFER_OBJECT,
+    SCTAG_BOOLEAN_OBJECT,
+    SCTAG_STRING_OBJECT,
+    SCTAG_NUMBER_OBJECT,
     SCTAG_TYPED_ARRAY_MIN = 0xFFFF0100,
     SCTAG_TYPED_ARRAY_MAX = SCTAG_TYPED_ARRAY_MIN + TypedArray::TYPE_MAX - 1,
     SCTAG_END_OF_BUILTIN_TYPES
 };
 
 JS_STATIC_ASSERT(SCTAG_END_OF_BUILTIN_TYPES <= JS_SCTAG_USER_MIN);
 JS_STATIC_ASSERT(JS_SCTAG_USER_MIN <= JS_SCTAG_USER_MAX);
 
@@ -340,31 +343,31 @@ SCOutput::extractBuffer(uint64_t **datap
 {
     *sizep = buf.length() * sizeof(uint64_t);
     return (*datap = buf.extractRawBuffer()) != NULL;
 }
 
 JS_STATIC_ASSERT(JSString::MAX_LENGTH < UINT32_MAX);
 
 bool
-JSStructuredCloneWriter::writeString(JSString *str)
+JSStructuredCloneWriter::writeString(uint32_t tag, JSString *str)
 {
     const jschar *chars;
     size_t length;
     str->getCharsAndLength(chars, length);
-    return out.writePair(SCTAG_STRING, uint32_t(length)) && out.writeChars(chars, length);
+    return out.writePair(tag, uint32_t(length)) && out.writeChars(chars, length);
 }
 
 bool
 JSStructuredCloneWriter::writeId(jsid id)
 {
     if (JSID_IS_INT(id))
         return out.writePair(SCTAG_INDEX, uint32_t(JSID_TO_INT(id)));
     JS_ASSERT(JSID_IS_STRING(id));
-    return writeString(JSID_TO_STRING(id));
+    return writeString(SCTAG_STRING, JSID_TO_STRING(id));
 }
 
 inline void
 JSStructuredCloneWriter::checkStack()
 {
 #ifdef DEBUG
     /* To avoid making serialization O(n^2), limit stack-checking at 10. */
     const size_t MAX = 10;
@@ -491,40 +494,47 @@ JSStructuredCloneWriter::startObject(JSO
     /* Write the header for obj. */
     return out.writePair(obj->isArray() ? SCTAG_ARRAY_OBJECT : SCTAG_OBJECT_OBJECT, 0);
 }
 
 bool
 JSStructuredCloneWriter::startWrite(const js::Value &v)
 {
     if (v.isString()) {
-        return writeString(v.toString());
+        return writeString(SCTAG_STRING, v.toString());
     } else if (v.isNumber()) {
         return out.writeDouble(v.toNumber());
     } else if (v.isBoolean()) {
         return out.writePair(SCTAG_BOOLEAN, v.toBoolean());
     } else if (v.isNull()) {
         return out.writePair(SCTAG_NULL, 0);
     } else if (v.isUndefined()) {
         return out.writePair(SCTAG_UNDEFINED, 0);
     } else if (v.isObject()) {
         JSObject *obj = &v.toObject();
         if (obj->isRegExp()) {
             RegExp *re = RegExp::extractFrom(obj);
             return out.writePair(SCTAG_REGEXP_OBJECT, re->getFlags()) &&
-                   writeString(re->getSource());
+                   writeString(SCTAG_STRING, re->getSource());
         } else if (obj->isDate()) {
             jsdouble d = js_DateGetMsecSinceEpoch(context(), obj);
             return out.writePair(SCTAG_DATE_OBJECT, 0) && out.writeDouble(d);
         } else if (obj->isObject() || obj->isArray()) {
             return startObject(obj);
         } else if (js_IsTypedArray(obj)) {
             return writeTypedArray(obj);
         } else if (js_IsArrayBuffer(obj) && ArrayBuffer::fromJSObject(obj)) {
             return writeArrayBuffer(obj);
+        } else if (obj->isBoolean()) {
+            return out.writePair(SCTAG_BOOLEAN_OBJECT, obj->getPrimitiveThis().toBoolean());
+        } else if (obj->isNumber()) {
+            return out.writePair(SCTAG_NUMBER_OBJECT, 0) &&
+                   out.writeDouble(obj->getPrimitiveThis().toNumber());
+        } else if (obj->isString()) {
+            return writeString(SCTAG_STRING_OBJECT, obj->getPrimitiveThis().toString());
         }
 
         const JSStructuredCloneCallbacks *cb = context()->runtime->structuredCloneCallbacks;
         if (cb)
             return cb->write(context(), this, obj);
         /* else fall through */
     }
 
@@ -569,16 +579,27 @@ JSStructuredCloneWriter::write(const Val
             memory.remove(obj);
             objs.popBack();
             counts.popBack();
         }
     }
     return true;
 }
 
+bool
+JSStructuredCloneReader::checkDouble(jsdouble d)
+{
+    if (IsNonCanonicalizedNaN(d)) {
+        JS_ReportErrorNumber(context(), js_GetErrorMessage, NULL,
+                             JSMSG_SC_BAD_SERIALIZED_DATA, "unrecognized NaN");
+        return false;
+    }
+    return true;
+}
+
 class Chars {
     JSContext *cx;
     jschar *p;
   public:
     Chars() : p(NULL) {}
     ~Chars() { if (p) cx->free(p); }
 
     bool allocate(JSContext *cx, size_t len) {
@@ -664,31 +685,52 @@ JSStructuredCloneReader::startRead(Value
         vp->setNull();
         break;
 
       case SCTAG_UNDEFINED:
         vp->setUndefined();
         break;
 
       case SCTAG_BOOLEAN:
+      case SCTAG_BOOLEAN_OBJECT:
         vp->setBoolean(!!data);
+        if (tag == SCTAG_BOOLEAN_OBJECT && !js_PrimitiveToObject(context(), vp))
+            return false;
         break;
 
-      case SCTAG_STRING: {
+      case SCTAG_STRING:
+      case SCTAG_STRING_OBJECT: {
         JSString *str = readString(data);
         if (!str)
             return false;
         vp->setString(str);
+        if (tag == SCTAG_STRING_OBJECT && !js_PrimitiveToObject(context(), vp))
+            return false;
+        break;
+      }
+
+      case SCTAG_NUMBER_OBJECT: {
+        jsdouble d;
+        if (!in.readDouble(&d) || !checkDouble(d))
+            return false;
+        vp->setDouble(d);
+        if (!js_PrimitiveToObject(context(), vp))
+            return false;
         break;
       }
 
       case SCTAG_DATE_OBJECT: {
         jsdouble d;
-        if (!in.readDouble(&d))
+        if (!in.readDouble(&d) || !checkDouble(d))
             return false;
+        if (d == d && d != TIMECLIP(d)) {
+            JS_ReportErrorNumber(context(), js_GetErrorMessage, NULL, JSMSG_SC_BAD_SERIALIZED_DATA,
+                                 "date");
+            return false;
+        }
         JSObject *obj = js_NewDateObjectMsec(context(), d);
         if (!obj)
             return false;
         vp->setObject(*obj);
         break;
       }
 
       case SCTAG_REGEXP_OBJECT: {
@@ -725,21 +767,18 @@ JSStructuredCloneReader::startRead(Value
       }
 
       case SCTAG_ARRAY_BUFFER_OBJECT:
         return readArrayBuffer(data, vp);
 
       default: {
         if (tag <= SCTAG_FLOAT_MAX) {
             jsdouble d = ReinterpretPairAsDouble(tag, data);
-            if (IsNonCanonicalizedNaN(d)) {
-                JS_ReportErrorNumber(context(), js_GetErrorMessage, NULL,
-                                     JSMSG_SC_BAD_SERIALIZED_DATA, "unrecognized NaN");
+            if (!checkDouble(d))
                 return false;
-            }
             vp->setNumber(d);
             break;
         }
 
         if (SCTAG_TYPED_ARRAY_MIN <= tag && tag <= SCTAG_TYPED_ARRAY_MAX)
             return readTypedArray(tag, data, vp);
 
         const JSStructuredCloneCallbacks *cb = context()->runtime->structuredCloneCallbacks;
--- a/js/src/jsclone.h
+++ b/js/src/jsclone.h
@@ -113,16 +113,17 @@ struct JSStructuredCloneReader {
         : in(in), objs(in.context()) {}
 
     js::SCInput &input() { return in; }
     bool read(js::Value *vp);
 
   private:
     JSContext *context() { return in.context(); }
 
+    bool checkDouble(jsdouble d);
     JSString *readString(uint32_t nchars);
     bool readTypedArray(uint32_t tag, uint32_t nelems, js::Value *vp);
     bool readArrayBuffer(uint32_t nbytes, js::Value *vp);
     bool readId(jsid *idp);
     bool startRead(js::Value *vp);
 
     js::SCInput &in;
 
@@ -140,17 +141,17 @@ struct JSStructuredCloneWriter {
 
     bool write(const js::Value &v);
 
     js::SCOutput &output() { return out; }
 
   private:
     JSContext *context() { return out.context(); }
 
-    bool writeString(JSString *str);
+    bool writeString(uint32_t tag, JSString *str);
     bool writeId(jsid id);
     bool writeArrayBuffer(JSObject *obj);
     bool writeTypedArray(JSObject *obj);
     bool startObject(JSObject *obj);
     bool startWrite(const js::Value &v);
 
     inline void checkStack();
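
The three new tags let primitive wrapper objects (new Boolean, new Number,
new String) round-trip through structured clone: the writer records the tag
plus the unwrapped primitive, and the reader decodes the primitive and
reboxes it with js_PrimitiveToObject. Condensed from the writer logic in
jsclone.cpp above:

    if (obj->isBoolean())
        return out.writePair(SCTAG_BOOLEAN_OBJECT, obj->getPrimitiveThis().toBoolean());
    if (obj->isNumber())
        return out.writePair(SCTAG_NUMBER_OBJECT, 0) &&
               out.writeDouble(obj->getPrimitiveThis().toNumber());
    if (obj->isString())   // string objects reuse the string payload format
        return writeString(SCTAG_STRING_OBJECT, obj->getPrimitiveThis().toString());
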
 
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -1840,54 +1840,56 @@ js_InvokeOperationCallback(JSContext *cx
 {
     JSRuntime *rt = cx->runtime;
     JSThreadData *td = JS_THREAD_DATA(cx);
 
     JS_ASSERT_REQUEST_DEPTH(cx);
     JS_ASSERT(td->interruptFlags != 0);
 
     /*
-     * Reset the callback counter first, then yield. If another thread is racing
-     * us here we will accumulate another callback request which will be
-     * serviced at the next opportunity.
+     * Reset the callback counter first, then run GC and yield. If another
+     * thread is racing us here we will accumulate another callback request
+     * which will be serviced at the next opportunity.
      */
     JS_LOCK_GC(rt);
     td->interruptFlags = 0;
 #ifdef JS_THREADSAFE
     JS_ATOMIC_DECREMENT(&rt->interruptCounter);
 #endif
     JS_UNLOCK_GC(rt);
 
-    /*
-     * Unless we are going to run the GC, we automatically yield the current
-     * context every time the operation callback is hit since we might be
-     * called as a result of an impending GC, which would deadlock if we do
-     * not yield. Operation callbacks are supposed to happen rarely (seconds,
-     * not milliseconds) so it is acceptable to yield at every callback.
-     */
     if (rt->gcIsNeeded) {
         js_GC(cx, GC_NORMAL);
 
         /*
          * On trace we can exceed the GC quota, see comments in NewGCArena. So
          * we check the quota and report OOM here when we are off trace.
          */
         bool delayedOutOfMemory;
         JS_LOCK_GC(rt);
         delayedOutOfMemory = (rt->gcBytes > rt->gcMaxBytes);
         JS_UNLOCK_GC(rt);
         if (delayedOutOfMemory) {
             js_ReportOutOfMemory(cx);
             return false;
         }
     }
+    
 #ifdef JS_THREADSAFE
-    else {
-        JS_YieldRequest(cx);
-    }
+    /*
+     * We automatically yield the current context every time the operation
+     * callback is hit since we might be called as a result of an impending
+     * GC on another thread, which would deadlock if we do not yield.
+     * Operation callbacks are supposed to happen rarely (seconds, not
+     * milliseconds) so it is acceptable to yield at every callback.
+     *
+     * As the GC can be canceled before it does any request checks, we yield
+     * even if rt->gcIsNeeded was true above. See bug 590533.
+     */
+    JS_YieldRequest(cx);
 #endif
 
     JSOperationCallback cb = cx->operationCallback;
 
     /*
      * Important: Additional callbacks can occur inside the callback handler
      * if it re-enters the JS engine. The embedding must ensure that the
      * callback is disconnected before attempting such re-entry.
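
After this change the callback path always yields in threadsafe builds; a
condensed sketch of the resulting control flow (not a verbatim excerpt):

    td->interruptFlags = 0;        // reset first; racing threads re-request
    if (rt->gcIsNeeded)
        js_GC(cx, GC_NORMAL);      // a GC callback may cancel this GC...
    #ifdef JS_THREADSAFE
    JS_YieldRequest(cx);           // ...so yield unconditionally (bug 590533)
    #endif
    JSOperationCallback cb = cx->operationCallback;
    return !cb || cb(cx);          // embedder decides whether to continue
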
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -923,17 +923,17 @@ struct TraceMonitor {
     /*
      * Cached storage to use when executing on trace. While we may enter nested
      * traces, we always reuse the outer trace's storage, so never need more
      * than one of these.
      */
     TraceNativeStorage      *storage;
 
     /*
-     * There are 5 allocators here.  This might seem like overkill, but they
+     * There are 4 allocators here.  This might seem like overkill, but they
      * have different lifecycles, and by keeping them separate we keep the
      * amount of retained memory down significantly.  They are flushed (ie.
      * all the allocated memory is freed) periodically.
      *
      * - dataAlloc has the lifecycle of the monitor.  It's flushed only when
      *   the monitor is flushed.  It's used for fragments.
      *
      * - traceAlloc has the same flush lifecycle as the dataAlloc, but it is
@@ -941,29 +941,24 @@ struct TraceMonitor {
      *   if recording aborts.  So you can put things in it that are only
      *   reachable on a successful record/compile cycle like GuardRecords and
      *   SideExits.
      *
      * - tempAlloc is flushed after each recording, successful or not.  It's
      *   used to store LIR code and for all other elements in the LIR
      *   pipeline.
      *
-     * - reTempAlloc is just like tempAlloc, but is used for regexp
-     *   compilation in RegExpNativeCompiler rather than normal compilation in
-     *   TraceRecorder.
-     *
      * - codeAlloc has the same lifetime as dataAlloc, but its API is
      *   different (CodeAlloc vs. VMAllocator).  It's used for native code.
      *   It's also a good idea to keep code and data separate to avoid I-cache
      *   vs. D-cache issues.
      */
     VMAllocator*            dataAlloc;
     VMAllocator*            traceAlloc;
     VMAllocator*            tempAlloc;
-    VMAllocator*            reTempAlloc;
     nanojit::CodeAlloc*     codeAlloc;
     nanojit::Assembler*     assembler;
     FrameInfoCache*         frameCache;
 
     /* This gets incremented every time the monitor is flushed. */
     uintN                   flushEpoch;
 
     Oracle*                 oracle;
--- a/js/src/jsdate.cpp
+++ b/js/src/jsdate.cpp
@@ -147,17 +147,16 @@ using namespace js;
  * how this does on win32.  (Tried it on irix.)  Types could use a
  * general going-over.
  */
 
 /*
  * Supporting functions - ECMA 15.9.1.*
  */
 
-#define HalfTimeDomain  8.64e15
 #define HoursPerDay     24.0
 #define MinutesPerDay   (HoursPerDay * MinutesPerHour)
 #define MinutesPerHour  60.0
 #define SecondsPerDay   (MinutesPerDay * SecondsPerMinute)
 #define SecondsPerHour  (MinutesPerHour * SecondsPerMinute)
 #define SecondsPerMinute 60.0
 
 #if defined(XP_WIN) || defined(XP_OS2)
@@ -477,20 +476,16 @@ static intN
 msFromTime(jsdouble t)
 {
     intN result = (intN) fmod(t, msPerSecond);
     if (result < 0)
         result += (intN)msPerSecond;
     return result;
 }
 
-#define TIMECLIP(d) ((JSDOUBLE_IS_FINITE(d) \
-                      && !((d < 0 ? -d : d) > HalfTimeDomain)) \
-                     ? js_DoubleToInteger(d + (+0.)) : js_NaN)
-
 /**
  * end of ECMA 'support' functions
  */
 
 /*
  * Other Support routines and definitions
  */
 
--- a/js/src/jsdate.h
+++ b/js/src/jsdate.h
@@ -49,16 +49,22 @@
 extern js::Class js_DateClass;
 
 inline bool
 JSObject::isDate() const
 {
     return getClass() == &js_DateClass;
 }
 
+#define HalfTimeDomain  8.64e15
+
+#define TIMECLIP(d) ((JSDOUBLE_IS_FINITE(d) \
+                      && !((d < 0 ? -d : d) > HalfTimeDomain)) \
+                     ? js_DoubleToInteger(d + (+0.)) : js_NaN)
+
 extern JSObject *
 js_InitDateClass(JSContext *cx, JSObject *obj);
 
 /*
  * These functions provide a C interface to the date/time object
  */
 
 /*
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -110,16 +110,38 @@ IsScriptLive(JSContext *cx, JSScript *sc
 #endif
 
 JS_PUBLIC_API(void)
 JS_SetRuntimeDebugMode(JSRuntime *rt, JSBool debug)
 {
     rt->debugMode = debug;
 }
 
+static void
+PurgeCallICs(JSContext *cx, JSScript *start)
+{
+#ifdef JS_METHODJIT
+    for (JSScript *script = start;
+         &script->links != &cx->compartment->scripts;
+         script = (JSScript *)script->links.next)
+    {
+        // Debug mode does not use call ICs.
+        if (script->debugMode)
+            continue;
+
+        JS_ASSERT(!IsScriptLive(cx, script));
+
+        if (script->jitNormal)
+            script->jitNormal->nukeScriptDependentICs();
+        if (script->jitCtor)
+            script->jitCtor->nukeScriptDependentICs();
+    }
+#endif
+}
+
 JS_FRIEND_API(JSBool)
 js_SetDebugMode(JSContext *cx, JSBool debug)
 {
     cx->compartment->debugMode = debug;
 #ifdef JS_METHODJIT
     for (JSScript *script = (JSScript *)cx->compartment->scripts.next;
          &script->links != &cx->compartment->scripts;
          script = (JSScript *)script->links.next) {
@@ -129,16 +151,22 @@ js_SetDebugMode(JSContext *cx, JSBool de
             /*
              * In the event that this fails, debug mode is left partially on,
              * leading to a small performance overhead but no loss of
              * correctness. We set the debug flag to false so that the caller
              * will not later attempt to use debugging features.
              */
             js::mjit::Recompiler recompiler(cx, script);
             if (!recompiler.recompile()) {
+                /*
+                 * If recompilation failed, we could be in a state where
+                 * remaining compiled scripts hold call IC references that
+                 * have been destroyed by recompilation. Clear those ICs now.
+                 */
+                PurgeCallICs(cx, script);
                 cx->compartment->debugMode = JS_FALSE;
                 return JS_FALSE;
             }
         }
     }
 #endif
     return JS_TRUE;
 }
--- a/js/src/jsemit.cpp
+++ b/js/src/jsemit.cpp
@@ -6804,22 +6804,18 @@ js_EmitTree(JSContext *cx, JSCodeGenerat
 
             /* Emit the usual op needed for decompilation. */
             if (!EmitEndInit(cx, cg, 1))
                 return JS_FALSE;
             break;
         }
 #endif /* JS_HAS_GENERATORS */
 
-        /*
-         * Use the slower NEWINIT for arrays in scripts containing sharps, and when
-         * the array length exceeds MIN_SPARSE_INDEX and can be slowified during GC.
-         * :FIXME: bug 607825 handle slowify case.
-         */
-        if (cg->hasSharps() || pn->pn_count >= MIN_SPARSE_INDEX) {
+        /* Use the slower NEWINIT for arrays in scripts containing sharps. */
+        if (cg->hasSharps()) {
             if (!EmitNewInit(cx, cg, JSProto_Array, pn, sharpnum))
                 return JS_FALSE;
         } else {
             ptrdiff_t off = js_EmitN(cx, cg, JSOP_NEWARRAY, 3);
             if (off < 0)
                 return JS_FALSE;
             pc = CG_CODE(cg, off);
             SET_UINT24(pc, pn->pn_count);
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1369,26 +1369,16 @@ GCMarker::markDelayedChildren()
             default:
                 JS_NOT_REACHED("wrong thingkind");
         }
     }
     JS_ASSERT(markLaterCount == 0);
     JS_ASSERT(!unmarkedArenaStackTop);
 }
 
-void
-GCMarker::slowifyArrays()
-{
-    while (!arraysToSlowify.empty()) {
-        JSObject *obj = arraysToSlowify.back();
-        arraysToSlowify.popBack();
-        if (obj->isMarked())
-            obj->makeDenseArraySlow(context);
-    }
-}
 } /* namespace js */
 
 static void
 gc_root_traversal(JSTracer *trc, const RootEntry &entry)
 {
 #ifdef DEBUG
     void *ptr;
     if (entry.value.type == JS_GC_ROOT_GCTHING_PTR) {
@@ -2238,19 +2228,16 @@ MarkAndSweep(JSContext *cx, JSGCInvocati
     /*
      * Sweep script filenames after sweeping functions in the generic loop
      * above. In this way when a scripted function's finalizer destroys the
      * script and calls rt->destroyScriptHook, the hook can still access the
      * script's filename. See bug 323267.
      */
     js_SweepScriptFilenames(rt);
 
-    /* Slowify arrays we have accumulated. */
-    gcmarker.slowifyArrays();
-
     /*
      * Destroy arenas after we finished the sweeping so finalizers can safely
      * use js_IsAboutToBeFinalized().
      */
     ExpireGCChunks(rt);
     TIMESTAMP(sweepDestroyEnd);
 
     if (rt->gcCallback)
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -978,18 +978,16 @@ struct GCMarker : public JSTracer {
 #ifdef JS_DUMP_CONSERVATIVE_GC_ROOTS
     struct ConservativeRoot { void *thing; uint32 thingKind; };
     Vector<ConservativeRoot, 0, SystemAllocPolicy> conservativeRoots;
     const char *conservativeDumpFileName;
 
     void dumpConservativeRoots();
 #endif
 
-    js::Vector<JSObject *, 0, js::SystemAllocPolicy> arraysToSlowify;
-
   public:
     explicit GCMarker(JSContext *cx);
     ~GCMarker();
 
     uint32 getMarkColor() const {
         return color;
     }
 
@@ -1000,18 +998,16 @@ struct GCMarker : public JSTracer {
          */
         markDelayedChildren();
         color = newColor;
     }
 
     void delayMarkingChildren(void *thing);
 
     JS_FRIEND_API(void) markDelayedChildren();
-
-    void slowifyArrays();
 };
 
 void
 MarkStackRangeConservatively(JSTracer *trc, Value *begin, Value *end);
 
 } /* namespace js */
 
 extern void
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -5906,17 +5906,18 @@ BEGIN_CASE(JSOP_NEWINIT)
 }
 END_CASE(JSOP_NEWINIT)
 
 BEGIN_CASE(JSOP_NEWARRAY)
 {
     unsigned count = GET_UINT24(regs.pc);
     JSObject *obj = js_NewArrayObject(cx, count, NULL);
 
-    if (!obj || !obj->ensureDenseArrayElements(cx, count))
+    /* Avoid ensureDenseArrayElements so we skip its sparse-array checks. */
+    if (!obj || !obj->ensureSlots(cx, count))
         goto error;
 
     PUSH_OBJECT(*obj);
     CHECK_INTERRUPT_HANDLER();
 }
 END_CASE(JSOP_NEWARRAY)
 
 BEGIN_CASE(JSOP_NEWOBJECT)
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -747,19 +747,34 @@ struct JSObject : js::gc::Cell {
     inline uint32 getArrayLength() const;
     inline void setArrayLength(uint32 length);
 
     inline uint32 getDenseArrayCapacity();
     inline js::Value* getDenseArrayElements();
     inline const js::Value &getDenseArrayElement(uintN idx);
     inline js::Value* addressOfDenseArrayElement(uintN idx);
     inline void setDenseArrayElement(uintN idx, const js::Value &val);
-    inline bool ensureDenseArrayElements(JSContext *cx, uintN cap);
     inline void shrinkDenseArrayElements(JSContext *cx, uintN cap);
 
+    /*
+     * ensureDenseArrayElements ensures that the dense array can hold at least
+     * index + extra elements. It returns ED_OK on success, ED_FAILED on
+     * failure to grow the array, ED_SPARSE when the array is too sparse to
+     * grow (this includes the case of index + extra overflow). In the last
+     * two cases the array is kept intact.
+     */
+    enum EnsureDenseResult { ED_OK, ED_FAILED, ED_SPARSE };
+    inline EnsureDenseResult ensureDenseArrayElements(JSContext *cx, uintN index, uintN extra);
+
+    /*
+     * Check whether the dense array will be too sparse after growing.
+     * newElementsHint is an estimated number of elements to be added.
+     */
+    bool willBeSparseDenseArray(uintN requiredCapacity, uintN newElementsHint);
+
     JSBool makeDenseArraySlow(JSContext *cx);
 
     /*
      * Arguments-specific getters and setters.
      */
 
   private:
     /*
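
A hedged sketch of how a caller consumes the new tri-state result; the wrapper function and its fallback path are illustrative, not part of this patch:

    static bool
    StoreDenseElement(JSContext *cx, JSObject *obj, uintN index,
                      const js::Value &value)   /* hypothetical helper */
    {
        JSObject::EnsureDenseResult res =
            obj->ensureDenseArrayElements(cx, index, 1);
        if (res == JSObject::ED_FAILED)
            return false;                       /* could not grow; propagate */
        if (res == JSObject::ED_SPARSE) {
            /* Too sparse to grow densely; fall back to a slow array. */
            if (!obj->makeDenseArraySlow(cx))
                return false;
            /* ...store via the generic property path instead... */
            return true;
        }
        obj->setDenseArrayElement(index, value); /* ED_OK: safe to write */
        return true;
    }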
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -185,21 +185,23 @@ ChangesMethodValue(const js::Value &prev
     return prev.isObject() && (prevObj = &prev.toObject())->isFunction() &&
            (!v.isObject() || &v.toObject() != prevObj);
 }
 
 inline bool
 JSObject::methodWriteBarrier(JSContext *cx, const js::Shape &shape, const js::Value &v)
 {
     if (flags & (BRANDED | METHOD_BARRIER)) {
-        const js::Value &prev = nativeGetSlot(shape.slot);
+        if (shape.slot != SHAPE_INVALID_SLOT) {
+            const js::Value &prev = nativeGetSlot(shape.slot);
 
-        if (ChangesMethodValue(prev, v)) {
-            JS_FUNCTION_METER(cx, mwritebarrier);
-            return methodShapeChange(cx, shape);
+            if (ChangesMethodValue(prev, v)) {
+                JS_FUNCTION_METER(cx, mwritebarrier);
+                return methodShapeChange(cx, shape);
+            }
         }
     }
     return true;
 }
 
 inline bool
 JSObject::methodWriteBarrier(JSContext *cx, uint32 slot, const js::Value &v)
 {
@@ -324,23 +326,16 @@ JSObject::addressOfDenseArrayElement(uin
 
 inline void
 JSObject::setDenseArrayElement(uintN idx, const js::Value &val)
 {
     JS_ASSERT(isDenseArray());
     setSlot(idx, val);
 }
 
-inline bool
-JSObject::ensureDenseArrayElements(JSContext *cx, uintN cap)
-{
-    JS_ASSERT(isDenseArray());
-    return ensureSlots(cx, cap);
-}
-
 inline void
 JSObject::shrinkDenseArrayElements(JSContext *cx, uintN cap)
 {
     JS_ASSERT(isDenseArray());
     shrinkSlots(cx, cap);
 }
 
 inline void
--- a/js/src/jsotypes.h
+++ b/js/src/jsotypes.h
@@ -86,16 +86,24 @@ typedef JSIntn intn;
 /*
  * On AIX 4.3, sys/inttypes.h (which is included by sys/types.h, a very
  * common header file) defines the types int8, int16, int32, and int64.
  * So we don't define these four types here to avoid conflicts in case
  * the code also includes sys/types.h.
  */
 #if defined(AIX) && defined(HAVE_SYS_INTTYPES_H)
 #include <sys/inttypes.h>
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+typedef JSInt64 int64;
+
+/* The explicit 'signed' keyword is required for types used in bitfields: */
+/* some compilers treat plain-int bitfields as unsigned without it. */
+typedef signed int int32;
+typedef signed short int16;
+typedef signed char int8;
 #else
 typedef JSInt64 int64;
 
 /* /usr/include/model.h on HP-UX defines int8, int16, and int32 */
 typedef JSInt32 int32;
 typedef JSInt16 int16;
 typedef JSInt8 int8;
 #endif /* AIX && HAVE_SYS_INTTYPES_H */
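
A small illustration of why the explicit keyword matters (a sketch, not in the patch): C and C++ leave the signedness of a plain int bitfield implementation-defined, and Sun Studio chooses unsigned:

    struct Flags {
        int32 small : 4;    /* stays signed thanks to the typedefs above */
    };
    /*
     * If int32 were a plain (unsigned-by-default) bitfield type, assigning
     * -1 to 'small' would read back as 15 rather than -1.
     */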
--- a/js/src/jspropertytree.cpp
+++ b/js/src/jspropertytree.cpp
@@ -135,17 +135,17 @@ KidsChunk::destroy(JSContext *cx, KidsCh
 
     KidsChunk *nextChunk = chunk->next;
     js_free(chunk);
     return nextChunk;
 }
 
 /*
  * NB: Called with cx->runtime->gcLock held, always.
- * On failure, return null after unlocking the GC and reporting out of memory.
+ * On failure, return false after unlocking the GC and reporting out of memory.
  */
 bool
 PropertyTree::insertChild(JSContext *cx, Shape *parent, Shape *child)
 {
     JS_ASSERT(!parent->inDictionary());
     JS_ASSERT(!child->parent);
     JS_ASSERT(!child->inDictionary());
     JS_ASSERT(!JSID_IS_VOID(parent->id));
@@ -214,18 +214,21 @@ PropertyTree::insertChild(JSContext *cx,
         *chunkp = chunk;
         chunk->kids[0] = child;
         return true;
     }
    
     KidsHash *hash = kidp->toHash();
     KidsHash::AddPtr addPtr = hash->lookupForAdd(child);
     if (!addPtr) {
-        if (!hash->add(addPtr, child))
+        if (!hash->add(addPtr, child)) {
+            JS_UNLOCK_GC(cx->runtime);
+            JS_ReportOutOfMemory(cx);
             return false;
+        }
     } else {
         // FIXME ignore duplicate child case here, going thread-local soon!
     }
     return true;
 }
 
 /* NB: Called with cx->runtime->gcLock held. */
 void
--- a/js/src/jsregexpinlines.h
+++ b/js/src/jsregexpinlines.h
@@ -217,28 +217,24 @@ RegExp::initArena(JSContext *cx)
     return true;
 }
 
 inline void
 RegExp::checkMatchPairs(JSString *input, int *buf, size_t matchItemCount)
 {
 #if DEBUG
     size_t inputLength = input->length();
-    int largestStartSeen = 0;
     for (size_t i = 0; i < matchItemCount; i += 2) {
         int start = buf[i];
         int limit = buf[i + 1];
         JS_ASSERT(limit >= start); /* The limit index must not be less than the start index. */
         if (start == -1)
             continue;
         JS_ASSERT(start >= 0);
         JS_ASSERT(size_t(limit) <= inputLength);
-        /* Test the monotonically increasing nature of left parens. */
-        JS_ASSERT(start >= largestStartSeen);
-        largestStartSeen = start;
     }
 #endif
 }
 
 inline JSObject *
 RegExp::createResult(JSContext *cx, JSString *input, int *buf, size_t matchItemCount)
 {
     /*
--- a/js/src/jsscan.cpp
+++ b/js/src/jsscan.cpp
@@ -1696,34 +1696,34 @@ TokenStream::getTokenInternal()
 
         if (flags & TSF_OPERAND) {
             uintN reflags, length;
             JSBool inCharClass = JS_FALSE;
 
             tokenbuf.clear();
             for (;;) {
                 c = getChar();
-                if (c == '\n' || c == EOF) {
-                    ungetChar(c);
-                    ReportCompileErrorNumber(cx, this, NULL, JSREPORT_ERROR,
-                                             JSMSG_UNTERMINATED_REGEXP);
-                    goto error;
-                }
                 if (c == '\\') {
                     if (!tokenbuf.append(c))
                         goto error;
                     c = getChar();
                 } else if (c == '[') {
                     inCharClass = JS_TRUE;
                 } else if (c == ']') {
                     inCharClass = JS_FALSE;
                 } else if (c == '/' && !inCharClass) {
                     /* For compat with IE, allow unescaped / in char classes. */
                     break;
                 }
+                if (c == '\n' || c == EOF) {
+                    ungetChar(c);
+                    ReportCompileErrorNumber(cx, this, NULL, JSREPORT_ERROR,
+                                             JSMSG_UNTERMINATED_REGEXP);
+                    goto error;
+                }
                 if (!tokenbuf.append(c))
                     goto error;
             }
             for (reflags = 0, length = tokenbuf.length() + 1; ; length++) {
                 c = peekChar();
                 if (c == 'g' && !(reflags & JSREG_GLOB))
                     reflags |= JSREG_GLOB;
                 else if (c == 'i' && !(reflags & JSREG_FOLD))
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -905,17 +905,17 @@ JS_STATIC_ASSERT(sizeof(jsbytecode) % si
  */
 JS_STATIC_ASSERT(sizeof(JSScript) + 2 * sizeof(JSObjectArray) +
                  sizeof(JSUpvarArray) < JS_BIT(8));
 
 JSScript *
 JSScript::NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 natoms,
                     uint32 nobjects, uint32 nupvars, uint32 nregexps,
                     uint32 ntrynotes, uint32 nconsts, uint32 nglobals,
-                    uint32 nClosedArgs, uint32 nClosedVars)
+                    uint16 nClosedArgs, uint16 nClosedVars)
 {
     size_t size, vectorSize;
     JSScript *script;
     uint8 *cursor;
     unsigned constPadding = 0;
 
     uint32 totalClosed = nClosedArgs + nClosedVars;
 
@@ -1155,22 +1155,25 @@ JSScript::NewScriptFromCG(JSContext *cx,
             JS_RUNTIME_METER(cx->runtime, liveEmptyScripts);
             JS_RUNTIME_METER(cx->runtime, totalEmptyScripts);
             return empty;
         }
     }
 
   skip_empty:
     CG_COUNT_FINAL_SRCNOTES(cg, nsrcnotes);
+    uint16 nClosedArgs = uint16(cg->closedArgs.length());
+    JS_ASSERT(nClosedArgs == cg->closedArgs.length());
+    uint16 nClosedVars = uint16(cg->closedVars.length());
+    JS_ASSERT(nClosedVars == cg->closedVars.length());
     script = NewScript(cx, prologLength + mainLength, nsrcnotes,
                        cg->atomList.count, cg->objectList.length,
                        cg->upvarList.count, cg->regexpList.length,
                        cg->ntrynotes, cg->constList.length(),
-                       cg->globalUses.length(), cg->closedArgs.length(),
-                       cg->closedVars.length());
+                       cg->globalUses.length(), nClosedArgs, nClosedVars);
     if (!script)
         return NULL;
 
     /* Now that we have script, error control flow must go to label bad. */
     script->main += prologLength;
     memcpy(script->code, CG_PROLOG_BASE(cg), prologLength * sizeof(jsbytecode));
     memcpy(script->main, CG_BASE(cg), mainLength * sizeof(jsbytecode));
     nfixed = cg->inFunction()
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -203,17 +203,17 @@ struct JSScript {
      * script singleton (JSScript::emptyScript()). Callers who know they can use
      * that read-only singleton are responsible for choosing it instead of calling
      * NewScript with length and nsrcnotes equal to 1 and other parameters save
      * cx all zero.
      */
     static JSScript *NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 natoms,
                                uint32 nobjects, uint32 nupvars, uint32 nregexps,
                                uint32 ntrynotes, uint32 nconsts, uint32 nglobals,
-                               uint32 nClosedArgs, uint32 nClosedVars);
+                               uint16 nClosedArgs, uint16 nClosedVars);
 
     static JSScript *NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg);
 
     /* FIXME: bug 586181 */
     JSCList         links;      /* Links for compartment script list */
     jsbytecode      *code;      /* bytecodes and their immediate operands */
     uint32          length;     /* length of code vector */
     uint16          version;    /* JS version under which script was compiled */
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -2731,17 +2731,16 @@ TraceMonitor::flush()
 
     flushEpoch++;
 
     frameCache->reset();
     dataAlloc->reset();
     traceAlloc->reset();
     codeAlloc->reset();
     tempAlloc->reset();
-    reTempAlloc->reset();
     oracle->clear();
     loopProfiles->clear();
 
     Allocator& alloc = *dataAlloc;
 
     for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
         globalStates[i].globalShape = -1;
         globalStates[i].globalSlots = new (alloc) SlotList(&alloc);
@@ -7585,17 +7584,16 @@ InitJIT(TraceMonitor *tm)
         abort();
 
     tm->flushEpoch = 0;
     
     JS_ASSERT(!tm->dataAlloc && !tm->traceAlloc && !tm->codeAlloc);
     tm->dataAlloc = new VMAllocator();
     tm->traceAlloc = new VMAllocator();
     tm->tempAlloc = new VMAllocator();
-    tm->reTempAlloc = new VMAllocator();
     tm->codeAlloc = new CodeAlloc();
     tm->frameCache = new FrameInfoCache(tm->dataAlloc);
     tm->storage = new TraceNativeStorage();
     tm->cachedTempTypeMap = new TypeMap(0);
     tm->flush();
     verbose_only( tm->branches = NULL; )
 
 #if !defined XP_WIN
@@ -7722,21 +7720,16 @@ FinishJIT(TraceMonitor *tm)
         tm->traceAlloc = NULL;
     }
 
     if (tm->tempAlloc) {
         delete tm->tempAlloc;
         tm->tempAlloc = NULL;
     }
 
-    if (tm->reTempAlloc) {
-        delete tm->reTempAlloc;
-        tm->reTempAlloc = NULL;
-    }
-
     if (tm->storage) {
         delete tm->storage;
         tm->storage = NULL;
     }
 
     delete tm->cachedTempTypeMap;
     tm->cachedTempTypeMap = NULL;
 }
--- a/js/src/methodjit/BaseCompiler.h
+++ b/js/src/methodjit/BaseCompiler.h
@@ -202,37 +202,52 @@ class Repatcher : public JSC::RepatchBuf
  * live. Dumping a huge constant pool into the middle of an IC's inline path
  * makes the distance between emitted instructions potentially variable and/or
  * large, which makes the IC offsets invalid. We must reserve contiguous space
  * up front to prevent this from happening.
  */
 #ifdef JS_CPU_ARM
 class AutoReserveICSpace {
     typedef Assembler::Label Label;
-    static const size_t reservedSpace = 64;
+    static const size_t reservedSpace = 68;
 
     Assembler           &masm;
 #ifdef DEBUG
     Label               startLabel;
 #endif
 
   public:
     AutoReserveICSpace(Assembler &masm) : masm(masm) {
         masm.ensureSpace(reservedSpace);
 #ifdef DEBUG
         startLabel = masm.label();
+
+        /* Assert that the constant pool is not flushed until we reach a safe point. */
+        masm.allowPoolFlush(false);
+
+        JaegerSpew(JSpew_Insns, " -- BEGIN CONSTANT-POOL-FREE REGION -- \n");
 #endif
     }
 
     ~AutoReserveICSpace() {
 #ifdef DEBUG
         Label endLabel = masm.label();
         int spaceUsed = masm.differenceBetween(startLabel, endLabel);
+
+        /* Spew the space used, to help tuning of reservedSpace. */
+        JaegerSpew(JSpew_Insns,
+                   " -- END CONSTANT-POOL-FREE REGION: %u bytes used of %u reserved. -- \n",
+                   spaceUsed, reservedSpace);
+
+        /* Assert that we didn't emit more code than we protected. */
         JS_ASSERT(spaceUsed >= 0);
         JS_ASSERT(size_t(spaceUsed) <= reservedSpace);
+
+        /* Allow the pool to be flushed. */
+        masm.allowPoolFlush(true);
 #endif
     }
 };
 # define RESERVE_IC_SPACE(__masm) AutoReserveICSpace arics(__masm)
 #else
 # define RESERVE_IC_SPACE(__masm) /* Nothing. */
 #endif
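
How the reservation guard is meant to be used; the emitter function below is hypothetical, while RESERVE_IC_SPACE is the macro defined above:

    static void
    emitInlineICPath(Assembler &masm)           /* hypothetical emitter */
    {
        RESERVE_IC_SPACE(masm);   /* ensureSpace(68) + allowPoolFlush(false) */
        /*
         * Emit the IC's inline path here. Offsets recorded in this region
         * stay valid: in DEBUG builds, a constant-pool flush would trip the
         * ASSERT(m_allowFlush) added to flushConstantPool().
         */
    }   /* destructor checks <= 68 bytes used (DEBUG) and re-allows flushes */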
 
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -2172,27 +2172,31 @@ mjit::Compiler::loadReturnValue(Assemble
 void
 mjit::Compiler::fixPrimitiveReturn(Assembler *masm, FrameEntry *fe)
 {
     JS_ASSERT(isConstructing);
 
     bool ool = (masm != &this->masm);
     Address thisv(JSFrameReg, JSStackFrame::offsetOfThis(fun));
 
-    // Easy cases - no return value, or known primitive, so just return thisv.
-    if (!fe || (fe->isTypeKnown() && fe->getKnownType() != JSVAL_TYPE_OBJECT)) {
+    // We can just load |thisv| if either of the following is true:
+    //  (1) There is no explicit return value, AND fp->rval is not used.
+    //  (2) There is an explicit return value, and it's known to be primitive.
+    if ((!fe && !analysis->usesReturnValue()) ||
+        (fe && fe->isTypeKnown() && fe->getKnownType() != JSVAL_TYPE_OBJECT))
+    {
         if (ool)
             masm->loadValueAsComponents(thisv, JSReturnReg_Type, JSReturnReg_Data);
         else
             frame.loadThisForReturn(JSReturnReg_Type, JSReturnReg_Data, Registers::ReturnReg);
         return;
     }
 
     // If the type is known to be an object, just load the return value as normal.
-    if (fe->isTypeKnown() && fe->getKnownType() == JSVAL_TYPE_OBJECT) {
+    if (fe && fe->isTypeKnown() && fe->getKnownType() == JSVAL_TYPE_OBJECT) {
         loadReturnValue(masm, fe);
         return;
     }
 
     // There's a return value, and its type is unknown. Test the type and load
     // |thisv| if necessary.
     loadReturnValue(masm, fe);
     Jump j = masm->testObject(Assembler::Equal, JSReturnReg_Type);
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -909,16 +909,63 @@ mjit::Compiler::jsop_typeof()
 
         if (atom) {
             frame.pop();
             frame.push(StringValue(ATOM_TO_STRING(atom)));
             return;
         }
     }
 
+    JSOp fused = JSOp(PC[JSOP_TYPEOF_LENGTH]);
+    if (fused == JSOP_STRING && !fe->isTypeKnown()) {
+        JSOp op = JSOp(PC[JSOP_TYPEOF_LENGTH + JSOP_STRING_LENGTH]);
+
+        if (op == JSOP_STRICTEQ || op == JSOP_EQ || op == JSOP_STRICTNE || op == JSOP_NE) {
+            JSAtom *atom = script->getAtom(fullAtomIndex(PC + JSOP_TYPEOF_LENGTH));
+            JSRuntime *rt = cx->runtime;
+            JSValueType type = JSVAL_TYPE_UNINITIALIZED;
+            Assembler::Condition cond = (op == JSOP_STRICTEQ || op == JSOP_EQ)
+                                        ? Assembler::Equal
+                                        : Assembler::NotEqual;
+            
+            if (atom == rt->atomState.typeAtoms[JSTYPE_VOID]) {
+                type = JSVAL_TYPE_UNDEFINED;
+            } else if (atom == rt->atomState.typeAtoms[JSTYPE_STRING]) {
+                type = JSVAL_TYPE_STRING;
+            } else if (atom == rt->atomState.typeAtoms[JSTYPE_BOOLEAN]) {
+                type = JSVAL_TYPE_BOOLEAN;
+            } else if (atom == rt->atomState.typeAtoms[JSTYPE_NUMBER]) {
+                type = JSVAL_TYPE_INT32;
+
+                /* JSVAL_TYPE_DOUBLE is 0x0 and JSVAL_TYPE_INT32 is 0x1, use <= or > to match both */
+                cond = (cond == Assembler::Equal) ? Assembler::BelowOrEqual : Assembler::Above;
+            }
+
+            if (type != JSVAL_TYPE_UNINITIALIZED) {
+                PC += JSOP_STRING_LENGTH;
+                PC += JSOP_EQ_LENGTH;
+
+                RegisterID result = frame.allocReg(Registers::SingleByteRegs);
+
+#if defined JS_NUNBOX32
+                if (frame.shouldAvoidTypeRemat(fe))
+                    masm.set32(cond, masm.tagOf(frame.addressOf(fe)), ImmType(type), result);
+                else
+                    masm.set32(cond, frame.tempRegForType(fe), ImmType(type), result);
+#elif defined JS_PUNBOX64
+                masm.setPtr(cond, frame.tempRegForType(fe), ImmType(type), result);
+#endif
+
+                frame.pop();
+                frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, result);
+                return;
+            }
+        }
+    }
+
     prepareStubCall(Uses(1));
     INLINE_STUBCALL(stubs::TypeOf);
     frame.pop();
     frame.takeReg(Registers::ReturnReg);
     frame.pushTypedPayload(JSVAL_TYPE_STRING, Registers::ReturnReg);
 }
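    // Illustration (not from this patch): comparisons the fused typeof path
    // above handles, each compiled without a stubs::TypeOf call or string
    // comparison:
    //   typeof x == "undefined"  ->  tag test against JSVAL_TYPE_UNDEFINED
    //   typeof x === "string"    ->  tag test against JSVAL_TYPE_STRING
    //   typeof x != "boolean"    ->  the same tag test, inverted condition
    //   typeof x == "number"     ->  BelowOrEqual tag test, matching both
    //                                JSVAL_TYPE_DOUBLE (0x0) and INT32 (0x1)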
 
 bool
@@ -1044,108 +1091,69 @@ mjit::Compiler::jsop_andor(JSOp op, jsby
     }
 
     return booleanJumpScript(op, target);
 }
 
 void
 mjit::Compiler::jsop_localinc(JSOp op, uint32 slot, bool popped)
 {
-    bool post = (op == JSOP_LOCALINC || op == JSOP_LOCALDEC);
-    int32 amt = (op == JSOP_INCLOCAL || op == JSOP_LOCALINC) ? 1 : -1;
+    if (popped || (op == JSOP_INCLOCAL || op == JSOP_DECLOCAL)) {
+        int amt = (op == JSOP_LOCALINC || op == JSOP_INCLOCAL) ? -1 : 1;
 
-    frame.pushLocal(slot);
-
-    FrameEntry *fe = frame.peek(-1);
+        // Before: 
+        // After:  V
+        frame.pushLocal(slot);
 
-    if (fe->isConstant() && fe->getValue().isPrimitive()) {
-        Value v = fe->getValue();
-        double d;
-        ValueToNumber(cx, v, &d);
-        if (post) {
-            frame.push(NumberValue(d + amt));
-            frame.storeLocal(slot);
-            frame.pop();
-        } else {
-            frame.pop();
-            frame.push(NumberValue(d + amt));
-            frame.storeLocal(slot);
-        }
+        // Before: V
+        // After:  V 1
+        frame.push(Int32Value(amt));
+
+        // Note, SUB will perform integer conversion for us.
+        // Before: V 1
+        // After:  N+1
+        jsop_binary(JSOP_SUB, stubs::Sub);
+
+        // Before: N+1
+        // After:  N+1
+        frame.storeLocal(slot, popped);
+
         if (popped)
             frame.pop();
-        return;
-    }
+    } else {
+        int amt = (op == JSOP_LOCALINC || op == JSOP_INCLOCAL) ? 1 : -1;
+
+        // Before:
+        // After: V
+        frame.pushLocal(slot);
 
-    /*
-     * If the local variable is not known to be an int32, or the pre-value
-     * is observed, then do the simple thing and decompose x++ into simpler
-     * opcodes.
-     */
-    if (fe->isNotType(JSVAL_TYPE_INT32) || (post && !popped)) {
-        /* V */
+        // Before: V
+        // After:  N
         jsop_pos();
-        /* N */
+
+        // Before: N
+        // After:  N N
+        frame.dup();
 
-        if (post && !popped) {
-            frame.dup();
-            /* N N */
-        }
+        // Before: N N
+        // After:  N N 1
+        frame.push(Int32Value(amt));
 
-        frame.push(Int32Value(1));
-        /* N? N 1 */
+        // Before: N N 1
+        // After:  N N+1
+        jsop_binary(JSOP_ADD, stubs::Add);
 
-        if (amt == 1)
-            jsop_binary(JSOP_ADD, stubs::Add);
-        else
-            jsop_binary(JSOP_SUB, stubs::Sub);
-        /* N? N+1 */
+        // Before: N N+1
+        // After:  N N+1
+        frame.storeLocal(slot, true);
 
-        frame.storeLocal(slot, post || popped);
-        /* N? N+1 */
-
-        if (post || popped)
-            frame.pop();
-
-        return;
+        // Before: N N+1
+        // After:  N
+        frame.pop();
     }
-
-    /* If the pre value is not observed, we can emit better code. */
-    if (!fe->isTypeKnown()) {
-        Jump intFail = frame.testInt32(Assembler::NotEqual, fe);
-        stubcc.linkExit(intFail, Uses(1));
-    }
-
-    RegisterID reg = frame.copyDataIntoReg(fe);
-
-    Jump ovf;
-    if (amt > 0)
-        ovf = masm.branchAdd32(Assembler::Overflow, Imm32(1), reg);
-    else
-        ovf = masm.branchSub32(Assembler::Overflow, Imm32(1), reg);
-    stubcc.linkExit(ovf, Uses(1));
-
-    /* Note, stub call will push the original value again no matter what. */
-    stubcc.leave();
-
-    stubcc.masm.move(Imm32(slot), Registers::ArgReg1);
-    if (op == JSOP_LOCALINC || op == JSOP_INCLOCAL)
-        OOL_STUBCALL(stubs::IncLocal);
-    else
-        OOL_STUBCALL(stubs::DecLocal);
-
-    frame.pop();
-    frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);
-    frame.storeLocal(slot, popped, false);
-
-    if (popped)
-        frame.pop();
-    else
-        frame.forgetType(frame.peek(-1));
-
-    stubcc.rejoin(Changes(0));
 }
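    // Illustrative mapping (not from this patch) of source forms to the two
    // paths above:
    //   "x++;" with the result unused (popped)  -> first branch: pushLocal,
    //       push amt, SUB (N - (-1) == N+1), storeLocal, pop.
    //   "y = ++x;" (JSOP_INCLOCAL, not popped)  -> same branch; N+1 remains
    //       on the stack as the result.
    //   "y = x++;" (pre-value observed)         -> second branch: dup keeps
    //       the old N as the result while N+1 is stored back.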
 
 void
 mjit::Compiler::jsop_arginc(JSOp op, uint32 slot, bool popped)
 {
     if (popped || (op == JSOP_INCARG || op == JSOP_DECARG)) {
         int amt = (op == JSOP_ARGINC || op == JSOP_INCARG) ? -1 : 1;
 
--- a/js/src/methodjit/MethodJIT.h
+++ b/js/src/methodjit/MethodJIT.h
@@ -82,17 +82,17 @@ struct VMFrame
     void *savedEBX;
     void *savedEDI;
     void *savedESI;
     void *savedEBP;
     void *savedEIP;
 
 # ifdef JS_NO_FASTCALL
     inline void** returnAddressLocation() {
-        return reinterpret_cast<void**>(this) - 3;
+        return reinterpret_cast<void**>(this) - 5;
     }
 # else
     inline void** returnAddressLocation() {
         return reinterpret_cast<void**>(this) - 1;
     }
 # endif
 #elif defined(JS_CPU_X64)
     void *savedRBX;
@@ -327,16 +327,17 @@ struct JITScript {
     ~JITScript();
 
     bool isValidCode(void *ptr) {
         char *jitcode = (char *)code.m_code.executableAddress();
         char *jcheck = (char *)ptr;
         return jcheck >= jitcode && jcheck < jitcode + code.m_size;
     }
 
+    void nukeScriptDependentICs();
     void sweepCallICs();
     void purgeMICs();
     void purgePICs();
 };
 
 /*
  * Execute the given mjit code. This is a low-level call and callers must
  * provide the same guarantees as JaegerShot/CheckStackAndEnterMethodJIT.
--- a/js/src/methodjit/MonoIC.cpp
+++ b/js/src/methodjit/MonoIC.cpp
@@ -1093,16 +1093,36 @@ ic::PurgeMICs(JSContext *cx, JSScript *s
 
     if (script->jitNormal)
         script->jitNormal->purgeMICs();
     if (script->jitCtor)
         script->jitCtor->purgeMICs();
 }
 
 void
+JITScript::nukeScriptDependentICs()
+{
+    if (!nCallICs)
+        return;
+
+    Repatcher repatcher(this);
+
+    for (uint32 i = 0; i < nCallICs; i++) {
+        ic::CallICInfo &ic = callICs[i];
+        if (!ic.fastGuardedObject)
+            continue;
+        repatcher.repatch(ic.funGuard, NULL);
+        repatcher.relink(ic.funJump, ic.slowPathStart);
+        ic.releasePool(CallICInfo::Pool_ClosureStub);
+        ic.fastGuardedObject = NULL;
+        ic.hasJsFunCheck = false;
+    }
+}
+
+void
 JITScript::sweepCallICs()
 {
     if (!nCallICs)
         return;
 
     Repatcher repatcher(this);
 
     for (uint32 i = 0; i < nCallICs; i++) {
--- a/js/src/methodjit/PolyIC.cpp
+++ b/js/src/methodjit/PolyIC.cpp
@@ -1472,27 +1472,26 @@ class ScopeNameCompiler : public PICStub
                     vp->setUndefined();
                     return true;
                 }
             }
             ReportAtomNotDefined(cx, atom);
             return false;
         }
 
-        if (!obj->isNative() || !holder->isNative()) {
-            if (!obj->getProperty(cx, ATOM_TO_JSID(atom), vp))
-                return false;
-        } else {
-            const Shape *shape = getprop.shape;
-            JS_ASSERT(shape);
-            JSObject *normalized = obj;
-            if (obj->getClass() == &js_WithClass && !shape->hasDefaultGetter())
-                normalized = js_UnwrapWithObject(cx, obj);
-            NATIVE_GET(cx, normalized, holder, shape, JSGET_METHOD_BARRIER, vp, return false);
-        }
+        // If the property was found, but we decided not to cache it, then
+        // take a slow path and do a full property fetch.
+        if (!getprop.shape)
+            return obj->getProperty(cx, ATOM_TO_JSID(atom), vp);
+
+        const Shape *shape = getprop.shape;
+        JSObject *normalized = obj;
+        if (obj->getClass() == &js_WithClass && !shape->hasDefaultGetter())
+            normalized = js_UnwrapWithObject(cx, obj);
+        NATIVE_GET(cx, normalized, holder, shape, JSGET_METHOD_BARRIER, vp, return false);
 
         return true;
     }
 };
 
 class BindNameCompiler : public PICStubCompiler
 {
     JSObject *scopeChain;
@@ -1546,16 +1545,18 @@ class BindNameCompiler : public PICStubC
                 return disable("non-cacheable obj in scope chain");
             masm.loadPtr(parent, pic.objReg);
             Jump nullTest = masm.branchTestPtr(Assembler::Zero, pic.objReg, pic.objReg);
             if (!fails.append(nullTest))
                 return error();
             masm.loadShape(pic.objReg, pic.shapeReg);
             Jump shapeTest = masm.branch32(Assembler::NotEqual, pic.shapeReg,
                                            Imm32(tobj->shape()));
+            if (!fails.append(shapeTest))
+                return error();
             tobj = tobj->getParent();
         }
         if (tobj != obj)
             return disable("indirect hit");
 
         Jump done = masm.jump();
 
         // All failures flow to here, so there is a common point to patch.
@@ -2368,34 +2369,31 @@ SetElementIC::attachHoleStub(JSContext *
     JS_ASSERT((jsuint)keyval >= obj->getDenseArrayCapacity() ||
               obj->getDenseArrayElement(keyval).isMagic(JS_ARRAY_HOLE));
 
     if (js_PrototypeHasIndexedProperties(cx, obj))
         return disable(cx, "prototype has indexed properties");
 
     Assembler masm;
 
-    // Test for indexed properties in Array.prototype. It is safe to bake in
-    // this pointer because changing __proto__ will slowify.
-    JSObject *arrayProto = obj->getProto();
-    masm.move(ImmPtr(arrayProto), objReg);
-    Jump extendedArray = masm.branchTest32(Assembler::NonZero,
-                                           Address(objReg, offsetof(JSObject, flags)),
-                                           Imm32(JSObject::INDEXED));
-
-    // Text for indexed properties in Object.prototype. Guard that
-    // Array.prototype doesn't change, too.
-    JSObject *objProto = arrayProto->getProto();
-    Jump sameProto = masm.branchPtr(Assembler::NotEqual,
-                                    Address(objReg, offsetof(JSObject, proto)),
-                                    ImmPtr(objProto));
-    masm.move(ImmPtr(objProto), objReg);
-    Jump extendedObject = masm.branchTest32(Assembler::NonZero,
-                                            Address(objReg, offsetof(JSObject, flags)),
-                                            Imm32(JSObject::INDEXED));
+    Vector<Jump, 4> fails(cx);
+
+    // Test for indexed properties in Array.prototype. We test each shape
+    // along the proto chain. This affords us two optimizations:
+    //  1) Loading the prototype can be avoided because the shape would change;
+    //     instead we can bake in their identities.
+    //  2) We only have to test the shape, rather than INDEXED.
+    for (JSObject *pobj = obj->getProto(); pobj; pobj = pobj->getProto()) {
+        if (!pobj->isNative())
+            return disable(cx, "non-native array prototype");
+        masm.move(ImmPtr(pobj), objReg);
+        Jump j = masm.guardShape(objReg, pobj);
+        if (!fails.append(j))
+            return error(cx);
+    }
 
     // Restore |obj|.
     masm.rematPayload(StateRemat::FromInt32(objRemat), objReg);
 
     // Guard against negative indices.
     MaybeJump keyGuard;
     if (!hasConstantKey)
         keyGuard = masm.branch32(Assembler::LessThan, keyReg, Imm32(0));
@@ -2433,19 +2431,18 @@ SetElementIC::attachHoleStub(JSContext *
     execPool = buffer.init(cx);
     if (!execPool)
         return error(cx);
 
     if (!buffer.verifyRange(cx->fp()->jit()))
         return disable(cx, "code memory is out of range");
 
     // Patch all guards.
-    buffer.link(extendedArray, slowPathStart);
-    buffer.link(sameProto, slowPathStart);
-    buffer.link(extendedObject, slowPathStart);
+    for (size_t i = 0; i < fails.length(); i++)
+        buffer.link(fails[i], slowPathStart);
     buffer.link(done, fastPathRejoin);
 
     CodeLocationLabel cs = buffer.finalize();
     JaegerSpew(JSpew_PICs, "generated dense array hole stub at %p\n", cs.executableAddress());
 
     Repatcher repatcher(cx->fp()->jit());
     repatcher.relink(fastPathStart.jumpAtOffset(inlineHoleGuard), cs);
     inlineHoleGuardPatched = true;
--- a/js/src/methodjit/TrampolineCompiler.cpp
+++ b/js/src/methodjit/TrampolineCompiler.cpp
@@ -141,17 +141,17 @@ TrampolineCompiler::generateForceReturn(
 bool
 TrampolineCompiler::generateForceReturnFast(Assembler &masm)
 {
 #ifdef _WIN64
     masm.addPtr(Imm32(32), Registers::StackPointer);
 #else
     // In case of no fast call, when we change the return address,
     // we need to adjust esp accordingly.
-    masm.addPtr(Imm32(8), Registers::StackPointer);
+    masm.addPtr(Imm32(16), Registers::StackPointer);
 #endif
     return generateForceReturn(masm);
 }
 #endif
 
 } /* namespace mjit */
 } /* namespace js */
 
--- a/js/src/methodjit/TrampolineSUNWX86.s
+++ b/js/src/methodjit/TrampolineSUNWX86.s
@@ -1,9 +1,9 @@
-/ -*- Mode: C++/ tab-width: 4/ indent-tabs-mode: nil/ c-basic-offset: 4 -*-
+/ -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 / ***** BEGIN LICENSE BLOCK *****
 / Version: MPL 1.1/GPL 2.0/LGPL 2.1
 /
 / The contents of this file are subject to the Mozilla Public License Version
 / 1.1 (the "License")/ you may not use this file except in compliance with
 / the License. You may obtain a copy of the License at
 / http://www.mozilla.org/MPL/
 /
@@ -87,18 +87,18 @@ JaegerTrampolineReturn:
 .size   JaegerTrampolineReturn, . - JaegerTrampolineReturn
 
 
 / void *JaegerThrowpoline(js::VMFrame *vmFrame)
 .global JaegerThrowpoline
 .type   JaegerThrowpoline, @function
 JaegerThrowpoline:
     /* For Sun Studio there is no fast call. */
-    /* We add the stack by 8 before. */
-    addl $0x8, %esp
+    /* The stack pointer was adjusted by 16 earlier. */
+    addl $0x10, %esp
     /* Align the stack to 16 bytes. */
     pushl %esp 
     pushl (%esp)
     pushl (%esp)
     pushl (%esp)
     call js_InternalThrow
     /* Bump the stack by 0x2c, as in the basic trampoline, but */
     /* also one more word to clean up the stack for js_InternalThrow. */
@@ -122,14 +122,14 @@ throwpoline_exit:
 
 .global InjectJaegerReturn
 .type   InjectJaegerReturn, @function
 InjectJaegerReturn:
     movl 0x18(%ebx), %edx                        /* fp->rval_ data */
     movl 0x1C(%ebx), %ecx                        /* fp->rval_ type */
     movl 0x14(%ebx), %eax                        /* fp->ncode_ */
     /* For Sun Studio there is no fast call. */
-    /* We add the stack by 8 before. */
-    addl $0x8, %esp
+    /* The stack pointer was adjusted by 16 earlier. */
+    addl $0x10, %esp
     /* Restore frame regs. */
     movl 0x1C(%esp), %ebx                        /* f.fp */
     jmp *%eax
 .size   InjectJaegerReturn, . - InjectJaegerReturn
--- a/js/src/nanojit-import-rev
+++ b/js/src/nanojit-import-rev
@@ -1,1 +1,1 @@
-1f90e61950c44193ea5a1800c06d7dba8240cfd9
+4effe362e918583ec7b98b08da24f02c0833d306
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -1168,39 +1168,43 @@ namespace nanojit
 
         RegisterMask allow = ins->isD() ? FpRegs : GpRegs;
 
         Register rr = prepareResultReg(ins, allow);
 
         Register rf = findRegFor(iffalse, allow & ~rmask(rr));
 
         if (ins->isop(LIR_cmovd)) {
+            // See Nativei386.cpp:asm_cmov() for an explanation of the subtleties here.
             NIns* target = _nIns;
             asm_nongp_copy(rr, rf);
-            asm_branch(false, cond, target);
+            asm_branch_helper(false, cond, target);
 
             // If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
             Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
 
             if (rr != rt)
                 asm_nongp_copy(rr, rt);
+
             freeResourcesOf(ins);
             if (!iftrue->isInReg()) {
                 NanoAssert(rt == rr);
                 findSpecificRegForUnallocated(iftrue, rr);
             }
+
+            asm_cmp(cond);
             return;
         }
 
         // If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
         Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
 
         // WARNING: We cannot generate any code that affects the condition
-        // codes between the MRcc generation here and the asm_cmp() call
-        // below.  See asm_cmp() for more details.
+        // codes between the MRcc generation here and the asm_cmpi() call
+        // below.  See asm_cmpi() for more details.
         LOpcode condop = cond->opcode();
         if (ins->isop(LIR_cmovi)) {
             switch (condop) {
             case LIR_eqi:  case LIR_eqq:    CMOVNE( rr, rf);  break;
             case LIR_lti:  case LIR_ltq:    CMOVNL( rr, rf);  break;
             case LIR_gti:  case LIR_gtq:    CMOVNG( rr, rf);  break;
             case LIR_lei:  case LIR_leq:    CMOVNLE(rr, rf);  break;
             case LIR_gei:  case LIR_geq:    CMOVNGE(rr, rf);  break;
@@ -1229,40 +1233,46 @@ namespace nanojit
             MR(rr, rt);
 
         freeResourcesOf(ins);
         if (!iftrue->isInReg()) {
             NanoAssert(rt == rr);
             findSpecificRegForUnallocated(iftrue, rr);
         }
 
-        asm_cmp(cond);
+        asm_cmpi(cond);
     }
 
-    NIns* Assembler::asm_branch(bool onFalse, LIns *cond, NIns *target) {
-        NanoAssert(cond->isCmp());
-        LOpcode condop = cond->opcode();
+    NIns* Assembler::asm_branch(bool onFalse, LIns* cond, NIns* target) {
+        NIns* patch = asm_branch_helper(onFalse, cond, target);
+        asm_cmp(cond);
+        return patch;
+    }
 
+    NIns* Assembler::asm_branch_helper(bool onFalse, LIns *cond, NIns *target) {
         if (target && !isTargetWithinS32(target)) {
-            // conditional jumps beyond 32bit range, so invert the branch/compare
-            // and emit an unconditional jump to the target
+            // A conditional jump beyond 32-bit range, so invert the
+            // branch/compare and emit an unconditional jump to the target:
             //         j(inverted) B1
             //         jmp target
             //     B1:
             NIns* shortTarget = _nIns;
             JMP(target);
             target = shortTarget;
-
             onFalse = !onFalse;
         }
-        if (isCmpDOpcode(condop))
-            return asm_branchd(onFalse, cond, target);
+        return isCmpDOpcode(cond->opcode())
+             ? asm_branchd_helper(onFalse, cond, target)
+             : asm_branchi_helper(onFalse, cond, target);
+    }
 
+    NIns* Assembler::asm_branchi_helper(bool onFalse, LIns *cond, NIns *target) {
         // We must ensure there's room for the instruction before calculating
         // the offset.  And the offset determines the opcode (8bit or 32bit).
+        LOpcode condop = cond->opcode();
         if (target && isTargetWithinS8(target)) {
             if (onFalse) {
                 switch (condop) {
                 case LIR_eqi:  case LIR_eqq:    JNE8( 8, target); break;
                 case LIR_lti:  case LIR_ltq:    JNL8( 8, target); break;
                 case LIR_gti:  case LIR_gtq:    JNG8( 8, target); break;
                 case LIR_lei:  case LIR_leq:    JNLE8(8, target); break;
                 case LIR_gei:  case LIR_geq:    JNGE8(8, target); break;
@@ -1310,42 +1320,44 @@ namespace nanojit
                 case LIR_ltui: case LIR_ltuq:   JB( 8, target);   break;
                 case LIR_gtui: case LIR_gtuq:   JA( 8, target);   break;
                 case LIR_leui: case LIR_leuq:   JBE(8, target);   break;
                 case LIR_geui: case LIR_geuq:   JAE(8, target);   break;
                 default:                        NanoAssert(0);    break;
                 }
             }
         }
-        NIns *patch = _nIns;    // address of instruction to patch
-        asm_cmp(cond);
-        return patch;
+        return _nIns;   // address of instruction to patch
     }
 
     NIns* Assembler::asm_branch_ov(LOpcode, NIns* target) {
         if (target && !isTargetWithinS32(target)) {
             setError(ConditionalBranchTooFar);
             NanoAssert(0);
         }
         // We must ensure there's room for the instr before calculating
         // the offset.  And the offset determines the opcode (8bit or 32bit).
         if (target && isTargetWithinS8(target))
             JO8(8, target);
         else
             JO( 8, target);
         return _nIns;
     }
 
+    void Assembler::asm_cmp(LIns *cond) {
+        isCmpDOpcode(cond->opcode()) ? asm_cmpd(cond) : asm_cmpi(cond);
+    }
+
     // WARNING: this function cannot generate code that will affect the
     // condition codes prior to the generation of the test/cmp.  See
-    // Nativei386.cpp:asm_cmp() for details.
-    void Assembler::asm_cmp(LIns *cond) {
+    // Nativei386.cpp:asm_cmpi() for details.
+    void Assembler::asm_cmpi(LIns *cond) {
         LIns *b = cond->oprnd2();
         if (isImm32(b)) {
-            asm_cmp_imm(cond);
+            asm_cmpi_imm(cond);
             return;
         }
         LIns *a = cond->oprnd1();
         Register ra, rb;
         if (a != b) {
             findRegFor2(GpRegs, a, ra, GpRegs, b, rb);
         } else {
             // optimize-me: this will produce a const result!
@@ -1356,17 +1368,17 @@ namespace nanojit
         if (isCmpQOpcode(condop)) {
             CMPQR(ra, rb);
         } else {
             NanoAssert(isCmpIOpcode(condop));
             CMPLR(ra, rb);
         }
     }
 
-    void Assembler::asm_cmp_imm(LIns *cond) {
+    void Assembler::asm_cmpi_imm(LIns *cond) {
         LOpcode condop = cond->opcode();
         LIns *a = cond->oprnd1();
         LIns *b = cond->oprnd2();
         Register ra = findRegFor(a, GpRegs);
         int32_t imm = getImm32(b);
         if (isCmpQOpcode(condop)) {
             if (isS8(imm))
                 CMPQR8(ra, imm);
@@ -1394,21 +1406,19 @@ namespace nanojit
     //
     //  Here are the cases, using conditionals:
     //
     //  branch  >=  >   <=       <        =
     //  ------  --- --- ---      ---      ---
     //  LIR_jt  jae ja  swap+jae swap+ja  jp over je
     //  LIR_jf  jb  jbe swap+jb  swap+jbe jne+jp
 
-    NIns* Assembler::asm_branchd(bool onFalse, LIns *cond, NIns *target) {
+    NIns* Assembler::asm_branchd_helper(bool onFalse, LIns *cond, NIns *target) {
         LOpcode condop = cond->opcode();
         NIns *patch;
-        LIns *a = cond->oprnd1();
-        LIns *b = cond->oprnd2();
         if (condop == LIR_eqd) {
             if (onFalse) {
                 // branch if unordered or !=
                 JP(16, target);     // underrun of 12 needed, round up for overhang --> 16
                 JNE(0, target);     // no underrun needed, previous was enough
                 patch = _nIns;
             } else {
                 // jp skip (2byte)
@@ -1417,78 +1427,77 @@ namespace nanojit
                 underrunProtect(16); // underrun of 7 needed but we write 2 instr --> 16
                 NIns *skip = _nIns;
                 JE(0, target);      // no underrun needed, previous was enough
                 patch = _nIns;
                 JP8(0, skip);       // ditto
             }
         }
         else {
-            if (condop == LIR_ltd) {
-                condop = LIR_gtd;
-                LIns *t = a; a = b; b = t;
-            } else if (condop == LIR_led) {
-                condop = LIR_ged;
-                LIns *t = a; a = b; b = t;
-            }
-            if (condop == LIR_gtd) {
-                if (onFalse)
-                    JBE(8, target);
-                else
-                    JA(8, target);
-            } else { // LIR_ged
-                if (onFalse)
-                    JB(8, target);
-                else
-                    JAE(8, target);
+            // LIR_ltd and LIR_gtd are handled by the same case because
+            // asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise for
+            // LIR_led/LIR_ged.
+            switch (condop) {
+            case LIR_ltd:
+            case LIR_gtd: if (onFalse) JBE(8, target); else JA(8, target);  break;
+            case LIR_led:
+            case LIR_ged: if (onFalse) JB(8, target);  else JAE(8, target); break;
+            default:      NanoAssert(0);                                    break;
             }
             patch = _nIns;
         }
-        asm_cmpd(a, b);
         return patch;
     }
 
     void Assembler::asm_condd(LIns *ins) {
         LOpcode op = ins->opcode();
-        LIns *a = ins->oprnd1();
-        LIns *b = ins->oprnd2();
         if (op == LIR_eqd) {
             // result = ZF & !PF, must do logic on flags
             // r = al|bl|cl|dl, can only use rh without rex prefix
             Register r = prepareResultReg(ins, 1<<REGNUM(RAX) | 1<<REGNUM(RCX) |
                                                1<<REGNUM(RDX) | 1<<REGNUM(RBX));
             MOVZX8(r, r);       // movzx8   r,rl     r[8:63] = 0
             X86_AND8R(r);       // and      rl,rh    rl &= rh
             X86_SETNP(r);       // setnp    rh       rh = !PF
             X86_SETE(r);        // sete     rl       rl = ZF
         } else {
-            if (op == LIR_ltd) {
-                op = LIR_gtd;
-                LIns *t = a; a = b; b = t;
-            } else if (op == LIR_led) {
-                op = LIR_ged;
-                LIns *t = a; a = b; b = t;
-            }
+            // LIR_ltd and LIR_gtd are handled by the same case because
+            // asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise for
+            // LIR_led/LIR_ged.
             Register r = prepareResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
             MOVZX8(r, r);
-            if (op == LIR_gtd)
-                SETA(r);
-            else
-                SETAE(r);
+            switch (op) {
+            case LIR_ltd:
+            case LIR_gtd: SETA(r);       break;
+            case LIR_led:
+            case LIR_ged: SETAE(r);      break;
+            default:      NanoAssert(0); break;
+            }
         }
 
         freeResourcesOf(ins);
 
-        asm_cmpd(a, b);
+        asm_cmpd(ins);
     }
 
     // WARNING: This function cannot generate any code that will affect the
-    // condition codes prior to the generation of the ucomisd.  See asm_cmp()
+    // condition codes prior to the generation of the ucomisd.  See asm_cmpi()
     // for more details.
-    void Assembler::asm_cmpd(LIns *a, LIns *b) {
+    void Assembler::asm_cmpd(LIns *cond) {
+        LOpcode opcode = cond->opcode();
+        LIns* a = cond->oprnd1();
+        LIns* b = cond->oprnd2();
+        // First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
+        if (opcode == LIR_ltd) {
+            opcode = LIR_gtd;
+            LIns* t = a; a = b; b = t;
+        } else if (opcode == LIR_led) {
+            opcode = LIR_ged;
+            LIns* t = a; a = b; b = t;
+        }
         Register ra, rb;
         findRegFor2(FpRegs, a, ra, FpRegs, b, rb);
         UCOMISD(ra, rb);
     }
 
     // Return true if we can generate code for this instruction that neither
     // sets CCs nor clobbers any input register.
     // LEA is the only native instruction that fits those requirements.
@@ -1513,17 +1522,17 @@ namespace nanojit
         return false;
     }
 
     bool Assembler::canRemat(LIns* ins) {
         return ins->isImmAny() || ins->isop(LIR_allocp) || canRematLEA(ins);
     }
 
     // WARNING: the code generated by this function must not affect the
-    // condition codes.  See asm_cmp() for details.
+    // condition codes.  See asm_cmpi() for details.
     void Assembler::asm_restore(LIns *ins, Register r) {
         if (ins->isop(LIR_allocp)) {
             int d = arDisp(ins);
             LEAQRM(r, d, FP);
         }
         else if (ins->isImmI()) {
             asm_immi(r, ins->immI(), /*canClobberCCs*/false);
         }
@@ -1582,17 +1591,17 @@ namespace nanojit
         case LIR_leui:   SETBE(r);   break;
         case LIR_gtuq:
         case LIR_gtui:   SETA(r);    break;
         case LIR_geuq:
         case LIR_geui:   SETAE(r);   break;
         }
         freeResourcesOf(ins);
 
-        asm_cmp(ins);
+        asm_cmpi(ins);
     }
 
     void Assembler::asm_ret(LIns *ins) {
         genEpilogue();
 
         // Restore RSP from RBP, undoing SUB(RSP,amt) in the prologue
         MR(RSP,FP);
 
--- a/js/src/nanojit/NativeX64.h
+++ b/js/src/nanojit/NativeX64.h
@@ -418,19 +418,22 @@ namespace nanojit
         void asm_arith_imm(LIns*);\
         void beginOp1Regs(LIns *ins, RegisterMask allow, Register &rr, Register &ra);\
         void beginOp2Regs(LIns *ins, RegisterMask allow, Register &rr, Register &ra, Register &rb);\
         void endOpRegs(LIns *ins, Register rr, Register ra);\
         void beginLoadRegs(LIns *ins, RegisterMask allow, Register &rr, int32_t &d, Register &rb);\
         void endLoadRegs(LIns *ins);\
         void dis(NIns *p, int bytes);\
         void asm_cmp(LIns*);\
-        void asm_cmp_imm(LIns*);\
-        void asm_cmpd(LIns*, LIns*);\
-        NIns* asm_branchd(bool, LIns*, NIns*);\
+        void asm_cmpi(LIns*);\
+        void asm_cmpi_imm(LIns*);\
+        void asm_cmpd(LIns*);\
+        NIns* asm_branch_helper(bool, LIns*, NIns*);\
+        NIns* asm_branchi_helper(bool, LIns*, NIns*);\
+        NIns* asm_branchd_helper(bool, LIns*, NIns*);\
         void asm_div(LIns *ins);\
         void asm_div_mod(LIns *ins);\
         int max_stk_used;\
         void PUSHR(Register r);\
         void POPR(Register r);\
         void NOT(Register r);\
         void NEG(Register r);\
         void IDIV(Register r);\
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -849,18 +849,16 @@ namespace nanojit
         asm_output("test ax, %d", i);
     }
 
     inline void Assembler::FNSTSW_AX() { count_fpu(); FPUc(0xdfe0);    asm_output("fnstsw_ax"); }
     inline void Assembler::FCHS()      { count_fpu(); FPUc(0xd9e0);    asm_output("fchs"); }
     inline void Assembler::FLD1()      { count_fpu(); FPUc(0xd9e8);    asm_output("fld1"); fpu_push(); }
     inline void Assembler::FLDZ()      { count_fpu(); FPUc(0xd9ee);    asm_output("fldz"); fpu_push(); }
 
-    inline void Assembler::FFREE(R r)  { count_fpu(); FPU(0xddc0, r);  asm_output("ffree %s",gpn(r)); }
-
     inline void Assembler::FST32(bool p, I32 d, R b){ count_stq(); FPUm(0xd902|(p?1:0), d, b);   asm_output("fst%s32 %d(%s)", (p?"p":""), d, gpn(b)); if (p) fpu_pop(); }
     inline void Assembler::FSTQ(bool p, I32 d, R b) { count_stq(); FPUm(0xdd02|(p?1:0), d, b);   asm_output("fst%sq %d(%s)", (p?"p":""), d, gpn(b)); if (p) fpu_pop(); }
 
     inline void Assembler::FSTPQ(I32 d, R b) { FSTQ(1, d, b); }
 
     inline void Assembler::FCOM(bool p, I32 d, R b) { count_fpuld(); FPUm(0xdc02|(p?1:0), d, b); asm_output("fcom%s %d(%s)", (p?"p":""), d, gpn(b)); if (p) fpu_pop(); }
     inline void Assembler::FCOMdm(bool p, const double* dm) {
         count_fpuld();
@@ -889,18 +887,16 @@ namespace nanojit
     inline void Assembler::FDIV( I32 d, R b) { count_fpu(); FPUm(0xdc06, d, b); asm_output("fdiv %d(%s)", d, gpn(b)); }
     inline void Assembler::FDIVR(I32 d, R b) { count_fpu(); FPUm(0xdc07, d, b); asm_output("fdivr %d(%s)", d, gpn(b)); }
 
     inline void Assembler::FADDdm( const double *dm) { count_ldq(); FPUdm(0xdc00, dm); asm_output("fadd (%p)", (void*)dm); }
     inline void Assembler::FSUBRdm(const double* dm) { count_ldq(); FPUdm(0xdc05, dm); asm_output("fsubr (%p)", (void*)dm); }
     inline void Assembler::FMULdm( const double* dm) { count_ldq(); FPUdm(0xdc01, dm); asm_output("fmul (%p)", (void*)dm); }
     inline void Assembler::FDIVRdm(const double* dm) { count_ldq(); FPUdm(0xdc07, dm); asm_output("fdivr (%p)", (void*)dm); }
 
-    inline void Assembler::FINCSTP()   { count_fpu(); FPUc(0xd9f7); asm_output("fincstp"); fpu_pop(); }
-
     inline void Assembler::FCOMP()     { count_fpu(); FPUc(0xD8D9);    asm_output("fcomp"); fpu_pop();}
     inline void Assembler::FCOMPP()    { count_fpu(); FPUc(0xDED9);    asm_output("fcompp"); fpu_pop();fpu_pop();}
     inline void Assembler::FLDr(R r)   { count_ldq(); FPU(0xd9c0, r);  asm_output("fld %s", gpn(r)); fpu_push(); }
     inline void Assembler::EMMS()      { count_fpu(); FPUc(0x0f77);    asm_output("emms"); }
 
     // standard direct call
     inline void Assembler::CALL(const CallInfo* ci) {
         count_call();
@@ -1203,17 +1199,17 @@ namespace nanojit
     }
 
     bool Assembler::canRemat(LIns* ins)
     {
         return ins->isImmAny() || ins->isop(LIR_allocp) || canRematLEA(ins);
     }
 
     // WARNING: the code generated by this function must not affect the
-    // condition codes.  See asm_cmp().
+    // condition codes.  See asm_cmpi().
     void Assembler::asm_restore(LIns* ins, Register r)
     {
         NanoAssert(ins->getReg() == r);
 
         uint32_t arg;
         uint32_t abi_regcount;
         if (ins->isop(LIR_allocp)) {
             // The value of a LIR_allocp instruction is the address of the
@@ -1516,56 +1512,60 @@ namespace nanojit
             Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
             ST(rd, dd+4, t);
             LD(t, ds+4, rs);
             ST(rd, dd, t);
             LD(t, ds, rs);
         }
     }
 
-    NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
+    NIns* Assembler::asm_branch_helper(bool branchOnFalse, LIns* cond, NIns* targ)
     {
-        LOpcode condop = cond->opcode();
-        NanoAssert(cond->isCmp());
-
-        // Handle float conditions separately.
-        if (isCmpDOpcode(condop)) {
-            return asm_branchd(branchOnFalse, cond, targ);
-        }
-
+        return isCmpDOpcode(cond->opcode())
+             ? asm_branchd_helper(branchOnFalse, cond, targ)
+             : asm_branchi_helper(branchOnFalse, cond, targ);
+    }
+
+    NIns* Assembler::asm_branchi_helper(bool branchOnFalse, LIns* cond, NIns* targ)
+    {
         if (branchOnFalse) {
             // op == LIR_xf/LIR_jf
-            switch (condop) {
+            switch (cond->opcode()) {
             case LIR_eqi:   JNE(targ);      break;
             case LIR_lti:   JNL(targ);      break;
             case LIR_lei:   JNLE(targ);     break;
             case LIR_gti:   JNG(targ);      break;
             case LIR_gei:   JNGE(targ);     break;
             case LIR_ltui:  JNB(targ);      break;
             case LIR_leui:  JNBE(targ);     break;
             case LIR_gtui:  JNA(targ);      break;
             case LIR_geui:  JNAE(targ);     break;
             default:        NanoAssert(0);  break;
             }
         } else {
             // op == LIR_xt/LIR_jt
-            switch (condop) {
+            switch (cond->opcode()) {
             case LIR_eqi:   JE(targ);       break;
             case LIR_lti:   JL(targ);       break;
             case LIR_lei:   JLE(targ);      break;
             case LIR_gti:   JG(targ);       break;
             case LIR_gei:   JGE(targ);      break;
             case LIR_ltui:  JB(targ);       break;
             case LIR_leui:  JBE(targ);      break;
             case LIR_gtui:  JA(targ);       break;
             case LIR_geui:  JAE(targ);      break;
             default:        NanoAssert(0);  break;
             }
         }
-        NIns* at = _nIns;
+        return _nIns;
+    }
+
+    NIns* Assembler::asm_branch(bool branchOnFalse, LIns* cond, NIns* targ)
+    {
+        NIns* at = asm_branch_helper(branchOnFalse, cond, targ);
         asm_cmp(cond);
         return at;
     }
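
The call order above looks inverted but is correct: nanojit emits machine
code backwards, from high addresses down, so the jcc written first by
asm_branch_helper() lands *after* the cmp/test written by asm_cmp() in
execution order. A toy model of the idea (buffer names hypothetical):

    #include <cstdint>
    #include <cstdio>

    static uint8_t buf[16];
    static uint8_t* nIns = buf + sizeof(buf);  // grows downward

    static void emit(uint8_t b) { *--nIns = b; }

    int main() {
        emit(0x74);  // stand-in for "je targ": generated first, runs second
        emit(0x39);  // stand-in for "cmp":     generated second, runs first
        printf("first byte executed: 0x%02x\n", nIns[0]);  // prints 0x39
    }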
 
     NIns* Assembler::asm_branch_ov(LOpcode, NIns* target)
     {
         JO(target);
         return _nIns;
@@ -1579,16 +1579,21 @@ namespace nanojit
     }
 
     void Assembler::asm_jtbl(LIns* ins, NIns** table)
     {
         Register indexreg = findRegFor(ins->oprnd1(), GpRegs);
         JMP_indexed(indexreg, 2, table);
     }
 
+    void Assembler::asm_cmp(LIns *cond)
+    {
+        isCmpDOpcode(cond->opcode()) ? asm_cmpd(cond) : asm_cmpi(cond);
+    }
+
     // This generates a 'test' or 'cmp' instruction for a condition, which
     // causes the condition codes to be set appropriately.  It's used with
     // conditional branches, conditional moves, and when generating
     // conditional values.  For example:
     //
     //   LIR:   eq1 = eq a, 0
     //   LIR:   xf1: xf eq1 -> ...
     //   asm:       test edx, edx       # generated by this function
@@ -1618,17 +1623,17 @@ namespace nanojit
     // regstate, this function cannot generate any code that will affect the
     // condition codes prior to the generation of the test/cmp, because any
     // such code will be run after the test/cmp but before the instruction
     // that consumes the condition code.  And because this function calls
     // findRegFor() before the test/cmp is generated, and findRegFor() calls
     // asm_restore(), that means that asm_restore() cannot generate code which
     // affects the condition codes.
     //
-    void Assembler::asm_cmp(LIns *cond)
+    void Assembler::asm_cmpi(LIns *cond)
     {
         LIns* lhs = cond->oprnd1();
         LIns* rhs = cond->oprnd2();
 
         NanoAssert(lhs->isI() && rhs->isI());
 
         // Ready to issue the compare.
         if (rhs->isImmI()) {
@@ -1729,17 +1734,17 @@ namespace nanojit
         case LIR_leui:  SETBE(r);       break;
         case LIR_gtui:  SETA(r);        break;
         case LIR_geui:  SETAE(r);       break;
         default:        NanoAssert(0);  break;
         }
 
         freeResourcesOf(ins);
 
-        asm_cmp(ins);
+        asm_cmpi(ins);
     }
 
     // Two example cases for "ins = add lhs, rhs".  '*' lines are those
     // generated in this function.
     //
     //   asm:   define lhs into rr
     //   asm:   define rhs into rb
     //          ...
@@ -2046,76 +2051,96 @@ namespace nanojit
         LIns* iftrue  = ins->oprnd2();
         LIns* iffalse = ins->oprnd3();
 
         NanoAssert(condval->isCmp());
         NanoAssert((ins->isop(LIR_cmovi) && iftrue->isI() && iffalse->isI()) ||
                    (ins->isop(LIR_cmovd) && iftrue->isD() && iffalse->isD()));
 
         if (!_config.i386_sse2 && ins->isop(LIR_cmovd)) {
+            // See the SSE2 case below for an explanation of the subtleties here.
             debug_only( Register rr = ) prepareResultReg(ins, x87Regs);
             NanoAssert(FST0 == rr);
-            NanoAssert(!iftrue->isInReg() || iftrue->getReg() == FST0);
-
-            NanoAssert(!iffalse->isInReg());
+            NanoAssert(!iftrue->isInReg() && !iffalse->isInReg());
 
             NIns* target = _nIns;
 
             if (iffalse->isImmD()) {
                 asm_immd(FST0, iffalse->immDasQ(), iffalse->immD(), /*canClobberCCs*/false);
             } else {
                 int df = findMemFor(iffalse);
                 FLDQ(df, FP);
             }
-
-            FINCSTP();
-            // Its not sufficient to merely decrement the FP stack pointer, we have to
-            // also free FST0, otherwise the load above fails.
-            FFREE(FST0);
-            asm_branch(false, condval, target);
-
+            FSTP(FST0);     // pop the stack
+            asm_branch_helper(false, condval, target);
+
+            NanoAssert(ins->getReg() == rr);
             freeResourcesOf(ins);
             if (!iftrue->isInReg())
                 findSpecificRegForUnallocated(iftrue, FST0);
 
+            asm_cmp(condval);
+
             return;
         }
 
         RegisterMask allow = ins->isD() ? XmmRegs : GpRegs;
-
         Register rr = prepareResultReg(ins, allow);
-
         Register rf = findRegFor(iffalse, allow & ~rmask(rr));
 
         if (ins->isop(LIR_cmovd)) {
+            // The obvious way to handle this is as follows:
+            //
+            //     mov rr, rt       # only needed if rt is live afterwards
+            //     do comparison
+            //     jt end
+            //     mov rr, rf
+            //   end:
+            //
+            // The problem with this is that doing the comparison can cause
+            // registers to be evicted, possibly including 'rr', which holds
+            // 'ins'.  And that screws things up.  So instead we do this:
+            //
+            //     do comparison
+            //     mov rr, rt       # only needed if rt is live afterwards
+            //     jt end
+            //     mov rr, rf
+            //   end:
+            //
+            // Putting the 'mov' between the comparison and the jump is ok
+            // because move instructions don't modify the condition codes.
+            //
             NIns* target = _nIns;
             asm_nongp_copy(rr, rf);
-            asm_branch(false, condval, target);
+            asm_branch_helper(false, condval, target);
 
             // If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
             Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
 
             if (rr != rt)
                 asm_nongp_copy(rr, rt);
+
+            NanoAssert(ins->getReg() == rr);
             freeResourcesOf(ins);
             if (!iftrue->isInReg()) {
                 NanoAssert(rt == rr);
                 findSpecificRegForUnallocated(iftrue, rr);
             }
+
+            asm_cmp(condval);
             return;
         }
 
         // If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
         Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
-
         NanoAssert(ins->isop(LIR_cmovi));
 
         // WARNING: We cannot generate any code that affects the condition
-        // codes between the MRcc generation here and the asm_cmp() call
-        // below.  See asm_cmp() for more details.
+        // codes between the MRcc generation here and the asm_cmpi() call
+        // below.  See asm_cmpi() for more details.
         switch (condval->opcode()) {
             // Note that these are all opposites...
             case LIR_eqi:    MRNE(rr, rf);   break;
             case LIR_lti:    MRGE(rr, rf);   break;
             case LIR_lei:    MRG( rr, rf);   break;
             case LIR_gti:    MRLE(rr, rf);   break;
             case LIR_gei:    MRL( rr, rf);   break;
             case LIR_ltui:   MRAE(rr, rf);   break;
@@ -2123,16 +2148,17 @@ namespace nanojit
             case LIR_gtui:   MRBE(rr, rf);   break;
             case LIR_geui:   MRB( rr, rf);   break;
             default: NanoAssert(0); break;
         }
 
         if (rr != rt)
             MR(rr, rt);
 
+        NanoAssert(ins->getReg() == rr);
         freeResourcesOf(ins);
         if (!iftrue->isInReg()) {
             NanoAssert(rt == rr);
             findSpecificRegForUnallocated(iftrue, rr);
         }
 
         asm_cmp(condval);
     }
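
The scheduling trick in the comment above rests on one fact: plain register
moves (MR, and MOVSD via asm_nongp_copy) never modify EFLAGS, so a copy may
legally sit between the compare and the conditional jump. What the emitted
sequence computes, stated as C++ (a correspondence, not the emitted code):

    static double cmovd(bool cond, double rt, double rf) {
        return cond ? rt : rf;  // the jcc skips the "rr = rf" copy when cond holds
    }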
@@ -2609,17 +2635,17 @@ namespace nanojit
         } else if ((rmask(rd) & GpRegs) && (rmask(rs) & XmmRegs)) {
             // xmm -> gp
             SSE_MOVD(rd, rs);
         } else {
             NanoAssertMsgf(false, "bad asm_nongp_copy(%s, %s)", gpn(rd), gpn(rs));
         }
     }
 
-    NIns* Assembler::asm_branchd(bool branchOnFalse, LIns *cond, NIns *targ)
+    NIns* Assembler::asm_branchd_helper(bool branchOnFalse, LIns* cond, NIns *targ)
     {
         NIns* at = 0;
         LOpcode opcode = cond->opcode();
 
         if (_config.i386_sse2) {
             // LIR_ltd and LIR_gtd are handled by the same case because
             // asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
             // for LIR_led/LIR_ged.
@@ -2668,24 +2694,23 @@ namespace nanojit
             if (branchOnFalse)
                 JP(targ);
             else
                 JNP(targ);
         }
 
         if (!at)
             at = _nIns;
-        asm_cmpd(cond);
 
         return at;
     }
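
The trailing JP/JNP pair is there for the unordered case: every ordered
double comparison is false when either operand is NaN, so a branch-on-false
must also fire when ucomisd reports "unordered" (PF=1). In C++ terms:

    #include <cassert>
    #include <cmath>

    int main() {
        double nan = std::nan("");
        assert(!(nan == nan));  // eqd is false when unordered, so the
                                // branch-on-false path must be taken
    }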
 
     // WARNING: This function cannot generate any code that will affect the
     // condition codes prior to the generation of the
-    // ucomisd/fcompp/fcmop/fcom.  See asm_cmp() for more details.
+    // ucomisd/fcompp/fcomp/fcom.  See asm_cmpi() for more details.
     void Assembler::asm_cmpd(LIns *cond)
     {
         LOpcode condop = cond->opcode();
         NanoAssert(isCmpDOpcode(condop));
         LIns* lhs = cond->oprnd1();
         LIns* rhs = cond->oprnd2();
         NanoAssert(lhs->isD() && rhs->isD());
 
@@ -2694,24 +2719,23 @@ namespace nanojit
             if (condop == LIR_ltd) {
                 condop = LIR_gtd;
                 LIns* t = lhs; lhs = rhs; rhs = t;
             } else if (condop == LIR_led) {
                 condop = LIR_ged;
                 LIns* t = lhs; lhs = rhs; rhs = t;
             }
 
-
             // LIR_eqd, if lhs == rhs:
             //   ucomisd       ZPC   outcome (SETNP/JNP succeeds if P==0)
             //   -------       ---   -------
             //   UNORDERED     111   SETNP/JNP fails
             //   EQUAL         100   SETNP/JNP succeeds
             //
-            // LIR_eqd, if lsh != rhs;
+            // LIR_eqd, if lhs != rhs:
+            //   ucomisd       ZPC   outcome (SETP/JP succeeds if P==1,
+            //                                SETE/JE succeeds if Z==1)
             //   -------       ---   -------
             //   UNORDERED     111   SETP/JP succeeds (and skips to fail target)
             //   EQUAL         100   SETP/JP fails, SETE/JE succeeds
             //   GREATER_THAN  000   SETP/JP fails, SETE/JE fails
             //   LESS_THAN     001   SETP/JP fails, SETE/JE fails
             //
@@ -2805,23 +2829,20 @@ namespace nanojit
                 if (pop)
                     FCOMPP();
                 else
                     FCOMP();
                 FLDr(FST0); // DUP
             } else {
                 TEST_AH(mask);
                 FNSTSW_AX();        // requires rEAX to be free
-                if (rhs->isImmD())
-                {
+                if (rhs->isImmD()) {
                     const uint64_t* p = findImmDFromPool(rhs->immDasQ());
                     FCOMdm(pop, (const double*)p);
-                }
-                else
-                {
+                } else {
                     int d = findMemFor(rhs);
                     FCOM(pop, d, FP);
                 }
             }
         }
     }
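
For reference, the x87 path above copies the FPU status word into AX
(fnstsw ax, which requires EAX to be free) and tests bits of AH. Assuming
the standard x87 status-word layout, AH holds C0 in bit 0, C2 in bit 2 and
C3 in bit 6:

    #include <cstdio>

    // After "fcom rhs ; fnstsw ax", with lhs in ST(0):
    //   lhs > rhs   ->  C3=0 C2=0 C0=0
    //   lhs < rhs   ->  C3=0 C2=0 C0=1
    //   lhs == rhs  ->  C3=1 C2=0 C0=0
    //   unordered   ->  C3=1 C2=1 C0=1   (either operand is NaN)
    int main() {
        const unsigned c0 = 0x01, c2 = 0x04, c3 = 0x40;
        printf("mask for 'unordered': 0x%02x\n", c0 | c2 | c3);  // 0x45
    }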
 
     // Increment the 32-bit profiling counter at pCtr, without
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -194,19 +194,22 @@ namespace nanojit
         void nativePageSetup();\
         void underrunProtect(int);\
         bool hardenNopInsertion(const Config& c) { return c.harden_nop_insertion; } \
         void asm_immi(Register r, int32_t val, bool canClobberCCs);\
         void asm_stkarg(LIns* p, int32_t& stkd);\
         void asm_farg(LIns*, int32_t& stkd);\
         void asm_arg(ArgType ty, LIns* p, Register r, int32_t& stkd);\
         void asm_pusharg(LIns*);\
+        void asm_cmp(LIns *cond); \
+        void asm_cmpi(LIns *cond); \
         void asm_cmpd(LIns *cond);\
-        NIns* asm_branchd(bool, LIns*, NIns*);\
-        void asm_cmp(LIns *cond); \
+        NIns* asm_branch_helper(bool, LIns* cond, NIns*);\
+        NIns* asm_branchi_helper(bool, LIns* cond, NIns*);\
+        NIns* asm_branchd_helper(bool, LIns* cond, NIns*);\
         void asm_div_mod(LIns *cond); \
         void asm_load(int d, Register r); \
         void asm_immd(Register r, uint64_t q, double d, bool canClobberCCs); \
         \
         /* These function generate fragments of instructions. */ \
         void IMM8(int32_t i) { /* Length: 1 byte. */ \
             _nIns -= 1; \
             *((int8_t*)_nIns) = int8_t(i); \
@@ -424,17 +427,16 @@ namespace nanojit
         void FPUm(int32_t o, int32_t d, Register b); \
         void FPUdm(int32_t o, const double* const m); \
         void TEST_AH(int32_t i); \
         void TEST_AX(int32_t i); \
         void FNSTSW_AX(); \
         void FCHS(); \
         void FLD1(); \
         void FLDZ(); \
-        void FFREE(Register r); \
         void FST32(bool p, int32_t d, Register b); \
         void FSTQ(bool p, int32_t d, Register b); \
         void FSTPQ(int32_t d, Register b); \
         void FCOM(bool p, int32_t d, Register b); \
         void FCOMdm(bool p, const double* dm); \
         void FLD32(int32_t d, Register b); \
         void FLDQ(int32_t d, Register b); \
         void FLDQdm(const double* dm); \
@@ -446,17 +448,16 @@ namespace nanojit
         void FSUBR(int32_t d, Register b); \
         void FMUL( int32_t d, Register b); \
         void FDIV( int32_t d, Register b); \
         void FDIVR(int32_t d, Register b); \
         void FADDdm( const double *dm); \
         void FSUBRdm(const double* dm); \
         void FMULdm( const double* dm); \
         void FDIVRdm(const double* dm); \
-        void FINCSTP(); \
         void FSTP(Register r) { \
             count_fpu(); \
             FPU(0xddd8, r); \
             asm_output("fstp %s", gpn(r)); \
             fpu_pop(); \
         }; \
         void FCOMP(); \
         void FCOMPP(); \
--- a/js/src/prmjtime.cpp
+++ b/js/src/prmjtime.cpp
@@ -578,43 +578,52 @@ PRMJ_FormatTime(char *buf, int buflen, c
 #if defined(XP_UNIX) || defined(XP_WIN) || defined(XP_OS2) || defined(XP_BEOS)
     struct tm a;
     int fake_tm_year = 0;
 #ifdef NS_HAVE_INVALID_PARAMETER_HANDLER
     _invalid_parameter_handler oldHandler;
     int oldReportMode;
 #endif
 
-    /* Zero out the tm struct.  Linux, SunOS 4 struct tm has extra members int
-     * tm_gmtoff, char *tm_zone; when tm_zone is garbage, strftime gets
-     * confused and dumps core.  NSPR20 prtime.c attempts to fill these in by
-     * calling mktime on the partially filled struct, but this doesn't seem to
-     * work as well; the result string has "can't get timezone" for ECMA-valid
-     * years.  Might still make sense to use this, but find the range of years
-     * for which valid tz information exists, and map (per ECMA hint) from the
-     * given year into that range.
-
-     * N.B. This hasn't been tested with anything that actually _uses_
-     * tm_gmtoff; zero might be the wrong thing to set it to if you really need
-     * to format a time.  This fix is for jsdate.c, which only uses
-     * JS_FormatTime to get a string representing the time zone.  */
     memset(&a, 0, sizeof(struct tm));
 
     a.tm_sec = prtm->tm_sec;
     a.tm_min = prtm->tm_min;
     a.tm_hour = prtm->tm_hour;
     a.tm_mday = prtm->tm_mday;
     a.tm_mon = prtm->tm_mon;
     a.tm_wday = prtm->tm_wday;
 
+    /*
+     * On systems where |struct tm| has members tm_gmtoff and tm_zone, we
+     * must fill in those values, or else strftime will return wrong results
+     * (e.g., bug 511726, bug 554338).
+     */
 #if defined(HAVE_LOCALTIME_R) && defined(HAVE_TM_ZONE_TM_GMTOFF)
     {
+        /*
+         * Fill out |td| to the time represented by |prtm|, leaving the
+         * timezone fields zeroed out. localtime_r will then fill in the
+         * timezone fields for that local time according to the system's
+         * timezone parameters.
+         */
         struct tm td;
-        time_t bogus = 0;
-        localtime_r(&bogus, &td);
+        memset(&td, 0, sizeof(td));
+        td.tm_sec = prtm->tm_sec;
+        td.tm_min = prtm->tm_min;
+        td.tm_hour = prtm->tm_hour;
+        td.tm_mday = prtm->tm_mday;
+        td.tm_mon = prtm->tm_mon;
+        td.tm_wday = prtm->tm_wday;
+        td.tm_year = prtm->tm_year - 1900;
+        td.tm_yday = prtm->tm_yday;
+        td.tm_isdst = prtm->tm_isdst;
+        time_t t = mktime(&td);
+        localtime_r(&t, &td);
+
         a.tm_gmtoff = td.tm_gmtoff;
         a.tm_zone = td.tm_zone;
     }
 #endif
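
The round-trip above is the portable way to recover timezone data for an
arbitrary date: mktime() interprets the broken-down local time, and
localtime_r() converts back, filling tm_gmtoff/tm_zone on systems that have
them. A standalone sketch (POSIX, glibc/BSD-style struct tm assumed):

    #include <cstdio>
    #include <cstring>
    #include <ctime>

    int main() {
        struct tm td;
        memset(&td, 0, sizeof(td));
        td.tm_year = 2010 - 1900;  // years since 1900
        td.tm_mon  = 11;           // December (months are 0-based)
        td.tm_mday = 6;
        td.tm_isdst = -1;          // let mktime decide whether DST applies
        time_t t = mktime(&td);    // local broken-down time -> time_t
        localtime_r(&t, &td);      // time_t -> local time, tz fields filled
    #if defined(__GLIBC__) || defined(__APPLE__)
        printf("gmtoff=%ld zone=%s\n", (long)td.tm_gmtoff, td.tm_zone);
    #endif
        return 0;
    }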
 
     /*
      * Years before 1900 and after 9999 cause strftime() to abort on Windows.
      * To avoid that we replace it with FAKE_YEAR_BASE + year % 100 and then
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -2083,20 +2083,32 @@ Disassemble(JSContext *cx, uintN argc, j
         JSString *str = JSVAL_TO_STRING(argv[0]);
         lines |= !!JS_MatchStringAndAscii(str, "-l");
         recursive |= !!JS_MatchStringAndAscii(str, "-r");
         if (!lines && !recursive)
             break;
         argv++, argc--;
     }
 
-    for (uintN i = 0; i < argc; i++) {
-        if (!DisassembleValue(cx, argv[i], lines, recursive))
-            return false;
+    if (argc == 0) {
+        /* Without arguments, disassemble the current script. */
+        if (JSStackFrame *frame = JS_GetScriptedCaller(cx, NULL)) {
+            JSScript *script = JS_GetFrameScript(cx, frame);
+            if (!js_Disassemble(cx, script, lines, stdout))
+                return false;
+            SrcNotes(cx, script);
+            TryNotes(cx, script);
+        }
+    } else {
+        for (uintN i = 0; i < argc; i++) {
+            if (!DisassembleValue(cx, argv[i], lines, recursive))
+                return false;
+        }
     }
+
     JS_SET_RVAL(cx, vp, JSVAL_VOID);
     return true;
 }
 
 static JSBool
 DisassFile(JSContext *cx, uintN argc, jsval *vp)
 {
     jsval *argv = JS_ARGV(cx, vp);
new file mode 100644
--- /dev/null
+++ b/js/src/tests/ecma_5/RegExp/7.8.5-01.js
@@ -0,0 +1,35 @@
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var BUGNUMBER = 615070;
+var summary = "Line terminator after backslash is invalid in regexp literals";
+
+print(BUGNUMBER + ": " + summary);
+
+/**************
+ * BEGIN TEST *
+ **************/
+
+var regexps = ["/\\\u000A/", "/\\\u000D/", "/\\\u2028/", "/\\\u2029/",
+	       "/ab\\\n/", "/ab\\\r/", "/ab\\\u2028/", "/ab\\\u2029/",
+	       "/ab[c\\\n]/", "/a[bc\\", "/\\"];
+
+for(var i=0; i<regexps.length; i++) {
+    var src = regexps[i];
+    try {
+	x = eval(src).source;
+    } catch(e) {
+	assertEq(e.constructor, SyntaxError);
+	continue;
+    }
+    assertEq(0, 1);
+}
+
+/**************/
+
+if (typeof reportCompare === "function")
+  reportCompare(true, true);
+
+print("All tests passed!");
--- a/js/src/tests/ecma_5/RegExp/jstests.list
+++ b/js/src/tests/ecma_5/RegExp/jstests.list
@@ -1,3 +1,4 @@
 url-prefix ../../jsreftest.html?test=ecma_5/RegExp/
+script 7.8.5-01.js
 script 15.10.5-01.js
 script 15.10.7.5-01.js
--- a/js/src/tests/js1_5/extensions/regress-336410-1.js
+++ b/js/src/tests/js1_5/extensions/regress-336410-1.js
@@ -68,14 +68,14 @@ try
   printStatus('Creating array');
   var o=[r, r, r, r, r, r, r, r, r];
   printStatus('object.toSource()');
   var rr = o.toSource();
   printStatus('Done.');
 }
 catch(ex)
 {
-  expect = 'InternalError: script stack space quota is exhausted';
+  expect = 'InternalError: allocation size overflow';
   actual = ex + '';
   print(actual);
 }
 
 reportCompare(expect, actual, summary);
new file mode 100644
--- /dev/null
+++ b/js/src/tests/js1_8_5/extensions/clone-forge.js
@@ -0,0 +1,31 @@
+// -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// Any copyright is dedicated to the Public Domain.
+// http://creativecommons.org/licenses/publicdomain/
+
+function assertThrows(f) {
+    var ok = false;
+    try {
+        f();
+    } catch (exc) {
+        ok = true;
+    }
+    if (!ok)
+        throw new TypeError("Assertion failed: " + f + " did not throw as expected");
+}
+
+// Don't allow forging bogus Date objects.
+var buf = serialize(new Date(NaN));
+var a = [1/0, -1/0,
+         Number.MIN_VALUE, -Number.MIN_VALUE,
+         Math.PI, 1286523948674.5,
+         Number.MAX_VALUE, -Number.MAX_VALUE,
+         8.64e15 + 1, -(8.64e15 + 1)];
+for (var i = 0; i < a.length; i++) {
+    var n = a[i];
+    var nbuf = serialize(n);
+    for (var j = 0; j < 8; j++)
+        buf[j + 8] = nbuf[j];
+    assertThrows(function () { deserialize(buf); });
+}
+
+reportCompare(0, 0);
new file mode 100644
--- /dev/null
+++ b/js/src/tests/js1_8_5/extensions/clone-leaf-object.js
@@ -0,0 +1,63 @@
+// -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// Any copyright is dedicated to the Public Domain.
+// http://creativecommons.org/licenses/publicdomain/
+
+var a = [new Boolean(true),
+         new Boolean(false),
+         new Number(0),
+         new Number(-0),
+         new Number(Math.PI),
+         new Number(0x7fffffff),
+         new Number(-0x7fffffff),
+         new Number(0x80000000),
+         new Number(-0x80000000),
+         new Number(0xffffffff),
+         new Number(-0xffffffff),
+         new Number(0x100000000),
+         new Number(-0x100000000),
+         new Number(Number.MIN_VALUE),
+         new Number(-Number.MIN_VALUE),
+         new Number(Number.MAX_VALUE),
+         new Number(-Number.MAX_VALUE),
+         new Number(1/0),
+         new Number(-1/0),
+         new Number(0/0),
+         new String(""),
+         new String("\0123\u4567"),
+         new Date(0),
+         new Date(-0),
+         new Date(0x7fffffff),
+         new Date(-0x7fffffff),
+         new Date(0x80000000),
+         new Date(-0x80000000),
+         new Date(0xffffffff),
+         new Date(-0xffffffff),
+         new Date(0x100000000),
+         new Date(-0x100000000),
+         new Date(1286523948674),
+         new Date(8.64e15), // hard-coded in ES5 spec, hard-coded here
+         new Date(-8.64e15),
+         new Date(NaN)];
+
+function primitive(a) {
+    return a instanceof Date ? +a : a.constructor(a);
+}
+
+for (var i = 0; i < a.length; i++) {
+    var x = a[i];
+    var expectedSource = x.toSource();
+    var expectedPrimitive = primitive(x);
+    var expectedProto = x.__proto__;
+    var expectedString = Object.prototype.toString.call(x);
+    x.expando = 1;
+    x.__proto__ = {};
+
+    var y = deserialize(serialize(x));
+    assertEq(y.toSource(), expectedSource);
+    assertEq(primitive(y), expectedPrimitive);
+    assertEq(y.__proto__, expectedProto);
+    assertEq(Object.prototype.toString.call(y), expectedString);
+    assertEq("expando" in y, false);
+}
+
+reportCompare(0, 0);
--- a/js/src/tests/js1_8_5/extensions/clone-regexp.js
+++ b/js/src/tests/js1_8_5/extensions/clone-regexp.js
@@ -10,16 +10,17 @@ function testRegExp(b) {
     for (p in a)
         throw new Error("cloned RegExp should have no enumerable properties");
 
     assertEq(a.source, b.source);
     assertEq(a.global, b.global);
     assertEq(a.ignoreCase, b.ignoreCase);
     assertEq(a.multiline, b.multiline);
     assertEq(a.sticky, b.sticky);
+    assertEq("expando" in a, false);
 }
 
 testRegExp(RegExp(""));
 testRegExp(/(?:)/);
 testRegExp(/^(.*)$/gimy);
 testRegExp(RegExp.prototype);
 
 var re = /\bx\b/gi;
--- a/js/src/tests/js1_8_5/extensions/jstests.list
+++ b/js/src/tests/js1_8_5/extensions/jstests.list
@@ -13,12 +13,14 @@ script array-length-protochange.js
 script parseInt-octal.js
 script proxy-enumerateOwn-duplicates.js
 skip-if(!xulRuntime.shell) script proxy-proto-setter.js
 skip-if(!xulRuntime.shell) script reflect-parse.js
 script destructure-accessor.js
 script censor-strict-caller.js
 skip-if(!xulRuntime.shell) script clone-simple.js
 skip-if(!xulRuntime.shell) script clone-regexp.js
+skip-if(!xulRuntime.shell) script clone-leaf-object.js
 skip-if(!xulRuntime.shell) script clone-object.js
 skip-if(!xulRuntime.shell) script clone-typed-array.js
 skip-if(!xulRuntime.shell) script clone-errors.js
+skip-if(!xulRuntime.shell) script clone-forge.js
 script set-property-non-extensible.js
--- a/js/src/tests/js1_8_5/regress/regress-595230-1.js
+++ b/js/src/tests/js1_8_5/regress/regress-595230-1.js
@@ -6,13 +6,14 @@ var box = evalcx('lazy');
 
 var src =
     'try {\n' +
     '    __proto__ = Proxy.createFunction((function() {}), function() {})\n' +
     '    var x\n' +
     '    *\n' +
     '} catch(e) {}\n' +
     'default xml namespace = x\n' +
-    'for (let b in [0, 0]) <x/>\n';
+    'for (let b in [0, 0]) <x/>\n' +
+    '0\n';
 
 evalcx(src, box);
 
 this.reportCompare(0, 0, "ok");
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -320,16 +320,19 @@ if __name__ == '__main__':
             print('Multiple tests match command line arguments, debugger can only run one')
             for tc in test_list:
                 print('    %s'%tc.path)
             sys.exit(2)
 
         cmd = test_list[0].get_command(TestTask.js_cmd_prefix)
         if OPTIONS.show_cmd:
             print subprocess.list2cmdline(cmd)
+        manifest_dir = os.path.dirname(OPTIONS.manifest)
+        if manifest_dir not in ('', '.'):
+            os.chdir(manifest_dir)
         call(cmd)
         sys.exit()
 
     if not test_list:
         print 'no tests selected'
     else:
         curdir = os.getcwd()
         manifest_dir = os.path.dirname(OPTIONS.manifest)
--- a/js/src/tests/manifest.py
+++ b/js/src/tests/manifest.py
@@ -93,17 +93,20 @@ def parse(filename, xul_tester, reldir =
         f = open(filename)
     except IOError:
         print "warning: include file not found: '%s'"%filename
         return ans
 
     for line in f:
         sline = comment_re.sub('', line)
         parts = sline.split()
-        if parts[0] == 'include':
+        if len(parts) == 0:
+            # line is empty or just a comment, skip
+            pass
+        elif parts[0] == 'include':
             include_file = parts[1]
             include_reldir = os.path.join(reldir, os.path.dirname(include_file))
             ans += parse(os.path.join(dir, include_file), xul_tester, include_reldir)
         elif parts[0] == 'url-prefix':
             # Doesn't apply to shell tests
             pass
         else:
             script = None
--- a/js/src/xpconnect/src/xpcforwards.h
+++ b/js/src/xpconnect/src/xpcforwards.h
@@ -78,16 +78,17 @@ class IID2NativeInterfaceMap;
 class ClassInfo2NativeSetMap;
 class ClassInfo2WrappedNativeProtoMap;
 class NativeSetMap;
 class IID2ThisTranslatorMap;
 class XPCNativeScriptableSharedMap;
 class XPCWrappedNativeProtoMap;
 class XPCNativeWrapperMap;
 class WrappedNative2WrapperMap;
+class JSObject2JSObjectMap;
 
 class nsXPCComponents;
 class nsXPCComponents_Interfaces;
 class nsXPCComponents_InterfacesByID;
 class nsXPCComponents_Classes;
 class nsXPCComponents_ClassesByID;
 class nsXPCComponents_Results;
 class nsXPCComponents_ID;
--- a/js/src/xpconnect/src/xpcjsruntime.cpp
+++ b/js/src/xpconnect/src/xpcjsruntime.cpp
@@ -235,16 +235,22 @@ ContextCallback(JSContext *cx, uintN ope
         else if(operation == JSCONTEXT_DESTROY)
         {
             delete XPCContext::GetXPCContext(cx);
         }
     }
     return JS_TRUE;
 }
 
+xpc::CompartmentPrivate::~CompartmentPrivate()
+{
+    if (waiverWrapperMap)
+        delete waiverWrapperMap;
+}
+
 static JSBool
 CompartmentCallback(JSContext *cx, JSCompartment *compartment, uintN op)
 {
     if(op == JSCOMPARTMENT_NEW)
         return JS_TRUE;
 
     XPCJSRuntime* self = nsXPConnect::GetRuntimeInstance();
     if(!self)
@@ -540,16 +546,37 @@ DoDeferredRelease(nsTArray<T> &array)
             break;
         }
         T wrapper = array[count-1];
         array.RemoveElementAt(count-1);
         NS_RELEASE(wrapper);
     }
 }
 
+static JSDHashOperator
+SweepWaiverWrappers(JSDHashTable *table, JSDHashEntryHdr *hdr,
+                    uint32 number, void *arg)
+{
+    JSObject *key = ((JSObject2JSObjectMap::Entry *)hdr)->key;
+    JSObject *value = ((JSObject2JSObjectMap::Entry *)hdr)->value;
+    if(IsAboutToBeFinalized(key) || IsAboutToBeFinalized(value))
+        return JS_DHASH_REMOVE;
+    return JS_DHASH_NEXT;
+}
+
+static PLDHashOperator
+SweepCompartment(nsCStringHashKey& aKey, JSCompartment *compartment, void *aClosure)
+{
+    xpc::CompartmentPrivate *priv = (xpc::CompartmentPrivate *)
+        JS_GetCompartmentPrivate((JSContext *)aClosure, compartment);
+    if (priv->waiverWrapperMap)
+        priv->waiverWrapperMap->Enumerate(SweepWaiverWrappers, nsnull);
+    return PL_DHASH_NEXT;
+}
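
The enumerator pair above implements a weak-map sweep: during GC
finalization, any entry whose key or value is about to be collected is
dropped, so the map never holds dangling JSObject pointers. The same pattern
in portable C++ (hypothetical types standing in for the dhash machinery):

    #include <unordered_map>

    struct Obj { bool dying; };
    static bool aboutToBeFinalized(const Obj* o) { return o->dying; }

    static void sweep(std::unordered_map<Obj*, Obj*>& map) {
        for (auto it = map.begin(); it != map.end(); ) {
            if (aboutToBeFinalized(it->first) || aboutToBeFinalized(it->second))
                it = map.erase(it);  // the JS_DHASH_REMOVE case
            else
                ++it;                // the JS_DHASH_NEXT case
        }
    }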
+
 // static
 JSBool XPCJSRuntime::GCCallback(JSContext *cx, JSGCStatus status)
 {
     XPCJSRuntime* self = nsXPConnect::GetRuntimeInstance();
     if(self)
     {
         switch(status)
         {
@@ -592,19 +619,24 @@ JSBool XPCJSRuntime::GCCallback(JSContex
                     // refcount of these wrappers.
                     // We add them to the array now and Release the array members
                     // later to avoid the possibility of doing any JS GCThing
                     // allocations during the gc cycle.
                     self->mWrappedJSMap->
                         Enumerate(WrappedJSDyingJSObjectFinder, &data);
                 }
 
-                // Find dying scopes...
+                // Find dying scopes.
                 XPCWrappedNativeScope::FinishedMarkPhaseOfGC(cx, self);
 
+                // Sweep compartments.
+                self->GetCompartmentMap().EnumerateRead(
+                    (XPCCompartmentMap::EnumReadFunction)
+                    SweepCompartment, cx);
+
                 self->mDoingFinalization = JS_TRUE;
                 break;
             }
             case JSGC_FINALIZE_END:
             {
                 NS_ASSERTION(self->mDoingFinalization, "bad state");
                 self->mDoingFinalization = JS_FALSE;
 
--- a/js/src/xpconnect/src/xpcmaps.cpp
+++ b/js/src/xpconnect/src/xpcmaps.cpp
@@ -758,8 +758,23 @@ WrappedNative2WrapperMap::AddLink(JSObje
     PR_INSERT_LINK(newLink, oldLink);
     PR_REMOVE_AND_INIT_LINK(oldLink);
     newLink->obj = oldLink->obj;
 
     return PR_TRUE;
 }
 
 /***************************************************************************/
+// implement JSObject2JSObjectMap...
+
+struct JSDHashTableOps
+JSObject2JSObjectMap::sOps = {
+    JS_DHashAllocTable,
+    JS_DHashFreeTable,
+    JS_DHashVoidPtrKeyStub,
+    JS_DHashMatchEntryStub,
+    JS_DHashMoveEntryStub,
+    JS_DHashClearEntryStub,
+    JS_DHashFinalizeStub,
+    nsnull
+};
+
+/***************************************************************************/
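
The stub ops give pointer-keyed hashing: the JSObject* itself is the key.
Under the documented JSDHash stub behavior this amounts to the following
(a sketch, not the actual SpiderMonkey source):

    #include <cstdint>

    // Hash a pointer by value, discarding the low bits that are always
    // zero due to allocation alignment.
    static uint32_t hashVoidPtrKey(const void* key) {
        return uint32_t(reinterpret_cast<uintptr_t>(key) >> 2);
    }

    // Entries match on pointer identity.
    static bool matchEntryStub(const void* entryKey, const void* lookupKey) {
        return entryKey == lookupKey;
    }
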
--- a/js/src/xpconnect/src/xpcmaps.h
+++ b/js/src/xpconnect/src/xpcmaps.h
@@ -705,9 +705,85 @@ public:
 private:
     WrappedNative2WrapperMap();    // no implementation
     WrappedNative2WrapperMap(int size);
 
 private:
     JSDHashTable *mTable;
 };
 
+class JSObject2JSObjectMap
+{
+    static struct JSDHashTableOps sOps;
+
+public:
+    struct Entry : public JSDHashEntryHdr
+    {
+        JSObject* key;
+        JSObject* value;
+    };
+
+    static JSObject2JSObjectMap* newMap(int size)
+    {
+        JSObject2JSObjectMap* map = new JSObject2JSObjectMap(size);
+        if(map && map->mTable)
+            return map;
+        delete map;
+        return nsnull;
+    }
+
+    inline JSObject* Find(JSObject* key)
+    {
+        NS_PRECONDITION(key, "bad param");
+        Entry* entry = (Entry*)
+            JS_DHashTableOperate(mTable, key, JS_DHASH_LOOKUP);
+        if(JS_DHASH_ENTRY_IS_FREE(entry))
+            return nsnull;
+        return entry->value;
+    }
+
+    // Note: If the entry already exists, return the old value.
+    inline JSObject* Add(JSObject *key, JSObject *value)
+    {
+        NS_PRECONDITION(key,"bad param");
+        Entry* entry = (Entry*)
+            JS_DHashTableOperate(mTable, key, JS_DHASH_ADD);
+        if(!entry)
+            return nsnull;
+        if(entry->key)
+            return entry->value;
+        entry->key = key;
+        entry->value = value;
+        return value;
+    }
+
+    inline void Remove(JSObject* key)
+    {
+        NS_PRECONDITION(key,"bad param");
+        JS_DHashTableOperate(mTable, key, JS_DHASH_REMOVE);
+    }
+
+    inline uint32 Count() {return mTable->entryCount;}
+
+    inline uint32 Enumerate(JSDHashEnumerator f, void *arg)
+    {
+        return JS_DHashTableEnumerate(mTable, f, arg);
+    }
+
+    ~JSObject2JSObjectMap()
+    {
+        if(mTable)
+            JS_DHashTableDestroy(mTable);
+    }
+
+private:
+    JSObject2JSObjectMap(int size)
+    {
+        mTable = JS_NewDHashTable(&sOps, nsnull, sizeof(Entry), size);
+    }
+
+    JSObject2JSObjectMap(); // no implementation
+
+private:
+    JSDHashTable *mTable;
+};
+
 #endif /* xpcmaps_h___ */
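
Note the contract of Add() above: if the key is already present, the old
value is returned and kept, which is what lets the waiver-wrapper cache in
the WrapperFactory change below preserve object identity. The semantics in
miniature, emulated with a standard container:

    #include <cassert>
    #include <unordered_map>

    // Emulates JSObject2JSObjectMap::Add: insert only if absent, then hand
    // back whatever the map now holds for the key.
    template <typename K, typename V>
    static V addOnce(std::unordered_map<K, V>& map, K key, V value) {
        return map.emplace(key, value).first->second;
    }

    int main() {
        std::unordered_map<int, int> m;
        assert(addOnce(m, 1, 10) == 10);  // first Add stores the value
        assert(addOnce(m, 1, 20) == 10);  // second Add returns the old one
    }
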
--- a/js/src/xpconnect/src/xpcprivate.h
+++ b/js/src/xpconnect/src/xpcprivate.h
@@ -235,16 +235,17 @@ void DEBUG_CheckWrapperThreadSafety(cons
 #define XPC_NATIVE_PROTO_MAP_SIZE           16
 #define XPC_DYING_NATIVE_PROTO_MAP_SIZE     16
 #define XPC_DETACHED_NATIVE_PROTO_MAP_SIZE  32
 #define XPC_NATIVE_INTERFACE_MAP_SIZE       64
 #define XPC_NATIVE_SET_MAP_SIZE             64
 #define XPC_NATIVE_JSCLASS_MAP_SIZE         32
 #define XPC_THIS_TRANSLATOR_MAP_SIZE         8
 #define XPC_NATIVE_WRAPPER_MAP_SIZE         16
+#define XPC_WRAPPER_MAP_SIZE                16
 
 /***************************************************************************/
 // data declarations...
 extern const char* XPC_ARG_FORMATTER_FORMAT_STRINGS[]; // format strings
 extern const char XPC_CONTEXT_STACK_CONTRACTID[];
 extern const char XPC_RUNTIME_CONTRACTID[];
 extern const char XPC_EXCEPTION_CONTRACTID[];
 extern const char XPC_CONSOLE_CONTRACTID[];
@@ -318,17 +319,17 @@ class PtrAndPrincipalHashKey : public PL
 // thread at any time could attempt to insert |key| into |map|, so it works
 // well enough for our uses.
 typedef nsDataHashtableMT<nsISupportsHashKey, JSCompartment *> XPCMTCompartmentMap;
 
 // This map is only used on the main thread.
 typedef nsDataHashtable<xpc::PtrAndPrincipalHashKey, JSCompartment *> XPCCompartmentMap;
 
 /***************************************************************************/
-// useful macros...
+// Useful macros...
 
 #define XPC_STRING_GETTER_BODY(dest, src) \
     NS_ENSURE_ARG_POINTER(dest); \
     char* result; \
     if(src) \
         result = (char*) nsMemory::Clone(src, \
                                 sizeof(char)*(strlen(src)+1)); \
     else \
@@ -4479,32 +4480,38 @@ XPC_GetIdentityObject(JSContext *cx, JSO
 namespace xpc {
 
 struct CompartmentPrivate
 {
   CompartmentPrivate(PtrAndPrincipalHashKey *key, bool wantXrays, bool cycleCollectionEnabled)
     : key(key),
       ptr(nsnull),
       wantXrays(wantXrays),
-      cycleCollectionEnabled(cycleCollectionEnabled)
+      cycleCollectionEnabled(cycleCollectionEnabled),
+      waiverWrapperMap(nsnull)
   {
   }
+
   CompartmentPrivate(nsISupports *ptr, bool wantXrays, bool cycleCollectionEnabled)
     : key(nsnull),
       ptr(ptr),
       wantXrays(wantXrays),
-      cycleCollectionEnabled(cycleCollectionEnabled)
+      cycleCollectionEnabled(cycleCollectionEnabled),
+      waiverWrapperMap(nsnull)
   {
   }
 
+  ~CompartmentPrivate();
+
   // NB: key and ptr are mutually exclusive.
   nsAutoPtr<PtrAndPrincipalHashKey> key;
   nsCOMPtr<nsISupports> ptr;
   bool wantXrays;
   bool cycleCollectionEnabled;
+  JSObject2JSObjectMap *waiverWrapperMap;
 };
 
 inline bool
 CompartmentParticipatesInCycleCollection(JSContext *cx, JSCompartment *compartment)
 {
    CompartmentPrivate *priv =
        static_cast<CompartmentPrivate *>(JS_GetCompartmentPrivate(cx, compartment));
    NS_ASSERTION(priv, "This should never be null!");
--- a/js/src/xpconnect/tests/chrome/Makefile.in
+++ b/js/src/xpconnect/tests/chrome/Makefile.in
@@ -53,16 +53,17 @@ include $(topsrcdir)/config/rules.mk
 		test_evalInSandbox.xul \
 		test_sandboxImport.xul \
 		test_wrappers.xul \
 		test_bug484459.xul \
 		test_cows.xul \
 		test_bug517163.xul \
 		test_bug571849.xul \
 		test_bug601803.xul \
+		test_bug610390.xul \
 		$(NULL)
 
 # Disabled until this test gets updated to test the new proxy based
 # wrappers.
 #		test_wrappers-2.xul \
 
 libs:: $(_CHROME_FILES)
 	$(INSTALL) $(foreach f,$^,"$f") $(DEPTH)/_tests/testing/mochitest/chrome/$(relativesrcdir)
new file mode 100644
--- /dev/null
+++ b/js/src/xpconnect/tests/chrome/test_bug610390.xul
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet href="chrome://global/skin" type="text/css"?>
+<?xml-stylesheet href="chrome://mochikit/content/tests/SimpleTest/test.css"
+                 type="text/css"?>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=610390
+-->
+<window title="Mozilla Bug 610390"
+  xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
+  <script type="application/javascript"
+          src="chrome://mochikit/content/MochiKit/packed.js"></script>
+  <script type="application/javascript"
+          src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"></script>
+
+  <!-- test results are displayed in the html:body -->
+  <body xmlns="http://www.w3.org/1999/xhtml">
+    <iframe type="content"
+      src="data:text/html,&lt;script&gt;var x=3&lt;/script&gt;"
+      onload="go()"
+      id="ifr">
+    </iframe>
+  </body>
+
+  <!-- test code goes here -->
+  <script type="application/javascript"><![CDATA[
+    SimpleTest.waitForExplicitFinish();
+
+    function go() {
+      var w = $('ifr').contentWindow;
+      is(w.wrappedJSObject, w.wrappedJSObject, "wrappedJSObject identity not maintained");
+      SimpleTest.finish();
+    }
+  ]]></script>
+</window>
--- a/js/src/xpconnect/wrappers/WrapperFactory.cpp
+++ b/js/src/xpconnect/wrappers/WrapperFactory.cpp
@@ -43,16 +43,17 @@
 #include "WrapperFactory.h"
 #include "CrossOriginWrapper.h"
 #include "FilteringWrapper.h"
 #include "XrayWrapper.h"
 #include "AccessCheck.h"
 #include "XPCWrapper.h"
 
 #include "xpcprivate.h"
+#include "xpcmaps.h"
 
 namespace xpc {
 
 // When chrome pulls a naked property across the membrane using
 // .wrappedJSObject, we want it to cross the membrane into the
 // chrome compartment without automatically being wrapped into an
 // X-ray wrapper. We achieve this by wrapping it into a special
 // transparent wrapper in the origin (non-chrome) compartment. When
@@ -302,20 +303,43 @@ WrapperFactory::WaiveXrayAndWrap(JSConte
 
     JSObject *obj = JSVAL_TO_OBJECT(*vp)->unwrap();
 
     // We have to make sure that if we're wrapping an outer window, that
     // the .wrappedJSObject also wraps the outer window.
     obj = GetCurrentOuter(cx, obj);
 
     {
-        js::SwitchToCompartment sc(cx, obj->compartment());
-        obj = JSWrapper::New(cx, obj, NULL, obj->getGlobal(), &WaiveXrayWrapperWrapper);
-        if (!obj)
-            return false;
+        // See if we already have a waiver wrapper for this object.
+        CompartmentPrivate *priv =
+            (CompartmentPrivate *)JS_GetCompartmentPrivate(cx, obj->compartment());
+        JSObject *wobj = nsnull;
+        if (priv && priv->waiverWrapperMap)
+            wobj = priv->waiverWrapperMap->Find(obj);
+
+        // No wrapper yet, make one.
+        if (!wobj) {
+            js::SwitchToCompartment sc(cx, obj->compartment());
+            wobj = JSWrapper::New(cx, obj, NULL, obj->getGlobal(), &WaiveXrayWrapperWrapper);
+            if (!wobj)
+                return false;
+
+            // Add the new wrapper so we find it next time.
+            if (priv) {
+                if (!priv->waiverWrapperMap) {
+                    priv->waiverWrapperMap = JSObject2JSObjectMap::newMap(XPC_WRAPPER_MAP_SIZE);
+                    if (!priv->waiverWrapperMap)
+                        return false;
+                }
+                if (!priv->waiverWrapperMap->Add(obj, wobj))
+                    return false;
+            }
+        }
+
+        obj = wobj;
     }
 
     *vp = OBJECT_TO_JSVAL(obj);
     return JS_WrapValue(cx, vp);
 }
 
 JSObject *
 WrapperFactory::WrapSOWObject(JSContext *cx, JSObject *obj)