Merge the last green changeset of mozilla-inbound to mozilla-central
author Ehsan Akhgari <ehsan@mozilla.com>
Tue, 20 Sep 2011 10:22:17 -0400
changeset 77185 2a4671d164b484282abafe855a2180ca8d5fecd5
parent 77184 ab120ebd437be3e0e28a900d27a755876df7ee11 (current diff)
parent 77149 a89ac13dbeb9d815b065dc4865ac1eb40a4b1ad8 (diff)
child 77186 b15856d4b1148976cc05f09bf70d5faf46f99215
child 77197 4792fc2600869a844e28c56e46667e8a20fcebce
child 77243 c2d1c841d003379d9327f9c2712e5d19a9467ed8
child 77262 cbb03c3991c61109c4a8de5e200db640e57b0a82
push id 3
push user felipc@gmail.com
push date Fri, 30 Sep 2011 20:09:13 +0000
milestone 9.0a1
content/base/test/Makefile.in
js/src/jsfun.cpp
js/src/jsfun.h
js/src/jsobj.cpp
--- a/content/base/test/Makefile.in
+++ b/content/base/test/Makefile.in
@@ -507,16 +507,17 @@ include $(topsrcdir)/config/rules.mk
 		file_bug675121.sjs \
 		test_bug654352.html \
 		test_bug675166.html \
 		test_bug682554.html \
 		test_bug682592.html \
 		bug682592-subframe.html \
 		bug682592-subframe-ref.html \
 		test_bug685798.html \
+		test_bug686449.xhtml \
 		$(NULL)
 
 _CHROME_FILES =	\
 		test_bug357450.js \
 		$(NULL)
 
 # This test fails on the Mac for some reason
 ifneq (,$(filter gtk2 windows,$(MOZ_WIDGET_TOOLKIT)))
new file mode 100644
--- /dev/null
+++ b/content/base/test/test_bug686449.xhtml
@@ -0,0 +1,79 @@
+<html xmlns="http://www.w3.org/1999/xhtml">
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=686449
+-->
+<head>
+  <title>Test for Bug 686449</title>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=686449">Mozilla Bug 686449</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+  
+</div>
+<div id="rangetest">abcd<div id="picontainer1"><?pi efgh?></div><div>ijkl</div><div id="picontainer2"><?pi mnop?></div>qrst</div>
+<pre id="test">
+<script type="application/javascript">
+<![CDATA[
+
+/** Test for Bug 686449 **/
+
+var pi = document.createProcessingInstruction("t", "data");
+ok("target" in pi, "No target?");
+ok("data" in pi, "No data?");
+ok("length" in pi, "No length?");
+ok("substringData" in pi, "No substringData?");
+ok("appendData" in pi, "No appendData?");
+ok("insertData" in pi, "No insertData?");
+ok("deleteData" in pi, "No deleteData?");
+ok("replaceData" in pi, "No replaceData?");
+
+is(pi.substringData(0, pi.length), pi.data, "wrong data?");
+var olddata = pi.data;
+var appenddata = "foo";
+pi.appendData(appenddata);
+is(pi.data, olddata + appenddata, "appendData doesn't work?");
+pi.deleteData(olddata.length, appenddata.length);
+is(pi.data, olddata, "deleteData doesn't work?");
+pi.replaceData(0, 0, olddata);
+is(pi.data, olddata + olddata, "replaceData doesn't work?");
+pi.insertData(0, olddata);
+is(pi.data, olddata + olddata + olddata, "insertData doesn't work?");
+pi.data = olddata;
+is(pi.data, olddata, "setting data doesn't work?");
+
+var r = document.createRange();
+r.selectNodeContents(pi);
+is(r.startContainer, pi, "Wrong startContainer!");
+is(r.startOffset, 0, "Wrong startOffset!");
+is(r.endContainer, pi, "Wrong endContainer!");
+is(r.endOffset, pi.length, "Wrong endOffset!");
+
+var df = r.cloneContents();
+is(df.childNodes.length, 1, "Unexpected child nodes?");
+ok(df.firstChild.isEqualNode(pi), "Wrong cloning?");
+
+r.setStart(pi, 1);
+r.setEnd(pi, 3);
+df = r.cloneContents();
+is(df.childNodes.length, 1, "Unexpected child nodes?");
+ok(!df.firstChild.isEqualNode(pi), "Should clone to similar pi!");
+is(df.firstChild.data, "at", "Wrong data cloning?");
+
+r.selectNode(document.getElementById("rangetest"));
+is(r.toString(), document.getElementById("rangetest").textContent,
+   "Wrong range stringification!");
+ok(r.cloneContents().firstChild.firstChild.nextSibling.firstChild.
+     isEqualNode(document.getElementById("picontainer1").firstChild),
+   "Wrong pi cloning!");
+ok(r.cloneContents().firstChild.firstChild.nextSibling.nextSibling.nextSibling.firstChild.
+     isEqualNode(document.getElementById("picontainer2").firstChild),
+   "Wrong pi cloning!");
+
+]]>
+</script>
+</pre>
+</body>
+</html>
--- a/content/html/content/test/Makefile.in
+++ b/content/html/content/test/Makefile.in
@@ -272,15 +272,16 @@ include $(topsrcdir)/config/rules.mk
 		test_bug666666.html \
 		test_bug674558.html \
 		test_bug583533.html \
 		test_restore_from_parser_fragment.html \
 		test_bug617528.html \
 		test_checked.html \
 		test_bug677658.html \
 		test_bug677463.html \
+		test_bug682886.html \
 		file_fullscreen-api.html \
 		file_fullscreen-api-keys.html \
 		test_fullscreen-api.html \
 		$(NULL)
 
 libs:: $(_TEST_FILES)
 	$(INSTALL) $(foreach f,$^,"$f") $(DEPTH)/_tests/testing/mochitest/tests/$(relativesrcdir)
new file mode 100644
--- /dev/null
+++ b/content/html/content/test/test_bug682886.html
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML>
+<html>
+<!--
+https://bugzilla.mozilla.org/show_bug.cgi?id=682886
+-->
+<head>
+  <title>Test for Bug 682886</title>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+</head>
+<body>
+<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=682886">Mozilla Bug 682886</a>
+<p id="display"></p>
+<div id="content" style="display: none">
+  
+</div>
+<pre id="test">
+<script type="application/javascript">
+
+/** Test for Bug 682886 **/
+
+
+  var m = document.createElement("menu");
+  var s = "<menuitem>foo</menuitem>";
+  m.innerHTML = s;
+  is(m.innerHTML, s, "Wrong menuitem serialization!");
+
+
+
+</script>
+</pre>
+</body>
+</html>
--- a/content/xml/content/src/nsXMLProcessingInstruction.cpp
+++ b/content/xml/content/src/nsXMLProcessingInstruction.cpp
@@ -97,16 +97,17 @@ nsXMLProcessingInstruction::~nsXMLProces
 
 
 DOMCI_NODE_DATA(ProcessingInstruction, nsXMLProcessingInstruction)
 
 // QueryInterface implementation for nsXMLProcessingInstruction
 NS_INTERFACE_TABLE_HEAD(nsXMLProcessingInstruction)
   NS_NODE_OFFSET_AND_INTERFACE_TABLE_BEGIN(nsXMLProcessingInstruction)
     NS_INTERFACE_TABLE_ENTRY(nsXMLProcessingInstruction, nsIDOMNode)
+    NS_INTERFACE_TABLE_ENTRY(nsXMLProcessingInstruction, nsIDOMCharacterData)
     NS_INTERFACE_TABLE_ENTRY(nsXMLProcessingInstruction,
                              nsIDOMProcessingInstruction)
   NS_OFFSET_AND_INTERFACE_TABLE_END
   NS_OFFSET_AND_INTERFACE_TABLE_TO_MAP_SEGUE
   NS_DOM_INTERFACE_MAP_ENTRY_CLASSINFO(ProcessingInstruction)
 NS_INTERFACE_MAP_END_INHERITING(nsGenericDOMDataNode)
 
 
@@ -117,28 +118,16 @@ NS_IMPL_RELEASE_INHERITED(nsXMLProcessin
 NS_IMETHODIMP
 nsXMLProcessingInstruction::GetTarget(nsAString& aTarget)
 {
   aTarget = NodeName();
 
   return NS_OK;
 }
 
-NS_IMETHODIMP
-nsXMLProcessingInstruction::SetData(const nsAString& aData)
-{
-  return SetNodeValue(aData);
-}
-
-NS_IMETHODIMP
-nsXMLProcessingInstruction::GetData(nsAString& aData)
-{
-  return nsGenericDOMDataNode::GetData(aData);
-}
-
 PRBool
 nsXMLProcessingInstruction::GetAttrValue(nsIAtom *aName, nsAString& aValue)
 {
   nsAutoString data;
 
   GetData(data);
   return nsParserUtils::GetQuotedAttributeValue(data, aName, aValue);
 }
--- a/content/xml/content/src/nsXMLProcessingInstruction.h
+++ b/content/xml/content/src/nsXMLProcessingInstruction.h
@@ -54,16 +54,19 @@ public:
   virtual ~nsXMLProcessingInstruction();
 
   // nsISupports
   NS_DECL_ISUPPORTS_INHERITED
 
   // nsIDOMNode
   NS_FORWARD_NSIDOMNODE(nsGenericDOMDataNode::)
 
+  // nsIDOMCharacterData
+  NS_FORWARD_NSIDOMCHARACTERDATA(nsGenericDOMDataNode::)
+
   // nsIDOMProcessingInstruction
   NS_DECL_NSIDOMPROCESSINGINSTRUCTION
 
   // DOM Memory Reporter participant.
   NS_DECL_AND_IMPL_DOM_MEMORY_REPORTER_SIZEOF(nsXMLProcessingInstruction,
                                               nsGenericDOMDataNode)
 
   // nsINode
--- a/dom/base/nsDOMClassInfo.cpp
+++ b/dom/base/nsDOMClassInfo.cpp
@@ -2395,16 +2395,17 @@ nsDOMClassInfo::Init()
 
   DOM_CLASSINFO_MAP_BEGIN(CDATASection, nsIDOMCDATASection)
     DOM_CLASSINFO_MAP_ENTRY(nsIDOMCDATASection)
     DOM_CLASSINFO_MAP_ENTRY(nsIDOMEventTarget)
   DOM_CLASSINFO_MAP_END
 
   DOM_CLASSINFO_MAP_BEGIN(ProcessingInstruction, nsIDOMProcessingInstruction)
     DOM_CLASSINFO_MAP_ENTRY(nsIDOMProcessingInstruction)
+    DOM_CLASSINFO_MAP_ENTRY(nsIDOMCharacterData)
     DOM_CLASSINFO_MAP_ENTRY(nsIDOMEventTarget)
   DOM_CLASSINFO_MAP_END
 
   DOM_CLASSINFO_MAP_BEGIN(NodeList, nsIDOMNodeList)
     DOM_CLASSINFO_MAP_ENTRY(nsIDOMNodeList)
   DOM_CLASSINFO_MAP_END
 
   DOM_CLASSINFO_MAP_BEGIN(NamedNodeMap, nsIDOMNamedNodeMap)
--- a/dom/interfaces/core/nsIDOMProcessingInstruction.idl
+++ b/dom/interfaces/core/nsIDOMProcessingInstruction.idl
@@ -32,26 +32,25 @@
  * use your version of this file under the terms of the MPL, indicate your
  * decision by deleting the provisions above and replace them with the notice
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
-#include "nsIDOMNode.idl"
+#include "nsIDOMCharacterData.idl"
 
 /**
  * The nsIDOMProcessingInstruction interface represents a 
  * "processing instruction", used in XML as a way to keep processor-specific 
  * information in the text of the document.
  *
  * For more information on this interface please see 
- * http://www.w3.org/TR/DOM-Level-2-Core/
+ * http://www.w3.org/TR/DOM-Level-2-Core/ and
+ * http://dvcs.w3.org/hg/domcore/raw-file/tip/Overview.html
  */
 
-[scriptable, uuid(5964f639-1183-487d-a87d-4c93111eae85)]
-interface nsIDOMProcessingInstruction : nsIDOMNode
+[scriptable, uuid(d754433f-4637-4a5f-9034-0388173ea254)]
+interface nsIDOMProcessingInstruction : nsIDOMCharacterData
 {
   readonly attribute DOMString        target;
-           attribute DOMString        data;
-                                       // raises(DOMException) on setting
 };
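
With nsIDOMProcessingInstruction now inheriting from nsIDOMCharacterData, processing-instruction nodes gain data, length, and the substringData/appendData/insertData/deleteData/replaceData members in addition to target. A minimal script-level sketch of the new surface (not part of the patch; it mirrors the mochitest added above):

    // Sketch: PI nodes now behave as character data.
    var pi = document.createProcessingInstruction("target", "data");
    pi.appendData("-more");           // pi.data == "data-more"
    pi.deleteData(4, 5);              // pi.data == "data" again
    pi.replaceData(0, 4, "other");    // pi.data == "other"
    var n = pi.length;                // character-data length, here 5
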
--- a/js/src/assembler/assembler/ARMAssembler.h
+++ b/js/src/assembler/assembler/ARMAssembler.h
@@ -942,22 +942,20 @@ namespace JSC {
             m_buffer.ensureSpace(space);
         }
 
         int sizeOfConstantPool()
         {
             return m_buffer.sizeOfConstantPool();
         }
 
-#ifdef DEBUG
-        void allowPoolFlush(bool allowFlush)
+        int flushCount()
         {
-            m_buffer.allowPoolFlush(allowFlush);
+            return m_buffer.flushCount();
         }
-#endif
 
         JmpDst label()
         {
             JmpDst label(m_buffer.size());
             js::JaegerSpew(js::JSpew_Insns, IPFX "#label     ((%d))\n", MAYBE_PAD, label.m_offset);
             return label;
         }
 
@@ -1451,17 +1449,22 @@ namespace JSC {
         }
 
         ARMWord SM(int reg)
         {
             ASSERT(reg <= ARMRegisters::d31);
             // Encoded as bits [5,3:0].
             return ((reg << 5) & 0x20) | ((reg >> 1) & 0xf);
         }
-
+        ARMWord SN(int reg)
+        {
+            ASSERT(reg <= ARMRegisters::d31);
+            // Encoded as bits [19:16,7].
+            return ((reg << 15) & 0xf0000) | ((reg & 1) << 7);
+        }
         static ARMWord getConditionalField(ARMWord i)
         {
             return i & 0xf0000000;
         }
 
         int genInt(int reg, ARMWord imm, bool positive);
 
         ARMBuffer m_buffer;
@@ -1595,137 +1598,136 @@ namespace JSC {
             } else {
                 js::JaegerSpew(js::JSpew_Insns,
                                IPFX   "%-15s %s, %s, %s\n", MAYBE_PAD, "vmov",
                                nameFpRegD(rFP), nameGpReg(r1), nameGpReg(r2));
             }
             emitVFPInst(static_cast<ARMWord>(cc) | VFP_DXFER | VFP_MOV |
                         (fromFP ? DT_LOAD : 0) |
                         (isDbl ? VFP_DBL : 0), RD(r1), RN(r2), isDbl ? DM(rFP) : SM(rFP));
-            
         }
 
         void fcpyd_r(int dd, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s\n", MAYBE_PAD, "vmov.f64", 
                            nameFpRegD(dd), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FCPYD, dd, dd, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FCPYD, DD(dd), DM(dm), 0);
         }
 
         void faddd_r(int dd, int dn, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s, %s\n", MAYBE_PAD, "vadd.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FADDD, DD(dd), DN(dn), DM(dm));
         }
 
         void fnegd_r(int dd, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s\n", MAYBE_PAD, "fnegd", nameFpRegD(dd), nameFpRegD(dm));
             m_buffer.putInt(static_cast<ARMWord>(cc) | FNEGD | DD(dd) | DM(dm));
         }
 
         void fdivd_r(int dd, int dn, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s, %s\n", MAYBE_PAD, "vdiv.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FDIVD, dd, dn, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FDIVD, DD(dd), DN(dn), DM(dm));
         }
 
         void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s, %s\n", MAYBE_PAD, "vsub.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FSUBD, DD(dd), DN(dn), DM(dm));
         }
 
         void fabsd_r(int dd, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s\n", MAYBE_PAD, "fabsd", nameFpRegD(dd), nameFpRegD(dm));
             m_buffer.putInt(static_cast<ARMWord>(cc) | FABSD | DD(dd) | DM(dm));
         }
 
         void fmuld_r(int dd, int dn, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s, %s\n", MAYBE_PAD, "vmul.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FMULD, dd, dn, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FMULD, DD(dd), DN(dn), DM(dm));
         }
 
         void fcmpd_r(int dd, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s\n", MAYBE_PAD, "vcmp.f64", nameFpRegD(dd), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FCMPD, DD(dd), 0, DM(dm));
         }
 
         void fsqrtd_r(int dd, int dm, Condition cc = AL)
         {
             js::JaegerSpew(js::JSpew_Insns,
                     IPFX   "%-15s %s, %s\n", MAYBE_PAD, "vsqrt.f64", nameFpRegD(dd), nameFpRegD(dm));
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FSQRTD, dd, 0, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FSQRTD, DD(dd), 0, DM(dm));
         }
 
         void fmsr_r(int dd, int rn, Condition cc = AL)
         {
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
+            emitVFPInst(static_cast<ARMWord>(cc) | FMSR, RD(rn), SN(dd), 0);
         }
 
         void fmrs_r(int rd, int dn, Condition cc = AL)
         {
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FMRS, rd, dn, 0);
+            emitVFPInst(static_cast<ARMWord>(cc) | FMRS, RD(rd), SN(dn), 0);
         }
 
+        // dear god :(
+        // integer registers are encoded the same as single registers
         void fsitod_r(int dd, int dm, Condition cc = AL)
         {
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FSITOD, DD(dd), 0, SM(dm));
         }
 
         void fuitod_r(int dd, int dm, Condition cc = AL)
         {
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FUITOD, dd, 0, dm);
+            emitVFPInst(static_cast<ARMWord>(cc) | FUITOD, DD(dd), 0, SM(dm));
         }
 
         void ftosid_r(int fd, int dm, Condition cc = AL)
         {
-            // TODO: emitInst doesn't work for VFP instructions, though it
-            // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FTOSID, fd, 0, dm);
+            // TODO: I don't actually know what the encoding is; I'm guessing SD and DM.
+            emitVFPInst(static_cast<ARMWord>(cc) | FTOSID, SD(fd), 0, DM(dm));
         }
 
         void ftosizd_r(int fd, int dm, Condition cc = AL)
         {
-            // TODO: emitInst doesn't work for VFP instructions, though it
-            // seems to work for current usage.
-            emitInst(static_cast<ARMWord>(cc) | FTOSIZD, fd, 0, dm);
+            // TODO: I don't actually know what the encoding is; I'm guessing SD and DM.
+            emitVFPInst(static_cast<ARMWord>(cc) | FTOSIZD, SD(fd), 0, DM(dm));
         }
 
         void fmstat(Condition cc = AL)
         {
             // TODO: emitInst doesn't work for VFP instructions, though it
             // seems to work for current usage.
             m_buffer.putInt(static_cast<ARMWord>(cc) | FMSTAT);
         }
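
The new SN helper packs a single-precision VFP register number into the split Vn field: bits [4:1] of the register number land in instruction bits [19:16] and bit [0] lands in bit 7, matching the "[19:16,7]" comment. A quick worked check of the shift arithmetic (plain JS, purely illustrative):

    // Mirrors ARMWord SN(int reg) from the patch.
    function SN(reg) { return ((reg << 15) & 0xf0000) | ((reg & 1) << 7); }
    // For S3: reg[4:1] = 1 -> bits 19:16, reg[0] = 1 -> bit 7.
    console.log((SN(3) >>> 0).toString(16));  // "10080"
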
--- a/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
+++ b/js/src/assembler/assembler/AssemblerBufferWithConstantPool.h
@@ -101,19 +101,17 @@ public:
         UnusedEntry
     };
 
     AssemblerBufferWithConstantPool()
         : AssemblerBuffer()
         , m_numConsts(0)
         , m_maxDistance(maxPoolSize)
         , m_lastConstDelta(0)
-#ifdef DEBUG
-        , m_allowFlush(true)
-#endif
+        , m_flushCount(0)
     {
         m_pool = static_cast<uint32_t*>(malloc(maxPoolSize));
         m_mask = static_cast<char*>(malloc(maxPoolSize / sizeof(uint32_t)));
     }
 
     ~AssemblerBufferWithConstantPool()
     {
         free(m_mask);
@@ -236,49 +234,47 @@ public:
         return m_pool;
     }
 
     int sizeOfConstantPool()
     {
         return m_numConsts;
     }
 
-#ifdef DEBUG
-    // Guard constant pool flushes to ensure that they don't occur during
-    // regions where offsets into the code have to be maintained (such as PICs).
-    void allowPoolFlush(bool allowFlush)
+    int flushCount()
     {
-        m_allowFlush = allowFlush;
+        return m_flushCount;
     }
-#endif
 
 private:
     void correctDeltas(int insnSize)
     {
         m_maxDistance -= insnSize;
+        ASSERT(m_maxDistance >= 0);
         m_lastConstDelta -= insnSize;
         if (m_lastConstDelta < 0)
             m_lastConstDelta = 0;
     }
 
     void correctDeltas(int insnSize, int constSize)
     {
         correctDeltas(insnSize);
 
         m_maxDistance -= m_lastConstDelta;
+        ASSERT(m_maxDistance >= 0);
         m_lastConstDelta = constSize;
     }
 
     void flushConstantPool(bool useBarrier = true)
     {
         js::JaegerSpew(js::JSpew_Insns, " -- FLUSHING CONSTANT POOL WITH %d CONSTANTS --\n",
                        m_numConsts);
-        ASSERT(m_allowFlush);
         if (m_numConsts == 0)
             return;
+        m_flushCount++;
         int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
 
         if (alignPool)
             alignPool = sizeof(uint64_t) - alignPool;
 
         // Callback to protect the constant pool from execution
         if (useBarrier)
             AssemblerBuffer::putInt(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool));
@@ -299,46 +295,49 @@ private:
         for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) {
             void* loadAddr = reinterpret_cast<void*>(m_buffer + *iter);
             AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast<void*>(m_buffer + constPoolOffset));
         }
 
         m_loadOffsets.clear();
         m_numConsts = 0;
         m_maxDistance = maxPoolSize;
+        ASSERT(m_maxDistance >= 0);
+
     }
 
     void flushIfNoSpaceFor(int nextInsnSize)
     {
-        if (m_numConsts == 0)
+        if (m_numConsts == 0) {
+            m_maxDistance = maxPoolSize;
             return;
+        }
         int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
         if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
             flushConstantPool();
     }
 
     void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
     {
-        if (m_numConsts == 0)
+        if (m_numConsts == 0) {
+            m_maxDistance = maxPoolSize;
             return;
+        }
         if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
             (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
             flushConstantPool();
     }
 
     uint32_t* m_pool;
     char* m_mask;
     LoadOffsets m_loadOffsets;
 
     int m_numConsts;
     int m_maxDistance;
     int m_lastConstDelta;
-
-#ifdef DEBUG
-    bool    m_allowFlush;
-#endif
+    int m_flushCount;
 };
 
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)
 
 #endif // AssemblerBufferWithConstantPool_h
--- a/js/src/assembler/assembler/MacroAssemblerARM.h
+++ b/js/src/assembler/assembler/MacroAssemblerARM.h
@@ -1287,24 +1287,24 @@ public:
 
     void sqrtDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.fsqrtd_r(dest, src);
     }
 
     void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
     {
-        m_assembler.fmsr_r(dest, src);
-        m_assembler.fsitod_r(dest, dest);
+        m_assembler.fmsr_r(floatShadow(dest), src);
+        m_assembler.fsitod_r(dest, floatShadow(dest));
     }
 
     void convertUInt32ToDouble(RegisterID src, FPRegisterID dest)
     {
-        m_assembler.fmsr_r(dest, src);
-        m_assembler.fuitod_r(dest, dest);
+        m_assembler.fmsr_r(floatShadow(dest), src);
+        m_assembler.fuitod_r(dest, floatShadow(dest));
     }
 
     void convertInt32ToDouble(Address src, FPRegisterID dest)
     {
         // flds is not worth the effort here
         load32(src, ARMRegisters::S1);
         convertInt32ToDouble(ARMRegisters::S1, dest);
     }
@@ -1332,37 +1332,37 @@ public:
         return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
     }
 
     // Truncates 'src' to an integer, and places the result in 'dest'.
     // If the result is not representable as a 32 bit value, branch.
     // May also branch for some values that are representable in 32 bits
     Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
     {
-        m_assembler.ftosizd_r(ARMRegisters::SD0, src);
+        m_assembler.ftosizd_r(floatShadow(ARMRegisters::SD0), src);
         // If FTOSIZD (VCVT.S32.F64) can't fit the result into a 32-bit
         // integer, it saturates at INT_MAX or INT_MIN. Testing this is
         // probably quicker than testing FPSCR for exception.
-        m_assembler.fmrs_r(dest, ARMRegisters::SD0);
+        m_assembler.fmrs_r(dest, floatShadow(ARMRegisters::SD0));
         m_assembler.cmn_r(dest, ARMAssembler::getOp2(-0x7fffffff));
         m_assembler.cmp_r(dest, ARMAssembler::getOp2(0x80000000), ARMCondition(NonZero));
         return Jump(m_assembler.jmp(ARMCondition(Zero)));
     }
 
     // Converts 'src' to an integer, and places the result in 'dest'.
     // If the result is not representable as a 32 bit value, branch.
     // May also branch for some values that are representable in 32 bits
     // (specifically, in this case, 0).
     void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
     {
-        m_assembler.ftosid_r(ARMRegisters::SD0, src);
-        m_assembler.fmrs_r(dest, ARMRegisters::SD0);
+        m_assembler.ftosid_r(floatShadow(ARMRegisters::SD0), src);
+        m_assembler.fmrs_r(dest, floatShadow(ARMRegisters::SD0));
 
         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
-        m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
+        m_assembler.fsitod_r(ARMRegisters::SD0, floatShadow(ARMRegisters::SD0));
         failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
 
         // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
         failureCases.append(branchTest32(Zero, dest));
     }
 
     void zeroDouble(FPRegisterID srcDest)
     {
@@ -1375,22 +1375,20 @@ public:
         m_assembler.ensureSpace(space);
     }
 
     void forceFlushConstantPool()
     {
         m_assembler.forceFlushConstantPool();
     }
 
-#ifdef DEBUG
-    void allowPoolFlush(bool allowFlush)
+    int flushCount()
     {
-        m_assembler.allowPoolFlush(allowFlush);
+        return m_assembler.flushCount();
     }
-#endif
 
 protected:
     ARMAssembler::Condition ARMCondition(Condition cond)
     {
         return static_cast<ARMAssembler::Condition>(cond);
     }
 
     void ensureSpace(int insnSpace, int constSpace)
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug683140.js
@@ -0,0 +1,13 @@
+
+var g = newGlobal("same-compartment");
+g.eval("this.f = function(a) {" +
+       "assertEq(a instanceof Array, false);" +
+       "a = Array.prototype.slice.call(a);" +
+       "assertEq(a instanceof Array, true); }");
+g.f([1, 2, 3]);
+
+var g2 = newGlobal("new-compartment");
+g2.a = g2.Array(10);
+assertEq(g2.a instanceof Array, false);
+g2.a = Array.prototype.slice(g2.a);
+assertEq(g2.a instanceof Array, true);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug685313.js
@@ -0,0 +1,11 @@
+
+function foo() {
+    function D(){}
+    arr = [
+	   new (function D   (  ) { 
+		   D += '' + foo; 
+	       }), 
+        new D
+	   ];
+}
+foo();
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/basic/bug686396.js
@@ -0,0 +1,16 @@
+
+(function () { 
+  assertEquals = function assertEquals(expected, found, name_opt) {  };
+})();
+function testOne(receiver, key, result) {
+  for(var i = 0; i != 10; i++ ) {
+    assertEquals(result, receiver[key]());
+  }
+}
+function TypeOfThis() { return typeof this; }
+Number.prototype.type = TypeOfThis;
+String.prototype.type = TypeOfThis;
+Boolean.prototype.type = TypeOfThis;
+testOne(2.3, 'type', 'object');
+testOne('x', 'type', 'object');
+testOne(true, 'type', 'object');
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug684084-2.js
@@ -0,0 +1,8 @@
+function Function() {
+    try {
+    var g = this;
+    g.c("evil", eval);
+    } catch(b) {}
+}
+var o0 = Function.prototype;
+var f = new Function( (null ) );
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug684824.js
@@ -0,0 +1,7 @@
+
+function X(n) {
+    while ('' + (n--)) {
+        break;
+    }
+}
+X();
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/bug684943.js
@@ -0,0 +1,7 @@
+
+function foo(x) {
+  for (var i = 0; i < 100; i++) {
+    x.f === i;
+  }
+}
+foo({f:"three"});
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/getelem-sanity-8.js
@@ -0,0 +1,13 @@
+
+// TI does not account for GETELEM accessing strings, so the GETELEM PIC must
+// update type constraints according to generated stubs.
+function foo(a, b) {
+  for (var j = 0; j < 5; j++)
+    a[b[j]] + " what";
+}
+var a = {a:"zero", b:"one", c:"two", d:"three", e:"four"};
+var b = ["a", "b", "c", "d", "e"];
+foo(a, b);
+foo(a, b);
+a.e = 4;
+foo(a, b);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/getter-hook-1.js
@@ -0,0 +1,18 @@
+// GETPROP PIC with multiple stubs containing getter hooks.
+
+function foo(arr) {
+  for (var i = 0; i < 100; i++)
+    arr[i].caller;
+}
+arr = Object.create(Object.prototype);
+first = Object.create({});
+first.caller = null;
+second = Object.create({});
+second.caller = null;
+for (var i = 0; i < 100; ) {
+  arr[i++] = first;
+  arr[i++] = foo;
+  arr[i++] = second;
+}
+foo.caller;
+foo(arr);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/jaeger/getter-hook-2.js
@@ -0,0 +1,19 @@
+// PIC on CALLPROP invoking getter hook.
+
+function foo(arr) {
+  for (var i = 0; i < 100; i++)
+    arr[i].caller(false);
+}
+arr = Object.create(Object.prototype);
+first = Object.create({});
+first.caller = bar;
+second = Object.create({});
+second.caller = bar;
+for (var i = 0; i < 100; )
+  arr[i++] = foo;
+foo.caller;
+function bar(x) {
+  if (x)
+    foo(arr);
+}
+bar(true);
--- a/js/src/jsanalyze.h
+++ b/js/src/jsanalyze.h
@@ -135,16 +135,24 @@ class Bytecode
      * Side effects of this bytecode were not determined by type inference.
      * Either a property set with unknown lvalue, or call with unknown callee.
      */
     bool monitoredTypes : 1;
 
     /* Call whose result should be monitored. */
     bool monitoredTypesReturn : 1;
 
+    /*
+     * Dynamically observed state about the execution of this opcode. These are
+     * hints about the script for use during compilation.
+     */
+    bool arrayWriteHole : 1;   /* SETELEM which has written to an array hole. */
+    bool getStringElement : 1; /* GETELEM which has accessed string properties. */
+    bool accessGetter : 1;     /* Property read on a shape with a getter hook. */
+
     /* Stack depth before this opcode. */
     uint32 stackDepth;
 
   private:
     /*
      * The set of locals defined at this point. This does not include locals which
      * were unconditionally defined at an earlier point in the script.
      */
@@ -959,25 +967,23 @@ class ScriptAnalysis
      * True if there are any LOCAL opcodes aliasing values on the stack (above
      * script->nfixed).
      */
     bool localsAliasStack() { return localsAliasStack_; }
 
     /* Accessors for bytecode information. */
 
     Bytecode& getCode(uint32 offset) {
-        JS_ASSERT(script->compartment()->activeAnalysis);
         JS_ASSERT(offset < script->length);
         JS_ASSERT(codeArray[offset]);
         return *codeArray[offset];
     }
     Bytecode& getCode(const jsbytecode *pc) { return getCode(pc - script->code); }
 
     Bytecode* maybeCode(uint32 offset) {
-        JS_ASSERT(script->compartment()->activeAnalysis);
         JS_ASSERT(offset < script->length);
         return codeArray[offset];
     }
     Bytecode* maybeCode(const jsbytecode *pc) { return maybeCode(pc - script->code); }
 
     bool jumpTarget(uint32 offset) {
         JS_ASSERT(offset < script->length);
         return codeArray[offset] && codeArray[offset]->jumpTarget;
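
The three new hint bits are recorded at runtime by the interpreter and property-access paths changed later in this patch (see the jsinterp.cpp and jsobj.cpp hunks) and serve as hints during compilation. Rough JS sketches of code that would trip each hint (illustrative only; the bit names are the patch's):

    var a = [0, 1, 2];
    a[10] = 3;                    // SETELEM writing past the dense array's
                                  // initialized length: arrayWriteHole
    var o = {foo: 1}, k = "foo";
    o[k];                         // GETELEM with a string id: getStringElement

    var g = { get bar() { return 2; } };
    g.bar;                        // read through a getter hook: accessGetter
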
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -2610,44 +2610,46 @@ array_unshift(JSContext *cx, uintN argc,
     if (!js_SetLengthProperty(cx, obj, newlen))
         return JS_FALSE;
 
     /* Follow Perl by returning the new array length. */
     vp->setNumber(newlen);
     return JS_TRUE;
 }
 
+static inline void
+TryReuseArrayType(JSObject *obj, JSObject *nobj)
+{
+    /*
+     * Try to change the type of a newly created array nobj to the same type
+     * as obj. This can only be performed if the original object is an array
+     * and has the same prototype.
+     */
+    JS_ASSERT(nobj->isDenseArray());
+    JS_ASSERT(nobj->type() == nobj->getProto()->newType);
+
+    if (obj->isArray() && !obj->hasSingletonType() && obj->getProto() == nobj->getProto())
+        nobj->setType(obj->type());
+}
+
 static JSBool
 array_splice(JSContext *cx, uintN argc, Value *vp)
 {
     JSObject *obj = ToObject(cx, &vp[1]);
     if (!obj)
         return false;
 
     jsuint length, begin, end, count, delta, last;
     JSBool hole;
 
-    /*
-     * Get the type of the result object: the original type when splicing an
-     * array, a generic array type otherwise.
-     */
-    TypeObject *type;
-    if (obj->isArray() && !obj->hasSingletonType()) {
-        type = obj->type();
-    } else {
-        type = GetTypeNewObject(cx, JSProto_Array);
-        if (!type)
-            return false;
-    }
-
     /* Create a new array value to return. */
     JSObject *obj2 = NewDenseEmptyArray(cx);
     if (!obj2)
         return JS_FALSE;
-    obj2->setType(type);
+    TryReuseArrayType(obj, obj2);
     vp->setObject(*obj2);
 
     /* Nothing to do if no args.  Otherwise get length. */
     if (argc == 0)
         return JS_TRUE;
     Value *argv = JS_ARGV(cx, vp);
     if (!js_GetLengthProperty(cx, obj, &length))
         return JS_FALSE;
@@ -2796,18 +2798,17 @@ array_concat(JSContext *cx, uintN argc, 
     jsuint length;
     if (aobj->isDenseArray()) {
         length = aobj->getArrayLength();
         const Value *vector = aobj->getDenseArrayElements();
         jsuint initlen = aobj->getDenseArrayInitializedLength();
         nobj = NewDenseCopiedArray(cx, initlen, vector);
         if (!nobj)
             return JS_FALSE;
-        if (nobj->getProto() == aobj->getProto() && !aobj->hasSingletonType())
-            nobj->setType(aobj->type());
+        TryReuseArrayType(aobj, nobj);
         nobj->setArrayLength(cx, length);
         if (!aobj->isPackedDenseArray())
             nobj->markDenseArrayNotPacked(cx);
         vp->setObject(*nobj);
         if (argc == 0)
             return JS_TRUE;
         argc--;
         p++;
@@ -2900,43 +2901,33 @@ array_slice(JSContext *cx, uintN argc, V
             }
             end = (jsuint)d;
         }
     }
 
     if (begin > end)
         begin = end;
 
-    /* Get the type object for the returned array, as for array_splice. */
-    TypeObject *type;
-    if (obj->isArray() && !obj->hasSingletonType()) {
-        type = obj->type();
-    } else {
-        type = GetTypeNewObject(cx, JSProto_Array);
-        if (!type)
-            return false;
-    }
-
     if (obj->isDenseArray() && end <= obj->getDenseArrayInitializedLength() &&
         !js_PrototypeHasIndexedProperties(cx, obj)) {
         nobj = NewDenseCopiedArray(cx, end - begin, obj->getDenseArrayElements() + begin);
         if (!nobj)
             return JS_FALSE;
-        nobj->setType(type);
+        TryReuseArrayType(obj, nobj);
         if (!obj->isPackedDenseArray())
             nobj->markDenseArrayNotPacked(cx);
         vp->setObject(*nobj);
         return JS_TRUE;
     }
 
     /* Create a new Array object and root it using *vp. */
     nobj = NewDenseAllocatedArray(cx, end - begin);
     if (!nobj)
         return JS_FALSE;
-    nobj->setType(type);
+    TryReuseArrayType(obj, nobj);
     vp->setObject(*nobj);
 
     AutoValueRooter tvr(cx);
     for (slot = begin; slot < end; slot++) {
         if (!JS_CHECK_OPERATION_LIMIT(cx) ||
             !GetElement(cx, obj, slot, &hole, tvr.addr())) {
             return JS_FALSE;
         }
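
TryReuseArrayType centralizes the type-reuse rule that array_splice, array_concat, and array_slice previously open-coded: the freshly created dense array adopts the source array's TypeObject only when the source is an array, is not a singleton, and shares the result's prototype. The script-visible operations in question are simply (illustrative):

    var src = [1, 2, 3];
    var a = src.slice(0, 2);      // array_slice result
    var b = src.concat([4]);      // array_concat result
    var c = src.splice(1, 1);     // array_splice result
    // Engine-internal: each result reuses src's type object when eligible.
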
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -2161,18 +2161,16 @@ JS_GetFunctionCallback(JSContext *cx)
     return cx->functionCallback;
 }
 
 #endif /* MOZ_TRACE_JSCALLS */
 
 JS_PUBLIC_API(void)
 JS_DumpBytecode(JSContext *cx, JSScript *script)
 {
-    JS_ASSERT(!cx->runtime->gcRunning);
-
 #if defined(DEBUG)
     AutoArenaAllocator mark(&cx->tempPool);
     Sprinter sprinter;
     INIT_SPRINTER(cx, &sprinter, &cx->tempPool, 0);
 
     fprintf(stdout, "--- SCRIPT %s:%d ---\n", script->filename, script->lineno);
     js_Disassemble(cx, script, true, &sprinter);
     fputs(sprinter.base, stdout);
--- a/js/src/jsfriendapi.cpp
+++ b/js/src/jsfriendapi.cpp
@@ -88,16 +88,25 @@ JS_FRIEND_API(JSBool)
 JS_SplicePrototype(JSContext *cx, JSObject *obj, JSObject *proto)
 {
     /*
      * Change the prototype of an object which hasn't been used anywhere
      * and does not share its type with another object. Unlike JS_SetPrototype,
      * does not nuke type information for the object.
      */
     CHECK_REQUEST(cx);
+
+    if (!obj->hasSingletonType()) {
+        /*
+         * We can see non-singleton objects when trying to splice prototypes
+         * due to mutable __proto__ (ugh).
+         */
+        return JS_SetPrototype(cx, obj, proto);
+    }
+
     return obj->splicePrototype(cx, proto);
 }
 
 JS_FRIEND_API(JSObject *)
 JS_NewObjectWithUniqueType(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent)
 {
     JSObject *obj = JS_NewObject(cx, clasp, proto, parent);
     if (!obj || !obj->setSingletonType(cx))
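
JS_SplicePrototype previously assumed a singleton object; it now falls back to JS_SetPrototype when the object is not a singleton, which can happen because script can mutate __proto__. A thin script-level sketch of the triggering mutation (the splice itself happens in C++ callers):

    // Sketch: mutable __proto__ is the script-visible mutation that can leave
    // JS_SplicePrototype looking at a non-singleton object.
    var o = {};
    o.__proto__ = Array.prototype;
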
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -1261,17 +1261,17 @@ StackFrame::getValidCalleeObject(JSConte
              * barrier, so we must clone fun and store it in fp's callee to
              * avoid re-cloning upon repeated foo.caller access.
              *
              * This must mean the code in js_DeleteProperty could not find this
              * stack frame on the stack when the method was deleted. We've lost
              * track of the method, so we associate it with the first barriered
              * object found starting from thisp on the prototype chain.
              */
-            JSObject *newfunobj = CloneFunctionObject(cx, fun, fun->getParent(), true);
+            JSObject *newfunobj = CloneFunctionObject(cx, fun);
             if (!newfunobj)
                 return false;
             newfunobj->setMethodObj(*first_barriered_thisp);
             overwriteCallee(*newfunobj);
             vp->setObject(*newfunobj);
             return true;
         }
     }
--- a/js/src/jsfun.h
+++ b/js/src/jsfun.h
@@ -457,16 +457,34 @@ CloneFunctionObject(JSContext *cx, JSFun
         JS_ASSERT(fun->getProto() == proto);
         fun->setParent(parent);
         return fun;
     }
 
     return js_CloneFunctionObject(cx, fun, parent, proto);
 }
 
+inline JSObject *
+CloneFunctionObject(JSContext *cx, JSFunction *fun)
+{
+    /*
+     * Variant which makes an exact clone of fun, preserving parent and proto.
+     * Calling the above version CloneFunctionObject(cx, fun, fun->getParent())
+     * is not equivalent: API clients, including XPConnect, can reparent
+     * objects so that fun->getGlobal() != fun->getProto()->getGlobal().
+     * See ReparentWrapperIfFound.
+     */
+    JS_ASSERT(fun->getParent() && fun->getProto());
+
+    if (fun->hasSingletonType())
+        return fun;
+
+    return js_CloneFunctionObject(cx, fun, fun->getParent(), fun->getProto());
+}
+
 extern JSObject * JS_FASTCALL
 js_AllocFlatClosure(JSContext *cx, JSFunction *fun, JSObject *scopeChain);
 
 extern JSObject *
 js_NewFlatClosure(JSContext *cx, JSFunction *fun, JSOp op, size_t oplen);
 
 extern JSFunction *
 js_DefineFunction(JSContext *cx, JSObject *obj, jsid id, js::Native native,
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -686,34 +686,35 @@ TypeSet::addTransformThis(JSContext *cx,
  * discovered scripted functions.
  */
 class TypeConstraintPropagateThis : public TypeConstraint
 {
 public:
     JSScript *script;
     jsbytecode *callpc;
     Type type;
-
-    TypeConstraintPropagateThis(JSScript *script, jsbytecode *callpc, Type type)
-        : TypeConstraint("propagatethis"), script(script), callpc(callpc), type(type)
+    TypeSet *types;
+
+    TypeConstraintPropagateThis(JSScript *script, jsbytecode *callpc, Type type, TypeSet *types)
+        : TypeConstraint("propagatethis"), script(script), callpc(callpc), type(type), types(types)
     {}
 
     void newType(JSContext *cx, TypeSet *source, Type type);
 };
 
 void
-TypeSet::addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc, Type type)
+TypeSet::addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc, Type type, TypeSet *types)
 {
     /* Don't add constraints when the call will be 'new' (see addCallProperty). */
     jsbytecode *callpc = script->analysis()->getCallPC(pc);
     UntrapOpcode untrap(cx, script, callpc);
     if (JSOp(*callpc) == JSOP_NEW)
         return;
 
-    add(cx, ArenaNew<TypeConstraintPropagateThis>(cx->compartment->pool, script, callpc, type));
+    add(cx, ArenaNew<TypeConstraintPropagateThis>(cx->compartment->pool, script, callpc, type, types));
 }
 
 /* Subset constraint which filters out primitive types. */
 class TypeConstraintFilterPrimitive : public TypeConstraint
 {
 public:
     TypeSet *target;
     TypeSet::FilterKind filter;
@@ -1058,20 +1059,20 @@ TypeConstraintProp::newType(JSContext *c
 }
 
 void
 TypeConstraintCallProp::newType(JSContext *cx, TypeSet *source, Type type)
 {
     UntrapOpcode untrap(cx, script, callpc);
 
     /*
-     * For CALLPROP and CALLELEM, we need to update not just the pushed types
-     * but also the 'this' types of possible callees. If we can't figure out
-     * that set of callees, monitor the call to make sure discovered callees
-     * get their 'this' types updated.
+     * For CALLPROP, we need to update not just the pushed types but also the
+     * 'this' types of possible callees. If we can't figure out that set of
+     * callees, monitor the call to make sure discovered callees get their
+     * 'this' types updated.
      */
 
     if (UnknownPropertyAccess(script, type)) {
         cx->compartment->types.monitorBytecode(cx, script, callpc - script->code);
         return;
     }
 
     TypeObject *object = GetPropertyObject(cx, script, type);
@@ -1081,17 +1082,18 @@ TypeConstraintCallProp::newType(JSContex
         } else {
             TypeSet *types = object->getProperty(cx, id, false);
             if (!types)
                 return;
             if (!types->hasPropagatedProperty())
                 object->getFromPrototypes(cx, id, types);
             /* Bypass addPropagateThis, we already have the callpc. */
             types->add(cx, ArenaNew<TypeConstraintPropagateThis>(cx->compartment->pool,
-                                                                 script, callpc, type));
+                                                                 script, callpc, type,
+                                                                 (TypeSet *) NULL));
         }
     }
 }
 
 void
 TypeConstraintSetElement::newType(JSContext *cx, TypeSet *source, Type type)
 {
     if (type.isUnknown() ||
@@ -1225,18 +1227,18 @@ TypeConstraintCall::newType(JSContext *c
 
 void
 TypeConstraintPropagateThis::newType(JSContext *cx, TypeSet *source, Type type)
 {
     if (type.isUnknown() || type.isAnyObject()) {
         /*
          * The callee is unknown, make sure the call is monitored so we pick up
          * possible this/callee correlations. This only comes into play for
-         * CALLPROP and CALLELEM, for other calls we are past the type barrier
-         * already and a TypeConstraintCall will also monitor the call.
+         * CALLPROP, for other calls we are past the type barrier and a
+         * TypeConstraintCall will also monitor the call.
          */
         cx->compartment->types.monitorBytecode(cx, script, callpc - script->code);
         return;
     }
 
     /* Ignore calls to natives, these will be handled by TypeConstraintCall. */
     JSFunction *callee = NULL;
 
@@ -1253,17 +1255,21 @@ TypeConstraintPropagateThis::newType(JSC
     } else {
         /* Ignore calls to primitives, these will go through a stub. */
         return;
     }
 
     if (!callee->script()->ensureHasTypes(cx, callee))
         return;
 
-    TypeScript::ThisTypes(callee->script())->addType(cx, this->type);
+    TypeSet *thisTypes = TypeScript::ThisTypes(callee->script());
+    if (this->types)
+        this->types->addSubset(cx, thisTypes);
+    else
+        thisTypes->addType(cx, this->type);
 }
 
 void
 TypeConstraintArith::newType(JSContext *cx, TypeSet *source, Type type)
 {
     /*
      * We only model a subset of the arithmetic behavior that is actually
      * possible. The following need to be watched for at runtime:
@@ -1886,20 +1892,18 @@ TypeSet::hasGlobalObject(JSContext *cx, 
 
 TypeObject types::emptyTypeObject(NULL, false, true);
 
 void
 TypeCompartment::init(JSContext *cx)
 {
     PodZero(this);
 
-#ifndef JS_CPU_ARM
     if (cx && cx->getRunOptions() & JSOPTION_TYPE_INFERENCE)
         inferenceEnabled = true;
-#endif
 }
 
 TypeObject *
 TypeCompartment::newTypeObject(JSContext *cx, JSScript *script,
                                JSProtoKey key, JSObject *proto, bool unknown)
 {
     TypeObject *object = NewGCThing<TypeObject>(cx, gc::FINALIZE_TYPE_OBJECT, sizeof(TypeObject));
     if (!object)
@@ -3216,16 +3220,24 @@ ScriptAnalysis::resolveNameAccess(JSCont
             access.slot = (kind == ARGUMENT) ? ArgSlot(index) : LocalSlot(script, index);
             access.arg = (kind == ARGUMENT);
             access.index = index;
             return access;
         } else if (kind != NONE) {
             return access;
         }
 
+        /*
+         * The script's bindings do not contain a name for the function itself,
+         * so don't resolve name accesses on lambdas in DeclEnv objects on the
+         * scope chain.
+         */
+        if (atom == CallObjectLambdaName(script->function()))
+            return access;
+
         if (!script->nesting()->parent)
             return access;
         script = script->nesting()->parent;
     }
 
     return access;
 }
 
@@ -3671,29 +3683,28 @@ ScriptAnalysis::analyzeTypesBytecode(JSC
        * which is accessing a non-integer property must be monitored.
        */
 
       case JSOP_GETELEM:
       case JSOP_CALLELEM: {
         TypeSet *seen = script->analysis()->bytecodeTypes(pc);
 
         poppedTypes(pc, 1)->addGetProperty(cx, script, pc, seen, JSID_VOID);
-        if (op == JSOP_CALLELEM)
-            poppedTypes(pc, 1)->addCallProperty(cx, script, pc, JSID_VOID);
 
         seen->addSubset(cx, &pushed[0]);
-        if (op == JSOP_CALLELEM)
+        if (op == JSOP_CALLELEM) {
             poppedTypes(pc, 1)->addFilterPrimitives(cx, &pushed[1], TypeSet::FILTER_NULL_VOID);
+            pushed[0].addPropagateThis(cx, script, pc, Type::UndefinedType(), &pushed[1]);
+        }
         if (CheckNextTest(pc))
             pushed[0].addType(cx, Type::UndefinedType());
         break;
       }
 
       case JSOP_SETELEM:
-      case JSOP_SETHOLE:
         poppedTypes(pc, 1)->addSetElement(cx, script, pc, poppedTypes(pc, 2), poppedTypes(pc, 0));
         poppedTypes(pc, 0)->addSubset(cx, &pushed[0]);
         break;
 
       case JSOP_TOID:
         /*
          * This is only used for element inc/dec ops; any id produced which
          * is not an integer must be monitored.
@@ -6016,16 +6027,23 @@ TypeScript::Sweep(JSContext *cx, JSScrip
 
     /*
      * Method JIT code depends on the type inference data which is about to
      * be purged, so purge the jitcode as well.
      */
 #ifdef JS_METHODJIT
     mjit::ReleaseScriptCode(cx, script);
 #endif
+
+    /*
+     * Use counts for scripts are reset on GC. After discarding code we need to
+     * let it warm back up to get information like which opcodes are setting
+     * array holes or accessing getter properties.
+     */
+    script->resetUseCount();
 }
 
 void
 TypeScript::destroy()
 {
     while (dynamicList) {
         TypeResult *next = dynamicList->next;
         Foreground::delete_(dynamicList);
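
TypeConstraintPropagateThis can now carry a TypeSet: for CALLELEM, the set of values pushed as 'this' is forwarded into the callee's 'this' types via a subset constraint instead of a single added type. The pattern this serves is a callee looked up by a computed key, as in the new jit-test bug686396.js (sketch):

    function typeOfThis() { return typeof this; }
    Number.prototype.type = typeOfThis;
    var receiver = 2.3, key = "type";
    receiver[key]();   // CALLELEM: 'this' in the callee is the boxed receiver
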
--- a/js/src/jsinfer.h
+++ b/js/src/jsinfer.h
@@ -447,17 +447,18 @@ class TypeSet
     void addSetProperty(JSContext *cx, JSScript *script, jsbytecode *pc,
                         TypeSet *target, jsid id);
     void addCallProperty(JSContext *cx, JSScript *script, jsbytecode *pc, jsid id);
     void addSetElement(JSContext *cx, JSScript *script, jsbytecode *pc,
                        TypeSet *objectTypes, TypeSet *valueTypes);
     void addCall(JSContext *cx, TypeCallsite *site);
     void addArith(JSContext *cx, TypeSet *target, TypeSet *other = NULL);
     void addTransformThis(JSContext *cx, JSScript *script, TypeSet *target);
-    void addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc, Type type);
+    void addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc,
+                          Type type, TypeSet *types = NULL);
     void addFilterPrimitives(JSContext *cx, TypeSet *target, FilterKind filter);
     void addSubsetBarrier(JSContext *cx, JSScript *script, jsbytecode *pc, TypeSet *target);
     void addLazyArguments(JSContext *cx, TypeSet *target);
 
     /*
      * Make an type set with the specified debugging name, not embedded in
      * another structure.
      */
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -3898,23 +3898,23 @@ BEGIN_CASE(JSOP_GETELEM)
             id = INT_TO_JSID(i);
         } else {
           intern_big_int:
             if (!js_InternNonIntElementId(cx, obj, rref, &id))
                 goto error;
         }
     }
 
+    if (JSID_IS_STRING(id) && script->hasAnalysis() && !regs.fp()->hasImacropc())
+        script->analysis()->getCode(regs.pc).getStringElement = true;
+
     if (!obj->getProperty(cx, id, &rval))
         goto error;
     copyFrom = &rval;
 
-    if (!JSID_IS_INT(id))
-        TypeScript::MonitorUnknown(cx, script, regs.pc);
-
   end_getelem:
     regs.sp--;
     regs.sp[-1] = *copyFrom;
     assertSameCompartment(cx, regs.sp[-1]);
     TypeScript::Monitor(cx, script, regs.pc, regs.sp[-1]);
 }
 END_CASE(JSOP_GETELEM)
 
@@ -3942,24 +3942,21 @@ BEGIN_CASE(JSOP_CALLELEM)
         if (!OnUnknownMethod(cx, regs.sp - 2))
             goto error;
     } else
 #endif
     {
         regs.sp[-1] = thisv;
     }
 
-    if (!JSID_IS_INT(id))
-        TypeScript::MonitorUnknown(cx, script, regs.pc);
     TypeScript::Monitor(cx, script, regs.pc, regs.sp[-2]);
 }
 END_CASE(JSOP_CALLELEM)
 
 BEGIN_CASE(JSOP_SETELEM)
-BEGIN_CASE(JSOP_SETHOLE)
 {
     JSObject *obj;
     FETCH_OBJECT(cx, -3, obj);
     jsid id;
     FETCH_ELEMENT_ID(obj, -2, id);
     Value rval;
     TypeScript::MonitorAssign(cx, script, regs.pc, obj, id, regs.sp[-1]);
     do {
@@ -3967,22 +3964,22 @@ BEGIN_CASE(JSOP_SETHOLE)
             jsuint length = obj->getDenseArrayInitializedLength();
             jsint i = JSID_TO_INT(id);
             if ((jsuint)i < length) {
                 if (obj->getDenseArrayElement(i).isMagic(JS_ARRAY_HOLE)) {
                     if (js_PrototypeHasIndexedProperties(cx, obj))
                         break;
                     if ((jsuint)i >= obj->getArrayLength())
                         obj->setArrayLength(cx, i + 1);
-                    *regs.pc = JSOP_SETHOLE;
                 }
                 obj->setDenseArrayElementWithType(cx, i, regs.sp[-1]);
                 goto end_setelem;
             } else {
-                *regs.pc = JSOP_SETHOLE;
+                if (script->hasAnalysis() && !regs.fp()->hasImacropc())
+                    script->analysis()->getCode(regs.pc).arrayWriteHole = true;
             }
         }
     } while (0);
     rval = regs.sp[-1];
     if (!obj->setProperty(cx, id, &rval, script->strictModeCode))
         goto error;
   end_setelem:;
 }
@@ -4681,16 +4678,17 @@ BEGIN_CASE(JSOP_DEFFUN)
      * and event handlers shared among Firefox or other Mozilla app chrome
      * windows, and user-defined JS functions precompiled and then shared among
      * requests in server-side JS.
      */
     if (obj->getParent() != obj2) {
         obj = CloneFunctionObject(cx, fun, obj2, true);
         if (!obj)
             goto error;
+        JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
     }
 
     /*
      * ECMA requires functions defined when entering Eval code to be
      * impermanent.
      */
     uintN attrs = regs.fp()->isEvalFrame()
                   ? JSPROP_ENUMERATE
@@ -4814,16 +4812,18 @@ BEGIN_CASE(JSOP_DEFLOCALFUN)
                 AbortRecording(cx, "DEFLOCALFUN for closure");
 #endif
             obj = CloneFunctionObject(cx, fun, parent, true);
             if (!obj)
                 goto error;
         }
     }
 
+    JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
+
     uint32 slot = GET_SLOTNO(regs.pc);
     TRACE_2(DefLocalFunSetSlot, slot, obj);
 
     regs.fp()->slots()[slot].setObject(*obj);
 }
 END_CASE(JSOP_DEFLOCALFUN)
 
 BEGIN_CASE(JSOP_DEFLOCALFUN_FC)
@@ -4932,28 +4932,31 @@ BEGIN_CASE(JSOP_LAMBDA)
         }
 
         obj = CloneFunctionObject(cx, fun, parent, true);
         if (!obj)
             goto error;
     } while (0);
 
     JS_ASSERT(obj->getProto());
+    JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
+
     PUSH_OBJECT(*obj);
 }
 END_CASE(JSOP_LAMBDA)
 
 BEGIN_CASE(JSOP_LAMBDA_FC)
 {
     JSFunction *fun;
     LOAD_FUNCTION(0);
 
     JSObject *obj = js_NewFlatClosure(cx, fun, JSOP_LAMBDA_FC, JSOP_LAMBDA_FC_LENGTH);
     if (!obj)
         goto error;
+    JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
 
     PUSH_OBJECT(*obj);
 }
 END_CASE(JSOP_LAMBDA_FC)
 
 BEGIN_CASE(JSOP_CALLEE)
     JS_ASSERT(regs.fp()->isNonEvalFunctionFrame());
     PUSH_COPY(argv[-2]);
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -5660,42 +5660,49 @@ js_NativeGetInline(JSContext *cx, JSObje
     int32 sample;
 
     JS_ASSERT(pobj->isNative());
 
     slot = shape->slot;
     if (slot != SHAPE_INVALID_SLOT) {
         *vp = pobj->nativeGetSlot(slot);
         JS_ASSERT(!vp->isMagic());
+        JS_ASSERT_IF(!pobj->hasSingletonType() && shape->hasDefaultGetterOrIsMethod(),
+                     js::types::TypeHasProperty(cx, pobj->type(), shape->propid, *vp));
     } else {
         vp->setUndefined();
     }
     if (shape->hasDefaultGetter())
         return true;
 
     if (JS_UNLIKELY(shape->isMethod()) && (getHow & JSGET_NO_METHOD_BARRIER)) {
         JS_ASSERT(shape->methodObject() == vp->toObject());
         return true;
     }
 
+    jsbytecode *pc;
+    JSScript *script = cx->stack.currentScript(&pc);
+    if (script && script->hasAnalysis() && !cx->fp()->hasImacropc()) {
+        analyze::Bytecode *code = script->analysis()->maybeCode(pc);
+        if (code)
+            code->accessGetter = true;
+    }
+
     sample = cx->runtime->propertyRemovals;
     if (!shape->get(cx, receiver, obj, pobj, vp))
         return false;
 
     if (pobj->containsSlot(slot) &&
         (JS_LIKELY(cx->runtime->propertyRemovals == sample) ||
          pobj->nativeContains(cx, *shape))) {
         if (!pobj->methodWriteBarrier(cx, *shape, *vp))
             return false;
         pobj->nativeSetSlot(slot, *vp);
     }
 
-    /* Record values produced by shapes without a default getter. */
-    AddTypePropertyId(cx, obj, shape->propid, *vp);
-
     return true;
 }
 
 JSBool
 js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, const Shape *shape, uintN getHow,
              Value *vp)
 {
     return js_NativeGetInline(cx, obj, obj, pobj, shape, getHow, vp);
@@ -5986,17 +5993,17 @@ CloneFunctionForSetMethod(JSContext *cx,
     JSObject *funobj = &vp->toObject();
     JSFunction *fun = funobj->getFunctionPrivate();
 
     /*
      * If fun is already different from the original JSFunction, it does not
      * need to be cloned again.
      */
     if (fun == funobj) {
-        funobj = CloneFunctionObject(cx, fun, fun->parent, true);
+        funobj = CloneFunctionObject(cx, fun);
         if (!funobj)
             return false;
         vp->setObject(*funobj);
     }
     return true;
 }
 
 JSBool
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -139,18 +139,16 @@ JSObject::getProperty(JSContext *cx, JSO
 {
     js::PropertyIdOp op = getOps()->getProperty;
     if (op) {
         if (!op(cx, this, receiver, id, vp))
             return false;
     } else {
         if (!js_GetProperty(cx, this, receiver, id, vp))
             return false;
-        JS_ASSERT_IF(!hasSingletonType() && nativeContains(cx, js_CheckForStringIndex(id)),
-                     js::types::TypeHasProperty(cx, type(), id, *vp));
     }
     return true;
 }
 
 inline JSBool
 JSObject::getProperty(JSContext *cx, jsid id, js::Value *vp)
 {
     return getProperty(cx, this, id, vp);
@@ -262,17 +260,17 @@ JSObject::methodReadBarrier(JSContext *c
     JS_ASSERT(shape.hasDefaultSetter());
     JS_ASSERT(!isGlobal());  /* i.e. we are not changing the global shape */
 
     JSObject *funobj = &vp->toObject();
     JSFunction *fun = funobj->getFunctionPrivate();
     JS_ASSERT(fun == funobj);
     JS_ASSERT(fun->isNullClosure());
 
-    funobj = CloneFunctionObject(cx, fun, funobj->getParent(), true);
+    funobj = CloneFunctionObject(cx, fun);
     if (!funobj)
         return NULL;
     funobj->setMethodObj(*this);
 
     /*
      * Replace the method property with an ordinary data property. This is
      * equivalent to this->setProperty(cx, shape.id, vp) except that any
      * watchpoint on the property is not triggered.
--- a/js/src/jsopcode.cpp
+++ b/js/src/jsopcode.cpp
@@ -3961,17 +3961,16 @@ Decompile(SprintStack *ss, jsbytecode *p
                                   (JOF_OPMODE(lastop) == JOF_XMLNAME)
                                   ? dot_format
                                   : index_format,
                                   lval, xval);
                 }
                 break;
 
               case JSOP_SETELEM:
-              case JSOP_SETHOLE:
                 rval = POP_STR();
                 op = JSOP_NOP;          /* turn off parens */
                 xval = POP_STR();
                 cs = &js_CodeSpec[ss->opcodes[ss->top]];
                 op = JSOP_GETELEM;      /* lval must have high precedence */
                 lval = POP_STR();
                 op = saveop;
                 if (*xval == '\0')
--- a/js/src/jsopcode.tbl
+++ b/js/src/jsopcode.tbl
@@ -587,13 +587,10 @@ OPDEF(JSOP_OBJTOP,        225,"objtop", 
  */
 OPDEF(JSOP_SETMETHOD,     226,"setmethod",     NULL,  3,  2,  1,  3,  JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING)
 OPDEF(JSOP_INITMETHOD,    227,"initmethod",    NULL,  3,  2,  1,  3,  JOF_ATOM|JOF_PROP|JOF_SET|JOF_DETECTING)
 OPDEF(JSOP_UNBRAND,       228,"unbrand",       NULL,  1,  1,  1,  0,  JOF_BYTE)
 OPDEF(JSOP_UNBRANDTHIS,   229,"unbrandthis",   NULL,  1,  0,  0,  0,  JOF_BYTE)
 
 OPDEF(JSOP_SHARPINIT,     230,"sharpinit",     NULL,  3,  0,  0,  0,  JOF_UINT16|JOF_SHARPSLOT)
 
-/* Substituted for JSOP_SETELEM to indicate opcodes which have written holes in dense arrays. */
-OPDEF(JSOP_SETHOLE,       231, "sethole",      NULL,  1,  3,  1,  3,  JOF_BYTE |JOF_ELEM|JOF_SET|JOF_DETECTING)
-
 /* Pop the stack, convert to a jsid (int or string), and push back. */
-OPDEF(JSOP_TOID,          232, "toid",         NULL,  1,  1,  1,  0,  JOF_BYTE)
+OPDEF(JSOP_TOID,          231, "toid",         NULL,  1,  1,  1,  0,  JOF_BYTE)
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -6870,18 +6870,17 @@ LeaveTree(TraceMonitor *tm, TracerState&
             FrameRegs* regs = &cx->regs();
             JSOp op = (JSOp) *regs->pc;
 
             /*
             * JSOP_SETELEM can be coalesced with a JSOP_POP in the interpreter.
              * Since this doesn't re-enter the recorder, the post-state snapshot
              * is invalid. Fix it up here.
              */
-            if ((op == JSOP_SETELEM || op == JSOP_SETHOLE) &&
-                JSOp(regs->pc[JSOP_SETELEM_LENGTH]) == JSOP_POP) {
+            if (op == JSOP_SETELEM && JSOp(regs->pc[JSOP_SETELEM_LENGTH]) == JSOP_POP) {
                 regs->sp -= js_CodeSpec[JSOP_SETELEM].nuses;
                 regs->sp += js_CodeSpec[JSOP_SETELEM].ndefs;
                 regs->pc += JSOP_SETELEM_LENGTH;
                 op = JSOP_POP;
             }
 
             const JSCodeSpec& cs = js_CodeSpec[op];
             regs->sp -= (cs.format & JOF_INVOKE) ? GET_ARGC(regs->pc) + 2 : cs.nuses;
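/*
 * Aside: a stand-alone model (hypothetical names) of the stack fix-up above.
 * It replays one opcode's effect from its code-spec entry: pop 'nuses'
 * operands, push 'ndefs' results, and advance pc by the op's encoded length.
 */
struct CodeSpecModel { int nuses; int ndefs; int length; };

void advancePastOp(long long *&sp, const unsigned char *&pc, const CodeSpecModel &cs) {
    sp -= cs.nuses;   // pop the operands the op consumed
    sp += cs.ndefs;   // account for the values it pushed
    pc += cs.length;  // resume at the next opcode
}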
@@ -13423,34 +13422,28 @@ TraceRecorder::setElem(int lval_spindex,
         }
         w.resumeAddingCSEValues();
 
         // Right, actually set the element.
         box_value_into(v, v_ins, dslotAddr);
     }
 
     jsbytecode* pc = cx->regs().pc;
-    if ((*pc == JSOP_SETELEM || *pc == JSOP_SETHOLE) && pc[JSOP_SETELEM_LENGTH] != JSOP_POP)
+    if (*pc == JSOP_SETELEM && pc[JSOP_SETELEM_LENGTH] != JSOP_POP)
         set(&lval, v_ins);
 
     return ARECORD_CONTINUE;
 }
 
 JS_REQUIRES_STACK AbortableRecordingStatus
 TraceRecorder::record_JSOP_SETELEM()
 {
     return setElem(-3, -2, -1);
 }
 
-JS_REQUIRES_STACK AbortableRecordingStatus
-TraceRecorder::record_JSOP_SETHOLE()
-{
-    return setElem(-3, -2, -1);
-}
-
 static JSBool FASTCALL
 CheckSameGlobal(JSObject *obj, JSObject *globalObj)
 {
     return obj->getGlobal() == globalObj;
 }
 JS_DEFINE_CALLINFO_2(static, BOOL, CheckSameGlobal, OBJECT, OBJECT, 0, ACCSET_STORE_ANY)
 
 JS_REQUIRES_STACK AbortableRecordingStatus
@@ -17067,17 +17060,17 @@ LoopProfile::profileOperation(JSContext*
     }
 
     if (op == JSOP_EVAL)
         increment(OP_EVAL);
 
     if (op == JSOP_NEW)
         increment(OP_NEW);
 
-    if (op == JSOP_GETELEM || op == JSOP_SETELEM || op == JSOP_SETHOLE) {
+    if (op == JSOP_GETELEM || op == JSOP_SETELEM) {
         Value& lval = cx->regs().sp[op == JSOP_GETELEM ? -2 : -3];
         if (lval.isObject() && js_IsTypedArray(&lval.toObject()))
             increment(OP_TYPED_ARRAY);
         else if (lval.isObject() && lval.toObject().isDenseArray() && op == JSOP_GETELEM)
             increment(OP_ARRAY_READ);
     }
 
     if (op == JSOP_GETPROP || op == JSOP_CALLPROP) {
--- a/js/src/methodjit/BaseAssembler.h
+++ b/js/src/methodjit/BaseAssembler.h
@@ -120,25 +120,16 @@ class Assembler : public ValueAssembler
         JSC::FunctionPtr fun;
     };
 
     struct DoublePatch {
         double d;
         DataLabelPtr label;
     };
 
-    /* Need a temp reg that is not ArgReg1. */
-#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
-    static const RegisterID ClobberInCall = JSC::X86Registers::ecx;
-#elif defined(JS_CPU_ARM)
-    static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
-#elif defined(JS_CPU_SPARC)
-    static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
-#endif
-
     /* :TODO: OOM */
     Label startLabel;
     Vector<CallPatch, 64, SystemAllocPolicy> callPatches;
     Vector<DoublePatch, 16, SystemAllocPolicy> doublePatches;
 
     // Registers that can be clobbered during a call sequence.
     Registers   availInCall;
 
@@ -548,24 +539,24 @@ static const JSC::MacroAssembler::Regist
         } else {
             // Memory-to-memory, but no temporary registers are free.
            // This shouldn't happen on any platform, because
             // (TempRegs) Union (ArgRegs) != 0
             JS_NOT_REACHED("too much reg pressure");
         }
     }
 
-    void storeArg(uint32 i, Imm32 imm) {
+    void storeArg(uint32 i, ImmPtr imm) {
         JS_ASSERT(callIsAligned);
         RegisterID to;
         if (Registers::regForArg(callConvention, i, &to)) {
             move(imm, to);
             availInCall.takeRegUnchecked(to);
         } else {
-            store32(imm, addressOfArg(i));
+            storePtr(imm, addressOfArg(i));
         }
     }
 
     // High-level call helper, given an optional function pointer and a
     // calling convention. setupABICall() must have been called beforehand,
     // as well as each numbered argument stored with storeArg().
     //
     // After callWithABI(), the call state is reset, so a new call may begin.
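/*
 * Aside: a stand-alone model (hypothetical names) of the storeArg() change
 * above -- stage argument i in the register the calling convention assigns
 * to that slot, else spill it to the outgoing argument area. The patch
 * widens the stored immediate from 32 bits (Imm32) to pointer width (ImmPtr).
 */
#include <cstdint>
#include <map>

struct CallStateModel {
    std::map<int, uintptr_t> regs;   // arg index -> register contents
    std::map<int, uintptr_t> stack;  // arg index -> outgoing stack slot
};

bool regForArgModel(int i) { return i < 2; }  // assume two register args

void storeArgModel(CallStateModel &call, int i, uintptr_t imm) {
    if (regForArgModel(i))
        call.regs[i] = imm;    // move(imm, to)
    else
        call.stack[i] = imm;   // storePtr(imm, addressOfArg(i))
}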
@@ -620,27 +611,31 @@ static const JSC::MacroAssembler::Regist
 
     STUB_CALL_TYPE(JSObjStub);
     STUB_CALL_TYPE(VoidPtrStubUInt32);
     STUB_CALL_TYPE(VoidStubUInt32);
     STUB_CALL_TYPE(VoidStub);
 
 #undef STUB_CALL_TYPE
 
-    void setupInfallibleVMFrame(int32 frameDepth) {
+    void setupFrameDepth(int32 frameDepth) {
        // |frameDepth < 0| implies ic::SplatApplyArgs has been called, which
         // means regs.sp has already been set in the VMFrame.
         if (frameDepth >= 0) {
             // sp = fp->slots() + frameDepth
             // regs->sp = sp
             addPtr(Imm32(sizeof(StackFrame) + frameDepth * sizeof(jsval)),
                    JSFrameReg,
-                   ClobberInCall);
-            storePtr(ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
+                   Registers::ClobberInCall);
+            storePtr(Registers::ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
         }
+    }
+
+    void setupInfallibleVMFrame(int32 frameDepth) {
+        setupFrameDepth(frameDepth);
 
        // The JIT has moved Arg1 already, and we've guaranteed not to clobber
         // it. Move ArgReg0 into place now. setupFallibleVMFrame will not
         // clobber it either.
         move(MacroAssembler::stackPointerRegister, Registers::ArgReg0);
     }
 
     void setupFallibleVMFrame(bool inlining, jsbytecode *pc,
@@ -659,16 +654,29 @@ static const JSC::MacroAssembler::Regist
                                                  FrameAddress(VMFrame::offsetOfInlined));
             if (pinlined)
                 *pinlined = ptr;
         }
 
         restoreStackBase();
     }
 
+    void setupFallibleABICall(bool inlining, jsbytecode *pc, int32 frameDepth) {
+        setupFrameDepth(frameDepth);
+
+        /* Store fp and pc */
+        storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
+        storePtr(ImmPtr(pc), FrameAddress(offsetof(VMFrame, regs.pc)));
+
+        if (inlining) {
+            /* ABI calls cannot be made from inlined frames. */
+            storePtr(ImmPtr(NULL), FrameAddress(VMFrame::offsetOfInlined));
+        }
+    }
+
     void restoreStackBase() {
 #if defined(JS_CPU_X86)
         /*
          * We use the %ebp base stack pointer on x86 to store the JSStackFrame.
          * Restore this before calling so that debuggers can construct a
          * coherent stack if we crash outside of JIT code.
          */
         JS_STATIC_ASSERT(JSFrameReg == JSC::X86Registers::ebp);
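/*
 * Aside: a minimal model (hypothetical names) of the "publish before call"
 * discipline that the new setupFallibleABICall() above follows -- write fp
 * and pc into the VMFrame so stack walkers see a coherent frame, and clear
 * the inlined-frame slot because ABI calls cannot be made from inlined
 * frames. Sketch only:
 */
struct VMFrameModel {
    void *fp = nullptr;
    const unsigned char *pc = nullptr;
    void *inlined = nullptr;
};

void publishFrame(VMFrameModel &f, void *fp, const unsigned char *pc, bool inlining) {
    f.fp = fp;                 // storePtr(JSFrameReg, offsetOfFp)
    f.pc = pc;                 // storePtr(ImmPtr(pc), regs.pc)
    if (inlining)
        f.inlined = nullptr;   // storePtr(ImmPtr(NULL), offsetOfInlined)
}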
@@ -862,16 +870,17 @@ static const JSC::MacroAssembler::Regist
         loadPtr(Address(objReg, JSObject::offsetOfSlots()), dataReg);
         loadValueAsComponents(Address(dataReg, index * sizeof(Value)), typeReg, dataReg);
     }
 
     void loadObjProp(JSObject *obj, RegisterID objReg,
                      const js::Shape *shape,
                      RegisterID typeReg, RegisterID dataReg)
     {
+        JS_ASSERT(shape->hasSlot());
         if (shape->isMethod())
             loadValueAsComponents(ObjectValue(shape->methodObject()), typeReg, dataReg);
         else if (obj->isFixedSlot(shape->slot))
             loadInlineSlot(objReg, shape->slot, typeReg, dataReg);
         else
             loadDynamicSlot(objReg, obj->dynamicSlotIndex(shape->slot), typeReg, dataReg);
     }
 
--- a/js/src/methodjit/BaseCompiler.h
+++ b/js/src/methodjit/BaseCompiler.h
@@ -170,16 +170,50 @@ class LinkerHelper : public JSC::LinkBuf
         link(jump.get(), label);
     }
 
     size_t size() const {
         return m_size;
     }
 };
 
+class NativeStubLinker : public LinkerHelper
+{
+  public:
+#ifdef JS_CPU_X64
+    typedef JSC::MacroAssembler::DataLabelPtr FinalJump;
+#else
+    typedef JSC::MacroAssembler::Jump FinalJump;
+#endif
+
+    NativeStubLinker(Assembler &masm, JITScript *jit, jsbytecode *pc, FinalJump done)
+        : LinkerHelper(masm, JSC::METHOD_CODE), jit(jit), pc(pc), done(done)
+    {}
+
+    bool init(JSContext *cx);
+
+    void patchJump(JSC::CodeLocationLabel target) {
+#ifdef JS_CPU_X64
+        patch(done, target);
+#else
+        link(done, target);
+#endif
+    }
+
+  private:
+    JITScript *jit;
+    jsbytecode *pc;
+    FinalJump done;
+};
+
+bool
+NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
+                   int32 initialFrameDepth, int32 vpOffset,
+                   MaybeRegisterID typeReg, MaybeRegisterID dataReg);
+
 /*
  * On ARM, we periodically flush a constant pool into the instruction stream
  * where constants are found using PC-relative addressing. This is necessary
  * because the fixed-width instruction set doesn't support wide immediates.
  *
  * ICs perform repatching on the inline (fast) path by knowing small and
  * generally fixed code location offset values where the patchable instructions
  * live. Dumping a huge constant pool into the middle of an IC's inline path
@@ -188,77 +222,53 @@ class LinkerHelper : public JSC::LinkBuf
  * up front to prevent this from happening.
  */
 #ifdef JS_CPU_ARM
 template <size_t reservedSpace>
 class AutoReserveICSpace {
     typedef Assembler::Label Label;
 
     Assembler           &masm;
-#ifdef DEBUG
-    Label               startLabel;
     bool                didCheck;
-#endif
+    bool                *overflowSpace;
+    int                 flushCount;
 
   public:
-    AutoReserveICSpace(Assembler &masm) : masm(masm) {
+    AutoReserveICSpace(Assembler &masm, bool *overflowSpace)
+        : masm(masm), didCheck(false), overflowSpace(overflowSpace)
+    {
         masm.ensureSpace(reservedSpace);
-#ifdef DEBUG
-        didCheck = false;
-
-        startLabel = masm.label();
-
-        /* Assert that the constant pool is not flushed until we reach a safe point. */
-        masm.allowPoolFlush(false);
-
-        JaegerSpew(JSpew_Insns, " -- BEGIN CONSTANT-POOL-FREE REGION -- \n");
-#endif
+        flushCount = masm.flushCount();
     }
 
     /* Allow manual IC space checks so that non-patchable code at the end of an IC section can be
      * free to use constant pools. */
     void check() {
-#ifdef DEBUG
         JS_ASSERT(!didCheck);
         didCheck = true;
 
-        Label endLabel = masm.label();
-        int spaceUsed = masm.differenceBetween(startLabel, endLabel);
-
-        /* Spew the space used, to help tuning of reservedSpace. */
-        JaegerSpew(JSpew_Insns,
-                   " -- END CONSTANT-POOL-FREE REGION: %u bytes used of %u reserved. -- \n",
-                   spaceUsed, reservedSpace);
-
-        /* Assert that we didn't emit more code than we protected. */
-        JS_ASSERT(spaceUsed >= 0);
-        JS_ASSERT(size_t(spaceUsed) <= reservedSpace);
-
-        /* Allow the pool to be flushed. */
-        masm.allowPoolFlush(true);
-#endif
+        if (masm.flushCount() != flushCount)
+            *overflowSpace = true;
     }
 
     ~AutoReserveICSpace() {
-#ifdef DEBUG
         /* Automatically check the IC space if we didn't already do it manually. */
         if (!didCheck) {
             check();
         }
-#endif
     }
 };
 
-# define RESERVE_IC_SPACE(__masm)       AutoReserveICSpace<128> arics(__masm)
+# define RESERVE_IC_SPACE(__masm)       AutoReserveICSpace<256> arics(__masm, &this->overflowICSpace)
 # define CHECK_IC_SPACE()               arics.check()
 
 /* The OOL path can need a lot of space because we save and restore a lot of registers. The actual
  * sequence varies. However, dumping the literal pool before an OOL block is probably a good idea
  * anyway, as we branch directly to the start of the block from the fast path. */
-# define RESERVE_OOL_SPACE(__masm)      AutoReserveICSpace<256> arics_ool(__masm)
+# define RESERVE_OOL_SPACE(__masm)      AutoReserveICSpace<2048> arics_ool(__masm, &this->overflowICSpace)
 
 /* Allow the OOL patch to be checked before object destruction. Often, non-patchable epilogues or
  * rejoining sequences are emitted, and it isn't necessary to protect these from literal pools. */
 # define CHECK_OOL_SPACE()              arics_ool.check()
 #else
 # define RESERVE_IC_SPACE(__masm)       /* Do nothing. */
 # define CHECK_IC_SPACE()               /* Do nothing. */
 # define RESERVE_OOL_SPACE(__masm)      /* Do nothing. */
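/*
 * Aside: a stand-alone RAII model (hypothetical names) of the reworked
 * AutoReserveICSpace above. Instead of DEBUG-only assertions, the guard
 * snapshots the assembler's constant-pool flush counter on entry and, at
 * check() or destruction, sets an overflow flag if a pool was dumped inside
 * the reserved region; the compiler then aborts and falls back (see the
 * overflowICSpace check in Compiler.cpp below). Sketch only:
 */
struct AssemblerModel {
    int flushes = 0;
    void flushConstantPool() { ++flushes; }
    int flushCount() const { return flushes; }
};

class ReserveGuard {
    AssemblerModel &masm;
    bool *overflow;
    int startFlushes;
    bool checked = false;

  public:
    ReserveGuard(AssemblerModel &masm, bool *overflow)
        : masm(masm), overflow(overflow), startFlushes(masm.flushCount()) {}

    void check() {
        checked = true;
        if (masm.flushCount() != startFlushes)
            *overflow = true;   // a pool landed in the protected region
    }

    ~ReserveGuard() {
        if (!checked)
            check();            // automatic check, as in the patch
    }
};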
--- a/js/src/methodjit/Compiler.cpp
+++ b/js/src/methodjit/Compiler.cpp
@@ -128,16 +128,17 @@ mjit::Compiler::Compiler(JSContext *cx, 
 #if defined JS_TRACER
     addTraceHints(cx->traceJitEnabled),
 #else
     addTraceHints(false),
 #endif
     inlining_(false),
     hasGlobalReallocation(false),
     oomInVector(false),
+    overflowICSpace(false),
     gcNumber(cx->runtime->gcNumber),
     applyTricks(NoApplyTricks),
     pcLengths(NULL)
 {
     /* :FIXME: bug 637856 disabling traceJit if inference is enabled */
     if (cx->typeInferenceEnabled())
         addTraceHints = false;
 
@@ -889,16 +890,21 @@ mjit::Compiler::finishThisUp(JITScript *
 
     /*
      * Watch for GCs which occurred during compilation. These may have
      * renumbered shapes baked into the jitcode.
      */
     if (cx->runtime->gcNumber != gcNumber)
         return Compile_Retry;
 
+    if (overflowICSpace) {
+        JaegerSpew(JSpew_Scripts, "dumped a constant pool while generating an IC\n");
+        return Compile_Abort;
+    }
+
     for (size_t i = 0; i < branchPatches.length(); i++) {
         Label label = labelOf(branchPatches[i].pc, branchPatches[i].inlineIndex);
         branchPatches[i].jump.linkTo(label, &masm);
     }
 
 #ifdef JS_CPU_ARM
     masm.forceFlushConstantPool();
     stubcc.masm.forceFlushConstantPool();
@@ -2022,17 +2028,16 @@ mjit::Compiler::generateMethod()
                 return Compile_Error;
           END_CASE(JSOP_GETELEM)
 
           BEGIN_CASE(JSOP_TOID)
             jsop_toid();
           END_CASE(JSOP_TOID)
 
           BEGIN_CASE(JSOP_SETELEM)
-          BEGIN_CASE(JSOP_SETHOLE)
           {
             jsbytecode *next = &PC[JSOP_SETELEM_LENGTH];
             bool pop = (JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next));
             if (!jsop_setelem(pop))
                 return Compile_Error;
           }
           END_CASE(JSOP_SETELEM);
 
@@ -4052,27 +4057,27 @@ mjit::Compiler::inlineScriptedFunction(u
  */
 void
 mjit::Compiler::addCallSite(const InternalCallSite &site)
 {
     callSites.append(site);
 }
 
 void
-mjit::Compiler::inlineStubCall(void *stub, RejoinState rejoin)
+mjit::Compiler::inlineStubCall(void *stub, RejoinState rejoin, Uses uses)
 {
     DataLabelPtr inlinePatch;
     Call cl = emitStubCall(stub, &inlinePatch);
     InternalCallSite site(masm.callReturnOffset(cl), a->inlineIndex, PC,
                           rejoin, false);
     site.inlinePatch = inlinePatch;
     if (loop && loop->generatingInvariants()) {
         Jump j = masm.jump();
         Label l = masm.label();
-        loop->addInvariantCall(j, l, false, false, callSites.length());
+        loop->addInvariantCall(j, l, false, false, callSites.length(), uses);
     }
     addCallSite(site);
 }
 
 bool
 mjit::Compiler::compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const Value &rhs)
 {
     JS_ASSERT(lhs.isPrimitive());
@@ -4195,31 +4200,33 @@ void
 mjit::Compiler::jsop_getprop_slow(JSAtom *atom, bool usePropCache)
 {
     /* See ::jsop_getprop */
     RejoinState rejoin = usePropCache ? REJOIN_GETTER : REJOIN_THIS_PROTOTYPE;
 
     prepareStubCall(Uses(1));
     if (usePropCache) {
         INLINE_STUBCALL(stubs::GetProp, rejoin);
+        testPushedType(rejoin, -1, /* ool = */ false);
     } else {
         masm.move(ImmPtr(atom), Registers::ArgReg1);
         INLINE_STUBCALL(stubs::GetPropNoCache, rejoin);
     }
 
     frame.pop();
     frame.pushSynced(JSVAL_TYPE_UNKNOWN);
 }
 
 bool
 mjit::Compiler::jsop_callprop_slow(JSAtom *atom)
 {
     prepareStubCall(Uses(1));
     masm.move(ImmPtr(atom), Registers::ArgReg1);
     INLINE_STUBCALL(stubs::CallProp, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, -1, /* ool = */ false);
     frame.pop();
     pushSyncedEntry(0);
     pushSyncedEntry(1);
     return true;
 }
 
 #ifdef JS_MONOIC
 void
@@ -4305,16 +4312,18 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
          */
         if (!types->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY)) {
             bool isObject = top->isTypeKnown();
             if (!isObject) {
                 Jump notObject = frame.testObject(Assembler::NotEqual, top);
                 stubcc.linkExit(notObject, Uses(1));
                 stubcc.leave();
                 OOL_STUBCALL(stubs::GetProp, rejoin);
+                if (rejoin == REJOIN_GETTER)
+                    testPushedType(rejoin, -1);
             }
             RegisterID reg = frame.tempRegForData(top);
             frame.pop();
             frame.pushWord(Address(reg, offsetof(JSObject, privateData)), JSVAL_TYPE_INT32);
             if (!isObject)
                 stubcc.rejoin(Changes(1));
             return true;
         }
@@ -4325,16 +4334,18 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
          */
         if (!types->hasObjectFlags(cx, types::OBJECT_FLAG_NON_TYPED_ARRAY)) {
             bool isObject = top->isTypeKnown();
             if (!isObject) {
                 Jump notObject = frame.testObject(Assembler::NotEqual, top);
                 stubcc.linkExit(notObject, Uses(1));
                 stubcc.leave();
                 OOL_STUBCALL(stubs::GetProp, rejoin);
+                if (rejoin == REJOIN_GETTER)
+                    testPushedType(rejoin, -1);
             }
             RegisterID reg = frame.copyDataIntoReg(top);
             frame.pop();
             frame.pushWord(Address(reg, TypedArray::lengthOffset()), JSVAL_TYPE_INT32);
             frame.freeReg(reg);
             if (!isObject)
                 stubcc.rejoin(Changes(1));
             return true;
@@ -4387,16 +4398,18 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
             types->addFreeze(cx);
             uint32 slot = propertyTypes->definiteSlot();
             bool isObject = top->isTypeKnown();
             if (!isObject) {
                 Jump notObject = frame.testObject(Assembler::NotEqual, top);
                 stubcc.linkExit(notObject, Uses(1));
                 stubcc.leave();
                 OOL_STUBCALL(stubs::GetProp, rejoin);
+                if (rejoin == REJOIN_GETTER)
+                    testPushedType(rejoin, -1);
             }
             RegisterID reg = frame.tempRegForData(top);
             frame.pop();
 
             Address address(reg, JSObject::getFixedSlotOffset(slot));
             BarrierState barrier = pushAddressMaybeBarrier(address, knownType, false);
             if (!isObject)
                 stubcc.rejoin(Changes(1));
@@ -4442,16 +4455,29 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
         pic.typeReg = Registers::ReturnReg;
     }
 
     if (atom != cx->runtime->atomState.lengthAtom) {
         objReg = frame.copyDataIntoReg(top);
         shapeReg = frame.allocReg();
     }
 
+    /*
+     * If this access has been on a shape with a getter hook, make preparations
+     * so that we can generate a stub to call the hook directly (rather than be
+     * forced to make a stub call). Sync the stack up front and kill all
+     * registers so that PIC stubs can contain calls, and always generate a
+     * type barrier if inference is enabled (known property types do not
+     * reflect properties with getter hooks).
+     */
+    pic.canCallHook = pic.forcedTypeBarrier =
+        usePropCache && JSOp(*PC) == JSOP_GETPROP && analysis->getCode(PC).accessGetter;
+    if (pic.canCallHook)
+        frame.syncAndKillEverything();
+
     pic.shapeReg = shapeReg;
     pic.atom = atom;
 
     /* Guard on shape. */
     masm.loadShape(objReg, shapeReg);
     pic.shapeGuard = masm.label();
 
     DataLabel32 inlineShapeLabel;
@@ -4462,16 +4488,18 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
 
     RESERVE_OOL_SPACE(stubcc.masm);
     pic.slowPathStart = stubcc.linkExit(j, Uses(1));
 
     stubcc.leave();
     passICAddress(&pic);
     pic.slowPathCall = OOL_STUBCALL(usePropCache ? ic::GetProp : ic::GetPropNoCache, rejoin);
     CHECK_OOL_SPACE();
+    if (rejoin == REJOIN_GETTER)
+        testPushedType(rejoin, -1);
 
     /* Load the base slot address. */
     Label dslotsLoadLabel = masm.loadPtrWithPatchToLEA(Address(objReg, offsetof(JSObject, slots)),
                                                                objReg);
 
     /* Copy the slot value to the expression stack. */
     Address slot(objReg, 1 << 24);
     frame.pop();
@@ -4490,19 +4518,22 @@ mjit::Compiler::jsop_getprop(JSAtom *ato
     if (pic.hasTypeCheck)
         labels.setInlineTypeJump(masm, pic.fastPathStart, typeCheck);
 #ifdef JS_CPU_X64
     labels.setInlineShapeJump(masm, inlineShapeLabel, inlineShapeJump);
 #else
     labels.setInlineShapeJump(masm, pic.shapeGuard, inlineShapeJump);
 #endif
 
+    CHECK_IC_SPACE();
+
     pic.objReg = objReg;
     frame.pushRegs(shapeReg, objReg, knownType);
-    BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg);
+    BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
+                                       /* force = */ pic.canCallHook);
 
     stubcc.rejoin(Changes(1));
     pics.append(pic);
 
     finishBarrier(barrier, rejoin, 0);
     return true;
 }
 
@@ -4542,16 +4573,20 @@ mjit::Compiler::jsop_callprop_generic(JS
     RETURN_IF_OOM(false);
 
     pic.typeCheck = stubcc.linkExit(typeCheckJump, Uses(1));
     pic.hasTypeCheck = true;
     pic.objReg = objReg;
     pic.shapeReg = shapeReg;
     pic.atom = atom;
 
+    pic.canCallHook = pic.forcedTypeBarrier = analysis->getCode(PC).accessGetter;
+    if (pic.canCallHook)
+        frame.syncAndKillEverything();
+
     /*
      * Store the type and object back. Don't bother keeping them in registers,
      * since a sync will be needed for the upcoming call.
      */
     uint32 thisvSlot = frame.totalDepth();
     Address thisv = Address(JSFrameReg, sizeof(StackFrame) + thisvSlot * sizeof(Value));
 
 #if defined JS_NUNBOX32
@@ -4576,16 +4611,18 @@ mjit::Compiler::jsop_callprop_generic(JS
     /* Slow path. */
     RESERVE_OOL_SPACE(stubcc.masm);
     pic.slowPathStart = stubcc.linkExit(j, Uses(1));
     stubcc.leave();
     passICAddress(&pic);
     pic.slowPathCall = OOL_STUBCALL(ic::CallProp, REJOIN_FALLTHROUGH);
     CHECK_OOL_SPACE();
 
+    testPushedType(REJOIN_FALLTHROUGH, -1);
+
     /* Load the base slot address. */
     Label dslotsLoadLabel = masm.loadPtrWithPatchToLEA(Address(objReg, offsetof(JSObject, slots)),
                                                                objReg);
 
     /* Copy the slot value to the expression stack. */
     Address slot(objReg, 1 << 24);
 
     Label fastValueLoad = masm.loadValueWithAddressOffsetPatch(slot, shapeReg, objReg);
@@ -4603,20 +4640,23 @@ mjit::Compiler::jsop_callprop_generic(JS
     labels.setValueLoad(masm, pic.fastPathRejoin, fastValueLoad);
     labels.setInlineTypeJump(masm, pic.fastPathStart, typeCheck);
 #ifdef JS_CPU_X64
     labels.setInlineShapeJump(masm, inlineShapeLabel, inlineShapeJump);
 #else
     labels.setInlineShapeJump(masm, pic.shapeGuard, inlineShapeJump);
 #endif
 
+    CHECK_IC_SPACE();
+
     /* Adjust the frame. */
     frame.pop();
     frame.pushRegs(shapeReg, objReg, knownPushedType(0));
-    BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg);
+    BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
+                                       /* force = */ pic.canCallHook);
 
     pushSyncedEntry(1);
 
     stubcc.rejoin(Changes(2));
     pics.append(pic);
 
     finishBarrier(barrier, REJOIN_FALLTHROUGH, 1);
     return true;
@@ -4705,16 +4745,20 @@ mjit::Compiler::jsop_callprop_obj(JSAtom
     RegisterID objReg;
     if (top->isConstant()) {
         objReg = frame.allocReg();
         masm.move(ImmPtr(&top->getValue().toObject()), objReg);
     } else {
         objReg = frame.copyDataIntoReg(top);
     }
 
+    pic.canCallHook = pic.forcedTypeBarrier = analysis->getCode(PC).accessGetter;
+    if (pic.canCallHook)
+        frame.syncAndKillEverything();
+
     /* Guard on shape. */
     masm.loadShape(objReg, shapeReg);
     pic.shapeGuard = masm.label();
 
     DataLabel32 inlineShapeLabel;
     Jump j = masm.branch32WithPatch(Assembler::NotEqual, shapeReg,
                            Imm32(int32(INVALID_SHAPE)),
                            inlineShapeLabel);
@@ -4723,38 +4767,43 @@ mjit::Compiler::jsop_callprop_obj(JSAtom
     /* Slow path. */
     RESERVE_OOL_SPACE(stubcc.masm);
     pic.slowPathStart = stubcc.linkExit(j, Uses(1));
     stubcc.leave();
     passICAddress(&pic);
     pic.slowPathCall = OOL_STUBCALL(ic::CallProp, REJOIN_FALLTHROUGH);
     CHECK_OOL_SPACE();
 
+    testPushedType(REJOIN_FALLTHROUGH, -1);
+
     /* Load the base slot address. */
     Label dslotsLoadLabel = masm.loadPtrWithPatchToLEA(Address(objReg, offsetof(JSObject, slots)),
                                                                objReg);
 
     /* Copy the slot value to the expression stack. */
     Address slot(objReg, 1 << 24);
 
     Label fastValueLoad = masm.loadValueWithAddressOffsetPatch(slot, shapeReg, objReg);
 
     pic.fastPathRejoin = masm.label();
     pic.objReg = objReg;
 
+    CHECK_IC_SPACE();
+
     /*
      * 1) Dup the |this| object.
      * 2) Store the property value below the |this| value.
      * This is safe as a stack transition, because JSOP_CALLPROP has
      * JOF_TMPSLOT. It is also safe for correctness, because if we know the LHS
      * is an object, it is the resulting vp[1].
      */
     frame.dup();
     frame.storeRegs(-2, shapeReg, objReg, knownPushedType(0));
-    BarrierState barrier = testBarrier(shapeReg, objReg);
+    BarrierState barrier = testBarrier(shapeReg, objReg, false, false,
+                                       /* force = */ pic.canCallHook);
 
     /*
      * Assert correctness of hardcoded offsets.
      * No type guard: type is asserted.
      */
     RETURN_IF_OOM(false);
 
     GetPropLabels &labels = pic.getPropLabels();
@@ -5011,16 +5060,17 @@ mjit::Compiler::jsop_callprop_dispatch(J
     }
 
     for (unsigned i = 0; i < rejoins.length(); i++)
         rejoins[i].linkTo(masm.label(), &masm);
 
     stubcc.leave();
     stubcc.masm.move(ImmPtr(atom), Registers::ArgReg1);
     OOL_STUBCALL(stubs::CallProp, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, -1);
 
     frame.dup();
     // THIS THIS
 
     frame.pushTypedPayload(JSVAL_TYPE_OBJECT, pushreg);
     // THIS THIS FUN
 
     frame.shift(-2);
@@ -5041,16 +5091,17 @@ mjit::Compiler::jsop_callprop(JSAtom *at
     if (singleton && singleton->isFunction() && !hasTypeBarriers(PC) &&
         testSingletonPropertyTypes(top, ATOM_TO_JSID(atom), &testObject)) {
         if (testObject) {
             Jump notObject = frame.testObject(Assembler::NotEqual, top);
             stubcc.linkExit(notObject, Uses(1));
             stubcc.leave();
             stubcc.masm.move(ImmPtr(atom), Registers::ArgReg1);
             OOL_STUBCALL(stubs::CallProp, REJOIN_FALLTHROUGH);
+            testPushedType(REJOIN_FALLTHROUGH, -1);
         }
 
         // THIS
 
         frame.dup();
         // THIS THIS
 
         frame.push(ObjectValue(*singleton));
@@ -5311,23 +5362,26 @@ mjit::Compiler::jsop_name(JSAtom *atom, 
     Jump inlineJump = masm.jump();
     {
         RESERVE_OOL_SPACE(stubcc.masm);
         pic.slowPathStart = stubcc.linkExit(inlineJump, Uses(0));
         stubcc.leave();
         passICAddress(&pic);
         pic.slowPathCall = OOL_STUBCALL(isCall ? ic::CallName : ic::Name, rejoin);
         CHECK_OOL_SPACE();
+        testPushedType(rejoin, 0);
     }
     pic.fastPathRejoin = masm.label();
 
     /* Initialize op labels. */
     ScopeNameLabels &labels = pic.scopeNameLabels();
     labels.setInlineJump(masm, pic.fastPathStart, inlineJump);
 
+    CHECK_IC_SPACE();
+
     /*
      * We can't optimize away the PIC for the NAME access itself, but if we've
      * only seen a single value pushed by this access, mark it as such and
      * recompile if a different value becomes possible.
      */
     JSObject *singleton = pushedSingleton(0);
     if (singleton) {
         frame.push(ObjectValue(*singleton));
@@ -5396,26 +5450,29 @@ mjit::Compiler::jsop_xname(JSAtom *atom)
     Jump inlineJump = masm.jump();
     {
         RESERVE_OOL_SPACE(stubcc.masm);
         pic.slowPathStart = stubcc.linkExit(inlineJump, Uses(1));
         stubcc.leave();
         passICAddress(&pic);
         pic.slowPathCall = OOL_STUBCALL(ic::XName, REJOIN_GETTER);
         CHECK_OOL_SPACE();
+        testPushedType(REJOIN_GETTER, -1);
     }
 
     pic.fastPathRejoin = masm.label();
 
     RETURN_IF_OOM(false);
 
     /* Initialize op labels. */
     ScopeNameLabels &labels = pic.scopeNameLabels();
     labels.setInlineJumpOffset(masm.differenceBetween(pic.fastPathStart, inlineJump));
 
+    CHECK_IC_SPACE();
+
     frame.pop();
     frame.pushRegs(pic.shapeReg, pic.objReg, knownPushedType(0));
 
     BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, /* testUndefined = */ true);
 
     stubcc.rejoin(Changes(1));
 
     pics.append(pic);
@@ -5490,17 +5547,18 @@ mjit::Compiler::jsop_bindname(JSAtom *at
 }
 
 #else /* !JS_POLYIC */
 
 void
 mjit::Compiler::jsop_name(JSAtom *atom, JSValueType type, bool isCall)
 {
     prepareStubCall(Uses(0));
-        INLINE_STUBCALL(isCall ? stubs::CallName : stubs::Name, REJOIN_FALLTHROUGH);
+    INLINE_STUBCALL(isCall ? stubs::CallName : stubs::Name, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, 0, /* ool = */ false);
     frame.pushSynced(type);
     if (isCall)
         frame.pushSynced(JSVAL_TYPE_UNKNOWN);
 }
 
 bool
 mjit::Compiler::jsop_xname(JSAtom *atom)
 {
@@ -5866,16 +5924,17 @@ mjit::Compiler::iterEnd()
     stubcc.rejoin(Changes(1));
 }
 
 void
 mjit::Compiler::jsop_getgname_slow(uint32 index)
 {
     prepareStubCall(Uses(0));
     INLINE_STUBCALL(stubs::GetGlobalName, REJOIN_GETTER);
+    testPushedType(REJOIN_GETTER, 0, /* ool = */ false);
     frame.pushSynced(JSVAL_TYPE_UNKNOWN);
 }
 
 void
 mjit::Compiler::jsop_bindgname()
 {
     if (globalObj) {
         frame.push(ObjectValue(*globalObj));
@@ -5979,16 +6038,20 @@ mjit::Compiler::jsop_getgname(uint32 ind
         frame.freeReg(reg);
     }
     stubcc.linkExit(shapeGuard, Uses(0));
 
     stubcc.leave();
     passMICAddress(ic);
     ic.slowPathCall = OOL_STUBCALL(ic::GetGlobalName, REJOIN_GETTER);
 
+    CHECK_IC_SPACE();
+
+    testPushedType(REJOIN_GETTER, 0);
+
     /* Garbage value. */
     uint32 slot = 1 << 24;
 
     masm.loadPtr(Address(objReg, offsetof(JSObject, slots)), objReg);
     Address address(objReg, slot);
 
     /* Allocate any register other than objReg. */
     RegisterID treg = frame.allocReg();
@@ -6250,16 +6313,17 @@ mjit::Compiler::jsop_setelem_slow()
     frame.pushSynced(JSVAL_TYPE_UNKNOWN);
 }
 
 void
 mjit::Compiler::jsop_getelem_slow()
 {
     prepareStubCall(Uses(2));
     INLINE_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, -2, /* ool = */ false);
     frame.popn(2);
     pushSyncedEntry(0);
 }
 
 void
 mjit::Compiler::jsop_unbrand()
 {
     prepareStubCall(Uses(1));
@@ -6834,49 +6898,64 @@ mjit::Compiler::leaveBlock()
 //
 bool
 mjit::Compiler::constructThis()
 {
     JS_ASSERT(isConstructing);
 
     JSFunction *fun = script->function();
 
-    if (cx->typeInferenceEnabled() && !fun->getType(cx)->unknownProperties()) {
+    do {
+        if (!cx->typeInferenceEnabled() || fun->getType(cx)->unknownProperties())
+            break;
+
         jsid id = ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom);
         types::TypeSet *protoTypes = fun->getType(cx)->getProperty(cx, id, false);
 
         JSObject *proto = protoTypes->getSingleton(cx, true);
-        if (proto) {
-            JSObject *templateObject = js_CreateThisForFunctionWithProto(cx, fun, proto);
-            if (!templateObject)
-                return false;
-
-            /*
-             * The template incorporates a shape and/or fixed slots from any
-             * newScript on its type, so make sure recompilation is triggered
-             * should this information change later.
-             */
-            if (templateObject->type()->newScript)
-                types::TypeSet::WatchObjectStateChange(cx, templateObject->type());
-
-            RegisterID result = frame.allocReg();
-            Jump emptyFreeList = masm.getNewObject(cx, result, templateObject);
-
-            stubcc.linkExit(emptyFreeList, Uses(0));
-            stubcc.leave();
-
-            stubcc.masm.move(ImmPtr(proto), Registers::ArgReg1);
-            OOL_STUBCALL(stubs::CreateThis, REJOIN_RESUME);
-
-            frame.setThis(result);
-
-            stubcc.rejoin(Changes(1));
-            return true;
-        }
-    }
+        if (!proto)
+            break;
+
+        /*
+         * Generate an inline path to create a 'this' object with the given
+         * prototype. Only do this if the type is actually known as a possible
+         * 'this' type of the script.
+         */
+        types::TypeObject *type = proto->getNewType(cx, fun);
+        if (!type)
+            return false;
+        if (!types::TypeScript::ThisTypes(script)->hasType(types::Type::ObjectType(type)))
+            break;
+
+        JSObject *templateObject = js_CreateThisForFunctionWithProto(cx, fun, proto);
+        if (!templateObject)
+            return false;
+
+        /*
+         * The template incorporates a shape and/or fixed slots from any
+         * newScript on its type, so make sure recompilation is triggered
+         * should this information change later.
+         */
+        if (templateObject->type()->newScript)
+            types::TypeSet::WatchObjectStateChange(cx, templateObject->type());
+
+        RegisterID result = frame.allocReg();
+        Jump emptyFreeList = masm.getNewObject(cx, result, templateObject);
+
+        stubcc.linkExit(emptyFreeList, Uses(0));
+        stubcc.leave();
+
+        stubcc.masm.move(ImmPtr(proto), Registers::ArgReg1);
+        OOL_STUBCALL(stubs::CreateThis, REJOIN_RESUME);
+
+        frame.setThis(result);
+
+        stubcc.rejoin(Changes(1));
+        return true;
+    } while (false);
 
     // Load the callee.
     frame.pushCallee();
 
     // Get callee.prototype.
     if (!jsop_getprop(cx->runtime->atomState.classPrototypeAtom, JSVAL_TYPE_UNKNOWN, false, false))
         return false;
 
@@ -6994,16 +7073,17 @@ mjit::Compiler::jsop_tableswitch(jsbytec
 #endif
 }
 
 void
 mjit::Compiler::jsop_callelem_slow()
 {
     prepareStubCall(Uses(2));
     INLINE_STUBCALL(stubs::CallElem, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, -2, /* ool = */ false);
     frame.popn(2);
     pushSyncedEntry(0);
     pushSyncedEntry(1);
 }
 
 void
 mjit::Compiler::jsop_toid()
 {
@@ -7152,17 +7232,22 @@ mjit::Compiler::updateJoinVarTypes()
 
     /* Update variable types for all new values at this bytecode. */
     const SlotValue *newv = analysis->newValues(PC);
     if (newv) {
         while (newv->slot) {
             if (newv->slot < TotalSlots(script)) {
                 VarType &vt = a->varTypes[newv->slot];
                 vt.types = analysis->getValueTypes(newv->value);
-                vt.type = vt.types->getKnownTypeTag(cx);
+                JSValueType newType = vt.types->getKnownTypeTag(cx);
+                if (newType != vt.type) {
+                    FrameEntry *fe = frame.getSlotEntry(newv->slot);
+                    frame.forgetLoopReg(fe);
+                }
+                vt.type = newType;
             }
             newv++;
         }
     }
 }
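/*
 * Aside: a stand-alone model (hypothetical names) of the change above --
 * when a variable's known type tag changes at a join point, any loop-carried
 * register holding it must be forgotten, since the register's payload layout
 * no longer matches the new type:
 */
enum class KnownType { Unknown, Int32, Double, Object };

struct VarTypeModel {
    KnownType type = KnownType::Unknown;
    bool hasLoopReg = false;
};

void updateJoinVarType(VarTypeModel &vt, KnownType newType) {
    if (newType != vt.type)
        vt.hasLoopReg = false;  // frame.forgetLoopReg(fe)
    vt.type = newType;
}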
 
 void
 mjit::Compiler::restoreVarType()
@@ -7231,21 +7316,16 @@ mjit::Compiler::monitored(jsbytecode *pc
 }
 
 bool
 mjit::Compiler::hasTypeBarriers(jsbytecode *pc)
 {
     if (!cx->typeInferenceEnabled())
         return false;
 
-#if 0
-    /* Stress test. */
-    return js_CodeSpec[*pc].format & JOF_TYPESET;
-#endif
-
     return analysis->typeBarriers(cx, pc) != NULL;
 }
 
 void
 mjit::Compiler::pushSyncedEntry(uint32 pushed)
 {
     frame.pushSynced(knownPushedType(pushed));
 }
@@ -7435,17 +7515,17 @@ mjit::Compiler::addTypeTest(types::TypeS
     for (unsigned i = 0; i < matches.length(); i++)
         matches[i].linkTo(masm.label(), &masm);
 
     return mismatch;
 }
 
 mjit::Compiler::BarrierState
 mjit::Compiler::testBarrier(RegisterID typeReg, RegisterID dataReg,
-                            bool testUndefined, bool testReturn)
+                            bool testUndefined, bool testReturn, bool force)
 {
     BarrierState state;
     state.typeReg = typeReg;
     state.dataReg = dataReg;
 
     if (!cx->typeInferenceEnabled() || !(js_CodeSpec[*PC].format & JOF_TYPESET))
         return state;
 
@@ -7457,28 +7537,22 @@ mjit::Compiler::testBarrier(RegisterID t
          */
         return state;
     }
 
     if (testReturn) {
         JS_ASSERT(!testUndefined);
         if (!analysis->getCode(PC).monitoredTypesReturn)
             return state;
-    } else if (!hasTypeBarriers(PC)) {
+    } else if (!hasTypeBarriers(PC) && !force) {
         if (testUndefined && !types->hasType(types::Type::UndefinedType()))
             state.jump.setJump(masm.testUndefined(Assembler::Equal, typeReg));
         return state;
     }
 
-#if 0
-    /* Stress test. */
-    state.jump.setJump(masm.testInt32(Assembler::NotEqual, typeReg));
-    return state;
-#endif
-
     types->addFreeze(cx);
 
     /* Cannot have type barriers when the result of the operation is already unknown. */
     JS_ASSERT(!types->unknown());
 
     state.jump = trySingleTypeTest(types, typeReg);
     if (!state.jump.isSet())
         state.jump.setJump(addTypeTest(types, typeReg, dataReg));
@@ -7507,8 +7581,43 @@ mjit::Compiler::finishBarrier(const Barr
 
     stubcc.syncExit(Uses(0));
     stubcc.leave();
 
     stubcc.masm.move(ImmPtr((void *) which), Registers::ArgReg1);
     OOL_STUBCALL(stubs::TypeBarrierHelper, rejoin);
     stubcc.rejoin(Changes(0));
 }
+
+void
+mjit::Compiler::testPushedType(RejoinState rejoin, int which, bool ool)
+{
+    if (!cx->typeInferenceEnabled() || !(js_CodeSpec[*PC].format & JOF_TYPESET))
+        return;
+
+    types::TypeSet *types = analysis->bytecodeTypes(PC);
+    if (types->unknown())
+        return;
+
+    Assembler &masm = ool ? stubcc.masm : this->masm;
+
+    JS_ASSERT(which <= 0);
+    Address address = (which == 0) ? frame.addressOfTop() : frame.addressOf(frame.peek(which));
+
+    Vector<Jump> mismatches(cx);
+    if (!masm.generateTypeCheck(cx, address, types, &mismatches)) {
+        oomInVector = true;
+        return;
+    }
+
+    Jump j = masm.jump();
+
+    for (unsigned i = 0; i < mismatches.length(); i++)
+        mismatches[i].linkTo(masm.label(), &masm);
+
+    masm.move(Imm32(which), Registers::ArgReg1);
+    if (ool)
+        OOL_STUBCALL(stubs::StubTypeHelper, rejoin);
+    else
+        INLINE_STUBCALL(stubs::StubTypeHelper, rejoin);
+
+    j.linkTo(masm.label(), &masm);
+}
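/*
 * Aside: a control-flow model (hypothetical names) of the testPushedType()
 * helper added above. Values matching the inferred type set jump straight
 * over the stub call; mismatches fall into a stub that records the
 * unexpected type before both paths rejoin. Sketch only:
 */
#include <functional>

void testPushedTypeModel(bool matchesInferredTypes, int which,
                         const std::function<void(int)> &stubTypeHelper) {
    if (!matchesInferredTypes) {
        // mismatches[i].linkTo(...): unexpected type, record it via the stub
        stubTypeHelper(which);  // stubs::StubTypeHelper analogue
    }
    // j.linkTo(masm.label()): the matching fast path rejoins here
}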
--- a/js/src/methodjit/Compiler.h
+++ b/js/src/methodjit/Compiler.h
@@ -179,30 +179,34 @@ class Compiler : public BaseCompiler
         DataLabelPtr fastNcodePatch;
         DataLabelPtr slowNcodePatch;
         bool hasFastNcode;
         bool hasSlowNcode;
         bool joinSlow;
     };
 
     struct BaseICInfo {
-        BaseICInfo(JSOp op) : op(op)
+        BaseICInfo(JSOp op) : op(op), canCallHook(false), forcedTypeBarrier(false)
         { }
         Label fastPathStart;
         Label fastPathRejoin;
         Label slowPathStart;
         Call slowPathCall;
         DataLabelPtr paramAddr;
         JSOp op;
+        bool canCallHook;
+        bool forcedTypeBarrier;
 
         void copyTo(ic::BaseIC &to, JSC::LinkBuffer &full, JSC::LinkBuffer &stub) {
             to.fastPathStart = full.locationOf(fastPathStart);
             to.fastPathRejoin = full.locationOf(fastPathRejoin);
             to.slowPathStart = stub.locationOf(slowPathStart);
             to.slowPathCall = stub.locationOf(slowPathCall);
+            to.canCallHook = canCallHook;
+            to.forcedTypeBarrier = forcedTypeBarrier;
             to.op = op;
             JS_ASSERT(to.op == op);
         }
     };
 
     struct GetElementICInfo : public BaseICInfo {
         GetElementICInfo(JSOp op) : BaseICInfo(op)
         { }
@@ -463,16 +467,17 @@ class Compiler : public BaseCompiler
     Label argsCheckFallthrough;
     Jump argsCheckJump;
 #endif
     bool debugMode_;
     bool addTraceHints;
     bool inlining_;
     bool hasGlobalReallocation;
     bool oomInVector;       // True if we have OOM'd appending to a vector. 
+    bool overflowICSpace;   // True if we added a constant pool in a reserved space.
     uint32 gcNumber;
     enum { NoApplyTricks, LazyArgsObj } applyTricks;
     PCLengthEntry *pcLengths;
 
     Compiler *thisFromCtor() { return this; }
 
     friend class CompilerAllocPolicy;
   public:
@@ -481,17 +486,17 @@ class Compiler : public BaseCompiler
 
     CompileStatus compile();
 
     Label getLabel() { return masm.label(); }
     bool knownJump(jsbytecode *pc);
     Label labelOf(jsbytecode *target, uint32 inlineIndex);
     void addCallSite(const InternalCallSite &callSite);
     void addReturnSite();
-    void inlineStubCall(void *stub, RejoinState rejoin);
+    void inlineStubCall(void *stub, RejoinState rejoin, Uses uses);
 
     bool debugMode() { return debugMode_; }
     bool inlining() { return inlining_; }
     bool constructing() { return isConstructing; }
 
     jsbytecode *outerPC() {
         if (a == outer)
             return PC;
@@ -560,19 +565,22 @@ class Compiler : public BaseCompiler
         RegisterID dataReg;
     };
 
     MaybeJump trySingleTypeTest(types::TypeSet *types, RegisterID typeReg);
     Jump addTypeTest(types::TypeSet *types, RegisterID typeReg, RegisterID dataReg);
     BarrierState pushAddressMaybeBarrier(Address address, JSValueType type, bool reuseBase,
                                          bool testUndefined = false);
     BarrierState testBarrier(RegisterID typeReg, RegisterID dataReg,
-                             bool testUndefined = false, bool testReturn = false);
+                             bool testUndefined = false, bool testReturn = false,
+                             bool force = false);
     void finishBarrier(const BarrierState &barrier, RejoinState rejoin, uint32 which);
 
+    void testPushedType(RejoinState rejoin, int which, bool ool = true);
+
     /* Non-emitting helpers. */
     void pushSyncedEntry(uint32 pushed);
     uint32 fullAtomIndex(jsbytecode *pc);
     bool jumpInScript(Jump j, jsbytecode *pc);
     bool compareTwoValues(JSContext *cx, JSOp op, const Value &lhs, const Value &rhs);
     bool canUseApplyTricks();
 
     /* Emitting helpers. */
@@ -770,24 +778,28 @@ class Compiler : public BaseCompiler
 
     void prepareStubCall(Uses uses);
     Call emitStubCall(void *ptr, DataLabelPtr *pinline);
 };
 
 // Given a stub call, emits the call into the inline assembly path. rejoin
 // indicates how to rejoin should this call trigger expansion/discarding.
 #define INLINE_STUBCALL(stub, rejoin)                                       \
-    inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin)
+    inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0))
+#define INLINE_STUBCALL_USES(stub, rejoin, uses)                            \
+    inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, uses)
 
 // Given a stub call, emits the call into the out-of-line assembly path.
 // Unlike the INLINE_STUBCALL variant, this returns the Call offset.
 #define OOL_STUBCALL(stub, rejoin)                                          \
-    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin)
+    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0))
+#define OOL_STUBCALL_USES(stub, rejoin, uses)                               \
+    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, uses)
 
 // Same as OOL_STUBCALL, but specifies a slot depth.
 #define OOL_STUBCALL_LOCAL_SLOTS(stub, rejoin, slots)                       \
-    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, (slots))
+    stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0), (slots))
 
 } /* namespace js */
 } /* namespace mjit */
 
 #endif
 
--- a/js/src/methodjit/FastOps.cpp
+++ b/js/src/methodjit/FastOps.cpp
@@ -629,17 +629,17 @@ mjit::Compiler::jsop_not()
             frame.pop();
             frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, reg);
             break;
           }
 
           default:
           {
             prepareStubCall(Uses(1));
-            INLINE_STUBCALL(stubs::ValueToBoolean, REJOIN_NONE);
+            INLINE_STUBCALL_USES(stubs::ValueToBoolean, REJOIN_NONE, Uses(1));
 
             RegisterID reg = Registers::ReturnReg;
             frame.takeReg(reg);
             masm.xor32(Imm32(1), reg);
 
             frame.pop();
             frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, reg);
             break;
@@ -1741,16 +1741,17 @@ mjit::Compiler::jsop_getelem_dense(bool 
         holeCheck = masm.fastArrayLoadSlot(slot, !isPacked, typeReg, dataReg);
     }
 
     if (!isPacked && !allowUndefined)
         stubcc.linkExit(holeCheck, Uses(2));
 
     stubcc.leave();
     OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, -2);
 
     frame.popn(2);
 
     BarrierState barrier;
     if (typeReg.isSet()) {
         frame.pushRegs(typeReg.reg(), dataReg, type);
         barrier = testBarrier(typeReg.reg(), dataReg, false);
     } else {
@@ -1832,16 +1833,17 @@ mjit::Compiler::jsop_getelem_args()
     } else {
         JS_ASSERT(key.reg() != dataReg);
         BaseIndex arg(actualsReg, key.reg(), masm.JSVAL_SCALE);
         masm.loadValueAsComponents(arg, typeReg, dataReg);
     }
 
     stubcc.leave();
     OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, -2);
 
     frame.popn(2);
     frame.pushRegs(typeReg, dataReg, knownPushedType(0));
     BarrierState barrier = testBarrier(typeReg, dataReg, false);
 
     stubcc.rejoin(Changes(2));
 
     finishBarrier(barrier, REJOIN_FALLTHROUGH, 0);
@@ -1963,16 +1965,17 @@ mjit::Compiler::jsop_getelem_typed(int a
     if (atype == TypedArray::TYPE_UINT32 &&
         !pushedTypes->hasType(types::Type::DoubleType())) {
         Jump isDouble = masm.testDouble(Assembler::Equal, typeReg.reg());
         stubcc.linkExit(isDouble, Uses(2));
     }
 
     stubcc.leave();
     OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
+    testPushedType(REJOIN_FALLTHROUGH, -2);
 
     frame.popn(2);
 
     BarrierState barrier;
     if (dataReg.isFPReg()) {
         frame.pushDouble(dataReg.fpreg());
     } else if (typeReg.isSet()) {
         frame.pushRegs(typeReg.reg(), dataReg.reg(), knownPushedType(0));
@@ -2144,21 +2147,27 @@ mjit::Compiler::jsop_getelem(bool isCall
         ic.slowPathCall = OOL_STUBCALL(ic::GetElement, REJOIN_FALLTHROUGH);
 #else
     if (isCall)
         ic.slowPathCall = OOL_STUBCALL(stubs::CallElem, REJOIN_FALLTHROUGH);
     else
         ic.slowPathCall = OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
 #endif
 
+    testPushedType(REJOIN_FALLTHROUGH, -2);
+
     ic.fastPathRejoin = masm.label();
+    ic.forcedTypeBarrier = analysis->getCode(PC).getStringElement;
+
+    CHECK_IC_SPACE();
 
     frame.popn(2);
     frame.pushRegs(ic.typeReg, ic.objReg, knownPushedType(0));
-    BarrierState barrier = testBarrier(ic.typeReg, ic.objReg, false);
+    BarrierState barrier = testBarrier(ic.typeReg, ic.objReg, false, false,
+                                       /* force = */ ic.forcedTypeBarrier);
     if (isCall)
         frame.pushSynced(knownPushedType(1));
 
     stubcc.rejoin(Changes(isCall ? 2 : 1));
 
 #ifdef JS_POLYIC
     if (!getElemICs.append(ic))
         return false;
@@ -2339,19 +2348,19 @@ mjit::Compiler::jsop_stricteq(JSOp op)
     }
 
     /* Is it impossible that both Values are ints? */
     if ((lhs->isTypeKnown() && lhs->isNotType(JSVAL_TYPE_INT32)) ||
         (rhs->isTypeKnown() && rhs->isNotType(JSVAL_TYPE_INT32))) {
         prepareStubCall(Uses(2));
 
         if (op == JSOP_STRICTEQ)
-            INLINE_STUBCALL(stubs::StrictEq, REJOIN_NONE);
+            INLINE_STUBCALL_USES(stubs::StrictEq, REJOIN_NONE, Uses(2));
         else
-            INLINE_STUBCALL(stubs::StrictNe, REJOIN_NONE);
+            INLINE_STUBCALL_USES(stubs::StrictNe, REJOIN_NONE, Uses(2));
 
         frame.popn(2);
         frame.pushSynced(JSVAL_TYPE_BOOLEAN);
         return;
     }
 
 #if !defined JS_CPU_ARM && !defined JS_CPU_SPARC
     /* Try an integer fast-path. */
@@ -2393,34 +2402,34 @@ mjit::Compiler::jsop_stricteq(JSOp op)
         masm.set32(cond, testReg, otherReg, resultReg);
     }
 
     frame.unpinReg(testReg);
 
     if (needStub) {
         stubcc.leave();
         if (op == JSOP_STRICTEQ)
-            OOL_STUBCALL(stubs::StrictEq, REJOIN_NONE);
+            OOL_STUBCALL_USES(stubs::StrictEq, REJOIN_NONE, Uses(2));
         else
-            OOL_STUBCALL(stubs::StrictNe, REJOIN_NONE);
+            OOL_STUBCALL_USES(stubs::StrictNe, REJOIN_NONE, Uses(2));
     }
 
     frame.popn(2);
     frame.pushTypedPayload(JSVAL_TYPE_BOOLEAN, resultReg);
 
     if (needStub)
         stubcc.rejoin(Changes(1));
 #else
     /* TODO: Port set32() logic to ARM. */
     prepareStubCall(Uses(2));
 
     if (op == JSOP_STRICTEQ)
-        INLINE_STUBCALL(stubs::StrictEq, REJOIN_NONE);
+        INLINE_STUBCALL_USES(stubs::StrictEq, REJOIN_NONE, Uses(2));
     else
-        INLINE_STUBCALL(stubs::StrictNe, REJOIN_NONE);
+        INLINE_STUBCALL_USES(stubs::StrictNe, REJOIN_NONE, Uses(2));
 
     frame.popn(2);
     frame.pushSynced(JSVAL_TYPE_BOOLEAN);
     return;
 #endif
 }
 
 void
--- a/js/src/methodjit/FrameState.cpp
+++ b/js/src/methodjit/FrameState.cpp
@@ -2867,28 +2867,28 @@ FrameState::clearTemporaries()
         forgetAllRegs(fe);
         fe->resetSynced();
     }
 
     temporariesTop = temporaries;
 }
 
 Vector<TemporaryCopy> *
-FrameState::getTemporaryCopies()
+FrameState::getTemporaryCopies(Uses uses)
 {
     /* :XXX: handle OOM */
     Vector<TemporaryCopy> *res = NULL;
 
     for (FrameEntry *fe = temporaries; fe < temporariesTop; fe++) {
         if (!fe->isTracked())
             continue;
         if (fe->isCopied()) {
             for (uint32 i = fe->trackerIndex() + 1; i < tracker.nentries; i++) {
                 FrameEntry *nfe = tracker[i];
-                if (!deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == fe) {
+                if (!deadEntry(nfe, uses.nuses) && nfe->isCopy() && nfe->copyOf() == fe) {
                     if (!res)
                         res = cx->new_< Vector<TemporaryCopy> >(cx);
                     res->append(TemporaryCopy(addressOf(nfe), addressOf(fe)));
                 }
             }
         }
     }
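/*
 * Aside: a stand-alone model (hypothetical names) of the liveness filter
 * that the new 'uses' parameter adds above -- copies living in the top
 * 'nuses' stack slots are about to be popped, so they are skipped when
 * collecting temporary copies for an invariant call:
 */
#include <vector>

struct EntryModel { int depth; bool copyOfTemp; };

std::vector<EntryModel> liveCopies(const std::vector<EntryModel> &entries,
                                   int stackDepth, int nuses) {
    std::vector<EntryModel> out;
    for (const EntryModel &e : entries) {
        bool dead = e.depth >= stackDepth - nuses;  // deadEntry(nfe, uses.nuses)
        if (e.copyOfTemp && !dead)
            out.push_back(e);
    }
    return out;
}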
 
--- a/js/src/methodjit/FrameState.h
+++ b/js/src/methodjit/FrameState.h
@@ -942,18 +942,21 @@ class FrameState
 
     /* Maximum number of analysis temporaries the FrameState can track. */
     static const uint32 TEMPORARY_LIMIT = 10;
 
     uint32 allocTemporary();  /* -1 if limit reached. */
     void clearTemporaries();
     inline FrameEntry *getTemporary(uint32 which);
 
-    /* Return NULL or a new vector with all current copies of temporaries. */
-    Vector<TemporaryCopy> *getTemporaryCopies();
+    /*
+     * Return NULL or a new vector with all current copies of temporaries,
+     * excluding those about to be popped per 'uses'.
+     */
+    Vector<TemporaryCopy> *getTemporaryCopies(Uses uses);
 
     inline void syncAndForgetFe(FrameEntry *fe, bool markSynced = false);
     inline void forgetLoopReg(FrameEntry *fe);
 
     /*
      * Get an address for the specified name access in another script.
      * The compiler owns the result's base register.
      */
--- a/js/src/methodjit/InvokeHelpers.cpp
+++ b/js/src/methodjit/InvokeHelpers.cpp
@@ -1327,32 +1327,44 @@ js_InternalInterpret(void *returnData, v
             skipTrap = true;
         break;
 
       case REJOIN_FALLTHROUGH:
         f.regs.pc = nextpc;
         break;
 
       case REJOIN_NATIVE:
-      case REJOIN_NATIVE_LOWERED: {
+      case REJOIN_NATIVE_LOWERED:
+      case REJOIN_NATIVE_GETTER: {
         /*
          * We don't rejoin until after the native stub finishes execution, in
          * which case the return value will be in memory. For lowered natives,
-         * the return value will be in the 'this' value's slot.
+         * the return value will be in the 'this' value's slot. For getters,
+         * the result is at nextsp[0] (see ic::CallProp).
          */
-        if (rejoin == REJOIN_NATIVE_LOWERED)
+        if (rejoin == REJOIN_NATIVE_LOWERED) {
             nextsp[-1] = nextsp[0];
+        } else if (rejoin == REJOIN_NATIVE_GETTER) {
+            if (js_CodeSpec[op].format & JOF_CALLOP) {
+                /*
+                 * If we went through jsop_callprop_obj then the 'this' value
+                 * is still in its original slot and hasn't been shifted yet,
+                 * so fix that now. Yuck.
+                 */
+                if (nextsp[-2].isObject())
+                    nextsp[-1] = nextsp[-2];
+                nextsp[-2] = nextsp[0];
+            } else {
+                nextsp[-1] = nextsp[0];
+            }
+        }
 
         /* Release this reference on the orphaned native stub. */
         RemoveOrphanedNative(cx, fp);
 
-        /*
-         * Note: there is no need to monitor the result of the native, the
-         * native stub will always do a type check before finishing.
-         */
         f.regs.pc = nextpc;
         break;
       }
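
/*
 * A minimal sketch of the stack layouts the REJOIN_NATIVE_GETTER fixup above
 * assumes, with the getter's result sitting at nextsp[0]:
 *
 *   GETPROP:   [.., obj]        [result]  ->  [.., result]
 *   CALLPROP:  [.., obj, junk]  [result]  ->  [.., result, obj]
 *
 * In the CALLPROP case the 'this' value may still be in its original slot
 * (nextsp[-2]) if we went through jsop_callprop_obj, hence the extra shift.
 */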
 
       case REJOIN_PUSH_BOOLEAN:
         nextsp[-1].setBoolean(returnReg != NULL);
         f.regs.pc = nextpc;
         break;
@@ -1561,15 +1573,25 @@ js_InternalInterpret(void *returnData, v
       default:
         JS_NOT_REACHED("Missing rejoin");
     }
 
     if (nextDepth == uint32(-1))
         nextDepth = analysis->getCode(f.regs.pc).stackDepth;
     f.regs.sp = fp->base() + nextDepth;
 
+    /*
+     * Monitor the result of the previous op when finishing a JOF_TYPESET op.
+     * The result may not have been marked if we bailed out while inside a stub
+     * for the op.
+     */
+    if (f.regs.pc == nextpc && (js_CodeSpec[op].format & JOF_TYPESET)) {
+        int which = (js_CodeSpec[op].format & JOF_CALLOP) ? -2 : -1;  /* Yuck. */
+        types::TypeScript::Monitor(cx, script, pc, f.regs.sp[which]);
+    }
+
     /* Mark the entry frame as unfinished, and update the regs to resume at. */
     JaegerStatus status = skipTrap ? Jaeger_UnfinishedAtTrap : Jaeger_Unfinished;
     cx->compartment->jaegerCompartment()->setLastUnfinished(status);
     *f.oldregs = f.regs;
 
     return NULL;
 }
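
/*
 * A minimal sketch (helper name hypothetical) of the slot selection used in
 * the JOF_TYPESET monitoring above: the pushed result of a JOF_TYPESET op
 * lives at sp[-1], except for JOF_CALLOP ops, which also push the 'this'
 * value on top, leaving the result at sp[-2].
 */
static inline const js::Value &
TypesetResultSlot(js::FrameRegs &regs, JSOp op)
{
    int which = (js_CodeSpec[op].format & JOF_CALLOP) ? -2 : -1;
    return regs.sp[which];
}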
--- a/js/src/methodjit/LoopState.cpp
+++ b/js/src/methodjit/LoopState.cpp
@@ -190,25 +190,25 @@ LoopState::addJoin(unsigned index, bool 
 {
     StubJoin r;
     r.index = index;
     r.script = script;
     loopJoins.append(r);
 }
 
 void
-LoopState::addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex)
+LoopState::addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex, Uses uses)
 {
     RestoreInvariantCall call;
     call.jump = jump;
     call.label = label;
     call.ool = ool;
     call.entry = entry;
     call.patchIndex = patchIndex;
-    call.temporaryCopies = frame.getTemporaryCopies();
+    call.temporaryCopies = frame.getTemporaryCopies(uses);
 
     restoreInvariantCalls.append(call);
 }
 
 void
 LoopState::flushLoop(StubCompiler &stubcc)
 {
     clearLoopRegisters();
@@ -1815,17 +1815,16 @@ LoopState::analyzeLoopBody(unsigned fram
 
           case JSOP_EVAL:
           case JSOP_FUNCALL:
           case JSOP_FUNAPPLY:
           case JSOP_NEW:
             skipAnalysis = true;
             break;
 
-          case JSOP_SETHOLE:
           case JSOP_SETELEM: {
             SSAValue objValue = analysis->poppedValue(pc, 2);
             SSAValue elemValue = analysis->poppedValue(pc, 1);
 
             TypeSet *objTypes = analysis->getValueTypes(objValue);
             TypeSet *elemTypes = analysis->getValueTypes(elemValue);
 
             /*
@@ -1839,17 +1838,17 @@ LoopState::analyzeLoopBody(unsigned fram
 
             objTypes->addFreeze(cx);
             for (unsigned i = 0; i < objTypes->getObjectCount(); i++) {
                 TypeObject *object = objTypes->getTypeObject(i);
                 if (!object)
                     continue;
                 if (!addModifiedProperty(object, JSID_VOID))
                     return;
-                if (op == JSOP_SETHOLE && !addGrowArray(object))
+                if (analysis->getCode(pc).arrayWriteHole && !addGrowArray(object))
                     return;
             }
 
             if (constrainedLoop && !definiteArrayAccess(objValue, elemValue))
                 constrainedLoop = false;
             break;
           }
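
/*
 * A minimal sketch of the predicate change above: with JSOP_SETHOLE gone,
 * the bytecode analysis records per-pc whether a SETELEM may write a hole,
 * and the loop analysis queries that flag directly:
 *
 *   bool mayGrowArray = analysis->getCode(pc).arrayWriteHole;
 */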
 
--- a/js/src/methodjit/LoopState.h
+++ b/js/src/methodjit/LoopState.h
@@ -248,17 +248,17 @@ class LoopState : public MacroAssemblerT
     {
         if (uint32(pc - outerScript->code) == lifetime->entry && lifetime->entry != lifetime->head)
             reachedEntryPoint = true;
     }
 
     bool generatingInvariants() { return !skipAnalysis; }
 
     /* Add a call with trailing jump/label, after which invariants need to be restored. */
-    void addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex);
+    void addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex, Uses uses);
 
     uint32 headOffset() { return lifetime->head; }
     uint32 getLoopRegs() { return loopRegs.freeMask; }
 
     Jump entryJump() { return entry; }
     uint32 entryOffset() { return lifetime->entry; }
     uint32 backedgeOffset() { return lifetime->backedge; }
 
--- a/js/src/methodjit/MachineRegs.h
+++ b/js/src/methodjit/MachineRegs.h
@@ -113,33 +113,35 @@ struct Registers {
 #endif
 
     // Register that homes the current JSStackFrame.
 #if defined(JS_CPU_X86)
     static const RegisterID JSFrameReg = JSC::X86Registers::ebp;
 #elif defined(JS_CPU_X64)
     static const RegisterID JSFrameReg = JSC::X86Registers::ebx;
 #elif defined(JS_CPU_ARM)
-    static const RegisterID JSFrameReg = JSC::ARMRegisters::r11;
+    static const RegisterID JSFrameReg = JSC::ARMRegisters::r10;
 #elif defined(JS_CPU_SPARC)
     static const RegisterID JSFrameReg = JSC::SparcRegisters::l0;
 #endif
 
 #if defined(JS_CPU_X86) || defined(JS_CPU_X64)
     static const RegisterID ReturnReg = JSC::X86Registers::eax;
 # if defined(JS_CPU_X86) || defined(_WIN64)
     static const RegisterID ArgReg0 = JSC::X86Registers::ecx;
     static const RegisterID ArgReg1 = JSC::X86Registers::edx;
 #  if defined(JS_CPU_X64)
     static const RegisterID ArgReg2 = JSC::X86Registers::r8;
+    static const RegisterID ArgReg3 = JSC::X86Registers::r9;
 #  endif
 # else
     static const RegisterID ArgReg0 = JSC::X86Registers::edi;
     static const RegisterID ArgReg1 = JSC::X86Registers::esi;
     static const RegisterID ArgReg2 = JSC::X86Registers::edx;
+    static const RegisterID ArgReg3 = JSC::X86Registers::ecx;
 # endif
 #elif JS_CPU_ARM
     static const RegisterID ReturnReg = JSC::ARMRegisters::r0;
     static const RegisterID ArgReg0 = JSC::ARMRegisters::r0;
     static const RegisterID ArgReg1 = JSC::ARMRegisters::r1;
     static const RegisterID ArgReg2 = JSC::ARMRegisters::r2;
 #elif JS_CPU_SPARC
     static const RegisterID ReturnReg = JSC::SparcRegisters::o0;
@@ -220,19 +222,18 @@ struct Registers {
     // r12 is IP, and is used for stub calls.
 
     static const uint32 SavedRegs =
           (1 << JSC::ARMRegisters::r4)
         | (1 << JSC::ARMRegisters::r5)
         | (1 << JSC::ARMRegisters::r6)
         | (1 << JSC::ARMRegisters::r7)
     // r8 is reserved as a scratch register for the assembler.
-        | (1 << JSC::ARMRegisters::r9)
-        | (1 << JSC::ARMRegisters::r10);
-    // r11 is reserved for JSFrameReg.
+        | (1 << JSC::ARMRegisters::r9);
+    // r10 is reserved for JSFrameReg.
     // r13 is SP and must always point to VMFrame whilst in generated code.
     // r14 is LR and is used for return sequences.
     // r15 is PC (program counter).
 
     static const uint32 SingleByteRegs = TempRegs | SavedRegs;
 #elif defined(JS_CPU_SPARC)
     static const uint32 TempRegs =
           (1 << JSC::SparcRegisters::o0)
@@ -383,16 +384,25 @@ struct Registers {
         | (1 << JSC::SparcRegisters::f4)
         | (1 << JSC::SparcRegisters::f6)
         ) << TotalRegisters;
     static const FPRegisterID FPConversionTemp = JSC::SparcRegisters::f8;
 #else
 # error "Unsupported platform"
 #endif
 
+    /* Temp reg that can be clobbered when setting up a fallible fast or ABI call. */
+#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
+    static const RegisterID ClobberInCall = JSC::X86Registers::ecx;
+#elif defined(JS_CPU_ARM)
+    static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
+#elif defined(JS_CPU_SPARC)
+    static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
+#endif
+
     static const uint32 AvailFPRegs = TempFPRegs;
 
     static inline uint32 maskReg(FPRegisterID reg) {
         return (1 << reg) << TotalRegisters;
     }
 
     /* Common code. */
 
@@ -407,16 +417,30 @@ struct Registers {
     /* Get a register which is not live before a FASTCALL. */
     static inline RegisterID tempCallReg() {
         Registers regs(AvailRegs);
         regs.takeReg(Registers::ArgReg0);
         regs.takeReg(Registers::ArgReg1);
         return regs.takeAnyReg().reg();
     }
 
+    /* Get the set of registers not live before a normal ABI call with at most four args. */
+    static inline Registers tempCallRegMask() {
+        Registers regs(AvailRegs);
+#ifndef JS_CPU_X86
+        regs.takeReg(ArgReg0);
+        regs.takeReg(ArgReg1);
+        regs.takeReg(ArgReg2);
+#if defined(JS_CPU_SPARC) || defined(JS_CPU_X64)
+        regs.takeReg(ArgReg3);
+#endif
+#endif
+        return regs;
+    }
+
     Registers(uint32 freeMask)
       : freeMask(freeMask)
     { }
 
     Registers(const Registers &other)
       : freeMask(other.freeMask)
     { }
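
/*
 * A minimal usage sketch for tempCallRegMask(), mirroring the CallCompiler
 * change in MonoIC.cpp below: take the mask, then pull scratch registers
 * that cannot clash with the outgoing ABI argument registers.
 */
Registers tempRegs = Registers::tempCallRegMask();
RegisterID t0 = tempRegs.takeAnyReg().reg();  /* safe while storing args */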
 
--- a/js/src/methodjit/MethodJIT.cpp
+++ b/js/src/methodjit/MethodJIT.cpp
@@ -477,17 +477,17 @@ JS_STATIC_ASSERT(sizeof(VMFrame)%8 == 0)
 JS_STATIC_ASSERT(offsetof(VMFrame, savedLR) ==          (4*21));
 JS_STATIC_ASSERT(offsetof(VMFrame, entryfp) ==          (4*10));
 JS_STATIC_ASSERT(offsetof(VMFrame, stackLimit) ==       (4*9));
 JS_STATIC_ASSERT(offsetof(VMFrame, cx) ==               (4*8));
 JS_STATIC_ASSERT(VMFrame::offsetOfFp ==                 (4*7));
 JS_STATIC_ASSERT(offsetof(VMFrame, scratch) ==          (4*3));
 JS_STATIC_ASSERT(offsetof(VMFrame, previous) ==         (4*2));
 
-JS_STATIC_ASSERT(JSFrameReg == JSC::ARMRegisters::r11);
+JS_STATIC_ASSERT(JSFrameReg == JSC::ARMRegisters::r10);
 JS_STATIC_ASSERT(JSReturnReg_Type == JSC::ARMRegisters::r5);
 JS_STATIC_ASSERT(JSReturnReg_Data == JSC::ARMRegisters::r4);
 
 #ifdef MOZ_THUMB2
 #define FUNCTION_HEADER_EXTRA \
   ".align 2\n" \
   ".thumb\n" \
   ".thumb_func\n"
@@ -542,34 +542,34 @@ SYMBOL_STRING(JaegerTrampoline) ":"     
 "   push    {r3}"                               "\n"    /* stackLimit */
 "   push    {r0}"                               "\n"    /* cx */
 "   push    {r1}"                               "\n"    /* regs.fp */
     /* Remaining fields are set elsewhere, but we need to leave space for them. */
 "   sub     sp, sp, #(4*7)"                     "\n"
 
     /* Preserve 'code' (r2) in an arbitrary callee-saved register. */
 "   mov     r4, r2"                             "\n"
-    /* Preserve 'fp' (r1) in r11 (JSFrameReg). */
-"   mov     r11, r1"                            "\n"
+    /* Preserve 'fp' (r1) in r10 (JSFrameReg). */
+"   mov     r10, r1"                            "\n"
 
 "   mov     r0, sp"                             "\n"
 "   blx  " SYMBOL_STRING_VMFRAME(SetVMFrameRegs)   "\n"
 "   mov     r0, sp"                             "\n"
 "   blx  " SYMBOL_STRING_VMFRAME(PushActiveVMFrame)"\n"
 
     /* Call the compiled JavaScript function. */
 "   bx     r4"                                  "\n"
 );
 
 asm (
 ".text\n"
 FUNCTION_HEADER_EXTRA
 ".globl " SYMBOL_STRING(JaegerTrampolineReturn)   "\n"
 SYMBOL_STRING(JaegerTrampolineReturn) ":"         "\n"
-"   strd    r4, r5, [r11, #24]"             "\n" /* fp->rval type,data */
+"   strd    r4, r5, [r10, #24]"             "\n" /* fp->rval type,data */
 
     /* Tidy up. */
 "   mov     r0, sp"                         "\n"
 "   blx  " SYMBOL_STRING_VMFRAME(PopActiveVMFrame) "\n"
 
     /* Skip past the parameters we pushed (such as cx and the like). */
 "   add     sp, sp, #(4*7 + 4*6)"           "\n"
 
@@ -605,31 +605,31 @@ SYMBOL_STRING(JaegerThrowpoline) ":"    
 
 asm (
 ".text\n"
 FUNCTION_HEADER_EXTRA
 ".globl " SYMBOL_STRING(JaegerInterpolineScripted)  "\n"
 SYMBOL_STRING(JaegerInterpolineScripted) ":"        "\n"
     /* The only difference between JaegerInterpoline and JaegerInterpolineScripted is that the
      * scripted variant has to walk up to the previous StackFrame first. */
-"   ldr     r11, [r11, #(4*4)]"             "\n"    /* Load f->prev_ */
-"   str     r11, [sp, #(4*7)]"              "\n"    /* Update f->regs->fp_ */
+"   ldr     r10, [r10, #(4*4)]"             "\n"    /* Load f->prev_ */
+"   str     r10, [sp, #(4*7)]"              "\n"    /* Update f->regs->fp_ */
     /* Fall through into JaegerInterpoline. */
 
 FUNCTION_HEADER_EXTRA
 ".globl " SYMBOL_STRING(JaegerInterpoline)  "\n"
 SYMBOL_STRING(JaegerInterpoline) ":"        "\n"
 "   mov     r3, sp"                         "\n"    /* f */
 "   mov     r2, r0"                         "\n"    /* returnReg */
 "   mov     r1, r5"                         "\n"    /* returnType */
 "   mov     r0, r4"                         "\n"    /* returnData */
 "   blx  " SYMBOL_STRING_RELOC(js_InternalInterpret) "\n"
 "   cmp     r0, #0"                         "\n"
-"   ldr     ip, [sp, #(4*7)]"               "\n"    /* Load (StackFrame*)f->regs->fp_ */
-"   ldrd    r4, r5, [ip, #(4*6)]"           "\n"    /* Load rval payload and type. */
+"   ldr     r10, [sp, #(4*7)]"              "\n"    /* Load (StackFrame*)f->regs->fp_ */
+"   ldrd    r4, r5, [r10, #(4*6)]"          "\n"    /* Load rval payload and type. */
 "   ldr     r1, [sp, #(4*3)]"               "\n"    /* Load scratch. */
 "   it      ne"                             "\n"
 "   bxne    r0"                             "\n"
     /* Tidy up, then return 0. */
 "   mov     r0, sp"                         "\n"
 "   blx  " SYMBOL_STRING_VMFRAME(PopActiveVMFrame) "\n"
 "   add     sp, sp, #(4*7 + 4*6)"           "\n"
 "   mov     r0, #0"                         "\n"
@@ -1110,16 +1110,22 @@ mjit::JITScript::~JITScript()
 
     for (JSC::ExecutablePool **pExecPool = execPools.begin();
          pExecPool != execPools.end();
          ++pExecPool)
     {
         (*pExecPool)->release();
     }
 
+    for (unsigned i = 0; i < nativeCallStubs.length(); i++) {
+        JSC::ExecutablePool *pool = nativeCallStubs[i].pool;
+        if (pool)
+            pool->release();
+    }
+
     ic::CallICInfo *callICs_ = callICs();
     for (uint32 i = 0; i < nCallICs; i++) {
         callICs_[i].releasePools();
         if (callICs_[i].fastGuardedObject)
             callICs_[i].purgeGuardedObject();
     }
 
     // Fixup any ICs still referring to this JIT.
--- a/js/src/methodjit/MethodJIT.h
+++ b/js/src/methodjit/MethodJIT.h
@@ -253,20 +253,22 @@ enum RejoinState {
     REJOIN_TRAP,
 
     /* State is coherent for the start of the next (fallthrough) bytecode. */
     REJOIN_FALLTHROUGH,
 
     /*
      * As for REJOIN_FALLTHROUGH, but holds a reference on the compartment's
      * orphaned native pools which needs to be reclaimed by InternalInterpret.
-     * The return value needs to be adjusted if REJOIN_NATIVE_LOWERED.
+     * The return value needs to be adjusted if REJOIN_NATIVE_LOWERED.
+     * REJOIN_NATIVE_GETTER is used for ABI calls made for property accesses.
      */
     REJOIN_NATIVE,
     REJOIN_NATIVE_LOWERED,
+    REJOIN_NATIVE_GETTER,
 
     /*
      * Dummy rejoin stored in VMFrames to indicate they return into a native
      * stub (and their FASTCALL return address should not be observed) but
      * that they have already been patched and can be ignored.
      */
     REJOIN_NATIVE_PATCHED,
 
@@ -548,16 +550,40 @@ struct NativeMapEntry {
 };
 
 /* Per-op counts of performance metrics. */
 struct PCLengthEntry {
     double          codeLength; /* amount of inline code generated */
     double          picsLength; /* amount of PIC stub code generated */
 };
 
+/*
+ * Pools and patch locations for managing stubs for non-FASTCALL C++ calls made
+ * from native call and PropertyOp stubs. Ownership of these may be transferred
+ * into the orphanedNativePools for the compartment.
+ */
+struct NativeCallStub {
+    /* PC for the stub. Native call stubs cannot be added for inline frames. */
+    jsbytecode *pc;
+
+    /* Pool for the stub, NULL if it has been removed from the script. */
+    JSC::ExecutablePool *pool;
+
+    /*
+     * Fallthrough jump returning to jitcode which may be patched during
+     * recompilation. On x64 this is an indirect jump to avoid issues with far
+     * jumps on relative branches.
+     */
+#ifdef JS_CPU_X64
+    JSC::CodeLocationDataLabelPtr jump;
+#else
+    JSC::CodeLocationJump jump;
+#endif
+};
+
 struct JITScript {
     typedef JSC::MacroAssemblerCodeRef CodeRef;
     CodeRef         code;       /* pool & code addresses */
 
     JSScript        *script;
 
     void            *invokeEntry;       /* invoke address */
     void            *fastEntry;         /* cached entry, fastest */
@@ -606,16 +632,19 @@ struct JITScript {
     JSCList          callers;
 
 #ifdef JS_MONOIC
     // Additional ExecutablePools that IC stubs were generated into.
     typedef Vector<JSC::ExecutablePool *, 0, SystemAllocPolicy> ExecPoolVector;
     ExecPoolVector execPools;
 #endif
 
+    // Additional ExecutablePools for native call and getter stubs.
+    Vector<NativeCallStub, 0, SystemAllocPolicy> nativeCallStubs;
+
     NativeMapEntry *nmap() const;
     js::mjit::InlineFrame *inlineFrames() const;
     js::mjit::CallSite *callSites() const;
     JSObject **rootedObjects() const;
 #ifdef JS_MONOIC
     ic::GetGlobalNameIC *getGlobalNames() const;
     ic::SetGlobalNameIC *setGlobalNames() const;
     ic::CallICInfo *callICs() const;
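
/*
 * A minimal lifecycle sketch for nativeCallStubs, assembled from this patch:
 * NativeStubLinker::init() (MonoIC.cpp) appends one entry per generated
 * stub; ~JITScript releases any pools still owned; Recompiler::patchNative
 * (Retcon.cpp) steals a pool by moving it to the compartment's
 * orphanedNativePools and clearing stub.pool:
 *
 *   compartment->jaegerCompartment()->orphanedNativePools.append(stub.pool);
 *   stub.pool = NULL;  // mark stolen so ~JITScript skips it
 */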
--- a/js/src/methodjit/MonoIC.cpp
+++ b/js/src/methodjit/MonoIC.cpp
@@ -550,16 +550,111 @@ SlowCallFromIC(VMFrame &f, ic::CallICInf
 
 static void * JS_FASTCALL
 SlowNewFromIC(VMFrame &f, ic::CallICInfo *ic)
 {
     stubs::SlowNew(f, ic->frameSize.staticArgc());
     return NULL;
 }
 
+bool
+NativeStubLinker::init(JSContext *cx)
+{
+    JSC::ExecutablePool *pool = LinkerHelper::init(cx);
+    if (!pool)
+        return false;
+
+    NativeCallStub stub;
+    stub.pc = pc;
+    stub.pool = pool;
+    stub.jump = locationOf(done);
+    if (!jit->nativeCallStubs.append(stub)) {
+        pool->release();
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Generate epilogue code to run after a stub ABI call to a native or getter.
+ * This checks for an exception, and either type checks the result against the
+ * observed types for the opcode or loads the result into a register pair
+ * (it will go through a type barrier afterwards).
+ */
+bool
+mjit::NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
+                         int32 initialFrameDepth, int32 vpOffset,
+                         MaybeRegisterID typeReg, MaybeRegisterID dataReg)
+{
+    /* Reload fp, which may have been clobbered by restoreStackBase(). */
+    masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
+
+    Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
+                                          Registers::ReturnReg);
+
+    Address resultAddress(JSFrameReg, vpOffset);
+
+    Vector<Jump> mismatches(f.cx);
+    if (f.cx->typeInferenceEnabled()) {
+        if (!typeReg.isSet()) {
+            /*
+             * Test the result of this native against the known result type set
+             * for the call. We don't assume knowledge about the types that
+             * natives can return, except when generating specialized paths in
+             * FastBuiltins.
+             */
+            types::TypeSet *types = f.script()->analysis()->bytecodeTypes(f.pc());
+            if (!masm.generateTypeCheck(f.cx, resultAddress, types, &mismatches))
+                THROWV(false);
+        }
+
+        /*
+         * Can no longer trigger recompilation in this stub, clear the stub
+         * rejoin on the VMFrame.
+         */
+        masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
+    }
+
+    if (typeReg.isSet())
+        masm.loadValueAsComponents(resultAddress, typeReg.reg(), dataReg.reg());
+
+    /*
+     * The final jump is an indirect jump on x64, so that we'll always be
+     * able to repatch it to the interpoline later.
+     */
+    Label finished = masm.label();
+#ifdef JS_CPU_X64
+    JSC::MacroAssembler::DataLabelPtr done = masm.moveWithPatch(ImmPtr(NULL), Registers::ValueReg);
+    masm.jump(Registers::ValueReg);
+#else
+    Jump done = masm.jump();
+#endif
+
+    /* Generate a call for type check failures on the native result. */
+    if (!mismatches.empty()) {
+        for (unsigned i = 0; i < mismatches.length(); i++)
+            mismatches[i].linkTo(masm.label(), &masm);
+        masm.addPtr(Imm32(vpOffset), JSFrameReg, Registers::ArgReg1);
+        masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::TypeBarrierReturn),
+                            f.regs.pc, NULL, initialFrameDepth);
+        masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
+        masm.jump().linkTo(finished, &masm);
+    }
+
+    /* Move JaegerThrowpoline into register for very far jump on x64. */
+    hasException.linkTo(masm.label(), &masm);
+    if (f.cx->typeInferenceEnabled())
+        masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
+    masm.throwInJIT();
+
+    *result = done;
+    return true;
+}
+
 /*
  * Calls have an inline path and an out-of-line path. The inline path is used
  * in the fastest case: the method has JIT'd code, and |argc == nargs|.
  * 
  * The inline path and OOL path are separated by a guard on the identity of
  * the callee object. This guard starts as NULL and always fails on the first
  * hit. On the OOL path, the callee is verified to be both a function and a
  * scripted function. If these conditions hold, |ic::Call| is invoked.
@@ -888,42 +983,22 @@ class CallCompiler : public BaseCompiler
         /* N.B. After this call, the frame will have a dynamic frame size. */
         if (ic.frameSize.isDynamic()) {
             masm.bumpStubCounter(f.script(), f.pc(), Registers::tempCallReg());
             masm.fallibleVMCall(cx->typeInferenceEnabled(),
                                 JS_FUNC_TO_DATA_PTR(void *, ic::SplatApplyArgs),
                                 f.regs.pc, NULL, initialFrameDepth);
         }
 
-        Registers tempRegs(Registers::AvailRegs);
-#ifndef JS_CPU_X86
-        tempRegs.takeReg(Registers::ArgReg0);
-        tempRegs.takeReg(Registers::ArgReg1);
-        tempRegs.takeReg(Registers::ArgReg2);
-#endif
+        Registers tempRegs = Registers::tempCallRegMask();
         RegisterID t0 = tempRegs.takeAnyReg().reg();
         masm.bumpStubCounter(f.script(), f.pc(), t0);
 
-        /* Store pc. */
-        masm.storePtr(ImmPtr(f.regs.pc),
-                      FrameAddress(offsetof(VMFrame, regs.pc)));
-
-        /* Store inlined. */
-        masm.storePtr(ImmPtr(f.regs.inlined()),
-                      FrameAddress(VMFrame::offsetOfInlined));
-
-        /* Store sp (if not already set by ic::SplatApplyArgs). */
-        if (ic.frameSize.isStatic()) {
-            uint32 spOffset = sizeof(StackFrame) + initialFrameDepth * sizeof(Value);
-            masm.addPtr(Imm32(spOffset), JSFrameReg, t0);
-            masm.storePtr(t0, FrameAddress(offsetof(VMFrame, regs.sp)));
-        }
-
-        /* Store fp. */
-        masm.storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
+        int32 storeFrameDepth = ic.frameSize.isStatic() ? initialFrameDepth : -1;
+        masm.setupFallibleABICall(cx->typeInferenceEnabled(), f.regs.pc, storeFrameDepth);
 
         /* Grab cx. */
 #ifdef JS_CPU_X86
         RegisterID cxReg = tempRegs.takeAnyReg().reg();
 #else
         RegisterID cxReg = Registers::ArgReg0;
 #endif
         masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), cxReg);
@@ -954,17 +1029,17 @@ class CallCompiler : public BaseCompiler
             v.setMagicWithObjectOrNullPayload(NULL);
             masm.storeValue(v, Address(vpReg, sizeof(Value)));
         }
 
         masm.restoreStackBase();
         masm.setupABICall(Registers::NormalCall, 3);
         masm.storeArg(2, vpReg);
         if (ic.frameSize.isStatic())
-            masm.storeArg(1, Imm32(ic.frameSize.staticArgc()));
+            masm.storeArg(1, ImmPtr((void *) ic.frameSize.staticArgc()));
         else
             masm.storeArg(1, argcReg.reg());
         masm.storeArg(0, cxReg);
 
         js::Native native = fun->u.n.native;
 
         /*
          * Call RegExp.test instead of exec if the result will not be used or
@@ -972,93 +1047,31 @@ class CallCompiler : public BaseCompiler
          * break inferred types for the call's result and any subsequent test,
          * as RegExp.exec has a type handler with unknown result.
          */
         if (native == js_regexp_exec && !CallResultEscapes(f.pc()))
             native = js_regexp_test;
 
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, native), false);
 
-        /* Reload fp, which may have been clobbered by restoreStackBase(). */
-        masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
-
-        Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
-                                              Registers::ReturnReg);
-
-        Vector<Jump> mismatches(f.cx);
-        if (cx->typeInferenceEnabled()) {
-            types::AutoEnterTypeInference enter(f.cx);
-
-            /*
-             * Test the result of this native against the known result type
-             * set for the call. We don't assume knowledge about the types that
-             * natives can return, except when generating specialized paths in
-             * FastBuiltins. We don't need to record dependencies on the result
-             * type set, as the compiler will already have done so when making
-             * the call IC.
-             */
-            Address address(JSFrameReg, vpOffset);
-            types::TypeSet *types = f.script()->analysis()->bytecodeTypes(f.pc());
-            if (!masm.generateTypeCheck(f.cx, address, types, &mismatches))
-                THROWV(true);
-
-            /*
-             * Can no longer trigger recompilation in this stub, clear the stub
-             * rejoin on the VMFrame.
-             */
-            masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
-        }
-
-        /*
-         * The final jump is a indirect on x64, so that we'll always be able
-         * to repatch it to the interpoline later.
-         */
-        Label finished = masm.label();
-#ifdef JS_CPU_X64
-        void *slowJoin = ic.slowPathStart.labelAtOffset(ic.slowJoinOffset).executableAddress();
-        DataLabelPtr done = masm.moveWithPatch(ImmPtr(slowJoin), Registers::ValueReg);
-        masm.jump(Registers::ValueReg);
-#else
-        Jump done = masm.jump();
-#endif
-
-        /* Generate a call for type check failures on the native result. */
-        if (!mismatches.empty()) {
-            for (unsigned i = 0; i < mismatches.length(); i++)
-                mismatches[i].linkTo(masm.label(), &masm);
-            masm.addPtr(Imm32(vpOffset), JSFrameReg, Registers::ArgReg1);
-            masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::TypeBarrierReturn),
-                                f.regs.pc, NULL, initialFrameDepth);
-            masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
-            masm.jump().linkTo(finished, &masm);
-        }
-
-        /* Move JaegerThrowpoline into register for very far jump on x64. */
-        hasException.linkTo(masm.label(), &masm);
-        if (cx->typeInferenceEnabled())
-            masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
-        masm.throwInJIT();
-
-        LinkerHelper linker(masm, JSC::METHOD_CODE);
-        JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_NativeStub);
-        if (!ep)
+        NativeStubLinker::FinalJump done;
+        if (!NativeStubEpilogue(f, masm, &done, initialFrameDepth, vpOffset, MaybeRegisterID(), MaybeRegisterID()))
+            return false;
+        NativeStubLinker linker(masm, f.jit(), f.regs.pc, done);
+        if (!linker.init(f.cx))
             THROWV(true);
 
-        ic.fastGuardedNative = obj;
-
         if (!linker.verifyRange(jit)) {
             disable(jit);
             return true;
         }
 
-        ic.nativeJump = linker.locationOf(done);
+        linker.patchJump(ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
 
-#ifndef JS_CPU_X64
-        linker.link(done, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
-#endif
+        ic.fastGuardedNative = obj;
 
         linker.link(funGuard, ic.slowPathStart);
         JSC::CodeLocationLabel start = linker.finalize();
 
         JaegerSpew(JSpew_PICs, "generated native CALL stub %p (%lu bytes)\n",
                    start.executableAddress(), (unsigned long) masm.size());
 
         Repatcher repatch(jit);
@@ -1469,20 +1482,18 @@ JITScript::sweepCallICs(JSContext *cx, b
             ic.hit = false;
         }
 
         if (fastFunDead) {
             repatcher.repatch(ic.funGuard, NULL);
             ic.purgeGuardedObject();
         }
 
-        if (nativeDead) {
-            ic.releasePool(CallICInfo::Pool_NativeStub);
+        if (nativeDead)
             ic.fastGuardedNative = NULL;
-        }
 
         if (purgeAll) {
             ic.releasePool(CallICInfo::Pool_ScriptStub);
             JSC::CodeLocationJump oolJump = ic.slowPathStart.jumpAtOffset(ic.oolJumpOffset);
             JSC::CodeLocationLabel icCall = ic.slowPathStart.labelAtOffset(ic.icCallOffset);
             repatcher.relink(oolJump, icCall);
         }
     }
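
/*
 * A hedged reconstruction of the NativeStubLinker interface implied by its
 * uses above (the actual declaration lives elsewhere in this changeset):
 */
class NativeStubLinker : public LinkerHelper {
  public:
#ifdef JS_CPU_X64
    typedef JSC::MacroAssembler::DataLabelPtr FinalJump;
#else
    typedef JSC::MacroAssembler::Jump FinalJump;
#endif
    NativeStubLinker(Assembler &masm, JITScript *jit, jsbytecode *pc, FinalJump done);
    bool init(JSContext *cx);                       /* appends a NativeCallStub */
    void patchJump(JSC::CodeLocationLabel target);  /* links 'done' to target */
};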
--- a/js/src/methodjit/MonoIC.h
+++ b/js/src/methodjit/MonoIC.h
@@ -211,17 +211,16 @@ struct CallICInfo {
     typedef JSC::MacroAssembler::RegisterID RegisterID;
 
     /* Linked list entry for all ICs guarding on the same JIT entry point in fastGuardedObject. */
     JSCList links;
 
     enum PoolIndex {
         Pool_ScriptStub,
         Pool_ClosureStub,
-        Pool_NativeStub,
         Total_Pools
     };
 
     JSC::ExecutablePool *pools[Total_Pools];
 
     /* Used for rooting and reification. */
     JSObject *fastGuardedObject;
     JSObject *fastGuardedNative;
@@ -235,27 +234,16 @@ struct CallICInfo {
     JSC::CodeLocationDataLabelPtr funGuard;
 
     /* Starting point for all slow call paths. */
     JSC::CodeLocationLabel slowPathStart;
 
     /* Inline to OOL jump, redirected by stubs. */
     JSC::CodeLocationJump funJump;
 
-    /*
-     * Native stub fallthrough jump which may be patched during recompilation.
-     * On x64 this is an indirect jump to avoid issues with far jumps on
-     * relative branches.
-     */
-#ifdef JS_CPU_X64
-    JSC::CodeLocationDataLabelPtr nativeJump;
-#else
-    JSC::CodeLocationJump nativeJump;
-#endif
-
     /* Offset to inline scripted call, from funGuard. */
     uint32 hotJumpOffset   : 16;
     uint32 joinPointOffset : 16;
 
     /* Out of line slow call. */
     uint32 oolCallOffset   : 16;
 
     /* Jump to patch for out-of-line scripted calls. */
@@ -276,23 +264,22 @@ struct CallICInfo {
     bool hasJsFunCheck : 1;
     bool typeMonitored : 1;
 
     inline void reset() {
         fastGuardedObject = NULL;
         fastGuardedNative = NULL;
         hit = false;
         hasJsFunCheck = false;
-        pools[0] = pools[1] = pools[2] = NULL;
+        PodArrayZero(pools);
     }
 
     inline void releasePools() {
         releasePool(Pool_ScriptStub);
         releasePool(Pool_ClosureStub);
-        releasePool(Pool_NativeStub);
     }
 
     inline void releasePool(PoolIndex index) {
         if (pools[index]) {
             pools[index]->release();
             pools[index] = NULL;
         }
     }
--- a/js/src/methodjit/PolyIC.cpp
+++ b/js/src/methodjit/PolyIC.cpp
@@ -146,19 +146,21 @@ class PICStubCompiler : public BaseCompi
     const char *type;
     VMFrame &f;
     JSScript *script;
     ic::PICInfo &pic;
     void *stub;
     uint32 gcNumber;
 
   public:
+    bool canCallHook;
+
     PICStubCompiler(const char *type, VMFrame &f, JSScript *script, ic::PICInfo &pic, void *stub)
       : BaseCompiler(f.cx), type(type), f(f), script(script), pic(pic), stub(stub),
-        gcNumber(f.cx->runtime->gcNumber)
+        gcNumber(f.cx->runtime->gcNumber), canCallHook(pic.canCallHook)
     { }
 
     bool isCallOp() const {
         if (pic.kind == ic::PICInfo::CALL)
             return true;
         return !!(js_CodeSpec[pic.op].format & JOF_CALLOP);
     }
 
@@ -797,20 +799,29 @@ struct GetPropertyHelper {
         if (!IsCacheableProtoChain(obj, holder))
             return ic.disable(cx, "non-native holder");
         shape = (const Shape *)prop;
         return Lookup_Cacheable;
     }
 
     LookupStatus testForGet() {
         if (!shape->hasDefaultGetter()) {
-            if (!shape->isMethod())
-                return ic.disable(cx, "getter");
-            if (!ic.isCallOp())
-                return ic.disable(cx, "method valued shape");
+            if (shape->isMethod()) {
+                if (!ic.isCallOp())
+                    return ic.disable(cx, "method valued shape");
+            } else {
+                if (shape->hasGetterValue())
+                    return ic.disable(cx, "getter value shape");
+                if (shape->hasSlot() && holder != obj)
+                    return ic.disable(cx, "slotful getter hook through prototype");
+                if (!ic.canCallHook)
+                    return ic.disable(cx, "can't call getter hook");
+                if (f.regs.inlined())
+                    return ic.disable(cx, "hook called from inline frame");
+            }
         } else if (!shape->hasSlot()) {
             return ic.disable(cx, "no slot");
         }
 
         return Lookup_Cacheable;
     }
 
     LookupStatus lookupAndTest() {
@@ -998,16 +1009,18 @@ class GetPropCompiler : public PICStubCo
             return disable("String.prototype without compile-and-go global");
 
         GetPropertyHelper<GetPropCompiler> getprop(cx, obj, atom, *this, f);
         LookupStatus status = getprop.lookupAndTest();
         if (status != Lookup_Cacheable)
             return status;
         if (getprop.obj != getprop.holder)
             return disable("proto walk on String.prototype");
+        if (!getprop.shape->hasDefaultGetterOrIsMethod())
+            return disable("getter hook on String.prototype");
         if (hadGC())
             return Lookup_Uncacheable;
 
         Assembler masm;
 
         /* Only strings are allowed. */
         Jump notString = masm.branchPtr(Assembler::NotEqual, pic.typeReg(),
                                         ImmType(JSVAL_TYPE_STRING));
@@ -1139,16 +1152,103 @@ class GetPropCompiler : public PICStubCo
         repatcher.repatch(labels.getInlineShapeData(pic.getFastShapeGuard()), obj->shape());
         repatcher.patchAddressOffsetForValueLoad(labels.getValueLoad(pic.fastPathRejoin), offset);
 
         pic.inlinePathPatched = true;
 
         return Lookup_Cacheable;
     }
 
+    void generateGetterStub(Assembler &masm, const Shape *shape,
+                            Label start, const Vector<Jump, 8> &shapeMismatches)
+    {
+        /*
+         * The getter hook needs to be called from the stub. The state is fully
+         * synced and no registers are live except the result registers.
+         */
+        JS_ASSERT(pic.canCallHook);
+        PropertyOp getter = shape->getterOp();
+
+        if (cx->typeInferenceEnabled()) {
+            masm.storePtr(ImmPtr((void *) REJOIN_NATIVE_GETTER),
+                          FrameAddress(offsetof(VMFrame, stubRejoin)));
+        }
+
+        Registers tempRegs = Registers::tempCallRegMask();
+        if (tempRegs.hasReg(Registers::ClobberInCall))
+            tempRegs.takeReg(Registers::ClobberInCall);
+
+        /* Get a register to hold obj while we set up the rest of the frame. */
+        RegisterID holdObjReg = pic.objReg;
+        if (tempRegs.hasReg(pic.objReg)) {
+            tempRegs.takeReg(pic.objReg);
+        } else {
+            holdObjReg = tempRegs.takeAnyReg().reg();
+            masm.move(pic.objReg, holdObjReg);
+        }
+
+        RegisterID t0 = tempRegs.takeAnyReg().reg();
+        masm.bumpStubCounter(f.script(), f.pc(), t0);
+
+        /*
+         * Initialize vp, which is either a slot in the object (the holder,
+         * actually, which must equal the object here) or undefined.
+         * Use vp == sp (which for CALLPROP will actually be the original
+         * sp + 1) to avoid clobbering stack values.
+         */
+        int32 vpOffset = (char *) f.regs.sp - (char *) f.fp();
+        if (shape->hasSlot()) {
+            masm.loadObjProp(obj, holdObjReg, shape,
+                             Registers::ClobberInCall, t0);
+            masm.storeValueFromComponents(Registers::ClobberInCall, t0, Address(JSFrameReg, vpOffset));
+        } else {
+            masm.storeValue(UndefinedValue(), Address(JSFrameReg, vpOffset));
+        }
+
+        int32 initialFrameDepth = f.regs.sp - f.fp()->slots();
+        masm.setupFallibleABICall(cx->typeInferenceEnabled(), f.regs.pc, initialFrameDepth);
+
+        /* Grab cx. */
+#ifdef JS_CPU_X86
+        RegisterID cxReg = tempRegs.takeAnyReg().reg();
+#else
+        RegisterID cxReg = Registers::ArgReg0;
+#endif
+        masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), cxReg);
+
+        /* Grab vp. */
+        RegisterID vpReg = t0;
+        masm.addPtr(Imm32(vpOffset), JSFrameReg, vpReg);
+
+        masm.restoreStackBase();
+        masm.setupABICall(Registers::NormalCall, 4);
+        masm.storeArg(3, vpReg);
+        masm.storeArg(2, ImmPtr((void *) JSID_BITS(SHAPE_USERID(shape))));
+        masm.storeArg(1, holdObjReg);
+        masm.storeArg(0, cxReg);
+
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, getter), false);
+
+        NativeStubLinker::FinalJump done;
+        if (!NativeStubEpilogue(f, masm, &done, 0, vpOffset, pic.shapeReg, pic.objReg))
+            return;
+        NativeStubLinker linker(masm, f.jit(), f.regs.pc, done);
+        if (!linker.init(f.cx))
+            THROW();
+
+        if (!linker.verifyRange(f.jit())) {
+            disable("code memory is out of range");
+            return;
+        }
+
+        linker.patchJump(pic.fastPathRejoin);
+
+        linkerEpilogue(linker, start, shapeMismatches);
+    }
+
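/*
 * A minimal sketch of the vp placement used by generateGetterStub above: the
 * out-param is carved out of the operand stack at the current sp (for
 * CALLPROP, the original sp + 1, matching ic::CallProp), so no live stack
 * values are clobbered:
 *
 *   int32 vpOffset = (char *) f.regs.sp - (char *) f.fp();
 *   Value *vp = (Value *) ((char *) f.fp() + vpOffset);  // == f.regs.sp
 */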
     LookupStatus generateStub(JSObject *holder, const Shape *shape)
     {
         Vector<Jump, 8> shapeMismatches(cx);
 
         Assembler masm;
 
         Label start;
         Jump shapeGuardJump;
@@ -1193,87 +1293,108 @@ class GetPropCompiler : public PICStubCo
             if (!shapeMismatches.append(j))
                 return error();
 
             pic.secondShapeGuard = masm.distanceOf(masm.label()) - masm.distanceOf(start);
         } else {
             pic.secondShapeGuard = 0;
         }
 
+        if (!shape->hasDefaultGetterOrIsMethod()) {
+            generateGetterStub(masm, shape, start, shapeMismatches);
+            if (setStubShapeOffset)
+                pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
+            return Lookup_Cacheable;
+        }
+
         /* Load the value out of the object. */
         masm.loadObjProp(holder, holderReg, shape, pic.shapeReg, pic.objReg);
         Jump done = masm.jump();
 
         pic.updatePCCounters(cx, masm);
 
         PICLinker buffer(masm, pic);
         if (!buffer.init(cx))
             return error();
 
         if (!buffer.verifyRange(pic.lastCodeBlock(f.jit())) ||
             !buffer.verifyRange(f.jit())) {
             return disable("code memory is out of range");
         }
 
+        // The final exit jumps to the store-back in the inline stub.
+        buffer.link(done, pic.fastPathRejoin);
+
+        linkerEpilogue(buffer, start, shapeMismatches);
+
+        if (setStubShapeOffset)
+            pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
+        return Lookup_Cacheable;
+    }
+
+    void linkerEpilogue(LinkerHelper &buffer, Label start, const Vector<Jump, 8> &shapeMismatches)
+    {
         // The guard exit jumps to the original slow case.
         for (Jump *pj = shapeMismatches.begin(); pj != shapeMismatches.end(); ++pj)
             buffer.link(*pj, pic.slowPathStart);
 
-        // The final exit jumps to the store-back in the inline stub.
-        buffer.link(done, pic.fastPathRejoin);
         CodeLocationLabel cs = buffer.finalize();
         JaegerSpew(JSpew_PICs, "generated %s stub at %p\n", type, cs.executableAddress());
 
         patchPreviousToHere(cs);
 
         pic.stubsGenerated++;
         pic.updateLastPath(buffer, start);
 
-        if (setStubShapeOffset)
-            pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
-
         if (pic.stubsGenerated == MAX_PIC_STUBS)
             disable("max stubs reached");
         if (obj->isDenseArray())
             disable("dense array");
-
-        return Lookup_Cacheable;
     }
 
     void patchPreviousToHere(CodeLocationLabel cs)
     {
         Repatcher repatcher(pic.lastCodeBlock(f.jit()));
         CodeLocationLabel label = pic.lastPathStart();
 
         // Patch either the inline fast path or a generated stub. The stub
         // omits the prefix of the inline fast path that loads the shape, so
         // the offsets are different.
         int shapeGuardJumpOffset;
         if (pic.stubsGenerated)
             shapeGuardJumpOffset = pic.getPropLabels().getStubShapeJumpOffset();
         else
             shapeGuardJumpOffset = pic.shapeGuard + pic.getPropLabels().getInlineShapeJumpOffset();
+        int secondGuardOffset = getLastStubSecondShapeGuard();
+
+        JaegerSpew(JSpew_PICs, "Patching previous (%d stubs) (start %p) (offset %d) (second %d)\n",
+                   (int) pic.stubsGenerated, label.executableAddress(),
+                   shapeGuardJumpOffset, secondGuardOffset);
+
         repatcher.relink(label.jumpAtOffset(shapeGuardJumpOffset), cs);
-        if (int secondGuardOffset = getLastStubSecondShapeGuard())
+        if (secondGuardOffset)
             repatcher.relink(label.jumpAtOffset(secondGuardOffset), cs);
     }
 
     LookupStatus update()
     {
         JS_ASSERT(pic.hit);
 
         GetPropertyHelper<GetPropCompiler> getprop(cx, obj, atom, *this, f);
         LookupStatus status = getprop.lookupAndTest();
         if (status != Lookup_Cacheable)
             return status;
         if (hadGC())
             return Lookup_Uncacheable;
 
-        if (obj == getprop.holder && !pic.inlinePathPatched)
+        if (obj == getprop.holder &&
+            getprop.shape->hasDefaultGetterOrIsMethod() &&
+            !pic.inlinePathPatched) {
             return patchInline(getprop.holder, getprop.shape);
+        }
 
         return generateStub(getprop.holder, getprop.shape);
     }
 };
 
 class ScopeNameCompiler : public PICStubCompiler
 {
   private:
@@ -1814,21 +1935,19 @@ ic::GetProp(VMFrame &f, ic::PICInfo *pic
     if (atom == f.cx->runtime->atomState.lengthAtom) {
         if (f.regs.sp[-1].isString()) {
             GetPropCompiler cc(f, script, NULL, *pic, NULL, DisabledGetPropIC);
             LookupStatus status = cc.generateStringLengthStub();
             if (status == Lookup_Error)
                 THROW();
             JSString *str = f.regs.sp[-1].toString();
             f.regs.sp[-1].setInt32(str->length());
-            types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-1]);
             return;
         } else if (f.regs.sp[-1].isMagic(JS_LAZY_ARGUMENTS)) {
             f.regs.sp[-1].setInt32(f.regs.fp()->numActualArgs());
-            types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-1]);
             return;
         } else if (!f.regs.sp[-1].isPrimitive()) {
             JSObject *obj = &f.regs.sp[-1].toObject();
             if (obj->isArray() ||
                 (obj->isArguments() && !obj->asArguments()->hasOverriddenLength()) ||
                 obj->isString()) {
                 GetPropCompiler cc(f, script, obj, *pic, NULL, DisabledGetPropIC);
                 if (obj->isArray()) {
@@ -1843,25 +1962,22 @@ ic::GetProp(VMFrame &f, ic::PICInfo *pic
                     f.regs.sp[-1].setInt32(int32_t(obj->asArguments()->initialLength()));
                 } else if (obj->isString()) {
                     LookupStatus status = cc.generateStringObjLengthStub();
                     if (status == Lookup_Error)
                         THROW();
                     JSString *str = obj->getPrimitiveThis().toString();
                     f.regs.sp[-1].setInt32(str->length());
                 }
-                types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-1]);
                 return;
             }
         }
         atom = f.cx->runtime->atomState.lengthAtom;
     }
 
-    bool usePropCache = pic->usePropCache;
-
     /*
      * ValueToObject can trigger recompilations if it lazily initializes any
      * of the primitive classes (Boolean, Number, String). :XXX: if these
      * classes are made eager then this monitoring is not necessary.
      */
     RecompilationMonitor monitor(f.cx);
 
     JSObject *obj = ValueToObject(f.cx, &f.regs.sp[-1]);
@@ -1878,26 +1994,16 @@ ic::GetProp(VMFrame &f, ic::PICInfo *pic
             THROW();
         }
     }
 
     Value v;
     if (!obj->getProperty(f.cx, ATOM_TO_JSID(atom), &v))
         THROW();
 
-    /*
-     * Ignore undefined reads for the 'prototype' property in constructors,
-     * which will be at the start of the script and are never holes due to fun_resolve.
-     * Any undefined value was explicitly stored here, and is known by inference.
-     * :FIXME: looking under the usePropCache abstraction, which is only unset for
-     * reads of the prototype.
-     */
-    if (usePropCache)
-        types::TypeScript::Monitor(f.cx, f.script(), f.pc(), v);
-
     f.regs.sp[-1] = v;
 }
 
 void JS_FASTCALL
 ic::GetPropNoCache(VMFrame &f, ic::PICInfo *pic)
 {
     /*
      * The PIC stores whether to use the property cache or not. We use two different
@@ -2012,16 +2118,21 @@ ic::CallProp(VMFrame &f, ic::PICInfo *pi
             uint32 slot = entry->vword.toSlot();
             rval = obj2->nativeGetSlot(slot);
         } else {
             JS_ASSERT(entry->vword.isShape());
             const Shape *shape = entry->vword.toShape();
             NATIVE_GET(cx, &objv.toObject(), obj2, shape, JSGET_NO_METHOD_BARRIER, &rval,
                        THROW());
         }
+        /*
+         * Adjust the stack to reflect the height after the GETPROP, here and
+         * below. Getter hook ICs depend on this to know which value of sp they
+         * are updating for consistent rejoins; don't modify this!
+         */
         regs.sp++;
         regs.sp[-2] = rval;
         regs.sp[-1] = lval;
     } else {
         /*
          * Cache miss: use the immediate atom that was loaded for us under
          * PropertyCache::test.
          */
@@ -2052,18 +2163,16 @@ ic::CallProp(VMFrame &f, ic::PICInfo *pi
 #if JS_HAS_NO_SUCH_METHOD
     if (JS_UNLIKELY(rval.isPrimitive()) && regs.sp[-1].isObject()) {
         regs.sp[-2].setString(JSID_TO_STRING(id));
         if (!OnUnknownMethod(cx, regs.sp - 2))
             THROW();
     }
 #endif
 
-    types::TypeScript::Monitor(f.cx, f.script(), f.pc(), regs.sp[-2]);
-
     if (monitor.recompiled())
         return;
 
     GetPropCompiler cc(f, script, &objv.toObject(), *pic, pic->atom, DisabledCallPropIC);
     if (lval.isObject()) {
         if (pic->shouldUpdate(cx)) {
             LookupStatus status = cc.update();
             if (status == Lookup_Error)
@@ -2103,18 +2212,16 @@ ic::XName(VMFrame &f, ic::PICInfo *pic)
     LookupStatus status = cc.updateForXName();
     if (status == Lookup_Error)
         THROW();
 
     Value rval;
     if (!cc.retrieve(&rval, NULL, PICInfo::XNAME))
         THROW();
     f.regs.sp[-1] = rval;
-
-    types::TypeScript::Monitor(f.cx, f.script(), f.pc(), rval);
 }
 
 void JS_FASTCALL
 ic::Name(VMFrame &f, ic::PICInfo *pic)
 {
     JSScript *script = f.fp()->script();
 
     ScopeNameCompiler cc(f, script, &f.fp()->scopeChain(), *pic, pic->atom, DisabledNameIC);
@@ -2122,18 +2229,16 @@ ic::Name(VMFrame &f, ic::PICInfo *pic)
     LookupStatus status = cc.updateForName();
     if (status == Lookup_Error)
         THROW();
 
     Value rval;
     if (!cc.retrieve(&rval, NULL, PICInfo::NAME))
         THROW();
     f.regs.sp[0] = rval;
-
-    types::TypeScript::Monitor(f.cx, f.script(), f.pc(), rval);
 }
 
 static void JS_FASTCALL
 DisabledCallNameIC(VMFrame &f, ic::PICInfo *pic)
 {
     stubs::CallName(f);
 }
 
@@ -2149,18 +2254,16 @@ ic::CallName(VMFrame &f, ic::PICInfo *pi
         THROW();
 
     Value rval, thisval;
     if (!cc.retrieve(&rval, &thisval, PICInfo::CALLNAME))
         THROW();
 
     f.regs.sp[0] = rval;
     f.regs.sp[1] = thisval;
-
-    types::TypeScript::Monitor(f.cx, f.script(), f.pc(), rval);
 }
 
 static void JS_FASTCALL
 DisabledBindNameIC(VMFrame &f, ic::PICInfo *pic)
 {
     stubs::BindName(f);
 }
 
@@ -2327,16 +2430,22 @@ GetElementIC::attachGetProp(VMFrame &f, 
 {
     JS_ASSERT(v.isString());
 
     GetPropertyHelper<GetElementIC> getprop(cx, obj, JSID_TO_ATOM(id), *this, f);
     LookupStatus status = getprop.lookupAndTest();
     if (status != Lookup_Cacheable)
         return status;
 
+    // With TI enabled, string property stubs can only be added to an opcode if
+    // the value read will go through a type barrier afterwards. TI only
+    // accounts for integer-valued properties accessed by GETELEM/CALLELEM.
+    if (cx->typeInferenceEnabled() && !forcedTypeBarrier)
+        return disable(cx, "string element access may not have type barrier");
+
     Assembler masm;
 
     // Guard on the string's type and identity.
     MaybeJump atomTypeGuard;
     if (hasInlineTypeGuard() && !inlineTypeGuardPatched) {
         // We link all string-key dependent stubs together, and store the
         // first set of guards in the IC, separately, from int-key dependent
         // stubs. As long as we guarantee that the first string-key dependent
@@ -2790,19 +2899,16 @@ ic::CallElement(VMFrame &f, ic::GetEleme
         LookupStatus status = ic->update(f, cx, thisObj, idval, id, &f.regs.sp[-2]);
         if (status != Lookup_Uncacheable) {
             if (status == Lookup_Error)
                 THROW();
 
             // If the result can be cached, the value was already retrieved.
             JS_ASSERT(!f.regs.sp[-2].isMagic());
             f.regs.sp[-1].setObject(*thisObj);
-            if (!JSID_IS_INT(id))
-                types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
-            types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
             return;
         }
     }
 
     /* Get or set the element. */
     if (!js_GetMethod(cx, thisObj, id, JSGET_NO_METHOD_BARRIER, &f.regs.sp[-2]))
         THROW();
 
@@ -2812,19 +2918,16 @@ ic::CallElement(VMFrame &f, ic::GetEleme
         f.regs.sp[-1].setObject(*thisObj);
         if (!OnUnknownMethod(cx, f.regs.sp - 2))
             THROW();
     } else
 #endif
     {
         f.regs.sp[-1] = thisv;
     }
-    if (!JSID_IS_INT(id))
-        types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
-    types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
 }
 
 void JS_FASTCALL
 ic::GetElement(VMFrame &f, ic::GetElementIC *ic)
 {
     JSContext *cx = f.cx;
 
     // Right now, we don't optimize for strings or lazy arguments.
@@ -2856,28 +2959,22 @@ ic::GetElement(VMFrame &f, ic::GetElemen
 #endif
         LookupStatus status = ic->update(f, cx, obj, idval, id, &f.regs.sp[-2]);
         if (status != Lookup_Uncacheable) {
             if (status == Lookup_Error)
                 THROW();
 
             // If the result can be cached, the value was already retrieved.
             JS_ASSERT(!f.regs.sp[-2].isMagic());
-            if (!JSID_IS_INT(id))
-                types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
-            types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
             return;
         }
     }
 
     if (!obj->getProperty(cx, id, &f.regs.sp[-2]))
         THROW();
-    if (!JSID_IS_INT(id))
-        types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
-    types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
 }
 
 #define APPLY_STRICTNESS(f, s)                          \
     (FunctionTemplateConditional(s, f<true>, f<false>))
 
 LookupStatus
 SetElementIC::disable(JSContext *cx, const char *reason)
 {
--- a/js/src/methodjit/PolyIC.h
+++ b/js/src/methodjit/PolyIC.h
@@ -88,25 +88,32 @@ struct BaseIC : public MacroAssemblerTyp
     // asm data location. This is 0 if there is only one shape guard in the
     // last stub.
     int32 secondShapeGuard;
 
     // Whether or not the callsite has been hit at least once.
     bool hit : 1;
     bool slowCallPatched : 1;
 
+    // Whether getter/setter hooks can be called from IC stubs.
+    bool canCallHook : 1;
+
+    // Whether a type barrier is in place for the result of the op.
+    bool forcedTypeBarrier : 1;
+
     // Number of stubs generated.
     uint32 stubsGenerated : 5;
 
     // Opcode this was compiled for.
     JSOp op : 9;
 
     void reset() {
         hit = false;
         slowCallPatched = false;
+        forcedTypeBarrier = false;
         stubsGenerated = 0;
         secondShapeGuard = 0;
     }
     bool shouldUpdate(JSContext *cx);
     void spew(JSContext *cx, const char *event, const char *reason);
     LookupStatus disable(JSContext *cx, const char *reason, void *stub);
     void updatePCCounters(JSContext *cx, Assembler &masm);
     bool isCallOp();
--- a/js/src/methodjit/Retcon.cpp
+++ b/js/src/methodjit/Retcon.cpp
@@ -95,120 +95,129 @@ SetRejoinState(StackFrame *fp, const Cal
         fp->setRejoin(ScriptedRejoin(site.pcOffset));
         *location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolineScripted);
     } else {
         fp->setRejoin(StubRejoin(site.rejoin));
         *location = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
     }
 }
 
+static inline bool
+CallsiteMatches(uint8 *codeStart, const CallSite &site, void *location)
+{
+    if (codeStart + site.codeOffset == location)
+        return true;
+
+#ifdef JS_CPU_ARM
+    if (codeStart + site.codeOffset + 4 == location)
+        return true;
+#endif
+
+    return false;
+}
+
 void
 Recompiler::patchCall(JITScript *jit, StackFrame *fp, void **location)
 {
     uint8* codeStart = (uint8 *)jit->code.m_code.executableAddress();
 
     CallSite *callSites_ = jit->callSites();
     for (uint32 i = 0; i < jit->nCallSites; i++) {
-        if (callSites_[i].codeOffset + codeStart == *location) {
+        if (CallsiteMatches(codeStart, callSites_[i], *location)) {
             JS_ASSERT(callSites_[i].inlineIndex == analyze::CrossScriptSSA::OUTER_FRAME);
             SetRejoinState(fp, callSites_[i], location);
             return;
         }
     }
 
     JS_NOT_REACHED("failed to find call site");
 }
 
 void
 Recompiler::patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *fp,
-                        jsbytecode *pc, CallSite *inlined, RejoinState rejoin)
+                        jsbytecode *pc, RejoinState rejoin)
 {
     /*
-     * There is a native IC at pc which triggered a recompilation. The recompilation
-     * could have been triggered either by the native call itself, or by a SplatApplyArgs
-     * preparing for the native call. Either way, we don't want to patch up the call,
-     * but will instead steal the pool for the native IC so it doesn't get freed
-     * with the old script, and patch up the jump at the end to go to the interpoline.
+     * There is a native call or getter IC at pc which triggered recompilation.
+     * The recompilation could have been triggered either by the native call
+     * itself, or by a SplatApplyArgs preparing for the native call. Either
+     * way, we don't want to patch up the call, but will instead steal the pool
+     * for the IC so it doesn't get freed with the JITScript, and patch up the
+     * jump at the end to go to the interpoline.
+     *
+     * When doing this, we do not reset the IC itself; the JITScript must
+     * be dead and about to be released due to the recompilation (or a GC).
      */
     fp->setRejoin(StubRejoin(rejoin));
 
     /* :XXX: We might crash later if this fails. */
     compartment->jaegerCompartment()->orphanedNativeFrames.append(fp);
 
-    unsigned i;
-    ic::CallICInfo *callICs = jit->callICs();
-    for (i = 0; i < jit->nCallICs; i++) {
-        CallSite *call = callICs[i].call;
-        if (inlined) {
-            /*
-             * The IC and regs.inlined will have two different call sites for
-             * the same point in the script. The IC site refers to the scripted
-             * return and regs.inlined has the prologue site (which was in use
-             * when the native stub was generated.
-             */
-            if (call->inlineIndex == inlined->inlineIndex && call->pcOffset == inlined->pcOffset)
-                break;
-        } else if (call->inlineIndex == uint32(-1) &&
-                   call->pcOffset == uint32(pc - jit->script->code)) {
-            break;
+    DebugOnly<bool> found = false;
+
+    /*
+     * Find and patch all native call stubs attached to the given PC. There may
+     * be multiple ones for getter stubs attached to e.g. a GETELEM.
+     */
+    for (unsigned i = 0; i < jit->nativeCallStubs.length(); i++) {
+        NativeCallStub &stub = jit->nativeCallStubs[i];
+        if (stub.pc != pc)
+            continue;
+
+        found = true;
+
+        /* Check for pools that were already patched. */
+        if (!stub.pool)
+            continue;
+
+        /* Patch the native fallthrough to go to the interpoline. */
+        {
+#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
+            /* Win64 needs stack adjustment */
+            void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched);
+#else
+            void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
+#endif
+            uint8 *start = (uint8 *)stub.jump.executableAddress();
+            JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
+#ifdef JS_CPU_X64
+            repatch.repatch(stub.jump, interpoline);
+#else
+            repatch.relink(stub.jump, JSC::CodeLocationLabel(interpoline));
+#endif
         }
-    }
-    JS_ASSERT(i < jit->nCallICs);
-    ic::CallICInfo &ic = callICs[i];
-    JS_ASSERT(ic.fastGuardedNative);
 
-    JSC::ExecutablePool *&pool = ic.pools[ic::CallICInfo::Pool_NativeStub];
+        /* :XXX: We leak the pool if this fails. Oh well. */
+        compartment->jaegerCompartment()->orphanedNativePools.append(stub.pool);
 
-    if (!pool) {
-        /* Already stole this stub. */
-        return;
+        /* Mark as stolen in case there are multiple calls on the stack. */
+        stub.pool = NULL;
     }
 
-    /* Patch the native fallthrough to go to the interpoline. */
-    {
-#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
-        /* Win64 needs stack adjustment */
-        void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched);
-#else
-        void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
-#endif
-        uint8 *start = (uint8 *)ic.nativeJump.executableAddress();
-        JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
-#ifdef JS_CPU_X64
-        repatch.repatch(ic.nativeJump, interpoline);
-#else
-        repatch.relink(ic.nativeJump, JSC::CodeLocationLabel(interpoline));
-#endif
-    }
-
-    /* :XXX: We leak the pool if this fails. Oh well. */
-    compartment->jaegerCompartment()->orphanedNativePools.append(pool);
-
-    /* Mark as stolen in case there are multiple calls on the stack. */
-    pool = NULL;
+    JS_ASSERT(found);
 }
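
Two details in the rewritten loop above are worth noting: the scan covers
every native call stub attached to the pc (a GETELEM can have several
getter stubs), and nulling stub.pool after appending it to the orphaned
list makes the steal idempotent when several frames on the stack return
into the same site. A standalone sketch of that idempotent-steal pattern,
with hypothetical simplified types:

    #include <cstddef>
    #include <vector>

    struct Pool { };                              // owns executable memory
    struct Stub { const void *pc; Pool *pool; };  // hypothetical stand-ins

    // Steal every pool attached to `pc` exactly once, even when called
    // again for another frame returning into the same site.
    static void stealPools(std::vector<Stub> &stubs, const void *pc,
                           std::vector<Pool *> &orphaned)
    {
        for (size_t i = 0; i < stubs.size(); i++) {
            if (stubs[i].pc != pc || !stubs[i].pool)
                continue;                 // wrong site, or already stolen
            orphaned.push_back(stubs[i].pool);
            stubs[i].pool = NULL;         // mark stolen for later callers
        }
    }
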
 
 void
 Recompiler::patchFrame(JSCompartment *compartment, VMFrame *f, JSScript *script)
 {
     /*
      * Check if the VMFrame returns directly into the script's jitcode. This
      * depends on the invariant that f->fp() reflects the frame at the point
      * where the call occurred, regardless of any frames which were pushed
      * inside the call.
      */
     StackFrame *fp = f->fp();
     void **addr = f->returnAddressLocation();
     RejoinState rejoin = (RejoinState) f->stubRejoin;
     if (rejoin == REJOIN_NATIVE ||
-        rejoin == REJOIN_NATIVE_LOWERED) {
+        rejoin == REJOIN_NATIVE_LOWERED ||
+        rejoin == REJOIN_NATIVE_GETTER) {
         /* Native call. */
         if (fp->script() == script) {
-            patchNative(compartment, fp->jit(), fp,
-                        f->regs.pc, NULL, rejoin);
+            patchNative(compartment, fp->jit(), fp, f->regs.pc, rejoin);
             f->stubRejoin = REJOIN_NATIVE_PATCHED;
         }
     } else if (rejoin == REJOIN_NATIVE_PATCHED) {
         /* Already patched, don't do anything. */
     } else if (rejoin) {
         /* Recompilation triggered by CompileFunction. */
         if (fp->script() == script) {
             fp->setRejoin(StubRejoin(rejoin));
@@ -290,22 +299,23 @@ Recompiler::expandInlineFrames(JSCompart
 
     StackFrame *innerfp = expandInlineFrameChain(fp, inner);
 
     /* Check if the VMFrame returns into the inlined frame. */
     if (f->stubRejoin && f->fp() == fp) {
         /* The VMFrame is calling CompileFunction. */
         JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
                   f->stubRejoin != REJOIN_NATIVE_LOWERED &&
+                  f->stubRejoin != REJOIN_NATIVE_GETTER &&
                   f->stubRejoin != REJOIN_NATIVE_PATCHED);
         innerfp->setRejoin(StubRejoin((RejoinState) f->stubRejoin));
         *frameAddr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
         f->stubRejoin = 0;
     }
-    if (*frameAddr == codeStart + inlined->codeOffset) {
+    if (CallsiteMatches(codeStart, *inlined, *frameAddr)) {
         /* The VMFrame returns directly into the expanded frame. */
         SetRejoinState(innerfp, *inlined, frameAddr);
     }
 
     if (f->fp() == fp) {
         JS_ASSERT(f->regs.inlined() == inlined);
         f->regs.expandInline(innerfp, innerpc);
     }
--- a/js/src/methodjit/Retcon.h
+++ b/js/src/methodjit/Retcon.h
@@ -95,17 +95,17 @@ public:
     static void patchFrame(JSCompartment *compartment, VMFrame *f, JSScript *script);
 
 private:
     JSContext *cx;
     JSScript *script;
 
     static void patchCall(JITScript *jit, StackFrame *fp, void **location);
     static void patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *fp,
-                            jsbytecode *pc, CallSite *inline_, RejoinState rejoin);
+                            jsbytecode *pc, RejoinState rejoin);
 
     static StackFrame *
     expandInlineFrameChain(StackFrame *outer, InlineFrame *inner);
 
     /* Detach jit from any IC callers. */
     static void cleanup(JITScript *jit);
 };
 
--- a/js/src/methodjit/StubCalls.cpp
+++ b/js/src/methodjit/StubCalls.cpp
@@ -370,17 +370,16 @@ NameOp(VMFrame &f, JSObject *obj, bool c
         if (!js_FindPropertyHelper(cx, id, true, global, &obj, &obj2, &prop))
             return NULL;
         if (!prop) {
             /* Kludge to allow (typeof foo == "undefined") tests. */
             JSOp op2 = js_GetOpcode(cx, f.script(), f.pc() + JSOP_NAME_LENGTH);
             if (op2 == JSOP_TYPEOF) {
                 f.regs.sp++;
                 f.regs.sp[-1].setUndefined();
-                TypeScript::Monitor(cx, f.script(), f.pc(), f.regs.sp[-1]);
                 return obj;
             }
             ReportAtomNotDefined(cx, atom);
             return NULL;
         }
 
         /* Take the slow path if prop was not found in a native object. */
         if (!obj->isNative() || !obj2->isNative()) {
@@ -397,18 +396,16 @@ NameOp(VMFrame &f, JSObject *obj, bool c
         /*
          * If this is an incop, update the property's types themselves,
          * to capture the type effect on the intermediate value.
          */
         if (rval.isUndefined() && (js_CodeSpec[*f.pc()].format & (JOF_INC|JOF_DEC)))
             AddTypePropertyId(cx, obj, id, Type::UndefinedType());
     }
 
-    TypeScript::Monitor(cx, f.script(), f.pc(), rval);
-
     *f.regs.sp++ = rval;
 
     if (callname)
         PushImplicitThis(f, obj, rval);
 
     return obj;
 }
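
The TypeScript::Monitor calls removed from NameOp here, and from GetElem,
CallElem, InlineGetProp and CallProp below, are not simply dropped: this
patch adds stubs::StubTypeHelper later in the file, which monitors the
value at a given stack slot, so the type recording presumably moves to a
shared rejoin path. A hypothetical, simplified sketch of that
consolidation (the types are stand-ins, not the engine's):

    struct Value { };
    struct Frame { Value *sp; };          // sp points one past the top

    static void monitorValue(const Value &) { /* type recording elided */ }

    // Mirrors the shape of stubs::StubTypeHelper added in this diff:
    // `which` names a slot relative to the stack pointer (-1 = top).
    static void rejoinAfterStub(Frame &f, int which)
    {
        monitorValue(f.sp[which]);
    }
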
 
@@ -438,25 +435,23 @@ stubs::GetElem(VMFrame &f)
     if (lref.isString() && rref.isInt32()) {
         JSString *str = lref.toString();
         int32_t i = rref.toInt32();
         if ((size_t)i < str->length()) {
             str = JSAtom::getUnitStringForElement(cx, str, (size_t)i);
             if (!str)
                 THROW();
             f.regs.sp[-2].setString(str);
-            TypeScript::Monitor(cx, f.script(), f.pc(), f.regs.sp[-2]);
             return;
         }
     }
 
     if (lref.isMagic(JS_LAZY_ARGUMENTS)) {
         if (rref.isInt32() && size_t(rref.toInt32()) < regs.fp()->numActualArgs()) {
             regs.sp[-2] = regs.fp()->canonicalActualArg(rref.toInt32());
-            TypeScript::Monitor(cx, f.script(), f.pc(), regs.sp[-2]);
             return;
         }
         MarkArgumentsCreated(cx, f.script());
         JS_ASSERT(!lref.isMagic(JS_LAZY_ARGUMENTS));
     }
 
     JSObject *obj = ValueToObject(cx, &lref);
     if (!obj)
@@ -503,22 +498,18 @@ stubs::GetElem(VMFrame &f)
                 THROW();
         }
     }
 
     if (!obj->getProperty(cx, id, &rval))
         THROW();
     copyFrom = &rval;
 
-    if (!JSID_IS_INT(id))
-        TypeScript::MonitorUnknown(cx, f.script(), f.pc());
-
   end_getelem:
     f.regs.sp[-2] = *copyFrom;
-    TypeScript::Monitor(cx, f.script(), f.pc(), f.regs.sp[-2]);
 }
 
 static inline bool
 FetchElementId(VMFrame &f, JSObject *obj, const Value &idval, jsid &id, Value *vp)
 {
     int32_t i_;
     if (ValueFitsInInt32(idval, &i_) && INT_FITS_IN_JSID(i_)) {
         id = INT_TO_JSID(i_);
@@ -554,19 +545,16 @@ stubs::CallElem(VMFrame &f)
         regs.sp[-1].setObject(*thisObj);
         if (!OnUnknownMethod(cx, regs.sp - 2))
             THROW();
     } else
 #endif
     {
         regs.sp[-1] = thisv;
     }
-    if (!JSID_IS_INT(id))
-        TypeScript::MonitorUnknown(cx, f.script(), f.pc());
-    TypeScript::Monitor(cx, f.script(), f.pc(), regs.sp[-2]);
 }
 
 template<JSBool strict>
 void JS_FASTCALL
 stubs::SetElem(VMFrame &f)
 {
     JSContext *cx = f.cx;
     FrameRegs &regs = f.regs;
@@ -592,28 +580,22 @@ stubs::SetElem(VMFrame &f)
             jsuint length = obj->getDenseArrayInitializedLength();
             jsint i = JSID_TO_INT(id);
             if ((jsuint)i < length) {
                 if (obj->getDenseArrayElement(i).isMagic(JS_ARRAY_HOLE)) {
                     if (js_PrototypeHasIndexedProperties(cx, obj))
                         break;
                     if ((jsuint)i >= obj->getArrayLength())
                         obj->setArrayLength(cx, i + 1);
-                    /*
-                     * Note: this stub is used for ENUMELEM, so watch out
-                     * before overwriting the op.
-                     */
-                    if (JSOp(*f.pc()) == JSOP_SETELEM)
-                        *f.pc() = JSOP_SETHOLE;
                 }
                 obj->setDenseArrayElementWithType(cx, i, rval);
                 goto end_setelem;
             } else {
-                if (JSOp(*f.pc()) == JSOP_SETELEM)
-                    *f.pc() = JSOP_SETHOLE;
+                if (f.script()->hasAnalysis())
+                    f.script()->analysis()->getCode(f.pc()).arrayWriteHole = true;
             }
         }
     } while (0);
     if (!obj->setProperty(cx, id, &rval, strict))
         THROW();
   end_setelem:
     /* :FIXME: Moving the assigned object into the lowest stack slot
      * is a temporary hack. What we actually want is an implementation
@@ -792,16 +774,17 @@ stubs::DefFun(VMFrame &f, JSFunction *fu
      * and event handlers shared among Firefox or other Mozilla app chrome
      * windows, and user-defined JS functions precompiled and then shared among
      * requests in server-side JS.
      */
     if (obj->getParent() != obj2) {
         obj = CloneFunctionObject(cx, fun, obj2, true);
         if (!obj)
             THROW();
+        JS_ASSERT_IF(f.script()->compileAndGo, obj->getGlobal() == fun->getGlobal());
     }
 
     /*
      * ECMA requires functions defined when entering Eval code to be
      * impermanent.
      */
     uintN attrs = fp->isEvalFrame()
                   ? JSPROP_ENUMERATE
@@ -1442,16 +1425,18 @@ stubs::DefLocalFun(VMFrame &f, JSFunctio
 
         if (obj->getParent() != parent) {
             obj = CloneFunctionObject(f.cx, fun, parent, true);
             if (!obj)
                 THROWV(NULL);
         }
     }
 
+    JS_ASSERT_IF(f.script()->compileAndGo, obj->getGlobal() == fun->getGlobal());
+
     return obj;
 }
 
 JSObject * JS_FASTCALL
 stubs::DefLocalFun_FC(VMFrame &f, JSFunction *fun)
 {
     JSObject *obj = js_NewFlatClosure(f.cx, fun, JSOP_DEFLOCALFUN_FC, JSOP_DEFLOCALFUN_FC_LENGTH);
     if (!obj)
@@ -1556,31 +1541,31 @@ stubs::Lambda(VMFrame &f, JSFunction *fu
         if (!parent)
             THROWV(NULL);
     }
 
     JSObject *obj = CloneFunctionObject(f.cx, fun, parent, true);
     if (!obj)
         THROWV(NULL);
 
+    JS_ASSERT_IF(f.script()->compileAndGo, obj->getGlobal() == fun->getGlobal());
     return obj;
 }
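
The three JS_ASSERT_IF additions in this file (DefFun, DefLocalFun and
Lambda) all check the same invariant: for a compile-and-go script, cloning
a function object over a new parent must leave it in the same global. A
trivial standalone illustration of the asserted property, with
hypothetical simplified objects:

    #include <assert.h>

    struct Global { };
    struct Fun { Global *global; };          // hypothetical stand-ins

    static Fun cloneOverParent(const Fun &f) // parent rebinding elided
    {
        return f;
    }

    int main()
    {
        Global g;
        Fun fun = { &g };
        Fun obj = cloneOverParent(fun);
        assert(obj.global == fun.global);    // same-global invariant
        return 0;
    }
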
 
 static bool JS_ALWAYS_INLINE
 InlineGetProp(VMFrame &f)
 {
     JSContext *cx = f.cx;
     FrameRegs &regs = f.regs;
 
     Value *vp = &f.regs.sp[-1];
 
     if (vp->isMagic(JS_LAZY_ARGUMENTS)) {
         JS_ASSERT(js_GetOpcode(cx, f.script(), f.pc()) == JSOP_LENGTH);
         regs.sp[-1] = Int32Value(regs.fp()->numActualArgs());
-        TypeScript::Monitor(cx, f.script(), f.pc(), regs.sp[-1]);
         return true;
     }
 
     JSObject *obj = ValueToObject(f.cx, vp);
     if (!obj)
         return false;
 
     Value rval;
@@ -1619,18 +1604,16 @@ InlineGetProp(VMFrame &f)
                     ? JSGET_CACHE_RESULT | JSGET_NO_METHOD_BARRIER
                     : JSGET_CACHE_RESULT | JSGET_METHOD_BARRIER,
                     &rval)
                 : !obj->getProperty(cx, id, &rval)) {
             return false;
         }
     } while(0);
 
-    TypeScript::Monitor(cx, f.script(), f.pc(), rval);
-
     regs.sp[-1] = rval;
     return true;
 }
 
 void JS_FASTCALL
 stubs::GetProp(VMFrame &f)
 {
     if (!InlineGetProp(f))
@@ -1739,17 +1722,16 @@ stubs::CallProp(VMFrame &f, JSAtom *orig
     }
 #if JS_HAS_NO_SUCH_METHOD
     if (JS_UNLIKELY(rval.isPrimitive()) && regs.sp[-1].isObject()) {
         regs.sp[-2].setString(origAtom);
         if (!OnUnknownMethod(cx, regs.sp - 2))
             THROW();
     }
 #endif
-    TypeScript::Monitor(cx, f.script(), f.pc(), rval);
 }
 
 void JS_FASTCALL
 stubs::Iter(VMFrame &f, uint32 flags)
 {
     if (!js_ValueToIterator(f.cx, flags, &f.regs.sp[-1]))
         THROW();
     JS_ASSERT(!f.regs.sp[-1].isPrimitive());
@@ -2391,16 +2373,29 @@ stubs::TypeBarrierHelper(VMFrame &f, uin
     if (f.script()->hasAnalysis() && f.script()->analysis()->ranInference()) {
         AutoEnterTypeInference enter(f.cx);
         f.script()->analysis()->breakTypeBarriers(f.cx, f.pc() - f.script()->code, false);
     }
 
     TypeScript::Monitor(f.cx, f.script(), f.pc(), result);
 }
 
+void JS_FASTCALL
+stubs::StubTypeHelper(VMFrame &f, int32 which)
+{
+    const Value &result = f.regs.sp[which];
+
+    if (f.script()->hasAnalysis() && f.script()->analysis()->ranInference()) {
+        AutoEnterTypeInference enter(f.cx);
+        f.script()->analysis()->breakTypeBarriers(f.cx, f.pc() - f.script()->code, false);
+    }
+
+    TypeScript::Monitor(f.cx, f.script(), f.pc(), result);
+}
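
StubTypeHelper receives the slot to monitor as an int32 relative to the
current stack pointer, so negative values of `which` address values at and
below the top of stack. A standalone sketch of that addressing convention
(Value here is a stand-in for the engine's type):

    #include <assert.h>

    typedef double Value;               // stand-in for the engine's Value

    int main()
    {
        Value stack[4] = { 1.0, 2.0, 3.0, 4.0 };
        Value *sp = stack + 4;          // sp points one past the top value

        // which = -1 names the top of stack, -2 the slot beneath it.
        assert(sp[-1] == 4.0);
        assert(sp[-2] == 3.0);
        return 0;
    }
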
+
 /*
  * Variant of TypeBarrierHelper for checking types after making a native call.
  * The stack is already correct, and no fixup should be performed.
  */
 void JS_FASTCALL
 stubs::TypeBarrierReturn(VMFrame &f, Value *vp)
 {
     TypeScript::Monitor(f.cx, f.script(), f.pc(), vp[0]);
@@ -2409,35 +2404,16 @@ stubs::TypeBarrierReturn(VMFrame &f, Val
 void JS_FASTCALL
 stubs::NegZeroHelper(VMFrame &f)
 {
     f.regs.sp[-1].setDouble(-0.0);
     TypeScript::MonitorOverflow(f.cx, f.script(), f.pc());
 }
 
 void JS_FASTCALL
-stubs::CallPropSwap(VMFrame &f)
-{
-    /*
-     * CALLPROP operations on strings are implemented in terms of GETPROP.
-     * If we rejoin from such a GETPROP, we come here at the end of the
-     * CALLPROP to fix up the stack. Right now the stack looks like:
-     *
-     * STRING PROP
-     *
-     * We need it to be:
-     *
-     * PROP STRING
-     */
-    Value v = f.regs.sp[-1];
-    f.regs.sp[-1] = f.regs.sp[-2];
-    f.regs.sp[-2] = v;
-}
-
-void JS_FASTCALL
 stubs::CheckArgumentTypes(VMFrame &f)
 {
     StackFrame *fp = f.fp();
     JSFunction *fun = fp->fun();
     JSScript *script = fun->script();
     RecompilationMonitor monitor(f.cx);
 
     {
--- a/js/src/methodjit/StubCalls.h
+++ b/js/src/methodjit/StubCalls.h
@@ -205,17 +205,18 @@ void JS_FASTCALL UnbrandThis(VMFrame &f)
 /*
  * Helpers for triggering recompilation should a name read miss a type
  * barrier or produce undefined or -0.
  */
 void JS_FASTCALL TypeBarrierHelper(VMFrame &f, uint32 which);
 void JS_FASTCALL TypeBarrierReturn(VMFrame &f, Value *vp);
 void JS_FASTCALL NegZeroHelper(VMFrame &f);
 
-void JS_FASTCALL CallPropSwap(VMFrame &f);
+void JS_FASTCALL StubTypeHelper(VMFrame &f, int32 which);
+
 void JS_FASTCALL CheckArgumentTypes(VMFrame &f);
 
 #ifdef DEBUG
 void JS_FASTCALL AssertArgumentTypes(VMFrame &f);
 #endif
 
 void JS_FASTCALL MissedBoundsCheckEntry(VMFrame &f);
 void JS_FASTCALL MissedBoundsCheckHead(VMFrame &f);
--- a/js/src/methodjit/StubCompiler.cpp
+++ b/js/src/methodjit/StubCompiler.cpp
@@ -165,23 +165,23 @@ StubCompiler::linkRejoin(Jump j)
 }
 
 typedef JSC::MacroAssembler::RegisterID RegisterID;
 typedef JSC::MacroAssembler::ImmPtr ImmPtr;
 typedef JSC::MacroAssembler::Imm32 Imm32;
 typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;
 
 JSC::MacroAssembler::Call
-StubCompiler::emitStubCall(void *ptr, RejoinState rejoin)
+StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, Uses uses)
 {
-    return emitStubCall(ptr, rejoin, frame.totalDepth());
+    return emitStubCall(ptr, rejoin, uses, frame.totalDepth());
 }
 
 JSC::MacroAssembler::Call
-StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, int32 slots)
+StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, Uses uses, int32 slots)
 {
     JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
     masm.bumpStubCounter(cc.script, cc.PC, Registers::tempCallReg());
     DataLabelPtr inlinePatch;
     Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
                                   ptr, cc.outerPC(), &inlinePatch, slots);
     JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
 
@@ -194,17 +194,17 @@ StubCompiler::emitStubCall(void *ptr, Re
     /* Add a hook for restoring loop invariants if necessary. */
     if (cc.loop && cc.loop->generatingInvariants()) {
         site.loopJumpLabel = masm.label();
         Jump j = masm.jump();
         Label l = masm.label();
         /* MissedBoundsCheck* are not actually called, so f.regs need to be written before InvariantFailure. */
         bool entry = (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckEntry))
                   || (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckHead));
-        cc.loop->addInvariantCall(j, l, true, entry, cc.callSites.length());
+        cc.loop->addInvariantCall(j, l, true, entry, cc.callSites.length(), uses);
     }
 
     cc.addCallSite(site);
     return cl;
 }
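
The new Uses parameter is threaded through to addInvariantCall so the
loop-invariant machinery knows how many stack slots the slow call
consumes; the shorter overload still defaults the slot count to
frame.totalDepth(). A minimal standalone sketch of that delegation
pattern, with hypothetical simplified types:

    #include <stdint.h>

    struct Uses {                                // hypothetical stand-in
        uint32_t nuses;
        explicit Uses(uint32_t n) : nuses(n) { }
    };

    struct Emitter {
        int32_t totalDepth() const { return 8; } // placeholder frame depth

        void emitStubCall(void *ptr, Uses uses, int32_t slots) {
            (void) ptr; (void) uses; (void) slots; // emission elided
        }
        // Short form keeps call sites simple: default the slot count.
        void emitStubCall(void *ptr, Uses uses) {
            emitStubCall(ptr, uses, totalDepth());
        }
    };
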
 
 void
 StubCompiler::fixCrossJumps(uint8 *ncode, size_t offset, size_t total)
--- a/js/src/methodjit/StubCompiler.h
+++ b/js/src/methodjit/StubCompiler.h
@@ -132,18 +132,18 @@ class StubCompiler
     void rejoin(Changes changes);
     void linkRejoin(Jump j);
 
     /* Finish all native code patching. */
     void fixCrossJumps(uint8 *ncode, size_t offset, size_t total);
     bool jumpInScript(Jump j, jsbytecode *target);
     unsigned crossJump(Jump j, Label l);
 
-    Call emitStubCall(void *ptr, RejoinState rejoin);
-    Call emitStubCall(void *ptr, RejoinState rejoin, int32 slots);
+    Call emitStubCall(void *ptr, RejoinState rejoin, Uses uses);
+    Call emitStubCall(void *ptr, RejoinState rejoin, Uses uses, int32 slots);
 
     void patchJoin(unsigned i, bool script, Assembler::Address address, AnyRegisterID reg);
 };
 
 } /* namespace mjit */
 } /* namespace js */
 
 #endif /* jsstub_compiler_h__ */
--- a/parser/htmlparser/src/nsElementTable.cpp
+++ b/parser/htmlparser/src/nsElementTable.cpp
@@ -857,17 +857,17 @@ const nsHTMLElement gHTMLElements[] = {
     /*special parents,kids*/            0,&gULKids,
   },
   {
     /*tag*/                             eHTMLTag_menuitem,
     /*req-parent excl-parent*/          eHTMLTag_unknown,eHTMLTag_unknown,
     /*rootnodes,endrootnodes*/          &gRootTags,&gRootTags,
     /*autoclose starttags and endtags*/ 0,0,0,0,
     /*parent,incl,exclgroups*/          kFlowEntity, kNone, kNone,
-    /*special props, prop-range*/       kNonContainer,kDefaultPropRange,
+    /*special props, prop-range*/       0,kDefaultPropRange,
     /*special parents,kids*/            0,0,
   },
   {
     /*tag*/                             eHTMLTag_meta,
     /*req-parent excl-parent*/          eHTMLTag_unknown,eHTMLTag_unknown,
     /*rootnodes,endrootnodes*/          &gInHead,&gInHead,
     /*autoclose starttags and endtags*/ 0,0,0,0,
     /*parent,incl,exclgroups*/          kHeadContent, kNone, kNone,