Check argument types given by type inference (bug 689325, r=sstangl,bhackett).
authorDavid Anderson <danderson@mozilla.com>
Mon, 10 Oct 2011 18:18:56 -0700
changeset 78954 e3463b292ab4b97d74ca4e93d025050a9166b8a2
parent 78953 67d5e0dec3fac1ffa3dc42dfcdebcb35098cc29b
child 78955 77dbb8ab6cf1e7e11c5f485d6c5a583b4614810f
push id227
push userdanderson@mozilla.com
push dateTue, 11 Oct 2011 01:21:08 +0000
reviewerssstangl, bhackett
bugs689325
milestone10.0a1
Check argument types given by type inference (bug 689325, r=sstangl,bhackett).
js/src/ion/CodeGenerator.cpp
js/src/ion/CodeGenerator.h
js/src/ion/IonAnalysis.cpp
js/src/ion/IonBuilder.cpp
js/src/ion/IonBuilder.h
js/src/ion/IonLIR.h
js/src/ion/IonMacroAssembler.cpp
js/src/ion/IonMacroAssembler.h
js/src/ion/LIR-Common.h
js/src/ion/LOpcodes.h
js/src/ion/Lowering.cpp
js/src/ion/MIR.h
js/src/ion/MIRGraph.cpp
js/src/ion/MIRGraph.h
js/src/ion/TypeOracle.h
js/src/ion/arm/Architecture-arm.h
js/src/ion/shared/Assembler-shared.h
js/src/ion/shared/Assembler-x86-shared.h
js/src/ion/shared/CodeGenerator-shared.cpp
js/src/ion/shared/CodeGenerator-shared.h
js/src/ion/shared/CodeGenerator-x86-shared.cpp
js/src/ion/shared/Lowering-shared-inl.h
js/src/ion/shared/MacroAssembler-x86-shared.h
js/src/ion/x64/Architecture-x64.h
js/src/ion/x64/Assembler-x64.h
js/src/ion/x64/CodeGenerator-x64.cpp
js/src/ion/x64/CodeGenerator-x64.h
js/src/ion/x64/LIR-x64.h
js/src/ion/x64/LOpcodes-x64.h
js/src/ion/x64/Lowering-x64.cpp
js/src/ion/x64/MacroAssembler-x64.h
js/src/ion/x86/Architecture-x86.h
js/src/ion/x86/Assembler-x86.h
js/src/ion/x86/CodeGenerator-x86.cpp
js/src/ion/x86/CodeGenerator-x86.h
js/src/ion/x86/LIR-x86.h
js/src/ion/x86/Lowering-x86.cpp
js/src/ion/x86/MacroAssembler-x86.h
js/src/jsval.h
--- a/js/src/ion/CodeGenerator.cpp
+++ b/js/src/ion/CodeGenerator.cpp
@@ -165,17 +165,17 @@ CodeGenerator::visitInt32ToDouble(LInt32
     return true;
 }
 
 bool
 CodeGenerator::visitTestVAndBranch(LTestVAndBranch *lir)
 {
     const ValueOperand value = ToValue(lir, LTestVAndBranch::Input);
 
-    Register tag = splitTagForTest(value);
+    Register tag = masm.splitTagForTest(value);
 
     Assembler::Condition cond;
 
     // Eventually we will want some sort of type filter here. For now, just
     // emit all easy cases. For speed we use the cached tag for all comparison,
     // except for doubles, which we test last (as the operation can clobber the
     // tag, which may be in ScratchReg).
     masm.branchTestUndefined(Assembler::Equal, tag, lir->ifFalse());
@@ -221,16 +221,65 @@ CodeGenerator::visitTruncateDToInt32(LTr
     emitTruncateDouble(ToFloatRegister(lir->input()), ToRegister(lir->output()), &fails);
     if (!bailoutFrom(&fails, lir->snapshot()))
         return false;
 
     return true;
 }
 
 bool
+CodeGenerator::visitParameter(LParameter *lir)
+{
+    return true;
+}
+
+bool
+CodeGenerator::visitStart(LStart *lir)
+{
+    return true;
+}
+
+bool
+CodeGenerator::generateArgumentsChecks()
+{
+    MIRGraph &mir = gen->graph();
+    MResumePoint *rp = mir.entryResumePoint();
+
+    // Reserve the amount of stack the actual frame will use. We have to undo
+    // this before falling through to the method proper though, because the
+    // monomorphic call case will bypass this entire path.
+    masm.reserveStack(frameSize());
+
+    // No registers are allocated yet, so it's safe to grab anything.
+    Register temp = GeneralRegisterSet(Registers::TempMask).getAny();
+
+    Label mismatched;
+    for (uint32 i = 0; i < CountArgSlots(gen->fun()); i++) {
+        // All initial parameters are guaranteed to be MParameters.
+        MParameter *param = rp->getOperand(i)->toParameter();
+        types::TypeSet *types = param->typeSet();
+        if (!types || types->unknown())
+            continue;
+
+        // Compute the stack offset of argument i relative to the frame we
+        // just reserved; |temp| (grabbed from TempMask above) is the scratch.
+        int32 offset = ArgToStackOffset(i * sizeof(Value));
+        masm.guardTypeSet(Address(StackPointer, offset), types, temp, &mismatched);
+    }
+
+    if (mismatched.used() && !bailoutFrom(&mismatched, graph.entrySnapshot()))
+        return false;
+
+    masm.freeStack(frameSize());
+
+    return true;
+}
+
+
+bool
 CodeGenerator::generateBody()
 {
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         current = graph.getBlock(i);
         masm.bind(current->label());
         for (LInstructionIterator iter = current->begin(); iter != current->end(); iter++) {
             if (!iter->accept(this))
                 return false;
@@ -241,16 +290,22 @@ CodeGenerator::generateBody()
     return true;
 }
 
 bool
 CodeGenerator::generate()
 {
     JSContext *cx = gen->cx;
 
+    // Before generating any code, we generate type checks for all parameters.
+    // This comes before deoptTable_, because we can't use deopt tables without
+    // creating the actual frame.
+    if (!generateArgumentsChecks())
+        return false;
+
     if (frameClass_ != FrameSizeClass::None()) {
         deoptTable_ = cx->compartment->ionCompartment()->getBailoutTable(cx, frameClass_);
         if (!deoptTable_)
             return false;
     }
 
     if (!generatePrologue())
         return false;
--- a/js/src/ion/CodeGenerator.h
+++ b/js/src/ion/CodeGenerator.h
@@ -52,24 +52,27 @@
 #error "CPU Not Supported"
 #endif
 
 namespace js {
 namespace ion {
 
 class CodeGenerator : public CodeGeneratorSpecific
 {
+    bool generateArgumentsChecks();
     bool generateBody();
 
   public:
     CodeGenerator(MIRGenerator *gen, LIRGraph &graph);
 
   public:
     bool generate();
 
+    virtual bool visitParameter(LParameter *lir);
+    virtual bool visitStart(LStart *lir);
     virtual bool visitValueToInt32(LValueToInt32 *lir);
     virtual bool visitValueToDouble(LValueToDouble *lir);
     virtual bool visitInt32ToDouble(LInt32ToDouble *lir);
     virtual bool visitTestVAndBranch(LTestVAndBranch *lir);
     virtual bool visitTruncateDToInt32(LTruncateDToInt32 *lir);
 };
 
 } // namespace ion
--- a/js/src/ion/IonAnalysis.cpp
+++ b/js/src/ion/IonAnalysis.cpp
@@ -438,22 +438,22 @@ ShouldSpecializeInput(MDefinition *box, 
     // If the node is a resume point, always replace the input to avoid
     // carrying around a wider type.
     if (use->isResumePoint()) {
         MResumePoint *resumePoint = use->toResumePoint();
             
         // If this resume point is attached to the definition, being effectful,
         // we *cannot* replace its use! The resume point comes in between the
         // definition and the unbox.
-        MResumePoint *defResumePoint;
+        MResumePoint *defResumePoint = NULL;
         if (box->isInstruction())
             defResumePoint = box->toInstruction()->resumePoint();
         else if (box->isPhi())
             defResumePoint = box->block()->entryResumePoint();
-        return (defResumePoint != resumePoint);
+        return !defResumePoint || (defResumePoint != resumePoint);
     }
 
     MDefinition *def = use->toDefinition();
 
     // Phis do not have type policies, but if they are specialized need
     // specialized inputs.
     if (def->isPhi())
         return def->type() != MIRType_Value;
--- a/js/src/ion/IonBuilder.cpp
+++ b/js/src/ion/IonBuilder.cpp
@@ -152,51 +152,115 @@ bool
 IonBuilder::build()
 {
     current = newBlock(pc);
     if (!current)
         return false;
 
     IonSpew(IonSpew_MIR, "Analying script %s:%d", script->filename, script->lineno);
 
-    // Initialize argument references if inside a function frame.
-    if (fun()) {
-        MParameter *param = MParameter::New(MParameter::THIS_SLOT, oracle->thisTypeSet(script));
-        current->add(param);
-        current->initSlot(thisSlot(), param);
-
-        for (uint32 i = 0; i < nargs(); i++) {
-            param = MParameter::New(int(i), oracle->parameterTypeSet(script, i));
-            current->add(param);
-            current->initSlot(argSlot(i), param);
-        }
-    }
+    initParameters();
 
     // Initialize local variables.
     for (uint32 i = 0; i < nlocals(); i++) {
         MConstant *undef = MConstant::New(UndefinedValue());
         current->add(undef);
         current->initSlot(localSlot(i), undef);
     }
 
-    current->makeStart(new MStart());
+    current->makeStart(MStart::New());
 
-    // Attach a resume point to each parameter, so the type analyzer doesn't
-    // replace its first use.
+    // The type analysis phase attempts to insert unbox operations near
+    // definitions of values. It also attempts to replace uses in resume points
+    // with the narrower, unboxed variants. However, we must prevent this
+    // replacement from happening on values in the entry snapshot. Otherwise we
+    // could get this:
+    //
+    //       v0 = MParameter(0)
+    //       v1 = MParameter(1)
+    //       --   ResumePoint(v2, v3)
+    //       v2 = Unbox(v0, INT32)
+    //       v3 = Unbox(v1, INT32)
+    //
+    // So we attach the initial resume point to each parameter, which the type
+    // analysis explicitly checks (this is the same mechanism used for
+    // effectful operations).
     for (uint32 i = 0; i < CountArgSlots(fun()); i++) {
-        MParameter *param = current->getEntrySlot(i)->toInstruction()->toParameter();
-        param->setResumePoint(current->entryResumePoint());
+        MInstruction *ins = current->getEntrySlot(i)->toInstruction();
+        if (ins->type() == MIRType_Value)
+            ins->setResumePoint(current->entryResumePoint());
     }
 
     if (!traverseBytecode())
         return false;
 
     return true;
 }
 
+// Apply Type Inference information to parameters early on, unboxing them if
+// they have a definitive type. The actual guards will be emitted by the code
+// generator, explicitly, as part of the function prologue.
+void
+IonBuilder::rewriteParameters()
+{
+    for (uint32 i = 0; i < CountArgSlots(fun()); i++) {
+        MParameter *param = current->getSlot(i)->toParameter();
+        types::TypeSet *types = param->typeSet();
+        if (!types)
+            continue;
+
+        JSValueType definiteType = types->getKnownTypeTag(cx);
+        if (definiteType == JSVAL_TYPE_UNKNOWN)
+            continue;
+
+        MInstruction *actual = NULL;
+        switch (definiteType) {
+          case JSVAL_TYPE_UNDEFINED:
+            actual = MConstant::New(UndefinedValue());
+            break;
+
+          case JSVAL_TYPE_NULL:
+            actual = MConstant::New(NullValue());
+            break;
+
+          default:
+            actual = MUnbox::NewUnchecked(param, MIRTypeFromValueType(definiteType));
+            break;
+        }
+
+        // Careful! We leave the original MParameter in the entry resume point. The
+        // arguments still need to be checked unless proven otherwise at the call
+        // site, and these checks can bailout. We can end up:
+        //   v0 = Parameter(0)
+        //   v1 = Unbox(v0, INT32)
+        //   --   ResumePoint(v0)
+        // 
+        // As usual, it would be invalid for v1 to be captured in the initial
+        // resume point, rather than v0.
+        current->rewriteSlot(i, actual);
+    }
+}
+
+void
+IonBuilder::initParameters()
+{
+    if (!fun())
+        return;
+
+    MParameter *param = MParameter::New(MParameter::THIS_SLOT, oracle->thisTypeSet(script));
+    current->add(param);
+    current->initSlot(thisSlot(), param);
+
+    for (uint32 i = 0; i < nargs(); i++) {
+        param = MParameter::New(i, oracle->parameterTypeSet(script, i));
+        current->add(param);
+        current->initSlot(argSlot(i), param);
+    }
+}
+
 // We try to build a control-flow graph in the order that it would be built as
 // if traversing the AST. This leads to a nice ordering and lets us build SSA
 // in one pass, since the bytecode is structured.
 //
 // We traverse the bytecode iteratively, maintaining a current basic block.
 // Each basic block has a mapping of local slots to instructions, as well as a
 // stack depth. As we encounter instructions we mutate this mapping in the
 // current block.
@@ -436,17 +500,18 @@ IonBuilder::inspectOpcode(JSOp op)
         current->setArg(GET_SLOTNO(pc));
         return true;
 
       case JSOP_GETLOCAL:
         current->pushLocal(GET_SLOTNO(pc));
         return true;
 
       case JSOP_SETLOCAL:
-        return current->setLocal(GET_SLOTNO(pc));
+        current->setLocal(GET_SLOTNO(pc));
+        return true;
 
       case JSOP_POP:
         current->pop();
         return true;
 
       case JSOP_IFEQX:
         return jsop_ifeq(JSOP_IFEQX);
 
@@ -1706,18 +1771,17 @@ IonBuilder::jsop_localinc(JSOp op)
     current->pushLocal(GET_SLOTNO(pc));
 
     if (!pushConstant(Int32Value(amt)))
         return false;
 
     if (!jsop_binary(JSOP_ADD))
         return false;
 
-    if (!current->setLocal(GET_SLOTNO(pc)))
-        return false;
+    current->setLocal(GET_SLOTNO(pc));
 
     if (post_incr)
         current->pop();
 
     return true;
 }
 
 bool
@@ -1732,18 +1796,17 @@ IonBuilder::jsop_arginc(JSOp op)
     current->pushArg(GET_SLOTNO(pc));
 
     if (!pushConstant(Int32Value(amt)))
         return false;
 
     if (!jsop_binary(JSOP_ADD))
         return false;
 
-    if (!current->setArg(GET_SLOTNO(pc)))
-        return false;
+    current->setArg(GET_SLOTNO(pc));
 
     if (post_incr)
         current->pop();
 
     return true;
 }
 
 bool
--- a/js/src/ion/IonBuilder.h
+++ b/js/src/ion/IonBuilder.h
@@ -235,16 +235,18 @@ class IonBuilder : public MIRGenerator
     ControlStatus doWhileLoop(JSOp op, jssrcnote *sn);
     ControlStatus tableSwitch(JSOp op, jssrcnote *sn);
 
     // Please see the Big Honkin' Comment about how resume points work in
     // IonBuilder.cpp, near the definition for this function.
     bool resumeAt(MInstruction *ins, jsbytecode *pc);
     bool resumeAfter(MInstruction *ins);
 
+    void initParameters();
+    void rewriteParameters();
     bool pushConstant(const Value &v);
     bool jsop_bitnot();
     bool jsop_bitop(JSOp op);
     bool jsop_binary(JSOp op);
     bool jsop_binary(JSOp op, MDefinition *left, MDefinition *right);
     bool jsop_neg();
     bool jsop_notearg();
     bool jsop_call(uint32 argc);
--- a/js/src/ion/IonLIR.h
+++ b/js/src/ion/IonLIR.h
@@ -550,19 +550,22 @@ class LInstructionVisitor;
 
 class LInstruction : public TempObject,
                      public InlineListNode<LInstruction>
 {
     uint32 id_;
     LSnapshot *snapshot_;
 
   protected:
+    MDefinition *mir_;
+
     LInstruction()
       : id_(0),
-        snapshot_(NULL)
+        snapshot_(NULL),
+        mir_(NULL)
     { }
 
   public:
     class InputIterator;
     enum Opcode {
 #   define LIROP(name) LOp_##name,
         LIR_OPCODE_LIST(LIROP)
 #   undef LIROP
@@ -595,16 +598,19 @@ class LInstruction : public TempObject,
     void setId(uint32 id) {
         JS_ASSERT(!id_);
         JS_ASSERT(id);
         id_ = id;
     }
     LSnapshot *snapshot() const {
         return snapshot_;
     }
+    void setMir(MDefinition *mir) {
+        mir_ = mir;
+    }
     void assignSnapshot(LSnapshot *snapshot);
 
     virtual void print(FILE *fp);
     virtual void printName(FILE *fp);
     virtual void printOperands(FILE *fp);
     virtual void printInfo(FILE *fp) {
     }
 
@@ -869,16 +875,19 @@ class LIRGraph
     js::Vector<Value, 0, SystemAllocPolicy> constantPool_;
     uint32 numVirtualRegisters_;
 
     // Number of stack slots needed for local spills.
     uint32 localSlotCount_;
     // Number of stack slots needed for argument construction for calls.
     uint32 argumentSlotCount_;
 
+    // Snapshot taken before any LIR has been lowered.
+    LSnapshot *entrySnapshot_;
+
     MIRGraph &mir_;
 
   public:
     LIRGraph(MIRGraph &mir);
 
     size_t numBlocks() const {
         return blocks_.length();
     }
@@ -918,16 +927,22 @@ class LIRGraph
         return constantPool_.length();
     }
     Value *constantPool() {
         return &constantPool_[0];
     }
     const Value &getConstant(size_t index) const {
         return constantPool_[index];
     }
+    void setEntrySnapshot(LSnapshot *snapshot) {
+        entrySnapshot_ = snapshot;
+    }
+    LSnapshot *entrySnapshot() const {
+        return entrySnapshot_;
+    }
 };
 
 LAllocation::LAllocation(const AnyRegister &reg)
 {
     if (reg.isFloat())
         *this = LFloatReg(reg.fpu());
     else
         *this = LGeneralReg(reg.gpr());
--- a/js/src/ion/IonMacroAssembler.cpp
+++ b/js/src/ion/IonMacroAssembler.cpp
@@ -34,16 +34,18 @@
  * use your version of this file under the terms of the MPL, indicate your
  * decision by deleting the provisions above and replace them with the notice
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
+#include "jsinfer.h"
+#include "jsinferinlines.h"
 #include "IonMacroAssembler.h"
 #include "MoveEmitter.h"
 
 using namespace js;
 using namespace js::ion;
 
 void
 MacroAssembler::setupAlignedABICall(uint32 args)
@@ -118,8 +120,64 @@ MacroAssembler::callWithABI(void *fun)
 
     freeStack(stackAdjust_);
     if (dynamicAlignment_)
         restoreStackFromDynamicAlignment();
 
     JS_ASSERT(inCall_);
     inCall_ = false;
 }
+
+void
+MacroAssembler::guardTypeSet(const Address &address, types::TypeSet *types,
+                             Register scratch, Label *mismatched)
+{
+    JS_ASSERT(!types->unknown());
+
+    Label matched;
+    Register tag = extractTag(address, scratch);
+
+    if (types->hasType(types::Type::DoubleType())) {
+        // The double type also implies Int32.
+        JS_ASSERT(types->hasType(types::Type::Int32Type()));
+        branchTestNumber(Equal, tag, &matched);
+    } else if (types->hasType(types::Type::Int32Type())) {
+        branchTestInt32(Equal, tag, &matched);
+    }
+
+    if (types->hasType(types::Type::UndefinedType()))
+        branchTestUndefined(Equal, tag, &matched);
+    if (types->hasType(types::Type::BooleanType()))
+        branchTestBoolean(Equal, tag, &matched);
+    if (types->hasType(types::Type::StringType()))
+        branchTestString(Equal, tag, &matched);
+    if (types->hasType(types::Type::NullType()))
+        branchTestNull(Equal, tag, &matched);
+
+    if (types->hasType(types::Type::AnyObjectType())) {
+        branchTestObject(Equal, tag, &matched);
+    } else if (types->getObjectCount()) {
+        branchTestObject(NotEqual, tag, mismatched);
+        Register obj = extractObject(address, scratch);
+
+        Label notSingleton;
+        branchTest32(Zero, Address(obj, offsetof(JSObject, flags)),
+                     Imm32(JSObject::SINGLETON_TYPE), &notSingleton);
+
+        unsigned count = types->getObjectCount();
+        for (unsigned i = 0; i < count; i++) {
+            if (JSObject *object = types->getSingleObject(i))
+                branchPtr(Equal, obj, ImmGCPtr(object), &matched);
+        }
+        jmp(mismatched);
+
+        bind(&notSingleton);
+        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
+        for (unsigned i = 0; i < count; i++) {
+            if (types::TypeObject *object = types->getTypeObject(i))
+                branchPtr(Equal, scratch, ImmGCPtr(object), &matched);
+        }
+    }
+
+    jmp(mismatched);
+    bind(&matched);
+}
+
--- a/js/src/ion/IonMacroAssembler.h
+++ b/js/src/ion/IonMacroAssembler.h
@@ -137,15 +137,20 @@ class MacroAssembler : public MacroAssem
     // in parallel immediately before performing the call. This process may
     // temporarily use more stack, in which case esp-relative addresses will be
     // automatically adjusted. It is extremely important that esp-relative
     // addresses are computed *after* setupABICall().
     void setABIArg(uint32 arg, const Register &reg);
 
     // Emits a call to a C/C++ function, resolving all argument moves.
     void callWithABI(void *fun);
+
+    // Emits a test of a value against all types in a TypeSet. A scratch
+    // register is required.
+    void guardTypeSet(const Address &address, types::TypeSet *types, Register scratch,
+                      Label *mismatched);
 };
 
 } // namespace ion
 } // namespace js
 
 #endif // jsion_macro_assembler_h__
 
--- a/js/src/ion/LIR-Common.h
+++ b/js/src/ion/LIR-Common.h
@@ -182,70 +182,70 @@ class LStackArg : public LInstructionHel
 
 // Generates a polymorphic callsite, wherein the function being called is
 // unknown and anticipated to vary.
 class LCallGeneric : public LInstructionHelper<BOX_PIECES, 1, 2>
 {
     // Slot below which %esp should be adjusted to make the call.
     // Zero for a function without arguments.
     uint32 argslot_;
-    MCall *mir_;
 
   public:
     LIR_HEADER(CallGeneric);
 
-    LCallGeneric(MCall *mir, const LAllocation &func,
+    LCallGeneric(const LAllocation &func,
                  uint32 argslot, const LDefinition &token,
                  const LDefinition &nargsreg)
-      : argslot_(argslot), mir_(mir)
+      : argslot_(argslot)
     {
         setOperand(0, func);
         setTemp(0, token);
         setTemp(1, nargsreg);
     }
 
     uint32 argslot() const {
         return argslot_;
     }
+    MCall *mir() const {
+        return mir_->toCall();
+    }
 
     uint32 nargs() const {
-        JS_ASSERT(mir_->argc() >= 1);
-        return mir_->argc() - 1; // |this| is not a formal argument.
+        JS_ASSERT(mir()->argc() >= 1);
+        return mir()->argc() - 1; // |this| is not a formal argument.
     }
 
     const LAllocation *getFunction() {
         return getOperand(0);
     }
     const LAllocation *getToken() {
         return getTemp(0)->output();
     }
     const LAllocation *getNargsReg() {
         return getTemp(1)->output();
     }
 };
 
 // Takes a tableswitch with an integer to decide
 class LTableSwitch : public LInstructionHelper<0, 1, 2>
 {
-    MTableSwitch *mir_;
-
   public:
     LIR_HEADER(TableSwitch);
 
     LTableSwitch(const LAllocation &in, const LDefinition &inputCopy,
-                 const LDefinition &jumpTablePointer, MTableSwitch *mir)
-      : mir_(mir)
+                 const LDefinition &jumpTablePointer, MTableSwitch *ins)
     {
         setOperand(0, in);
         setTemp(0, inputCopy);
         setTemp(1, jumpTablePointer);
+        setMir(ins);
     }
 
     MTableSwitch *mir() const {
-        return mir_;
+        return mir_->toTableSwitch();
     }
 
     const LAllocation *index() {
         return getOperand(0);
     }
     const LAllocation *tempInt() {
         return getTemp(0)->output();
     }
@@ -483,33 +483,31 @@ class LBitOp : public LInstructionHelper
         return op_;
     }
 };
 
 // Shift operation, taking two 32-bit integers as inputs and returning
 // a 32-bit integer result as an output.
 class LShiftOp : public LInstructionHelper<1, 2, 0>
 {
-    MInstruction *mir_;
     JSOp op_;
 
   public:
     LIR_HEADER(ShiftOp);
 
-    LShiftOp(MInstruction *mir, JSOp op)
-      : mir_(mir),
-        op_(op)
+    LShiftOp(JSOp op)
+      : op_(op)
     { }
 
     JSOp bitop() {
         return op_;
     }
 
     MInstruction *mir() {
-        return mir_;
+        return mir_->toInstruction();
     }
 };
 
 // Returns from the function being compiled (not used in inlined frames). The
 // input must be a box.
 class LReturn : public LInstructionHelper<0, BOX_PIECES, 0>
 {
   public:
@@ -543,27 +541,21 @@ class LSubI : public LBinaryMath<0>
 {
   public:
     LIR_HEADER(SubI);
 };
 
 // Adds two integers, returning an integer value.
 class LMulI : public LBinaryMath<0>
 {
-    MMul *mir_;
-
   public:
     LIR_HEADER(MulI);
 
-    LMulI(MMul *mir)
-      : mir_(mir)
-    { }
-
     MMul *mir() {
-        return mir_;
+        return mir_->toMul();
     }
 };
 
 // Performs an add, sub, mul, or div on two double values.
 class LMathD : public LBinaryMath<0>
 {
     JSOp jsop_;
 
@@ -664,16 +656,25 @@ class LTruncateDToInt32 : public LInstru
     const LAllocation *input() {
         return getOperand(0);
     }
     const LDefinition *output() {
         return getDef(0);
     }
 };
 
+// No-op instruction that is used to hold the entry snapshot. This simplifies
+// register allocation as it doesn't need to sniff the snapshot out of the
+// LIRGraph.
+class LStart : public LInstructionHelper<0, 0, 0>
+{
+  public:
+    LIR_HEADER(Start);
+};
+
 class MPhi;
 
 // Phi is a pseudo-instruction that emits no code, and is an annotation for the
 // register allocator. Like its equivalent in MIR, phis are collected at the
 // top of blocks and are meant to be executed in parallel, choosing the input
 // corresponding to the predecessor taken in the control flow graph.
 class LPhi : public LInstruction
 {
--- a/js/src/ion/LOpcodes.h
+++ b/js/src/ion/LOpcodes.h
@@ -67,17 +67,18 @@
     _(CompareDAndBranch)            \
     _(AddI)                         \
     _(SubI)                         \
     _(MulI)                         \
     _(MathD)                        \
     _(Int32ToDouble)                \
     _(ValueToDouble)                \
     _(ValueToInt32)                 \
-    _(TruncateDToInt32)
+    _(TruncateDToInt32)             \
+    _(Start)
 
 #if defined(JS_CPU_X86)
 # include "x86/LOpcodes-x86.h"
 #elif defined(JS_CPU_X64)
 # include "x64/LOpcodes-x64.h"
 #elif defined(JS_CPU_ARM)
 # include "arm/LOpcodes-arm.h"
 #endif
--- a/js/src/ion/Lowering.cpp
+++ b/js/src/ion/Lowering.cpp
@@ -42,16 +42,17 @@
 #include "IonLIR.h"
 #include "Lowering.h"
 #include "MIR.h"
 #include "MIRGraph.h"
 #include "IonSpewer.h"
 #include "jsbool.h"
 #include "jsnum.h"
 #include "shared/Lowering-shared-inl.h"
+#include "jsobjinlines.h"
 
 using namespace js;
 using namespace ion;
 
 bool
 LIRGenerator::visitParameter(MParameter *param)
 {
     ptrdiff_t offset;
@@ -151,17 +152,17 @@ LIRGenerator::visitCall(MCall *call)
     uint32 argc = call->argc();
     JS_ASSERT(call->getFunction()->type() == MIRType_Object);
 
     // Height of the current argument vector.
     uint32 argslot = getArgumentSlotForCall();
 
     // A call is entirely stateful, depending upon arguments already being
     // stored in an argument vector. Therefore visitCall() may be generic.
-    LCallGeneric *ins = new LCallGeneric(call, useRegister(call->getFunction()),
+    LCallGeneric *ins = new LCallGeneric(useRegister(call->getFunction()),
                                          argslot, temp(LDefinition::POINTER),
                                          temp(LDefinition::POINTER));
     if (!defineReturn(ins, call))
         return false;
     if (!assignSnapshot(ins))
         return false;
 
     freeArguments(argc);
@@ -307,17 +308,17 @@ LIRGenerator::visitBitXor(MBitXor *ins)
 
 bool
 LIRGenerator::lowerShiftOp(JSOp op, MInstruction *ins)
 {
     MDefinition *lhs = ins->getOperand(0);
     MDefinition *rhs = ins->getOperand(1);
 
     if (lhs->type() == MIRType_Int32 && rhs->type() == MIRType_Int32) {
-        LShiftOp *lir = new LShiftOp(ins, op);
+        LShiftOp *lir = new LShiftOp(op);
         if (op == JSOP_URSH) {
             MUrsh *ursh = ins->toUrsh();
             if (ursh->fallible() && !assignSnapshot(lir))
                 return false;
         }
         return lowerForShift(lir, ins, lhs, rhs);
     }
     JS_NOT_REACHED("NYI");
@@ -396,17 +397,17 @@ LIRGenerator::visitMul(MMul *ins)
 {
     MDefinition *lhs = ins->lhs();
     MDefinition *rhs = ins->rhs();
     JS_ASSERT(lhs->type() == rhs->type());
 
     if (ins->specialization() == MIRType_Int32) {
         JS_ASSERT(lhs->type() == MIRType_Int32);
         ReorderCommutative(&lhs, &rhs);
-        LMulI *lir = new LMulI(ins);
+        LMulI *lir = new LMulI;
         if (ins->fallible() && !assignSnapshot(lir))
             return false;
         return lowerForALU(lir, ins, lhs, rhs);
     }
     if (ins->specialization() == MIRType_Double) {
         JS_ASSERT(lhs->type() == MIRType_Double);
         return lowerForFPU(new LMathD(JSOP_MUL), ins, lhs, rhs);
     }
@@ -433,18 +434,22 @@ LIRGenerator::visitDiv(MDiv *ins)
 
     JS_NOT_REACHED("NYI");
     return false;
 }
 
 bool
 LIRGenerator::visitStart(MStart *start)
 {
-    // This is a no-op.
-    return true;
+    // Create a snapshot that captures the initial state of the function.
+    LStart *lir = new LStart;
+    if (!assignSnapshot(lir))
+        return false;
+    lirGraph_.setEntrySnapshot(lir->snapshot());
+    return add(lir);
 }
 
 bool
 LIRGenerator::visitToDouble(MToDouble *convert)
 {
     MDefinition *opd = convert->input();
 
     switch (opd->type()) {
--- a/js/src/ion/MIR.h
+++ b/js/src/ion/MIR.h
@@ -59,33 +59,17 @@
 namespace js {
 namespace ion {
 
 static const inline
 MIRType MIRTypeFromValue(const js::Value &vp)
 {
     if (vp.isDouble())
         return MIRType_Double;
-    switch (vp.extractNonDoubleType()) {
-      case JSVAL_TYPE_INT32:
-        return MIRType_Int32;
-      case JSVAL_TYPE_UNDEFINED:
-        return MIRType_Undefined;
-      case JSVAL_TYPE_STRING:
-        return MIRType_String;
-      case JSVAL_TYPE_BOOLEAN:
-        return MIRType_Boolean;
-      case JSVAL_TYPE_NULL:
-        return MIRType_Null;
-      case JSVAL_TYPE_OBJECT:
-        return MIRType_Object;
-      default:
-        JS_NOT_REACHED("unexpected jsval type");
-        return MIRType_None;
-    }
+    return MIRTypeFromValueType(vp.extractNonDoubleType());
 }
 
 #define MIR_FLAG_LIST(_)                                                        \
     _(InWorklist)                                                               \
     _(EmittedAtUses)                                                            \
     _(LoopInvariant)                                                            \
     _(Commutative)                                                              \
     _(Idempotent)    /* The instruction has no side-effects. */                 \
@@ -1017,29 +1001,40 @@ class MBox : public MUnaryInstruction
     }
 };
 
 // Takes a typed value and checks if it is a certain type. If so, the payload
 // is unpacked and returned as that type. Otherwise, it is considered a
 // deoptimization.
 class MUnbox : public MUnaryInstruction
 {
-    MUnbox(MDefinition *ins, MIRType type)
-      : MUnaryInstruction(ins)
+    bool checkType_;
+
+    MUnbox(MDefinition *ins, MIRType type, bool checkType)
+      : MUnaryInstruction(ins),
+        checkType_(checkType)
     {
         JS_ASSERT(ins->type() == MIRType_Value);
         setResultType(type);
         setIdempotent();
     }
 
   public:
     INSTRUCTION_HEADER(Unbox);
     static MUnbox *New(MDefinition *ins, MIRType type)
     {
-        return new MUnbox(ins, type);
+        return new MUnbox(ins, type, true);
+    }
+    static MUnbox *NewUnchecked(MDefinition *ins, MIRType type)
+    {
+        return new MUnbox(ins, type, false);
+    }
+
+    bool checkType() const {
+        return checkType_;
     }
 };
 
 // Passes an MDefinition to an MCall. Must occur between an MPrepareCall and
 // MCall. Boxes the input and stores it to the correct location on stack.
 //
 // Arguments are *not* simply pushed onto a call stack: they are evaluated
 // left-to-right, but stored in the arg vector in C-style, right-to-left.
--- a/js/src/ion/MIRGraph.cpp
+++ b/js/src/ion/MIRGraph.cpp
@@ -250,17 +250,17 @@ MBasicBlock::setSlot(uint32 slot, MDefin
 //
 // We assume that the only way such copies can be created is via simple
 // assignment, like (x = y), which will be reflected in the bytecode via
 // a GET[LOCAL,ARG] that inherits into a SET[LOCAL,ARG]. Normal calls
 // to push() will be compiler-created temporaries. So to minimize creation of
 // new SSA names, we lazily create them when applying a setVariable() whose
 // stack top was pushed by a pushVariable(). That also means we do not create
 // "copies" for calls to push().
-bool
+void
 MBasicBlock::setVariable(uint32 index)
 {
     JS_ASSERT(stackPosition_ > gen()->firstStackSlot());
     StackSlot &top = slots_[stackPosition_ - 1];
 
     MDefinition *def = top.def;
     if (top.isCopy()) {
         // Set the local variable to be a copy of |def|. Note that unlike
@@ -285,32 +285,36 @@ MBasicBlock::setVariable(uint32 index)
         // In this case, we want the second assignment to act as though there
         // was an intervening POP; GETLOCAL. Note that |def| is already
         // correct, because we only created a new instruction if |top.isCopy()|
         // was true.
         top.copyOf = index;
         top.nextCopy = slots_[index].firstCopy;
         slots_[index].firstCopy = stackPosition_ - 1;
     }
-
-    return true;
 }
 
-bool
+void
 MBasicBlock::setArg(uint32 arg)
 {
     // :TODO:  assert not closed
-    return setVariable(gen()->argSlot(arg));
+    setVariable(gen()->argSlot(arg));
 }
 
-bool
+void
 MBasicBlock::setLocal(uint32 local)
 {
     // :TODO:  assert not closed
-    return setVariable(gen()->localSlot(local));
+    setVariable(gen()->localSlot(local));
+}
+
+void
+MBasicBlock::rewriteSlot(uint32 slot, MDefinition *ins)
+{
+    setSlot(slot, ins);
 }
 
 void
 MBasicBlock::push(MDefinition *ins)
 {
     JS_ASSERT(stackPosition_ < gen()->nslots());
     slots_[stackPosition_].set(ins);
     stackPosition_++;
--- a/js/src/ion/MIRGraph.h
+++ b/js/src/ion/MIRGraph.h
@@ -111,17 +111,17 @@ class MBasicBlock : public TempObject, p
     // Pushes a copy of a slot.
     void pushCopy(uint32 slot);
 
     // Pushes a copy of a local variable or argument.
     void pushVariable(uint32 slot);
 
     // Sets a variable slot to the top of the stack, correctly creating copies
     // as needed.
-    bool setVariable(uint32 slot);
+    void setVariable(uint32 slot);
 
   public:
     ///////////////////////////////////////////////////////
     ////////// BEGIN GRAPH BUILDING INSTRUCTIONS //////////
     ///////////////////////////////////////////////////////
 
     // Creates a new basic block for a MIR generator. If |pred| is not NULL,
     // its slots and stack depth are initialized from |pred|.
@@ -137,18 +137,22 @@ class MBasicBlock : public TempObject, p
     MDefinition *peek(int32 depth);
 
     // Initializes a slot value; must not be called for normal stack
     // operations, as it will not create new SSA names for copies.
     void initSlot(uint32 index, MDefinition *ins);
 
     // Sets the instruction associated with various slot types. The
     // instruction must lie at the top of the stack.
-    bool setLocal(uint32 local);
-    bool setArg(uint32 arg);
+    void setLocal(uint32 local);
+    void setArg(uint32 arg);
+
+    // Rewrites a slot directly, bypassing the stack transition. This should
+    // not be used under most circumstances.
+    void rewriteSlot(uint32 slot, MDefinition *ins);
 
     // Tracks an instruction as being pushed onto the operand stack.
     void push(MDefinition *ins);
     void pushArg(uint32 arg);
     void pushLocal(uint32 local);
 
     // Returns the top of the stack, then decrements the virtual stack pointer.
     MDefinition *pop();
@@ -428,16 +432,19 @@ class MIRGraph
         // This intentionally starts above 0. The id 0 is in places used to
         // indicate a failure to perform an operation on an instruction.
         idGen_ += 2;
         ins->setId(idGen_);
     }
     uint32 getMaxInstructionId() {
         return idGen_;
     }
+    MResumePoint *entryResumePoint() {
+        return blocks_.begin()->entryResumePoint();
+    }
 };
 
 class MDefinitionIterator
 {
 
   friend class MBasicBlock;
 
   private:
--- a/js/src/ion/TypeOracle.h
+++ b/js/src/ion/TypeOracle.h
@@ -115,16 +115,40 @@ class TypeInferenceOracle : public TypeO
     bool init(JSContext *cx, JSScript *script);
 
     Unary unaryOp(JSScript *script, jsbytecode *pc);
     Binary binaryOp(JSScript *script, jsbytecode *pc);
     types::TypeSet *thisTypeSet(JSScript *script);
     types::TypeSet *parameterTypeSet(JSScript *script, size_t index);
 };
 
+static inline MIRType
+MIRTypeFromValueType(JSValueType type)
+{
+    switch (type) {
+      case JSVAL_TYPE_DOUBLE:
+        return MIRType_Double;
+      case JSVAL_TYPE_INT32:
+        return MIRType_Int32;
+      case JSVAL_TYPE_UNDEFINED:
+        return MIRType_Undefined;
+      case JSVAL_TYPE_STRING:
+        return MIRType_String;
+      case JSVAL_TYPE_BOOLEAN:
+        return MIRType_Boolean;
+      case JSVAL_TYPE_NULL:
+        return MIRType_Null;
+      case JSVAL_TYPE_OBJECT:
+        return MIRType_Object;
+      default:
+        JS_NOT_REACHED("unexpected jsval type");
+        return MIRType_None;
+    }
+}
+
 static inline JSValueType
 ValueTypeFromMIRType(MIRType type)
 {
   switch (type) {
     case MIRType_Undefined:
       return JSVAL_TYPE_UNDEFINED;
     case MIRType_Null:
       return JSVAL_TYPE_NULL;
--- a/js/src/ion/arm/Architecture-arm.h
+++ b/js/src/ion/arm/Architecture-arm.h
@@ -144,16 +144,20 @@ class FloatRegisters
     static const uint32 AllMask = (1 << Total) - 1;
 
     static const uint32 VolatileMask = AllMask;
     static const uint32 NonVolatileMask = 0;
 
     static const uint32 NonAllocatableMask =
         // the scratch float register for ARM.
         (1 << JSC::ARMRegisters::SD0);
+
+    // Registers that can be allocated without being saved, generally.
+    static const uint32 TempMask = VolatileMask & ~NonAllocatableMask;
+
     static const uint32 AllocatableMask = AllMask & ~NonAllocatableMask;
     static const uint32 JSCallClobberMask = AllocatableMask;
 
 };
 
 bool hasMOVWT();
 bool hasVFPv3();
 
--- a/js/src/ion/shared/Assembler-shared.h
+++ b/js/src/ion/shared/Assembler-shared.h
@@ -74,16 +74,27 @@ struct ImmGCPtr
     uintptr_t value;
 
     ImmGCPtr(uintptr_t value) : value(value)
     { }
     ImmGCPtr(void *ptr) : value(reinterpret_cast<uintptr_t>(ptr))
     { }
 };
 
+// Specifies an address computed in the form of a register base and a constant,
+// 32-bit offset.
+struct Address
+{
+    Register base;
+    int32 offset;
+
+    Address(Register base, int32 offset) : base(base), offset(offset)
+    { }
+};
+
 class Relocation {
   public:
     enum Kind {
         // The target will never move, so patching is only needed if the source
         // buffer is moved.
         EXTERNAL,
 
         // The target could move, so patching may be needed. The actual
--- a/js/src/ion/shared/Assembler-x86-shared.h
+++ b/js/src/ion/shared/Assembler-x86-shared.h
@@ -379,26 +379,26 @@ class AssemblerX86Shared
         }
     }
     void setCC(Condition cond, const Register &r) {
         masm.setCC_r(static_cast<JSC::X86Assembler::Condition>(cond), r.code());
     }
     void testl(const Register &lhs, const Register &rhs) {
         masm.testl_rr(rhs.code(), lhs.code());
     }
-    void testl(Imm32 lhs, const Register &rhs) {
-        masm.testl_i32r(lhs.value, rhs.code());
+    void testl(const Register &lhs, Imm32 rhs) {
+        masm.testl_i32r(rhs.value, lhs.code());
     }
-    void testl(Imm32 lhs, const Operand &rhs) {
-        switch (rhs.kind()) {
+    void testl(const Operand &lhs, Imm32 rhs) {
+        switch (lhs.kind()) {
           case Operand::REG:
-            masm.testl_i32r(lhs.value, rhs.reg());
+            masm.testl_i32r(rhs.value, lhs.reg());
             break;
           case Operand::REG_DISP:
-            masm.testl_i32m(lhs.value, rhs.disp(), rhs.base());
+            masm.testl_i32m(rhs.value, lhs.disp(), lhs.base());
             break;
           default:
             JS_NOT_REACHED("unexpected operand kind");
             break;
         }
     }
 
     void addl(Imm32 imm, const Register &dest) {
--- a/js/src/ion/shared/CodeGenerator-shared.cpp
+++ b/js/src/ion/shared/CodeGenerator-shared.cpp
@@ -65,22 +65,16 @@ CodeGeneratorShared::generateOutOfLineCo
         if (!outOfLineCode_[i]->generate(this))
             return false;
     }
 
     return true;
 }
 
 bool
-CodeGeneratorShared::visitParameter(LParameter *param)
-{
-    return true;
-}
-
-bool
 CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code)
 {
     return outOfLineCode_.append(code);
 }
 
 bool
 CodeGeneratorShared::encode(LSnapshot *snapshot)
 {
--- a/js/src/ion/shared/CodeGenerator-shared.h
+++ b/js/src/ion/shared/CodeGenerator-shared.h
@@ -137,19 +137,16 @@ class CodeGeneratorShared : public LInst
     bool addOutOfLineCode(OutOfLineCode *code);
     bool generateOutOfLineCode();
 
     void linkAbsoluteLabels() {
     }
 
   public:
     CodeGeneratorShared(MIRGenerator *gen, LIRGraph &graph);
-
-    // Opcodes that are the same on all platforms.
-    virtual bool visitParameter(LParameter *param);
 };
 
 // Wrapper around Label, on the heap, to avoid a bogus assert with OOM.
 struct HeapLabel
   : public TempObject,
     public Label
 {
 };
--- a/js/src/ion/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/ion/shared/CodeGenerator-x86-shared.cpp
@@ -304,17 +304,17 @@ template <typename T> bool
 CodeGeneratorX86Shared::bailout(const T &binder, LSnapshot *snapshot)
 {
     if (!encode(snapshot))
         return false;
 
     // Though the assembler doesn't track all frame pushes, at least make sure
     // the known value makes sense. We can't use bailout tables if the stack
     // isn't properly aligned to the static frame size.
-    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
+    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
                  frameClass_.frameSize() == masm.framePushed());
 
 #ifdef JS_CPU_X86
     // On x64, bailout tables are pointless, because 16 extra bytes are
     // reserved per external jump, whereas it takes only 10 bytes to encode a
     // a non-table based bailout.
     if (assignBailoutId(snapshot)) {
         binder(masm, deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE);
--- a/js/src/ion/shared/Lowering-shared-inl.h
+++ b/js/src/ion/shared/Lowering-shared-inl.h
@@ -75,16 +75,17 @@ LIRGeneratorShared::define(LInstructionH
     uint32 vreg = getVirtualRegister();
     if (vreg >= MAX_VIRTUAL_REGISTERS)
         return false;
 
     // Assign the definition and a virtual register. Then, propagate this
     // virtual register to the MIR, so we can map MIR to LIR during lowering.
     lir->setDef(0, def);
     lir->getDef(0)->setVirtualRegister(vreg);
+    lir->setMir(mir);
     mir->setVirtualRegister(vreg);
     return add(lir);
 }
 
 template <size_t X, size_t Y> bool
 LIRGeneratorShared::define(LInstructionHelper<1, X, Y> *lir, MDefinition *mir, LDefinition::Policy policy)
 {
     LDefinition::Type type = LDefinition::TypeFrom(mir->type());
@@ -108,16 +109,17 @@ LIRGeneratorShared::defineBox(LInstructi
 #if defined(JS_NUNBOX32)
     lir->setDef(0, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE, policy));
     lir->setDef(1, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD, policy));
     if (getVirtualRegister() >= MAX_VIRTUAL_REGISTERS)
         return false;
 #elif defined(JS_PUNBOX64)
     lir->setDef(0, LDefinition(vreg, LDefinition::BOX, policy));
 #endif
+    lir->setMir(mir);
 
     mir->setVirtualRegister(vreg);
     return add(lir);
 }
 
 template <size_t Ops, size_t Temps> bool
 LIRGeneratorShared::defineReturn(LInstructionHelper<BOX_PIECES, Ops, Temps> *lir, MDefinition *mir)
 {
--- a/js/src/ion/shared/MacroAssembler-x86-shared.h
+++ b/js/src/ion/shared/MacroAssembler-x86-shared.h
@@ -87,16 +87,21 @@ class MacroAssemblerX86Shared : public A
             return (compare == JSOP_GT) ? Above : AboveOrEqual;
 
           default:
             JS_NOT_REACHED("unexpected opcode kind");
             return Parity;
         }
     }
 
+    void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
+        testl(Operand(address.base, address.offset), imm);
+        j(cond, label);
+    }
+
     // The following functions are exposed for use in platform-shared code.
     void Push(const Register &reg) {
         push(reg);
         framePushed_ += STACK_SLOT_SIZE;
     }
     uint32 framePushed() const {
         return framePushed_;
     }
--- a/js/src/ion/x64/Architecture-x64.h
+++ b/js/src/ion/x64/Architecture-x64.h
@@ -111,16 +111,19 @@ class Registers {
         (1 << JSC::X86Registers::r15);
 
     static const uint32 SingleByteRegs = VolatileMask | NonVolatileMask;
 
     static const uint32 NonAllocatableMask =
         (1 << JSC::X86Registers::esp) |
         (1 << JSC::X86Registers::r11);      // This is ScratchReg.
 
+    // Registers that can be allocated without being saved, generally.
+    static const uint32 TempMask = VolatileMask & ~NonAllocatableMask;
+
     static const uint32 AllocatableMask = AllMask & ~NonAllocatableMask;
 
     static const uint32 JSCallClobberMask =
         AllocatableMask & ~(1 << JSC::X86Registers::ecx);
 };
 
 class FloatRegisters {
   public:
--- a/js/src/ion/x64/Assembler-x64.h
+++ b/js/src/ion/x64/Assembler-x64.h
@@ -123,32 +123,37 @@ class Operand
 
     Kind kind_ : 2;
     int32 base_ : 5;
     Scale scale_ : 2;
     int32 disp_;
     int32 index_ : 5;
 
   public:
-    explicit Operand(const Register &reg)
+    explicit Operand(Register reg)
       : kind_(REG),
         base_(reg.code())
     { }
-    explicit Operand(const FloatRegister &reg)
+    explicit Operand(FloatRegister reg)
       : kind_(FPREG),
         base_(reg.code())
     { }
-    explicit Operand(const Register &base, const Register &index, Scale scale, int32 disp = 0)
+    explicit Operand(const Address &address)
+      : kind_(REG_DISP),
+        base_(address.base.code()),
+        disp_(address.offset)
+    { }
+    Operand(Register base, Register index, Scale scale, int32 disp = 0)
       : kind_(SCALE),
         base_(base.code()),
         scale_(scale),
         disp_(disp),
         index_(index.code())
     { }
-    Operand(const Register &reg, int32 disp)
+    Operand(Register reg, int32 disp)
       : kind_(REG_DISP),
         base_(reg.code()),
         disp_(disp)
     { }
 
     Kind kind() const {
         return kind_;
     }
@@ -415,21 +420,21 @@ class Assembler : public AssemblerX86Sha
     }
     void cmpq(const Register &lhs, const Register &rhs) {
         masm.cmpq_rr(rhs.code(), lhs.code());
     }
     void cmpq(Imm32 lhs, const Register &rhs) {
         masm.cmpq_ir(lhs.value, rhs.code());
     }
     
-    void testq(Imm32 lhs, const Register &rhs) {
-        masm.testq_i32r(lhs.value, rhs.code());
+    void testq(const Register &lhs, Imm32 rhs) {
+        masm.testq_i32r(rhs.value, lhs.code());
     }
     void testq(const Register &lhs, const Register &rhs) {
-        masm.testq_rr(lhs.code(), rhs.code());
+        masm.testq_rr(rhs.code(), lhs.code());
     }
 
     void jmp(void *target, Relocation::Kind reloc) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
     }
     void j(Condition cond, void *target, Relocation::Kind reloc) {
         JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
--- a/js/src/ion/x64/CodeGenerator-x64.cpp
+++ b/js/src/ion/x64/CodeGenerator-x64.cpp
@@ -117,53 +117,55 @@ CodeGeneratorX64::visitBox(LBox *box)
         masm.orq(ToOperand(in), ToRegister(result));
     } else {
         masm.movqsd(ToFloatRegister(in), ToRegister(result));
     }
     return true;
 }
 
 bool
-CodeGeneratorX64::visitUnboxInteger(LUnboxInteger *unbox)
+CodeGeneratorX64::visitUnbox(LUnbox *unbox)
 {
-    const ValueOperand value = ToValue(unbox, LUnboxInteger::Input);
+    const ValueOperand value = ToValue(unbox, LUnbox::Input);
     const LDefinition *result = unbox->output();
-
-    Assembler::Condition cond = masm.testInt32(Assembler::NotEqual, value);
-    if (!bailoutIf(cond, unbox->snapshot()))
-        return false;
-    masm.unboxInt32(value, ToRegister(result));
-
-    return true;
-}
-
-bool
-CodeGeneratorX64::visitUnboxDouble(LUnboxDouble *unbox)
-{
-    const ValueOperand value = ToValue(unbox, LUnboxDouble::Input);
-    const LDefinition *result = unbox->output();
+    MUnbox *mir = unbox->mir();
 
-    Assembler::Condition cond = masm.testDouble(Assembler::NotEqual, value);
-    if (!bailoutIf(cond, unbox->snapshot()))
-        return false;
-    masm.unboxDouble(value, ToFloatRegister(result));
-
-    return true;
-}
+    if (mir->checkType()) {
+        Assembler::Condition cond;
+        switch (mir->type()) {
+          case MIRType_Int32:
+            cond = masm.testInt32(Assembler::NotEqual, value);
+            break;
+          case MIRType_Double:
+            cond = masm.testDouble(Assembler::NotEqual, value);
+            break;
+          case MIRType_Object:
+            cond = masm.testObject(Assembler::NotEqual, value);
+            break;
+          default:
+            JS_NOT_REACHED("NYI");
+            return false;
+        }
+        if (!bailoutIf(cond, unbox->snapshot()))
+            return false;
+    }
 
-bool
-CodeGeneratorX64::visitUnboxObject(LUnboxObject *unbox)
-{
-    const ValueOperand value = ToValue(unbox, LUnboxObject::Input);
-    const LDefinition *object = unbox->output();
-
-    Assembler::Condition cond = masm.testObject(Assembler::NotEqual, value);
-    if (!bailoutIf(cond, unbox->snapshot()))
-        return false;
-    masm.unboxObject(value, ToRegister(object));
+    switch (mir->type()) {
+      case MIRType_Int32:
+        masm.unboxInt32(value, ToRegister(result));
+        break;
+      case MIRType_Double:
+        masm.unboxDouble(value, ToFloatRegister(result));
+        break;
+      case MIRType_Object:
+        masm.unboxObject(value, ToRegister(result));
+        break;
+      default:
+        JS_NOT_REACHED("NYI");
+    }
     
     return true;
 }
 
 bool
 CodeGeneratorX64::visitReturn(LReturn *ret)
 {
 #ifdef DEBUG
@@ -171,23 +173,16 @@ CodeGeneratorX64::visitReturn(LReturn *r
     JS_ASSERT(ToRegister(result) == JSReturnReg);
 #endif
     // Don't emit a jump to the return label if this is the last block.
     if (current->mir() != *gen->graph().poBegin())
         masm.jmp(returnLabel_);
     return true;
 }
 
-Register
-CodeGeneratorX64::splitTagForTest(const ValueOperand &value)
-{
-    masm.splitTag(value, ScratchReg);
-    return ScratchReg;
-}
-
 Assembler::Condition
 CodeGeneratorX64::testStringTruthy(bool truthy, const ValueOperand &value)
 {
     masm.unboxString(value, ScratchReg);
 
     Operand lengthAndFlags(ScratchReg, JSString::offsetOfLengthAndFlags());
     masm.movq(lengthAndFlags, ScratchReg);
     masm.shrq(Imm32(JSString::LENGTH_SHIFT), ScratchReg);
--- a/js/src/ion/x64/CodeGenerator-x64.h
+++ b/js/src/ion/x64/CodeGenerator-x64.h
@@ -53,30 +53,27 @@ class CodeGeneratorX64 : public CodeGene
     CodeGeneratorX64 *thisFromCtor() {
         return this;
     }
 
   protected:
     ValueOperand ToValue(LInstruction *ins, size_t pos);
 
     // This returns the tag in ScratchReg.
-    Register splitTagForTest(const ValueOperand &value);
     Assembler::Condition testStringTruthy(bool truthy, const ValueOperand &value);
 
   public:
     CodeGeneratorX64(MIRGenerator *gen, LIRGraph &graph);
 
   public:
     bool visitValue(LValue *value);
     bool visitReturn(LReturn *ret);
     bool visitStackArg(LStackArg *arg);
     bool visitBox(LBox *box);
-    bool visitUnboxInteger(LUnboxInteger *unbox);
-    bool visitUnboxDouble(LUnboxDouble *unbox);
-    bool visitUnboxObject(LUnboxObject *unbox);
+    bool visitUnbox(LUnbox *unbox);
     bool visitDouble(LDouble *ins);
 };
 
 typedef CodeGeneratorX64 CodeGeneratorSpecific;
 
 } // ion
 } // js
 
--- a/js/src/ion/x64/LIR-x64.h
+++ b/js/src/ion/x64/LIR-x64.h
@@ -61,93 +61,35 @@ class LBox : public LInstructionHelper<1
         setOperand(0, payload);
     }
 
     MIRType type() const {
         return type_;
     }
 };
 
-// Given an untyped input, guards on whether it's an integer and returns an
-// integer payload.
-class LUnboxBoolean : public LInstructionHelper<1, 1, 1>
+// Given an untyped input, guards on whether it's a specific type and returns
+// the unboxed payload.
+class LUnbox : public LInstructionHelper<1, 1, 0>
 {
   public:
-    LIR_HEADER(UnboxBoolean);
-
-    LUnboxBoolean(const LAllocation &input, const LDefinition &temp) {
-        setOperand(0, input);
-        setTemp(0, temp);
-    }
-};
+    LIR_HEADER(Unbox);
 
-// Given an untyped input, guards on whether it's an integer and returns an
-// integer payload.
-class LUnboxInteger : public LInstructionHelper<1, 1, 0>
-{
-  public:
-    LIR_HEADER(UnboxInteger);
-
-    LUnboxInteger(const LAllocation &input) {
+    LUnbox(const LAllocation &input)
+    {
         setOperand(0, input);
     }
 
     static const size_t Input = 0;
 
     const LDefinition *output() {
         return getDef(0);
     }
-};
-
-// Given an untyped input, guards on whether it's a double and returns a double
-// payload.
-class LUnboxDouble : public LInstructionHelper<1, 1, 0>
-{
-  public:
-    LIR_HEADER(UnboxDouble);
-
-    LUnboxDouble(const LAllocation &input) {
-        setOperand(0, input);
-    }
-
-    static const size_t Input = 0;
-
-    const LDefinition *output() {
-        return getDef(0);
-    }
-};
-
-// Given an untyped input, guards on whether it's an object and returns the
-// pointer.
-class LUnboxObject : public LInstructionHelper<1, 1, 0>
-{
-  public:
-    LIR_HEADER(UnboxObject);
-
-    LUnboxObject(const LAllocation &input) {
-        setOperand(0, input);
-    }
-
-    static const size_t Input = 0;
-
-    const LDefinition *output() {
-        return getDef(0);
-    }
-};
-
-// Given an untyped input, guards on whether it's a string and returns the
-// pointer.
-class LUnboxString : public LInstructionHelper<1, 1, 1>
-{
-  public:
-    LIR_HEADER(UnboxString);
-
-    LUnboxString(const LAllocation &input, const LDefinition &temp) {
-        setOperand(0, input);
-        setTemp(0, temp);
+    MUnbox *mir() const {
+        return mir_->toUnbox();
     }
 };
 
 // Constant double.
 class LDouble : public LInstructionHelper<1, 0, 0>
 {
     double d_;
 
--- a/js/src/ion/x64/LOpcodes-x64.h
+++ b/js/src/ion/x64/LOpcodes-x64.h
@@ -39,17 +39,13 @@
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef jsion_lir_opcodes_x64_h__
 #define jsion_lir_opcodes_x64_h__
 
 #define LIR_CPU_OPCODE_LIST(_)      \
     _(Box)                          \
-    _(UnboxBoolean)                 \
-    _(UnboxInteger)                 \
-    _(UnboxDouble)                  \
-    _(UnboxObject)                  \
-    _(UnboxString)                  \
+    _(Unbox)                        \
     _(DivI)
 
 #endif // jsion_lir_opcodes_x64_h__
 
--- a/js/src/ion/x64/Lowering-x64.cpp
+++ b/js/src/ion/x64/Lowering-x64.cpp
@@ -91,47 +91,22 @@ LIRGeneratorX64::visitBox(MBox *box)
     LBox *ins = new LBox(opd->type(), useRegister(opd));
     return define(ins, box, LDefinition(LDefinition::BOX));
 }
 
 bool
 LIRGeneratorX64::visitUnbox(MUnbox *unbox)
 {
     MDefinition *box = unbox->getOperand(0);
+    LUnbox *lir = new LUnbox(useRegister(box));
 
-    switch (unbox->type()) {
-      // Integers, booleans, and strings both need two outputs: the payload
-      // and the type, the type of which is temporary and thrown away.
-      case MIRType_Boolean: {
-        LUnboxBoolean *ins = new LUnboxBoolean(useRegister(box), temp(LDefinition::INTEGER));
-        return define(ins, unbox) && assignSnapshot(ins);
-      }
-      case MIRType_Int32: {
-        LUnboxInteger *ins = new LUnboxInteger(useRegister(box));
-        return define(ins, unbox) && assignSnapshot(ins);
-      }
-      case MIRType_String: {
-        LUnboxString *ins = new LUnboxString(useRegister(box), temp(LDefinition::INTEGER));
-        return define(ins, unbox) && assignSnapshot(ins);
-      }
-      case MIRType_Object: {
-        // Objects don't need a temporary.
-        LDefinition out(LDefinition::POINTER);
-        LUnboxObject *ins = new LUnboxObject(useRegister(box));
-        return define(ins, unbox, out) && assignSnapshot(ins);
-      }
-      case MIRType_Double: {
-        LUnboxDouble *ins = new LUnboxDouble(useRegister(box));
-        return define(ins, unbox) && assignSnapshot(ins);
-      }
-      default:
-        JS_NOT_REACHED("cannot unbox a value with no payload");
-    }
+    if (unbox->checkType() && !assignSnapshot(lir))
+        return false;
 
-    return false;
+    return define(lir, unbox);
 }
 
 bool
 LIRGeneratorX64::visitReturn(MReturn *ret)
 {
     MDefinition *opd = ret->getOperand(0);
     JS_ASSERT(opd->type() == MIRType_Value);
 
@@ -152,22 +127,16 @@ LIRGeneratorX64::assignSnapshot(LInstruc
         LAllocation *a = snapshot->getEntry(i);
 
         if (def->isUnused()) {
             *a = LConstantIndex::Bogus();
             continue;
         }
 
         *a = useKeepaliveOrConstant(def);
-#ifdef DEBUG
-        if (a->isUse()) {
-            for (size_t j = 0; j < ins->numDefs(); j++)
-                JS_ASSERT(ins->getDef(j)->virtualRegister() != a->toUse()->virtualRegister());
-        }
-#endif
     }
 
     ins->assignSnapshot(snapshot);
     return true;
 }
 
 bool
 LIRGeneratorX64::lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs, MDefinition *rhs)
--- a/js/src/ion/x64/MacroAssembler-x64.h
+++ b/js/src/ion/x64/MacroAssembler-x64.h
@@ -102,46 +102,51 @@ class MacroAssemblerX64 : public MacroAs
     }
     void movePtr(Operand op, const Register &dest) {
         movq(op, dest);
     }
     void moveValue(const Value &val, const Register &dest) {
         movq(ImmWord((void *)val.asRawBits()), dest);
     }
 
-    Condition testUndefined(Condition cond, const Register &tag) {
+    Condition testUndefined(Condition cond, Register tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_UNDEFINED));
         return cond;
     }
-    Condition testInt32(Condition cond, const Register &tag) {
+    Condition testInt32(Condition cond, Register tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_INT32));
         return cond;
     }
-    Condition testBoolean(Condition cond, const Register &tag) {
+    Condition testBoolean(Condition cond, Register tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_BOOLEAN));
         return cond;
     }
-    Condition testNull(Condition cond, const Register &tag) {
+    Condition testNull(Condition cond, Register tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_NULL));
         return cond;
     }
-    Condition testString(Condition cond, const Register &tag) {
+    Condition testString(Condition cond, Register tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_STRING));
         return cond;
     }
-    Condition testObject(Condition cond, const Register &tag) {
+    Condition testObject(Condition cond, Register tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_OBJECT));
         return cond;
     }
+    Condition testNumber(Condition cond, Register tag) {
+        JS_ASSERT(cond == Equal || cond == NotEqual);
+        cmpl(tag, Imm32(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+        return cond == Equal ? BelowOrEqual : Above;
+    }
     Condition testUndefined(Condition cond, const ValueOperand &src) {
         splitTag(src, ScratchReg);
         return testUndefined(cond, ScratchReg);
     }
     Condition testInt32(Condition cond, const ValueOperand &src) {
         splitTag(src, ScratchReg);
         return testInt32(cond, ScratchReg);
     }
@@ -163,99 +168,129 @@ class MacroAssemblerX64 : public MacroAs
         splitTag(src, ScratchReg);
         return testString(cond, ScratchReg);
     }
     Condition testObject(Condition cond, const ValueOperand &src) {
         splitTag(src, ScratchReg);
         return testObject(cond, ScratchReg);
     }
 
+    void cmpPtr(const Register &lhs, const ImmWord rhs) {
+        JS_ASSERT(lhs != ScratchReg);
+        movq(rhs, ScratchReg);
+        return cmpq(lhs, ScratchReg);
+    }
+    void cmpPtr(const Register &lhs, const ImmGCPtr rhs) {
+        JS_ASSERT(lhs != ScratchReg);
+        movq(rhs, ScratchReg);
+        return cmpq(lhs, ScratchReg);
+    }
+    void testPtr(const Register &lhs, const Register &rhs) {
+        testq(lhs, rhs);
+    }
+
     /////////////////////////////////////////////////////////////////
     // Common interface.
     /////////////////////////////////////////////////////////////////
     void reserveStack(uint32 amount) {
         if (amount)
             subq(Imm32(amount), StackPointer);
         framePushed_ += amount;
     }
     void freeStack(uint32 amount) {
         JS_ASSERT(amount <= framePushed_);
         if (amount)
             addq(Imm32(amount), StackPointer);
         framePushed_ -= amount;
     }
 
-    void cmpPtr(const Register &lhs, const ImmWord rhs) {
-        JS_ASSERT(lhs != ScratchReg);
-        movq(rhs, ScratchReg);
-        return cmpq(lhs, ScratchReg);
-    }
-    void testPtr(const Register &lhs, const Register &rhs) {
-        return testq(lhs, rhs);
-    }
-
     void addPtr(Imm32 imm, const Register &dest) {
         addq(imm, dest);
     }
     void subPtr(Imm32 imm, const Register &dest) {
         subq(imm, dest);
     }
 
-    void movePtr(ImmWord imm, const Register &dest) {
+    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
+        cmpPtr(lhs, ptr);
+        j(cond, label);
+    }
+
+    void movePtr(ImmWord imm, Register dest) {
         movq(imm, dest);
     }
+    void loadPtr(const Address &address, Register dest) {
+        movq(Operand(address), dest);
+    }
     void setStackArg(const Register &reg, uint32 arg) {
         movq(reg, Operand(rsp, (arg - NumArgRegs) * STACK_SLOT_SIZE + ShadowStackSpace));
     }
     void checkCallAlignment() {
 #ifdef DEBUG
         Label good;
         movl(rsp, rax);
-        testq(Imm32(StackAlignment - 1), rax);
+        testq(rax, Imm32(StackAlignment - 1));
         j(Equal, &good);
         breakpoint();
         bind(&good);
 #endif
     }
 
+    void splitTag(Register src, Register dest) {
+        if (src != dest)
+            movq(src, dest);
+        shrq(Imm32(JSVAL_TAG_SHIFT), dest);
+    }
+
     void splitTag(const ValueOperand &operand, const Register &dest) {
-        movq(operand.value(), ScratchReg);
-        shrq(Imm32(JSVAL_TAG_SHIFT), ScratchReg);
+        JS_ASSERT(operand.valueReg() != dest);
+        splitTag(operand.valueReg(), dest);
+    }
+
+    // Extracts the tag of a value and places it in ScratchReg.
+    Register splitTagForTest(const ValueOperand &value) {
+        splitTag(value, ScratchReg);
+        return ScratchReg;
     }
     void cmpTag(const ValueOperand &operand, ImmTag tag) {
-        splitTag(operand, ScratchReg);
-        cmpl(Operand(ScratchReg), tag);
+        Register reg = splitTagForTest(operand);
+        cmpl(Operand(reg), tag);
     }
 
-    void branchTestUndefined(Condition cond, const Register &tag, Label *label) {
+    void branchTestUndefined(Condition cond, Register tag, Label *label) {
         cond = testUndefined(cond, tag);
         j(cond, label);
     }
-    void branchTestInt32(Condition cond, const Register &tag, Label *label) {
+    void branchTestInt32(Condition cond, Register tag, Label *label) {
         cond = testInt32(cond, tag);
         j(cond, label);
     }
-    void branchTestBoolean(Condition cond, const Register &tag, Label *label) {
+    void branchTestBoolean(Condition cond, Register tag, Label *label) {
         cond = testBoolean(cond, tag);
         j(cond, label);
     }
-    void branchTestNull(Condition cond, const Register &tag, Label *label) {
+    void branchTestNull(Condition cond, Register tag, Label *label) {
         cond = testNull(cond, tag);
         j(cond, label);
     }
-    void branchTestString(Condition cond, const Register &tag, Label *label) {
+    void branchTestString(Condition cond, Register tag, Label *label) {
         cond = testString(cond, tag);
         j(cond, label);
     }
-    void branchTestObject(Condition cond, const Register &tag, Label *label) {
+    void branchTestObject(Condition cond, Register tag, Label *label) {
         cond = testObject(cond, tag);
         j(cond, label);
     }
+    void branchTestNumber(Condition cond, Register tag, Label *label) {
+        cond = testNumber(cond, tag);
+        j(cond, label);
+    }
 
-    // Type-testing instructions on x64 will clobber ScratchReg.
+    // Type-testing instructions on x64 will clobber ScratchReg, when used on
+    // ValueOperands.
     void branchTestUndefined(Condition cond, const ValueOperand &src, Label *label) {
         cond = testUndefined(cond, src);
         j(cond, label);
     }
     void branchTestInt32(Condition cond, const ValueOperand &src, Label *label) {
         splitTag(src, ScratchReg);
         branchTestInt32(cond, ScratchReg, label);
     }
@@ -293,20 +328,37 @@ class MacroAssemblerX64 : public MacroAs
     }
     void unboxString(const ValueOperand &src, const Register &dest) {
         movq(ImmWord(JSVAL_PAYLOAD_MASK), dest);
         andq(src.valueReg(), dest);
     }
     void unboxObject(const ValueOperand &src, const Register &dest) {
         // TODO: Can we unbox more efficiently? Bug 680294.
         movq(JSVAL_PAYLOAD_MASK, ScratchReg);
-        movq(src.value(), dest);
+        if (src.valueReg() != dest)
+            movq(src.valueReg(), dest);
         andq(ScratchReg, dest);
     }
 
+    // Extended unboxing API. If the payload is already in a register, returns
+    // that register. Otherwise, provides a move to the given scratch register,
+    // and returns that.
+    Register extractObject(const Address &address, Register scratch) {
+        JS_ASSERT(scratch != ScratchReg);
+        loadPtr(address, scratch);
+        unboxObject(ValueOperand(scratch), scratch);
+        return scratch;
+    }
+    Register extractTag(const Address &address, Register scratch) {
+        JS_ASSERT(scratch != ScratchReg);
+        loadPtr(address, scratch);
+        splitTag(scratch, scratch);
+        return scratch;
+    }
+
     // These two functions use the low 32-bits of the full value register.
     void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
         cvtsi2sd(operand.value(), dest);
     }
     void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
         cvtsi2sd(operand.value(), dest);
     }
 
--- a/js/src/ion/x86/Architecture-x86.h
+++ b/js/src/ion/x86/Architecture-x86.h
@@ -101,16 +101,19 @@ class Registers {
         (1 << JSC::X86Registers::edx) |
         (1 << JSC::X86Registers::ebx);
 
     static const uint32 NonAllocatableMask =
         (1 << JSC::X86Registers::esp);
 
     static const uint32 AllocatableMask = AllMask & ~NonAllocatableMask;
 
+    // Registers that can be allocated without being saved, generally.
+    static const uint32 TempMask = VolatileMask & ~NonAllocatableMask;
+
     static const uint32 JSCallClobberMask =
         AllocatableMask &
         ~(1 << JSC::X86Registers::ecx) &
         ~(1 << JSC::X86Registers::edx);
 };
 
 class FloatRegisters {
   public:
--- a/js/src/ion/x86/Assembler-x86.h
+++ b/js/src/ion/x86/Assembler-x86.h
@@ -105,32 +105,37 @@ class Operand
 
     Kind kind_ : 2;
     int32 base_ : 5;
     Scale scale_ : 2;
     int32 disp_;
     int32 index_ : 5;
 
   public:
-    explicit Operand(const Register &reg)
+    explicit Operand(Register reg)
       : kind_(REG),
         base_(reg.code())
     { }
-    explicit Operand(const FloatRegister &reg)
+    explicit Operand(FloatRegister reg)
       : kind_(FPREG),
         base_(reg.code())
     { }
-    explicit Operand(const Register &base, const Register &index, Scale scale, int32 disp = 0)
+    explicit Operand(const Address &address)
+      : kind_(REG_DISP),
+        base_(address.base.code()),
+        disp_(address.offset)
+    { }
+    Operand(Register base, Register index, Scale scale, int32 disp = 0)
       : kind_(SCALE),
         base_(base.code()),
         scale_(scale),
         disp_(disp),
         index_(index.code())
     { }
-    Operand(const Register &reg, int32 disp)
+    Operand(Register reg, int32 disp)
       : kind_(REG_DISP),
         base_(reg.code()),
         disp_(disp)
     { }
 
     Kind kind() const {
         return kind_;
     }
@@ -204,16 +209,17 @@ class Assembler : public AssemblerX86Sha
     }
 
   public:
     using AssemblerX86Shared::movl;
     using AssemblerX86Shared::j;
     using AssemblerX86Shared::jmp;
     using AssemblerX86Shared::movsd;
     using AssemblerX86Shared::retarget;
+    using AssemblerX86Shared::cmpl;
 
     static void TraceRelocations(JSTracer *trc, IonCode *code, CompactBufferReader &reader);
 
     // The buffer is about to be linked, make sure any constant pools or excess
     // bookkeeping has been flushed to the instruction stream.
     void flush() { }
 
     // Copy the assembly code to the given buffer, and perform any pending
@@ -256,16 +262,20 @@ class Assembler : public AssemblerX86Sha
           default:
             JS_NOT_REACHED("unexpected operand kind");
         }
     }
     void cvttsd2s(const FloatRegister &src, const Register &dest) {
         cvttsd2si(src, dest);
     }
 
+    void cmpl(const Register src, ImmGCPtr ptr) {
+        masm.cmpl_ir(ptr.value, src.code());
+    }
+
     void jmp(void *target, Relocation::Kind reloc) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
     }
     void j(Condition cond, void *target, Relocation::Kind reloc) {
         JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
         addPendingJump(src, target, reloc);
     }
--- a/js/src/ion/x86/CodeGenerator-x86.cpp
+++ b/js/src/ion/x86/CodeGenerator-x86.cpp
@@ -123,19 +123,19 @@ MIRTypeToTag(MIRType type)
         JS_NOT_REACHED("no payload...");
     }
     return JSVAL_TAG_NULL;
 }
 
 bool
 CodeGeneratorX86::visitBox(LBox *box)
 {
-    const LAllocation *a = box->getOperand(0);
     const LDefinition *type = box->getDef(TYPE_INDEX);
 
+    DebugOnly<const LAllocation *> a = box->getOperand(0);
     JS_ASSERT(!a->isConstant());
 
     // On x86, the input operand and the output payload have the same
     // virtual register. All that needs to be written is the type tag for
     // the type definition.
     masm.movl(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
     return true;
 }
@@ -151,20 +151,23 @@ CodeGeneratorX86::visitBoxDouble(LBoxDou
     masm.psrlq(Imm32(4), ToFloatRegister(in));
     masm.movd(ToFloatRegister(in), ToRegister(type));
     return true;
 }
 
 bool
 CodeGeneratorX86::visitUnbox(LUnbox *unbox)
 {
-    LAllocation *type = unbox->getOperand(TYPE_INDEX);
-    masm.cmpl(ToOperand(type), Imm32(MIRTypeToTag(unbox->type())));
-    if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
-        return false;
+    MUnbox *mir = unbox->mir();
+    if (mir->checkType()) {
+        LAllocation *type = unbox->getOperand(TYPE_INDEX);
+        masm.cmpl(ToOperand(type), Imm32(MIRTypeToTag(mir->type())));
+        if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
+            return false;
+    }
     return true;
 }
 
 bool
 CodeGeneratorX86::visitStackArg(LStackArg *arg)
 {
     ValueOperand val = ToValue(arg, 0);
     uint32 argslot = arg->argslot();
@@ -226,32 +229,29 @@ CodeGeneratorX86::visitDouble(LDouble *i
 }
 
 bool
 CodeGeneratorX86::visitUnboxDouble(LUnboxDouble *ins)
 {
     const ValueOperand box = ToValue(ins, LUnboxDouble::Input);
     const LDefinition *result = ins->output();
 
-    Assembler::Condition cond = masm.testDouble(Assembler::NotEqual, box);
-    if (!bailoutIf(cond, ins->snapshot()))
-        return false;
+    MUnbox *mir = ins->mir();
+    if (mir->checkType()) {
+        Assembler::Condition cond = masm.testDouble(Assembler::NotEqual, box);
+        if (!bailoutIf(cond, ins->snapshot()))
+            return false;
+    }
     masm.unboxDouble(box, ToFloatRegister(result));
     return true;
 }
 
-Register
-CodeGeneratorX86::splitTagForTest(const ValueOperand &value)
-{
-    return value.typeReg();
-}
-
 Assembler::Condition
 CodeGeneratorX86::testStringTruthy(bool truthy, const ValueOperand &value)
 {
     Register string = value.payloadReg();
     Operand lengthAndFlags(string, JSString::offsetOfLengthAndFlags());
 
     size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT);
-    masm.testl(Imm32(mask), lengthAndFlags);
+    masm.testl(lengthAndFlags, Imm32(mask));
     return truthy ? Assembler::NonZero : Assembler::Zero;
 }
 
--- a/js/src/ion/x86/CodeGenerator-x86.h
+++ b/js/src/ion/x86/CodeGenerator-x86.h
@@ -73,17 +73,16 @@ class CodeGeneratorX86 : public CodeGene
     CodeGeneratorX86 *thisFromCtor() {
         return this;
     }
 
   protected:
     ValueOperand ToValue(LInstruction *ins, size_t pos);
 
     // Functions for LTestVAndBranch.
-    Register splitTagForTest(const ValueOperand &value);
     Assembler::Condition testStringTruthy(bool truthy, const ValueOperand &value);
 
   protected:
     void linkAbsoluteLabels();
 
   public:
     CodeGeneratorX86(MIRGenerator *gen, LIRGraph &graph);
 
--- a/js/src/ion/x86/LIR-x86.h
+++ b/js/src/ion/x86/LIR-x86.h
@@ -77,35 +77,34 @@ class LBoxDouble : public LInstructionHe
 
 class LUnbox : public LInstructionHelper<1, 2, 0>
 {
     MIRType type_;
 
   public:
     LIR_HEADER(Unbox);
 
-    LUnbox(MIRType type)
-      : type_(type)
-    { }
-
-    MIRType type() const {
-        return type_;
+    MUnbox *mir() const {
+        return mir_->toUnbox();
     }
 };
 
 class LUnboxDouble : public LInstructionHelper<1, 2, 0>
 {
   public:
     LIR_HEADER(UnboxDouble);
 
     static const size_t Input = 0;
 
     const LDefinition *output() {
         return getDef(0);
     }
+    MUnbox *mir() const {
+        return mir_->toUnbox();
+    }
 };
 
 // Constant double.
 class LDouble : public LInstructionHelper<1, 1, 0>
 {
   public:
     LIR_HEADER(Double);
 
--- a/js/src/ion/x86/Lowering-x86.cpp
+++ b/js/src/ion/x86/Lowering-x86.cpp
@@ -125,34 +125,38 @@ LIRGeneratorX86::visitUnbox(MUnbox *unbo
     // a payload. Unlike most instructions conusming a box, we ask for the type
     // second, so that the result can re-use the first input.
     MDefinition *inner = unbox->getOperand(0);
 
     if (unbox->type() == MIRType_Double) {
         if (!ensureDefined(inner))
             return false;
 
-        LUnboxDouble *lir = new LUnboxDouble();
-        if (!assignSnapshot(lir))
+        LUnboxDouble *lir = new LUnboxDouble;
+        if (unbox->checkType() && !assignSnapshot(lir))
             return false;
         if (!useBox(lir, LUnboxDouble::Input, inner))
             return false;
         return define(lir, unbox);
     }
 
-    LUnbox *lir = new LUnbox(unbox->type());
+    LUnbox *lir = new LUnbox;
     lir->setOperand(0, useType(inner, LUse::ANY));
     lir->setOperand(1, usePayloadInRegister(inner));
+    lir->setMir(unbox);
 
     // Re-use the inner payload's def, for better register allocation.
     LDefinition::Type type = LDefinition::TypeFrom(unbox->type());
     lir->setDef(0, LDefinition(VirtualRegisterOfPayload(inner), type, LDefinition::REDEFINED));
     unbox->setVirtualRegister(VirtualRegisterOfPayload(inner));
 
-    return assignSnapshot(lir) && add(lir);
+    if (unbox->checkType() && !assignSnapshot(lir))
+        return false;
+
+    return add(lir);
 }
 
 bool
 LIRGeneratorX86::visitReturn(MReturn *ret)
 {
     MDefinition *opd = ret->getOperand(0);
     JS_ASSERT(opd->type() == MIRType_Value);
 
@@ -162,17 +166,17 @@ LIRGeneratorX86::visitReturn(MReturn *re
     return fillBoxUses(ins, 0, opd) && add(ins);
 }
 
 bool
 LIRGeneratorX86::assignSnapshot(LInstruction *ins)
 {
     LSnapshot *snapshot = LSnapshot::New(gen, lastResumePoint_);
     if (!snapshot)
-        return false;
+        return false;
 
     for (size_t i = 0; i < lastResumePoint_->numOperands(); i++) {
         MDefinition *ins = lastResumePoint_->getOperand(i);
         LAllocation *type = snapshot->getEntry(i * 2);
         LAllocation *payload = snapshot->getEntry(i * 2 + 1);
 
         // The register allocation will fill these fields in with actual
         // register/stack assignments. During code generation, we can restore
--- a/js/src/ion/x86/MacroAssembler-x86.h
+++ b/js/src/ion/x86/MacroAssembler-x86.h
@@ -46,16 +46,24 @@
 
 namespace js {
 namespace ion {
 
 class MacroAssemblerX86 : public MacroAssemblerX86Shared
 {
     static const uint32 StackAlignment = 16;
 
+  private:
+    Operand payloadOf(const Address &address) {
+        return Operand(address.base, address.offset);
+    }
+    Operand tagOf(const Address &address) {
+        return Operand(address.base, address.offset + 4);
+    }
+
   protected:
     uint32 alignStackForCall(uint32 stackForArgs) {
         // framePushed_ is accurate, so precisely adjust the stack requirement.
         uint32 displacement = stackForArgs + framePushed_;
         return stackForArgs + ComputeByteAlignment(displacement, StackAlignment);
     }
 
     uint32 dynamicallyAlignStackForCall(uint32 stackForArgs, const Register &scratch) {
@@ -79,33 +87,38 @@ class MacroAssemblerX86 : public MacroAs
     /////////////////////////////////////////////////////////////////
     Operand ToPayload(Operand base) {
         return base;
     }
     Operand ToType(Operand base) {
         return Operand(Register::FromCode(base.base()),
                        base.disp() + sizeof(void *));
     }
-    void moveValue(const Value &val, const Register &type, const Register &data) {
+    void moveValue(const Value &val, Register type, Register data) {
         jsval_layout jv = JSVAL_TO_IMPL(val);
         movl(Imm32(jv.s.tag), type);
         movl(Imm32(jv.s.payload.i32), data);
     }
 
     /////////////////////////////////////////////////////////////////
     // X86/X64-common interface.
     /////////////////////////////////////////////////////////////////
     void storeValue(ValueOperand val, Operand dest) {
         movl(val.payloadReg(), ToPayload(dest));
         movl(val.typeReg(), ToType(dest));
     }
     void movePtr(Operand op, const Register &dest) {
         movl(op, dest);
     }
 
+    // Returns the register containing the type tag.
+    Register splitTagForTest(const ValueOperand &value) {
+        return value.typeReg();
+    }
+
     Condition testUndefined(Condition cond, const Register &tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_UNDEFINED));
         return cond;
     }
     Condition testBoolean(Condition cond, const Register &tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_BOOLEAN));
@@ -132,16 +145,21 @@ class MacroAssemblerX86 : public MacroAs
         cmpl(tag, ImmTag(JSVAL_TAG_STRING));
         return cond;
     }
     Condition testObject(Condition cond, const Register &tag) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         cmpl(tag, ImmTag(JSVAL_TAG_OBJECT));
         return cond;
     }
+    Condition testNumber(Condition cond, const Register &tag) {
+        JS_ASSERT(cond == Equal || cond == NotEqual);
+        cmpl(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
+        return cond == Equal ? BelowOrEqual : Above;
+    }
     Condition testUndefined(Condition cond, const ValueOperand &value) {
         return testUndefined(cond, value.typeReg());
     }
     Condition testBoolean(Condition cond, const ValueOperand &value) {
         return testBoolean(cond, value.typeReg());
     }
     Condition testInt32(Condition cond, const ValueOperand &value) {
         return testInt32(cond, value.typeReg());
@@ -153,16 +171,26 @@ class MacroAssemblerX86 : public MacroAs
         return testNull(cond, value.typeReg());
     }
     Condition testString(Condition cond, const ValueOperand &value) {
         return testString(cond, value.typeReg());
     }
     Condition testObject(Condition cond, const ValueOperand &value) {
         return testObject(cond, value.typeReg());
     }
+    Condition testNumber(Condition cond, const ValueOperand &value) {
+        return testNumber(cond, value.typeReg());
+    }
+
+    void cmpPtr(const Register &lhs, const ImmWord rhs) {
+        cmpl(lhs, Imm32(rhs.value));
+    }
+    void testPtr(const Register &lhs, const Register &rhs) {
+        return testl(lhs, rhs);
+    }
 
     /////////////////////////////////////////////////////////////////
     // Common interface.
     /////////////////////////////////////////////////////////////////
     void reserveStack(uint32 amount) {
         if (amount)
             subl(Imm32(amount), StackPointer);
         framePushed_ += amount;
@@ -176,37 +204,35 @@ class MacroAssemblerX86 : public MacroAs
 
     void addPtr(Imm32 imm, const Register &dest) {
         addl(imm, dest);
     }
     void subPtr(Imm32 imm, const Register &dest) {
         subl(imm, dest);
     }
 
-    void cmpPtr(const Register &lhs, const Imm32 rhs) {
-        return cmpl(lhs, rhs);
-    }
-    void cmpPtr(const Register &lhs, const ImmWord rhs) {
-        return cmpl(lhs, Imm32(rhs.value));
-    }
-    void testPtr(const Register &lhs, const Register &rhs) {
-        return testl(lhs, rhs);
+    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
+        cmpl(lhs, ptr);
+        j(cond, label);
     }
 
-    void movePtr(ImmWord imm, const Register &dest) {
+    void movePtr(ImmWord imm, Register dest) {
         movl(Imm32(imm.value), dest);
     }
+    void loadPtr(const Address &address, Register dest) {
+        movl(Operand(address), dest);
+    }
     void setStackArg(const Register &reg, uint32 arg) {
         movl(reg, Operand(esp, arg * STACK_SLOT_SIZE));
     }
     void checkCallAlignment() {
 #ifdef DEBUG
         Label good;
         movl(esp, eax);
-        testl(Imm32(StackAlignment - 1), eax);
+        testl(eax, Imm32(StackAlignment - 1));
         j(Equal, &good);
         breakpoint();
         bind(&good);
 #endif
     }
 
     // Type testing instructions can take a tag in a register or a
     // ValueOperand.
@@ -240,16 +266,21 @@ class MacroAssemblerX86 : public MacroAs
         cond = testString(cond, t);
         j(cond, label);
     }
     template <typename T>
     void branchTestObject(Condition cond, const T &t, Label *label) {
         cond = testObject(cond, t);
         j(cond, label);
     }
+    template <typename T>
+    void branchTestNumber(Condition cond, const T &t, Label *label) {
+        cond = testNumber(cond, t);
+        j(cond, label);
+    }
 
     void unboxInt32(const ValueOperand &operand, const Register &dest) {
         movl(operand.payloadReg(), dest);
     }
     void unboxBoolean(const ValueOperand &operand, const Register &dest) {
         movl(operand.payloadReg(), dest);
     }
     void unboxDouble(const ValueOperand &operand, const FloatRegister &dest) {
@@ -259,16 +290,28 @@ class MacroAssemblerX86 : public MacroAs
             pinsrd(operand.typeReg(), dest);
         } else {
             movd(operand.payloadReg(), dest);
             movd(operand.typeReg(), ScratchFloatReg);
             unpcklps(ScratchFloatReg, dest);
         }
     }
 
+    // Extended unboxing API. If the payload is already in a register, returns
+    // that register. Otherwise, provides a move to the given scratch register,
+    // and returns that.
+    Register extractObject(const Address &address, Register scratch) {
+        movl(payloadOf(address), scratch);
+        return scratch;
+    }
+    Register extractTag(const Address &address, Register scratch) {
+        movl(tagOf(address), scratch);
+        return scratch;
+    }
+
     void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
         cvtsi2sd(operand.payloadReg(), dest);
     }
     void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
         cvtsi2sd(operand.payloadReg(), dest);
     }
 
     void loadStaticDouble(const double *dp, const FloatRegister &dest) {
--- a/js/src/jsval.h
+++ b/js/src/jsval.h
@@ -282,16 +282,18 @@ typedef uint64 JSValueShiftedTag;
 #define JSVAL_TYPE_TO_SHIFTED_TAG(type) (((uint64)JSVAL_TYPE_TO_TAG(type)) << JSVAL_TAG_SHIFT)
 
 #define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_OBJ_OR_NULL_SET  JSVAL_SHIFTED_TAG_NULL
 #define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET    JSVAL_SHIFTED_TAG_OBJECT
 #define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET       JSVAL_SHIFTED_TAG_UNDEFINED
 #define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_PTR_PAYLOAD_SET  JSVAL_SHIFTED_TAG_MAGIC
 #define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET      JSVAL_SHIFTED_TAG_STRING
 
+#define JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET              JSVAL_TAG_INT32
+
 #endif /* JS_BITS_PER_WORD */
 
 typedef enum JSWhyMagic
 {
     JS_ARRAY_HOLE,               /* a hole in a dense array */
     JS_ARGS_HOLE,                /* a hole in the args object's array */
     JS_NATIVE_ENUMERATE,         /* indicates that a custom enumerate hook forwarded
                                   * to JS_EnumerateState, which really means the object can be