Implement Call lowering and generation. (Bug 670484, r=dvander)
author: Sean Stangl <sstangl@mozilla.com>
Thu, 11 Aug 2011 17:43:31 -0700
changeset 105243 090a6a0de93a879fd73c00aee52415327ee92b78
parent 105242 6fbb4a76ef7cdc04ec875ab98911dc1ef54a80c1
child 105244 50142bbb735daf200fdf737e54968010b768dd23
push id: 14706
push user: eakhgari@mozilla.com
push date: Tue, 11 Sep 2012 20:39:52 +0000
treeherder: mozilla-inbound@d50bf1edaabe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: dvander
bugs: 670484
milestone: 9.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Implement Call lowering and generation. (Bug 670484, r=dvander)
js/src/ion/IonBuilder.cpp
js/src/ion/IonBuilder.h
js/src/ion/IonCode.h
js/src/ion/IonLIR.cpp
js/src/ion/IonLIR.h
js/src/ion/LIR-Common.h
js/src/ion/LOpcodes.h
js/src/ion/Lowering.cpp
js/src/ion/Lowering.h
js/src/ion/MIR.cpp
js/src/ion/MIR.h
js/src/ion/MOpcodes.h
js/src/ion/TypePolicy.cpp
js/src/ion/TypePolicy.h
js/src/ion/shared/Assembler-shared.h
js/src/ion/shared/Assembler-x86-shared.h
js/src/ion/shared/CodeGenerator-shared.cpp
js/src/ion/shared/CodeGenerator-shared.h
js/src/ion/shared/CodeGenerator-x86-shared.cpp
js/src/ion/shared/CodeGenerator-x86-shared.h
js/src/ion/shared/Lowering-shared-inl.h
js/src/ion/shared/Lowering-shared.h
js/src/ion/shared/MacroAssembler-x86-shared.h
js/src/ion/x64/Architecture-x64.h
js/src/ion/x64/Assembler-x64.h
js/src/ion/x64/CodeGenerator-x64.cpp
js/src/ion/x64/CodeGenerator-x64.h
js/src/ion/x64/LIR-x64.h
js/src/ion/x64/MacroAssembler-x64.h
js/src/ion/x86/Architecture-x86.h
js/src/ion/x86/Assembler-x86.h
js/src/ion/x86/CodeGenerator-x86.cpp
js/src/ion/x86/CodeGenerator-x86.h
js/src/ion/x86/MacroAssembler-x86.h
js/src/jit-test/tests/ion/bug670484.js
--- a/js/src/ion/IonBuilder.cpp
+++ b/js/src/ion/IonBuilder.cpp
@@ -412,16 +412,25 @@ IonBuilder::inspectOpcode(JSOp op)
         return pushConstant(UndefinedValue());
 
       case JSOP_FALSE:
         return pushConstant(BooleanValue(false));
 
       case JSOP_TRUE:
         return pushConstant(BooleanValue(true));
 
+      case JSOP_NOTEARG:
+        return jsop_notearg();
+
+      case JSOP_CALLARG:
+        current->pushArg(GET_SLOTNO(pc));
+        if (!pushConstant(UndefinedValue())) // Implicit |this|.
+            return false;
+        return jsop_notearg();
+
       case JSOP_GETARG:
         current->pushArg(GET_SLOTNO(pc));
         return true;
 
       case JSOP_SETARG:
         current->setArg(GET_SLOTNO(pc));
         return true;
 
@@ -434,16 +443,19 @@ IonBuilder::inspectOpcode(JSOp op)
 
       case JSOP_POP:
         current->pop();
         return true;
 
       case JSOP_IFEQX:
         return jsop_ifeq(JSOP_IFEQX);
 
+      case JSOP_CALL:
+        return jsop_call(GET_ARGC(pc));
+
       case JSOP_NULLBLOCKCHAIN:
         return true;
 
       case JSOP_INT8:
         return pushConstant(Int32Value(GET_INT8(pc)));
 
       case JSOP_UINT16:
         return pushConstant(Int32Value(GET_UINT16(pc)));
@@ -1612,17 +1624,56 @@ IonBuilder::jsop_binary(JSOp op)
 bool
 IonBuilder::jsop_neg()
 {
     if (!pushConstant(Int32Value(-1)))
         return false;
 
     if (!jsop_binary(JSOP_MUL))
         return false;
+    return true;
+}
 
+bool
+IonBuilder::jsop_notearg()
+{
+    // JSOP_NOTEARG notes that the value in current->pop() has just
+    // been pushed onto the stack for use in calling a function.
+    MDefinition *def = current->pop();
+    MPassArg *arg = MPassArg::New(def);
+
+    current->add(arg);
+    current->push(arg);
+    return true;
+}
+
+bool
+IonBuilder::jsop_call(uint32 argc)
+{
+    MCall *ins = MCall::New(argc + 1); // +1 for implicit this.
+    if (!ins)
+        return false;
+
+    // Bytecode order: Function, This, Arg0, Arg1, ..., ArgN, Call.
+    for (int32 i = argc; i >= 0; i--)
+        ins->addArg(i, current->pop()->toPassArg());
+    ins->initFunction(current->pop());
+
+    // Insert an MPrepareCall immediately before the first argument is pushed.
+    MPrepareCall *start = new MPrepareCall;
+    MPassArg *arg = ins->getArg(0)->toPassArg();
+    current->insertBefore(arg, start);
+
+    ins->initPrepareCall(start);
+
+    current->add(ins);
+    current->push(ins);
+
+    if (!resumeAfter(ins))
+        return false;
     return true;
 }
 
 bool
 IonBuilder::jsop_localinc(JSOp op)
 {
     int32 amt = (js_CodeSpec[op].format & JOF_INC) ? 1 : -1;
     bool post_incr = !!(js_CodeSpec[op].format & JOF_POST);
@@ -1736,15 +1787,17 @@ bool
 IonBuilder::resumeAfter(MInstruction *ins)
 {
     return resumeAt(ins, GetNextPc(pc));
 }
 
 bool
 IonBuilder::resumeAt(MInstruction *ins, jsbytecode *pc)
 {
+    JS_ASSERT(!ins->isIdempotent());
+
     MResumePoint *resumePoint = MResumePoint::New(current, pc);
     if (!resumePoint)
         return false;
     ins->setResumePoint(resumePoint);
     return true;
 }
 
--- a/js/src/ion/IonBuilder.h
+++ b/js/src/ion/IonBuilder.h
@@ -240,16 +240,18 @@ class IonBuilder : public MIRGenerator
     bool resumeAt(MInstruction *ins, jsbytecode *pc);
     bool resumeAfter(MInstruction *ins);
 
     bool pushConstant(const Value &v);
     bool jsop_bitnot();
     bool jsop_bitop(JSOp op);
     bool jsop_binary(JSOp op);
     bool jsop_neg();
+    bool jsop_notearg();
+    bool jsop_call(uint32 argc);
     bool jsop_ifeq(JSOp op);
     bool jsop_localinc(JSOp op);
     bool jsop_arginc(JSOp op);
     bool jsop_compare(JSOp op);
 
   private:
     JSAtom **atoms;
     MBasicBlock *current;
--- a/js/src/ion/IonCode.h
+++ b/js/src/ion/IonCode.h
@@ -59,16 +59,17 @@ namespace ion {
 static const uint32 MAX_BUFFER_SIZE = (1 << 30) - 1;
 
 typedef uint32 SnapshotOffset;
 
 class MacroAssembler;
 
 class IonCode : public gc::Cell
 {
+  protected:
     uint8 *code_;
     JSC::ExecutablePool *pool_;
     uint32 bufferSize_;             // Total buffer size.
     uint32 insnSize_;               // Instruction stream size.
     uint32 dataSize_;               // Size of the read-only data area.
     uint32 relocTableSize_;         // Size of the relocation table.
 
     IonCode()
@@ -108,16 +109,20 @@ class IonCode : public gc::Cell
     void copyFrom(MacroAssembler &masm);
 
     static IonCode *FromExecutable(uint8 *buffer) {
         IonCode *code = *(IonCode **)(buffer - sizeof(IonCode *));
         JS_ASSERT(code->raw() == buffer);
         return code;
     }
 
+    static size_t OffsetOfCode() {
+        return offsetof(IonCode, code_);
+    }
+
     // Allocates a new IonCode object which will be managed by the GC. If no
     // object can be allocated, NULL is returned. On failure, |pool| is
     // automatically released, so the code may be freed.
     static IonCode *New(JSContext *cx, uint8 *code, uint32 bufferSize, JSC::ExecutablePool *pool);
 };
 
 #define ION_DISABLED_SCRIPT ((IonScript *)0x1)
 
--- a/js/src/ion/IonLIR.cpp
+++ b/js/src/ion/IonLIR.cpp
@@ -46,16 +46,17 @@
 #include "IonSpewer.h"
 
 using namespace js;
 using namespace js::ion;
 
 LIRGraph::LIRGraph(MIRGraph &mir)
   : numVirtualRegisters_(0),
     localSlotCount_(0),
+    argumentSlotCount_(0),
     mir_(mir)
 {
 }
 
 bool
 LIRGraph::addConstantToPool(double d, uint32 *index)
 {
     *index = constantPool_.length();
--- a/js/src/ion/IonLIR.h
+++ b/js/src/ion/IonLIR.h
@@ -859,16 +859,18 @@ public:
 class LIRGraph
 {
     Vector<LBlock *, 16, SystemAllocPolicy> blocks_;
     js::Vector<Value, 0, SystemAllocPolicy> constantPool_;
     uint32 numVirtualRegisters_;
 
     // Number of stack slots needed for local spills.
     uint32 localSlotCount_;
+    // Number of stack slots needed for argument construction for calls.
+    uint32 argumentSlotCount_;
 
     MIRGraph &mir_;
 
   public:
     LIRGraph(MIRGraph &mir);
 
     size_t numBlocks() const {
         return blocks_.length();
@@ -892,16 +894,22 @@ class LIRGraph
         return numVirtualRegisters_ + 1;
     } 
     void setLocalSlotCount(uint32 localSlotCount) {
         localSlotCount_ = localSlotCount;
     }
     uint32 localSlotCount() const {
         return localSlotCount_;
     }
+    void setArgumentSlotCount(uint32 argumentSlotCount) {
+        argumentSlotCount_ = argumentSlotCount;
+    }
+    uint32 argumentSlotCount() const {
+        return argumentSlotCount_;
+    }
     bool addConstantToPool(double d, uint32 *index);
     bool addConstantToPool(MConstant *ins, uint32 *index);
     size_t numConstants() const {
         return constantPool_.length();
     }
     Value *constantPool() {
         return &constantPool_[0];
     }
--- a/js/src/ion/LIR-Common.h
+++ b/js/src/ion/LIR-Common.h
@@ -158,16 +158,75 @@ class LGoto : public LInstructionHelper<
       : block_(block)
     { }
 
     MBasicBlock *target() const {
         return block_;
     }
 };
 
+// Writes an argument for a function call to the frame's argument vector.
+class LStackArg : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+    uint32 argslot_; // Index into frame-scope argument vector.
+
+  public:
+    LIR_HEADER(StackArg);
+
+    LStackArg(uint32 argslot)
+      : argslot_(argslot)
+    { }
+
+    uint32 argslot() const {
+        return argslot_;
+    }
+};
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LCallGeneric : public LInstructionHelper<BOX_PIECES, 1, 2>
+{
+    // Slot below which %esp should be adjusted to make the call.
+    // Zero for a function without arguments.
+    uint32 argslot_;
+    MCall *mir_;
+
+  public:
+    LIR_HEADER(CallGeneric);
+
+    LCallGeneric(MCall *mir, const LAllocation &func,
+                 uint32 argslot, const LDefinition &token,
+                 const LDefinition &nargsreg)
+      : argslot_(argslot), mir_(mir)
+    {
+        setOperand(0, func);
+        setTemp(0, token);
+        setTemp(1, nargsreg);
+    }
+
+    uint32 argslot() const {
+        return argslot_;
+    }
+
+    uint32 nargs() const {
+        JS_ASSERT(mir_->argc() >= 1);
+        return mir_->argc() - 1; // |this| is not a formal argument.
+    }
+
+    const LAllocation *getFunction() {
+        return getOperand(0);
+    }
+    const LAllocation *getToken() {
+        return getTemp(0)->output();
+    }
+    const LAllocation *getNargsReg() {
+        return getTemp(1)->output();
+    }
+};
+
 // Takes a tableswitch with an integer to decide
 class LTableSwitch : public LInstructionHelper<0, 1, 2>
 {
     MTableSwitch *mir_;
 
   public:
     LIR_HEADER(TableSwitch);
 
--- a/js/src/ion/LOpcodes.h
+++ b/js/src/ion/LOpcodes.h
@@ -46,16 +46,18 @@
     _(MoveGroup)                    \
     _(Integer)                      \
     _(Pointer)                      \
     _(Double)                       \
     _(Value)                        \
     _(Parameter)                    \
     _(TableSwitch)                  \
     _(Goto)                         \
+    _(CallGeneric)                  \
+    _(StackArg)                     \
     _(BitNot)                       \
     _(BitOp)                        \
     _(ShiftOp)                      \
     _(Return)                       \
     _(Phi)                          \
     _(TestIAndBranch)               \
     _(TestDAndBranch)               \
     _(TestVAndBranch)               \
--- a/js/src/ion/Lowering.cpp
+++ b/js/src/ion/Lowering.cpp
@@ -112,16 +112,68 @@ LIRGenerator::visitTableSwitch(MTableSwi
 
 bool
 LIRGenerator::visitGoto(MGoto *ins)
 {
     return add(new LGoto(ins->target()));
 }
 
 bool
+LIRGenerator::visitPrepareCall(MPrepareCall *ins)
+{
+    allocateArguments(ins->argc());
+    return true;
+}
+
+bool
+LIRGenerator::visitPassArg(MPassArg *arg)
+{
+    MDefinition *opd = arg->getArgument();
+    JS_ASSERT(opd->type() == MIRType_Value);
+
+    uint32 argslot = getArgumentSlot(arg->getArgnum());
+
+    LStackArg *stack = new LStackArg(argslot);
+    if (!useBox(stack, 0, opd))
+        return false;
+
+    // Pass through the virtual register of the operand.
+    // This causes snapshots to correctly copy the operand on the stack.
+    // 
+    // This keeps the backing store around longer than strictly required.
+    // We could do better by informing snapshots about the argument vector.
+    arg->setVirtualRegister(opd->virtualRegister());
+
+    return add(stack);
+}
+
+bool
+LIRGenerator::visitCall(MCall *call)
+{
+    uint32 argc = call->argc();
+    JS_ASSERT(call->getFunction()->type() == MIRType_Object);
+
+    // Height of the current argument vector.
+    uint32 argslot = getArgumentSlotForCall();
+
+    // A call is entirely stateful, depending upon arguments already being
+    // stored in an argument vector. Therefore visitCall() may be generic.
+    LCallGeneric *ins = new LCallGeneric(call, useRegister(call->getFunction()),
+                                         argslot, temp(LDefinition::POINTER),
+                                         temp(LDefinition::POINTER));
+    if (!defineReturn(ins, call))
+        return false;
+    if (!assignSnapshot(ins))
+        return false;
+
+    freeArguments(argc);
+    return true;
+}
+
+bool
 LIRGenerator::visitTest(MTest *test)
 {
     MDefinition *opd = test->getOperand(0);
     MBasicBlock *ifTrue = test->ifTrue();
     MBasicBlock *ifFalse = test->ifFalse();
 
     if (opd->type() == MIRType_Value) {
         LTestVAndBranch *lir = new LTestVAndBranch(ifTrue, ifFalse, tempFloat());
@@ -533,16 +585,39 @@ LIRGenerator::updateResumeState(MInstruc
 void
 LIRGenerator::updateResumeState(MBasicBlock *block)
 {
     lastResumePoint_ = block->entryResumePoint();
     if (IonSpewEnabled(IonSpew_Snapshots))
         SpewResumePoint(block, NULL, lastResumePoint_);
 }
 
+void
+LIRGenerator::allocateArguments(uint32 argc)
+{
+    argslots_ += argc;
+    if (argslots_ > maxargslots_)
+        maxargslots_ = argslots_;
+}
+
+uint32
+LIRGenerator::getArgumentSlot(uint32 argnum)
+{
+    // First slot has index 1.
+    JS_ASSERT(argnum < argslots_);
+    return argslots_ - argnum ;
+}
+
+void
+LIRGenerator::freeArguments(uint32 argc)
+{
+    JS_ASSERT(argc <= argslots_);
+    argslots_ -= argc;
+}
+
 bool
 LIRGenerator::visitBlock(MBasicBlock *block)
 {
     current = block->lir();
     updateResumeState(block);
 
     if (!definePhis())
         return false;
@@ -617,11 +692,13 @@ LIRGenerator::generate()
         }
     }
 
     for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
         if (!visitBlock(*block))
             return false;
     }
 
+    lirGraph_.setArgumentSlotCount(maxargslots_);
+
     return true;
 }
 
--- a/js/src/ion/Lowering.h
+++ b/js/src/ion/Lowering.h
@@ -62,38 +62,56 @@
 namespace js {
 namespace ion {
 
 class LIRGenerator : public LIRGeneratorSpecific
 {
     void updateResumeState(MInstruction *ins);
     void updateResumeState(MBasicBlock *block);
 
+    // The active depth of the (perhaps nested) call argument vectors.
+    uint32 argslots_;
+    // The maximum depth, for framesizeclass determination.
+    uint32 maxargslots_;
+
   public:
     LIRGenerator(MIRGenerator *gen, MIRGraph &graph, LIRGraph &lirGraph)
-      : LIRGeneratorSpecific(gen, graph, lirGraph)
+      : LIRGeneratorSpecific(gen, graph, lirGraph),
+        argslots_(0), maxargslots_(0)
     { }
 
     bool generate();
 
   private:
     bool lowerBitOp(JSOp op, MInstruction *ins);
     bool lowerShiftOp(JSOp op, MInstruction *ins);
     bool precreatePhi(LBlock *block, MPhi *phi);
     bool definePhis();
 
+    // Allocate argument slots for a future function call.
+    void allocateArguments(uint32 argc);
+    // Map an MPassArg's argument number to a slot in the frame arg vector.
+    // Slots are indexed from 1. argnum is indexed from 0.
+    uint32 getArgumentSlot(uint32 argnum);
+    uint32 getArgumentSlotForCall() { return argslots_; }
+    // Free argument slots following a function call.
+    void freeArguments(uint32 argc);
+
   public:
     bool visitInstruction(MInstruction *ins);
     bool visitBlock(MBasicBlock *block);
 
     // Visitor hooks are explicit, to give CPU-specific versions a chance to
     // intercept without a bunch of explicit gunk in the .cpp.
     bool visitParameter(MParameter *param);
     bool visitTableSwitch(MTableSwitch *tableswitch);
     bool visitGoto(MGoto *ins);
+    bool visitPrepareCall(MPrepareCall *ins);
+    bool visitPassArg(MPassArg *arg);
+    bool visitCall(MCall *call);
     bool visitTest(MTest *test);
     bool visitCompare(MCompare *comp);
     bool visitBitNot(MBitNot *ins);
     bool visitBitAnd(MBitAnd *ins);
     bool visitBitOr(MBitOr *ins);
     bool visitBitXor(MBitXor *ins);
     bool visitLsh(MLsh *ins);
     bool visitRsh(MRsh *ins);
--- a/js/src/ion/MIR.cpp
+++ b/js/src/ion/MIR.cpp
@@ -351,16 +351,25 @@ bool
 MParameter::congruentTo(MDefinition * const &ins) const
 {
     if (!ins->isParameter())
         return false;
 
     return ins->toParameter()->index() == index_;
 }
 
+MCall *
+MCall::New(size_t argc)
+{
+    MCall *ins = new MCall;
+    if (!ins->init(argc + NumNonArgumentOperands))
+        return NULL;
+    return ins;
+}
+
 MCopy *
 MCopy::New(MDefinition *ins)
 {
     // Don't create nested copies.
     if (ins->isCopy())
         ins = ins->toCopy()->getOperand(0);
 
     return new MCopy(ins);
@@ -449,16 +458,33 @@ MPhi::addInput(MDefinition *ins)
 }
 
 MReturn *
 MReturn::New(MDefinition *ins)
 {
     return new MReturn(ins);
 }
 
+uint32
+MPrepareCall::argc() const
+{
+    JS_ASSERT(useCount() == 1);
+    MCall *call = usesBegin()->node()->toDefinition()->toCall();
+    return call->argc();
+}
+
+void
+MCall::addArg(size_t argnum, MPassArg *arg)
+{
+    // The operand vector is initialized in reverse order by the IonBuilder.
+    // It cannot be checked for consistency until all arguments are added.
+    arg->setArgnum(argnum);
+    return MNode::initOperand(argnum + NumNonArgumentOperands, arg->toDefinition());
+}
+
 void
 MBitNot::infer(const TypeOracle::Unary &u)
 {
     if (u.ival == MIRType_Object) {
         specialization_ = MIRType_None;
     } else {
         specialization_ = MIRType_Int32;
         setIdempotent();
--- a/js/src/ion/MIR.h
+++ b/js/src/ion/MIR.h
@@ -790,16 +790,106 @@ class MReturn
         JS_NOT_REACHED("There are no successors");
     }
 
     TypePolicy *typePolicy() {
         return this;
     }
 };
 
+// Designates the start of call frame construction.
+// Generates code to adjust the stack pointer for the argument vector.
+// Argc is inferred by checking the use chain during lowering.
+class MPrepareCall : public MAryInstruction<0>
+{
+  public:
+    INSTRUCTION_HEADER(PrepareCall);
+
+    MPrepareCall()
+    { }
+
+    // Get the vector size for the upcoming call by looking at the call.
+    uint32 argc() const;
+};
+
+class MVariadicInstruction : public MInstruction
+{
+    FixedList<MDefinition *> operands_;
+
+  protected:
+    bool init(size_t length) {
+        return operands_.init(length);
+    }
+
+  public:
+    // Will assert if called before initialization.
+    MDefinition *getOperand(size_t index) const {
+        return operands_[index];
+    }
+    size_t numOperands() const {
+        return operands_.length();
+    }
+    void setOperand(size_t index, MDefinition *operand) {
+        operands_[index] = operand;
+    }
+};
+
+class MCall
+  : public MVariadicInstruction,
+    public CallPolicy
+{
+  private:
+    // An MCall uses the MPrepareCall, MDefinition for the function, and
+    // MPassArg instructions. They are stored in the same list.
+    static const size_t PrepareCallOperandIndex  = 0;
+    static const size_t FunctionOperandIndex   = 1;
+    static const size_t NumNonArgumentOperands = 2;
+
+  protected:
+    MCall()
+    {
+        setResultType(MIRType_Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(Call);
+    static MCall *New(size_t argc);
+
+    void initPrepareCall(MDefinition *start) {
+        JS_ASSERT(start->isPrepareCall());
+        return initOperand(PrepareCallOperandIndex, start);
+    }
+    void initFunction(MDefinition *func) {
+        JS_ASSERT(!func->isPassArg());
+        return initOperand(FunctionOperandIndex, func);
+    }
+
+    MDefinition *getFunction() const {
+        return getOperand(FunctionOperandIndex);
+    }
+    void replaceFunction(MInstruction *newfunc) {
+        replaceOperand(FunctionOperandIndex, newfunc);
+    }
+
+    void addArg(size_t argnum, MPassArg *arg);
+
+    MDefinition *getArg(uint32 index) const {
+        return getOperand(NumNonArgumentOperands + index);
+    }
+
+    // Includes |this|.
+    uint32 argc() const {
+        return numOperands() - NumNonArgumentOperands;
+    }
+
+    TypePolicy *typePolicy() {
+        return this;
+    }
+};
+
 class MUnaryInstruction : public MAryInstruction<1>
 {
   protected:
     MUnaryInstruction(MDefinition *ins)
     {
         initOperand(0, ins);
     }
 };
@@ -937,16 +1027,59 @@ class MUnbox : public MUnaryInstruction
   public:
     INSTRUCTION_HEADER(Unbox);
     static MUnbox *New(MDefinition *ins, MIRType type)
     {
         return new MUnbox(ins, type);
     }
 };
 
+// Passes an MDefinition to an MCall. Must occur between an MPrepareCall and
+// MCall. Boxes the input and stores it to the correct location on stack.
+//
+// Arguments are *not* simply pushed onto a call stack: they are evaluated
+// left-to-right, but stored in the arg vector in C-style, right-to-left.
+class MPassArg
+  : public MUnaryInstruction,
+    public BoxInputsPolicy
+{
+    int32 argnum_;
+
+  private:
+    MPassArg(MDefinition *def)
+      : MUnaryInstruction(def), argnum_(-1) 
+    {
+        setResultType(MIRType_Value);
+    }
+
+  public:
+    INSTRUCTION_HEADER(PassArg);
+    static MPassArg *New(MDefinition *def)
+    {
+        return new MPassArg(def);
+    }
+
+    MDefinition *getArgument() const {
+        return getOperand(0);
+    }
+
+    // Set by the MCall.
+    void setArgnum(uint32 argnum) {
+        argnum_ = argnum;
+    }
+    uint32 getArgnum() const {
+        JS_ASSERT(argnum_ >= 0);
+        return (uint32)argnum_;
+    }
+
+    TypePolicy *typePolicy() {
+        return this;
+    }
+};
+
 // Converts a primitive (either typed or untyped) to a double. If the input is
 // not primitive at runtime, a bailout occurs.
 class MToDouble : public MUnaryInstruction
 {
     MToDouble(MDefinition *def)
       : MUnaryInstruction(def)
     {
         setResultType(MIRType_Double);
--- a/js/src/ion/MOpcodes.h
+++ b/js/src/ion/MOpcodes.h
@@ -49,16 +49,19 @@ namespace ion {
 #define MIR_OPCODE_LIST(_)                                                  \
     _(Constant)                                                             \
     _(Parameter)                                                            \
     _(TableSwitch)                                                          \
     _(Goto)                                                                 \
     _(Test)                                                                 \
     _(Compare)                                                              \
     _(Phi)                                                                  \
+    _(PrepareCall)                                                          \
+    _(PassArg)                                                              \
+    _(Call)                                                                 \
     _(BitNot)                                                               \
     _(BitAnd)                                                               \
     _(BitOr)                                                                \
     _(BitXor)                                                               \
     _(Lsh)                                                                  \
     _(Rsh)                                                                  \
     _(Ursh)                                                                 \
     _(Add)                                                                  \
--- a/js/src/ion/TypePolicy.cpp
+++ b/js/src/ion/TypePolicy.cpp
@@ -280,8 +280,35 @@ TableSwitchPolicy::adjustInputs(MInstruc
     }
     
     ins->block()->insertBefore(ins, replace);
     ins->replaceOperand(0, replace);
 
     return true;
 }
 
+void
+CallPolicy::specializeInputs(MInstruction *ins, TypeAnalysis *analysis)
+{
+    analysis->preferType(ins->getOperand(0), MIRType_Object);
+}
+
+bool
+CallPolicy::adjustInputs(MInstruction *ins)
+{
+    MCall *call = ins->toCall();
+
+    MDefinition *func = call->getFunction();
+    if (func->type() == MIRType_Object)
+        return true;
+
+    // If the function is impossible to call,
+    // bail out by causing a subsequent unbox to fail.
+    if (func->type() != MIRType_Value)
+        func = boxAt(call, func);
+
+    MInstruction *unbox = MUnbox::New(func, MIRType_Object);
+    call->block()->insertBefore(call, unbox);
+    call->replaceFunction(unbox);
+
+    return true;
+}
+
--- a/js/src/ion/TypePolicy.h
+++ b/js/src/ion/TypePolicy.h
@@ -139,16 +139,23 @@ class ComparePolicy : public BoxInputsPo
     MIRType specialization_;
 
   public:
     bool respecialize(MInstruction *def);
     void specializeInputs(MInstruction *ins, TypeAnalysis *analyzer);
     bool adjustInputs(MInstruction *def);
 };
 
+class CallPolicy : public BoxInputsPolicy
+{
+  public:
+    void specializeInputs(MInstruction *ins, TypeAnalysis *analyzer);
+    bool adjustInputs(MInstruction *def);
+};
+
 static inline bool
 CoercesToDouble(MIRType type)
 {
     if (type == MIRType_Undefined || type == MIRType_Double)
         return true;
     return false;
 }
 
--- a/js/src/ion/shared/Assembler-shared.h
+++ b/js/src/ion/shared/Assembler-shared.h
@@ -52,16 +52,17 @@ namespace ion {
 struct Imm32
 {
     int32_t value;
 
     explicit Imm32(int32_t value) : value(value)
     { }
 };
 
+// Pointer-sized immediate.
 struct ImmWord
 {
     uintptr_t value;
 
     explicit ImmWord(uintptr_t value) : value(value)
     { }
     explicit ImmWord(void *ptr) : value(reinterpret_cast<uintptr_t>(ptr))
     { }
--- a/js/src/ion/shared/Assembler-x86-shared.h
+++ b/js/src/ion/shared/Assembler-x86-shared.h
@@ -206,16 +206,26 @@ class AssemblerX86Shared
           case Operand::REG_DISP:
             masm.movsd_rm(src.code(), dest.disp(), dest.base());
             break;
           default:
             JS_NOT_REACHED("unexpected operand kind");
         }
     }
 
+    void load16(const Operand &src, const Register &dest) {
+        switch (src.kind()) {
+          case Operand::REG_DISP:
+            masm.movzwl_mr(src.disp(), src.base(), dest.code());
+            break;
+          default:
+            JS_NOT_REACHED("unexpected operand kind");
+        }
+    }
+
     void j(Condition cond, Label *label) {
         if (label->bound()) {
             // The jump can be immediately patched to the correct destination.
             masm.linkJump(masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond)), JmpDst(label->offset()));
         } else {
             // Thread the jump list through the unpatched jump targets.
             JmpSrc j = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
             JmpSrc prev = JmpSrc(label->use(j.offset()));
--- a/js/src/ion/shared/CodeGenerator-shared.cpp
+++ b/js/src/ion/shared/CodeGenerator-shared.cpp
@@ -47,17 +47,18 @@
 
 using namespace js;
 using namespace js::ion;
 
 CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph &graph)
   : gen(gen),
     graph(graph),
     deoptTable_(NULL),
-    frameDepth_(graph.localSlotCount() * sizeof(STACK_SLOT_SIZE))
+    frameDepth_(graph.localSlotCount() * sizeof(STACK_SLOT_SIZE) +
+                graph.argumentSlotCount() * sizeof(Value))
 {
     frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
 }
 
 bool
 CodeGeneratorShared::generateOutOfLineCode()
 {
     for (size_t i = 0; i < outOfLineCode_.length(); i++) {
--- a/js/src/ion/shared/CodeGenerator-shared.h
+++ b/js/src/ion/shared/CodeGenerator-shared.h
@@ -79,24 +79,36 @@ class CodeGeneratorShared : public LInst
     // The initial size of the frame in bytes. These are bytes beyond the
     // constant header present for every Ion frame, used for pre-determined
     // spills.
     int32 frameDepth_;
 
     // Frame class this frame's size falls into (see IonFrame.h).
     FrameSizeClass frameClass_;
 
+    // For arguments to the current function.
     inline int32 ArgToStackOffset(int32 slot) const {
         JS_ASSERT(slot >= 0);
         return masm.framePushed() + ION_FRAME_PREFIX_SIZE + slot;
     }
 
     inline int32 SlotToStackOffset(int32 slot) const {
         JS_ASSERT(slot > 0 && slot <= int32(graph.localSlotCount()));
-        int32 offset = masm.framePushed() - slot * STACK_SLOT_SIZE;
+        int32 offset = masm.framePushed() - (slot * STACK_SLOT_SIZE);
+        JS_ASSERT(offset >= 0);
+        return offset;
+    }
+
+    // For argument construction for calls. Argslots are Value-sized.
+    inline int32 StackOffsetOfPassedArg(int32 slot) const {
+        // A slot of 0 is permitted only to calculate %esp offset for calls.
+        JS_ASSERT(slot >= 0 && slot <= int32(graph.argumentSlotCount()));
+        int32 offset = masm.framePushed() -
+                       (graph.localSlotCount() * STACK_SLOT_SIZE) -
+                       (slot * sizeof(Value));
         JS_ASSERT(offset >= 0);
         return offset;
     }
 
     inline int32 ToStackOffset(const LAllocation *a) const {
         if (a->isArgument())
             return ArgToStackOffset(a->toArgument()->index());
         return SlotToStackOffset(a->toStackSlot()->slot());
--- a/js/src/ion/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/ion/shared/CodeGenerator-x86-shared.cpp
@@ -623,16 +623,98 @@ CodeGeneratorX86Shared::visitTableSwitch
 
     // Jump to the right case
     masm.jmp(pointer);
 
     return true;
 }
 
 bool
+CodeGeneratorX86Shared::visitCallGeneric(LCallGeneric *call)
+{
+    // Holds the function object.
+    const LAllocation *obj = call->getFunction();
+    Register objreg  = ToRegister(obj);
+
+    // Holds the callee token. Initially undefined.
+    const LAllocation *tok = call->getToken();
+    Register tokreg  = ToRegister(tok);
+
+    // Holds the function nargs. Initially undefined.
+    const LAllocation *nargs = call->getNargsReg();
+    Register nargsreg = ToRegister(nargs);
+
+    uint32 callargslot  = call->argslot();
+    uint32 unused_stack = StackOffsetOfPassedArg(callargslot);
+
+
+    // Guard that objreg is actually a function object.
+    masm.movePtr(Operand(objreg, JSObject::offsetOfClassPointer()), tokreg);
+    masm.cmpPtr(tokreg, ImmWord(&js::FunctionClass));
+    if (!bailoutIf(Assembler::NotEqual, call->snapshot()))
+        return false;
+
+    // Guard that objreg is a non-native function:
+    // Non-native iff (obj->flags & JSFUN_KINDMASK >= JSFUN_INTERPRETED).
+    masm.movl(Operand(objreg, offsetof(JSFunction, flags)), tokreg);
+    masm.andl(Imm32(JSFUN_KINDMASK), tokreg);
+    masm.cmpl(tokreg, Imm32(JSFUN_INTERPRETED));
+    if (!bailoutIf(Assembler::Below, call->snapshot()))
+        return false;
+
+    // Save the calleeToken, which equals the function object.
+    masm.mov(objreg, tokreg);
+
+    // Knowing that objreg is a non-native function, load the JSScript.
+    masm.movePtr(Operand(objreg, offsetof(JSFunction, u.i.script)), objreg);
+    masm.movePtr(Operand(objreg, offsetof(JSScript, ion)), objreg);
+
+    // Bail if the callee has not yet been JITted.
+    masm.testPtr(objreg, objreg);
+    if (!bailoutIf(Assembler::Zero, call->snapshot()))
+        return false;
+
+    // Remember the size of the frame above this point, in case of bailout.
+    uint32 stack_size = masm.framePushed() - unused_stack;
+    // Mark !IonFramePrefix::isEntryFrame().
+    uint32 size_descriptor = stack_size << 1;
+
+    // If insufficient arguments are passed, bail.
+    // Bug 685099: Instead of bailing, create a new frame with |undefined| padding.
+    masm.load16(Operand(tokreg, offsetof(JSFunction, nargs)), nargsreg);
+    masm.cmpl(nargsreg, Imm32(call->nargs()));
+    if (!bailoutIf(Assembler::NotEqual, call->snapshot()))
+        return false;
+
+    // Nestle %esp up to the argument vector.
+    if (unused_stack)
+        masm.addPtr(Imm32(unused_stack), StackPointer);
+
+    // Construct the IonFramePrefix.
+    masm.push(tokreg);
+    masm.push(Imm32(size_descriptor));
+
+    // Finally, call.
+    masm.movePtr(Operand(objreg, offsetof(IonScript, method_)), objreg);
+    masm.movePtr(Operand(objreg, IonCode::OffsetOfCode()), objreg);
+    masm.call(objreg);
+
+    // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
+    int prefix_garbage = 2 * sizeof(void *);
+    int restore_diff = prefix_garbage - unused_stack;
+    
+    if (restore_diff > 0)
+        masm.addPtr(Imm32(restore_diff), StackPointer);
+    else if (restore_diff < 0)
+        masm.subPtr(Imm32(-restore_diff), StackPointer);
+
+    return true;
+}
+
+bool
 CodeGeneratorX86Shared::visitMathD(LMathD *math)
 {
     const LAllocation *input = math->getOperand(1);
     const LDefinition *output = math->getDef(0);
 
     switch (math->jsop()) {
       case JSOP_ADD:
         masm.addsd(ToFloatRegister(input), ToFloatRegister(output));
--- a/js/src/ion/shared/CodeGenerator-x86-shared.h
+++ b/js/src/ion/shared/CodeGenerator-x86-shared.h
@@ -115,16 +115,17 @@ class CodeGeneratorX86Shared : public Co
     virtual bool visitTestIAndBranch(LTestIAndBranch *test);
     virtual bool visitTestDAndBranch(LTestDAndBranch *test);
     virtual bool visitCompareI(LCompareI *comp);
     virtual bool visitCompareIAndBranch(LCompareIAndBranch *comp);
     virtual bool visitCompareD(LCompareD *comp);
     virtual bool visitCompareDAndBranch(LCompareDAndBranch *comp);
     virtual bool visitMathD(LMathD *math);
     virtual bool visitTableSwitch(LTableSwitch *ins);
+    virtual bool visitCallGeneric(LCallGeneric *call);
 
     // Out of line visitors.
     bool visitOutOfLineBailout(OutOfLineBailout *ool);
 };
 
 // An out-of-line bailout thunk.
 class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorX86Shared>
 {
--- a/js/src/ion/shared/Lowering-shared-inl.h
+++ b/js/src/ion/shared/Lowering-shared-inl.h
@@ -113,16 +113,31 @@ LIRGeneratorShared::defineBox(LInstructi
 #elif defined(JS_PUNBOX64)
     lir->setDef(0, LDefinition(vreg, LDefinition::BOX, policy));
 #endif
 
     mir->setVirtualRegister(vreg);
     return add(lir);
 }
 
+template <size_t Ops, size_t Temps> bool
+LIRGeneratorShared::defineReturn(LInstructionHelper<BOX_PIECES, Ops, Temps> *lir, MDefinition *mir)
+{
+    defineBox(lir, mir, LDefinition::PRESET);
+
+#if defined(JS_NUNBOX32)
+    lir->getDef(TYPE_INDEX)->setOutput(LGeneralReg(JSReturnReg_Type));
+    lir->getDef(PAYLOAD_INDEX)->setOutput(LGeneralReg(JSReturnReg_Data));
+#elif defined(JS_PUNBOX64)
+    lir->getDef(0)->setOutput(LGeneralReg(JSReturnReg));
+#endif
+
+    return true;
+}
+
 // In LIR, we treat booleans and integers as the same low-level type (INTEGER).
 // When snapshotting, we recover the actual JS type from MIR. This function
 // checks that when making redefinitions, we don't accidentally coerce two
 // incompatible types.
 static inline bool
 IsCompatibleLIRCoercion(MIRType to, MIRType from)
 {
     if (to == from)
--- a/js/src/ion/shared/Lowering-shared.h
+++ b/js/src/ion/shared/Lowering-shared.h
@@ -111,16 +111,19 @@ class LIRGeneratorShared : public MInstr
     inline LDefinition temp(LDefinition::Type type);
     inline LDefinition tempFloat();
 
     template <size_t Ops, size_t Temps>
     inline bool defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps> *lir, MDefinition *mir,
                           LDefinition::Policy policy = LDefinition::DEFAULT);
 
     template <size_t Ops, size_t Temps>
+    inline bool defineReturn(LInstructionHelper<BOX_PIECES, Ops, Temps> *lir, MDefinition *mir);
+
+    template <size_t Ops, size_t Temps>
     inline bool define(LInstructionHelper<1, Ops, Temps> *lir, MDefinition *mir,
                         const LDefinition &def);
 
     template <size_t Ops, size_t Temps>
     inline bool define(LInstructionHelper<1, Ops, Temps> *lir, MDefinition *mir,
                        LDefinition::Policy policy = LDefinition::DEFAULT);
 
     template <size_t Ops, size_t Temps>
--- a/js/src/ion/shared/MacroAssembler-x86-shared.h
+++ b/js/src/ion/shared/MacroAssembler-x86-shared.h
@@ -49,17 +49,17 @@
 #endif
 
 namespace js {
 namespace ion {
 
 class MacroAssemblerX86Shared : public Assembler
 {
   protected:
-    // Extra bytes currently pushed onto the frame beyond frameDepth_. This is
+    // Bytes pushed onto the frame by the callee; includes frameDepth_. This is
     // needed to compute offsets to stack slots while temporary space has been
     // reserved for unexpected spills or C++ function calls. It is maintained
     // by functions which track stack alignment, which for clear distinction
     // use StudlyCaps (for example, Push, Pop).
     uint32 framePushed_;
 
   public:
     MacroAssemblerX86Shared()
--- a/js/src/ion/x64/Architecture-x64.h
+++ b/js/src/ion/x64/Architecture-x64.h
@@ -112,16 +112,19 @@ class Registers {
 
     static const uint32 SingleByteRegs = VolatileMask | NonVolatileMask;
 
     static const uint32 NonAllocatableMask =
         (1 << JSC::X86Registers::esp) |
         (1 << JSC::X86Registers::r11);      // This is ScratchReg.
 
     static const uint32 AllocatableMask = AllMask & ~NonAllocatableMask;
+
+    static const uint32 JSCallClobberMask =
+        AllocatableMask & ~(1 << JSC::X86Registers::ecx);
 };
 
 class FloatRegisters {
   public:
     typedef JSC::X86Registers::XMMRegisterID Code;
 
     static const char *GetName(Code code) {
         static const char *Names[] = { "xmm0",  "xmm1",  "xmm2",  "xmm3",
@@ -152,15 +155,17 @@ class FloatRegisters {
 
 
     static const uint32 NonVolatileMask = AllMask & ~VolatileMask;
 
     static const uint32 NonAllocatableMask =
         (1 << JSC::X86Registers::xmm15);    // This is ScratchFloatReg.
 
     static const uint32 AllocatableMask = AllMask & ~NonAllocatableMask;
+
+    static const uint32 JSCallClobberMask = AllocatableMask;
 };
 
+} // namespace ion
 } // namespace js
-} // namespace ion
 
 #endif // jsion_architecture_x64_h__
 
--- a/js/src/ion/x64/Assembler-x64.h
+++ b/js/src/ion/x64/Assembler-x64.h
@@ -399,22 +399,26 @@ class Assembler : public AssemblerX86Sha
             break;
           default:
             JS_NOT_REACHED("unexpected operand kind");
         }
     }
     void cmpq(const Register &lhs, const Register &rhs) {
         masm.cmpq_rr(rhs.code(), lhs.code());
     }
+    void cmpq(Imm32 lhs, const Register &rhs) {
+        masm.cmpq_ir(lhs.value, rhs.code());
+    }
+    
+    void testq(Imm32 lhs, const Register &rhs) {
+        masm.testq_i32r(lhs.value, rhs.code());
+    }
     void testq(const Register &lhs, const Register &rhs) {
         masm.testq_rr(lhs.code(), rhs.code());
     }
-    void testq(Imm32 lhs, const Register &rhs) {
-        masm.testq_i32r(lhs.value, rhs.code());
-    }
 
     void jmp(void *target, Relocation::Kind reloc) {
         JmpSrc src = masm.jmp();
         addPendingJump(src, target, reloc);
     }
     void j(Condition cond, void *target, Relocation::Kind reloc) {
         JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
         addPendingJump(src, target, reloc);
--- a/js/src/ion/x64/CodeGenerator-x64.cpp
+++ b/js/src/ion/x64/CodeGenerator-x64.cpp
@@ -152,16 +152,30 @@ CodeGeneratorX64::visitUnboxDouble(LUnbo
     if (!bailoutIf(cond, unbox->snapshot()))
         return false;
     masm.unboxDouble(value, ToFloatRegister(result));
 
     return true;
 }
 
 bool
+CodeGeneratorX64::visitUnboxObject(LUnboxObject *unbox)
+{
+    const ValueOperand value = ToValue(unbox, LUnboxObject::Input);
+    const LDefinition *object = unbox->output();
+
+    Assembler::Condition cond = masm.testObject(Assembler::NotEqual, value);
+    if (!bailoutIf(cond, unbox->snapshot()))
+        return false;
+    masm.unboxObject(value, ToRegister(object));
+    
+    return true;
+}
+
+bool
 CodeGeneratorX64::visitReturn(LReturn *ret)
 {
 #ifdef DEBUG
     LAllocation *result = ret->getOperand(0);
     JS_ASSERT(ToRegister(result) == JSReturnReg);
 #endif
     // Don't emit a jump to the return label if this is the last block.
     if (current->mir() != *gen->graph().poBegin())
@@ -183,8 +197,19 @@ CodeGeneratorX64::testStringTruthy(bool 
 
     Operand lengthAndFlags(ScratchReg, JSString::offsetOfLengthAndFlags());
     masm.movq(lengthAndFlags, ScratchReg);
     masm.shrq(Imm32(JSString::LENGTH_SHIFT), ScratchReg);
     masm.testq(ScratchReg, ScratchReg);
     return truthy ? Assembler::NonZero : Assembler::Zero;
 }
 
+bool
+CodeGeneratorX64::visitStackArg(LStackArg *arg)
+{
+    ValueOperand val = ToValue(arg, 0);
+    uint32 argslot = arg->argslot();
+    int32 stack_offset = StackOffsetOfPassedArg(argslot);
+
+    masm.storeValue(val, Operand(StackPointer, stack_offset));
+    return true;
+}
+
--- a/js/src/ion/x64/CodeGenerator-x64.h
+++ b/js/src/ion/x64/CodeGenerator-x64.h
@@ -62,19 +62,21 @@ class CodeGeneratorX64 : public CodeGene
     Assembler::Condition testStringTruthy(bool truthy, const ValueOperand &value);
 
   public:
     CodeGeneratorX64(MIRGenerator *gen, LIRGraph &graph);
 
   public:
     bool visitValue(LValue *value);
     bool visitReturn(LReturn *ret);
+    bool visitStackArg(LStackArg *arg);
     bool visitBox(LBox *box);
     bool visitUnboxInteger(LUnboxInteger *unbox);
     bool visitUnboxDouble(LUnboxDouble *unbox);
+    bool visitUnboxObject(LUnboxObject *unbox);
     bool visitDouble(LDouble *ins);
 };
 
 typedef CodeGeneratorX64 CodeGeneratorSpecific;
 
 } // ion
 } // js
 
--- a/js/src/ion/x64/LIR-x64.h
+++ b/js/src/ion/x64/LIR-x64.h
@@ -120,16 +120,22 @@ class LUnboxDouble : public LInstruction
 class LUnboxObject : public LInstructionHelper<1, 1, 0>
 {
   public:
     LIR_HEADER(UnboxObject);
 
     LUnboxObject(const LAllocation &input) {
         setOperand(0, input);
     }
+
+    static const size_t Input = 0;
+
+    const LDefinition *output() {
+        return getDef(0);
+    }
 };
 
 // Given an untyped input, guards on whether it's a string and returns the
 // pointer.
 class LUnboxString : public LInstructionHelper<1, 1, 1>
 {
   public:
     LIR_HEADER(UnboxString);
--- a/js/src/ion/x64/MacroAssembler-x64.h
+++ b/js/src/ion/x64/MacroAssembler-x64.h
@@ -89,27 +89,57 @@ class MacroAssemblerX64 : public MacroAs
         return total + ComputeByteAlignment(displacement, StackAlignment);
     }
 
     void restoreStackFromDynamicAlignment() {
         pop(rsp);
     }
 
   public:
+    /////////////////////////////////////////////////////////////////
+    // X86/X64-common interface.
+    /////////////////////////////////////////////////////////////////
+    void storeValue(ValueOperand val, Operand dest) {
+        movq(val.valueReg(), dest);
+    }
+    void movePtr(Operand op, const Register &dest) {
+        movq(op, dest);
+    }
+
+    /////////////////////////////////////////////////////////////////
+    // Common interface.
+    /////////////////////////////////////////////////////////////////
     void reserveStack(uint32 amount) {
         if (amount)
-            subq(Imm32(amount), rsp);
+            subq(Imm32(amount), StackPointer);
         framePushed_ += amount;
     }
     void freeStack(uint32 amount) {
         JS_ASSERT(amount <= framePushed_);
         if (amount)
-            addq(Imm32(amount), rsp);
+            addq(Imm32(amount), StackPointer);
         framePushed_ -= amount;
     }
+
+    void cmpPtr(const Register &lhs, const ImmWord rhs) {
+        JS_ASSERT(lhs != ScratchReg);
+        movq(rhs, ScratchReg);
+        return cmpq(lhs, ScratchReg);
+    }
+    void testPtr(const Register &lhs, const Register &rhs) {
+        return testq(lhs, rhs);
+    }
+
+    void addPtr(Imm32 imm, const Register &dest) {
+        addq(imm, dest);
+    }
+    void subPtr(Imm32 imm, const Register &dest) {
+        subq(imm, dest);
+    }
+
     void movePtr(ImmWord imm, const Register &dest) {
         movq(imm, dest);
     }
     void setStackArg(const Register &reg, uint32 arg) {
         movq(reg, Operand(rsp, (arg - NumArgRegs) * STACK_SLOT_SIZE + ShadowStackSpace));
     }
     void checkCallAlignment() {
 #ifdef DEBUG
@@ -172,48 +202,55 @@ class MacroAssemblerX64 : public MacroAs
         return testBoolean(cond, ScratchReg);
     }
     Condition testDouble(Condition cond, const ValueOperand &src) {
         JS_ASSERT(cond == Equal || cond == NotEqual);
         movq(ImmShiftedTag(JSVAL_SHIFTED_TAG_MAX_DOUBLE), ScratchReg);
         cmpq(src.value(), ScratchReg);
         return (cond == NotEqual) ? Above : BelowOrEqual;
     }
+    Condition testObject(Condition cond, const ValueOperand &src) {
+        JS_ASSERT(cond == Equal || cond == NotEqual);
+        cmpTag(src, ImmTag(JSVAL_TAG_OBJECT));
+        return cond;
+    }
     Condition testNull(Condition cond, const ValueOperand &src) {
         splitTag(src, ScratchReg);
         return testNull(cond, ScratchReg);
     }
     Condition testUndefined(Condition cond, const ValueOperand &src) {
         splitTag(src, ScratchReg);
         return testUndefined(cond, ScratchReg);
     }
     Condition testString(Condition cond, const ValueOperand &src) {
         splitTag(src, ScratchReg);
         return testString(cond, ScratchReg);
     }
-    Condition testObject(Condition cond, const ValueOperand &src) {
-        splitTag(src, ScratchReg);
-        return testObject(cond, ScratchReg);
-    }
 
     // Note that the |dest| register here may be ScratchReg, so we shouldn't
     // use it.
     void unboxInt32(const ValueOperand &src, const Register &dest) {
         movl(src.value(), dest);
     }
     void unboxBoolean(const ValueOperand &src, const Register &dest) {
         movl(src.value(), dest);
     }
     void unboxDouble(const ValueOperand &src, const FloatRegister &dest) {
         movqsd(src.valueReg(), dest);
     }
     void unboxString(const ValueOperand &src, const Register &dest) {
         movq(ImmWord(JSVAL_PAYLOAD_MASK), dest);
         andq(src.valueReg(), dest);
     }
+    void unboxObject(const ValueOperand &src, const Register &dest) {
+        // TODO: Can we unbox more efficiently? Bug 680294.
+        movq(ImmWord(JSVAL_PAYLOAD_MASK), ScratchReg);
+        movq(src.value(), dest);
+        andq(ScratchReg, dest);
+    }
 
     // These two functions use the low 32-bits of the full value register.
     void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
         cvtsi2sd(operand.value(), dest);
     }
     void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
         cvtsi2sd(operand.value(), dest);
     }
--- a/js/src/ion/x86/Architecture-x86.h
+++ b/js/src/ion/x86/Architecture-x86.h
@@ -100,16 +100,21 @@ class Registers {
         (1 << JSC::X86Registers::ecx) |
         (1 << JSC::X86Registers::edx) |
         (1 << JSC::X86Registers::ebx);
 
     static const uint32 NonAllocatableMask =
         (1 << JSC::X86Registers::esp);
 
     static const uint32 AllocatableMask = AllMask & ~NonAllocatableMask;
+
+    static const uint32 JSCallClobberMask =
+        AllocatableMask &
+        ~(1 << JSC::X86Registers::ecx) &
+        ~(1 << JSC::X86Registers::edx);
 };
 
 class FloatRegisters {
   public:
     typedef JSC::X86Registers::XMMRegisterID Code;
 
     static const char *GetName(Code code) {
         static const char *Names[] = { "xmm0", "xmm1", "xmm2", "xmm3",
@@ -126,15 +131,17 @@ class FloatRegisters {
 
     static const uint32 VolatileMask = AllMask;
     static const uint32 NonVolatileMask = 0;
 
     static const uint32 NonAllocatableMask =
         (1 << JSC::X86Registers::xmm7);
 
     static const uint32 AllocatableMask = AllMask & ~NonAllocatableMask;
+
+    static const uint32 JSCallClobberMask = AllocatableMask;
 };
 
+} // namespace ion
 } // namespace js
-} // namespace ion
 
 #endif // jsion_architecture_x86_h__
 
--- a/js/src/ion/x86/Assembler-x86.h
+++ b/js/src/ion/x86/Assembler-x86.h
@@ -153,18 +153,18 @@ class Operand
         return (FloatRegisters::Code)base_;
     }
     int32 disp() const {
         JS_ASSERT(kind() == REG_DISP || kind() == SCALE);
         return disp_;
     }
 };
 
+} // namespace ion
 } // namespace js
-} // namespace ion
 
 #include "ion/shared/Assembler-x86-shared.h"
 
 namespace js {
 namespace ion {
 
 class ValueOperand
 {
@@ -235,16 +235,19 @@ class Assembler : public AssemblerX86Sha
     }
     void mov(AbsoluteLabel *label, const Register &dest) {
         JS_ASSERT(!label->bound());
         // Thread the patch list through the unpatched address word in the
         // instruction stream.
         masm.movl_i32r(label->prev(), dest.code());
         label->setPrev(masm.size());
     }
+    void mov(const Register &src, const Register &dest) {
+        movl(src, dest);
+    }
     void lea(const Operand &src, const Register &dest) {
         switch (src.kind()) {
           case Operand::REG_DISP:
             masm.leal_mr(src.disp(), src.base(), dest.code());
             break;
           case Operand::SCALE:
             masm.leal_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code());
             break;
--- a/js/src/ion/x86/CodeGenerator-x86.cpp
+++ b/js/src/ion/x86/CodeGenerator-x86.cpp
@@ -160,16 +160,27 @@ CodeGeneratorX86::visitUnbox(LUnbox *unb
     LAllocation *type = unbox->getOperand(TYPE_INDEX);
     masm.cmpl(ToOperand(type), Imm32(MIRTypeToTag(unbox->type())));
     if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
         return false;
     return true;
 }
 
 bool
+CodeGeneratorX86::visitStackArg(LStackArg *arg)
+{
+    ValueOperand val = ToValue(arg, 0);
+    uint32 argslot = arg->argslot();
+    int32 stack_offset = StackOffsetOfPassedArg(argslot);
+
+    masm.storeValue(val, Operand(StackPointer, stack_offset));
+    return true;
+}
+
+bool
 CodeGeneratorX86::visitReturn(LReturn *ret)
 {
 #ifdef DEBUG
     LAllocation *type = ret->getOperand(TYPE_INDEX);
     LAllocation *payload = ret->getOperand(PAYLOAD_INDEX);
 
     JS_ASSERT(ToRegister(type) == JSReturnReg_Type);
     JS_ASSERT(ToRegister(payload) == JSReturnReg_Data);
--- a/js/src/ion/x86/CodeGenerator-x86.h
+++ b/js/src/ion/x86/CodeGenerator-x86.h
@@ -88,16 +88,17 @@ class CodeGeneratorX86 : public CodeGene
     CodeGeneratorX86(MIRGenerator *gen, LIRGraph &graph);
 
   public:
     bool visitBox(LBox *box);
     bool visitBoxDouble(LBoxDouble *box);
     bool visitUnbox(LUnbox *unbox);
     bool visitUnboxDouble(LUnboxDouble *ins);
     bool visitValue(LValue *value);
+    bool visitStackArg(LStackArg *arg);
     bool visitReturn(LReturn *ret);
     bool visitDouble(LDouble *ins);
 };
 
 typedef CodeGeneratorX86 CodeGeneratorSpecific;
 
 } // ion
 } // js
--- a/js/src/ion/x86/MacroAssembler-x86.h
+++ b/js/src/ion/x86/MacroAssembler-x86.h
@@ -69,27 +69,70 @@ class MacroAssemblerX86 : public MacroAs
         return stackForArgs + ComputeByteAlignment(displacement, StackAlignment);
     }
 
     void restoreStackFromDynamicAlignment() {
         pop(esp);
     }
 
   public:
+    /////////////////////////////////////////////////////////////////
+    // X86-specific interface.
+    /////////////////////////////////////////////////////////////////
+    Operand ToPayload(Operand base) {
+        return base;
+    }
+    Operand ToType(Operand base) {
+        return Operand(Register::FromCode(base.base()),
+                       base.disp() + sizeof(void *));
+    }
+
+    /////////////////////////////////////////////////////////////////
+    // X86/X64-common interface.
+    /////////////////////////////////////////////////////////////////
+    void storeValue(ValueOperand val, Operand dest) {
+        movl(val.payloadReg(), ToPayload(dest));
+        movl(val.typeReg(), ToType(dest));
+    }
+    void movePtr(Operand op, const Register &dest) {
+        movl(op, dest);
+    }
+
+    /////////////////////////////////////////////////////////////////
+    // Common interface.
+    /////////////////////////////////////////////////////////////////
     void reserveStack(uint32 amount) {
         if (amount)
-            subl(Imm32(amount), esp);
+            subl(Imm32(amount), StackPointer);
         framePushed_ += amount;
     }
     void freeStack(uint32 amount) {
         JS_ASSERT(amount <= framePushed_);
         if (amount)
-            addl(Imm32(amount), esp);
+            addl(Imm32(amount), StackPointer);
         framePushed_ -= amount;
     }
+
+    void addPtr(Imm32 imm, const Register &dest) {
+        addl(imm, dest);
+    }
+    void subPtr(Imm32 imm, const Register &dest) {
+        subl(imm, dest);
+    }
+
+    void cmpPtr(const Register &lhs, const Imm32 rhs) {
+        return cmpl(lhs, rhs);
+    }
+    void cmpPtr(const Register &lhs, const ImmWord rhs) {
+        return cmpl(lhs, Imm32(rhs.value));
+    }
+    void testPtr(const Register &lhs, const Register &rhs) {
+        return testl(lhs, rhs);
+    }
+
     void movePtr(ImmWord imm, const Register &dest) {
         movl(Imm32(imm.value), dest);
     }
     void setStackArg(const Register &reg, uint32 arg) {
         movl(reg, Operand(esp, arg * STACK_SLOT_SIZE));
     }
     void checkCallAlignment() {
 #ifdef DEBUG
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/bug670484.js
@@ -0,0 +1,101 @@
+// Call a function with no arguments.
+function a_g() {
+	return 5;
+}
+
+function a_f(g) {
+	return g();
+}
+
+a_g();
+assertEq(a_f(a_g), 5);
+
+///////////////////////////////////////////////////////////////////////////////
+// Call a function with one argument.
+function b_g(a) {
+	return a;
+}
+
+function b_f(h,b) {
+	return h(5);
+}
+b_g(5);
+assertEq(b_f(b_g,4), 5);
+
+///////////////////////////////////////////////////////////////////////////////
+// Try to confuse the register allocator.
+function c_g(a) {
+	return a;
+}
+function c_f(h,b) {
+	var x = h(5);
+	var y = x + 1;
+	var z = h(h(y + x + 2));
+	var k = 2 + z + 3;
+	return h(h(h(k)));
+}
+c_g(2); // prime g().
+assertEq(c_f(c_g,7), 18)
+
+///////////////////////////////////////////////////////////////////////////////
+// Fail during unboxing, get kicked to interpreter.
+// Interpreter throws an exception; handle it.
+
+function d_f(a) {
+	return a(); // Call a known non-object. This fails in unboxing.
+}
+var d_x = 0;
+try {
+	d_f(1); // Don't assert.
+} catch(e) {
+	d_x = 1;
+}
+assertEq(d_x, 1);
+
+///////////////////////////////////////////////////////////////////////////////
+// Try passing an uncompiled function.
+
+function e_uncompiled(a,b,c) {
+	return eval("b");
+}
+function e_f(h) {
+	return h(0,h(2,4,6),1);
+}
+assertEq(e_f(e_uncompiled),4);
+
+///////////////////////////////////////////////////////////////////////////////
+// Try passing a native function.
+
+function f_app(f,n) {
+	return f(n);
+}
+assertEq(f_app(Math.sqrt, 16), 4);
+
+///////////////////////////////////////////////////////////////////////////////
+// Handle the case where too few arguments are passed.
+function g_g(a,b,c,d,e) {
+	return e;
+}
+
+function g_f(g) {
+	return g(2);
+}
+
+g_g();
+assertEq(g_f(g_g), undefined);
+
+///////////////////////////////////////////////////////////////////////////////
+// Don't assert when given a non-function object.
+function h_f(a) {
+	return a();
+}
+
+var x = new Object();
+var h_ret = 0;
+try {
+	h_f(x); // don't assert.
+} catch (e) {
+	h_ret = 1;
+}
+assertEq(h_ret, 1);
+