Merge from mozilla-central.
author      David Anderson <danderson@mozilla.com>
date        Tue, 06 Mar 2012 13:42:21 -0800
changeset   112240 9c866973398c487baf4771af084ffc1c9cfd29f2
parent      112239 f1407f88d2ba716adfbf3c0da434ac1bb9fe73f0 (current diff)
parent      112237 d72074514c6fa4aec75323b18f135b0e720f3e7a (diff)
child       112241 1edb33d3750780b9130f196ef6b5e2e2bed14368
push id     1708
push user   akeybl@mozilla.com
push date   Mon, 19 Nov 2012 21:10:21 +0000
treeherder  mozilla-beta@27b14fe50103
milestone   13.0a1
--- a/js/src/ion/BitSet.cpp
+++ b/js/src/ion/BitSet.cpp
@@ -65,65 +65,38 @@ BitSet::init()
         return false;
 
     memset(bits_, 0, sizeRequired);
 
     return true;
 }
 
 bool
-BitSet::contains(unsigned int value) const
-{
-    JS_ASSERT(bits_);
-    JS_ASSERT(value <= max_);
-
-    return bits_[wordForValue(value)] & bitForValue(value);
-}
-
-bool
 BitSet::empty() const
 {
     JS_ASSERT(bits_);
     for (unsigned int i = 0; i < numWords(); i++) {
         if (bits_[i])
             return false;
     }
     return true;
 }
 
 void
-BitSet::insert(unsigned int value)
-{
-    JS_ASSERT(bits_);
-    JS_ASSERT(value <= max_);
-
-    bits_[wordForValue(value)] |= bitForValue(value);
-}
-
-void
 BitSet::insertAll(const BitSet *other)
 {
     JS_ASSERT(bits_);
     JS_ASSERT(other->max_ == max_);
     JS_ASSERT(other->bits_);
 
     for (unsigned int i = 0; i < numWords(); i++)
         bits_[i] |= other->bits_[i];
 }
 
 void
-BitSet::remove(unsigned int value)
-{
-    JS_ASSERT(bits_);
-    JS_ASSERT(value <= max_);
-
-    bits_[wordForValue(value)] &= ~bitForValue(value);
-}
-
-void
 BitSet::removeAll(const BitSet *other)
 {
     JS_ASSERT(bits_);
     JS_ASSERT(other->max_ == max_);
     JS_ASSERT(other->bits_);
 
     for (unsigned int i = 0; i < numWords(); i++)
         bits_[i] &= ~other->bits_[i];
@@ -170,20 +143,8 @@ BitSet::complement()
 
 void
 BitSet::clear()
 {
     JS_ASSERT(bits_);
     for (unsigned int i = 0; i < numWords(); i++)
         bits_[i] = 0;
 }
-
-BitSet::Iterator
-BitSet::begin()
-{
-    return Iterator(*this, 0);
-}
-
-BitSet::Iterator
-BitSet::end()
-{
-    return Iterator(*this, max_ + 1);
-}
--- a/js/src/ion/BitSet.h
+++ b/js/src/ion/BitSet.h
@@ -84,29 +84,44 @@ class BitSet : private TempObject
 
     static BitSet *New(unsigned int max);
 
     unsigned int getMax() const {
         return max_;
     }
 
     // O(1): Check if this set contains the given value.
-    bool contains(unsigned int value) const;
+    bool contains(unsigned int value) const {
+        JS_ASSERT(bits_);
+        JS_ASSERT(value <= max_);
+
+        return !!(bits_[wordForValue(value)] & bitForValue(value));
+    }
 
     // O(max): Check if this set contains any value.
     bool empty() const;
 
     // O(1): Insert the given value into this set.
-    void insert(unsigned int value);
+    void insert(unsigned int value) {
+        JS_ASSERT(bits_);
+        JS_ASSERT(value <= max_);
+
+        bits_[wordForValue(value)] |= bitForValue(value);
+    }
 
     // O(max): Insert every element of the given set into this set.
     void insertAll(const BitSet *other);
 
     // O(1): Remove the given value from this set.
-    void remove(unsigned int value);
+    void remove(unsigned int value) {
+        JS_ASSERT(bits_);
+        JS_ASSERT(value <= max_);
+
+        bits_[wordForValue(value)] &= ~bitForValue(value);
+    }
 
     // O(max): Remove every element of the given set from this set.
     void removeAll(const BitSet *other);
 
     // O(max): Intersect this set with the given set.
     void intersect(const BitSet *other);
 
     // O(max): Intersect this set with the given set; return whether the
@@ -114,55 +129,75 @@ class BitSet : private TempObject
     bool fixedPointIntersect(const BitSet *other);
 
     // O(max): Does inplace complement of the set.
     void complement();
 
     // O(max): Clear this set.
     void clear();
 
-    // Iterator to the beginning of this set.
-    Iterator begin();
-
-    // Iterator to the end of this set.
-    Iterator end();
-
     uint32 *raw() const {
         return bits_;
     }
     size_t rawLength() const {
         return numWords();
     }
 };
 
 class BitSet::Iterator
 {
   private:
     BitSet &set_;
     unsigned index_;
+    unsigned word_;
+    uint32 value_;
 
   public:
-    Iterator(BitSet &set, unsigned int index) :
+    Iterator(BitSet &set) :
       set_(set),
-      index_(index)
+      index_(0),
+      word_(0),
+      value_(set.bits_[0])
     {
-        if (index_ <= set_.max_ && !set_.contains(index_))
+        if (!set_.contains(index_))
             (*this)++;
     }
 
-    bool operator!=(const Iterator &other) const {
-        return index_ != other.index_;
+    inline bool more() const {
+        return word_ < set_.numWords();
+    }
+    inline operator bool() const {
+        return more();
     }
 
-    // FIXME (668305): Use bit scan.
-    Iterator& operator++(int dummy) {
+    inline Iterator& operator++(int dummy) {
+        JS_ASSERT(more());
         JS_ASSERT(index_ <= set_.max_);
-        do {
-            index_++;
-        } while (index_ <= set_.max_ && !set_.contains(index_));
+
+        index_++;
+        value_ >>= 1;
+
+        // Skip words containing only zeros.
+        while (value_ == 0) {
+            word_++;
+            if (!more())
+                return *this;
+
+            index_ = word_ * sizeof(value_) * 8;
+            value_ = set_.bits_[word_];
+        }
+
+        // The result of js_bitscan_ctz32 is undefined if the input is 0.
+        JS_ASSERT(value_ != 0);
+
+        int numZeros = js_bitscan_ctz32(value_);
+        index_ += numZeros;
+        value_ >>= numZeros;
+
+        JS_ASSERT_IF(index_ <= set_.max_, set_.contains(index_));
         return *this;
     }
 
     unsigned int operator *() {
         JS_ASSERT(index_ <= set_.max_);
         return index_;
     }
 };
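
For readers skimming the BitSet change: the new Iterator no longer walks index by index; it skips whole zero words and uses js_bitscan_ctz32 (count trailing zeros) to jump straight to the next set bit. Below is a minimal standalone sketch of that technique, assuming __builtin_ctz as a stand-in for js_bitscan_ctz32 and a bare uint32_t array in place of BitSet; the patch shifts value_ right and tracks index_ instead of clearing bits, but the effect is the same.

    #include <stdint.h>
    #include <stdio.h>

    // Print every set bit in a word array. Zero words are skipped outright,
    // and count-trailing-zeros jumps to the next set bit instead of testing
    // one index at a time.
    static void printSetBits(const uint32_t *words, unsigned numWords)
    {
        for (unsigned w = 0; w < numWords; w++) {
            uint32_t value = words[w];
            while (value != 0) {
                // Like js_bitscan_ctz32, __builtin_ctz is undefined for 0,
                // so it is only called on a non-zero word.
                unsigned bit = __builtin_ctz(value);
                printf("%u\n", w * 32 + bit);
                value &= value - 1;  // clear the lowest set bit
            }
        }
    }

    int main()
    {
        uint32_t words[2] = { 0x00000009u, 0x80000000u };  // bits 0, 3 and 63
        printSetBits(words, 2);
        return 0;
    }
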
--- a/js/src/ion/LinearScan.cpp
+++ b/js/src/ion/LinearScan.cpp
@@ -510,19 +510,19 @@ LinearScanAllocator::buildLivenessInfo()
                 LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                 uint32 reg = use->toUse()->virtualRegister();
                 live->insert(reg);
             }
         }
 
         // Variables are assumed alive for the entire block; a define shortens
         // the interval to the point of definition.
-        for (BitSet::Iterator i(live->begin()); i != live->end(); i++) {
-            vregs[*i].getInterval(0)->addRange(inputOf(block->firstId()),
-                                               outputOf(block->lastId()).next());
+        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
+            vregs[*liveRegId].getInterval(0)->addRange(inputOf(block->firstId()),
+                                                       outputOf(block->lastId()).next());
         }
 
         // Shorten the front end of live intervals for live variables to their
         // point of definition, if found.
         for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
             // Calls may clobber registers, so force a spill and reload around the callsite.
             if (ins->isCall()) {
                 for (AnyRegisterIterator iter(RegisterSet::All()); iter.more(); iter++)
@@ -624,19 +624,19 @@ LinearScanAllocator::buildLivenessInfo()
             // loop is not possible. Additionally, we require liveIn in a later
             // pass for resolution, so that must also be fixed up here.
             MBasicBlock *loopBlock = mblock->backedge();
             while (true) {
                 // Blocks must already have been visited to have a liveIn set.
                 JS_ASSERT(loopBlock->id() >= mblock->id());
 
                 // Add an interval for this entire loop block
-                for (BitSet::Iterator i(live->begin()); i != live->end(); i++) {
-                    vregs[*i].getInterval(0)->addRange(inputOf(loopBlock->lir()->firstId()),
-                                                       outputOf(loopBlock->lir()->lastId()).next());
+                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
+                    vregs[*liveRegId].getInterval(0)->addRange(inputOf(loopBlock->lir()->firstId()),
+                                                               outputOf(loopBlock->lir()->lastId()).next());
                 }
 
                 // Fix up the liveIn set to account for the new interval
                 liveIn[loopBlock->id()]->insertAll(live);
 
                 // Make sure we don't visit this node again
                 loopDone->insert(loopBlock->id());
 
@@ -923,17 +923,17 @@ LinearScanAllocator::resolveControlFlow(
                 if (!moves->add(to->getAllocation(), to->reg()->canonicalSpill()))
                     return false;
             }
         }
 
         // Resolve split intervals with moves
         BitSet *live = liveIn[mSuccessor->id()];
 
-        for (BitSet::Iterator liveRegId(live->begin()); liveRegId != live->end(); liveRegId++) {
+        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
             LiveInterval *to = vregs[*liveRegId].intervalFor(inputOf(successor->firstId()));
             JS_ASSERT(to);
 
             for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
                 LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
                 LiveInterval *from = vregs[*liveRegId].intervalFor(outputOf(predecessor->lastId()));
                 JS_ASSERT(from);
 
--- a/js/src/ion/arm/Assembler-arm.cpp
+++ b/js/src/ion/arm/Assembler-arm.cpp
@@ -391,20 +391,21 @@ ion::PatchJump(CodeLocationJump jump_, C
 void
 Assembler::finish()
 {
     JS_ASSERT(!isFinished);
     isFinished = true;
     for (size_t i = 0; i < jumps_.length(); i++) {
         jumps_[i].fixOffset(m_buffer);
     }
-    
+
     for (int i = 0; i < tmpDataRelocations_.length(); i++) {
         int offset = tmpDataRelocations_[i].getOffset();
-        dataRelocations_.writeUnsigned(offset + m_buffer.poolSizeBefore(offset));
+        int real_offset = offset + m_buffer.poolSizeBefore(offset);
+        dataRelocations_.writeUnsigned(real_offset);
     }
 }
 
 void
 Assembler::executableCopy(uint8 *buffer)
 {
     JS_ASSERT(isFinished);
     m_buffer.executableCopy(buffer);
@@ -2097,17 +2098,18 @@ Assembler::nextInstruction(uint8 *inst_,
 bool instIsGuard(Instruction *inst, const PoolHeader **ph)
 {
     Assembler::Condition c;
     inst->extractCond(&c);
     if (c != Assembler::Always)
         return false;
     if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
         return false;
-    *ph = inst->as<const PoolHeader>();
+    // See if the next instruction is a pool header.
+    *ph = (inst+1)->as<const PoolHeader>();
     return *ph != NULL;
 }
 
 bool instIsArtificialGuard(Instruction *inst, const PoolHeader **ph)
 {
     if (!instIsGuard(inst, ph))
         return false;
     return !(*ph)->isNatural();
--- a/js/src/ion/arm/CodeGenerator-arm.cpp
+++ b/js/src/ion/arm/CodeGenerator-arm.cpp
@@ -1131,16 +1131,60 @@ CodeGeneratorARM::visitCompareDAndBranch
     Assembler::Condition cond = masm.compareDoubles(comp->jsop(), lhs, rhs);
     // TODO: we don't handle anything that has an undefined in it.
     emitBranch(cond, comp->ifTrue(), comp->ifFalse());
     //    Assembler::Condition cond = masm.compareDoubles(comp->jsop(), lhs, rhs);
 
     return true;
 }
 
+bool
+CodeGeneratorARM::visitNotI(LNotI *ins)
+{
+    // It is hard to optimize !x, so just do it the basic way for now.
+    masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
+    emitSet(Assembler::Equal, ToRegister(ins->output()));
+    return true;
+}
+
+bool
+CodeGeneratorARM::visitNotD(LNotD *ins)
+{
+    // Since this operation is a logical NOT, we want to set a bit if
+    // the double is falsey, which means 0.0, -0.0 or NaN.
+    // When comparing with 0, an input of 0 will set the Z bit (30)
+    // and NaN will set the V bit (28) of the APSR.
+    FloatRegister opd = ToFloatRegister(ins->input());
+    Register dest = ToRegister(ins->output());
+
+    // Do the compare
+    masm.ma_vcmpz(opd);
+    bool nocond = true;
+    if (nocond) {
+        // Load the value into the dest register
+        masm.as_vmrs(dest);
+        masm.ma_lsr(Imm32(28), dest, dest);
+        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
+        masm.ma_and(Imm32(1), dest);
+    } else {
+        masm.as_vmrs(pc);
+        masm.ma_mov(Imm32(0), dest);
+        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
+        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
+#if 0
+        masm.as_vmrs(ToRegister(dest));
+        // Mask out just the two bits we care about.  If neither bit is set,
+        // the dest is already zero
+        masm.ma_and(Imm32(0x50000000), dest, dest, Assembler::SetCond);
+        // If it is non-zero, then force it to be 1.
+        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::NotEqual);
+#endif
+    }
+    return true;
+}
 
 bool
 CodeGeneratorARM::visitLoadSlotV(LLoadSlotV *load)
 {
     const ValueOperand out = ToOutValue(load);
     Register base = ToRegister(load->input());
     int32 offset = load->mir()->slot() * sizeof(js::Value);
 
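
The visitNotD fast path above relies on where VMRS puts the VFP flags: after ma_vcmpz, Z (bit 30) is set when the input compared equal to 0.0 (including -0.0) and V (bit 28) is set for an unordered comparison (NaN). A rough sketch of the same bit arithmetic, assuming an already-fetched status word; notDoubleFromStatus is a hypothetical helper, not part of the patch.

    #include <stdint.h>
    #include <assert.h>

    // Fold Z (bit 30) and V (bit 28) of a status word into a 0/1 result,
    // mirroring the lsr/orr/and sequence emitted by visitNotD.
    static uint32_t notDoubleFromStatus(uint32_t apsr)
    {
        uint32_t dest = apsr >> 28;  // ma_lsr(Imm32(28), dest, dest)
        dest |= dest >> 2;           // ma_alu(dest, lsr(dest, 2), dest, op_orr); 28 + 2 = 30
        return dest & 1;             // ma_and(Imm32(1), dest)
    }

    int main()
    {
        assert(notDoubleFromStatus(1u << 30) == 1);  // Z set: input was 0.0 or -0.0
        assert(notDoubleFromStatus(1u << 28) == 1);  // V set: input was NaN (unordered)
        assert(notDoubleFromStatus(1u << 31) == 0);  // only N set: ordinary non-zero double
        return 0;
    }
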
--- a/js/src/ion/arm/CodeGenerator-arm.h
+++ b/js/src/ion/arm/CodeGenerator-arm.h
@@ -114,16 +114,18 @@ class CodeGeneratorARM : public CodeGene
     virtual bool visitShiftOp(LShiftOp *ins);
 
     virtual bool visitTestIAndBranch(LTestIAndBranch *test);
     virtual bool visitCompare(LCompare *comp);
     virtual bool visitCompareAndBranch(LCompareAndBranch *comp);
     virtual bool visitTestDAndBranch(LTestDAndBranch *test);
     virtual bool visitCompareD(LCompareD *comp);
     virtual bool visitCompareDAndBranch(LCompareDAndBranch *comp);
+    virtual bool visitNotI(LNotI *ins);
+    virtual bool visitNotD(LNotD *ins);
 
     virtual bool visitMathD(LMathD *math);
     virtual bool visitRound(LRound *lir);
     virtual bool visitTableSwitch(LTableSwitch *ins);
 
 
     // Out of line visitors.
     bool visitOutOfLineBailout(OutOfLineBailout *ool);
--- a/js/src/ion/arm/IonFrames-arm.h
+++ b/js/src/ion/arm/IonFrames-arm.h
@@ -129,18 +129,20 @@ class IonRectifierFrameLayout : public I
         return sizeof(IonRectifierFrameLayout);
     }
 };
 
 class IonBailedRectifierFrameLayout : public IonJSFrameLayout
 {
   public:
     static inline size_t Size() {
-        // Include an extra word for the dead callee token.
-        return sizeof(IonBailedRectifierFrameLayout) + sizeof(void *);
+        // On X86, there is a +sizeof(uintptr_t) to account for an extra callee token.
+        // This is not needed here because sizeof(IonExitFrame) == sizeof(IonRectifierFrame)
+        // due to extra padding.
+        return sizeof(IonBailedRectifierFrameLayout);
     }
 };
 
 // this is the frame layout when we are exiting ion code, and about to enter EABI code
 class IonExitFrameLayout : public IonCommonFrameLayout
 {
     void *padding2;
 
--- a/js/src/ion/arm/MacroAssembler-arm.cpp
+++ b/js/src/ion/arm/MacroAssembler-arm.cpp
@@ -270,17 +270,17 @@ MacroAssemblerARM::ma_mov(Imm32 imm, Reg
 {
     ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_mov(const ImmGCPtr &ptr, Register dest)
 {
     writeDataRelocation(nextOffset());
-    ma_mov(Imm32(ptr.value), dest);
+    ma_movPatchable(Imm32(ptr.value), dest, Always, L_MOVWT);
 }
 
     // Shifts (just a move with a shifting op2)
 void
 MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, lsl(src, shift.value));
 }
@@ -587,18 +587,18 @@ void
 MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c)
 {
     ma_alu(src1, imm, InvalidReg, op_cmp, SetCond, c);
 }
 
 void
 MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr, Condition c)
 {
-    writeDataRelocation(nextOffset());
-    ma_alu(src1, Imm32(ptr.value), InvalidReg, op_cmp, SetCond, c);
+    ma_mov(ptr, ScratchRegister);
+    ma_cmp(src1, ScratchRegister, c);
 }
 void
 MacroAssemblerARM::ma_cmp(Register src1, Operand op, Condition c)
 {
     switch (op.getTag()) {
       case Operand::OP2:
         as_cmp(src1, op.toOp2(), c);
         break;
@@ -1227,17 +1227,16 @@ MacroAssemblerARMCompat::movePtr(const R
 void
 MacroAssemblerARMCompat::movePtr(const ImmWord &imm, const Register &dest)
 {
     ma_mov(Imm32(imm.value), dest);
 }
 void
 MacroAssemblerARMCompat::movePtr(const ImmGCPtr &imm, const Register &dest)
 {
-    writeDataRelocation(nextOffset());
     ma_mov(imm, dest);
 }
 void
 MacroAssemblerARMCompat::movePtr(const Address &src, const Register &dest)
 {
     loadPtr(src, dest);
 }
 
@@ -1392,17 +1391,20 @@ MacroAssemblerARMCompat::addPtr(Imm32 im
 {
     ma_add(imm, dest);
 }
 
 // higher level tag testing code
 Assembler::Condition
 MacroAssemblerARMCompat::compareDoubles(JSOp compare, FloatRegister lhs, FloatRegister rhs)
 {
-    ma_vcmp(lhs, rhs);
+    if (rhs == InvalidFloatReg)
+        ma_vcmpz(lhs);
+    else
+        ma_vcmp(lhs, rhs);
     as_vmrs(pc);
     switch (compare) {
       case JSOP_STRICTNE:
       case JSOP_NE:
         return Assembler::VFP_NotEqualOrUnordered;
       case JSOP_STRICTEQ:
       case JSOP_EQ:
         return Assembler::VFP_Equal;
@@ -1864,21 +1866,21 @@ MacroAssemblerARMCompat::popValue(ValueO
     ma_pop(val.payloadReg());
     ma_pop(val.typeReg());
 }
 void
 MacroAssemblerARMCompat::storePayload(const Value &val, Operand dest)
 {
     jsval_layout jv = JSVAL_TO_IMPL(val);
     if (val.isMarkable()) {
-        ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), ScratchRegister);
+        ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), lr);
     } else {
-        ma_mov(Imm32(jv.s.payload.i32), ScratchRegister);
+        ma_mov(Imm32(jv.s.payload.i32), lr);
     }
-    ma_str(ScratchRegister, ToPayload(dest));
+    ma_str(lr, ToPayload(dest));
 }
 void
 MacroAssemblerARMCompat::storePayload(Register src, Operand dest)
 {
     if (dest.getTag() == Operand::MEM) {
         ma_str(src, ToPayload(dest));
         return;
     }
@@ -1910,18 +1912,18 @@ MacroAssemblerARMCompat::storePayload(Re
     // Technically, shift > -32 can be handled by changing LSL to ASR, but should never come up,
     // and this is one less code path to get wrong.
     as_dtr(IsStore, 32, Offset, src, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
 }
 
 void
 MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Operand dest) {
     if (dest.getTag() == Operand::MEM) {
-        ma_mov(tag, ScratchRegister);
-        ma_str(ScratchRegister, ToType(dest));
+        ma_mov(tag, lr);
+        ma_str(lr, ToType(dest));
         return;
     }
 
     JS_NOT_REACHED("why do we do all of these things?");
 
 }
 
 void
@@ -2019,16 +2021,18 @@ MacroAssemblerARMCompat::setupAlignedABI
 
 void
 MacroAssemblerARMCompat::setupUnalignedABICall(uint32 args, const Register &scratch)
 {
     setupABICall(args);
     dynamicAlignment_ = true;
 
     ma_mov(sp, scratch);
+
+    // Force sp to be aligned
     ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
     ma_push(scratch);
 }
 
 void
 MacroAssemblerARMCompat::passABIArg(const MoveOperand &from)
 {
     MoveOperand to;
@@ -2046,17 +2050,17 @@ MacroAssemblerARMCompat::passABIArg(cons
 
     Register destReg;
     MoveOperand dest;
     if (GetArgReg(usedSlots_, &destReg)) {
         if (from.isDouble()) {
             floatArgsInGPR[destReg.code() >> 1] = VFPRegister(from.floatReg());
             useResolver = false;
         } else {
-            dest = MoveOperand(dest);
+            dest = MoveOperand(destReg);
         }
     } else {
         uint32 disp = GetArgStackDisp(usedSlots_);
         dest = MoveOperand(sp, disp);
     }
 
     if (useResolver)
         enoughMemory_ = enoughMemory_ && moveResolver_.addMove(from, dest, kind);
@@ -2085,19 +2089,23 @@ void MacroAssemblerARMCompat::checkStack
         bind(&good);
 #endif
 }
 
 void
 MacroAssemblerARMCompat::callWithABI(void *fun, Result result)
 {
     JS_ASSERT(inCall_);
-    uint32 stackAdjust = ((usedSlots_ - 4 > 0) ? usedSlots_ - 4 : 0) * STACK_SLOT_SIZE;
+    uint32 stackAdjust = ((usedSlots_ > NumArgRegs) ? usedSlots_ - NumArgRegs : 0) * STACK_SLOT_SIZE;
     if (!dynamicAlignment_)
-        stackAdjust += 8-(framePushed_ & 7);
+        stackAdjust +=
+            ComputeByteAlignment(framePushed_ + stackAdjust, StackAlignment);
+    else
+        // STACK_SLOT_SIZE accounts for the saved stack pointer pushed by setupUnalignedABICall.
+        stackAdjust += ComputeByteAlignment(stackAdjust + STACK_SLOT_SIZE, StackAlignment);
 
     reserveStack(stackAdjust);
     // Position all arguments.
     {
         enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
         if (!enoughMemory_)
             return;
 
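
The stackAdjust math above is the usual pad-to-boundary computation. A small sketch follows, assuming ComputeByteAlignment returns the padding needed to round a byte count up to the next multiple of the alignment, a StackAlignment of 8 bytes, and a 4-byte STACK_SLOT_SIZE (32-bit ARM EABI); byteAlignment here is an illustrative stand-in, not the real helper.

    #include <stdint.h>
    #include <assert.h>

    // Padding needed so that (bytes + padding) is a multiple of alignment.
    static uint32_t byteAlignment(uint32_t bytes, uint32_t alignment)
    {
        return (alignment - (bytes % alignment)) % alignment;
    }

    int main()
    {
        const uint32_t StackAlignment = 8;  // assumed ARM EABI stack alignment
        // Already-aligned frame: nothing to pad.
        assert(byteAlignment(0, StackAlignment) == 0);
        // Frame with 4 bytes pushed needs 4 more to reach an 8-byte boundary.
        assert(byteAlignment(4, StackAlignment) == 4);
        // Dynamic-alignment path: pad around the single saved-sp stack slot.
        assert(byteAlignment(4 /* assumed STACK_SLOT_SIZE */, StackAlignment) == 4);
        return 0;
    }
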
--- a/js/src/ion/arm/MacroAssembler-arm.h
+++ b/js/src/ion/arm/MacroAssembler-arm.h
@@ -688,34 +688,34 @@ class MacroAssemblerARMCompat : public M
     void moveValue(const Value &val, const ValueOperand &dest);
 
     void storeValue(ValueOperand val, Operand dst);
     void storeValue(ValueOperand val, Register base, Register index, int32 shift = defaultShift);
     void storeValue(ValueOperand val, const Address &dest) {
         storeValue(val, Operand(dest));
     }
     void storeValue(JSValueType type, Register reg, Address dest) {
-        ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), ScratchRegister);
-        ma_str(ScratchRegister, Address(dest.base, dest.offset + 4));
+        ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), lr);
+        ma_str(lr, Address(dest.base, dest.offset + 4));
         ma_str(reg, dest);
     }
     void storeValue(ValueOperand val, const BaseIndex &dest) {
         // Harder cases not handled yet.
         JS_ASSERT(dest.offset == 0);
         storeValue(val, dest.base, dest.index);
     }
     void storeValue(const Value &val, Address dest) {
         jsval_layout jv = JSVAL_TO_IMPL(val);
-        ma_mov(Imm32(jv.s.tag), ScratchRegister);
-        ma_str(ScratchRegister, Address(dest.base, dest.offset + 4));
+        ma_mov(Imm32(jv.s.tag), lr);
+        ma_str(lr, Address(dest.base, dest.offset + 4));
         if (val.isGCThing())
-            ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())), ScratchRegister);
+            ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())), lr);
         else
-            ma_mov(Imm32(jv.s.payload.i32), ScratchRegister);
-        ma_str(ScratchRegister, dest);
+            ma_mov(Imm32(jv.s.payload.i32), lr);
+        ma_str(lr, dest);
     }
 
     void loadValue(Address src, ValueOperand val);
     void loadValue(Operand dest, ValueOperand val) {
         loadValue(dest.toAddress(), val);
     }
     void loadValue(Register base, Register index, ValueOperand val);
     void loadValue(const BaseIndex &addr, ValueOperand val) {
--- a/js/src/ion/arm/Trampoline-arm.cpp
+++ b/js/src/ion/arm/Trampoline-arm.cpp
@@ -361,17 +361,17 @@ IonCompartment::generateArgumentsRectifi
 
     masm.ma_mov(sp, r3); // Save %rsp.
 
     // Push undefined.
     {
         Label undefLoopTop;
         masm.bind(&undefLoopTop);
         masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
-        masm.ma_sub(r2, Imm32(1), r2);
+        masm.ma_sub(r2, Imm32(1), r2, SetCond);
 
         masm.ma_b(&undefLoopTop, Assembler::NonZero);
     }
 
     // Get the topmost argument.
 
     masm.ma_alu(r3, lsl(r8, 3), r3, op_add); // r3 <- r3 + nargs * 8
     masm.ma_add(r3, Imm32(sizeof(IonJSFrameLayout)), r3);
@@ -604,17 +604,16 @@ IonCompartment::generateVMWrapper(JSCont
 
     // Initialize and set the context parameter.
     // r0 is the first argument register.
     Register cxreg = r0;
     masm.loadJSContext(cx->runtime, cxreg);
     masm.passABIArg(cxreg);
 
     size_t argDisp = 0;
-    size_t argc = 1;
 
     // Copy arguments.
     if (f.explicitArgs) {
         for (uint32 explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
             MoveOperand from;
             switch (f.argProperties(explicitArg)) {
               case VMFunction::WordByValue:
                 masm.passABIArg(MoveOperand(argsBase, argDisp));
@@ -637,17 +636,16 @@ IonCompartment::generateVMWrapper(JSCont
                 break;
             }
         }
     }
 
     // Copy the implicit outparam, if any.
     if (outReg != InvalidReg)
         masm.passABIArg(outReg);
-    JS_ASSERT(f.argc() == argc);
 
     masm.callWithABI(f.wrapped);
 
     // Test for failure.
     Label exception;
     // Called functions return bools, which are 0/false and non-zero/true
     masm.ma_cmp(r0, Imm32(0));
     masm.ma_b(&exception, Assembler::Zero);
--- a/js/src/ion/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/ion/shared/IonAssemblerBufferWithConstantPools.h
@@ -165,17 +165,17 @@ struct Pool {
         }
         memcpy(&poolData[numEntries * immSize], data, immSize);
         loadOffsets.append(off.getOffset());
         return numEntries++;
     }
 
     bool reset() {
         numEntries = 0;
-        uint32 buffSize = 8;
+        buffSize = 8;
         poolData = static_cast<uint8*>(malloc(buffSize * immSize));
         if (poolData == NULL)
             return false;
         other = new Pool(other->maxOffset, other->immSize, other->instSize, other->bias,
                          other->alignment, other->isBackref, other->canDedup);
         if (other == NULL)
             return false;
         new (&loadOffsets) LoadOffsets;