Bug 1207843 - Part 2/3 - Clean up MacroAssemblerARM spacing. r=h4writer
author Sean Stangl <sstangl@mozilla.com>
Wed, 23 Sep 2015 15:03:33 -0700
changeset 304535 2c189431cdf7b60a633872e459532a08781e81c6
parent 304534 fd8e682dd7de5bf20ba619834235522aca5d47dd
child 304536 c0ad95a1ec35fedb403398b10fedbf3be25094d5
push id 1001
push user raliiev@mozilla.com
push date Mon, 18 Jan 2016 19:06:03 +0000
treeherder mozilla-release@8b89261f3ac4
reviewers h4writer
bugs 1207843
milestone 44.0a1
Bug 1207843 - Part 2/3 - Clean up MacroAssemblerARM spacing. r=h4writer
js/src/jit/arm/MacroAssembler-arm.cpp
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -46,18 +46,17 @@ MacroAssemblerARM::convertBoolToInt32(Re
     ma_and(Imm32(0xff), source, dest);
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(Register src, FloatRegister dest_)
 {
     // Direct conversions aren't possible.
     VFPRegister dest = VFPRegister(dest_);
-    as_vxfer(src, InvalidReg, dest.sintOverlay(),
-             CoreToFloat);
+    as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
     as_vcvt(dest, dest.sintOverlay());
 }
 
 void
 MacroAssemblerARM::convertInt32ToDouble(const Address& src, FloatRegister dest)
 {
     ScratchDoubleScope scratch(asMasm());
     ma_vldr(src, scratch);
@@ -311,34 +310,33 @@ MacroAssemblerARM::alu_dbl(Register src1
     // doesn't have a dest, such as check for overflow by doing first operation
     // don't do second operation if first operation overflowed. This preserves
     // the overflow condition code. Unfortunately, it is horribly brittle.
     as_alu(dest, src1, Operand2(both.fst), interop, LeaveCC, c);
     as_alu(dest, dest, Operand2(both.snd), op, s, c);
     return true;
 }
 
-
 void
 MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
-                          ALUOp op,
-                          SBit s, Condition c)
+                          ALUOp op, SBit s, Condition c)
 {
     // As it turns out, if you ask for a compare-like instruction you *probably*
     // want it to set condition codes.
     if (dest == InvalidReg)
         MOZ_ASSERT(s == SetCC);
 
     // The operator gives us the ability to determine how this can be used.
     Imm8 imm8 = Imm8(imm.value);
     // One instruction: If we can encode it using an imm8m, then do so.
     if (!imm8.invalid) {
         as_alu(dest, src1, imm8, op, s, c);
         return;
     }
+
     // One instruction, negated:
     Imm32 negImm = imm;
     Register negDest;
     ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest);
     Imm8 negImm8 = Imm8(negImm.value);
     // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'. For bonus
     // points, dest can be replaced (nearly always invalid => ScratchRegister)
     // This is useful if we wish to negate tst. tst has an invalid (aka not
@@ -562,58 +560,66 @@ MacroAssemblerARM::ma_mov(ImmGCPtr ptr, 
 }
 
 // Shifts (just a move with a shifting op2)
 void
 MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, lsl(src, shift.value));
 }
+
 void
 MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, lsr(src, shift.value));
 }
+
 void
 MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, asr(src, shift.value));
 }
+
 void
 MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, ror(src, shift.value));
 }
+
 void
 MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst)
 {
     as_mov(dst, rol(src, shift.value));
 }
 
 // Shifts (just a move with a shifting op2)
 void
 MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst)
 {
     as_mov(dst, lsl(src, shift));
 }
+
 void
 MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst)
 {
     as_mov(dst, lsr(src, shift));
 }
+
 void
 MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst)
 {
     as_mov(dst, asr(src, shift));
 }
+
 void
 MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst)
 {
     as_mov(dst, ror(src, shift));
 }
+
 void
 MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
 {
     ScratchRegisterScope scratch(asMasm());
     ma_rsb(shift, Imm32(32), scratch);
     as_mov(dst, ror(src, scratch));
 }
 
@@ -638,27 +644,30 @@ MacroAssemblerARM::ma_neg(Register src1,
 }
 
 // And.
 void
 MacroAssemblerARM::ma_and(Register src, Register dest, SBit s, Assembler::Condition c)
 {
     ma_and(dest, src, dest);
 }
+
 void
 MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
                           SBit s, Assembler::Condition c)
 {
     as_and(dest, src1, O2Reg(src2), s, c);
 }
+
 void
 MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
 {
     ma_alu(dest, imm, dest, OpAnd, s, c);
 }
+
 void
 MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
                           SBit s, Assembler::Condition c)
 {
     ma_alu(src1, imm, dest, OpAnd, s, c);
 }
 
 // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
@@ -669,70 +678,78 @@ MacroAssemblerARM::ma_bic(Imm32 imm, Reg
 }
 
 // Exclusive or.
 void
 MacroAssemblerARM::ma_eor(Register src, Register dest, SBit s, Assembler::Condition c)
 {
     ma_eor(dest, src, dest, s, c);
 }
+
 void
 MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
                           SBit s, Assembler::Condition c)
 {
     as_eor(dest, src1, O2Reg(src2), s, c);
 }
+
 void
 MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
 {
     ma_alu(dest, imm, dest, OpEor, s, c);
 }
+
 void
 MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
        SBit s, Assembler::Condition c)
 {
     ma_alu(src1, imm, dest, OpEor, s, c);
 }
 
 // Or.
 void
 MacroAssemblerARM::ma_orr(Register src, Register dest, SBit s, Assembler::Condition c)
 {
     ma_orr(dest, src, dest, s, c);
 }
+
 void
 MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
                           SBit s, Assembler::Condition c)
 {
     as_orr(dest, src1, O2Reg(src2), s, c);
 }
+
 void
 MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SBit s, Assembler::Condition c)
 {
     ma_alu(dest, imm, dest, OpOrr, s, c);
 }
+
 void
 MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
                           SBit s, Assembler::Condition c)
 {
     ma_alu(src1, imm, dest, OpOrr, s, c);
 }
 
 // Arithmetic-based ops.
 // Add with carry.
 void
 MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SBit s, Condition c)
 {
     ma_alu(dest, imm, dest, OpAdc, s, c);
 }
+
 void
 MacroAssemblerARM::ma_adc(Register src, Register dest, SBit s, Condition c)
 {
     as_alu(dest, dest, O2Reg(src), OpAdc, s, c);
 }
+
 void
 MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SBit s, Condition c)
 {
     as_alu(dest, src1, O2Reg(src2), OpAdc, s, c);
 }
 
 // Add.
 void
@@ -741,127 +758,143 @@ MacroAssemblerARM::ma_add(Imm32 imm, Reg
     ma_alu(dest, imm, dest, OpAdd, s, c);
 }
 
 void
 MacroAssemblerARM::ma_add(Register src1, Register dest, SBit s, Condition c)
 {
     ma_alu(dest, O2Reg(src1), dest, OpAdd, s, c);
 }
+
 void
 MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SBit s, Condition c)
 {
     as_alu(dest, src1, O2Reg(src2), OpAdd, s, c);
 }
+
 void
 MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SBit s, Condition c)
 {
     ma_alu(src1, op, dest, OpAdd, s, c);
 }
+
 void
 MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SBit s, Condition c)
 {
     ma_alu(src1, op, dest, OpAdd, s, c);
 }
 
 // Subtract with carry.
 void
 MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SBit s, Condition c)
 {
     ma_alu(dest, imm, dest, OpSbc, s, c);
 }
+
 void
 MacroAssemblerARM::ma_sbc(Register src1, Register dest, SBit s, Condition c)
 {
     as_alu(dest, dest, O2Reg(src1), OpSbc, s, c);
 }
+
 void
 MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SBit s, Condition c)
 {
     as_alu(dest, src1, O2Reg(src2), OpSbc, s, c);
 }
 
 // Subtract.
 void
 MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SBit s, Condition c)
 {
     ma_alu(dest, imm, dest, OpSub, s, c);
 }
+
 void
 MacroAssemblerARM::ma_sub(Register src1, Register dest, SBit s, Condition c)
 {
     ma_alu(dest, Operand(src1), dest, OpSub, s, c);
 }
+
 void
 MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SBit s, Condition c)
 {
     ma_alu(src1, Operand(src2), dest, OpSub, s, c);
 }
+
 void
 MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SBit s, Condition c)
 {
     ma_alu(src1, op, dest, OpSub, s, c);
 }
+
 void
 MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SBit s, Condition c)
 {
     ma_alu(src1, op, dest, OpSub, s, c);
 }
 
-// Severse subtract.
+// Reverse subtract.
 void
 MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SBit s, Condition c)
 {
     ma_alu(dest, imm, dest, OpRsb, s, c);
 }
+
 void
 MacroAssemblerARM::ma_rsb(Register src1, Register dest, SBit s, Condition c)
 {
     as_alu(dest, dest, O2Reg(src1), OpAdd, s, c);
 }
+
 void
 MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SBit s, Condition c)
 {
     as_alu(dest, src1, O2Reg(src2), OpRsb, s, c);
 }
+
 void
 MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SBit s, Condition c)
 {
     ma_alu(src1, op2, dest, OpRsb, s, c);
 }
 
 // Reverse subtract with carry.
 void
 MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SBit s, Condition c)
 {
     ma_alu(dest, imm, dest, OpRsc, s, c);
 }
+
 void
 MacroAssemblerARM::ma_rsc(Register src1, Register dest, SBit s, Condition c)
 {
     as_alu(dest, dest, O2Reg(src1), OpRsc, s, c);
 }
+
 void
 MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SBit s, Condition c)
 {
     as_alu(dest, src1, O2Reg(src2), OpRsc, s, c);
 }
 
 // Compares/tests.
 // Compare negative (sets condition codes as src1 + src2 would).
 void
 MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c)
 {
     ma_alu(src1, imm, InvalidReg, OpCmn, SetCC, c);
 }
+
 void
 MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c)
 {
     as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCC, c);
 }
+
 void
 MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
 {
     MOZ_CRASH("Feature NYI");
 }
 
 // Compare (src - src2).
 void
@@ -878,16 +911,17 @@ MacroAssemblerARM::ma_cmp(Register src1,
 
 void
 MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr, Condition c)
 {
     ScratchRegisterScope scratch(asMasm());
     ma_mov(ptr, scratch);
     ma_cmp(src1, scratch, c);
 }
+
 void
 MacroAssemblerARM::ma_cmp(Register src1, Operand op, Condition c)
 {
     switch (op.getTag()) {
       case Operand::OP2:
         as_cmp(src1, op.toOp2(), c);
         break;
       case Operand::MEM: {
@@ -895,62 +929,67 @@ MacroAssemblerARM::ma_cmp(Register src1,
         ma_ldr(op.toAddress(), scratch);
         as_cmp(src1, O2Reg(scratch), c);
         break;
       }
       default:
         MOZ_CRASH("trying to compare FP and integer registers");
     }
 }
+
 void
 MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
 {
     as_cmp(src1, O2Reg(src2), c);
 }
 
 // Test for equality, (src1 ^ src2).
 void
 MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c)
 {
     ma_alu(src1, imm, InvalidReg, OpTeq, SetCC, c);
 }
+
 void
 MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
+
 void
 MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
 {
     as_teq(src1, op.toOp2(), c);
 }
 
-
 // Test (src1 & src2).
 void
 MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c)
 {
     ma_alu(src1, imm, InvalidReg, OpTst, SetCC, c);
 }
+
 void
 MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c)
 {
     as_tst(src1, O2Reg(src2), c);
 }
+
 void
 MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c)
 {
     as_tst(src1, op.toOp2(), c);
 }
 
 void
 MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest)
 {
     as_mul(dest, src1, src2);
 }
+
 void
 MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest)
 {
     ScratchRegisterScope scratch(asMasm());
     ma_mov(imm, scratch);
     as_mul(dest, src1, scratch);
 }
 
@@ -1091,22 +1130,24 @@ MacroAssemblerARM::ma_sdiv(Register num,
     as_sdiv(dest, num, div, cond);
 }
 
 void
 MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond)
 {
     as_udiv(dest, num, div, cond);
 }
-// Miscelanous instructions
+
+// Miscellaneous instructions.
 void
 MacroAssemblerARM::ma_clz(Register src, Register dest, Condition cond)
 {
     as_clz(dest, src, cond);
 }
+
 // Memory.
 // Shortcut for when we know we're transferring 32 bits of data.
 void
 MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
                           Index mode, Assembler::Condition cc)
 {
     ma_dataTransferN(ls, 32, true, rn, offset, rt, mode, cc);
 }
@@ -1130,29 +1171,31 @@ MacroAssemblerARM::ma_dtr(LoadStore ls, 
     ma_dataTransferN(ls, 32, true, addr.base, Imm32(addr.offset), rt, mode, cc);
 }
 
 void
 MacroAssemblerARM::ma_str(Register rt, const Address& addr, Index mode, Condition cc)
 {
     ma_dtr(IsStore, rt, addr, mode, cc);
 }
+
 void
 MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode, Condition cc)
 {
     MOZ_ASSERT((rt.code() & 1) == 0);
     MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
     as_extdtr(IsStore, 64, true, mode, rt, addr, cc);
 }
 
 void
 MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Condition cc)
 {
     as_dtr(IsLoad, 32, mode, rt, addr, cc);
 }
+
 void
 MacroAssemblerARM::ma_ldr(const Address& addr, Register rt, Index mode, Condition cc)
 {
     ma_dtr(IsLoad, rt, addr, mode, cc);
 }
 
 void
 MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode, Condition cc)
@@ -1166,46 +1209,49 @@ MacroAssemblerARM::ma_ldrsh(EDtrAddr add
     as_extdtr(IsLoad, 16, true, mode, rt, addr, cc);
 }
 
 void
 MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode, Condition cc)
 {
     as_extdtr(IsLoad, 16, false, mode, rt, addr, cc);
 }
+
 void
 MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode, Condition cc)
 {
     as_extdtr(IsLoad, 8, true, mode, rt, addr, cc);
 }
+
 void
 MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2,
                            Index mode, Condition cc)
 {
     MOZ_ASSERT((rt.code() & 1) == 0);
     MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
     as_extdtr(IsLoad, 64, true, mode, rt, addr, cc);
 }
+
 void
 MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode, Condition cc)
 {
     as_extdtr(IsStore, 16, false, mode, rt, addr, cc);
 }
 
 void
 MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc)
 {
     as_dtr(IsStore, 8, mode, rt, addr, cc);
 }
 
 // Specialty for moving N bits of data, where n == 8,16,32,64.
 BufferOffset
 MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                           Register rn, Register rm, Register rt,
-                                    Index mode, Assembler::Condition cc, unsigned shiftAmount)
+                          Index mode, Assembler::Condition cc, unsigned shiftAmount)
 {
     if (size == 32 || (size == 8 && !IsSigned))
         return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, shiftAmount)), cc);
 
     ScratchRegisterScope scratch(asMasm());
 
     if (shiftAmount != 0) {
         MOZ_ASSERT(rn != scratch);
@@ -1400,16 +1446,17 @@ MacroAssemblerARM::ma_dataTransferN(Load
     }
 }
 
 void
 MacroAssemblerARM::ma_pop(Register r)
 {
     ma_dtr(IsLoad, sp, Imm32(4), r, PostIndex);
 }
+
 void
 MacroAssemblerARM::ma_push(Register r)
 {
     // Pushing sp is not well defined: use two instructions.
     if (r == sp) {
         ScratchRegisterScope scratch(asMasm());
         ma_mov(sp, scratch);
         ma_dtr(IsStore, sp, Imm32(-4), scratch, PreIndex);
@@ -1421,16 +1468,17 @@ MacroAssemblerARM::ma_push(Register r)
 
 void
 MacroAssemblerARM::ma_vpop(VFPRegister r)
 {
     startFloatTransferM(IsLoad, sp, IA, WriteBack);
     transferFloatReg(r);
     finishFloatTransfer();
 }
+
 void
 MacroAssemblerARM::ma_vpush(VFPRegister r)
 {
     startFloatTransferM(IsStore, sp, DB, WriteBack);
     transferFloatReg(r);
     finishFloatTransfer();
 }
 
@@ -1438,16 +1486,17 @@ MacroAssemblerARM::ma_vpush(VFPRegister 
 void
 MacroAssemblerARM::ma_dmb(BarrierOption option)
 {
     if (HasDMBDSBISB())
         as_dmb(option);
     else
         as_dmb_trap();
 }
+
 void
 MacroAssemblerARM::ma_dsb(BarrierOption option)
 {
     if (HasDMBDSBISB())
         as_dsb(option);
     else
         as_dsb_trap();
 }
@@ -1661,82 +1710,91 @@ MacroAssemblerARM::ma_vimm_f32(float val
     as_FImm32Pool(vd, value, cc);
 }
 
 void
 MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc)
 {
     as_vcmp(VFPRegister(src1), VFPRegister(src2), cc);
 }
+
 void
 MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc)
 {
     as_vcmp(VFPRegister(src1).singleOverlay(), VFPRegister(src2).singleOverlay(), cc);
 }
+
 void
 MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc)
 {
     as_vcmpz(VFPRegister(src1), cc);
 }
+
 void
 MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc)
 {
     as_vcmpz(VFPRegister(src1).singleOverlay(), cc);
 }
 
 void
 MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isDouble());
     MOZ_ASSERT(dest.isSInt());
     as_vcvt(dest, src, false, cc);
 }
+
 void
 MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isDouble());
     MOZ_ASSERT(dest.isUInt());
     as_vcvt(dest, src, false, cc);
 }
+
 void
 MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isSInt());
     MOZ_ASSERT(dest.isDouble());
     as_vcvt(dest, src, false, cc);
 }
+
 void
 MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isUInt());
     MOZ_ASSERT(dest.isDouble());
     as_vcvt(dest, src, false, cc);
 }
 
 void
 MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isSingle());
     MOZ_ASSERT(dest.isSInt());
     as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src).singleOverlay(), false, cc);
 }
+
 void
 MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isSingle());
     MOZ_ASSERT(dest.isUInt());
     as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src).singleOverlay(), false, cc);
 }
+
 void
 MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isSInt());
     MOZ_ASSERT(dest.isSingle());
     as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).sintOverlay(), false, cc);
 }
+
 void
 MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc)
 {
     MOZ_ASSERT(src.isUInt());
     MOZ_ASSERT(dest.isSingle());
     as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).uintOverlay(), false, cc);
 }
 
@@ -1824,21 +1882,23 @@ MacroAssemblerARM::ma_vdtr(LoadStore ls,
     return as_vdtr(ls, rt, VFPAddr(scratch, VFPOffImm(0)), cc);
 }
 
 BufferOffset
 MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc)
 {
     return as_vdtr(IsLoad, dest, addr, cc);
 }
+
 BufferOffset
 MacroAssemblerARM::ma_vldr(const Address& addr, VFPRegister dest, Condition cc)
 {
     return ma_vdtr(IsLoad, addr, dest, cc);
 }
+
 BufferOffset
 MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc)
 {
     ScratchRegisterScope scratch(asMasm());
     as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
     return ma_vldr(Address(scratch, 0), src, cc);
 }
 
@@ -1848,16 +1908,17 @@ MacroAssemblerARM::ma_vstr(VFPRegister s
     return as_vdtr(IsStore, src, addr, cc);
 }
 
 BufferOffset
 MacroAssemblerARM::ma_vstr(VFPRegister src, const Address& addr, Condition cc)
 {
     return ma_vdtr(IsStore, addr, src, cc);
 }
+
 BufferOffset
 MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, int32_t shift,
                            int32_t offset, Condition cc)
 {
     ScratchRegisterScope scratch(asMasm());
     as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
     return ma_vstr(src, Address(scratch, offset), cc);
 }
@@ -1935,52 +1996,58 @@ MacroAssemblerARMCompat::addPtr(const Ad
 
 void
 MacroAssemblerARMCompat::move32(Imm32 imm, Register dest)
 {
     ma_mov(imm, dest);
 }
 
 void
-MacroAssemblerARMCompat::move32(Register src, Register dest) {
+MacroAssemblerARMCompat::move32(Register src, Register dest)
+{
     ma_mov(src, dest);
 }
 
 void
 MacroAssemblerARMCompat::movePtr(Register src, Register dest)
 {
     ma_mov(src, dest);
 }
+
 void
 MacroAssemblerARMCompat::movePtr(ImmWord imm, Register dest)
 {
     ma_mov(Imm32(imm.value), dest);
 }
+
 void
 MacroAssemblerARMCompat::movePtr(ImmGCPtr imm, Register dest)
 {
     ma_mov(imm, dest);
 }
+
 void
 MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
+
 void
 MacroAssemblerARMCompat::movePtr(AsmJSImmPtr imm, Register dest)
 {
     RelocStyle rs;
     if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
     append(AsmJSAbsoluteLink(CodeOffsetLabel(currentOffset()), imm.kind()));
     ma_movPatchable(Imm32(-1), dest, Always, rs);
 }
+
 void
 MacroAssemblerARMCompat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
 }
 
 void
 MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex& src, Register dest)
@@ -2096,16 +2163,17 @@ MacroAssemblerARMCompat::load32(const Ba
     loadPtr(address, dest);
 }
 
 void
 MacroAssemblerARMCompat::load32(AbsoluteAddress address, Register dest)
 {
     loadPtr(address, dest);
 }
+
 void
 MacroAssemblerARMCompat::loadPtr(const Address& address, Register dest)
 {
     ma_ldr(address, dest);
 }
 
 void
 MacroAssemblerARMCompat::loadPtr(const BaseIndex& src, Register dest)
@@ -2266,16 +2334,17 @@ MacroAssemblerARMCompat::store16(Registe
 
 void
 MacroAssemblerARMCompat::store16(Imm32 imm, const BaseIndex& dest)
 {
     AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
     ma_mov(imm, scratch2);
     store16(scratch2, dest);
 }
+
 void
 MacroAssemblerARMCompat::store16(Register src, const BaseIndex& address)
 {
     Register index = address.index;
 
     ScratchRegisterScope scratch(asMasm());
 
     // We don't have LSL on index register yet.
@@ -2285,16 +2354,17 @@ MacroAssemblerARMCompat::store16(Registe
     }
 
     if (address.offset != 0) {
         ma_add(index, Imm32(address.offset), scratch);
         index = scratch;
     }
     ma_strh(src, EDtrAddr(address.base, EDtrOffReg(index)));
 }
+
 void
 MacroAssemblerARMCompat::store32(Register src, AbsoluteAddress address)
 {
     storePtr(src, address);
 }
 
 void
 MacroAssemblerARMCompat::store32(Register src, const Address& address)
@@ -2696,16 +2766,17 @@ MacroAssemblerARMCompat::testInt32(Assem
 
 Assembler::Condition
 MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const ValueOperand& value)
 {
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
     ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN));
     return cond;
 }
+
 Assembler::Condition
 MacroAssemblerARMCompat::testDouble(Assembler::Condition cond, const ValueOperand& value)
 {
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
     Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
     ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR));
     return actual;
 }
@@ -3051,18 +3122,18 @@ MacroAssemblerARMCompat::testGCThing(Con
     MOZ_ASSERT(cond == Equal || cond == NotEqual);
     ScratchRegisterScope scratch(asMasm());
     extractTag(address, scratch);
     ma_cmp(scratch, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
     return cond == Equal ? AboveOrEqual : Below;
 }
 
 void
-MacroAssemblerARMCompat::branchTestValue(Condition cond, const ValueOperand& value, const Value& v,
-                                         Label* label)
+MacroAssemblerARMCompat::branchTestValue(Condition cond, const ValueOperand& value,
+                                         const Value& v, Label* label)
 {
     // If cond == NotEqual, branch when a.payload != b.payload || a.tag !=
     // b.tag. If the payloads are equal, compare the tags. If the payloads are
     // not equal, short circuit true (NotEqual).
     //
     // If cand == Equal, branch when a.payload == b.payload && a.tag == b.tag.
     // If the payloads are equal, compare the tags. If the payloads are not
     // equal, short circuit false (NotEqual).
@@ -3241,17 +3312,18 @@ MacroAssemblerARMCompat::loadInt32OrDoub
 
     // Not an int, just load as double.
     bind(&notInt32);
     ma_vldr(src, dest);
     bind(&end);
 }
 
 void
-MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, FloatRegister dest, int32_t shift)
+MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index,
+                                           FloatRegister dest, int32_t shift)
 {
     Label notInt32, end;
 
     JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
 
     ScratchRegisterScope scratch(asMasm());
 
     // If it's an int, convert it to double.
@@ -3278,17 +3350,16 @@ MacroAssemblerARMCompat::loadInt32OrDoub
 
 void
 MacroAssemblerARMCompat::loadConstantDouble(double dp, FloatRegister dest)
 {
     as_FImm64Pool(dest, dp);
 }
 
 // Treat the value as a boolean, and set condition codes accordingly.
-
 Assembler::Condition
 MacroAssemblerARMCompat::testInt32Truthy(bool truthy, const ValueOperand& operand)
 {
     ma_tst(operand.payloadReg(), operand.payloadReg());
     return truthy ? NonZero : Zero;
 }
 
 Assembler::Condition
@@ -3325,18 +3396,18 @@ Register
 MacroAssemblerARMCompat::extractTag(const BaseIndex& address, Register scratch)
 {
     ma_alu(address.base, lsl(address.index, address.scale), scratch, OpAdd, LeaveCC);
     return extractTag(Address(scratch, address.offset), scratch);
 }
 
 template <typename T>
 void
-MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
-                                           MIRType slotType)
+MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType,
+                                           const T& dest, MIRType slotType)
 {
     if (valueType == MIRType_Double) {
         storeDouble(value.reg().typedReg().fpu(), dest);
         return;
     }
 
     // Store the type tag if needed.
     if (valueType != slotType)
@@ -3345,22 +3416,22 @@ MacroAssemblerARMCompat::storeUnboxedVal
     // Store the payload.
     if (value.constant())
         storePayload(value.value(), dest);
     else
         storePayload(value.reg().typedReg().gpr(), dest);
 }
 
 template void
-MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const Address& dest,
-                                           MIRType slotType);
+MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType,
+                                           const Address& dest, MIRType slotType);
 
 template void
-MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const BaseIndex& dest,
-                                           MIRType slotType);
+MacroAssemblerARMCompat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType,
+                                           const BaseIndex& dest, MIRType slotType);
 
 
 void
 MacroAssemblerARMCompat::branchTest64(Condition cond, Register64 lhs, Register64 rhs,
                                       Register temp, Label* label)
 {
     if (cond == Assembler::Zero) {
         MOZ_ASSERT(lhs.low == rhs.low);
@@ -3378,16 +3449,17 @@ MacroAssemblerARMCompat::moveValue(const
 {
     jsval_layout jv = JSVAL_TO_IMPL(val);
     ma_mov(Imm32(jv.s.tag), type);
     if (val.isMarkable())
         ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())), data);
     else
         ma_mov(Imm32(jv.s.payload.i32), data);
 }
+
 void
 MacroAssemblerARMCompat::moveValue(const Value& val, const ValueOperand& dest)
 {
     moveValue(val, dest.typeReg(), dest.payloadReg());
 }
 
 /////////////////////////////////////////////////////////////////
 // X86/X64-common (ARM too now) interface.
@@ -3472,30 +3544,21 @@ MacroAssemblerARMCompat::loadValue(Addre
     // instruction.
 
     if (val.payloadReg().code() < val.typeReg().code()) {
         if (src.offset <= 4 && src.offset >= -8 && (src.offset & 3) == 0) {
             // Turns out each of the 4 value -8, -4, 0, 4 corresponds exactly
             // with one of LDM{DB, DA, IA, IB}
             DTMMode mode;
             switch (src.offset) {
-              case -8:
-                mode = DB;
-                break;
-              case -4:
-                mode = DA;
-                break;
-              case 0:
-                mode = IA;
-                break;
-              case 4:
-                mode = IB;
-                break;
-              default:
-                MOZ_CRASH("Bogus Offset for LoadValue as DTM");
+              case -8: mode = DB; break;
+              case -4: mode = DA; break;
+              case  0: mode = IA; break;
+              case  4: mode = IB; break;
+              default: MOZ_CRASH("Bogus Offset for LoadValue as DTM");
             }
             startDataTransferM(IsLoad, src.base, mode);
             transferReg(val.payloadReg());
             transferReg(val.typeReg());
             finishDataTransfer();
             return;
         }
     }
@@ -3515,35 +3578,39 @@ MacroAssemblerARMCompat::tagValue(JSValu
 {
     MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
     if (payload != dest.payloadReg())
         ma_mov(payload, dest.payloadReg());
     ma_mov(ImmType(type), dest.typeReg());
 }
 
 void
-MacroAssemblerARMCompat::pushValue(ValueOperand val) {
+MacroAssemblerARMCompat::pushValue(ValueOperand val)
+{
     ma_push(val.typeReg());
     ma_push(val.payloadReg());
 }
+
 void
 MacroAssemblerARMCompat::pushValue(const Address& addr)
 {
     ScratchRegisterScope scratch(asMasm());
     ma_ldr(ToType(addr), scratch);
     ma_push(scratch);
     ma_ldr(ToPayloadAfterStackPush(addr), scratch);
     ma_push(scratch);
 }
 
 void
-MacroAssemblerARMCompat::popValue(ValueOperand val) {
+MacroAssemblerARMCompat::popValue(ValueOperand val)
+{
     ma_pop(val.payloadReg());
     ma_pop(val.typeReg());
 }
+
 void
 MacroAssemblerARMCompat::storePayload(const Value& val, const Address& dest)
 {
     AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
 
     jsval_layout jv = JSVAL_TO_IMPL(val);
     if (val.isMarkable())
         ma_mov(ImmGCPtr((gc::Cell*)jv.s.payload.ptr), scratch2);