Destroy absolutely everything, move most functions out of *Assembler-arm.h into *Assembler-arm.cpp (bug 693449, r=jbramley)
authorMarty Rosenberg <mrosenberg@mozilla.com>
Thu, 13 Oct 2011 11:56:47 -0700
changeset 78972 6e442768f0ca0b5a76a7dd5e845f5458fecd80a0
parent 78971 2101d4b040ee620a9d9fcdf48ffb1828d999c050
child 78973 b2cc6f0cc5802c576962f2fef1b10cdb2eb61394
push id: unknown
push user: unknown
push date: unknown
reviewers: jbramley
bugs: 693449
milestone: 10.0a1
Destroy absolutely everything, move most functions out of *Assembler-arm.h into *Assembler-arm.cpp (bug 693449, r=jbramley)
js/src/ion/arm/Assembler-arm.cpp
js/src/ion/arm/Assembler-arm.h
js/src/ion/arm/MacroAssembler-arm.cpp
js/src/ion/arm/MacroAssembler-arm.h
--- a/js/src/ion/arm/Assembler-arm.cpp
+++ b/js/src/ion/arm/Assembler-arm.cpp
@@ -501,8 +501,677 @@ js::ion::VFPImm::VFPImm(uint32 top)
     data = -1;
     datastore::Imm8VFPImmData tmp;
     if (DoubleEncoder::lookup(top, &tmp)) {
         data = tmp.encode();
     }
 }
 
 js::ion::DoubleEncoder js::ion::DoubleEncoder::_this;
+
+//VFPRegister implementation
+
+// Return the double-precision register that aliases this one.
+// A non-double register s<n> maps onto d<n/2>; a double maps to itself.
+// NOTE(review): nothing checks that a non-double is a float (rather than
+// int) register here — confirm callers only use this on float kinds.
+VFPRegister
+VFPRegister::doubleOverlay()
+{
+    JS_ASSERT(!_isInvalid);
+    if (kind != Double) {
+        return VFPRegister(_code >> 1, Double);
+    } else {
+        return *this;
+    }
+}
+// Return the single-precision register that aliases this one.
+// d<n> overlays s<2n> and s<2n+1>, so the code doubles; a single-kind
+// register keeps its code.
+VFPRegister
+VFPRegister::singleOverlay()
+{
+    JS_ASSERT(!_isInvalid);
+    if (kind == Double) {
+        // There are no corresponding float registers for d16-d31
+        ASSERT(_code < 16);
+        // Was VFPRegister(_code << 1, Double): the *single* overlay of a
+        // double must have Single kind, not Double.
+        return VFPRegister(_code << 1, Single);
+    } else {
+        return VFPRegister(_code, Single);
+    }
+}
+
+// Return the integer-typed register that aliases this one.
+VFPRegister
+VFPRegister::intOverlay()
+{
+    JS_ASSERT(!_isInvalid);
+    if (kind == Double) {
+        // There are no corresponding float registers for d16-d31
+        ASSERT(_code < 16);
+        // Was VFPRegister(_code << 1, Double): the overlay requested here
+        // is the Int view, not another Double (copy/paste from
+        // singleOverlay).
+        return VFPRegister(_code << 1, Int);
+    } else {
+        return VFPRegister(_code, Int);
+    }
+}
+// Is this the invalid (default/unusable) register value?
+bool
+VFPRegister::isInvalid()
+{
+    return _isInvalid;
+}
+
+// Is this a placeholder for an intentionally omitted operand (e.g. the
+// unused vn of a unary VFP op)?  Only meaningful on a valid register.
+bool
+VFPRegister::isMissing()
+{
+    JS_ASSERT(!_isInvalid);
+    return _isMissing;
+}
+
+
+// Has any constituent buffer run out of memory?
+bool
+Assembler::oom() const
+{
+    // Also check dataRelocations_: its length is reported by
+    // dataRelocationTableBytes(), so an OOM in it must be visible here too.
+    return m_buffer.oom() ||
+        !enoughMemory_ ||
+        jumpRelocations_.oom() ||
+        dataRelocations_.oom();
+}
+
+// Reserve `bytes` of space in the deferred-data section for `data`,
+// recording the offset it will live at.  Returns false once the pool
+// would exceed MAX_BUFFER_SIZE, or if the append fails.
+bool
+Assembler::addDeferredData(DeferredData *data, size_t bytes)
+{
+    data->setOffset(dataBytesNeeded_);
+    dataBytesNeeded_ += bytes;
+    if (dataBytesNeeded_ >= MAX_BUFFER_SIZE)
+        return false;
+    return data_.append(data);
+}
+
+// Record a code label to be resolved once the final code address is known.
+bool
+Assembler::addCodeLabel(CodeLabel *label)
+{
+    return codeLabels_.append(label);
+}
+
+// Size of the instruction stream, in bytes.
+size_t
+Assembler::size() const
+{
+    return m_buffer.uncheckedSize();
+}
+// Size of the relocation table, in bytes.
+size_t
+Assembler::jumpRelocationTableBytes() const
+{
+    return jumpRelocations_.length();
+}
+size_t
+Assembler::dataRelocationTableBytes() const
+{
+    return dataRelocations_.length();
+}
+
+// Size of the data table, in bytes.
+size_t
+Assembler::dataSize() const
+{
+    return dataBytesNeeded_;
+}
+// Total bytes needed to serialize code, data and both relocation tables.
+size_t
+Assembler::bytesNeeded() const
+{
+    return size() +
+        dataSize() +
+        jumpRelocationTableBytes() +
+        dataRelocationTableBytes();
+}
+// write a blob of binary into the instruction stream
+void
+Assembler::writeInst(uint32 x)
+{
+    m_buffer.putInt(x);
+}
+
+// Pad the instruction stream with no-ops (mov r0, r0) until it is aligned
+// to `alignment` bytes.
+void
+Assembler::align(int alignment)
+{
+    while (!m_buffer.isAligned(alignment))
+        as_mov(r0, O2Reg(r0));
+
+}
+// Emit a data-processing (ALU) instruction.  An InvalidReg dest/src1
+// contributes no bits — used by the mov/mvn and compare/test forms, which
+// omit one of the two registers.
+void
+Assembler::as_alu(Register dest, Register src1, Operand2 op2,
+                ALUOp op, SetCond_ sc, Condition c)
+{
+    writeInst((int)op | (int)sc | (int) c | op2.encode() |
+              ((dest == InvalidReg) ? 0 : RD(dest)) |
+              ((src1 == InvalidReg) ? 0 : RN(src1)));
+}
+// Moves: no first source operand.
+void
+Assembler::as_mov(Register dest,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, InvalidReg, op2, op_mov, sc, c);
+}
+void
+Assembler::as_mvn(Register dest, Operand2 op2,
+                SetCond_ sc, Condition c)
+{
+    as_alu(dest, InvalidReg, op2, op_mvn, sc, c);
+}
+// logical operations
+void
+Assembler::as_and(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_and, sc, c);
+}
+void
+Assembler::as_bic(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_bic, sc, c);
+}
+void
+Assembler::as_eor(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_eor, sc, c);
+}
+void
+Assembler::as_orr(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_orr, sc, c);
+}
+// mathematical operations
+void
+Assembler::as_adc(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_adc, sc, c);
+}
+void
+Assembler::as_add(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_add, sc, c);
+}
+void
+Assembler::as_sbc(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_sbc, sc, c);
+}
+void
+Assembler::as_sub(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_sub, sc, c);
+}
+void
+Assembler::as_rsb(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_rsb, sc, c);
+}
+void
+Assembler::as_rsc(Register dest, Register src1,
+                Operand2 op2, SetCond_ sc, Condition c)
+{
+    as_alu(dest, src1, op2, op_rsc, sc, c);
+}
+// test operations: no destination register, and the condition codes are
+// always set (SetCond) — otherwise the instruction would be a no-op.
+void
+Assembler::as_cmn(Register src1, Operand2 op2,
+                Condition c)
+{
+    as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c);
+}
+void
+Assembler::as_cmp(Register src1, Operand2 op2,
+                Condition c)
+{
+    as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c);
+}
+void
+Assembler::as_teq(Register src1, Operand2 op2,
+                Condition c)
+{
+    as_alu(InvalidReg, src1, op2, op_teq, SetCond, c);
+}
+void
+Assembler::as_tst(Register src1, Operand2 op2,
+                Condition c)
+{
+    as_alu(InvalidReg, src1, op2, op_tst, SetCond, c);
+}
+
+// Not quite ALU worthy, but useful nonetheless:
+// These also have the issue of being formatted
+// completely differently from the standard ALU operations.
+// movw: load a 16 bit immediate into the low half of a register (ARMv6T2+).
+void
+Assembler::as_movw(Register dest, Imm16 imm, Condition c)
+{
+    JS_ASSERT(hasMOVWT());
+    writeInst(0x03000000 | c | imm.encode() | RD(dest));
+}
+// movt: load a 16 bit immediate into the top half of a register.
+void
+Assembler::as_movt(Register dest, Imm16 imm, Condition c)
+{
+    JS_ASSERT(hasMOVWT());
+    writeInst(0x03400000 | c | imm.encode() | RD(dest));
+}
+// Data transfer instructions: ldr, str, ldrb, strb.
+// Using an int to differentiate between 8 bits and 32 bits is
+// overkill, but meh
+void
+Assembler::as_dtr(LoadStore ls, int size, Index mode,
+                Register rt, DTRAddr addr, Condition c)
+{
+    JS_ASSERT(size == 32 || size == 8);
+    writeInst( 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c |
+               RT(rt) | addr.encode());
+    return;
+}
+// Handles all of the other integral data transferring functions:
+// ldrsb, ldrsh, ldrd, etc.
+// size is given in bits.
+void
+Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
+                   Register rt, EDtrAddr addr, Condition c)
+{
+    // extra_bits2 lands in bits [6:5] (the S/H field); extra_bits1 is the
+    // load bit (bit 20).  See "Extra load/store instructions" in the ARM ARM.
+    int extra_bits2 = 0;
+    int extra_bits1 = 0;
+    switch(size) {
+      case 8:
+        // Only ldrsb comes through here; plain ldrb/strb go via as_dtr.
+        JS_ASSERT(IsSigned);
+        JS_ASSERT(ls != IsStore);
+        // These were left at 0, which does not encode an extra load at
+        // all.  A signed byte load is L=1 (extra_bits1) with S:H = 0b10.
+        extra_bits1 = 0x1;
+        extra_bits2 = 0x2;
+        break;
+      case 16:
+        //case 32:
+        // doesn't need to be handled-- it is handled by the default ldr/str
+        extra_bits2 = 0x01;
+        extra_bits1 = (ls == IsStore) ? 0 : 1;
+        if (IsSigned) {
+            JS_ASSERT(ls != IsStore);
+            extra_bits2 |= 0x2;
+        }
+        break;
+      case 64:
+        // ldrd/strd: L stays 0; S:H selects between the two encodings.
+        if (ls == IsStore) {
+            extra_bits2 = 0x3;
+        } else {
+            extra_bits2 = 0x2;
+        }
+        extra_bits1 = 0;
+        break;
+      default:
+        JS_NOT_REACHED("SAY WHAT?");
+    }
+    writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 |
+              addr.encode() | RT(rt) | c);
+    return;
+}
+
+// Load/store multiple: `mask` is the register list, `mode` the
+// increment/decrement + before/after addressing mode, `wb` the
+// write-back bit.
+void
+Assembler::as_dtm(LoadStore ls, Register rn, uint32 mask,
+                DTMMode mode, DTMWriteBack wb, Condition c)
+{
+    writeInst(0x08000000 | RN(rn) | ls |
+              mode | mask | c | wb);
+
+    return;
+}
+
+// Control flow stuff:
+
+// bx can *only* branch to a register
+// never to an immediate.
+void
+Assembler::as_bx(Register r, Condition c)
+{
+    writeInst(((int) c) | op_bx | r.code());
+}
+
+// Branch can branch to an immediate *or* to a register.
+// Branches to immediates are pc relative, branches to registers
+// are absolute
+void
+Assembler::as_b(BOffImm off, Condition c)
+{
+    writeInst(((int)c) | op_b | off.encode());
+}
+
+// Branch to a label.  If the label is not yet bound, this branch becomes a
+// link in a list threaded through the unresolved branch instructions
+// themselves; bind() later walks that list (via nextLink) and patches each.
+void
+Assembler::as_b(Label *l, Condition c)
+{
+    BufferOffset next = nextOffset();
+    if (l->bound()) {
+        as_b(BufferOffset(l).diffB(next), c);
+    } else {
+        // Ugh.  int32 :(
+        int32 old = l->use(next.getOffset());
+        if (old == LabelBase::INVALID_OFFSET) {
+            // -4 encodes an all-ones destination field, which nextLink()
+            // recognizes as the end-of-list marker.
+            old = -4;
+        }
+        // This will currently throw an assertion if we couldn't actually
+        // encode the offset of the branch.
+        as_b(BOffImm(old), c);
+    }
+}
+// Patch an already-emitted branch at `inst` to jump by `off`.
+void
+Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
+{
+    *editSrc(inst) = ((int)c) | op_b | off.encode();
+}
+
+// blx can go to either an immediate or a register.
+// When blx'ing to a register, we change processor mode
+// depending on the low bit of the register
+// when blx'ing to an immediate, we *always* change processor state.
+// NYI.
+void
+Assembler::as_blx(Label *l)
+{
+    JS_NOT_REACHED("Feature NYI");
+}
+
+void
+Assembler::as_blx(Register r, Condition c)
+{
+    writeInst(((int) c) | op_blx | r.code());
+}
+void
+Assembler::as_bl(BOffImm off, Condition c)
+{
+    writeInst(((int)c) | op_bl | off.encode());
+}
+// bl can only branch+link to an immediate, never to a register
+// it never changes processor state
+// NYI.
+void
+Assembler::as_bl()
+{
+    JS_NOT_REACHED("Feature NYI");
+}
+// bl #imm can have a condition code, blx #imm cannot.
+// blx reg can be conditional.
+// Branch-and-link to a label; unbound labels are threaded into the same
+// use list scheme as as_b(Label*) above.
+void
+Assembler::as_bl(Label *l, Condition c)
+{
+    BufferOffset next = nextOffset();
+    if (l->bound()) {
+        as_bl(BufferOffset(l).diffB(next), c);
+    } else {
+        int32 old = l->use(next.getOffset());
+        // See if the list was empty.  Use the named constant instead of a
+        // bare -1 so this stays in sync with as_b(Label*).
+        if (old == LabelBase::INVALID_OFFSET) {
+            old = -4;
+        }
+        // This will fail if we couldn't actually
+        // encode the offset of the branch.
+        as_bl(BOffImm(old), c);
+    }
+}
+// Patch an already-emitted bl at `inst` to branch by `off`.
+void
+Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
+{
+    *editSrc(inst) = ((int)c) | op_bl | off.encode();
+}
+
+// VFP instructions!
+
+// Unityped variants: all registers hold the same (ieee754 single/double)
+// notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
+void
+Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                  VFPOp op, Condition c)
+{
+    // Make sure we believe that all of our operands are the same kind
+    JS_ASSERT(vd.equiv(vn) && vd.equiv(vm));
+    // The size bit selects between the single and double encodings.
+    vfp_size sz = isDouble;
+    if (!vd.isDouble()) {
+        sz = isSingle;
+    }
+    writeInst(VD(vd) | VN(vn) | VM(vm) | op | c | sz | 0x0e000a00);
+}
+
+// vd = vn + vm
+void
+Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                 Condition c)
+{
+    as_vfp_float(vd, vn, vm, opv_add, c);
+}
+
+// vd = vn / vm
+void
+Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                 Condition c)
+{
+    // Was opv_mul — a copy/paste from as_vmul; a divide must use the
+    // divide opcode.
+    as_vfp_float(vd, vn, vm, opv_div, c);
+}
+
+// vd = vn * vm
+void
+Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                 Condition c)
+{
+    as_vfp_float(vd, vn, vm, opv_mul, c);
+}
+
+// NYI.  NOTE(review): this emits an ordinary vmul *before* asserting
+// unreachable, which would leave a stray instruction in the stream if
+// asserts are compiled out — consider dropping the emission until
+// implemented.
+void
+Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                  Condition c)
+{
+    as_vfp_float(vd, vn, vm, opv_mul, c);
+    JS_NOT_REACHED("Feature NYI");
+}
+
+// NYI.
+void
+Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                  Condition c)
+{
+    JS_NOT_REACHED("Feature NYI");
+}
+
+// NYI.
+void
+Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                  Condition c)
+{
+    JS_NOT_REACHED("Feature NYI");
+}
+
+// vd = -vm (unary: vn is omitted)
+void
+Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c)
+{
+    as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c);
+}
+
+// vd = sqrt(vm)
+void
+Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c)
+{
+    as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c);
+}
+
+// vd = |vm|
+void
+Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
+{
+    as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c);
+}
+
+// vd = vn - vm
+void
+Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
+                 Condition c)
+{
+    as_vfp_float(vd, vn, vm, opv_sub, c);
+}
+
+// Compare vd with vm.
+// NOTE(review): this passes opv_sub, apparently copy/pasted from as_vsub
+// above — a compare should use the vcmp opcode.  Confirm against the
+// VFPOp enum (not visible in this patch) before relying on this emitter.
+void
+Assembler::as_vcmp(VFPRegister vd, VFPRegister vm,
+                 Condition c)
+{
+    as_vfp_float(vd, NoVFPRegister, vm, opv_sub, c);
+}
+
+// specifically, a move between two same sized-registers
+void
+Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
+{
+    as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c);
+}
+//xfer between Core and VFP
+
+// Unlike the next function, moving between the core registers and vfp
+// registers can't be *that* properly typed.  Namely, since I don't want to
+// munge the type VFPRegister to also include core registers.  Thus, the core
+// and vfp registers are passed in based on their type, and src/dest is
+// determined by the float2core.
+
+void
+Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
+                  Condition c)
+{
+    vfp_size sz = isSingle;
+    if (vm.isDouble()) {
+        // Technically, this can be done with a vmov à la ARM ARM under vmov
+        // however, that requires at least an extra bit saying if the
+        // operation should be performed on the lower or upper half of the
+        // double.  Moving a single to/from 2N/2N+1 isn't equivalent,
+        // since there are 32 single registers, and 32 double registers
+        // so there is no way to encode the last 16 double registers.
+        JS_ASSERT(vt2 != InvalidReg);
+        sz = isDouble;
+    }
+    // A second core register means a 64 bit (two-word) transfer.
+    VFPXferSize xfersz = WordTransfer;
+    if (vt2 != InvalidReg) {
+        // We are doing a 64 bit transfer.
+        xfersz = DoubleTransfer;
+    }
+    writeInst(xfersz | f2c | c | sz |
+              RT(vt1) | ((vt2 != InvalidReg) ? RN(vt2) : 0) | VM(vm));
+}
+
+// our encoding actually allows just the src and the dest (and their types)
+// to uniquely specify the encoding that we are going to use.
+// NYI.
+void
+Assembler::as_vcvt(VFPRegister vd, VFPRegister vm,
+                 Condition c)
+{
+    JS_NOT_REACHED("Feature NYI");
+}
+// xfer between VFP and memory
+void
+Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
+                 Condition c /* vfp doesn't have a wb option*/)
+{
+    vfp_size sz = isDouble;
+    if (!vd.isDouble()) {
+        sz = isSingle;
+    }
+
+    writeInst(0x0D000A00 | addr.encode() | VD(vd) | sz | c);
+}
+
+// VFP's ldm/stm work differently from the standard arm ones.
+// You can only transfer a range
+
+// NOTE(review): the `st` parameter is never used — the emitted word takes
+// its load/store, mode, update and condition bits from the dtm* members;
+// presumably those are set up by a start-transfer call elsewhere.  Confirm.
+void
+Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
+                 /*also has update conditions*/Condition c)
+{
+    JS_ASSERT(length <= 16 && length >= 0);
+    vfp_size sz = isDouble;
+    if (!vd.isDouble()) {
+        sz = isSingle;
+    } else {
+        // Doubles occupy two transfer slots each.
+        length *= 2;
+    }
+    writeInst(dtmLoadStore | RN(rn) | VD(vd) |
+              length |
+              dtmMode | dtmUpdate | dtmCond |
+              0x0C000B00 | sz);
+}
+
+// Load one of ARM's magic 256 encodable double immediates into vd.
+void
+Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
+{
+    vfp_size sz = isDouble;
+    if (!vd.isDouble()) {
+        // totally do not know how to handle this right now
+        sz = isSingle;
+        JS_NOT_REACHED("non-double immediate");
+    }
+    writeInst(c | sz | imm.encode() | VD(vd) | 0x0EB00A00);
+
+}
+
+// Follow the chain of unbound-label uses threaded through the branch
+// instructions themselves (see as_b(Label*)).  Returns false when `b`
+// holds the end-of-list marker; otherwise writes the next link to *next.
+bool
+Assembler::nextLink(BufferOffset b, BufferOffset *next)
+{
+    uint32 branch = *editSrc(b);
+    JS_ASSERT(((branch & op_b_mask) == op_b) ||
+              ((branch & op_b_mask) == op_bl));
+    uint32 dest = (branch & op_b_dest_mask);
+    // turns out the end marker is the same as the mask.
+    if (dest == op_b_dest_mask)
+        return false;
+    // add in the extra 2 bits of padding that we chopped off when we made the b
+    dest = dest << 2;
+    // and let everyone know about it.
+    new (next) BufferOffset(dest);
+    return true;
+}
+
+// Bind `label` to the current instruction offset, walking its chain of
+// pending uses and patching each branch (b or bl) to point here.
+void
+Assembler::bind(Label *label)
+{
+    //        JSC::MacroAssembler::Label jsclabel;
+    if (label->used()) {
+        bool more;
+        BufferOffset dest = nextOffset();
+        BufferOffset b(label);
+        do {
+            // Grab the next link before we patch over it.
+            BufferOffset next;
+            more = nextLink(b, &next);
+            uint32 branch = *editSrc(b);
+            Condition c = getCondition(branch);
+            switch (branch & op_b_mask) {
+              case op_b:
+                as_b(dest.diffB(b), c, b);
+                break;
+              case op_bl:
+                as_bl(dest.diffB(b), c, b);
+                break;
+              default:
+                JS_NOT_REACHED("crazy fixup!");
+            }
+            b = next;
+        } while (more);
+    }
+    label->bind(nextOffset().getOffset());
+}
+
+// Bind an absolute label to `address` inside generated code.  The x86
+// implementation is kept under #if 0 as a reference; not yet ported.
+void
+Assembler::Bind(IonCode *code, AbsoluteLabel *label, const void *address)
+{
+#if 0
+    uint8 *raw = code->raw();
+    if (label->used()) {
+        intptr_t src = label->offset();
+        do {
+            intptr_t next = reinterpret_cast<intptr_t>(JSC::ARMAssembler::getPointer(raw + src));
+            JSC::ARMAssembler::setPointer(raw + src, address);
+            src = next;
+        } while (src != AbsoluteLabel::INVALID_OFFSET);
+    }
+    JS_ASSERT(((uint8 *)address - raw) >= 0 && ((uint8 *)address - raw) < INT_MAX);
+    label->bind();
+#endif
+    JS_NOT_REACHED("Feature NYI");
+}
+
+// Call to a label.  Disabled x86 reference implementation; not yet ported.
+void
+Assembler::call(Label *label)
+{
+#if 0
+    if (label->bound()) {
+        masm.linkJump(masm.call(), JmpDst(label->offset()));
+    } else {
+        JmpSrc j = masm.call();
+        JmpSrc prev = JmpSrc(label->use(j.offset()));
+        masm.setNextJump(j, prev);
+    }
+#endif
+    JS_NOT_REACHED("Feature NYI");
+}
+
+// Emit a breakpoint instruction (presumably bkpt #0 — the hard-coded
+// word matches that encoding; confirm against the ARM ARM).
+void
+Assembler::as_bkpt()
+{
+    writeInst(0xe1200070);
+}
--- a/js/src/ion/arm/Assembler-arm.h
+++ b/js/src/ion/arm/Assembler-arm.h
@@ -157,51 +157,21 @@ class VFPRegister
     }
     bool isDouble() { return kind == Double; }
     bool isSingle() { return kind == Single; }
     bool isFloat() { return (kind == Double) || (kind == Single); }
     bool isInt() { return (kind == UInt) || (kind == Int); }
     bool isSInt()   { return kind == Int; }
     bool isUInt()   { return kind == UInt; }
     bool equiv(VFPRegister other) { return other.kind == kind; }
-    VFPRegister doubleOverlay() {
-        JS_ASSERT(!_isInvalid);
-        if (kind != Double) {
-            return VFPRegister(_code >> 1, Double);
-        } else {
-            return *this;
-        }
-    }
-    VFPRegister singleOverlay() {
-        JS_ASSERT(!_isInvalid);
-        if (kind == Double) {
-            // There are no corresponding float registers for d16-d31
-            ASSERT(_code < 16);
-            return VFPRegister(_code << 1, Double);
-        } else {
-            return VFPRegister(_code, Single);
-        }
-    }
-    VFPRegister intOverlay() {
-        JS_ASSERT(!_isInvalid);
-        if (kind == Double) {
-            // There are no corresponding float registers for d16-d31
-            ASSERT(_code < 16);
-            return VFPRegister(_code << 1, Double);
-        } else {
-            return VFPRegister(_code, Int);
-        }
-    }
-    bool isInvalid() {
-        return _isInvalid;
-    }
-    bool isMissing() {
-        JS_ASSERT(!_isInvalid);
-        return _isMissing;
-    }
+    VFPRegister doubleOverlay();
+    VFPRegister singleOverlay();
+    VFPRegister intOverlay();
+    bool isInvalid();
+    bool isMissing();
     struct VFPRegIndexSplit;
     VFPRegIndexSplit encode();
     // for serializing values
     struct VFPRegIndexSplit {
         const uint32 block : 4;
         const uint32 bit : 1;
       private:
         friend VFPRegIndexSplit js::ion::VFPRegister::encode();
@@ -382,17 +352,17 @@ class ValueOperand
 // a register-shifted-by-a-register.  I represent this in C++ by having a
 // base class Operand2, which just stores the 32 bits of data as they will be
 // encoded in the instruction.  You cannot directly create an Operand2
 // since it is tricky, and not entirely sane to do so.  Instead, you create
 // one of its child classes, e.g. Imm8.  Imm8's constructor takes a single
 // integer argument.  Imm8 will verify that its argument can be encoded
 // as an ARM 12 bit imm8, encode it using an Imm8data, and finally call
 // its parent's (Operand2) constructor with the Imm8data.  The Operand2
-// constructor will then call the Imm8data's toInt() function to extract
+// constructor will then call the Imm8data's encode() function to extract
 // the raw bits from it.  In the future, we should be able to extract
 // data from the Operand2 by asking it for its component Imm8data
 // structures.  The reason this is so horribly round-about is I wanted
 // to have Imm8 and RegisterShiftedRegister inherit directly from Operand2
 // but have all of them take up only a single word of storage.
 // I also wanted to avoid passing around raw integers at all
 // since they are error prone.
 namespace datastore {
@@ -404,17 +374,17 @@ struct Reg
     uint32 RRS : 1;
     ShiftType Type : 2;
     // I'd like this to be a more sensible encoding, but that would
     // need to be a struct and that would not pack :(
     uint32 ShiftAmount : 5;
     uint32 pad : 20;
     Reg(uint32 rm, ShiftType type, uint32 rsr, uint32 shiftamount)
         : RM(rm), RRS(rsr), Type(type), ShiftAmount(shiftamount), pad(0) {}
-    uint32 toInt() {
+    uint32 encode() {
         return RM | RRS << 4 | Type << 5 | ShiftAmount << 7;
     }
 };
 // Op2 has a mode labelled "<imm8m>", which is arm's magical
 // immediate encoding.  Some instructions actually get 8 bits of
 // data, which is called Imm8Data below.  These should have edit
 // distance > 1, but this is how it is for now.
 struct Imm8mData
@@ -423,48 +393,48 @@ struct Imm8mData
     uint32 data:8;
     uint32 rot:4;
     // Throw in an extra bit that will be 1 if we can't encode this
     // properly.  if we can encode it properly, a simple "|" will still
     // suffice to meld it into the instruction.
     uint32 buff : 19;
   public:
     uint32 invalid : 1;
-    uint32 toInt() {
+    uint32 encode() {
         JS_ASSERT(!invalid);
         return data | rot << 8;
     };
     // Default constructor makes an invalid immediate.
     Imm8mData() : data(0xff), rot(0xf), invalid(1) {}
     Imm8mData(uint32 data_, uint32 rot_)
         : data(data_), rot(rot_), invalid(0)  {}
 };
 
 struct Imm8Data
 {
   private:
     uint32 imm4L : 4;
     uint32 pad : 4;
     uint32 imm4H : 4;
   public:
-    uint32 toInt() {
+    uint32 encode() {
         return imm4L | (imm4H << 8);
     };
     Imm8Data(uint32 imm) : imm4L(imm&0xf), imm4H(imm>>4) {
         JS_ASSERT(imm <= 0xff);
     }
 };
 // VLDR/VSTR take an 8 bit offset, which is implicitly left shifted
 // by 2.
 struct Imm8VFPOffData
 {
   private:
     uint32 data;
   public:
-    uint32 toInt() {
+    uint32 encode() {
         return data;
     };
     Imm8VFPOffData(uint32 imm) : data (imm) {
         JS_ASSERT((imm & ~(0xff)) == 0);
     }
 };
 // ARM can magically encode 256 very special immediates to be moved
 // into a register.
@@ -486,60 +456,60 @@ struct Imm8VFPImmData
         JS_ASSERT(imm <= 0xff);
     }
 };
 
 struct Imm12Data
 {
     uint32 data : 12;
     Imm12Data(uint32 imm) : data(imm) { JS_ASSERT(data == imm); }
-    uint32 toInt() { return data; }
+    uint32 encode() { return data; }
 };
 
 struct RIS
 {
     uint32 ShiftAmount : 5;
     RIS(uint32 imm) : ShiftAmount(imm) { ASSERT(ShiftAmount == imm); }
-    uint32 toInt () {
+    uint32 encode () {
         return ShiftAmount;
     }
 };
 struct RRS
 {
     uint32 MustZero : 1;
     // the register that holds the shift amount
     uint32 RS : 4;
     RRS(uint32 rs) : RS(rs) { ASSERT(rs == RS); }
-    uint32 toInt () {return RS << 1;}
+    uint32 encode () {return RS << 1;}
 };
 
 } // datastore
 
 //
 class MacroAssemblerARM;
 class Operand;
 class Operand2
 {
   public:
     uint32 oper : 31;
     uint32 invalid : 1;
     // Constructors
   protected:
     friend class MacroAssemblerARM;
     Operand2(datastore::Imm8mData base)
-        : oper(base.invalid ? -1 : (base.toInt() | (uint32)IsImmOp2)),
+        : oper(base.invalid ? -1 : (base.encode() | (uint32)IsImmOp2)),
           invalid(base.invalid)
     {
     }
-    Operand2(datastore::Reg base) : oper(base.toInt() | (uint32)IsNotImmOp2) {}
+    Operand2(datastore::Reg base) : oper(base.encode() | (uint32)IsNotImmOp2) {}
   private:
     friend class Operand;
     Operand2(int blob) : oper(blob) {}
   public:
-    uint32 toInt() { return oper; }
+    uint32 encode() { return oper; }
 };
 
 class Imm8 : public Operand2
 {
   public:
     static datastore::Imm8mData encodeImm(uint32 imm) {
         // In gcc, clz is undefined if you call it with 0.
         if (imm == 0)
@@ -581,19 +551,19 @@ class Imm8 : public Operand2
     static TwoImm8mData encodeTwoImms(uint32);
     Imm8(uint32 imm) : Operand2(encodeImm(imm)) {}
 };
 
 class Op2Reg : public Operand2
 {
   protected:
     Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
-        : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.toInt())) {}
+        : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode())) {}
     Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
-        : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.toInt())) {}
+        : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode())) {}
 };
 class O2RegImmShift : public Op2Reg
 {
   public:
     O2RegImmShift(Register rn, ShiftType type, uint32 shift)
         : Op2Reg(rn, type, datastore::RIS(shift)) {}
 };
 
@@ -620,40 +590,40 @@ O2RegRegShift ror (Register r, Register 
 // an unsigned offset, then the instruction specifies if the offset is positive
 // or negative.  The +/- bit is necessary if the instruction set wants to be
 // able to have a negative register offset e.g. ldr pc, [r1,-r2];
 class DtrOff
 {
     uint32 data;
   protected:
     DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
-    : data(immdata.toInt() | (uint32)IsImmDTR | ((uint32)iu)) {}
+    : data(immdata.encode() | (uint32)IsImmDTR | ((uint32)iu)) {}
     DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
-        : data(reg.toInt() | (uint32) IsNotImmDTR | iu) {}
+        : data(reg.encode() | (uint32) IsNotImmDTR | iu) {}
   public:
-    uint32 toInt() { return data; }
+    uint32 encode() { return data; }
 };
 
 class DtrOffImm : public DtrOff
 {
   public:
     DtrOffImm(int32 imm)
         : DtrOff(datastore::Imm12Data(abs(imm)), imm >= 0 ? IsUp : IsDown)
     { JS_ASSERT((imm < 4096) && (imm > -4096)); }
 };
 
 class DtrOffReg : public DtrOff
 {
     // These are designed to be called by a constructor of a subclass.
     // Constructing the necessary RIS/RRS structures are annoying
   protected:
     DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm)
-        : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.toInt())) {}
+        : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode())) {}
     DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg)
-        : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.toInt())) {}
+        : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode())) {}
 };
 
 class DtrRegImmShift : public DtrOffReg
 {
   public:
     DtrRegImmShift(Register rn, ShiftType type, uint32 shift)
         : DtrOffReg(rn, type, datastore::RIS(shift)) {}
 };
@@ -667,35 +637,35 @@ class DtrRegRegShift : public DtrOffReg
 
 // we will frequently want to bundle a register with its offset so that we have
 // an "operand" to a load instruction.
 class DTRAddr
 {
     uint32 data;
   public:
     DTRAddr(Register reg, DtrOff dtr)
-        : data(dtr.toInt() | (reg.code() << 16)) {}
-    uint32 toInt() { return data; }
+        : data(dtr.encode() | (reg.code() << 16)) {}
+    uint32 encode() { return data; }
   private:
     friend class Operand;
     DTRAddr(uint32 blob) : data(blob) {}
 };
 
 // Offsets for the extended data transfer instructions:
 // ldrsh, ldrd, ldrsb, etc.
 class EDtrOff
 {
   protected:
     uint32 data;
     EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
-        : data(imm8.toInt() | IsImmEDTR | (uint32)iu) {}
+        : data(imm8.encode() | IsImmEDTR | (uint32)iu) {}
     EDtrOff(Register rm, IsUp_ iu = IsUp)
         : data(rm.code() | IsNotImmEDTR | iu) {}
   public:
-    uint32 toInt() { return data; }
+    uint32 encode() { return data; }
 };
 
 class EDtrOffImm : public EDtrOff
 {
   public:
     EDtrOffImm(uint32 imm)
         : EDtrOff(datastore::Imm8Data(abs(imm)), (imm >= 0) ? IsUp : IsDown) {}
 };
@@ -708,26 +678,26 @@ class EDtrOffReg : EDtrOff
   public:
     EDtrOffReg(Register rm) : EDtrOff(rm) {}
 };
 
 class EDtrAddr
 {
     uint32 data;
   public:
-    EDtrAddr(Register r, EDtrOff off) : data(RN(r) | off.toInt()) {}
-    uint32 toInt() { return data; }
+    EDtrAddr(Register r, EDtrOff off) : data(RN(r) | off.encode()) {}
+    uint32 encode() { return data; }
 };
 
 class VFPOff
 {
     uint32 data;
   protected:
     VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
-        : data(imm.toInt() | (uint32)isup) {}
+        : data(imm.encode() | (uint32)isup) {}
   public:
     uint32 encode() { return data; }
 };
 
 class VFPOffImm : public VFPOff
 {
   public:
     VFPOffImm(uint32 imm)
@@ -738,32 +708,32 @@ class VFPAddr
     uint32 data;
     friend class Operand;
     VFPAddr(uint32 blob) : data(blob) {}
   public:
     VFPAddr(Register base, VFPOff off)
         : data(RN(base) | off.encode())
     {
     }
-    uint32 toInt() { return data; }
+    uint32 encode() { return data; }
 };
 
 class VFPImm {
     uint32 data;
   public:
     VFPImm(uint32 top);
     uint32 encode() { return data; }
     bool isValid() { return data != -1; }
 };
 
 class BOffImm
 {
     uint32 data;
   public:
-    uint32 toInt() {
+    uint32 encode() {
         return data;
     }
     BOffImm(int offset) : data (offset >> 2 & 0x00ffffff) {
         JS_ASSERT ((offset & 0x3) == 0);
         JS_ASSERT (offset >= -33554432);
         JS_ASSERT (offset <= 33554428);
     }
 };
@@ -773,17 +743,17 @@ class Imm16
     uint32 pad : 4;
     uint32 upper : 4;
   public:
     Imm16(uint32 imm)
         : lower(imm & 0xfff), pad(0), upper((imm>>12) & 0xf)
     {
         JS_ASSERT(uint32(lower | (upper << 12)) == imm);
     }
-    uint32 toInt() { return lower | upper << 16; }
+    uint32 encode() { return lower | upper << 16; }
 };
 // FP Instructions use a different set of registers,
 // with a different encoding, so this calls for a different class.
 // which will be implemented later
 // IIRC, this has been subsumed by vfpreg.
 class FloatOp
 {
     uint32 data;
@@ -806,21 +776,21 @@ class Operand
         EDTR,
         VDTR,
         FOP
     };
   private:
     Tag_ Tag;
     uint32 data;
   public:
-    Operand (Operand2 init) : Tag(OP2), data(init.toInt()) {}
-    Operand (Register reg)  : Tag(OP2), data(O2Reg(reg).toInt()) {}
+    Operand (Operand2 init) : Tag(OP2), data(init.encode()) {}
+    Operand (Register reg)  : Tag(OP2), data(O2Reg(reg).encode()) {}
     Operand (FloatRegister reg)  : Tag(FOP), data(reg.code()) {}
-    Operand (DTRAddr addr) : Tag(DTR), data(addr.toInt()) {}
-    Operand (VFPAddr addr) : Tag(VDTR), data(addr.toInt()) {}
+    Operand (DTRAddr addr) : Tag(DTR), data(addr.encode()) {}
+    Operand (VFPAddr addr) : Tag(VDTR), data(addr.encode()) {}
     Tag_ getTag() { return Tag; }
     Operand2 toOp2() { return Operand2(data); }
     DTRAddr toDTRAddr() {JS_ASSERT(Tag == DTR); return DTRAddr(data); }
     VFPAddr toVFPAddr() {JS_ASSERT(Tag == VDTR); return VFPAddr(data); }
 };
 
 class Assembler
 {
@@ -927,389 +897,170 @@ public:
 
     {
     }
     static Condition InvertCondition(Condition cond);
 
     // MacroAssemblers hold onto gcthings, so they are traced by the GC.
     void trace(JSTracer *trc);
 
-    bool oom() const {
-        return m_buffer.oom() ||
-            !enoughMemory_ ||
-            jumpRelocations_.oom();
-    }
+    bool oom() const;
 
     void executableCopy(void *buffer);
     void processDeferredData(IonCode *code, uint8 *data);
     void processCodeLabels(IonCode *code);
     void copyJumpRelocationTable(uint8 *buffer);
     void copyDataRelocationTable(uint8 *buffer);
 
-    bool addDeferredData(DeferredData *data, size_t bytes) {
-        data->setOffset(dataBytesNeeded_);
-        dataBytesNeeded_ += bytes;
-        if (dataBytesNeeded_ >= MAX_BUFFER_SIZE)
-            return false;
-        return data_.append(data);
-    }
+    bool addDeferredData(DeferredData *data, size_t bytes);
 
-    bool addCodeLabel(CodeLabel *label) {
-        return codeLabels_.append(label);
-    }
+    bool addCodeLabel(CodeLabel *label);
 
     // Size of the instruction stream, in bytes.
-    size_t size() const {
-        return m_buffer.uncheckedSize();
-    }
+    size_t size() const;
     // Size of the jump relocation table, in bytes.
-    size_t jumpRelocationTableBytes() const {
-        return jumpRelocations_.length();
-    }
-    size_t dataRelocationTableBytes() const {
-        return dataRelocations_.length();
-    }
+    size_t jumpRelocationTableBytes() const;
+    size_t dataRelocationTableBytes() const;
     // Size of the data table, in bytes.
-    size_t dataSize() const {
-        return dataBytesNeeded_;
-    }
-    size_t bytesNeeded() const {
-        return size() +
-               dataSize() +
-               jumpRelocationTableBytes() +
-               dataRelocationTableBytes();
-    }
+    size_t dataSize() const;
+    size_t bytesNeeded() const;
     // write a blob of binary into the instruction stream
-    void writeBlob(uint32 x)
-    {
-        m_buffer.putInt(x);
-    }
+    void writeInst(uint32 x);
 
   public:
-    void align(int alignment) {
-        while (!m_buffer.isAligned(alignment))
-            as_mov(r0, O2Reg(r0));
-
-    }
+    void align(int alignment);
     void as_alu(Register dest, Register src1, Operand2 op2,
-                ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always) {
-        writeBlob((int)op | (int)sc | (int) c | op2.toInt() |
-                  ((dest == InvalidReg) ? 0 : RD(dest)) |
-                  ((src1 == InvalidReg) ? 0 : RN(src1)));
-    }
+                ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_mov(Register dest,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, InvalidReg, op2, op_mov, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_mvn(Register dest, Operand2 op2,
-                SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, InvalidReg, op2, op_mvn, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
     // logical operations
     void as_and(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_and, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_bic(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_bic, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_eor(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_eor, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_orr(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_orr, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     // mathematical operations
     void as_adc(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_adc, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_add(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_add, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_sbc(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_sbc, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_sub(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_sub, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_rsb(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_rsb, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     void as_rsc(Register dest, Register src1,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always) {
-        as_alu(dest, src1, op2, op_rsc, sc, c);
-    }
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     // test operations
     void as_cmn(Register src1, Operand2 op2,
-                Condition c = Always) {
-        as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c);
-    }
+                Condition c = Always);
     void as_cmp(Register src1, Operand2 op2,
-                Condition c = Always) {
-        as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c);
-    }
+                Condition c = Always);
     void as_teq(Register src1, Operand2 op2,
-                Condition c = Always) {
-        as_alu(InvalidReg, src1, op2, op_teq, SetCond, c);
-    }
+                Condition c = Always);
     void as_tst(Register src1, Operand2 op2,
-                Condition c = Always) {
-        as_alu(InvalidReg, src1, op2, op_tst, SetCond, c);
-    }
+                Condition c = Always);
 
     // Not quite ALU worthy, but useful none the less:
     // These also have the isue of these being formatted
     // completly differently from the standard ALU operations.
-    void as_movw(Register dest, Imm16 imm, Condition c = Always) {
-        JS_ASSERT(hasMOVWT());
-        writeBlob(0x03000000 | c | imm.toInt() | RD(dest));
-    }
-    void as_movt(Register dest, Imm16 imm, Condition c = Always) {
-        JS_ASSERT(hasMOVWT());
-        writeBlob(0x03400000 | c | imm.toInt() | RD(dest));
-    }
+    void as_movw(Register dest, Imm16 imm, Condition c = Always);
+    void as_movt(Register dest, Imm16 imm, Condition c = Always);
     // Data transfer instructions: ldr, str, ldrb, strb.
     // Using an int to differentiate between 8 bits and 32 bits is
     // overkill, but meh
     void as_dtr(LoadStore ls, int size, Index mode,
-                Register rt, DTRAddr addr, Condition c = Always)
-    {
-        JS_ASSERT(size == 32 || size == 8);
-        writeBlob( 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c |
-                   RT(rt) | addr.toInt());
-        return;
-    }
+                Register rt, DTRAddr addr, Condition c = Always);
     // Handles all of the other integral data transferring functions:
     // ldrsb, ldrsh, ldrd, etc.
     // size is given in bits.
     void as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
-                   Register rt, EDtrAddr addr, Condition c = Always)
-    {
-        int extra_bits2 = 0;
-        int extra_bits1 = 0;
-        switch(size) {
-          case 8:
-            JS_ASSERT(IsSigned);
-            JS_ASSERT(ls!=IsStore);
-            break;
-          case 16:
-            //case 32:
-            // doesn't need to be handled-- it is handled by the default ldr/str
-            extra_bits2 = 0x01;
-            extra_bits1 = (ls == IsStore) ? 0 : 1;
-            if (IsSigned) {
-                JS_ASSERT(ls != IsStore);
-                extra_bits2 |= 0x2;
-            }
-            break;
-          case 64:
-            if (ls == IsStore) {
-                extra_bits2 = 0x3;
-            } else {
-                extra_bits2 = 0x2;
-            }
-            extra_bits1 = 0;
-            break;
-          default:
-            JS_NOT_REACHED("SAY WHAT?");
-        }
-        writeBlob(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 |
-                  addr.toInt() | RT(rt) | c);
-        return;
-    }
+                   Register rt, EDtrAddr addr, Condition c = Always);
 
     void as_dtm(LoadStore ls, Register rn, uint32 mask,
-                DTMMode mode, DTMWriteBack wb, Condition c = Always)
-    {
-        writeBlob(0x08000000 | RN(rn) | ls |
-                  mode | mask | c | wb);
-
-        return;
-    }
+                DTMMode mode, DTMWriteBack wb, Condition c = Always);
 
     // Control flow stuff:
 
     // bx can *only* branch to a register
     // never to an immediate.
-    void as_bx(Register r, Condition c = Always)
-    {
-        writeBlob(((int) c) | op_bx | r.code());
-    }
+    void as_bx(Register r, Condition c = Always);
 
     // Branch can branch to an immediate *or* to a register.
     // Branches to immediates are pc relative, branches to registers
     // are absolute
-    void as_b(BOffImm off, Condition c)
-    {
-        writeBlob(((int)c) | op_b | off.toInt());
-    }
+    void as_b(BOffImm off, Condition c);
 
-    void as_b(Label *l, Condition c = Always)
-    {
-        BufferOffset next = nextOffset();
-        if (l->bound()) {
-            as_b(BufferOffset(l).diffB(next), c);
-        } else {
-            // Ugh.  int32 :(
-            int32 old = l->use(next.getOffset());
-            if (old == LabelBase::INVALID_OFFSET) {
-                old = -4;
-            }
-            // This will currently throw an assertion if we couldn't actually
-            // encode the offset of the branch.
-            as_b(BOffImm(old), c);
-        }
-    }
-    void as_b(BOffImm off, Condition c, BufferOffset inst)
-    {
-        *editSrc(inst) = ((int)c) | op_b | off.toInt();
-    }
+    void as_b(Label *l, Condition c = Always);
+    void as_b(BOffImm off, Condition c, BufferOffset inst);
 
     // blx can go to either an immediate or a register.
     // When blx'ing to a register, we change processor mode
     // depending on the low bit of the register
     // when blx'ing to an immediate, we *always* change processor state.
-    void as_blx(Label *l)
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
+    void as_blx(Label *l);
 
-    void as_blx(Register r, Condition c = Always)
-    {
-        writeBlob(((int) c) | op_blx | r.code());
-    }
-    void as_bl(BOffImm off, Condition c)
-    {
-        writeBlob(((int)c) | op_bl | off.toInt());
-    }
+    void as_blx(Register r, Condition c = Always);
+    void as_bl(BOffImm off, Condition c);
     // bl can only branch+link to an immediate, never to a register
     // it never changes processor state
-    void as_bl()
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
+    void as_bl();
     // bl #imm can have a condition code, blx #imm cannot.
     // blx reg can be conditional.
-    void as_bl(Label *l, Condition c)
-    {
-        BufferOffset next = nextOffset();
-        if (l->bound()) {
-            as_bl(BufferOffset(l).diffB(next), c);
-        } else {
-            int32 old = l->use(next.getOffset());
-            // See if the list was empty :(
-            if (old == -1) {
-                old = -4;
-            }
-            // This will fail if we couldn't actually
-            // encode the offset of the branch.
-            as_bl(BOffImm(old), c);
-        }
-    }
-    void as_bl(BOffImm off, Condition c, BufferOffset inst)
-    {
-        *editSrc(inst) = ((int)c) | op_bl | off.toInt();
-    }
+    void as_bl(Label *l, Condition c);
+    void as_bl(BOffImm off, Condition c, BufferOffset inst);
 
     // VFP instructions!
     enum vfp_size {
         isDouble = 1 << 8,
         isSingle = 0 << 8
     };
     // Unityped variants: all registers hold the same (ieee754 single/double)
     // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
     void as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                      VFPOp op, Condition c = Always)
-    {
-        // Make sure we believe that all of our operands are the same kind
-        JS_ASSERT(vd.equiv(vn) && vd.equiv(vm));
-        vfp_size sz = isDouble;
-        if (!vd.isDouble()) {
-            sz = isSingle;
-        }
-        writeBlob(VD(vd) | VN(vn) | VM(vm) | op | c | sz | 0x0e000a00);
-    }
+                      VFPOp op, Condition c = Always);
 
     void as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always)
-    {
-        as_vfp_float(vd, vn, vm, opv_add, c);
-    }
+                 Condition c = Always);
 
     void as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always)
-    {
-        as_vfp_float(vd, vn, vm, opv_mul, c);
-    }
+                 Condition c = Always);
 
     void as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always)
-    {
-        as_vfp_float(vd, vn, vm, opv_mul, c);
-    }
+                 Condition c = Always);
 
     void as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c = Always)
-    {
-        as_vfp_float(vd, vn, vm, opv_mul, c);
-        JS_NOT_REACHED("Feature NYI");
-    }
+                  Condition c = Always);
 
     void as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c = Always)
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
+                  Condition c = Always);
 
     void as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                  Condition c = Always)
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
+                  Condition c = Always);
 
-    void as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always)
-    {
-        as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c);
-    }
+    void as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always);
 
-    void as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always)
-    {
-        as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c);
-    }
+    void as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always);
 
-    void as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always)
-    {
-        as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c);
-    }
+    void as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always);
 
     void as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
-                 Condition c = Always)
-    {
-        as_vfp_float(vd, vn, vm, opv_sub, c);
-    }
+                 Condition c = Always);
 
     void as_vcmp(VFPRegister vd, VFPRegister vm,
-                 Condition c = Always)
-    {
-        as_vfp_float(vd, NoVFPRegister, vm, opv_sub, c);
-    }
+                 Condition c = Always);
 
     // specifically, a move between two same sized-registers
-    void as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always)
-    {
-        as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c);
-    }
+    void as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
     /*xfer between Core and VFP*/
     enum FloatToCore_ {
         FloatToCore = 1 << 20,
         CoreToFloat = 0 << 20
     };
 
     enum VFPXferSize {
         WordTransfer   = 0x0E000A10,
@@ -1318,163 +1069,43 @@ public:
 
     // Unlike the next function, moving between the core registers and vfp
     // registers can't be *that* properly typed.  Namely, since I don't want to
     // munge the type VFPRegister to also include core registers.  Thus, the core
     // and vfp registers are passed in based on their type, and src/dest is
     // determined by the float2core.
 
     void as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
-                  Condition c = Always)
-    {
-        vfp_size sz = isSingle;
-        if (vm.isDouble()) {
-            // Technically, this can be done with a vmov à la ARM ARM under vmov
-            // however, that requires at least an extra bit saying if the
-            // operation should be performed on the lower or upper half of the
-            // double.  Moving a single to/from 2N/2N+1 isn't equivalent,
-            // since there are 32 single registers, and 32 double registers
-            // so there is no way to encode the last 16 double registers.
-            JS_ASSERT(vt2 != InvalidReg);
-            sz = isDouble;
-        }
-        VFPXferSize xfersz = WordTransfer;
-        if (vt2 != InvalidReg) {
-            // We are doing a 64 bit transfer.
-            xfersz = DoubleTransfer;
-        }
-        writeBlob(xfersz | f2c | c | sz |
-                 RT(vt1) | ((vt2 != InvalidReg) ? RN(vt2) : 0) | VM(vm));
-    }
+                  Condition c = Always);
 
     // our encoding actually allows just the src and the dest (and theiyr types)
     // to uniquely specify the encoding that we are going to use.
     void as_vcvt(VFPRegister vd, VFPRegister vm,
-                 Condition c = Always)
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
+                 Condition c = Always);
     /* xfer between VFP and memory*/
     void as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
-                 Condition c = Always /* vfp doesn't have a wb option*/) {
-        vfp_size sz = isDouble;
-        if (!vd.isDouble()) {
-            sz = isSingle;
-        }
-
-        writeBlob(0x0D000A00 | addr.toInt() | VD(vd) | sz | c);
-    }
+                 Condition c = Always /* vfp doesn't have a wb option*/);
 
     // VFP's ldm/stm work differently from the standard arm ones.
     // You can only transfer a range
 
     void as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
-                 /*also has update conditions*/Condition c = Always)
-    {
-        JS_ASSERT(length <= 16 && length >= 0);
-        vfp_size sz = isDouble;
-        if (!vd.isDouble()) {
-            sz = isSingle;
-        } else {
-            length *= 2;
-        }
-        writeBlob(dtmLoadStore | RN(rn) | VD(vd) |
-                  length |
-                  dtmMode | dtmUpdate | dtmCond |
-                  0x0C000B00 | sz);
-    }
+                 /*also has update conditions*/Condition c = Always);
 
-    void as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always)
-    {
-        vfp_size sz = isDouble;
-        if (!vd.isDouble()) {
-            // totally do not know how to handle this right now
-            sz = isSingle;
-            JS_NOT_REACHED("non-double immediate");
-        }
-        writeBlob(c | sz | imm.encode() | VD(vd) | 0x0EB00A00);
-
-    }
+    void as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
 
-    bool nextLink(BufferOffset b, BufferOffset *next)
-    {
-        uint32 branch = *editSrc(b);
-        JS_ASSERT(((branch & op_b_mask) == op_b) ||
-                  ((branch & op_b_mask) == op_bl));
-        uint32 dest = (branch & op_b_dest_mask);
-        // turns out the end marker is the same as the mask.
-        if (dest == op_b_dest_mask)
-            return false;
-        // add in the extra 2 bits of padding that we chopped off when we made the b
-        dest = dest << 2;
-        // and let everyone know about it.
-        new (next) BufferOffset(dest);
-        return true;
-    }
+    bool nextLink(BufferOffset b, BufferOffset *next);
 
-    void bind(Label *label) {
-        //        JSC::MacroAssembler::Label jsclabel;
-        if (label->used()) {
-            bool more;
-            BufferOffset dest = nextOffset();
-            BufferOffset b(label);
-            do {
-                BufferOffset next;
-                more = nextLink(b, &next);
-                uint32 branch = *editSrc(b);
-                Condition c = getCondition(branch);
-                switch (branch & op_b_mask) {
-                  case op_b:
-                    as_b(dest.diffB(b), c, b);
-                    break;
-                  case op_bl:
-                    as_bl(dest.diffB(b), c, b);
-                    break;
-                  default:
-                    JS_NOT_REACHED("crazy fixup!");
-                }
-                b = next;
-            } while (more);
-        }
-        label->bind(nextOffset().getOffset());
-    }
+    void bind(Label *label);
+
+    static void Bind(IonCode *code, AbsoluteLabel *label, const void *address);
 
-    static void Bind(IonCode *code, AbsoluteLabel *label, const void *address) {
-#if 0
-        uint8 *raw = code->raw();
-        if (label->used()) {
-            intptr_t src = label->offset();
-            do {
-                intptr_t next = reinterpret_cast<intptr_t>(JSC::ARMAssembler::getPointer(raw + src));
-                JSC::ARMAssembler::setPointer(raw + src, address);
-                src = next;
-            } while (src != AbsoluteLabel::INVALID_OFFSET);
-        }
-        JS_ASSERT(((uint8 *)address - raw) >= 0 && ((uint8 *)address - raw) < INT_MAX);
-        label->bind();
-#endif
-        JS_NOT_REACHED("Feature NYI");
-    }
+    void call(Label *label);
 
-    void call(Label *label) {
-#if 0
-        if (label->bound()) {
-            masm.linkJump(masm.call(), JmpDst(label->offset()));
-        } else {
-            JmpSrc j = masm.call();
-            JmpSrc prev = JmpSrc(label->use(j.offset()));
-            masm.setNextJump(j, prev);
-        }
-#endif
-        JS_NOT_REACHED("Feature NYI");
-    }
-
-    void as_bkpt() {
-        writeBlob(0xe1200070);
-    }
+    void as_bkpt();
 
   public:
     static void TraceJumpRelocations(JSTracer *trc, IonCode *code, CompactBufferReader &reader);
     static void TraceDataRelocations(JSTracer *trc, IonCode *code, CompactBufferReader &reader);
 
     // The buffer is about to be linked, make sure any constant pools or excess
     // bookkeeping has been flushed to the instruction stream.
     void flush() { }
--- a/js/src/ion/arm/MacroAssembler-arm.cpp
+++ b/js/src/ion/arm/MacroAssembler-arm.cpp
@@ -181,8 +181,796 @@ MacroAssemblerARM::ma_alu(Register src1,
         as_movw(ScratchRegister, imm.value & 0xffff, c);
         as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c);
     } else {
         JS_NOT_REACHED("non-ARMv7 loading of immediates NYI.");
     }
     as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
     // done!
 }
+
+void
+MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
+            SetCond_ sc, Assembler::Condition c)
+{
+    JS_ASSERT(op2.getTag() == Operand::OP2);
+    as_alu(dest, src1, op2.toOp2(), op, sc, c);
+}
+
+
+void
+MacroAssemblerARM::ma_mov(Register src, Register dest,
+            SetCond_ sc, Assembler::Condition c)
+{
+    as_mov(dest, O2Reg(src), sc, c);
+}
+
+void
+MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
+}
+void
+MacroAssemblerARM::ma_mov(const ImmGCPtr &ptr, Register dest)
+{
+    ma_mov(Imm32(ptr.value), dest);
+    JS_NOT_REACHED("todo:make gc more sane.");
+}
+
+    // Shifts (just a move with a shifting op2)
+void
+MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
+{
+    as_mov(dst, lsl(src, shift.value));
+}
+void
+MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst)
+{
+    as_mov(dst, lsr(src, shift.value));
+}
+void
+MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst)
+{
+    as_mov(dst, asr(src, shift.value));
+}
+void
+MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst)
+{
+    as_mov(dst, ror(src, shift.value));
+}
+void
+MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst)
+{
+    as_mov(dst, rol(src, shift.value));
+}
+    // Shifts (just a move with a shifting op2)
+void
+MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst)
+{
+    as_mov(dst, lsl(src, shift));
+}
+void
+MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst)
+{
+    as_mov(dst, lsr(src, shift));
+}
+void
+MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst)
+{
+    as_mov(dst, asr(src, shift));
+}
+void
+MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst)
+{
+    as_mov(dst, ror(src, shift));
+}
+void
+MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
+{
+    ma_rsb(shift, Imm32(32), ScratchRegister);
+    as_mov(dst, ror(src, ScratchRegister));
+}
+
+    // Move not (dest <- ~src)
+
+void
+MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(InvalidReg, imm, dest, op_mvn, sc, c);
+}
+
+void
+MacroAssemblerARM::ma_mvn(Register src1, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c);
+}
+
+    // and
+void
+MacroAssemblerARM::ma_and(Register src, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_and(dest, src, dest, sc, c); // forward sc/c; dropping them used defaults
+}
+void
+MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
+            SetCond_ sc, Assembler::Condition c)
+{
+    as_and(dest, src1, O2Reg(src2), sc, c);
+}
+void
+MacroAssemblerARM::ma_and(Imm32 imm, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, op_and, sc, c);
+}
+void
+MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(src1, imm, dest, op_and, sc, c);
+}
+
+
+    // bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
+void
+MacroAssemblerARM::ma_bic(Imm32 imm, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, op_bic, sc, c);
+}
+
+    // exclusive or
+void
+MacroAssemblerARM::ma_eor(Register src, Register dest,
+            SetCond_ sc, Assembler::Condition c)
+{
+    ma_eor(dest, src, dest, sc, c);
+}
+void
+MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    as_eor(dest, src1, O2Reg(src2), sc, c);
+}
+void
+MacroAssemblerARM::ma_eor(Imm32 imm, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, op_eor, sc, c);
+}
+void
+MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
+       SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(src1, imm, dest, op_eor, sc, c);
+}
+
+    // or
+void
+MacroAssemblerARM::ma_orr(Register src, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_orr(dest, src, dest, sc, c);
+}
+void
+MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    as_orr(dest, src1, O2Reg(src2), sc, c);
+}
+void
+MacroAssemblerARM::ma_orr(Imm32 imm, Register dest,
+                          SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(dest, imm, dest, op_orr, sc, c);
+}
+void
+MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
+                SetCond_ sc, Assembler::Condition c)
+{
+    ma_alu(src1, imm, dest, op_orr, sc, c);
+}
+
+    // arithmetic based ops
+    // add with carry
+void
+MacroAssemblerARM::ma_adc(Imm32 imm, Register dest)
+{
+    ma_alu(dest, imm, dest, op_adc);
+}
+void
+MacroAssemblerARM::ma_adc(Register src, Register dest)
+{
+    as_alu(dest, dest, O2Reg(src), op_adc);
+}
+void
+MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest)
+{
+    as_alu(dest, src1, O2Reg(src2), op_adc);
+}
+
+    // add
+void
+MacroAssemblerARM::ma_add(Imm32 imm, Register dest)
+{
+    ma_alu(dest, imm, dest, op_add);
+}
+void
+MacroAssemblerARM::ma_add(Register src1, Register dest)
+{
+    as_alu(dest, dest, O2Reg(src1), op_add);
+}
+void
+MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest)
+{
+    as_alu(dest, src1, O2Reg(src2), op_add); // dest = src1 + src2 (was O2Reg(dest))
+}
+void
+MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest)
+{
+    ma_alu(src1, op, dest, op_add);
+}
+void
+MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest)
+{
+    ma_alu(src1, op, dest, op_add);
+}
+
+    // subtract with carry
+void
+MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest)
+{
+    ma_alu(dest, imm, dest, op_sbc);
+}
+void
+MacroAssemblerARM::ma_sbc(Register src1, Register dest)
+{
+    as_alu(dest, dest, O2Reg(src1), op_sbc);
+}
+void
+MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest)
+{
+    as_alu(dest, src1, O2Reg(src2), op_sbc); // dest = src1 - src2 - !C (was O2Reg(dest))
+}
+
+    // subtract
+void
+MacroAssemblerARM::ma_sub(Imm32 imm, Register dest)
+{
+    ma_alu(dest, imm, dest, op_sub);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Register dest)
+{
+    ma_alu(dest, Operand(O2Reg(src1)), dest, op_sub);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest)
+{
+    ma_alu(src1, Operand(O2Reg(src2)), dest, op_sub);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest)
+{
+    ma_alu(src1, op, dest, op_sub);
+}
+void
+MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest)
+{
+    ma_alu(src1, op, dest, op_sub);
+}
+
+    // reverse subtract
+void
+MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest)
+{
+    ma_alu(dest, imm, dest, op_rsb);
+}
+void
+MacroAssemblerARM::ma_rsb(Register src1, Register dest)
+{
+    as_alu(dest, dest, O2Reg(src1), op_rsb); // dest = src1 - dest (was op_add)
+}
+void
+MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest)
+{
+    as_alu(dest, src1, O2Reg(src2), op_rsb); // dest = src2 - src1 (was O2Reg(dest), op_rsc)
+}
+void
+MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest)
+{
+    ma_alu(src1, op2, dest, op_rsb);
+}
+
+    // reverse subtract with carry
+void
+MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest)
+{
+    ma_alu(dest, imm, dest, op_rsc);
+}
+void
+MacroAssemblerARM::ma_rsc(Register src1, Register dest)
+{
+    as_alu(dest, dest, O2Reg(src1), op_rsc);
+}
+void
+MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest)
+{
+    as_alu(dest, src1, O2Reg(src2), op_rsc); // dest = src2 - src1 - !C (was O2Reg(dest))
+}
+
+    // compares/tests
+    // compare negative (sets condition codes as src1 + src2 would)
+void
+MacroAssemblerARM::ma_cmn(Imm32 imm, Register src1)
+{
+    // flags := flags(src1 + imm); no register result is written.
+    // NOTE(review): ma_cmp/ma_teq/ma_tst pass SetCond here but ma_cmn does
+    // not -- confirm op_cmn's encoding sets the flags regardless.
+    ma_alu(src1, imm, InvalidReg, op_cmn);
+}
+void
+MacroAssemblerARM::ma_cmn(Register src1, Register src2)
+{
+    // flags := flags(src2 + src1); no register result is written.
+    as_alu(InvalidReg, src2, O2Reg(src1), op_cmn);
+}
+void
+MacroAssemblerARM::ma_cmn(Register src1, Operand op)
+{
+    // General-operand form not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+
+    // compare (src - src2)
+void
+MacroAssemblerARM::ma_cmp(Imm32 imm, Register src1)
+{
+    // flags := flags(src1 - imm)
+    ma_alu(src1, imm, InvalidReg, op_cmp, SetCond);
+}
+void
+MacroAssemblerARM::ma_cmp(Register src1, Operand op)
+{
+    // flags := flags(src1 - op)
+    as_cmp(src1, op.toOp2());
+}
+void
+MacroAssemblerARM::ma_cmp(Register src1, Register src2)
+{
+    // flags := flags(src2 - src1)
+    // NOTE(review): the argument order is reversed relative to the Operand
+    // overload above -- confirm which ordering is intended.
+    as_cmp(src2, O2Reg(src1));
+}
+
+    // test for equality, (src1^src2)
+void
+MacroAssemblerARM::ma_teq(Imm32 imm, Register src1)
+{
+    // flags := flags(src1 ^ imm); no register result is written.
+    ma_alu(src1, imm, InvalidReg, op_teq, SetCond);
+}
+void
+MacroAssemblerARM::ma_teq(Register src2, Register src1)
+{
+    // flags := flags(src1 ^ src2)
+    // BUG FIX: this previously emitted TST (flags from an AND); TEQ must
+    // set the flags from the exclusive-or, so encode op_teq explicitly,
+    // mirroring the Imm32 overload above.
+    as_alu(InvalidReg, src2, O2Reg(src1), op_teq, SetCond);
+}
+void
+MacroAssemblerARM::ma_teq(Register src1, Operand op)
+{
+    // General-operand form not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+
+
+// test (src1 & src2)
+void
+MacroAssemblerARM::ma_tst(Imm32 imm, Register src1)
+{
+    // flags := flags(src1 & imm); no register result is written.
+    ma_alu(src1, imm, InvalidReg, op_tst, SetCond);
+}
+void
+MacroAssemblerARM::ma_tst(Register src1, Register src2)
+{
+    // flags := flags(src1 & src2)
+    as_tst(src1, O2Reg(src2));
+}
+void
+MacroAssemblerARM::ma_tst(Register src1, Operand op)
+{
+    // flags := flags(src1 & op)
+    as_tst(src1, op.toOp2());
+}
+
+
+    // memory
+    // shortcut for when we know we're transferring 32 bits of data
+void
+MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
+                          Index mode, Assembler::Condition cc)
+{
+    int off = offset.value;
+    if (off < 4096 && off > -4096) {
+        // simplest offset, just use an immediate
+        as_dtr(ls, 32, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
+        return;
+    }
+    // see if we can attempt to encode it as a standard imm8m offset
+    datastore::Imm8mData imm = Imm8::encodeImm(off & (~0xfff));
+    if (!imm.invalid) {
+        // add the high bits into the scratch base, then use the low 12 bits
+        // as the immediate offset.
+        as_add(ScratchRegister, rn, imm);
+        as_dtr(ls, 32, mode, rt, DTRAddr(ScratchRegister, DtrOffImm(off & 0xfff)), cc);
+    } else {
+        // worst case: materialize the entire offset in the scratch register.
+        ma_mov(offset, ScratchRegister);
+        // BUG FIX: the caller's condition code was dropped on this path;
+        // forward cc so the transfer stays conditional.
+        as_dtr(ls, 32, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)), cc);
+    }
+}
+
+void
+MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
+                          Index mode, Assembler::Condition cc)
+{
+    // Register-offset load/store: not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+
+void
+MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Assembler::Condition cc)
+{
+    // 32-bit store of rt to the given address.
+    as_dtr(IsStore, 32, mode, rt, addr, cc);
+}
+void
+MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Assembler::Condition cc)
+{
+    // 32-bit load of rt from the given address.
+    as_dtr(IsLoad, 32, mode, rt, addr, cc);
+}
+    // specialty for moving N bits of data, where n == 8,16,32,64
+void
+MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size,
+                          Register rn, Register rm, Register rt,
+                          Index mode, Assembler::Condition cc)
+{
+    // Register-offset sized transfer: not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+
+void
+MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size,
+                          Register rn, Imm32 offset, Register rt,
+                          Index mode, Assembler::Condition cc)
+{
+    // Immediate-offset sized transfer: not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+void
+MacroAssemblerARM::ma_pop(Register r)
+{
+    // ldr r, [sp], #+4 : load the top of stack, then release the slot.
+    ma_dtr(IsLoad, sp, Imm32(4), r, PostIndex);
+}
+void
+MacroAssemblerARM::ma_push(Register r)
+{
+    // str r, [sp, #-4]! : allocate a slot *below* sp, then store into it.
+    // BUG FIX: this previously used Imm32(4), which on ARM's descending
+    // stack would have written above the stack pointer and grown sp the
+    // wrong way; push must pre-decrement by 4.
+    ma_dtr(IsStore, sp, Imm32(-4), r, PreIndex);
+}
+
+// branches when done from within arm-specific code
+void
+MacroAssemblerARM::ma_b(Label *dest, Assembler::Condition c)
+{
+    // pc-relative conditional branch to a label in this assembler buffer.
+    as_b(dest, c);
+}
+void
+MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc)
+{
+    // Unconditional absolute branch: not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+void
+MacroAssemblerARM::ma_b(void *target, Assembler::Condition c, Relocation::Kind reloc)
+{
+    // we know the absolute address of the target, but not our final
+    // location (with relocating GC, we *can't* know our final location)
+    // for now, I'm going to be conservative, and load this with an
+    // absolute address
+    uint32 trg = (uint32)target;
+    // NOTE(review): movw/movt require ARMv6T2 or later -- confirm against
+    // the minimum architecture this backend supports.
+    as_movw(ScratchRegister, Imm16(trg & 0xffff), c);
+    as_movt(ScratchRegister, Imm16(trg >> 16), c);
+    // this is going to get the branch predictor pissed off.
+    as_bx(ScratchRegister, c);
+}
+
+// this is almost NEVER necessary, we'll basically never be calling a label
+// except, possibly in the crazy bailout-table case.
+void
+MacroAssemblerARM::ma_bl(Label *dest, Assembler::Condition c)
+{
+    // branch-and-link (call) to a label, saving the return address in lr.
+    as_bl(dest, c);
+}
+
+//VFP/ALU
+void
+MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+    // dst := src1 + src2
+    as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+void
+MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst)
+{
+    // dst := src1 * src2
+    as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
+}
+void
+MacroAssemblerARM::ma_vcmp_F64(FloatRegister src1, FloatRegister src2)
+{
+    // compare two doubles, setting the VFP status flags.
+    as_vcmp(VFPRegister(src1), VFPRegister(src2));
+}
+void
+MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest)
+{
+    // double -> int32 conversion: not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+void
+MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest)
+{
+    // int32 -> double conversion: not implemented yet.
+    JS_NOT_REACHED("Feature NYI");
+}
+void
+MacroAssemblerARM::ma_vmov(FloatRegister src, Register dest)
+{
+    // VFP -> core register transfer: not implemented yet; the commented
+    // call below sketches the intended encoding.
+    JS_NOT_REACHED("Feature NYI");
+    //as_vmov(VFPRegister(dest), VFPRegister(src));
+}
+void
+MacroAssemblerARM::ma_vldr(VFPAddr addr, FloatRegister dest)
+{
+    // load a float register from memory.
+    as_vdtr(IsLoad, dest, addr);
+}
+void
+MacroAssemblerARM::ma_vstr(FloatRegister src, VFPAddr addr)
+{
+    // store a float register to memory.
+    as_vdtr(IsStore, src, addr);
+}
+
+uint32
+MacroAssemblerARM::alignStackForCall(uint32 stackForArgs)
+{
+    // framePushed_ is accurate, so precisely adjust the stack requirement.
+    uint32 displacement = stackForArgs + framePushed_;
+    return stackForArgs + ComputeByteAlignment(displacement, StackAlignment);
+}
+
+uint32
+MacroAssemblerARM::dynamicallyAlignStackForCall(uint32 stackForArgs, const Register &scratch)
+{
+    // framePushed_ is bogus or we don't know it for sure, so instead, save
+    // the original value of esp and then chop off its low bits. Then, we
+    // push the original value of esp.
+
+    // Codegen is not written yet; the #if 0 block below sketches the intent.
+    JS_NOT_REACHED("Codegen for dynamicallyAlignedStackForCall NYI");
+#if 0
+    ma_mov(sp, scratch);
+    ma_bic(Imm32(StackAlignment - 1), sp);
+    Push(scratch);
+#endif
+    uint32 displacement = stackForArgs + STACK_SLOT_SIZE;
+    return stackForArgs + ComputeByteAlignment(displacement, StackAlignment);
+}
+
+void
+MacroAssemblerARM::restoreStackFromDynamicAlignment()
+{
+    // x86 supports pop esp.  on arm, that isn't well defined, so just
+    //  do it manually
+    as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
+}
+
+void
+MacroAssemblerARM::reserveStack(uint32 amount)
+{
+    // grow the (descending) stack by amount bytes and track it.
+    if (amount)
+        ma_sub(Imm32(amount), sp);
+    framePushed_ += amount;
+}
+void
+MacroAssemblerARM::freeStack(uint32 amount)
+{
+    // release amount bytes previously obtained via reserveStack.
+    JS_ASSERT(amount <= framePushed_);
+    if (amount)
+        ma_add(Imm32(amount), sp);
+    framePushed_ -= amount;
+}
+void
+MacroAssemblerARM::movePtr(ImmWord imm, const Register dest)
+{
+    // materialize a pointer-sized immediate in dest.
+    ma_mov(Imm32(imm.value), dest);
+}
+void
+MacroAssemblerARM::movePtr(ImmGCPtr imm, const Register dest)
+{
+    // NOTE(review): the ImmGCPtr overload of ma_mov currently ends in
+    // JS_NOT_REACHED ("todo:make gc more sane."), so this path traps.
+    ma_mov(imm, dest);
+}
+
+void
+MacroAssemblerARM::loadPtr(const Address &address, Register dest)
+{
+    // pointer-sized load: not implemented yet.
+    JS_NOT_REACHED("NYI");
+}
+void
+MacroAssemblerARM::setStackArg(const Register &reg, uint32 arg)
+{
+    // store reg into the arg'th outgoing stack slot.  Note that
+    // ma_dataTransferN is itself still NYI, so this currently traps.
+    ma_dataTransferN(IsStore, 32, sp, Imm32(arg * STACK_SLOT_SIZE), reg);
+
+}
+#ifdef DEBUG
+void
+MacroAssemblerARM::checkCallAlignment()
+{
+    // trap (bkpt) if sp is not StackAlignment-aligned at a call site.
+    Label good;
+    ma_tst(Imm32(StackAlignment - 1), sp);
+    ma_b(&good, Equal);
+    breakpoint();
+    bind(&good);
+}
+#endif
+
+    // higher level tag testing code
+    // Each test compares the boxed value's type tag and returns the
+    // condition the caller should branch on.
+Assembler::Condition
+MacroAssemblerARM::testInt32(Assembler::Condition cond, const ValueOperand &value)
+{
+    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_cmp(ImmType(JSVAL_TYPE_INT32), value.typeReg());
+    return cond;
+}
+
+Assembler::Condition
+MacroAssemblerARM::testBoolean(Assembler::Condition cond, const ValueOperand &value)
+{
+    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_cmp(ImmType(JSVAL_TYPE_BOOLEAN), value.typeReg());
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testDouble(Assembler::Condition cond, const ValueOperand &value)
+{
+    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    // Doubles are recognized with an unsigned range check against
+    // JSVAL_TAG_CLEAR rather than tag equality, so Equal maps to Below
+    // and NotEqual to AboveOrEqual.
+    Assembler::Condition actual = (cond == Equal)
+        ? Below
+        : AboveOrEqual;
+    ma_cmp(ImmTag(JSVAL_TAG_CLEAR), value.typeReg());
+    return actual;
+}
+Assembler::Condition
+MacroAssemblerARM::testNull(Assembler::Condition cond, const ValueOperand &value)
+{
+    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_cmp(ImmType(JSVAL_TYPE_NULL), value.typeReg());
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testUndefined(Assembler::Condition cond, const ValueOperand &value)
+{
+    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_cmp(ImmType(JSVAL_TYPE_UNDEFINED), value.typeReg());
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testString(Assembler::Condition cond, const ValueOperand &value)
+{
+    // delegate to the register-based test on the type register.
+    return testString(cond, value.typeReg());
+}
+Assembler::Condition
+MacroAssemblerARM::testObject(Assembler::Condition cond, const ValueOperand &value)
+{
+    // delegate to the register-based test on the type register.
+    return testObject(cond, value.typeReg());
+}
+
+    // register-based tests: compare a type-tag register against an ImmTag
+    // and return the condition to branch on.
+    // CLEANUP: testNull/testUndefined/testString kept their old in-class
+    // indentation when they were moved out of the header; normalize all six
+    // to the file's function-definition style (no behavior change).
+Assembler::Condition
+MacroAssemblerARM::testInt32(Assembler::Condition cond, const Register &tag)
+{
+    JS_ASSERT(cond == Equal || cond == NotEqual);
+    ma_cmp(ImmTag(JSVAL_TAG_INT32), tag);
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testBoolean(Assembler::Condition cond, const Register &tag)
+{
+    JS_ASSERT(cond == Equal || cond == NotEqual);
+    ma_cmp(ImmTag(JSVAL_TAG_BOOLEAN), tag);
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testNull(Assembler::Condition cond, const Register &tag)
+{
+    JS_ASSERT(cond == Equal || cond == NotEqual);
+    ma_cmp(ImmTag(JSVAL_TAG_NULL), tag);
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testUndefined(Assembler::Condition cond, const Register &tag)
+{
+    JS_ASSERT(cond == Equal || cond == NotEqual);
+    ma_cmp(ImmTag(JSVAL_TAG_UNDEFINED), tag);
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testString(Assembler::Condition cond, const Register &tag)
+{
+    JS_ASSERT(cond == Equal || cond == NotEqual);
+    ma_cmp(ImmTag(JSVAL_TAG_STRING), tag);
+    return cond;
+}
+Assembler::Condition
+MacroAssemblerARM::testObject(Assembler::Condition cond, const Register &tag)
+{
+    JS_ASSERT(cond == Equal || cond == NotEqual);
+    ma_cmp(ImmTag(JSVAL_TAG_OBJECT), tag);
+    return cond;
+}
+
+    // unboxing code
+void
+MacroAssemblerARM::unboxInt32(const ValueOperand &operand, const Register &dest)
+{
+    // the int32 payload lives in the payload register; just copy it.
+    ma_mov(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerARM::unboxBoolean(const ValueOperand &operand, const Register &dest)
+{
+    // the boolean payload lives in the payload register; just copy it.
+    ma_mov(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerARM::unboxDouble(const ValueOperand &operand, const FloatRegister &dest)
+{
+    // transfer both 32-bit halves (payload + type) of the boxed double
+    // into the destination VFP register in one core-to-float move.
+    JS_ASSERT(dest != ScratchFloatReg);
+    as_vxfer(operand.payloadReg(), operand.typeReg(),
+             VFPRegister(dest), CoreToFloat);
+}
+
+void
+MacroAssemblerARM::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
+{
+    // not implemented yet; the #if 0 block is a leftover x86 sketch.
+    JS_NOT_REACHED("Codegen for boolValueToDouble NYI");
+#if 0
+    cvtsi2sd(operand.payloadReg(), dest);
+#endif
+}
+
+void
+MacroAssemblerARM::int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
+{
+    // NOTE(review): everything after this JS_NOT_REACHED is dead code --
+    // either remove it or finish the implementation and drop the trap.
+    JS_NOT_REACHED("Codegen for int32ValueToDouble NYI");
+    // transfer the integral value to a floating point register
+    VFPRegister vfpdest = VFPRegister(dest);
+    as_vxfer(operand.payloadReg(), InvalidReg,
+             vfpdest.intOverlay(), CoreToFloat);
+    // convert the value to a double.
+    as_vcvt(dest, dest);
+}
+
+void
+MacroAssemblerARM::loadStaticDouble(const double *dp, const FloatRegister &dest)
+{
+    // not implemented yet; the #if 0 block is a leftover x86 sketch.
+    JS_NOT_REACHED("Codegen for loadStaticDouble NYI");
+#if 0
+    _vldr()
+        movsd(dp, dest);
+#endif
+}
+    // treat the value as a boolean, and set condition codes accordingly
+
+Assembler::Condition
+MacroAssemblerARM::testInt32Truthy(bool truthy, const ValueOperand &operand)
+{
+    // an int32 is truthy iff its payload is non-zero.
+    ma_tst(operand.payloadReg(), operand.payloadReg());
+    return truthy ? NonZero : Zero;
+}
+
+Assembler::Condition
+MacroAssemblerARM::testBooleanTruthy(bool truthy, const ValueOperand &operand)
+{
+    // a boolean is truthy iff its payload is non-zero.
+    ma_tst(operand.payloadReg(), operand.payloadReg());
+    return truthy ? NonZero : Zero;
+}
+
+Assembler::Condition
+MacroAssemblerARM::testDoubleTruthy(bool truthy, const FloatRegister &reg)
+{
+    // not implemented yet; the #if 0 block is a leftover x86 sketch.
+    JS_NOT_REACHED("codegen for testDoubleTruthy NYI");
+    // need to do vfp code here.
+#if 0
+    xorpd(ScratchFloatReg, ScratchFloatReg);
+    ucomisd(ScratchFloatReg, reg);
+#endif
+    return truthy ? NonZero : Zero;
+}
+
+void
+MacroAssemblerARM::breakpoint() {
+    // emit a bkpt instruction to trap into an attached debugger.
+    as_bkpt();
+}
--- a/js/src/ion/arm/MacroAssembler-arm.h
+++ b/js/src/ion/arm/MacroAssembler-arm.h
@@ -83,565 +83,259 @@ public:
     bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                  SetCond_ sc, Condition c);
   public:
     void ma_alu(Register src1, Imm32 imm, Register dest,
                 ALUOp op,
                 SetCond_ sc =  NoSetCond, Condition c = Always);
 
     void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        JS_ASSERT(op2.getTag() == Operand::OP2);
-        as_alu(dest, src1, op2.toOp2(), op, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
 
     // These should likely be wrapped up as a set of macros
     // or something like that.  I cannot think of a good reason
     // to explicitly have all of this code.
     // ALU based ops
     // mov
     void ma_mov(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        as_mov(dest, O2Reg(src), sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_mov(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
-    }
-    void ma_mov(const ImmGCPtr &ptr, Register dest) {
-        ma_mov(Imm32(ptr.value), dest);
-        JS_NOT_REACHED("todo:make gc more sane.");
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
+    void ma_mov(const ImmGCPtr &ptr, Register dest);
 
     // Shifts (just a move with a shifting op2)
-    void ma_lsl(Imm32 shift, Register src, Register dst) {
-        as_mov(dst, lsl(src, shift.value));
-    }
-    void ma_lsr(Imm32 shift, Register src, Register dst) {
-        as_mov(dst, lsr(src, shift.value));
-    }
-    void ma_asr(Imm32 shift, Register src, Register dst) {
-        as_mov(dst, asr(src, shift.value));
-    }
-    void ma_ror(Imm32 shift, Register src, Register dst) {
-        as_mov(dst, ror(src, shift.value));
-    }
-    void ma_rol(Imm32 shift, Register src, Register dst) {
-        as_mov(dst, rol(src, shift.value));
-    }
+    void ma_lsl(Imm32 shift, Register src, Register dst);
+    void ma_lsr(Imm32 shift, Register src, Register dst);
+    void ma_asr(Imm32 shift, Register src, Register dst);
+    void ma_ror(Imm32 shift, Register src, Register dst);
+    void ma_rol(Imm32 shift, Register src, Register dst);
     // Shifts (just a move with a shifting op2)
-    void ma_lsl(Register shift, Register src, Register dst) {
-        as_mov(dst, lsl(src, shift));
-    }
-    void ma_lsr(Register shift, Register src, Register dst) {
-        as_mov(dst, lsr(src, shift));
-    }
-    void ma_asr(Register shift, Register src, Register dst) {
-        as_mov(dst, asr(src, shift));
-    }
-    void ma_ror(Register shift, Register src, Register dst) {
-        as_mov(dst, ror(src, shift));
-    }
-    void ma_rol(Register shift, Register src, Register dst) {
-        ma_rsb(shift, Imm32(32), ScratchRegister);
-        as_mov(dst, ror(src, ScratchRegister));
-    }
+    void ma_lsl(Register shift, Register src, Register dst);
+    void ma_lsr(Register shift, Register src, Register dst);
+    void ma_asr(Register shift, Register src, Register dst);
+    void ma_ror(Register shift, Register src, Register dst);
+    void ma_rol(Register shift, Register src, Register dst);
 
     // Move not (dest <- ~src)
     void ma_mvn(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(InvalidReg, imm, dest, op_mvn, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
 
     void ma_mvn(Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
 
     // and
     void ma_and(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_and(dest, src, dest);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_and(Register src1, Register src2, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        as_and(dest, src1, O2Reg(src2), sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_and(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(dest, imm, dest, op_and, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_and(Imm32 imm, Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(src1, imm, dest, op_and, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
 
 
     // bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
     void ma_bic(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(dest, imm, dest, op_bic, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
 
     // exclusive or
     void ma_eor(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_eor(dest, src, dest, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_eor(Register src1, Register src2, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        as_eor(dest, src1, O2Reg(src2), sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_eor(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(dest, imm, dest, op_eor, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_eor(Imm32 imm, Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(src1, imm, dest, op_eor, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
 
     // or
     void ma_orr(Register src, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_orr(dest, src, dest, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_orr(Register src1, Register src2, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        as_orr(dest, src1, O2Reg(src2), sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_orr(Imm32 imm, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(dest, imm, dest, op_orr, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
     void ma_orr(Imm32 imm, Register src1, Register dest,
-                SetCond_ sc = NoSetCond, Condition c = Always)
-    {
-        ma_alu(src1, imm, dest, op_orr, sc, c);
-    }
+                SetCond_ sc = NoSetCond, Condition c = Always);
+
 
     // arithmetic based ops
     // add with carry
-    void ma_adc(Imm32 imm, Register dest) {
-        ma_alu(dest, imm, dest, op_adc);
-    }
-    void ma_adc(Register src, Register dest) {
-        as_alu(dest, dest, O2Reg(src), op_adc);
-    }
-    void ma_adc(Register src1, Register src2, Register dest) {
-        as_alu(dest, src1, O2Reg(src2), op_adc);
-    }
+    void ma_adc(Imm32 imm, Register dest);
+    void ma_adc(Register src, Register dest);
+    void ma_adc(Register src1, Register src2, Register dest);
 
     // add
-    void ma_add(Imm32 imm, Register dest) {
-        ma_alu(dest, imm, dest, op_add);
-    }
-    void ma_add(Register src1, Register dest) {
-        as_alu(dest, dest, O2Reg(src1), op_add);
-    }
-    void ma_add(Register src1, Register src2, Register dest) {
-        as_alu(dest, src1, O2Reg(dest), op_add);
-    }
-    void ma_add(Register src1, Operand op, Register dest) {
-        ma_alu(src1, op, dest, op_add);
-    }
-    void ma_add(Register src1, Imm32 op, Register dest) {
-        ma_alu(src1, op, dest, op_add);
-    }
+    void ma_add(Imm32 imm, Register dest);
+    void ma_add(Register src1, Register dest);
+    void ma_add(Register src1, Register src2, Register dest);
+    void ma_add(Register src1, Operand op, Register dest);
+    void ma_add(Register src1, Imm32 op, Register dest);
 
     // subtract with carry
-    void ma_sbc(Imm32 imm, Register dest) {
-        ma_alu(dest, imm, dest, op_sbc);
-    }
-    void ma_sbc(Register src1, Register dest) {
-        as_alu(dest, dest, O2Reg(src1), op_sbc);
-    }
-    void ma_sbc(Register src1, Register src2, Register dest) {
-        as_alu(dest, src1, O2Reg(dest), op_sbc);
-    }
+    void ma_sbc(Imm32 imm, Register dest);
+    void ma_sbc(Register src1, Register dest);
+    void ma_sbc(Register src1, Register src2, Register dest);
 
     // subtract
-    void ma_sub(Imm32 imm, Register dest) {
-        ma_alu(dest, imm, dest, op_sub);
-    }
-    void ma_sub(Register src1, Register dest) {
-        ma_alu(dest, Operand(O2Reg(src1)), dest, op_sub);
-    }
-    void ma_sub(Register src1, Register src2, Register dest) {
-        ma_alu(src1, Operand(O2Reg(src2)), dest, op_sub);
-    }
-    void ma_sub(Register src1, Operand op, Register dest) {
-        ma_alu(src1, op, dest, op_sub);
-    }
-    void ma_sub(Register src1, Imm32 op, Register dest) {
-        ma_alu(src1, op, dest, op_sub);
-    }
+    void ma_sub(Imm32 imm, Register dest);
+    void ma_sub(Register src1, Register dest);
+    void ma_sub(Register src1, Register src2, Register dest);
+    void ma_sub(Register src1, Operand op, Register dest);
+    void ma_sub(Register src1, Imm32 op, Register dest);
 
     // reverse subtract
-    void ma_rsb(Imm32 imm, Register dest) {
-        ma_alu(dest, imm, dest, op_rsb);
-    }
-    void ma_rsb(Register src1, Register dest) {
-        as_alu(dest, dest, O2Reg(src1), op_add);
-    }
-    void ma_rsb(Register src1, Register src2, Register dest) {
-        as_alu(dest, src1, O2Reg(dest), op_rsc);
-    }
-    void ma_rsb(Register src1, Imm32 op2, Register dest) {
-        ma_alu(src1, op2, dest, op_rsb);
-    }
+    void ma_rsb(Imm32 imm, Register dest);
+    void ma_rsb(Register src1, Register dest);
+    void ma_rsb(Register src1, Register src2, Register dest);
+    void ma_rsb(Register src1, Imm32 op2, Register dest);
 
     // reverse subtract with carry
-    void ma_rsc(Imm32 imm, Register dest) {
-        ma_alu(dest, imm, dest, op_rsc);
-    }
-    void ma_rsc(Register src1, Register dest) {
-        as_alu(dest, dest, O2Reg(src1), op_rsc);
-    }
-    void ma_rsc(Register src1, Register src2, Register dest) {
-        as_alu(dest, src1, O2Reg(dest), op_rsc);
-    }
+    void ma_rsc(Imm32 imm, Register dest);
+    void ma_rsc(Register src1, Register dest);
+    void ma_rsc(Register src1, Register src2, Register dest);
 
     // compares/tests
     // compare negative (sets condition codes as src1 + src2 would)
-    void ma_cmn(Imm32 imm, Register src1) {
-        ma_alu(src1, imm, InvalidReg, op_cmn);
-    }
-    void ma_cmn(Register src1, Register src2) {
-        as_alu(InvalidReg, src2, O2Reg(src1), op_cmn);
-    }
-    void ma_cmn(Register src1, Operand op) {
-        JS_NOT_REACHED("Feature NYI");
-    }
+    void ma_cmn(Imm32 imm, Register src1);
+    void ma_cmn(Register src1, Register src2);
+    void ma_cmn(Register src1, Operand op);
 
     // compare (src - src2)
-    void ma_cmp(Imm32 imm, Register src1) {
-        ma_alu(src1, imm, InvalidReg, op_cmp, SetCond);
-    }
-    void ma_cmp(Register src1, Operand op) {
-        as_cmp(src1, op.toOp2());
-    }
-    void ma_cmp(Register src1, Register src2) {
-        as_cmp(src2, O2Reg(src1));
-    }
+    void ma_cmp(Imm32 imm, Register src1);
+    void ma_cmp(Register src1, Operand op);
+    void ma_cmp(Register src1, Register src2);
 
     // test for equality, (src1^src2)
-    void ma_teq(Imm32 imm, Register src1) {
-        ma_alu(src1, imm, InvalidReg, op_teq, SetCond);
-    }
-    void ma_teq(Register src2, Register src1) {
-        as_tst(src2, O2Reg(src1));
-    }
-    void ma_teq(Register src1, Operand op) {
-        JS_NOT_REACHED("Feature NYI");
-    }
+    void ma_teq(Imm32 imm, Register src1);
+    void ma_teq(Register src2, Register src1);
+    void ma_teq(Register src1, Operand op);
 
 
     // test (src1 & src2)
-    void ma_tst(Imm32 imm, Register src1) {
-        ma_alu(src1, imm, InvalidReg, op_tst, SetCond);
-    }
-    void ma_tst(Register src1, Register src2) {
-        as_tst(src1, O2Reg(src2));
-    }
-    void ma_tst(Register src1, Operand op) {
-        as_tst(src1, op.toOp2());
-    }
+    void ma_tst(Imm32 imm, Register src1);
+    void ma_tst(Register src1, Register src2);
+    void ma_tst(Register src1, Operand op);
 
 
     // memory
     // shortcut for when we know we're transferring 32 bits of data
     void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
-                Index mode = Offset, Condition cc = Always)
-    {
-        int off = offset.value;
-        if (off < 4096 && off > -4096) {
-            // simplest offset, just use an immediate
-            as_dtr(ls, 32, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
-            return;
-        }
-        // see if we can attempt to encode it as a standard imm8m offset
-        datastore::Imm8mData imm = Imm8::encodeImm(off & (~0xfff));
-        if (!imm.invalid) {
-            as_add(ScratchRegister, rn, imm);
-            as_dtr(ls, 32, mode, rt, DTRAddr(ScratchRegister, DtrOffImm(off & 0xfff)), cc);
-        } else {
-            ma_mov(offset, ScratchRegister);
-            as_dtr(ls, 32, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
-        }
-    }
+                Index mode = Offset, Condition cc = Always);
+
     void ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
-                Index mode = Offset, Condition cc = Always)
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
+                Index mode = Offset, Condition cc = Always);
+
 
-    void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always)
-    {
-        as_dtr(IsStore, 32, mode, rt, addr, cc);
-    }
-    void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always)
-    {
-        as_dtr(IsLoad, 32, mode, rt, addr, cc);
-    }
+    void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
+
+    void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
+
     // specialty for moving N bits of data, where n == 8,16,32,64
     void ma_dataTransferN(LoadStore ls, int size,
                           Register rn, Register rm, Register rt,
-                          Index mode = Offset, Condition cc = Always) {
-        JS_NOT_REACHED("Feature NYI");
-    }
+                          Index mode = Offset, Condition cc = Always);
 
     void ma_dataTransferN(LoadStore ls, int size,
                           Register rn, Imm32 offset, Register rt,
-                          Index mode = Offset, Condition cc = Always) {
-        JS_NOT_REACHED("Feature NYI");
-    }
-    void ma_pop(Register r) {
-        ma_dtr(IsLoad, sp, Imm32(4), r, PostIndex);
-    }
-    void ma_push(Register r) {
-        ma_dtr(IsStore, sp ,Imm32(4), r, PreIndex);
-    }
+                          Index mode = Offset, Condition cc = Always);
+    void ma_pop(Register r);
+    void ma_push(Register r);
 
     // branches when done from within arm-specific code
-    void ma_b(Label *dest, Condition c = Always)
-    {
-        as_b(dest, c);
-    }
-    void ma_b(void *target, Relocation::Kind reloc)
-    {
-            JS_NOT_REACHED("Feature NYI");
-    }
-    void ma_b(void *target, Condition c, Relocation::Kind reloc)
-    {
-        // we know the absolute address of the target, but not our final
-        // location (with relocating GC, we *can't* know our final location)
-        // for now, I'm going to be conservative, and load this with an
-        // absolute address
-        uint32 trg = (uint32)target;
-        as_movw(ScratchRegister, Imm16(trg & 0xffff), c);
-        as_movt(ScratchRegister, Imm16(trg >> 16), c);
-        // this is going to get the branch predictor pissed off.
-        as_bx(ScratchRegister, c);
-    }
+    void ma_b(Label *dest, Condition c = Always);
+
+    void ma_b(void *target, Relocation::Kind reloc);
+
+    void ma_b(void *target, Condition c, Relocation::Kind reloc);
+
     // this is almost NEVER necessary, we'll basically never be calling a label
     // except, possibly in the crazy bailout-table case.
-    void ma_bl(Label *dest, Condition c = Always)
-    {
-        as_bl(dest, c);
-    }
+    void ma_bl(Label *dest, Condition c = Always);
+
 
     //VFP/ALU
-    void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst)
-    {
-        as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
-    }
-    void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst)
-    {
-        as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
-    }
-    void ma_vcmp_F64(FloatRegister src1, FloatRegister src2)
-    {
-        as_vcmp(VFPRegister(src1), VFPRegister(src2));
-    }
-    void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest)
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
-    void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest)
-    {
-        JS_NOT_REACHED("Feature NYI");
-    }
-    void ma_vmov(FloatRegister src, Register dest)
-    {
-        JS_NOT_REACHED("Feature NYI");
-        //as_vmov(VFPRegister(dest), VFPRegister(src));
-    }
-    void ma_vldr(VFPAddr addr, FloatRegister dest)
-    {
-        as_vdtr(IsLoad, dest, addr);
-    }
-    void ma_vstr(FloatRegister src, VFPAddr addr)
-    {
-        as_vdtr(IsStore, src, addr);
-    }
+    void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+    void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst);
+
+    void ma_vcmp_F64(FloatRegister src1, FloatRegister src2);
+
+    void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest);
+
+    void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest);
+
+    void ma_vmov(FloatRegister src, Register dest);
+
+    void ma_vldr(VFPAddr addr, FloatRegister dest);
+
+    void ma_vstr(FloatRegister src, VFPAddr addr);
+
   protected:
-    uint32 alignStackForCall(uint32 stackForArgs) {
-        // framePushed_ is accurate, so precisely adjust the stack requirement.
-        uint32 displacement = stackForArgs + framePushed_;
-        return stackForArgs + ComputeByteAlignment(displacement, StackAlignment);
-    }
-
-    uint32 dynamicallyAlignStackForCall(uint32 stackForArgs, const Register &scratch) {
-        // framePushed_ is bogus or we don't know it for sure, so instead, save
-        // the original value of esp and then chop off its low bits. Then, we
-        // push the original value of esp.
+    uint32 alignStackForCall(uint32 stackForArgs);
 
-        JS_NOT_REACHED("Codegen for dynamicallyAlignedStackForCall NYI");
-#if 0
-        ma_mov(sp, scratch);
-        ma_bic(Imm32(StackAlignment - 1), sp);
-        Push(scratch);
-#endif
-        uint32 displacement = stackForArgs + STACK_SLOT_SIZE;
-        return stackForArgs + ComputeByteAlignment(displacement, StackAlignment);
-    }
+    uint32 dynamicallyAlignStackForCall(uint32 stackForArgs, const Register &scratch);
 
-    void restoreStackFromDynamicAlignment() {
-        // x86 supports pop esp.  on arm, that isn't well defined, so just
-        //  do it manually
-        as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
-    }
+    void restoreStackFromDynamicAlignment();
 
   public:
-    void reserveStack(uint32 amount) {
-        if (amount)
-            ma_sub(Imm32(amount), sp);
-        framePushed_ += amount;
-    }
-    void freeStack(uint32 amount) {
-        JS_ASSERT(amount <= framePushed_);
-        if (amount)
-            ma_add(Imm32(amount), sp);
-        framePushed_ -= amount;
-    }
-
-    void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
-        JS_NOT_REACHED("NYI");
-    }
-    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
-        JS_NOT_REACHED("NYI");
-    }
+    void reserveStack(uint32 amount);
+    void freeStack(uint32 amount);
 
-    void movePtr(ImmWord imm, Register dest) {
-        ma_mov(Imm32(imm.value), dest);
-    }
-    void movePtr(ImmGCPtr imm, Register dest) {
-        ma_mov(imm, dest);
-    }
-    void loadPtr(const Address &address, Register dest) {
-        JS_NOT_REACHED("NYI");
-    }
-    void setStackArg(const Register &reg, uint32 arg) {
-        ma_dataTransferN(IsStore, 32, sp, Imm32(arg * STACK_SLOT_SIZE), reg);
-
-    }
+    void movePtr(ImmWord imm, Register dest);
+    void movePtr(ImmGCPtr imm, Register dest);
+    void loadPtr(const Address &address, Register dest);
+    void setStackArg(const Register &reg, uint32 arg);
 #ifdef DEBUG
-    void checkCallAlignment() {
-        Label good;
-        ma_tst(Imm32(StackAlignment - 1), sp);
-        ma_b(&good, Equal);
-        breakpoint();
-        bind(&good);
-    }
+    void checkCallAlignment();
 #endif
 
     // Returns the register containing the type tag.
     Register splitTagForTest(const ValueOperand &value) {
         return value.typeReg();
     }
 
     // higher level tag testing code
-    Condition testInt32(Condition cond, const ValueOperand &value) {
-        JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
-        ma_cmp(ImmType(JSVAL_TYPE_INT32), value.typeReg());
-        return cond;
-    }
+    Condition testInt32(Condition cond, const ValueOperand &value);
 
-    Condition testBoolean(Condition cond, const ValueOperand &value) {
-        JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
-        ma_cmp(ImmType(JSVAL_TYPE_BOOLEAN), value.typeReg());
-        return cond;
-    }
-    Condition testDouble(Condition cond, const ValueOperand &value) {
-        JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
-        Condition actual = (cond == Equal)
-                           ? Below
-                           : AboveOrEqual;
-        ma_cmp(ImmTag(JSVAL_TAG_CLEAR), value.typeReg());
-        return actual;
-    }
-    Condition testNull(Condition cond, const ValueOperand &value) {
-        JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
-        ma_cmp(ImmType(JSVAL_TYPE_NULL), value.typeReg());
-        return cond;
-    }
-    Condition testUndefined(Condition cond, const ValueOperand &value) {
-        JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
-        ma_cmp(ImmType(JSVAL_TYPE_UNDEFINED), value.typeReg());
-        return cond;
-    }
-    Condition testString(Condition cond, const ValueOperand &value) {
-        return testString(cond, value.typeReg());
-    }
-    Condition testObject(Condition cond, const ValueOperand &value) {
-        return testObject(cond, value.typeReg());
-    }
+    Condition testBoolean(Condition cond, const ValueOperand &value);
+    Condition testDouble(Condition cond, const ValueOperand &value);
+    Condition testNull(Condition cond, const ValueOperand &value);
+    Condition testUndefined(Condition cond, const ValueOperand &value);
+    Condition testString(Condition cond, const ValueOperand &value);
+    Condition testObject(Condition cond, const ValueOperand &value);
 
     // register-based tests
-    Condition testInt32(Condition cond, const Register &tag) {
-        JS_ASSERT(cond == Equal || cond == NotEqual);
-        ma_cmp(ImmTag(JSVAL_TAG_INT32), tag);
-        return cond;
-    }
-    Condition testBoolean(Condition cond, const Register &tag) {
-        JS_ASSERT(cond == Equal || cond == NotEqual);
-        ma_cmp(ImmTag(JSVAL_TAG_BOOLEAN), tag);
-        return cond;
-    }
-    Condition testNull(Condition cond, const Register &tag) {
-        JS_ASSERT(cond == Equal || cond == NotEqual);
-        ma_cmp(ImmTag(JSVAL_TAG_NULL), tag);
-        return cond;
-    }
-    Condition testUndefined(Condition cond, const Register &tag) {
-        JS_ASSERT(cond == Equal || cond == NotEqual);
-        ma_cmp(ImmTag(JSVAL_TAG_UNDEFINED), tag);
-        return cond;
-    }
-    Condition testString(Condition cond, const Register &tag) {
-        JS_ASSERT(cond == Equal || cond == NotEqual);
-        ma_cmp(ImmTag(JSVAL_TAG_STRING), tag);
-        return cond;
-    }
-    Condition testObject(Condition cond, const Register &tag) {
-        JS_ASSERT(cond == Equal || cond == NotEqual);
-        ma_cmp(ImmTag(JSVAL_TAG_OBJECT), tag);
-        return cond;
-    }
+    Condition testInt32(Condition cond, const Register &tag);
+    Condition testBoolean(Condition cond, const Register &tag);
+    Condition testNull(Condition cond, const Register &tag);
+    Condition testUndefined(Condition cond, const Register &tag);
+    Condition testString(Condition cond, const Register &tag);
+    Condition testObject(Condition cond, const Register &tag);
 
     // unboxing code
-    void unboxInt32(const ValueOperand &operand, const Register &dest) {
-        ma_mov(operand.payloadReg(), dest);
-    }
-    void unboxBoolean(const ValueOperand &operand, const Register &dest) {
-        ma_mov(operand.payloadReg(), dest);
-    }
-    void unboxDouble(const ValueOperand &operand, const FloatRegister &dest) {
-        JS_ASSERT(dest != ScratchFloatReg);
-        as_vxfer(operand.payloadReg(), operand.typeReg(),
-                VFPRegister(dest), CoreToFloat);
-    }
+    void unboxInt32(const ValueOperand &operand, const Register &dest);
+    void unboxBoolean(const ValueOperand &operand, const Register &dest);
+    void unboxDouble(const ValueOperand &operand, const FloatRegister &dest);
 
     // Extended unboxing API. If the payload is already in a register, returns
     // that register. Otherwise, provides a move to the given scratch register,
     // and returns that.
     Register extractObject(const Address &address, Register scratch) {
         JS_NOT_REACHED("NYI");
         return scratch;
     }
@@ -651,62 +345,25 @@ public:
     Register extractTag(const Address &address, Register scratch) {
         JS_NOT_REACHED("NYI");
         return scratch;
     }
     Register extractTag(const ValueOperand &value, Register scratch) {
         return value.typeReg();
     }
 
-    void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
-        JS_NOT_REACHED("Codegen for boolValueToDouble NYI");
-#if 0
-        cvtsi2sd(operand.payloadReg(), dest);
-#endif
-    }
-    void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest) {
-        JS_NOT_REACHED("Codegen for int32ValueToDouble NYI");
-        // transfer the integral value to a floating point register
-        VFPRegister vfpdest = VFPRegister(dest);
-        as_vxfer(operand.payloadReg(), InvalidReg,
-                 vfpdest.intOverlay(), CoreToFloat);
-        // convert the value to a double.
-        as_vcvt(dest, dest);
-    }
+    void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
+    void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
 
-    void loadStaticDouble(const double *dp, const FloatRegister &dest) {
-        JS_NOT_REACHED("Codegen for loadStaticDouble NYI");
-#if 0
-        _vldr()
-        movsd(dp, dest);
-#endif
-    }
+    void loadStaticDouble(const double *dp, const FloatRegister &dest);
     // treat the value as a boolean, and set condition codes accordingly
-    Condition testInt32Truthy(bool truthy, const ValueOperand &operand) {
-        ma_tst(operand.payloadReg(), operand.payloadReg());
-        return truthy ? NonZero : Zero;
-    }
-    Condition testBooleanTruthy(bool truthy, const ValueOperand &operand) {
-        ma_tst(operand.payloadReg(), operand.payloadReg());
-        return truthy ? NonZero : Zero;
-    }
-    Condition testDoubleTruthy(bool truthy, const FloatRegister &reg) {
-        JS_NOT_REACHED("codegen for testDoubleTruthy NYI");
-        // need to do vfp code here.
-#if 0
-        xorpd(ScratchFloatReg, ScratchFloatReg);
-        ucomisd(ScratchFloatReg, reg);
-#endif
-        return truthy ? NonZero : Zero;
-    }
-#if 0
-#endif
-    void breakpoint() {
-        as_bkpt();
-    }
+    Condition testInt32Truthy(bool truthy, const ValueOperand &operand);
+    Condition testBooleanTruthy(bool truthy, const ValueOperand &operand);
+    Condition testDoubleTruthy(bool truthy, const FloatRegister &reg);
+    void breakpoint();
 };
 
 class MacroAssemblerARMCompat : public MacroAssemblerARM
 {
 public:
     // jumps + other functions that should be called from
     // non-arm specific code...
     // basically, an x86 front end on top of the ARM code.
@@ -774,16 +431,23 @@ public:
     void branchTestNumber(Condition cond, const T &t, Label *label) {
         JS_NOT_REACHED("feature NYI");
     }
 
     template<typename T>
     void branchTestBooleanTruthy(bool b, const T & t, Label *label) {
         JS_NOT_REACHED("feature NYI");
     }
+    void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
+        JS_NOT_REACHED("NYI");
+    }
+    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
+        JS_NOT_REACHED("NYI");
+    }
+
 };
 
 typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
 
 } // namespace ion
 } // namespace js
 
 #endif // jsion_macro_assembler_arm_h__