Bug 541491 - ARM_ARCH, ARM_VFP, and ARM_THUMB2 should be runtime options everywhere (r=rreitmai,nnethercote)
author      Steven Johnson <stejohns@adobe.com>
date        Tue, 26 Jan 2010 10:37:38 -0800
changeset   37768 cf0ba05a0ef95a4f80645640d5c88a493961f74d
parent      37767 2b3944b2a5c2d8bb85427f581ec4e9bee65907a3
child       37769 cb223e493966159b20157513674b8a511abdb32a
push id     11426
push user   rsayre@mozilla.com
push date   Sun, 31 Jan 2010 16:36:36 +0000
reviewers   rreitmai, nnethercote
bugs        541491
milestone   1.9.3a1pre
js/src/lirasm/lirasm.cpp
js/src/nanojit/LIR.cpp
js/src/nanojit/LIR.h
js/src/nanojit/NativeARM.cpp
js/src/nanojit/NativeARM.h
js/src/nanojit/avmplus.h
js/src/nanojit/nanojit.h
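
In outline, the patch deletes the ARM_ARCH, ARM_VFP, and ARM_THUMB2 compile-time macros and points every use site at runtime fields on avmplus::Config (arm_arch, arm_vfp, arm_thumb2), so a single nanojit build can serve several ARM variants. Reduced to one line, the shape of the change is (a sketch, not a literal hunk):

    // Before: the choice was baked in when nanojit was compiled.
    if (ARM_VFP) { /* emit VFP instructions */ }

    // After: the choice is read from avmplus::Config at code-generation time.
    if (config.arm_vfp) { /* emit VFP instructions */ }
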
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -491,17 +491,17 @@ FragmentAssembler::FragmentAssembler(Lir
       mValidateWriter1(NULL), mValidateWriter2(NULL)
 {
     mFragment = new Fragment(NULL verbose_only(, (mParent.mLogc.lcbits &
                                                   nanojit::LC_FragProfile) ?
                                                   sProfId++ : 0));
     mFragment->lirbuf = mParent.mLirbuf;
     mParent.mFragments[mFragName].fragptr = mFragment;
 
-    mLir = mBufWriter  = new LirBufWriter(mParent.mLirbuf);
+    mLir = mBufWriter  = new LirBufWriter(mParent.mLirbuf, nanojit::AvmCore::config);
     if (optimize) {
 #ifdef DEBUG
         mLir = mValidateWriter2 = new ValidateWriter(mLir, "end of writer pipeline");
 #endif
         mLir = mCseFilter  = new CseFilter(mLir, mParent.mAlloc);
         mLir = mExprFilter = new ExprFilter(mLir);
     }
 #ifdef DEBUG
@@ -2146,22 +2146,22 @@ processCmdLine(int argc, char **argv, Cm
                       "you must specify either a filename or --random (but not both)");
 
     // Handle the architecture-specific options.
 #if defined NANOJIT_IA32
     avmplus::AvmCore::config.use_cmov = avmplus::AvmCore::config.sse2 = i386_sse;
     avmplus::AvmCore::config.fixed_esp = true;
 #elif defined NANOJIT_ARM
     // Note that we don't check for sensible configurations here!
-    avmplus::AvmCore::config.arch = arm_arch;
-    avmplus::AvmCore::config.vfp = arm_vfp;
+    avmplus::AvmCore::config.arm_arch = arm_arch;
+    avmplus::AvmCore::config.arm_vfp = arm_vfp;
     avmplus::AvmCore::config.soft_float = !arm_vfp;
     // This doesn't allow us to test ARMv6T2 (which also supports Thumb2), but this shouldn't
     // really matter here.
-    avmplus::AvmCore::config.thumb2 = (arm_arch >= 7);
+    avmplus::AvmCore::config.arm_thumb2 = (arm_arch >= 7);
 #endif
 }
 
 int
 main(int argc, char **argv)
 {
     CmdLineOptions opts;
     processCmdLine(argc, argv, opts);
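
A sketch of how a host other than lirasm might populate the same fields at startup, assuming a build without the NJ_FORCE_* overrides shown in avmplus.h below. The two detect* helpers are hypothetical stand-ins for whatever probe the host uses:

    #include "nanojit.h"

    unsigned detectArchVersion();   // hypothetical: returns 5, 6, or 7
    bool     detectVfp();           // hypothetical: true if VFP hardware is present

    void initArmConfig()
    {
    #ifdef NANOJIT_ARM
        avmplus::AvmCore::config.arm_arch   = detectArchVersion();
        avmplus::AvmCore::config.arm_vfp    = detectVfp();
        avmplus::AvmCore::config.soft_float = !avmplus::AvmCore::config.arm_vfp;
        avmplus::AvmCore::config.arm_thumb2 = (avmplus::AvmCore::config.arm_arch >= 7);
    #endif
    }
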
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -932,22 +932,27 @@ namespace nanojit
         static const LOpcode k_callmap[] = {
         //  ARGSIZE_NONE  ARGSIZE_F  ARGSIZE_LO  ARGSIZE_Q  (4)        (5)        ARGSIZE_U  (7)
             LIR_pcall,    LIR_fcall, LIR_icall,  LIR_qcall, LIR_pcall, LIR_pcall, LIR_icall, LIR_pcall
         };
 
         uint32_t argt = ci->_argtypes;
         LOpcode op = k_callmap[argt & ARGSIZE_MASK_ANY];
         NanoAssert(op != LIR_skip); // LIR_skip here is just an error condition
+#ifndef NANOJIT_64BIT
+        NanoAssert(op != LIR_qcall); // qcall should only be possible on 64-bit arch
+#endif
 
         int32_t argc = ci->count_args();
         NanoAssert(argc <= (int)MAXARGS);
 
-        if (!ARM_VFP && (op == LIR_fcall || op == LIR_qcall))
+#if defined(NANOJIT_ARM)
+        if (!_config.arm_vfp && op == LIR_fcall)
             op = LIR_callh;
+#endif
 
         // Allocate space for and copy the arguments.  We use the same
         // allocator as the normal LIR buffers so it has the same lifetime.
         // Nb: this must be kept in sync with arg().
         LInsp* args2 = (LInsp*)_buf->_allocator.alloc(argc * sizeof(LInsp));
         memcpy(args2, args, argc * sizeof(LInsp));
 
         // Allocate and write the call instruction.
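
Two behavioural refinements hide in this hunk: LIR_qcall is now asserted unreachable on 32-bit builds instead of being silently rewritten, and the soft-float rewrite of LIR_fcall to LIR_callh is compiled in (and consulted at runtime) only on ARM. Condensed into one function, with names as in the hunk above (a sketch, not the actual code):

    LOpcode selectCallOpcode(uint32_t argtypes, const avmplus::Config& config)
    {
        LOpcode op = k_callmap[argtypes & ARGSIZE_MASK_ANY];
    #ifndef NANOJIT_64BIT
        NanoAssert(op != LIR_qcall);      // 64-bit calls need a 64-bit arch
    #endif
    #if defined(NANOJIT_ARM)
        if (!config.arm_vfp && op == LIR_fcall)
            op = LIR_callh;               // soft-float: the double comes back in a GP pair
    #endif
        return op;
    }
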
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -1471,21 +1471,22 @@ namespace nanojit
             Allocator&  _allocator;
             uintptr_t   _unused;   // next unused instruction slot in the current LIR chunk
             uintptr_t   _limit;    // one past the last usable byte of the current LIR chunk
             size_t      _bytesAllocated;
     };
 
     class LirBufWriter : public LirWriter
     {
-        LirBuffer*    _buf;        // underlying buffer housing the instructions
+        LirBuffer*              _buf;        // underlying buffer housing the instructions
+        const avmplus::Config&  _config;
 
         public:
-            LirBufWriter(LirBuffer* buf)
-                : LirWriter(0), _buf(buf) {
+            LirBufWriter(LirBuffer* buf, const avmplus::Config& config)
+                : LirWriter(0), _buf(buf), _config(config) {
             }
 
             // LirWriter interface
             LInsp   insLoad(LOpcode op, LInsp base, int32_t disp);
             LInsp   insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp);
             LInsp   ins0(LOpcode op);
             LInsp   ins1(LOpcode op, LInsp o1);
             LInsp   ins2(LOpcode op, LInsp o1, LInsp o2);
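
Callers now thread the configuration through the constructor, as the lirasm hunk above does. A minimal usage sketch (assumes lirbuf already exists):

    // The writer keeps the reference and consults it later in insCall().
    LirWriter* lir = new LirBufWriter(lirbuf, nanojit::AvmCore::config);
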
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -102,17 +102,17 @@ Assembler::CountLeadingZeroes(uint32_t d
 
     // We can't do CLZ on anything earlier than ARMv5. Architectures as early
     // as that aren't supported, but assert that we aren't running on one
     // anyway.
     // If ARMv4 support is required in the future for some reason, we can do a
    // run-time check on config.arm_arch and fall back to the C routine, but for
     // now we can avoid the cost of the check as we don't intend to support
     // ARMv4 anyway.
-    NanoAssert(ARM_ARCH >= 5);
+    NanoAssert(config.arm_arch >= 5);
 
 #if defined(__ARMCC__)
     // ARMCC can do this with an intrinsic.
     leading_zeroes = __clz(data);
 
 // current Android GCC compiler incorrectly refuses to compile 'clz' for armv5
 // (even though this is a legal instruction there). Since we currently only compile for ARMv5
 // for emulation, we don't care too much (but we DO care for ARMv6+ since those are "real"
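
The comment above reserves the option of a C fallback should ARMv4 ever need supporting. A portable sketch of such a routine, illustrative only and not part of the patch:

    #include <stdint.h>

    // Binary-search count-leading-zeroes for cores without the CLZ
    // instruction (hypothetical ARMv4 fallback).
    static uint32_t clz_fallback(uint32_t data)
    {
        if (data == 0)
            return 32;
        uint32_t n = 0;
        if ((data & 0xFFFF0000) == 0) { n += 16; data <<= 16; }
        if ((data & 0xFF000000) == 0) { n += 8;  data <<= 8;  }
        if ((data & 0xF0000000) == 0) { n += 4;  data <<= 4;  }
        if ((data & 0xC0000000) == 0) { n += 2;  data <<= 2;  }
        if ((data & 0x80000000) == 0) { n += 1; }
        return n;
    }
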
@@ -554,17 +554,17 @@ Assembler::nFragExit(LInsp guard)
     MOV(SP, FP);
 }
 
 NIns*
 Assembler::genEpilogue()
 {
     // On ARMv5+, loading directly to PC correctly handles interworking.
     // Note that we don't support anything older than ARMv5.
-    NanoAssert(ARM_ARCH >= 5);
+    NanoAssert(config.arm_arch >= 5);
 
     RegisterMask savingMask = rmask(FP) | rmask(PC);
 
     POP_mask(savingMask); // regs
 
     // NB: this is the latter half of the dual-nature patchable exit branch
     // workaround noted above in nFragExit. IP has the "return value"
     // incoming, we need to move it to R0.
@@ -626,21 +626,21 @@ Assembler::asm_arg(ArgSize sz, LInsp arg
 // handle arguments where (ArgSize)sz == ARGSIZE_F.
 void
 Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 {
     // The stack pointer must always be at least aligned to 4 bytes.
     NanoAssert((stkd & 3) == 0);
     // The only use for this function when we are using soft floating-point
     // is for LIR_qjoin.
-    NanoAssert(ARM_VFP || arg->isop(LIR_qjoin));
+    NanoAssert(config.arm_vfp || arg->isop(LIR_qjoin));
 
     Register    fp_reg = deprecated_UnknownReg;
 
-    if (ARM_VFP) {
+    if (config.arm_vfp) {
         fp_reg = findRegFor(arg, FpRegs);
         NanoAssert(isKnownReg(fp_reg));
     }
 
 #ifdef NJ_ARM_EABI
     // EABI requires that 64-bit arguments are aligned on even-numbered
     // registers, as R0:R1 or R2:R3. If the register base is at an
     // odd-numbered register, advance it. Note that this will push r past
@@ -660,17 +660,17 @@ Assembler::asm_arg_64(LInsp arg, Registe
         // EABI requires that 64-bit arguments are aligned on even-numbered
         // registers, as R0:R1 or R2:R3.
         NanoAssert( ((ra == R0) && (rb == R1)) || ((ra == R2) && (rb == R3)) );
 #endif
 
         // Put the argument in ra and rb. If the argument is in a VFP register,
         // use FMRRD to move it to ra and rb. Otherwise, let asm_regarg deal
         // with the argument as if it were two 32-bit arguments.
-        if (ARM_VFP) {
+        if (config.arm_vfp) {
             FMRRD(ra, rb, fp_reg);
         } else {
             asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
             asm_regarg(ARGSIZE_LO, arg->oprnd2(), rb);
         }
 
 #ifndef NJ_ARM_EABI
     } else if (r == R3) {
@@ -683,17 +683,17 @@ Assembler::asm_arg_64(LInsp arg, Registe
         // This really just checks that nextreg() works properly, as we know
         // that r was previously R3.
         NanoAssert(r == R4);
 
         // We're splitting the argument between registers and the stack.  This
         // must be the first time that the stack is used, so stkd must be at 0.
         NanoAssert(stkd == 0);
 
-        if (ARM_VFP) {
+        if (config.arm_vfp) {
             // TODO: We could optimize this to store directly from
             // the VFP register to memory using "FMRRD ra, fp_reg[31:0]" and
             // "STR fp_reg[63:32], [SP, #stkd]".
 
             // Load from the floating-point register as usual, but use IP
             // as a swap register.
             STR(IP, SP, 0);
             stkd += 4;
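
For concreteness, the EABI rule these asserts encode, shown as a register layout (illustration, not from the patch):

    // AAPCS/EABI core-register layout for f(int a, double b) with the
    // double passed in core registers:
    //   a -> R0
    //   (R1 is skipped: a 64-bit value must start at an even register)
    //   b -> R2:R3
    // If the pair would have to start at R3, EABI places the whole double
    // on the stack; only non-EABI targets split it between R3 and memory,
    // which is the case handled a little further down.
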
@@ -770,28 +770,28 @@ void
 Assembler::asm_stkarg(LInsp arg, int stkd)
 {
     bool isF64 = arg->isF64();
 
     Register rr;
     if (arg->isUsed() && (rr = arg->deprecated_getReg(), isKnownReg(rr))) {
         // The argument resides somewhere in registers, so we simply need to
         // push it onto the stack.
-        if (!ARM_VFP || !isF64) {
+        if (!config.arm_vfp || !isF64) {
             NanoAssert(IsGpReg(rr));
 
             STR(rr, SP, stkd);
         } else {
             // According to the comments in asm_arg_64, LIR_qjoin
             // can have a 64-bit argument even if VFP is disabled. However,
             // asm_arg_64 will split the argument and issue two 32-bit
             // arguments to asm_stkarg so we can ignore that case here and
             // assert that we will never get 64-bit arguments unless VFP is
             // available.
-            NanoAssert(ARM_VFP);
+            NanoAssert(config.arm_vfp);
             NanoAssert(IsFpReg(rr));
 
 #ifdef NJ_ARM_EABI
             // EABI requires that 64-bit arguments are 64-bit aligned.
             NanoAssert((stkd & 7) == 0);
 #endif
 
             FSTD(rr, SP, stkd);
@@ -819,17 +819,17 @@ Assembler::asm_stkarg(LInsp arg, int stk
             LDR(IP, FP, d);
         }
     }
 }
 
 void
 Assembler::asm_call(LInsp ins)
 {
-    if (ARM_VFP && ins->isop(LIR_fcall)) {
+    if (config.arm_vfp && ins->isop(LIR_fcall)) {
         /* Because ARM actually returns the result in (R0,R1), and not in a
          * floating point register, the code to move the result into a correct
          * register is below.  We do nothing here.
          *
          * The reason being that if we did something here, the final code
          * sequence we'd get would be something like:
          *     MOV {R0-R3},params        [from below]
          *     BL function               [from below]
@@ -857,23 +857,23 @@ Assembler::asm_call(LInsp ins)
 
     const CallInfo* call = ins->callInfo();
     ArgSize sizes[MAXARGS];
     uint32_t argc = call->get_sizes(sizes);
     bool indirect = call->isIndirect();
 
     // If we aren't using VFP, assert that the LIR operation is an integer
     // function call.
-    NanoAssert(ARM_VFP || ins->isop(LIR_icall));
+    NanoAssert(config.arm_vfp || ins->isop(LIR_icall));
 
     // If we're using VFP, and the return type is a double, it'll come back in
     // R0/R1. We need to either place it in the result fp reg, or store it.
     // See comments above for more details as to why this is necessary here
     // for floating point calls, but not for integer calls.
-    if (ARM_VFP && ins->isUsed()) {
+    if (config.arm_vfp && ins->isUsed()) {
         // Determine the size (and type) of the instruction result.
         ArgSize rsize = (ArgSize)(call->_argtypes & ARGSIZE_MASK_ANY);
 
         // If the result size is a floating-point value, treat the result
         // specially, as described previously.
         if (rsize == ARGSIZE_F) {
             Register rr = ins->deprecated_getReg();
 
@@ -966,17 +966,17 @@ void
 Assembler::nRegisterResetAll(RegAlloc& a)
 {
     // add scratch registers to our free list for the allocator
     a.clear();
     a.free =
         rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) |
         rmask(R5) | rmask(R6) | rmask(R7) | rmask(R8) | rmask(R9) |
         rmask(R10) | rmask(LR);
-    if (ARM_VFP)
+    if (config.arm_vfp)
         a.free |= FpRegs;
 
     debug_only(a.managed = a.free);
 }
 
 static inline ConditionCode
 get_cc(NIns *ins)
 {
@@ -1251,17 +1251,17 @@ Assembler::asm_restore(LInsp i, Register
         }
         asm_ld_imm(r, i->imm32());
     }
     else {
         // We can't easily load immediate values directly into FP registers, so
         // ensure that memory is allocated for the constant and load it from
         // memory.
         int d = findMemFor(i);
-        if (ARM_VFP && IsFpReg(r)) {
+        if (config.arm_vfp && IsFpReg(r)) {
             if (isS8(d >> 2)) {
                 FLDD(r, FP, d);
             } else {
                 FLDD(r, IP, 0);
                 asm_add_imm(IP, FP, d);
             }
         } else {
             NIns merged;
@@ -1282,17 +1282,17 @@ Assembler::asm_restore(LInsp i, Register
 }
 
 void
 Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
 {
     (void) pop;
     (void) quad;
     if (d) {
-        if (ARM_VFP && IsFpReg(rr)) {
+        if (config.arm_vfp && IsFpReg(rr)) {
             if (isS8(d >> 2)) {
                 FSTD(rr, FP, d);
             } else {
                 FSTD(rr, IP, 0);
                 asm_add_imm(IP, FP, d);
             }
         } else {
             NIns merged;
@@ -1331,17 +1331,17 @@ Assembler::asm_load64(LInsp ins)
     NanoAssert(IsGpReg(rb));
     deprecated_freeRsrcOf(ins, false);
 
     //outputf("--- load64: Finished register allocation.");
 
     switch (ins->opcode()) {
         case LIR_ldf:
         case LIR_ldfc:
-            if (ARM_VFP && isKnownReg(rr)) {
+            if (config.arm_vfp && isKnownReg(rr)) {
                 // VFP is enabled and the result will go into a register.
                 NanoAssert(IsFpReg(rr));
 
                 if (!isS8(offset >> 2) || (offset&3) != 0) {
                     FLDD(rr,IP,0);
                     asm_add_imm(IP, rb, offset);
                 } else {
                     FLDD(rr,rb,offset);
@@ -1358,17 +1358,17 @@ Assembler::asm_load64(LInsp ins)
 
                 // *(uint64_t*)(FP+d) = *(uint64_t*)(rb+offset)
                 asm_mmq(FP, d, rb, offset);
             }
             return;
 
         case LIR_ld32f:
         case LIR_ldc32f:
-            if (ARM_VFP) {
+            if (config.arm_vfp) {
                 if (isKnownReg(rr)) {
                     NanoAssert(IsFpReg(rr));
                     FCVTDS(rr, S14);
                 } else {
                     // Normally D7 isn't allowed to be used as an FP reg.
                     // In this case we make an explicit exception.
                     if (isS8(d)) {
                         FSTD_allowD7(D7, FP, d, true);
@@ -1404,17 +1404,17 @@ void
 Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
 {
     NanoAssert(op != LIR_stqi);
 
     //asm_output("<<< store64 (dr: %d)", dr);
 
     switch (op) {
         case LIR_stfi:
-            if (ARM_VFP) {
+            if (config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
                 if (value->isconstq()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
                     asm_ld_imm(IP, value->imm64_0(), false);
@@ -1453,17 +1453,17 @@ Assembler::asm_store64(LOpcode op, LInsp
                 int da = findMemFor(value);
                 Register rb = findRegFor(base, GpRegs);
                 // *(uint64_t*)(rb+dr) = *(uint64_t*)(FP+da)
                 asm_mmq(rb, dr, FP, da);
             }
             return;
 
         case LIR_st32f:
-            if (ARM_VFP) {
+            if (config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
                 if (value->isconstq()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
                     asm_ld_imm(IP, value->imm64_0(), false);
@@ -1541,17 +1541,17 @@ Assembler::asm_quad(LInsp ins)
 {
     //asm_output(">>> asm_quad");
 
     int d = deprecated_disp(ins);
     Register rr = ins->deprecated_getReg();
 
     deprecated_freeRsrcOf(ins, false);
 
-    if (ARM_VFP && isKnownReg(rr))
+    if (config.arm_vfp && isKnownReg(rr))
     {
         asm_spill(rr, d, false, true);
 
         underrunProtect(4*4);
         asm_quad_nochk(rr, ins->imm64_0(), ins->imm64_1());
     } else {
         NanoAssert(d);
         // asm_mmq might spill a reg, so don't call it;
@@ -1565,17 +1565,17 @@ Assembler::asm_quad(LInsp ins)
     }
 
     //asm_output("<<< asm_quad");
 }
 
 void
 Assembler::asm_nongp_copy(Register r, Register s)
 {
-    if (ARM_VFP && IsFpReg(r) && IsFpReg(s)) {
+    if (config.arm_vfp && IsFpReg(r) && IsFpReg(s)) {
         // fp->fp
         FCPYD(r, s);
     } else {
         // We can't move a double-precision FP register into a 32-bit GP
         // register, so assert that no calling code is trying to do that.
         NanoAssert(0);
     }
 }
@@ -1815,17 +1815,17 @@ Assembler::BranchWithLink(NIns* addr)
             *(--_nIns) = (NIns)( (COND_AL) | (0xB<<24) | (offs2) );
             asm_output("bl %p", (void*)addr);
         } else {
             // The target is Thumb, so emit a BLX.
 
             // We need to emit an ARMv5+ instruction, so assert that we have a
             // suitable processor. Note that we don't support ARMv4(T), but
             // this serves as a useful sanity check.
-            NanoAssert(ARM_ARCH >= 5);
+            NanoAssert(config.arm_arch >= 5);
 
             // The (pre-shifted) value of the "H" bit in the BLX encoding.
             uint32_t    H = (offs & 0x2) << 23;
 
             // BLX addr
             *(--_nIns) = (NIns)( (0xF << 28) | (0x5<<25) | (H) | (offs2) );
             asm_output("blx %p", (void*)addr);
         }
@@ -1842,17 +1842,17 @@ Assembler::BranchWithLink(NIns* addr)
 // This is identical to BranchWithLink(NIns*) but emits a branch to an address
 // held in a register rather than a literal address.
 inline void
 Assembler::BLX(Register addr, bool chk /* = true */)
 {
     // We need to emit an ARMv5+ instruction, so assert that we have a suitable
     // processor. Note that we don't support ARMv4(T), but this serves as a
     // useful sanity check.
-    NanoAssert(ARM_ARCH >= 5);
+    NanoAssert(config.arm_arch >= 5);
 
     NanoAssert(IsGpReg(addr));
     // There is a bug in the WinCE device emulator which stops "BLX LR" from
     // working as expected. Assert that we never do that!
     if (blx_lr_bug) { NanoAssert(addr != LR); }
 
     if (chk) {
         underrunProtect(4);
@@ -1867,17 +1867,17 @@ Assembler::BLX(Register addr, bool chk /
 // d = *(b+off)
 // underrunProtect calls from this function can be disabled by setting chk to
 // false. However, this function can use more than LD32_size bytes of space if
 // the offset is out of the range of a LDR instruction; the maximum space this
 // function requires for underrunProtect is 4+LD32_size.
 void
 Assembler::asm_ldr_chk(Register d, Register b, int32_t off, bool chk)
 {
-    if (ARM_VFP && IsFpReg(d)) {
+    if (config.arm_vfp && IsFpReg(d)) {
         FLDD_chk(d,b,off,chk);
         return;
     }
 
     NanoAssert(IsGpReg(d));
     NanoAssert(IsGpReg(b));
 
     // We can't use underrunProtect if the base register is the PC because
@@ -1942,17 +1942,17 @@ Assembler::asm_ld_imm(Register d, int32_
         MVNis(d, op2imm, 0);
         return;
     }
 
     // Try to use simple MOV, MVN or MOV(W|T) instructions to load the
     // immediate. If this isn't possible, load it from memory.
     //  - We cannot use MOV(W|T) on cores older than the introduction of
     //    Thumb-2 or if the target register is the PC.
-    if (ARM_THUMB2 && (d != PC)) {
+    if (config.arm_thumb2 && (d != PC)) {
         // ARMv6T2 and above have MOVW and MOVT.
         uint32_t    high_h = (uint32_t)imm >> 16;
         uint32_t    low_h = imm & 0xffff;
 
         if (high_h != 0) {
             // Load the high half-word (if necessary).
             MOVTi_chk(d, high_h, chk);
         }
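
For example, with arm_thumb2 set, an arbitrary 32-bit constant loads in at most two instructions instead of a PC-relative literal-pool load (illustrative sketch, not from the patch):

    ; ARMv6T2 and later: immediate built from two half-words
    MOVW  r0, #0xBEEF        ; low half-word
    MOVT  r0, #0xDEAD        ; high half-word -> r0 = 0xDEADBEEF

    ; older cores (or when the target is PC): load from a literal pool
    LDR   r0, [PC, #offset]
    ...
    .word 0xDEADBEEF
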
@@ -2185,17 +2185,17 @@ Assembler::asm_fcmp(LInsp ins)
 /* Call this with targ set to 0 if the target is not yet known and the branch
  * will be patched up later.
  */
 NIns*
 Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
 {
     LOpcode condop = cond->opcode();
     NanoAssert(cond->isCond());
-    NanoAssert(ARM_VFP || ((condop < LIR_feq) || (condop > LIR_fge)));
+    NanoAssert(config.arm_vfp || ((condop < LIR_feq) || (condop > LIR_fge)));
 
     // The old "never" condition code has special meaning on newer ARM cores,
     // so use "always" as a sensible default code.
     ConditionCode cc = AL;
 
     // Detect whether or not this is a floating-point comparison.
     bool    fp_cond;
 
@@ -2238,26 +2238,26 @@ Assembler::asm_branch(bool branchOnFalse
     // Invert the condition if required.
     if (branchOnFalse)
         cc = OppositeCond(cc);
 
     // Ensure that we got a sensible condition code.
     NanoAssert((cc != AL) && (cc != NV));
 
     // Ensure that we don't hit floating-point LIR codes if VFP is disabled.
-    NanoAssert(ARM_VFP || !fp_cond);
+    NanoAssert(config.arm_vfp || !fp_cond);
 
     // Emit a suitable branch instruction.
     B_cond(cc, targ);
 
     // Store the address of the branch instruction so that we can return it.
     // asm_[f]cmp will move _nIns so we must do this now.
     NIns *at = _nIns;
 
-    if (ARM_VFP && fp_cond)
+    if (config.arm_vfp && fp_cond)
         asm_fcmp(cond);
     else
         asm_cmp(cond);
 
     return at;
 }
 
 void
@@ -2458,29 +2458,29 @@ Assembler::asm_arith(LInsp ins)
         case LIR_mul:
             // ARMv5 and earlier cores cannot do a MUL where the first operand
             // is also the result, so we need a special case to handle that.
             //
             // We try to use rb as the first operand by default because it is
             // common for (rr == ra) and is thus likely to be the most
             // efficient method.
 
-            if ((ARM_ARCH > 5) || (rr != rb)) {
+            if ((config.arm_arch > 5) || (rr != rb)) {
                 // IP is used to temporarily store the high word of the result from
                 // SMULL, so we make use of this to perform an overflow check, as
                 // ARM's MUL instruction can't set the overflow flag by itself.
                 // We can check for overflow using the following:
                 //   SMULL  rr, ip, ra, rb
                 //   CMP    ip, rr, ASR #31
                 // An explanation can be found in bug 521161. This sets Z if we did
                 // _not_ overflow, and clears it if we did.
                 ALUr_shi(AL, cmp, 1, IP, IP, rr, ASR_imm, 31);
                 SMULL(rr, IP, rb, ra);
             } else {
-                // ARM_ARCH is ARMv5 (or below) and rr == rb, so we must
+                // config.arm_arch is ARMv5 (or below) and rr == rb, so we must
                 // find a different way to encode the instruction.
 
                 // If possible, swap the arguments to avoid the restriction.
                 if (rr != ra) {
                     // We know that rr == rb, so this will be something like
                     // rX = rY * rX.
                    // Other than swapping ra and rb, this works in the same
                    // way as the ARMv6+ case, above.
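
The overflow test relies on a property of 32x32->64 signed multiplication: the product fits in 32 bits exactly when the high word equals the sign extension of the low word. The same check in standalone C (a sketch, not from the patch):

    #include <stdint.h>

    // Mirrors SMULL rr, ip, ra, rb / CMP ip, rr, ASR #31: Z is set when
    // the product did not overflow, i.e. hi == lo >> 31 (arithmetic shift).
    static bool mulOverflows(int32_t a, int32_t b)
    {
        int64_t wide = (int64_t)a * b;
        int32_t lo = (int32_t)wide;           // SMULL's low word (rr)
        int32_t hi = (int32_t)(wide >> 32);   // SMULL's high word (ip)
        return hi != (lo >> 31);              // all-zeros or all-ones sign word
    }
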
@@ -2737,17 +2737,17 @@ Assembler::asm_ret(LIns *ins)
     releaseRegisters();
     assignSavedRegs();
     LIns *value = ins->oprnd1();
     if (ins->isop(LIR_ret)) {
         findSpecificRegFor(value, R0);
     }
     else {
         NanoAssert(ins->isop(LIR_fret));
-        if (ARM_VFP) {
+        if (config.arm_vfp) {
             Register reg = findRegFor(value, FpRegs);
             FMRRD(R0, R1, reg);
         } else {
             NanoAssert(value->isop(LIR_qjoin));
             findSpecificRegFor(value->oprnd1(), R0); // lo
             findSpecificRegFor(value->oprnd2(), R1); // hi
         }
     }
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -57,26 +57,16 @@
 namespace nanojit
 {
 #if defined VMCFG_DOUBLE_MSW_FIRST || defined _MSC_VER
 #  undef  NJ_ARM_EABI
 #else
 #  define NJ_ARM_EABI  1
 #endif
 
-// default to ARMv5
-#if !defined(ARM_ARCH)
-#  define ARM_ARCH  5
-#endif
-
-// default to no-thumb2
-#if !defined(ARM_THUMB2)
-#  define ARM_THUMB2  0
-#endif
-
 // only d0-d6 are actually used; we'll use d7 as s14-s15 for i2f/u2f/etc.
 #define NJ_VFP_MAX_REGISTERS            8
 #define NJ_MAX_REGISTERS                (11 + NJ_VFP_MAX_REGISTERS)
 #define NJ_MAX_STACK_ENTRY              256
 #define NJ_MAX_PARAMETERS               16
 #define NJ_ALIGN_STACK                  8
 #define NJ_JTBL_SUPPORTED               1
 #define NJ_EXPANDED_LOADSTORE_SUPPORTED 1
@@ -461,30 +451,30 @@ enum {
 #define SUBs(_d,_l,_r,_s)   ALUr(AL, sub, _s, _d, _l, _r)
 #define SUB(_d,_l,_r)       ALUr(AL, sub,  0, _d, _l, _r)
 
 // --------
 // Other operations.
 // --------
 
 // [_d_hi,_d] = _l * _r
-#define SMULL(_d, _d_hi, _l, _r)  do {                                                                             \
+#define SMULL(_d, _d_hi, _l, _r)  do {                                                          \
         underrunProtect(4);                                                                     \
-        NanoAssert((ARM_ARCH >= 6) || ((_d   ) != (_l)));                                       \
-        NanoAssert((ARM_ARCH >= 6) || ((_d_hi) != (_l)));                                       \
+        NanoAssert((config.arm_arch >= 6) || ((_d   ) != (_l)));                                \
+        NanoAssert((config.arm_arch >= 6) || ((_d_hi) != (_l)));                                \
         NanoAssert(IsGpReg(_d) && IsGpReg(_d_hi) && IsGpReg(_l) && IsGpReg(_r));                \
         NanoAssert(((_d) != PC) && ((_d_hi) != PC) && ((_l) != PC) && ((_r) != PC));            \
         *(--_nIns) = (NIns)( COND_AL | 0xc00090 | (_d_hi)<<16 | (_d)<<12 | (_r)<<8 | (_l) );    \
         asm_output("smull %s, %s, %s, %s",gpn(_d),gpn(_d_hi),gpn(_l),gpn(_r));                  \
 } while(0)
 
 // _d = _l * _r
 #define MUL(_d, _l, _r)  do {                                               \
         underrunProtect(4);                                                 \
-        NanoAssert((ARM_ARCH >= 6) || ((_d) != (_l)));                      \
+        NanoAssert((config.arm_arch >= 6) || ((_d) != (_l)));               \
         NanoAssert(IsGpReg(_d) && IsGpReg(_l) && IsGpReg(_r));              \
         NanoAssert(((_d) != PC) && ((_l) != PC) && ((_r) != PC));           \
         *(--_nIns) = (NIns)( COND_AL | (_d)<<16 | (_r)<<8 | 0x90 | (_l) );  \
         asm_output("mul %s, %s, %s",gpn(_d),gpn(_l),gpn(_r));               \
 } while(0)
 
 // RSBS _d, _r
 // _d = 0 - _r
@@ -830,49 +820,49 @@ enum {
     } while(0)
 
 /*
  * VFP
  */
 
 #define FMDRR(_Dm,_Rd,_Rn) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dm) && IsGpReg(_Rd) && IsGpReg(_Rn));       \
         *(--_nIns) = (NIns)( COND_AL | (0xC4<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmdrr %s,%s,%s", gpn(_Dm), gpn(_Rd), gpn(_Rn));    \
     } while (0)
 
 #define FMRRD(_Rd,_Rn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsGpReg(_Rd) && IsGpReg(_Rn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xC5<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmrrd %s,%s,%s", gpn(_Rd), gpn(_Rn), gpn(_Dm));    \
     } while (0)
 
 #define FMRDH(_Rd,_Dn) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
         asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn));                  \
     } while (0)
 
 #define FMRDL(_Rd,_Dn) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
         asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn));                  \
     } while (0)
 
 #define FSTD_allowD7(_Dd,_Rn,_offs,_allowD7) do {                               \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2));         \
         NanoAssert((IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7)) && !IsFpReg(_Rn));     \
         int negflag = 1<<23;                                            \
         intptr_t offs = (_offs);                                        \
         if (_offs < 0) {                                                \
             negflag = 0<<23;                                            \
             offs = -(offs);                                             \
         }                                                               \
@@ -880,201 +870,201 @@ enum {
         asm_output("fstd %s,%s(%d)", gpn(_Dd), gpn(_Rn), _offs);    \
     } while (0)
 
 #define FSTD(_Dd,_Rn,_offs) \
         FSTD_allowD7(_Dd,_Rn,_offs,0)
 
 #define FLDD_chk(_Dd,_Rn,_offs,_chk) do {                               \
         if(_chk) underrunProtect(4);                                    \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2));         \
         NanoAssert(IsFpReg(_Dd) && !IsFpReg(_Rn));                      \
         int negflag = 1<<23;                                            \
         intptr_t offs = (_offs);                                        \
         if (_offs < 0) {                                                \
             negflag = 0<<23;                                            \
             offs = -(offs);                                             \
         }                                                               \
         *(--_nIns) = (NIns)( COND_AL | (0xD1<<20) | ((_Rn)<<16) | (FpRegNum(_Dd)<<12) | (0xB<<8) | negflag | ((offs>>2)&0xff) ); \
         asm_output("fldd %s,%s(%d)", gpn(_Dd), gpn(_Rn), _offs);       \
     } while (0)
 #define FLDD(_Dd,_Rn,_offs) FLDD_chk(_Dd,_Rn,_offs,1)
 
 #define FUITOD(_Dd,_Sm) do {                                            \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2D<<6) | (0<<5) | (0x7) ); \
         asm_output("fuitod %s,%s", gpn(_Dd), gpn(_Sm));                \
     } while (0)
 
 #define FNEGD(_Dd,_Dm) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xEB1<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fnegd %s,%s", gpn(_Dd), gpn(_Dm));                 \
     } while (0)
 
 #define FADDD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
         asm_output("faddd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FSUBD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fsubd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FMULD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE2<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FDIVD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE8<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FMSTAT() do {                               \
         underrunProtect(4);                         \
-        NanoAssert(ARM_VFP);                        \
+        NanoAssert(config.arm_vfp);                 \
         *(--_nIns) = (NIns)( COND_AL | 0x0EF1FA10); \
         asm_output("fmstat");                       \
     } while (0)
 
 #define FCMPD(_Dd,_Dm,_E) do {                                          \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm));                       \
         NanoAssert(((_E)==0) || ((_E)==1));                             \
         *(--_nIns) = (NIns)( COND_AL | (0xEB4<<16) | (FpRegNum(_Dd)<<12) | (0xB<<8) | ((_E)<<7) | (0x4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fcmp%sd %s,%s", (((_E)==1)?"e":""), gpn(_Dd), gpn(_Dm)); \
     } while (0)
 
 #define FCPYD(_Dd,_Dm) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xEB0<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fcpyd %s,%s", gpn(_Dd), gpn(_Dm));                 \
     } while (0)
 
 #define FMRS(_Rd,_Sn) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn));                  \
     } while (0)
 
 /*
  * The following instructions can only be used with S14 as the
  * single-precision register; that limitation can be removed if
  * needed, but we'd have to teach NJ about all the single precision
  * regs, and their encoding is strange (top 4 bits usually in a block,
  * low bit elsewhere).
  */
 
 #define FSITOD(_Dd,_Sm) do {                                            \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2F<<6) | (0<<5) | (0x7) ); \
         asm_output("fsitod %s,%s", gpn(_Dd), gpn(_Sm));                \
     } while (0)
 
 #define FMSR(_Sn,_Rd) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd));                  \
     } while (0)
 
 #define FMRS(_Rd,_Sn) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn));                  \
     } while (0)
 
 #define FMSR(_Sn,_Rd) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(ARM_VFP);                                            \
+        NanoAssert(config.arm_vfp);                                     \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd));                  \
     } while (0)
 
 #define FCVTSD(_Sd,_Dm) do {                        \
         underrunProtect(4);                         \
-        NanoAssert(ARM_VFP);                        \
+        NanoAssert(config.arm_vfp);                 \
         NanoAssert(((_Sd) == S14) && IsFpReg(_Dm)); \
         *(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (0x7<<12) | (0xBC<<4) | (FpRegNum(_Dm)) ); \
         asm_output("[0x%08x] fcvtsd s14,%s", *_nIns, gpn(_Dm));                          \
     } while (0)
 
-#define FCVTDS_allowD7(_Dd,_Sm,_allowD7) do {               \
+#define FCVTDS_allowD7(_Dd,_Sm,_allowD7) do {       \
         underrunProtect(4);                         \
-        NanoAssert(ARM_VFP);                        \
+        NanoAssert(config.arm_vfp);                 \
         NanoAssert(((_Sm) == S14) && (IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7))); \
         *(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (FpRegNum(_Dd)<<12) | (0xAC<<4) | (0x7) ); \
         asm_output("[0x%08x] fcvtds %s,s14", *_nIns, gpn(_Dd));      \
     } while(0)
 
 #define FCVTDS(_Dd,_Sm) \
     FCVTDS_allowD7(_Dd,_Sm,0)
 
 #define FLDS(_Sd,_Rn,_offs) do {                                \
         underrunProtect(4);                                     \
-        NanoAssert(ARM_VFP);                                    \
+        NanoAssert(config.arm_vfp);                             \
         NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn));            \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
         int addflag = 1<<23;                                    \
         intptr_t offs = (_offs);                                \
         if (offs < 0) {                                         \
             addflag = 0;                                        \
             offs = -offs;                                       \
         }                                                       \
         *(--_nIns) = (NIns)( COND_AL | (0xD1<<20) | ((_Rn)<<16) | (0x7<<12) | (0xA << 8) | addflag | ((offs>>2)&0xff) ); \
         asm_output("[0x%08x] flds s14, [%s, #%d]", *_nIns, gpn(_Rn), (_offs)); \
     } while (0)
 
-#define FSTS(_Sd,_Rn,_offs) do {                \
+#define FSTS(_Sd,_Rn,_offs) do {                                \
         underrunProtect(4);                                     \
-        NanoAssert(ARM_VFP);                                    \
+        NanoAssert(config.arm_vfp);                             \
         NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn));            \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
         int addflag = 1<<23;                                    \
         intptr_t offs = (_offs);                                \
         if (offs < 0) {                                         \
             addflag = 0;                                        \
             offs = -offs;                                       \
         }                                                       \
         *(--_nIns) = (NIns)( COND_AL | (0xD0<<20) | ((_Rn)<<16) | (0x7<<12) | (0xA << 8) | addflag | ((offs>>2)&0xff) ); \
         asm_output("[0x%08x] fsts s14, [%s, #%d]", *_nIns, gpn(_Rn), (_offs)); \
     } while (0)
 
 #define FTOSID(_Sd,_Dm) do {                                   \
         underrunProtect(4);                                    \
-        NanoAssert(ARM_VFP);                                   \
+        NanoAssert(config.arm_vfp);                            \
         NanoAssert(((_Sd) == S14) && IsFpReg(_Dm));            \
         *(--_nIns) = (NIns)( COND_AL | (0xEBD<<16) | (0x7<<12) | (0xB4<<4) | FpRegNum(_Dm) ); \
-        asm_output("ftosid s14, %s", gpn(_Dm));                         \
+        asm_output("ftosid s14, %s", gpn(_Dm));                \
     } while (0)
 
 } // namespace nanojit
 #endif // __nanojit_NativeARM__
--- a/js/src/nanojit/avmplus.h
+++ b/js/src/nanojit/avmplus.h
@@ -33,25 +33,16 @@
  *
  ***** END LICENSE BLOCK ***** */
 
 #ifndef avm_h___
 #define avm_h___
 
 #include "VMPI.h"
 
-#ifdef AVMPLUS_ARM
-#define ARM_ARCH   AvmCore::config.arch
-#define ARM_VFP    AvmCore::config.vfp
-#define ARM_THUMB2 AvmCore::config.thumb2
-#else
-#define ARM_VFP    1
-#define ARM_THUMB2 1
-#endif
-
 #if !defined(AVMPLUS_LITTLE_ENDIAN) && !defined(AVMPLUS_BIG_ENDIAN)
 #ifdef IS_BIG_ENDIAN
 #define AVMPLUS_BIG_ENDIAN
 #else
 #define AVMPLUS_LITTLE_ENDIAN
 #endif
 #endif
 
@@ -218,42 +209,42 @@ namespace avmplus {
         bool use_cmov;
         // Whether to use a virtual stack pointer
         bool fixed_esp;
 #endif
 
 #if defined (AVMPLUS_ARM)
         // Whether or not to generate VFP instructions.
 # if defined (NJ_FORCE_SOFTFLOAT)
-        static const bool vfp = false;
+        static const bool arm_vfp = false;
 # else
-        bool vfp;
+        bool arm_vfp;
 # endif
 
         // The ARM architecture version.
 # if defined (NJ_FORCE_ARM_ARCH_VERSION)
-        static const unsigned int arch = NJ_FORCE_ARM_ARCH_VERSION;
+        static const unsigned int arm_arch = NJ_FORCE_ARM_ARCH_VERSION;
 # else
-        unsigned int arch;
+        unsigned int arm_arch;
 # endif
 
         // Support for Thumb, even if it isn't used by nanojit. This is used to
         // determine whether or not to generate interworking branches.
 # if defined (NJ_FORCE_NO_ARM_THUMB)
-        static const bool thumb = false;
+        static const bool arm_thumb = false;
 # else
-        bool thumb;
+        bool arm_thumb;
 # endif
 
         // Support for Thumb2, even if it isn't used by nanojit. This is used to
         // determine whether or not to use some of the ARMv6T2 instructions.
 # if defined (NJ_FORCE_NO_ARM_THUMB2)
-        static const bool thumb2 = false;
+        static const bool arm_thumb2 = false;
 # else
-        bool thumb2;
+        bool arm_thumb2;
 # endif
 
 #endif
 
 #if defined (NJ_FORCE_SOFTFLOAT)
         static const bool soft_float = true;
 #else
         bool soft_float;
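
The pattern above is worth noting: when a feature is forced at build time, the field degenerates to a static const that the optimizer can fold, so every config.arm_vfp branch in the back end becomes dead code; the default build leaves an ordinary field chosen at startup. Reduced to its essentials (a sketch of the idiom, not the full struct):

    struct ConfigSketch {
    #if defined (NJ_FORCE_SOFTFLOAT)
        static const bool arm_vfp = false;   // compile-time: VFP paths fold away
    #else
        bool arm_vfp;                        // runtime: chosen per device
    #endif
    };
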
--- a/js/src/nanojit/nanojit.h
+++ b/js/src/nanojit/nanojit.h
@@ -65,31 +65,16 @@
 #if defined NANOJIT_64BIT
     #define IF_64BIT(...) __VA_ARGS__
     #define UNLESS_64BIT(...)
 #else
     #define IF_64BIT(...)
     #define UNLESS_64BIT(...) __VA_ARGS__
 #endif
 
-// set ARM_VFP constant if not already set
-#if !defined(ARM_VFP)
-    #ifdef AVMPLUS_ARM
-        #if defined(NJ_ARM_VFP)
-            #define ARM_VFP      1
-        #else
-            #define ARM_VFP      0
-        #endif
-    #else
-        // some LIR features should test VFP on ARM,
-        // but can be set to "always on" on non-ARM
-        #define ARM_VFP 1
-    #endif
-#endif
-
 // Embed no-op macros that let Valgrind work with the JIT.
 #ifdef MOZ_VALGRIND
 #  define JS_VALGRIND
 #endif
 #ifdef JS_VALGRIND
 #  include <valgrind/valgrind.h>
 #else
 #  define VALGRIND_DISCARD_TRANSLATIONS(addr, szB)
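
One possible implementation of the detection probe sketched after the lirasm hunk, specific to Linux and not part of this patch; it scans /proc/cpuinfo for a VFP feature flag and the architecture version, with field names as in the avmplus.h changes above:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Hypothetical Linux-only probe; real hosts have their own detection.
    static void detectArmConfig(avmplus::Config& config)
    {
        config.arm_arch = 5;       // conservative defaults
        config.arm_vfp  = false;
        if (FILE* f = fopen("/proc/cpuinfo", "r")) {
            char line[256];
            while (fgets(line, sizeof(line), f)) {
                if (strstr(line, "Features") && strstr(line, " vfp"))
                    config.arm_vfp = true;
                if (const char* p = strstr(line, "CPU architecture:"))
                    config.arm_arch = (unsigned)atoi(p + strlen("CPU architecture:"));
            }
            fclose(f);
        }
        config.arm_thumb2 = (config.arm_arch >= 7);
        config.soft_float = !config.arm_vfp;
    }
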