Bug 542133 - Add a real NJConfig struct to nanojit (r=edwsmith,r=nnethercote)
author      Steven Johnson <stejohns@adobe.com>
date        Mon, 15 Feb 2010 17:56:41 -0800
changeset   38581  42c01cffd7bceada68b285e5fe48a80a02762452
parent      38580  7b1d42444f851b69d31978731dab1f97805cb0a9
child       38582  d681e5fb77ba9930f6fa244f523aa16fe9234588
reviewers   edwsmith, nnethercote
bugs        542133
milestone   1.9.3a2pre
Bug 542133 - Add a real NJConfig struct to nanojit (r=edwsmith,r=nnethercote)
js/src/lirasm/lirasm.cpp
js/src/nanojit/Assembler.cpp
js/src/nanojit/Assembler.h
js/src/nanojit/LIR.cpp
js/src/nanojit/LIR.h
js/src/nanojit/NativeARM.cpp
js/src/nanojit/NativeARM.h
js/src/nanojit/NativeMIPS.cpp
js/src/nanojit/Nativei386.cpp
js/src/nanojit/avmplus.cpp
js/src/nanojit/avmplus.h
js/src/nanojit/manifest.mk
js/src/nanojit/nanojit.h
js/src/nanojit/njconfig.cpp
js/src/nanojit/njconfig.h
js/src/nanojit/njcpudetect.h
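
The NJConfig struct itself is introduced in the new njconfig.h/njconfig.cpp files, whose hunks are not shown in this section. Purely as orientation, here is a minimal C++ sketch of the struct's shape inferred from the fields the rest of this patch reads (i386_sse2, i386_use_cmov, i386_fixed_esp, arm_arch, arm_vfp, arm_show_stats, soft_float); the actual field types, defaults, and CPU detection live in njconfig.h, njconfig.cpp, and njcpudetect.h.

    // Hypothetical sketch (not the real njconfig.h) of nanojit::Config,
    // listing only the fields referenced elsewhere in this patch.
    #include <stdint.h>

    namespace nanojit
    {
        struct Config
        {
            Config();                     // defaults filled in by CPU detection (njconfig.cpp)

            // i386
            uint32_t i386_sse2      : 1;  // SSE2 available (replaces AvmCore::config.sse2)
            uint32_t i386_use_cmov  : 1;  // CMOV usable (replaces use_cmov)
            uint32_t i386_fixed_esp : 1;  // keep ESP fixed; reserve outgoing-arg space in the prolog

            // ARM
            uint8_t  arm_arch;            // architecture level (5, 6, 7, ...)
            uint32_t arm_vfp        : 1;  // VFP hardware floating point available
            uint32_t arm_show_stats : 1;  // emit per-fragment stats code (replaces show_stats)

            // generic
            uint32_t soft_float     : 1;  // software FP; LIR_fcall is rewritten to LIR_icall
        };
    }
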
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -1937,20 +1937,19 @@ FragmentAssembler::assembleRandomFragmen
     delete[] classGenerator;
 
     // End with a vanilla exit.
     mReturnTypeBits |= RT_GUARD;
     endFragment();
 }
 
 Lirasm::Lirasm(bool verbose) :
-    mAssm(mCodeAlloc, mAlloc, mAlloc, &mCore, &mLogc)
+    mAssm(mCodeAlloc, mAlloc, mAlloc, &mCore, &mLogc, nanojit::AvmCore::config)
 {
     mVerbose = verbose;
-    nanojit::AvmCore::config.tree_opt = true;
     mLogc.lcbits = 0;
 
     mLirbuf = new (mAlloc) LirBuffer(mAlloc);
 #ifdef DEBUG
     if (mVerbose) {
         mLogc.lcbits = LC_Assembly | LC_RegAlloc | LC_Activation;
         mLabelMap = new (mAlloc) LabelMap(mAlloc, &mLogc);
         mLirbuf->names = new (mAlloc) LirNameMap(mAlloc, mLabelMap);
@@ -2209,18 +2208,18 @@ processCmdLine(int argc, char **argv, Cm
     }
 
     if ((!opts.random && opts.filename.empty()) || (opts.random && !opts.filename.empty()))
         errMsgAndQuit(opts.progname,
                       "you must specify either a filename or --random (but not both)");
 
     // Handle the architecture-specific options.
 #if defined NANOJIT_IA32
-    avmplus::AvmCore::config.use_cmov = avmplus::AvmCore::config.sse2 = i386_sse;
-    avmplus::AvmCore::config.fixed_esp = true;
+    avmplus::AvmCore::config.i386_use_cmov = avmplus::AvmCore::config.i386_sse2 = i386_sse;
+    avmplus::AvmCore::config.i386_fixed_esp = true;
 #elif defined NANOJIT_ARM
     // Note that we don't check for sensible configurations here!
     avmplus::AvmCore::config.arm_arch = arm_arch;
     avmplus::AvmCore::config.arm_vfp = arm_vfp;
     avmplus::AvmCore::config.soft_float = !arm_vfp;
 #endif
 }
 
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -52,17 +52,17 @@
 
 namespace nanojit
 {
     /**
      * Need the following:
      *
      *    - merging paths ( build a graph? ), possibly use external rep to drive codegen
      */
-    Assembler::Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc)
+    Assembler::Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config)
         : codeList(NULL)
         , alloc(alloc)
         , _codeAlloc(codeAlloc)
         , _dataAlloc(dataAlloc)
         , _thisfrag(NULL)
         , _branchStateMap(alloc)
         , _patches(alloc)
         , _labels(alloc)
@@ -72,17 +72,17 @@ namespace nanojit
         , _epilogue(NULL)
         , _err(None)
     #if PEDANTIC
         , pedanticTop(NULL)
     #endif
     #ifdef VTUNE
         , cgen(NULL)
     #endif
-        , config(core->config)
+        , _config(config)
     {
         VMPI_memset(&_stats, 0, sizeof(_stats));
         nInit(core);
         (void)logc;
         verbose_only( _logc = logc; )
         verbose_only( _outputCache = 0; )
         verbose_only( outline[0] = '\0'; )
         verbose_only( outlineEOL[0] = '\0'; )
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -288,17 +288,17 @@ namespace nanojit
             void printActivationState();
             #endif // NJ_VERBOSE
 
         public:
             #ifdef VTUNE
             avmplus::CodegenLIR *cgen;
             #endif
 
-            Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc);
+            Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config);
 
             void        compile(Fragment *frag, Allocator& alloc, bool optimize
                                 verbose_only(, LabelMap*));
 
             void        endAssembly(Fragment* frag);
             void        assemble(Fragment* frag, LirFilter* reader);
             void        beginAssembly(Fragment *frag);
 
@@ -482,17 +482,17 @@ namespace nanojit
             // since we generate backwards the depth is negative
             inline void fpu_push() {
                 debug_only( ++_fpuStkDepth; NanoAssert(_fpuStkDepth<=0); )
             }
             inline void fpu_pop() {
                 debug_only( --_fpuStkDepth; NanoAssert(_fpuStkDepth<=0); )
             }
 #endif
-            avmplus::Config &config;
+            const Config& _config;
     };
 
     inline int32_t arDisp(LIns* ins)
     {
         // even on 64bit cpu's, we allocate stack area in 4byte chunks
         return -4 * int32_t(ins->getArIndex());
     }
     // XXX: deprecated, use arDisp() instead.  See bug 538924.
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -948,17 +948,17 @@ namespace nanojit
                     ins2(LIR_and, iffalse, ins1(LIR_not, ncond)));
     }
 
     LIns* LirBufWriter::insCall(const CallInfo *ci, LInsp args[])
     {
         LOpcode op = getCallOpcode(ci);
 #if NJ_SOFTFLOAT_SUPPORTED
         // SoftFloat: convert LIR_fcall to LIR_icall.
-        if (!_config.arm_vfp && op == LIR_fcall)
+        if (_config.soft_float && op == LIR_fcall)
             op = LIR_icall;
 #endif
 
         int32_t argc = ci->count_args();
         NanoAssert(argc <= (int)MAXARGS);
 
         // Allocate space for and copy the arguments.  We use the same
         // allocator as the normal LIR buffers so it has the same lifetime.
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -1553,20 +1553,20 @@ namespace nanojit
             uintptr_t   _unused;   // next unused instruction slot in the current LIR chunk
             uintptr_t   _limit;    // one past the last usable byte of the current LIR chunk
             size_t      _bytesAllocated;
     };
 
     class LirBufWriter : public LirWriter
     {
         LirBuffer*              _buf;        // underlying buffer housing the instructions
-        const avmplus::Config&  _config;
+        const Config&           _config;
 
         public:
-            LirBufWriter(LirBuffer* buf, const avmplus::Config& config)
+            LirBufWriter(LirBuffer* buf, const Config& config)
                 : LirWriter(0), _buf(buf), _config(config) {
             }
 
             // LirWriter interface
             LInsp   insLoad(LOpcode op, LInsp base, int32_t disp);
             LInsp   insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp);
             LInsp   ins0(LOpcode op);
             LInsp   ins1(LOpcode op, LInsp o1);
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -99,20 +99,20 @@ inline uint32_t
 Assembler::CountLeadingZeroes(uint32_t data)
 {
     uint32_t    leading_zeroes;
 
     // We can't do CLZ on anything earlier than ARMv5. Architectures as early
     // as that aren't supported, but assert that we aren't running on one
     // anyway.
     // If ARMv4 support is required in the future for some reason, we can do a
-    // run-time check on config.arch and fall back to the C routine, but for
+    // run-time check on _config.arm_arch and fall back to the C routine, but for
     // now we can avoid the cost of the check as we don't intend to support
     // ARMv4 anyway.
-    NanoAssert(config.arm_arch >= 5);
+    NanoAssert(_config.arm_arch >= 5);
 
 #if defined(__ARMCC__)
     // ARMCC can do this with an intrinsic.
     leading_zeroes = __clz(data);
 
 // current Android GCC compiler incorrectly refuses to compile 'clz' for armv5
 // (even though this is a legal instruction there). Since we currently only compile for ARMv5
 // for emulation, we don't care too much (but we DO care for ARMv6+ since those are "real"
@@ -530,17 +530,17 @@ Assembler::nFragExit(LInsp guard)
         // "incoming 0th parameter". This is just a quirk of ARM ABI. So
         // we compromise by passing "return value" to the epilogue in IP,
         // not R0, and have the epilogue MOV(R0, IP) first thing.
 
         asm_ld_imm(IP, int(gr));
     }
 
 #ifdef NJ_VERBOSE
-    if (config.show_stats) {
+    if (_config.arm_show_stats) {
         // load R1 with Fragment *fromFrag, target fragment
         // will make use of this when calling fragenter().
         int fromfrag = int((Fragment*)_thisfrag);
         asm_ld_imm(argRegs[1], fromfrag);
     }
 #endif
 
     // profiling for the exit
@@ -554,17 +554,17 @@ Assembler::nFragExit(LInsp guard)
     MOV(SP, FP);
 }
 
 NIns*
 Assembler::genEpilogue()
 {
     // On ARMv5+, loading directly to PC correctly handles interworking.
     // Note that we don't support anything older than ARMv5.
-    NanoAssert(config.arm_arch >= 5);
+    NanoAssert(_config.arm_arch >= 5);
 
     RegisterMask savingMask = rmask(FP) | rmask(PC);
 
     POP_mask(savingMask); // regs
 
     // NB: this is the later half of the dual-nature patchable exit branch
     // workaround noted above in nFragExit. IP has the "return value"
     // incoming, we need to move it to R0.
@@ -623,21 +623,21 @@ Assembler::asm_arg(ArgSize sz, LInsp arg
 // handle arguments where (ArgSize)sz == ARGSIZE_F.
 void
 Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
 {
     // The stack pointer must always be at least aligned to 4 bytes.
     NanoAssert((stkd & 3) == 0);
     // The only use for this function when we are using soft floating-point
     // is for LIR_qjoin.
-    NanoAssert(config.arm_vfp || arg->isop(LIR_qjoin));
+    NanoAssert(_config.arm_vfp || arg->isop(LIR_qjoin));
 
     Register    fp_reg = deprecated_UnknownReg;
 
-    if (config.arm_vfp) {
+    if (_config.arm_vfp) {
         fp_reg = findRegFor(arg, FpRegs);
         NanoAssert(isKnownReg(fp_reg));
     }
 
 #ifdef NJ_ARM_EABI
     // EABI requires that 64-bit arguments are aligned on even-numbered
     // registers, as R0:R1 or R2:R3. If the register base is at an
     // odd-numbered register, advance it. Note that this will push r past
@@ -657,17 +657,17 @@ Assembler::asm_arg_64(LInsp arg, Registe
         // EABI requires that 64-bit arguments are aligned on even-numbered
         // registers, as R0:R1 or R2:R3.
         NanoAssert( ((ra == R0) && (rb == R1)) || ((ra == R2) && (rb == R3)) );
 #endif
 
         // Put the argument in ra and rb. If the argument is in a VFP register,
         // use FMRRD to move it to ra and rb. Otherwise, let asm_regarg deal
         // with the argument as if it were two 32-bit arguments.
-        if (config.arm_vfp) {
+        if (_config.arm_vfp) {
             FMRRD(ra, rb, fp_reg);
         } else {
             asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra);
             asm_regarg(ARGSIZE_LO, arg->oprnd2(), rb);
         }
 
 #ifndef NJ_ARM_EABI
     } else if (r == R3) {
@@ -680,17 +680,17 @@ Assembler::asm_arg_64(LInsp arg, Registe
         // This really just checks that nextreg() works properly, as we know
         // that r was previously R3.
         NanoAssert(r == R4);
 
         // We're splitting the argument between registers and the stack.  This
         // must be the first time that the stack is used, so stkd must be at 0.
         NanoAssert(stkd == 0);
 
-        if (config.arm_vfp) {
+        if (_config.arm_vfp) {
             // TODO: We could optimize the this to store directly from
             // the VFP register to memory using "FMRRD ra, fp_reg[31:0]" and
             // "STR fp_reg[63:32], [SP, #stkd]".
 
             // Load from the floating-point register as usual, but use IP
             // as a swap register.
             STR(IP, SP, 0);
             stkd += 4;
@@ -763,28 +763,28 @@ void
 Assembler::asm_stkarg(LInsp arg, int stkd)
 {
     bool isF64 = arg->isF64();
 
     Register rr;
     if (arg->isUsed() && (rr = arg->deprecated_getReg(), isKnownReg(rr))) {
         // The argument resides somewhere in registers, so we simply need to
         // push it onto the stack.
-        if (!config.arm_vfp || !isF64) {
+        if (!_config.arm_vfp || !isF64) {
             NanoAssert(IsGpReg(rr));
 
             STR(rr, SP, stkd);
         } else {
             // According to the comments in asm_arg_64, LIR_qjoin
             // can have a 64-bit argument even if VFP is disabled. However,
             // asm_arg_64 will split the argument and issue two 32-bit
             // arguments to asm_stkarg so we can ignore that case here and
             // assert that we will never get 64-bit arguments unless VFP is
             // available.
-            NanoAssert(config.arm_vfp);
+            NanoAssert(_config.arm_vfp);
             NanoAssert(IsFpReg(rr));
 
 #ifdef NJ_ARM_EABI
             // EABI requires that 64-bit arguments are 64-bit aligned.
             NanoAssert((stkd & 7) == 0);
 #endif
 
             FSTD(rr, SP, stkd);
@@ -812,17 +812,17 @@ Assembler::asm_stkarg(LInsp arg, int stk
             LDR(IP, FP, d);
         }
     }
 }
 
 void
 Assembler::asm_call(LInsp ins)
 {
-    if (config.arm_vfp && ins->isop(LIR_fcall)) {
+    if (_config.arm_vfp && ins->isop(LIR_fcall)) {
         /* Because ARM actually returns the result in (R0,R1), and not in a
          * floating point register, the code to move the result into a correct
          * register is below.  We do nothing here.
          *
          * The reason being that if we did something here, the final code
          * sequence we'd get would be something like:
          *     MOV {R0-R3},params        [from below]
          *     BL function               [from below]
@@ -850,23 +850,23 @@ Assembler::asm_call(LInsp ins)
 
     const CallInfo* call = ins->callInfo();
     ArgSize sizes[MAXARGS];
     uint32_t argc = call->get_sizes(sizes);
     bool indirect = call->isIndirect();
 
     // If we aren't using VFP, assert that the LIR operation is an integer
     // function call.
-    NanoAssert(config.arm_vfp || ins->isop(LIR_icall));
+    NanoAssert(_config.arm_vfp || ins->isop(LIR_icall));
 
     // If we're using VFP, and the return type is a double, it'll come back in
     // R0/R1. We need to either place it in the result fp reg, or store it.
     // See comments above for more details as to why this is necessary here
     // for floating point calls, but not for integer calls.
-    if (config.arm_vfp && ins->isUsed()) {
+    if (_config.arm_vfp && ins->isUsed()) {
         // Determine the size (and type) of the instruction result.
         ArgSize rsize = (ArgSize)(call->_argtypes & ARGSIZE_MASK_ANY);
 
         // If the result size is a floating-point value, treat the result
         // specially, as described previously.
         if (rsize == ARGSIZE_F) {
             Register rr = ins->deprecated_getReg();
 
@@ -959,17 +959,17 @@ void
 Assembler::nRegisterResetAll(RegAlloc& a)
 {
     // add scratch registers to our free list for the allocator
     a.clear();
     a.free =
         rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) |
         rmask(R5) | rmask(R6) | rmask(R7) | rmask(R8) | rmask(R9) |
         rmask(R10) | rmask(LR);
-    if (config.arm_vfp)
+    if (_config.arm_vfp)
         a.free |= FpRegs;
 
     debug_only(a.managed = a.free);
 }
 
 static inline ConditionCode
 get_cc(NIns *ins)
 {
@@ -1244,17 +1244,17 @@ Assembler::asm_restore(LInsp i, Register
         }
         asm_ld_imm(r, i->imm32());
     }
     else {
         // We can't easily load immediate values directly into FP registers, so
         // ensure that memory is allocated for the constant and load it from
         // memory.
         int d = findMemFor(i);
-        if (config.arm_vfp && IsFpReg(r)) {
+        if (_config.arm_vfp && IsFpReg(r)) {
             if (isS8(d >> 2)) {
                 FLDD(r, FP, d);
             } else {
                 FLDD(r, IP, 0);
                 asm_add_imm(IP, FP, d);
             }
         } else {
             NIns merged;
@@ -1275,17 +1275,17 @@ Assembler::asm_restore(LInsp i, Register
 }
 
 void
 Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
 {
     (void) pop;
     (void) quad;
     if (d) {
-        if (config.arm_vfp && IsFpReg(rr)) {
+        if (_config.arm_vfp && IsFpReg(rr)) {
             if (isS8(d >> 2)) {
                 FSTD(rr, FP, d);
             } else {
                 FSTD(rr, IP, 0);
                 asm_add_imm(IP, FP, d);
             }
         } else {
             NIns merged;
@@ -1322,17 +1322,17 @@ Assembler::asm_load64(LInsp ins)
     NanoAssert(IsGpReg(rb));
     deprecated_freeRsrcOf(ins, false);
 
     //outputf("--- load64: Finished register allocation.");
 
     switch (ins->opcode()) {
         case LIR_ldf:
         case LIR_ldfc:
-            if (config.arm_vfp && isKnownReg(rr)) {
+            if (_config.arm_vfp && isKnownReg(rr)) {
                 // VFP is enabled and the result will go into a register.
                 NanoAssert(IsFpReg(rr));
 
                 if (!isS8(offset >> 2) || (offset&3) != 0) {
                     FLDD(rr,IP,0);
                     asm_add_imm(IP, rb, offset);
                 } else {
                     FLDD(rr,rb,offset);
@@ -1349,17 +1349,17 @@ Assembler::asm_load64(LInsp ins)
 
                 // *(uint64_t*)(FP+d) = *(uint64_t*)(rb+offset)
                 asm_mmq(FP, d, rb, offset);
             }
             return;
 
         case LIR_ld32f:
         case LIR_ldc32f:
-            if (config.arm_vfp) {
+            if (_config.arm_vfp) {
                 if (isKnownReg(rr)) {
                     NanoAssert(IsFpReg(rr));
                     FCVTDS(rr, S14);
                 } else {
                     // Normally D7 isn't allowed to be used as an FP reg.
                     // In this case we make an explicit exception.
                     if (isS8(d)) {
                         FSTD_allowD7(D7, FP, d, true);
@@ -1393,17 +1393,17 @@ Assembler::asm_load64(LInsp ins)
 
 void
 Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
 {
     //asm_output("<<< store64 (dr: %d)", dr);
 
     switch (op) {
         case LIR_stfi:
-            if (config.arm_vfp) {
+            if (_config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
                 if (value->isconstq()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
                     asm_ld_imm(IP, value->imm64_0(), false);
@@ -1442,17 +1442,17 @@ Assembler::asm_store64(LOpcode op, LInsp
                 int da = findMemFor(value);
                 Register rb = findRegFor(base, GpRegs);
                 // *(uint64_t*)(rb+dr) = *(uint64_t*)(FP+da)
                 asm_mmq(rb, dr, FP, da);
             }
             return;
 
         case LIR_st32f:
-            if (config.arm_vfp) {
+            if (_config.arm_vfp) {
                 Register rb = findRegFor(base, GpRegs);
 
                 if (value->isconstq()) {
                     underrunProtect(LD32_size*2 + 8);
 
                     // XXX use another reg, get rid of dependency
                     STR(IP, rb, dr);
                     asm_ld_imm(IP, value->imm64_0(), false);
@@ -1530,17 +1530,17 @@ Assembler::asm_quad(LInsp ins)
 {
     //asm_output(">>> asm_quad");
 
     int d = deprecated_disp(ins);
     Register rr = ins->deprecated_getReg();
 
     deprecated_freeRsrcOf(ins, false);
 
-    if (config.arm_vfp && isKnownReg(rr))
+    if (_config.arm_vfp && isKnownReg(rr))
     {
         asm_spill(rr, d, false, true);
 
         underrunProtect(4*4);
         asm_quad_nochk(rr, ins->imm64_0(), ins->imm64_1());
     } else {
         NanoAssert(d);
         // asm_mmq might spill a reg, so don't call it;
@@ -1554,17 +1554,17 @@ Assembler::asm_quad(LInsp ins)
     }
 
     //asm_output("<<< asm_quad");
 }
 
 void
 Assembler::asm_nongp_copy(Register r, Register s)
 {
-    if (config.arm_vfp && IsFpReg(r) && IsFpReg(s)) {
+    if (_config.arm_vfp && IsFpReg(r) && IsFpReg(s)) {
         // fp->fp
         FCPYD(r, s);
     } else {
         // We can't move a double-precision FP register into a 32-bit GP
         // register, so assert that no calling code is trying to do that.
         NanoAssert(0);
     }
 }
@@ -1804,17 +1804,17 @@ Assembler::BranchWithLink(NIns* addr)
             *(--_nIns) = (NIns)( (COND_AL) | (0xB<<24) | (offs2) );
             asm_output("bl %p", (void*)addr);
         } else {
             // The target is Thumb, so emit a BLX.
 
             // We need to emit an ARMv5+ instruction, so assert that we have a
             // suitable processor. Note that we don't support ARMv4(T), but
             // this serves as a useful sanity check.
-            NanoAssert(config.arm_arch >= 5);
+            NanoAssert(_config.arm_arch >= 5);
 
             // The (pre-shifted) value of the "H" bit in the BLX encoding.
             uint32_t    H = (offs & 0x2) << 23;
 
             // BLX addr
             *(--_nIns) = (NIns)( (0xF << 28) | (0x5<<25) | (H) | (offs2) );
             asm_output("blx %p", (void*)addr);
         }
@@ -1831,17 +1831,17 @@ Assembler::BranchWithLink(NIns* addr)
 // This is identical to BranchWithLink(NIns*) but emits a branch to an address
 // held in a register rather than a literal address.
 inline void
 Assembler::BLX(Register addr, bool chk /* = true */)
 {
     // We need to emit an ARMv5+ instruction, so assert that we have a suitable
     // processor. Note that we don't support ARMv4(T), but this serves as a
     // useful sanity check.
-    NanoAssert(config.arm_arch >= 5);
+    NanoAssert(_config.arm_arch >= 5);
 
     NanoAssert(IsGpReg(addr));
     // There is a bug in the WinCE device emulator which stops "BLX LR" from
     // working as expected. Assert that we never do that!
     if (blx_lr_bug) { NanoAssert(addr != LR); }
 
     if (chk) {
         underrunProtect(4);
@@ -1856,17 +1856,17 @@ Assembler::BLX(Register addr, bool chk /
 // d = *(b+off)
 // underrunProtect calls from this function can be disabled by setting chk to
 // false. However, this function can use more than LD32_size bytes of space if
 // the offset is out of the range of a LDR instruction; the maximum space this
 // function requires for underrunProtect is 4+LD32_size.
 void
 Assembler::asm_ldr_chk(Register d, Register b, int32_t off, bool chk)
 {
-    if (config.arm_vfp && IsFpReg(d)) {
+    if (_config.arm_vfp && IsFpReg(d)) {
         FLDD_chk(d,b,off,chk);
         return;
     }
 
     NanoAssert(IsGpReg(d));
     NanoAssert(IsGpReg(b));
 
     // We can't use underrunProtect if the base register is the PC because
@@ -1935,17 +1935,17 @@ Assembler::asm_ld_imm(Register d, int32_
     // Try to use simple MOV, MVN or MOV(W|T) instructions to load the
     // immediate. If this isn't possible, load it from memory.
     //  - We cannot use MOV(W|T) on cores older than the introduction of
     //    Thumb-2 or if the target register is the PC.
     //
     // (Note that we use Thumb-2 if arm_arch is ARMv7 or later; the only earlier
     // ARM core that provided Thumb-2 is ARMv6T2/ARM1156, which is a real-time
     // core that nanojit is unlikely to ever target.)
-    if (config.arm_arch >= 7 && (d != PC)) {
+    if (_config.arm_arch >= 7 && (d != PC)) {
         // ARMv6T2 and above have MOVW and MOVT.
         uint32_t    high_h = (uint32_t)imm >> 16;
         uint32_t    low_h = imm & 0xffff;
 
         if (high_h != 0) {
             // Load the high half-word (if necessary).
             MOVTi_chk(d, high_h, chk);
         }
@@ -2178,17 +2178,17 @@ Assembler::asm_fcmp(LInsp ins)
 /* Call this with targ set to 0 if the target is not yet known and the branch
  * will be patched up later.
  */
 NIns*
 Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
 {
     LOpcode condop = cond->opcode();
     NanoAssert(cond->isCond());
-    NanoAssert(config.arm_vfp || ((condop < LIR_feq) || (condop > LIR_fge)));
+    NanoAssert(_config.arm_vfp || ((condop < LIR_feq) || (condop > LIR_fge)));
 
     // The old "never" condition code has special meaning on newer ARM cores,
     // so use "always" as a sensible default code.
     ConditionCode cc = AL;
 
     // Detect whether or not this is a floating-point comparison.
     bool    fp_cond;
 
@@ -2231,26 +2231,26 @@ Assembler::asm_branch(bool branchOnFalse
     // Invert the condition if required.
     if (branchOnFalse)
         cc = OppositeCond(cc);
 
     // Ensure that we got a sensible condition code.
     NanoAssert((cc != AL) && (cc != NV));
 
     // Ensure that we don't hit floating-point LIR codes if VFP is disabled.
-    NanoAssert(config.arm_vfp || !fp_cond);
+    NanoAssert(_config.arm_vfp || !fp_cond);
 
     // Emit a suitable branch instruction.
     B_cond(cc, targ);
 
     // Store the address of the branch instruction so that we can return it.
     // asm_[f]cmp will move _nIns so we must do this now.
     NIns *at = _nIns;
 
-    if (config.arm_vfp && fp_cond)
+    if (_config.arm_vfp && fp_cond)
         asm_fcmp(cond);
     else
         asm_cmp(cond);
 
     return at;
 }
 
 void
@@ -2452,29 +2452,29 @@ Assembler::asm_arith(LInsp ins)
         case LIR_mul:
             // ARMv5 and earlier cores cannot do a MUL where the first operand
             // is also the result, so we need a special case to handle that.
             //
             // We try to use rb as the first operand by default because it is
             // common for (rr == ra) and is thus likely to be the most
             // efficient method.
 
-            if ((config.arm_arch > 5) || (rr != rb)) {
+            if ((_config.arm_arch > 5) || (rr != rb)) {
                 // IP is used to temporarily store the high word of the result from
                 // SMULL, so we make use of this to perform an overflow check, as
                 // ARM's MUL instruction can't set the overflow flag by itself.
                 // We can check for overflow using the following:
                 //   SMULL  rr, ip, ra, rb
                 //   CMP    ip, rr, ASR #31
                 // An explanation can be found in bug 521161. This sets Z if we did
                 // _not_ overflow, and clears it if we did.
                 ALUr_shi(AL, cmp, 1, SBZ, IP, rr, ASR_imm, 31);
                 SMULL(rr, IP, rb, ra);
             } else {
-                // config.arm_arch is ARMv5 (or below) and rr == rb, so we must
+                // _config.arm_arch is ARMv5 (or below) and rr == rb, so we must
                 // find a different way to encode the instruction.
 
                 // If possible, swap the arguments to avoid the restriction.
                 if (rr != ra) {
                     // We know that rr == rb, so this will be something like
                     // rX = rY * rX.
                     // Other than swapping ra and rb, this works in the same as
                     // as the ARMv6+ case, above.
@@ -2730,17 +2730,17 @@ Assembler::asm_ret(LIns *ins)
     releaseRegisters();
     assignSavedRegs();
     LIns *value = ins->oprnd1();
     if (ins->isop(LIR_ret)) {
         findSpecificRegFor(value, R0);
     }
     else {
         NanoAssert(ins->isop(LIR_fret));
-        if (config.arm_vfp) {
+        if (_config.arm_vfp) {
             Register reg = findRegFor(value, FpRegs);
             FMRRD(R0, R1, reg);
         } else {
             NanoAssert(value->isop(LIR_qjoin));
             findSpecificRegFor(value->oprnd1(), R0); // lo
             findSpecificRegFor(value->oprnd2(), R1); // hi
         }
     }
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -457,28 +457,28 @@ enum {
 
 // --------
 // Other operations.
 // --------
 
 // [_d_hi,_d] = _l * _r
 #define SMULL(_d, _d_hi, _l, _r)  do {                                                          \
         underrunProtect(4);                                                                     \
-        NanoAssert((config.arm_arch >= 6) || ((_d   ) != (_l)));                                \
-        NanoAssert((config.arm_arch >= 6) || ((_d_hi) != (_l)));                                \
+        NanoAssert((_config.arm_arch >= 6) || ((_d   ) != (_l)));                               \
+        NanoAssert((_config.arm_arch >= 6) || ((_d_hi) != (_l)));                               \
         NanoAssert(IsGpReg(_d) && IsGpReg(_d_hi) && IsGpReg(_l) && IsGpReg(_r));                \
         NanoAssert(((_d) != PC) && ((_d_hi) != PC) && ((_l) != PC) && ((_r) != PC));            \
         *(--_nIns) = (NIns)( COND_AL | 0xc00090 | (_d_hi)<<16 | (_d)<<12 | (_r)<<8 | (_l) );    \
         asm_output("smull %s, %s, %s, %s",gpn(_d),gpn(_d_hi),gpn(_l),gpn(_r));                  \
 } while(0)
 
 // _d = _l * _r
 #define MUL(_d, _l, _r)  do {                                               \
         underrunProtect(4);                                                 \
-        NanoAssert((config.arm_arch >= 6) || ((_d) != (_l)));               \
+        NanoAssert((_config.arm_arch >= 6) || ((_d) != (_l)));              \
         NanoAssert(IsGpReg(_d) && IsGpReg(_l) && IsGpReg(_r));              \
         NanoAssert(((_d) != PC) && ((_l) != PC) && ((_r) != PC));           \
         *(--_nIns) = (NIns)( COND_AL | (_d)<<16 | (_r)<<8 | 0x90 | (_l) );  \
         asm_output("mul %s, %s, %s",gpn(_d),gpn(_l),gpn(_r));               \
 } while(0)
 
 // RSBS _d, _r
 // _d = 0 - _r
@@ -824,49 +824,49 @@ enum {
     } while(0)
 
 /*
  * VFP
  */
 
 #define FMDRR(_Dm,_Rd,_Rn) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dm) && IsGpReg(_Rd) && IsGpReg(_Rn));       \
         *(--_nIns) = (NIns)( COND_AL | (0xC4<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmdrr %s,%s,%s", gpn(_Dm), gpn(_Rd), gpn(_Rn));    \
     } while (0)
 
 #define FMRRD(_Rd,_Rn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsGpReg(_Rd) && IsGpReg(_Rn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xC5<<20) | ((_Rn)<<16) | ((_Rd)<<12) | (0xB1<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmrrd %s,%s,%s", gpn(_Rd), gpn(_Rn), gpn(_Dm));    \
     } while (0)
 
 #define FMRDH(_Rd,_Dn) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
         asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn));                  \
     } while (0)
 
 #define FMRDL(_Rd,_Dn) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                            \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsGpReg(_Rd) && IsFpReg(_Dn));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (FpRegNum(_Dn)<<16) | ((_Rd)<<12) | (0xB<<8) | (1<<4) ); \
         asm_output("fmrdh %s,%s", gpn(_Rd), gpn(_Dn));                  \
     } while (0)
 
 #define FSTD_allowD7(_Dd,_Rn,_offs,_allowD7) do {                               \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                            \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2));         \
         NanoAssert((IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7)) && !IsFpReg(_Rn));     \
         int negflag = 1<<23;                                            \
         intptr_t offs = (_offs);                                        \
         if (_offs < 0) {                                                \
             negflag = 0<<23;                                            \
             offs = -(offs);                                             \
         }                                                               \
@@ -874,201 +874,201 @@ enum {
         asm_output("fstd %s,%s(%d)", gpn(_Dd), gpn(_Rn), _offs);    \
     } while (0)
 
 #define FSTD(_Dd,_Rn,_offs) \
         FSTD_allowD7(_Dd,_Rn,_offs,0)
 
 #define FLDD_chk(_Dd,_Rn,_offs,_chk) do {                               \
         if(_chk) underrunProtect(4);                                    \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2));         \
         NanoAssert(IsFpReg(_Dd) && !IsFpReg(_Rn));                      \
         int negflag = 1<<23;                                            \
         intptr_t offs = (_offs);                                        \
         if (_offs < 0) {                                                \
             negflag = 0<<23;                                            \
             offs = -(offs);                                             \
         }                                                               \
         *(--_nIns) = (NIns)( COND_AL | (0xD1<<20) | ((_Rn)<<16) | (FpRegNum(_Dd)<<12) | (0xB<<8) | negflag | ((offs>>2)&0xff) ); \
         asm_output("fldd %s,%s(%d)", gpn(_Dd), gpn(_Rn), _offs);       \
     } while (0)
 #define FLDD(_Dd,_Rn,_offs) FLDD_chk(_Dd,_Rn,_offs,1)
 
 #define FUITOD(_Dd,_Sm) do {                                            \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2D<<6) | (0<<5) | (0x7) ); \
         asm_output("fuitod %s,%s", gpn(_Dd), gpn(_Sm));                \
     } while (0)
 
 #define FNEGD(_Dd,_Dm) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xEB1<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fnegd %s,%s", gpn(_Dd), gpn(_Dm));                 \
     } while (0)
 
 #define FADDD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
         asm_output("faddd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FSUBD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE3<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fsubd %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FMULD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE2<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FDIVD(_Dd,_Dn,_Dm) do {                                         \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dn) && IsFpReg(_Dm));       \
         *(--_nIns) = (NIns)( COND_AL | (0xE8<<20) | (FpRegNum(_Dn)<<16) | (FpRegNum(_Dd)<<12) | (0xB0<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fmuld %s,%s,%s", gpn(_Dd), gpn(_Dn), gpn(_Dm));    \
     } while (0)
 
 #define FMSTAT() do {                               \
         underrunProtect(4);                         \
-        NanoAssert(config.arm_vfp);                 \
+        NanoAssert(_config.arm_vfp);                \
         *(--_nIns) = (NIns)( COND_AL | 0x0EF1FA10); \
         asm_output("fmstat");                       \
     } while (0)
 
 #define FCMPD(_Dd,_Dm,_E) do {                                          \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm));                       \
         NanoAssert(((_E)==0) || ((_E)==1));                             \
         *(--_nIns) = (NIns)( COND_AL | (0xEB4<<16) | (FpRegNum(_Dd)<<12) | (0xB<<8) | ((_E)<<7) | (0x4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fcmp%sd %s,%s", (((_E)==1)?"e":""), gpn(_Dd), gpn(_Dm)); \
     } while (0)
 
 #define FCPYD(_Dd,_Dm) do {                                             \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && IsFpReg(_Dm));                       \
         *(--_nIns) = (NIns)( COND_AL | (0xEB0<<16) | (FpRegNum(_Dd)<<12) | (0xB4<<4) | (FpRegNum(_Dm)) ); \
         asm_output("fcpyd %s,%s", gpn(_Dd), gpn(_Dm));                 \
     } while (0)
 
 #define FMRS(_Rd,_Sn) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn));                  \
     } while (0)
 
 /*
  * The following instructions can only be used with S14 as the
  * single-precision register; that limitation can be removed if
  * needed, but we'd have to teach NJ about all the single precision
  * regs, and their encoding is strange (top 4 bits usually in a block,
  * low bit elsewhere).
  */
 
 #define FSITOD(_Dd,_Sm) do {                                            \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(IsFpReg(_Dd) && ((_Sm) == S14));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xEB8<<16) | (FpRegNum(_Dd)<<12) | (0x2F<<6) | (0<<5) | (0x7) ); \
         asm_output("fsitod %s,%s", gpn(_Dd), gpn(_Sm));                \
     } while (0)
 
 #define FMSR(_Sn,_Rd) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd));                  \
     } while (0)
 
 #define FMRS(_Rd,_Sn) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE1<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmrs %s,%s", gpn(_Rd), gpn(_Sn));                  \
     } while (0)
 
 #define FMSR(_Sn,_Rd) do {                                              \
         underrunProtect(4);                                             \
-        NanoAssert(config.arm_vfp);                                     \
+        NanoAssert(_config.arm_vfp);                                    \
         NanoAssert(((_Sn) == S14) && IsGpReg(_Rd));                     \
         *(--_nIns) = (NIns)( COND_AL | (0xE0<<20) | (0x7<<16) | ((_Rd)<<12) | (0xA<<8) | (0<<7) | (0x1<<4) ); \
         asm_output("fmsr %s,%s", gpn(_Sn), gpn(_Rd));                  \
     } while (0)
 
 #define FCVTSD(_Sd,_Dm) do {                        \
         underrunProtect(4);                         \
-        NanoAssert(config.arm_vfp);                 \
+        NanoAssert(_config.arm_vfp);                \
         NanoAssert(((_Sd) == S14) && IsFpReg(_Dm)); \
         *(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (0x7<<12) | (0xBC<<4) | (FpRegNum(_Dm)) ); \
         asm_output("[0x%08x] fcvtsd s14,%s", *_nIns, gpn(_Dm));                          \
     } while (0)
 
 #define FCVTDS_allowD7(_Dd,_Sm,_allowD7) do {       \
         underrunProtect(4);                         \
-        NanoAssert(config.arm_vfp);                 \
+        NanoAssert(_config.arm_vfp);                \
         NanoAssert(((_Sm) == S14) && (IsFpReg(_Dd) || ((_allowD7) && (_Dd) == D7))); \
         *(--_nIns) = (NIns)( COND_AL | (0xEB7<<16) | (FpRegNum(_Dd)<<12) | (0xAC<<4) | (0x7) ); \
         asm_output("[0x%08x] fcvtds %s,s14", *_nIns, gpn(_Dd));      \
     } while(0)
 
 #define FCVTDS(_Dd,_Sm) \
     FCVTDS_allowD7(_Dd,_Sm,0)
 
 #define FLDS(_Sd,_Rn,_offs) do {                                \
         underrunProtect(4);                                     \
-        NanoAssert(config.arm_vfp);                             \
+        NanoAssert(_config.arm_vfp);                            \
         NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn));            \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
         int addflag = 1<<23;                                    \
         intptr_t offs = (_offs);                                \
         if (offs < 0) {                                         \
             addflag = 0;                                        \
             offs = -offs;                                       \
         }                                                       \
         *(--_nIns) = (NIns)( COND_AL | (0xD1<<20) | ((_Rn)<<16) | (0x7<<12) | (0xA << 8) | addflag | ((offs>>2)&0xff) ); \
         asm_output("[0x%08x] flds s14, [%s, #%d]", *_nIns, gpn(_Rn), (_offs)); \
     } while (0)
 
 #define FSTS(_Sd,_Rn,_offs) do {                                \
         underrunProtect(4);                                     \
-        NanoAssert(config.arm_vfp);                             \
+        NanoAssert(_config.arm_vfp);                            \
         NanoAssert(((_Sd) == S14) && !IsFpReg(_Rn));            \
         NanoAssert((((_offs) & 3) == 0) && isS8((_offs) >> 2)); \
         int addflag = 1<<23;                                    \
         intptr_t offs = (_offs);                                \
         if (offs < 0) {                                         \
             addflag = 0;                                        \
             offs = -offs;                                       \
         }                                                       \
         *(--_nIns) = (NIns)( COND_AL | (0xD0<<20) | ((_Rn)<<16) | (0x7<<12) | (0xA << 8) | addflag | ((offs>>2)&0xff) ); \
         asm_output("[0x%08x] fsts s14, [%s, #%d]", *_nIns, gpn(_Rn), (_offs)); \
     } while (0)
 
 #define FTOSID(_Sd,_Dm) do {                                   \
         underrunProtect(4);                                    \
-        NanoAssert(config.arm_vfp);                            \
+        NanoAssert(_config.arm_vfp);                           \
         NanoAssert(((_Sd) == S14) && IsFpReg(_Dm));            \
         *(--_nIns) = (NIns)( COND_AL | (0xEBD<<16) | (0x7<<12) | (0xB4<<4) | FpRegNum(_Dm) ); \
         asm_output("ftosid s14, %s", gpn(_Dm));                \
     } while (0)
 
 } // namespace nanojit
 #endif // __nanojit_NativeARM__
--- a/js/src/nanojit/NativeMIPS.cpp
+++ b/js/src/nanojit/NativeMIPS.cpp
@@ -1685,24 +1685,23 @@ namespace nanojit
             TODO(unknown_patch);
         // TAG("nPatchBranch(branch=%p target=%p)", branch, target);
     }
 
     void
     Assembler::nFragExit(LIns *guard)
     {
         SideExit *exit = guard->record()->exit;
-        bool trees = config.tree_opt;
         Fragment *frag = exit->target;
         bool destKnown = (frag && frag->fragEntry);
 
         // Generate jump to epilogue and initialize lr.
 
         // If the guard already exists, use a simple jump.
-        if (destKnown && !trees) {
+        if (destKnown) {
             // j     _fragEntry
             //  move $v0,$zero
             MOVE(V0, ZERO);
             asm_j(frag->fragEntry, false);
         }
         else {
             // Target doesn't exist. Jump to an epilogue for now.
             // This can be patched later.
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -65,55 +65,18 @@ namespace nanojit
 
     const static uint8_t max_abi_regs[] = {
         2, /* ABI_FASTCALL */
         1, /* ABI_THISCALL */
         0, /* ABI_STDCALL */
         0  /* ABI_CDECL */
     };
 
-    static bool CheckForSSE2()
+    void Assembler::nInit(AvmCore*)
     {
-        int features = 0;
-    #if defined _MSC_VER
-        __asm
-        {
-            pushad
-            mov eax, 1
-            cpuid
-            mov features, edx
-            popad
-        }
-    #elif defined __GNUC__
-        asm("xchg %%esi, %%ebx\n" /* we can't clobber ebx on gcc (PIC register) */
-            "mov $0x01, %%eax\n"
-            "cpuid\n"
-            "mov %%edx, %0\n"
-            "xchg %%esi, %%ebx\n"
-            : "=m" (features)
-            : /* We have no inputs */
-            : "%eax", "%esi", "%ecx", "%edx"
-           );
-    #elif defined __SUNPRO_C || defined __SUNPRO_CC
-        asm("push %%ebx\n"
-            "mov $0x01, %%eax\n"
-            "cpuid\n"
-            "pop %%ebx\n"
-            : "=d" (features)
-            : /* We have no inputs */
-            : "%eax", "%ecx"
-           );
-    #endif
-        return (features & (1<<26)) != 0;
-    }
-
-    void Assembler::nInit(AvmCore* core)
-    {
-        (void) core;
-        config.sse2 = config.sse2 && CheckForSSE2();
     }
 
     void Assembler::nBeginAssembly() {
         max_stk_args = 0;
     }
 
     NIns* Assembler::genPrologue()
     {
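
The CheckForSSE2 probe deleted from Nativei386.cpp above presumably moves into the new njconfig.cpp, where the Config constructor can run it once at startup instead of inside Assembler::nInit(). A speculative sketch of that relocation, reusing the cpuid sequence removed in this hunk (the placement and the njDetectSSE2 name are assumptions, not part of this patch):

    // Hypothetical njconfig.cpp fragment: probe for SSE2 once when Config is
    // constructed. Only the GCC path from the removed code is reproduced here;
    // the real file would also keep the MSVC and Sun compiler variants.
    static bool njDetectSSE2()
    {
        int features = 0;
    #if defined __GNUC__
        asm("xchg %%esi, %%ebx\n"   // ebx is the PIC register under gcc; don't clobber it
            "mov $0x01, %%eax\n"
            "cpuid\n"
            "mov %%edx, %0\n"
            "xchg %%esi, %%ebx\n"
            : "=m" (features)
            : /* no inputs */
            : "%eax", "%esi", "%ecx", "%edx");
    #endif
        return (features & (1 << 26)) != 0;   // CPUID.1:EDX bit 26 == SSE2
    }

    nanojit::Config::Config()
    {
        // Assumed default-setting; the actual constructor covers all backends.
        i386_sse2 = i386_use_cmov = njDetectSSE2();
        i386_fixed_esp = 0;
    }
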
@@ -140,17 +103,16 @@ namespace nanojit
         PUSHr(FP); // Save caller's FP.
 
         return fragEntry;
     }
 
     void Assembler::nFragExit(LInsp guard)
     {
         SideExit *exit = guard->record()->exit;
-        bool trees = config.tree_opt;
         Fragment *frag = exit->target;
         GuardRecord *lr = 0;
         bool destKnown = (frag && frag->fragEntry);
 
         // Generate jump to epilog and initialize lr.
         // If the guard is LIR_xtbl, use a jump table with epilog in every entry
         if (guard->isop(LIR_xtbl)) {
             lr = guard->record();
@@ -158,17 +120,17 @@ namespace nanojit
             SwitchInfo* si = guard->record()->exit->switchInfo;
             if (!_epilogue)
                 _epilogue = genEpilogue();
             emitJumpTable(si, _epilogue);
             JMP_indirect(r);
             LEAmi4(r, si->table, r);
         } else {
             // If the guard already exists, use a simple jump.
-            if (destKnown && !trees) {
+            if (destKnown) {
                 JMP(frag->fragEntry);
                 lr = 0;
             } else {  // Target doesn't exist. Jump to an epilogue for now. This can be patched later.
                 if (!_epilogue)
                     _epilogue = genEpilogue();
                 lr = guard->record();
                 JMP_long(_epilogue);
                 lr->jmp = _nIns;
@@ -232,17 +194,17 @@ namespace nanojit
         // x86-32 requires dynamic ESP alignment in prolog/epilog and static
         // esp-alignment here.
         uint32_t align = 4;//NJ_ALIGN_STACK;
 #else
         uint32_t align = NJ_ALIGN_STACK;
 #endif
 
         if (pushsize) {
-            if (config.fixed_esp) {
+            if (_config.i386_fixed_esp) {
                 // In case of fastcall, stdcall and thiscall the callee cleans up the stack,
                 // and since we reserve max_stk_args words in the prolog to call functions
                 // and don't adjust the stack pointer individually for each call we have
                 // to undo here any changes the callee just did to the stack.
                 if (abi != ABI_CDECL)
                     SUBi(SP, pushsize);
             } else {
                 // stack re-alignment
@@ -276,34 +238,34 @@ namespace nanojit
 
         ArgSize sizes[MAXARGS];
         uint32_t argc = call->get_sizes(sizes);
         int32_t stkd = 0;
 
         if (indirect) {
             argc--;
             asm_arg(ARGSIZE_P, ins->arg(argc), EAX, stkd);
-            if (!config.fixed_esp)
+            if (!_config.i386_fixed_esp)
                 stkd = 0;
         }
 
         for(uint32_t i=0; i < argc; i++)
         {
             uint32_t j = argc-i-1;
             ArgSize sz = sizes[j];
             Register r = UnspecifiedReg;
             if (n < max_regs && sz != ARGSIZE_F) {
                 r = argRegs[n++]; // tell asm_arg what reg to use
             }
             asm_arg(sz, ins->arg(j), r, stkd);
-            if (!config.fixed_esp)
+            if (!_config.i386_fixed_esp)
                 stkd = 0;
         }
 
-        if (config.fixed_esp) {
+        if (_config.i386_fixed_esp) {
             if (pushsize > max_stk_args)
                 max_stk_args = pushsize;
         } else if (extra > 0) {
             SUBi(SP, extra);
         }
     }
 
     Register Assembler::nRegisterAllocFromSet(RegisterMask set)
@@ -337,17 +299,17 @@ namespace nanojit
         return r;
     }
 
     void Assembler::nRegisterResetAll(RegAlloc& a)
     {
         // add scratch registers to our free list for the allocator
         a.clear();
         a.free = SavedRegs | ScratchRegs;
-        if (!config.sse2)
+        if (!_config.i386_sse2)
             a.free &= ~XmmRegs;
         debug_only( a.managed = a.free; )
     }
 
     void Assembler::nPatchBranch(NIns* branch, NIns* targ)
     {
         intptr_t offset = intptr_t(targ) - intptr_t(branch);
         if (branch[0] == JMP32) {
@@ -606,17 +568,17 @@ namespace nanojit
 
     void Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
     {
         Register rb = getBaseReg(base, dr, GpRegs);
 
         if (op == LIR_st32f) {
             bool pop = !value->isInReg();
             Register rv = ( pop
-                          ? findRegFor(value, config.sse2 ? XmmRegs : FpRegs)
+                          ? findRegFor(value, _config.i386_sse2 ? XmmRegs : FpRegs)
                           : value->getReg() );
 
             if (rmask(rv) & XmmRegs) {
                 // need a scratch reg
                 Register rt = registerAllocTmp(XmmRegs);
 
                 // cvt to single-precision and store
                 SSE_STSS(dr, rb, rt);
@@ -636,28 +598,28 @@ namespace nanojit
             // It may be live in an FPU reg.  Either way, don't put it in an
             // FPU reg just to load & store it.
 
             // a) If we know it's not a double, this is right.
             // b) If we guarded that it's a double, this store could be on the
             //    side exit, copying a non-double.
             // c) Maybe it's a double just being stored.  Oh well.
 
-            if (config.sse2) {
+            if (_config.i386_sse2) {
                 Register rv = findRegFor(value, XmmRegs);
                 SSE_STQ(dr, rb, rv);
             } else {
                 int da = findMemFor(value);
                 asm_mmq(rb, dr, FP, da);
             }
 
         } else {
             bool pop = !value->isInReg();
             Register rv = ( pop
-                          ? findRegFor(value, config.sse2 ? XmmRegs : FpRegs)
+                          ? findRegFor(value, _config.i386_sse2 ? XmmRegs : FpRegs)
                           : value->getReg() );
 
             if (rmask(rv) & XmmRegs) {
                 SSE_STQ(dr, rb, rv);
             } else {
                 FSTQ(pop?1:0, dr, rb);
             }
         }
@@ -665,17 +627,17 @@ namespace nanojit
 
     // Copy 64 bits: (rd+dd) <- (rs+ds).
     //
     void Assembler::asm_mmq(Register rd, int dd, Register rs, int ds)
     {
         // Value is either a 64-bit struct or maybe a float that isn't live in
         // an FPU reg.  Either way, avoid allocating an FPU reg just to load
         // and store it.
-        if (config.sse2) {
+        if (_config.i386_sse2) {
             Register t = registerAllocTmp(XmmRegs);
             SSE_STQ(dd, rd, t);
             SSE_LDQ(t, ds, rs);
         } else {
             // We avoid copying via the FP stack because it's slow and likely
             // to cause spills.
             Register t = registerAllocTmp(GpRegs & ~(rmask(rd)|rmask(rs)));
             ST(rd, dd+4, t);
@@ -818,17 +780,17 @@ namespace nanojit
     void Assembler::asm_fcond(LInsp ins)
     {
         LOpcode opcode = ins->opcode();
         Register r = prepareResultReg(ins, AllowableFlagRegs);
 
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r,r);
 
-        if (config.sse2) {
+        if (_config.i386_sse2) {
             // LIR_flt and LIR_fgt are handled by the same case because
             // asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a).  Likewise
             // for LIR_fle/LIR_fge.
             switch (opcode) {
             case LIR_feq:   SETNP(r);       break;
             case LIR_flt:
             case LIR_fgt:   SETA(r);        break;
             case LIR_fle:
@@ -1347,17 +1309,17 @@ namespace nanojit
 #else
     static const AVMPLUS_ALIGN16(uint32_t) negateMask[] = {0,0x80000000,0,0};
 #endif
 
     void Assembler::asm_fneg(LInsp ins)
     {
         LIns *lhs = ins->oprnd1();
 
-        if (config.sse2) {
+        if (_config.i386_sse2) {
             Register rr = prepareResultReg(ins, XmmRegs);
 
             // If 'lhs' isn't in a register, it can be clobbered by 'ins'.
             Register ra;
             if (!lhs->isInReg()) {
                 ra = rr;
             } else if (!(rmask(lhs->getReg()) & XmmRegs)) {
                 // We need to evict lhs from x87Regs, which then puts us in
@@ -1418,17 +1380,17 @@ namespace nanojit
 
                 } else {
                     // This is the last use, so fine to assign it
                     // to the scratch reg, it's dead after this point.
                     findSpecificRegForUnallocated(ins, r);
                 }
             }
             else {
-                if (config.fixed_esp)
+                if (_config.i386_fixed_esp)
                     asm_stkarg(ins, stkd);
                 else
                     asm_pusharg(ins);
             }
         }
         else
         {
             NanoAssert(sz == ARGSIZE_F);
@@ -1497,26 +1459,26 @@ namespace nanojit
             // see https://bugzilla.mozilla.org/show_bug.cgi?id=491084
 
             /* It's possible that the same LIns* with r=FST0 will appear in the argument list more
              * than once.  In this case FST0 will not have been evicted and the multiple pop
              * actions will unbalance the FPU stack.  A quick fix is to always evict FST0 manually.
              */
             evictIfActive(FST0);
         }
-        if (!config.fixed_esp)
+        if (!_config.i386_fixed_esp)
             SUBi(ESP, 8);
 
         stkd += sizeof(double);
     }
 
     void Assembler::asm_fop(LInsp ins)
     {
         LOpcode op = ins->opcode();
-        if (config.sse2)
+        if (_config.i386_sse2)
         {
             LIns *lhs = ins->oprnd1();
             LIns *rhs = ins->oprnd2();
 
             RegisterMask allow = XmmRegs;
             Register rb = UnspecifiedReg;
             if (lhs != rhs) {
                 rb = findRegFor(rhs,allow);
@@ -1681,17 +1643,17 @@ namespace nanojit
 
         freeResourcesOf(ins);
     }
 
     void Assembler::asm_f2i(LInsp ins)
     {
         LIns *lhs = ins->oprnd1();
 
-        if (config.sse2) {
+        if (_config.i386_sse2) {
             Register rr = prepareResultReg(ins, GpRegs);
             Register ra = findRegFor(lhs, XmmRegs);
             SSE_CVTSD2SI(rr, ra);
         } else {
             int pop = !lhs->isInReg();
             findSpecificRegFor(lhs, FST0);
             if (ins->isInReg())
                 evict(ins);
@@ -1715,17 +1677,17 @@ namespace nanojit
         }
     }
 
     NIns* Assembler::asm_fbranch(bool branchOnFalse, LIns *cond, NIns *targ)
     {
         NIns* at;
         LOpcode opcode = cond->opcode();
 
-        if (config.sse2) {
+        if (_config.i386_sse2) {
             // LIR_flt and LIR_fgt are handled by the same case because
             // asm_fcmp() converts LIR_flt(a,b) to LIR_fgt(b,a).  Likewise
             // for LIR_fle/LIR_fge.
             if (branchOnFalse) {
                 // op == LIR_xf
                 switch (opcode) {
                 case LIR_feq:   JP(targ);       break;
                 case LIR_flt:
@@ -1764,17 +1726,17 @@ namespace nanojit
     void Assembler::asm_fcmp(LIns *cond)
     {
         LOpcode condop = cond->opcode();
         NanoAssert(condop >= LIR_feq && condop <= LIR_fge);
         LIns* lhs = cond->oprnd1();
         LIns* rhs = cond->oprnd2();
         NanoAssert(lhs->isF64() && rhs->isF64());
 
-        if (config.sse2) {
+        if (_config.i386_sse2) {
             // First, we convert (a < b) into (b > a), and (a <= b) into (b >= a).
             if (condop == LIR_flt) {
                 condop = LIR_fgt;
                 LIns* t = lhs; lhs = rhs; rhs = t;
             } else if (condop == LIR_fle) {
                 condop = LIR_fge;
                 LIns* t = lhs; lhs = rhs; rhs = t;
             }
--- a/js/src/nanojit/avmplus.cpp
+++ b/js/src/nanojit/avmplus.cpp
@@ -44,17 +44,17 @@
 extern "C" bool
 blx_lr_broken() {
     return false;
 }
 #endif
 
 using namespace avmplus;
 
-Config AvmCore::config;
+nanojit::Config AvmCore::config;
 
 void
 avmplus::AvmLog(char const *msg, ...) {
     va_list ap;
     va_start(ap, msg);
     VMPI_vfprintf(stderr, msg, ap);
     va_end(ap);
 }
--- a/js/src/nanojit/avmplus.h
+++ b/js/src/nanojit/avmplus.h
@@ -32,16 +32,17 @@
  * under the terms of any one of the MPL, the GPL or the LGPL.
  *
  ***** END LICENSE BLOCK ***** */
 
 #ifndef avm_h___
 #define avm_h___
 
 #include "VMPI.h"
+#include "njconfig.h"
 
 #if !defined(AVMPLUS_LITTLE_ENDIAN) && !defined(AVMPLUS_BIG_ENDIAN)
 #ifdef IS_BIG_ENDIAN
 #define AVMPLUS_BIG_ENDIAN
 #else
 #define AVMPLUS_LITTLE_ENDIAN
 #endif
 #endif
@@ -176,70 +177,16 @@ struct JSContext;
 #endif
 
 namespace avmplus {
 
     typedef int FunctionID;
 
     extern void AvmLog(char const *msg, ...);
 
-    class Config
-    {
-    public:
-        Config() {
-            memset(this, 0, sizeof(Config));
-#ifdef DEBUG
-            verbose = false;
-            verbose_addrs = 1;
-            verbose_exits = 1;
-            verbose_live = 1;
-            show_stats = 1;
-#endif
-        }
-
-        uint32_t tree_opt:1;
-        uint32_t quiet_opt:1;
-        uint32_t verbose:1;
-        uint32_t verbose_addrs:1;
-        uint32_t verbose_live:1;
-        uint32_t verbose_exits:1;
-        uint32_t show_stats:1;
-
-#if defined (AVMPLUS_IA32)
-    // Whether or not we can use SSE2 instructions and conditional moves.
-        bool sse2;
-        bool use_cmov;
-        // Whether to use a virtual stack pointer
-        bool fixed_esp;
-#endif
-
-#if defined (AVMPLUS_ARM)
-        // Whether or not to generate VFP instructions.
-# if defined (NJ_FORCE_SOFTFLOAT)
-        static const bool arm_vfp = false;
-# else
-        bool arm_vfp;
-# endif
-
-        // The ARM architecture version.
-# if defined (NJ_FORCE_ARM_ARCH_VERSION)
-        static const unsigned int arm_arch = NJ_FORCE_ARM_ARCH_VERSION;
-# else
-        unsigned int arm_arch;
-# endif
-
-#endif
-
-#if defined (NJ_FORCE_SOFTFLOAT)
-        static const bool soft_float = true;
-#else
-        bool soft_float;
-#endif
-    };
-
     static const int kstrconst_emptyString = 0;
 
     class AvmInterpreter
     {
         class Labels {
         public:
             const char* format(const void* ip)
             {
@@ -271,48 +218,35 @@ namespace avmplus {
     };
 
     class AvmCore
     {
     public:
         AvmInterpreter interp;
         AvmConsole console;
 
-        static Config config;
+        static nanojit::Config config;
 
 #ifdef AVMPLUS_IA32
         static inline bool
         use_sse2()
         {
-            return config.sse2;
+            return config.i386_sse2;
         }
 #endif
 
         static inline bool
         use_cmov()
         {
 #ifdef AVMPLUS_IA32
-            return config.use_cmov;
+            return config.i386_use_cmov;
 #else
         return true;
 #endif
         }
-
-        static inline bool
-        quiet_opt()
-        {
-            return config.quiet_opt;
-        }
-
-        static inline bool
-        verbose()
-        {
-            return config.verbose;
-        }
-
     };
 
     /**
      * Bit vectors are an efficient method of keeping True/False information
      * on a set of items or conditions. Class BitSet provides functions
      * to manipulate individual bits in the vector.
      *
      * Since most vectors are rather small an array of longs is used by
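[Editor's note: illustrative sketch, not part of the patch.] With the old avmplus::Config class removed, AvmCore::config is now a nanojit::Config and the remaining helpers read the renamed fields. A minimal sketch of embedder-side usage (canUseSse2Path is hypothetical):

    #include "avmplus.h"

    // Sketch only: embedder code keeps calling the AvmCore helper, which now
    // reads nanojit::Config::i386_sse2 through the static AvmCore::config.
    static bool canUseSse2Path()
    {
    #ifdef AVMPLUS_IA32
        return avmplus::AvmCore::use_sse2();
    #else
        return false;
    #endif
    }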
--- a/js/src/nanojit/manifest.mk
+++ b/js/src/nanojit/manifest.mk
@@ -67,16 +67,17 @@ endif
 
 avmplus_CXXSRCS := $(avmplus_CXXSRCS) \
   $(curdir)/Allocator.cpp \
   $(curdir)/Assembler.cpp \
   $(curdir)/CodeAlloc.cpp \
   $(curdir)/Containers.cpp \
   $(curdir)/Fragmento.cpp \
   $(curdir)/LIR.cpp \
+  $(curdir)/njconfig.cpp \
   $(curdir)/RegAlloc.cpp \
   $(curdir)/$(nanojit_cpu_cxxsrc) \
   $(NULL)
 
 ifeq ($(COMPILER),VS)
 # Disable the 'cast truncates constant value' warning, incurred by
 # macros encoding instruction operands in machine code fields.
 $(curdir)/Assembler.obj $(curdir)/Nativei386.obj: avmplus_CXXFLAGS += -wd4310
--- a/js/src/nanojit/nanojit.h
+++ b/js/src/nanojit/nanojit.h
@@ -260,24 +260,24 @@ namespace nanojit {
     public:
         // All Nanojit and jstracer printing should be routed through
         // this function.
         void printf( const char* format, ... ) PRINTF_CHECK(2,3);
 
         // An OR of LC_Bits values, indicating what should be output
         uint32_t lcbits;
     };
-
 }
 
 // -------------------------------------------------------------------
 // END debug-logging definitions
 // -------------------------------------------------------------------
 
 
+#include "njconfig.h"
 #include "Allocator.h"
 #include "Containers.h"
 #include "Native.h"
 #include "CodeAlloc.h"
 #include "LIR.h"
 #include "RegAlloc.h"
 #include "Fragmento.h"
 #include "Assembler.h"
new file mode 100644
--- /dev/null
+++ b/js/src/nanojit/njconfig.cpp
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is [Open Source Virtual Machine].
+ *
+ * The Initial Developer of the Original Code is
+ * Adobe System Incorporated.
+ * Portions created by the Initial Developer are Copyright (C) 2004-2007
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Adobe AS3 Team
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "nanojit.h"
+
+#ifdef FEATURE_NANOJIT
+
+namespace nanojit
+{
+#ifdef NANOJIT_IA32
+    static bool CheckForSSE2()
+    {
+        int features = 0;
+    #if defined _MSC_VER
+        __asm
+        {
+            pushad
+            mov eax, 1
+            cpuid
+            mov features, edx
+            popad
+        }
+    #elif defined __GNUC__
+        asm("xchg %%esi, %%ebx\n" /* we can't clobber ebx on gcc (PIC register) */
+            "mov $0x01, %%eax\n"
+            "cpuid\n"
+            "mov %%edx, %0\n"
+            "xchg %%esi, %%ebx\n"
+            : "=m" (features)
+            : /* We have no inputs */
+            : "%eax", "%esi", "%ecx", "%edx"
+           );
+    #elif defined __SUNPRO_C || defined __SUNPRO_CC
+        asm("push %%ebx\n"
+            "mov $0x01, %%eax\n"
+            "cpuid\n"
+            "pop %%ebx\n"
+            : "=d" (features)
+            : /* We have no inputs */
+            : "%eax", "%ecx"
+           );
+    #endif
+        return (features & (1<<26)) != 0;
+    }
+#endif
+
+    Config::Config()
+    {
+        VMPI_memset(this, 0, sizeof(*this));
+
+        cseopt = true;
+
+#ifdef NANOJIT_IA32
+        i386_sse2 = CheckForSSE2();
+        i386_use_cmov = true;
+        i386_fixed_esp = false;
+#endif
+
+#if defined(NANOJIT_ARM)
+
+        NanoStaticAssert(NJ_COMPILER_ARM_ARCH >= 5 && NJ_COMPILER_ARM_ARCH <= 7);
+
+        arm_arch = NJ_COMPILER_ARM_ARCH;
+        arm_vfp = (arm_arch >= 7);
+
+    #if defined(DEBUG) || defined(_DEBUG)
+        arm_show_stats = true;
+    #else
+        arm_show_stats = false;
+    #endif
+
+        soft_float = !arm_vfp;
+
+#endif // NANOJIT_ARM
+    }
+}
+#endif /* FEATURE_NANOJIT */
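[Editor's note: illustrative sketch, not part of the patch.] The probe above executes CPUID leaf 1 and tests bit 26 of EDX, the SSE2 feature flag. For reference, a minimal sketch of the same check using GCC's <cpuid.h> helper instead of hand-written inline asm, assuming a GCC-compatible x86 toolchain:

    #include <cpuid.h>

    // Sketch only: same SSE2 probe as CheckForSSE2(), via __get_cpuid().
    static bool CheckForSSE2_sketch()
    {
        unsigned int eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))  // CPUID leaf 1: feature flags
            return false;
        return (edx & (1u << 26)) != 0;               // EDX bit 26 == SSE2
    }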
new file mode 100644
--- /dev/null
+++ b/js/src/nanojit/njconfig.h
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is [Open Source Virtual Machine].
+ *
+ * The Initial Developer of the Original Code is
+ * Adobe System Incorporated.
+ * Portions created by the Initial Developer are Copyright (C) 2004-2007
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Adobe AS3 Team
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef __njconfig_h__
+#define __njconfig_h__
+
+#include "avmplus.h"
+
+// Do not include nanojit.h here; this file should be usable without it.
+
+#ifdef FEATURE_NANOJIT
+
+namespace nanojit
+{
+    /***
+     * A struct used to configure the assumptions that Assembler can make when
+     * generating code. The ctor will fill in all fields with the most reasonable
+     * values it can derive from compiler flags and/or runtime detection, but
+     * the embedder is free to override any or all of them as it sees fit. 
+     * Using the ctor-provided default setup is guaranteed to provide a safe
+     * runtime environment (though perhaps suboptimal in some cases), so an embedder
+     * should replace these values with great care.
+     *
+     * Note that although many fields are used on only specific architecture(s), 
+     * this struct is deliberately declared without ifdef's for them, so (say) ARM-specific
+     * fields are declared everywhere. This reduces build dependencies (so that this
+     * file does not require nanojit.h to be included beforehand) and also reduces
+     * clutter in this file; the extra storage space required is trivial since most
+     * fields are single bits.
+     */
+    struct Config
+    {
+    public:
+        // fills in reasonable default values for all fields.
+        Config();
+        
+        // ARM architecture to assume when generating instructions (currently, 5 <= arm_arch <= 7)
+        uint8_t arm_arch;
+
+        // If true, use CSE.
+        uint32_t cseopt:1;
+        
+        // Can we use SSE2 instructions? (x86-only)
+        uint32_t i386_sse2:1;
+        
+        // Can we use cmov instructions? (x86-only)
+        uint32_t i386_use_cmov:1;
+        
+        // Should we use a virtual stack pointer? (x86-only)
+        uint32_t i386_fixed_esp:1;
+
+        // Whether or not to generate VFP instructions. (ARM only)
+        uint32_t arm_vfp:1;
+        
+        // @todo, document me
+        uint32_t arm_show_stats:1;
+
+        // If true, use softfloat for all floating point operations, 
+        // whether or not an FPU is present. (ARM only for now, but might also include MIPS in the future)
+        uint32_t soft_float:1;
+    };
+}
+
+#endif // FEATURE_NANOJIT
+#endif // __njconfig_h__
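[Editor's note: illustrative sketch, not part of the patch.] As the comment above notes, the ctor supplies safe detected defaults and the embedder may override individual fields. A minimal sketch of that pattern (makeConfig is hypothetical; the field names come from this header):

    #include "njconfig.h"

    // Sketch only: start from the ctor's detected defaults, then override.
    nanojit::Config makeConfig(bool forceSoftFloat)
    {
        nanojit::Config cfg;       // ctor fills in safe, detected defaults
        cfg.i386_fixed_esp = 1;    // example override; fields exist on every target
        if (forceSoftFloat)
            cfg.soft_float = 1;    // softfloat even if an FPU is present
        return cfg;
    }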
new file mode 100644
--- /dev/null
+++ b/js/src/nanojit/njcpudetect.h
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is [Open Source Virtual Machine].
+ *
+ * The Initial Developer of the Original Code is
+ * Adobe System Incorporated.
+ * Portions created by the Initial Developer are Copyright (C) 2004-2007
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Adobe AS3 Team
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef __njcpudetect__
+#define __njcpudetect__
+
+/***
+ * Note: this file should not include *any* other files, nor should it wrap
+ * itself in ifdef FEATURE_NANOJIT, nor should it do anything other than
+ * define preprocessor symbols.
+ */
+
+/***
+ * NJ_COMPILER_ARM_ARCH attempts to capture the minimum ARM architecture
+ * that the C++ compiler is targeting. Note that although Config::arm_arch
+ * is initialized to this value by default, there is no requirement that they
+ * be in sync.
+ *
+ * Note, this is done via #define so that downstream preprocessor usage can
+ * examine it, but please don't attempt to redefine it.
+ *
+ * Note, this is deliberately not encased in "ifdef NANOJIT_ARM", as this file
+ * may be included before that is defined. On non-ARM platforms we will hit the
+ * "Unable to determine" case.
+ */
+
+// GCC and RealView usually define __ARM_ARCH__
+#if defined(__ARM_ARCH__)
+    
+    #define NJ_COMPILER_ARM_ARCH __ARM_ARCH__
+
+// ok, try well-known GCC flags ( see http://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html )
+#elif     defined(__ARM_ARCH_7__) || \
+        defined(__ARM_ARCH_7A__) || \
+        defined(__ARM_ARCH_7M__) || \
+        defined(__ARM_ARCH_7R__)
+
+    #define NJ_COMPILER_ARM_ARCH 7
+
+#elif   defined(__ARM_ARCH_6__) || \
+        defined(__ARM_ARCH_6J__) || \
+        defined(__ARM_ARCH_6T2__) || \
+        defined(__ARM_ARCH_6Z__) || \
+        defined(__ARM_ARCH_6ZK__) || \
+        defined(__ARM_ARCH_6M__)
+
+    #define NJ_COMPILER_ARM_ARCH 6
+
+#elif   defined(__ARM_ARCH_5__) || \
+        defined(__ARM_ARCH_5T__) || \
+        defined(__ARM_ARCH_5E__) || \
+        defined(__ARM_ARCH_5TE__)
+
+    #define NJ_COMPILER_ARM_ARCH 5
+
+// Visual C has its own mojo
+#elif defined(_MSC_VER) && defined(_M_ARM)
+
+    #define NJ_COMPILER_ARM_ARCH _M_ARM
+
+#else
+    
+    // non-numeric value
+    #define NJ_COMPILER_ARM_ARCH "Unable to determine valid NJ_COMPILER_ARM_ARCH (nanojit only supports ARMv5 or later)"
+
+#endif
+
+#endif // __njcpudetect__
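[Editor's note: illustrative sketch, not part of the patch.] NJ_COMPILER_ARM_ARCH is intended for downstream preprocessor checks (and seeds Config::arm_arch in njconfig.cpp above). Because the macro deliberately expands to a non-numeric string on non-ARM targets, any arithmetic comparison must sit inside an ARM guard; a minimal sketch (SKETCH_ARMV6_OK is a hypothetical flag):

    #include "njcpudetect.h"

    // Sketch only: compare the macro under an ARM guard so the non-numeric
    // "Unable to determine" expansion is never evaluated on other targets.
    #if defined(NANOJIT_ARM)
    #  if NJ_COMPILER_ARM_ARCH >= 6
    #    define SKETCH_ARMV6_OK 1
    #  else
    #    define SKETCH_ARMV6_OK 0
    #  endif
    #endif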