Bug 595034 - nanojit: harden via random no-op instruction insertion (r+nnethercote,wmaddox,edwsmith)
author      Rick Reitmaier <rreitmai@adobe.com>
date        Mon, 01 Nov 2010 14:02:18 -0700
changeset   57680 3be4ae3c2b98744eb3a502384fdbc60429024c9b
parent      57679 da076e4b0ad85da83c8f92b5011d11b6641a8c17
child       57681 2ed0911faae3c7943620f29cf72eff5e1c32f24f
push id     1
push user   shaver@mozilla.com
push date   Tue, 04 Jan 2011 17:58:04 +0000
bugs        595034
milestone   2.0b8pre
Bug 595034 - nanojit: harden via random no-op instruction insertion (r+nnethercote,wmaddox,edwsmith) Insert no-op instructions when JIT'ing with the intention of making exploitable patterns within the generated code less predictable.
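
The core of the change is an interval-based trigger in Assembler::gen(): a counter seeded with a random byte count is decremented by the size of each chunk of emitted machine code, and when it goes negative a random no-op is emitted and the counter is reseeded. The following standalone sketch (not nanojit code; rand() stands in for nanojit's Noise source, and shouldInsertNop is a hypothetical helper introduced only for illustration) shows the mechanism:

    #include <cstdint>
    #include <cstdlib>

    // Stand-in for nanojit's Noise::getValue(maxValue): a random value in [0, maxValue].
    static uint32_t getValue(uint32_t maxValue) {
        return static_cast<uint32_t>(rand()) % (maxValue + 1);
    }

    // Mirrors noiseForNopInsertion(): at most one no-op every 128-1151 bytes.
    static int32_t noiseForNopInsertion() {
        return static_cast<int32_t>(getValue(1023)) + 128;
    }

    // Called once per LIR instruction with the number of machine-code bytes
    // emitted since the previous call; returns true when a no-op should be emitted.
    static bool shouldInsertNop(int32_t& nopInsertTrigger, size_t bytesEmitted) {
        nopInsertTrigger -= static_cast<int32_t>(bytesEmitted);
        if (nopInsertTrigger < 0) {
            nopInsertTrigger = noiseForNopInsertion();  // reseed the interval
            return true;                                // caller emits a random no-op
        }
        return false;
    }
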
js/src/nanojit/Assembler.cpp
js/src/nanojit/Assembler.h
js/src/nanojit/CodeAlloc.h
js/src/nanojit/NativeARM.cpp
js/src/nanojit/NativeARM.h
js/src/nanojit/NativeMIPS.cpp
js/src/nanojit/NativeMIPS.h
js/src/nanojit/NativePPC.cpp
js/src/nanojit/NativePPC.h
js/src/nanojit/NativeSH4.cpp
js/src/nanojit/NativeSH4.h
js/src/nanojit/NativeSparc.cpp
js/src/nanojit/NativeSparc.h
js/src/nanojit/NativeX64.cpp
js/src/nanojit/NativeX64.h
js/src/nanojit/Nativei386.cpp
js/src/nanojit/Nativei386.h
js/src/nanojit/njconfig.cpp
js/src/nanojit/njconfig.h
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -1397,24 +1397,34 @@ namespace nanojit
         verbose_only( _thisfrag->nStaticExits++; )
         countlir_xcc();
         // We only support cmp with guard right now, also assume it is 'close'
         // and only emit the branch.
         NIns* exit = asm_exit(ins); // does intersectRegisterState()
         asm_branch(ins->opcode() == LIR_xf, cond, exit);
     }
 
+    // Helper function for the nop insertion feature; yields an interval such
+    // that at most one no-op instruction is inserted every 128-1151 bytes.
+    static inline uint32_t noiseForNopInsertion(Noise* n) {
+        return n->getValue(1023) + 128;
+    }
+
     void Assembler::gen(LirFilter* reader)
     {
         NanoAssert(_thisfrag->nStaticExits == 0);
 
         InsList pending_lives(alloc);
 
         NanoAssert(!error());
 
+        // compiler hardening setup
+        NIns* priorIns = _nIns;
+        int32_t nopInsertTrigger = hardenNopInsertion(_config) ? noiseForNopInsertion(_noise) : 0;
+
         // What's going on here: we're visiting all the LIR instructions in
         // the buffer, working strictly backwards in buffer-order, and
         // generating machine instructions for them as we go.
         //
         // For each LIns, we first check if it's live.  If so we mark its
         // operands as also live, and then generate code for it *if
         // necessary*.  It may not be necessary if the instruction is an
         // expression and code has already been generated for all its uses in
@@ -1468,16 +1478,37 @@ namespace nanojit
             // it is printed after the LIR and native code, exactly when the
             // post-regstate should be shown.
             if ((_logc->lcbits & LC_Native) && (_logc->lcbits & LC_Activation))
                 printActivationState();
             if ((_logc->lcbits & LC_Native) && (_logc->lcbits & LC_RegAlloc))
                 printRegState();
 #endif
 
+            // Compiler hardening: insert a no-op instruction into the compiled
+            // method whenever nopInsertTrigger drops below zero.
+            if (hardenNopInsertion(_config))
+            {
+                size_t delta = (uintptr_t)priorIns - (uintptr_t)_nIns; // bytes emitted since the previous iteration
+
+                // If there is no codeList, priorIns and _nIns are on the same page;
+                // otherwise make sure priorIns was not in a previous code block.
+                if (!codeList || !codeList->isInBlock(priorIns)) {
+                    NanoAssert(delta < VMPI_getVMPageSize()); // sanity check
+                    nopInsertTrigger -= delta;
+                    if (nopInsertTrigger < 0)
+                    {
+                        nopInsertTrigger = noiseForNopInsertion(_noise);
+                        asm_insert_random_nop();
+                        PERFM_NVPROF("hardening:nop-insert", 1);
+                    }
+                }
+                priorIns = _nIns;
+            }
+
             LOpcode op = ins->opcode();
             switch (op)
             {
                 default:
                     NanoAssertMsgf(false, "unsupported LIR instruction: %d\n", op);
                     break;
 
                 case LIR_regfence:
@@ -2028,32 +2060,32 @@ namespace nanojit
                     if (vtuneHandle) {
                         uint32_t currentLine = (uint32_t) ins->oprnd1()->immI();
                         vtuneLine(vtuneHandle, currentLine, _nIns);
                     }
                     break;
                 }
                #endif // VMCFG_VTUNE
 
-                case LIR_comment: 
+                case LIR_comment:
                     // Do nothing.
                     break;
             }
 
 #ifdef NJ_VERBOSE
             // We do final LIR printing inside this loop to avoid printing
             // dead LIR instructions.  We print the LIns after generating the
             // code.  This ensures that the LIns will appear in debug output
             // *before* the native code, because Assembler::outputf()
             // prints everything in reverse.
             //
             if (_logc->lcbits & LC_AfterDCE) {
                 InsBuf b;
                 LInsPrinter* printer = _thisfrag->lirbuf->printer;
-                if (ins->isop(LIR_comment)) 
+                if (ins->isop(LIR_comment))
                     outputf("%s", printer->formatIns(&b, ins));
                 else
                     outputf("    %s", printer->formatIns(&b, ins));
             }
 #endif
 
             if (error())
                 return;
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -482,16 +482,17 @@ namespace nanojit
 #endif
             void        asm_nongp_copy(Register r, Register s);
             void        asm_call(LIns*);
             Register    asm_binop_rhs_reg(LIns* ins);
             NIns*       asm_branch(bool branchOnFalse, LIns* cond, NIns* targ);
             NIns*       asm_branch_ov(LOpcode op, NIns* targ);
             void        asm_switch(LIns* ins, NIns* target);
             void        asm_jtbl(LIns* ins, NIns** table);
+            void        asm_insert_random_nop();
             void        emitJumpTable(SwitchInfo* si, NIns* target);
             void        assignSavedRegs();
             void        reserveSavedRegs();
             void        assignParamRegs();
             void        handleLoopCarriedExprs(InsList& pending_lives);
 
             // platform specific implementation (see NativeXXX.cpp file)
             void        nInit(AvmCore *);
--- a/js/src/nanojit/CodeAlloc.h
+++ b/js/src/nanojit/CodeAlloc.h
@@ -83,16 +83,20 @@ namespace nanojit
         /** return the starting address for this block only */
         NIns* start() { return &code[0]; }
 
         /** return just the usable size of this block */
         size_t size() const { return uintptr_t(end) - uintptr_t(&code[0]); }
 
         /** return the whole size of this block including overhead */
         size_t blockSize() const { return uintptr_t(end) - uintptr_t(this); }
+
+    public:
+        /** true if the given NIns is contained within this block */
+        bool isInBlock(NIns* n) { return (n >= this->start() && n < this->end); }
     };
 
     /**
      * Code memory allocator.
      * Long lived manager for many code blocks,
      * manages interaction with an underlying code memory allocator,
      * setting page permissions, api's for allocating and freeing
      * individual blocks of code memory (for methods, stubs, or compiled
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -2915,10 +2915,14 @@ void Assembler::swapCodeChunks() {
         _nExitSlot = exitStart;
     SWAP(NIns*, _nIns, _nExitIns);
     SWAP(NIns*, _nSlot, _nExitSlot);        // this one is ARM-specific
     SWAP(NIns*, codeStart, exitStart);
     SWAP(NIns*, codeEnd, exitEnd);
     verbose_only( SWAP(size_t, codeBytes, exitBytes); )
 }
 
+void Assembler::asm_insert_random_nop() {
+    NanoAssert(0); // not supported
+}
+
 }
 #endif /* FEATURE_NANOJIT */
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -266,16 +266,17 @@ verbose_only( extern const char* shiftNa
                                                                                 \
     void        BranchWithLink(NIns* addr);                                     \
     inline void BLX(Register addr, bool chk = true);                            \
     void        JMP_far(NIns*);                                                 \
     void        B_cond_chk(ConditionCode, NIns*, bool);                         \
     void        underrunProtect(int bytes);                                     \
     void        nativePageReset();                                              \
     void        nativePageSetup();                                              \
+    bool        hardenNopInsertion(const Config& c) { return false; }           \
     void        asm_immd_nochk(Register, int32_t, int32_t);                     \
     void        asm_regarg(ArgType, LIns*, Register);                           \
     void        asm_stkarg(LIns* p, int stkd);                                  \
     void        asm_cmpi(Register, int32_t imm);                                \
     void        asm_ldr_chk(Register d, Register b, int32_t off, bool chk);     \
     int32_t     asm_str(Register rt, Register rr, int32_t off);                 \
     void        asm_cmp(LIns *cond);                                            \
     void        asm_cmpd(LIns *cond);                                           \
--- a/js/src/nanojit/NativeMIPS.cpp
+++ b/js/src/nanojit/NativeMIPS.cpp
@@ -2057,11 +2057,17 @@ namespace nanojit
         if (!_nExitSlot)
             _nExitSlot = exitStart;
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, _nSlot, _nExitSlot);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
+
+    void
+    Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 }
 
 #endif // FEATURE_NANOJIT && NANOJIT_MIPS
--- a/js/src/nanojit/NativeMIPS.h
+++ b/js/src/nanojit/NativeMIPS.h
@@ -237,16 +237,17 @@ namespace nanojit {
 
 // REQ: Platform specific declarations to include in Assembler class
 #define DECLARE_PLATFORM_ASSEMBLER()                                    \
     const static Register argRegs[4];                                   \
     const static Register retRegs[2];                                   \
     void nativePageSetup(void);                                         \
     void nativePageReset(void);                                         \
     void underrunProtect(int bytes);                                    \
+    bool hardenNopInsertion(const Config& c) { return false; }          \
     NIns *_nSlot;                                                       \
     NIns *_nExitSlot;                                                   \
     int max_out_args;                                                   \
     Register ovreg;                                                     \
                                                                         \
     void asm_ldst(int op, Register r, int offset, Register b);          \
     void asm_ldst64(bool store, Register fr, int offset, Register b);   \
     void asm_store_imm64(LIns *value, int dr, Register rbase);          \
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -1444,11 +1444,15 @@ namespace nanojit
             codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         }
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
 
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 } // namespace nanojit
 
 #endif // FEATURE_NANOJIT && NANOJIT_PPC
--- a/js/src/nanojit/NativePPC.h
+++ b/js/src/nanojit/NativePPC.h
@@ -290,16 +290,17 @@ namespace nanojit
     #define DECL_PPC64()
 #endif
 
     #define DECLARE_PLATFORM_ASSEMBLER()                                    \
         const static Register argRegs[8], retRegs[2];                       \
         void underrunProtect(int bytes);                                    \
         void nativePageReset();                                             \
         void nativePageSetup();                                             \
+        bool hardenNopInsertion(const Config& c) { return false; }          \
         void br(NIns *addr, int link);                                      \
         void br_far(NIns *addr, int link);                                  \
         void asm_regarg(ArgType, LIns*, Register);                          \
         void asm_li(Register r, int32_t imm);                               \
         void asm_li32(Register r, int32_t imm);                             \
         void asm_li64(Register r, uint64_t imm);                            \
         void asm_cmp(LOpcode op, LIns *a, LIns *b, ConditionRegister);      \
         NIns* asm_branch_far(bool onfalse, LIns *cond, NIns * const targ);  \
--- a/js/src/nanojit/NativeSH4.cpp
+++ b/js/src/nanojit/NativeSH4.cpp
@@ -3229,10 +3229,15 @@ namespace nanojit
         if (_nIns - nb_bytes < codeStart) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
 
             // This jump will call underrunProtect again, but since we're on
             // a new page large enough to host its code, nothing will happen.
             JMP(pc, true);
         }
     }
+
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 }
 #endif // FEATURE_NANOJIT && FEATURE_SH4
--- a/js/src/nanojit/NativeSH4.h
+++ b/js/src/nanojit/NativeSH4.h
@@ -162,16 +162,17 @@ namespace nanojit
     const static int NumArgDregs;                                       \
     const static Register argRegs[4], retRegs[2];                       \
     const static Register argDregs[4], retDregs[1];                     \
     int max_stack_args;                                                 \
                                                                         \
     void nativePageReset();                                             \
     void nativePageSetup();                                             \
     void underrunProtect(int);                                          \
+    bool hardenNopInsertion(const Config& c) { return false; }          \
     bool simplifyOpcode(LOpcode &);                                     \
                                                                         \
     NIns *asm_immi(int, Register, bool force = false);                  \
     void asm_immd(uint64_t, Register);                                  \
     void asm_immd_nochk(uint64_t, Register);                            \
     void asm_arg_regi(LIns*, Register);                                 \
     void asm_arg_regd(LIns*, Register);                                 \
     void asm_arg_stacki(LIns*, int);                                    \
--- a/js/src/nanojit/NativeSparc.cpp
+++ b/js/src/nanojit/NativeSparc.cpp
@@ -1569,10 +1569,14 @@ namespace nanojit
         if (!_nExitIns)
             codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
 
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 #endif /* FEATURE_NANOJIT */
 }
--- a/js/src/nanojit/NativeSparc.h
+++ b/js/src/nanojit/NativeSparc.h
@@ -203,16 +203,17 @@ namespace nanojit
 #define DECLARE_PLATFORM_REGALLOC()
 
 #define DECLARE_PLATFORM_ASSEMBLER()    \
      const static Register argRegs[6], retRegs[1]; \
      bool has_cmov; \
      void nativePageReset(); \
      void nativePageSetup(); \
      void underrunProtect(int bytes); \
+     bool hardenNopInsertion(const Config& c) { return false; } \
      void asm_align_code(); \
      void asm_cmp(LIns *cond); \
      void asm_cmpd(LIns *cond); \
      NIns* asm_branchd(bool, LIns*, NIns*); \
      void IMM32(int32_t i) { \
          --_nIns; \
          *((int32_t*)_nIns) = i; \
      } \
--- a/js/src/nanojit/NativeX64.cpp
+++ b/js/src/nanojit/NativeX64.cpp
@@ -2177,11 +2177,15 @@ namespace nanojit
             codeAlloc(exitStart, exitEnd, _nExitIns verbose_only(, exitBytes));
         }
         SWAP(NIns*, _nIns, _nExitIns);
         SWAP(NIns*, codeStart, exitStart);
         SWAP(NIns*, codeEnd, exitEnd);
         verbose_only( SWAP(size_t, codeBytes, exitBytes); )
     }
 
+    void Assembler::asm_insert_random_nop() {
+        NanoAssert(0); // not supported
+    }
+
 } // namespace nanojit
 
 #endif // FEATURE_NANOJIT && NANOJIT_X64
--- a/js/src/nanojit/NativeX64.h
+++ b/js/src/nanojit/NativeX64.h
@@ -368,16 +368,17 @@ namespace nanojit
     #define DECLARE_PLATFORM_STATS()
     #define DECLARE_PLATFORM_REGALLOC()
 
     #define DECLARE_PLATFORM_ASSEMBLER()                                    \
         const static Register argRegs[NumArgRegs], retRegs[1];              \
         void underrunProtect(ptrdiff_t bytes);                              \
         void nativePageReset();                                             \
         void nativePageSetup();                                             \
+        bool hardenNopInsertion(const Config& c) { return false; }          \
         void asm_qbinop(LIns*);                                             \
         void MR(Register, Register);\
         void JMP(NIns*);\
         void JMPl(NIns*);\
         void emit(uint64_t op);\
         void emit8(uint64_t op, int64_t val);\
         void emit_target8(size_t underrun, uint64_t op, NIns* target);\
         void emit_target32(size_t underrun, uint64_t op, NIns* target);\
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -612,18 +612,18 @@ namespace nanojit
         underrunProtect(2);
         MODRMm(4, 0, r);
         *(--_nIns) = 0xff;
         asm_output("jmp   *(%s)", gpn(r));
     }
 
     inline void Assembler::JMP_indexed(Register x, I32 ss, NIns** addr) {
         underrunProtect(7);
-        IMM32(int32_t(addr));           
-        SIB(ss, REGNUM(x), 5);          
+        IMM32(int32_t(addr));
+        SIB(ss, REGNUM(x), 5);
         MODRM(0, 4, 4);                 // amode == addr(table + x<<ss)
         *(--_nIns) = uint8_t(0xff);     // jmp
         asm_output("jmp   *(%s*%d+%p)", gpn(x), 1 << ss, (void*)addr);
     }
 
     inline void Assembler::JE(NIns* t)   { JCC(0x04, t, "je"); }
     inline void Assembler::JNE(NIns* t)  { JCC(0x05, t, "jne"); }
     inline void Assembler::JP(NIns* t)   { JCC(0x0A, t, "jp"); }
@@ -1680,17 +1680,17 @@ namespace nanojit
         // SETcc only sets low 8 bits, so extend
         MOVZX8(r,r);
 
         if (_config.i386_sse2) {
             // LIR_ltd and LIR_gtd are handled by the same case because
             // asm_cmpd() converts LIR_ltd(a,b) to LIR_gtd(b,a).  Likewise
             // for LIR_led/LIR_ged.
             switch (opcode) {
-            case LIR_eqd:   
+            case LIR_eqd:
                 if (ins->oprnd1() == ins->oprnd2()) {
                     SETNP(r);
                 } else {
                     // result = ZF & !PF, must do logic on flags
                     AND8R(r);       // and      rl,rh    rl &= rh
                     SETNPH(r);      // setnp    rh       rh = !PF
                     SETE(r);        // sete     rl       rl = ZF
                 }
@@ -2646,17 +2646,17 @@ namespace nanojit
                     if (cond->oprnd1() == cond->oprnd2()) {
                         JNP(targ);
                     } else {
                         // jp skip (2byte)
                         // je target
                         // skip: ...
                         underrunProtect(16); // underrun of 7 needed but we write 2 instr --> 16
                         NIns *skip = _nIns;
-                        JE(targ);      
+                        JE(targ);
                         at = _nIns;
                         JP(skip);
                     }
                     break;
                 case LIR_ltd:
                 case LIR_gtd:   JA(targ);       break;
                 case LIR_led:
                 case LIR_ged:   JAE(targ);      break;
@@ -2666,17 +2666,17 @@ namespace nanojit
         } else {
             if (branchOnFalse)
                 JP(targ);
             else
                 JNP(targ);
         }
 
         if (!at)
-            at = _nIns; 
+            at = _nIns;
         asm_cmpd(cond);
 
         return at;
     }
 
     // WARNING: This function cannot generate any code that will affect the
     // condition codes prior to the generation of the
     // ucomisd/fcompp/fcmop/fcom.  See asm_cmp() for more details.
@@ -2701,17 +2701,17 @@ namespace nanojit
 
             // LIR_eqd, if lhs == rhs:
             //   ucomisd       ZPC   outcome (SETNP/JNP succeeds if P==0)
             //   -------       ---   -------
             //   UNORDERED     111   SETNP/JNP fails
             //   EQUAL         100   SETNP/JNP succeeds
             //
             // LIR_eqd, if lsh != rhs;
-            //   ucomisd       ZPC   outcome (SETP/JP succeeds if P==0, 
+            //   ucomisd       ZPC   outcome (SETP/JP succeeds if P==0,
             //                                SETE/JE succeeds if Z==0)
             //   -------       ---   -------
             //   UNORDERED     111   SETP/JP succeeds (and skips to fail target)
             //   EQUAL         100   SETP/JP fails, SETE/JE succeeds
             //   GREATER_THAN  000   SETP/JP fails, SETE/JE fails
             //   LESS_THAN     001   SETP/JP fails, SETE/JE fails
             //
             // LIR_gtd:
@@ -2859,16 +2859,30 @@ namespace nanojit
         NanoAssertMsg(n<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
         // This may be in a normal code chunk or an exit code chunk.
         if (eip - n < codeStart) {
             codeAlloc(codeStart, codeEnd, _nIns verbose_only(, codeBytes));
             JMP(eip);
         }
     }
 
+    void Assembler::asm_insert_random_nop()
+    {
+        // emit one of several no-op-equivalent instructions, chosen at random
+        uint32_t r = _noise->getValue(5);
+        switch(r)
+        {
+            case 0: MR(rEAX,rEAX);        break;
+            case 1: MR(rEDI,rEDI);        break;
+            case 2: MR(rECX,rECX);        break;
+            case 3: LEA(rECX,0,rECX);     break;
+            case 4: LEA(rESP,0,rESP);     break;
+        }
+    }
+
     void Assembler::asm_ret(LIns* ins)
     {
         genEpilogue();
 
         // Restore rESP from rEBP, undoing SUBi(SP,amt) in the prologue
         MR(SP,FP);
 
         releaseRegisters();
--- a/js/src/nanojit/Nativei386.h
+++ b/js/src/nanojit/Nativei386.h
@@ -188,16 +188,17 @@ namespace nanojit
     #define JMP32 0xe9
 
     #define DECLARE_PLATFORM_ASSEMBLER()    \
         const static Register argRegs[2], retRegs[2]; \
         int32_t max_stk_args;\
         void nativePageReset();\
         void nativePageSetup();\
         void underrunProtect(int);\
+        bool hardenNopInsertion(const Config& c) { return c.harden_nop_insertion; } \
         void asm_immi(Register r, int32_t val, bool canClobberCCs);\
         void asm_stkarg(LIns* p, int32_t& stkd);\
         void asm_farg(LIns*, int32_t& stkd);\
         void asm_arg(ArgType ty, LIns* p, Register r, int32_t& stkd);\
         void asm_pusharg(LIns*);\
         void asm_cmpd(LIns *cond);\
         NIns* asm_branchd(bool, LIns*, NIns*);\
         void asm_cmp(LIns *cond); \
--- a/js/src/nanojit/njconfig.cpp
+++ b/js/src/nanojit/njconfig.cpp
@@ -88,16 +88,17 @@ namespace nanojit
 
 #ifdef NANOJIT_IA32
         int const features = getCpuFeatures();
         i386_sse2 = (features & (1<<26)) != 0;
         i386_use_cmov = (features & (1<<15)) != 0;
         i386_fixed_esp = false;
 #endif
         harden_function_alignment = false;
+        harden_nop_insertion = false;
 
 #if defined(NANOJIT_ARM)
 
         // XXX: temporarily disabled, see bug 547063.
         //NanoStaticAssert(NJ_COMPILER_ARM_ARCH >= 5 && NJ_COMPILER_ARM_ARCH <= 7);
 
         arm_arch = NJ_COMPILER_ARM_ARCH;
         arm_vfp = (arm_arch >= 7);
--- a/js/src/nanojit/njconfig.h
+++ b/js/src/nanojit/njconfig.h
@@ -92,13 +92,16 @@ namespace nanojit
         uint32_t arm_show_stats:1;
 
         // If true, use softfloat for all floating point operations,
         // whether or not an FPU is present. (ARM only for now, but might also includes MIPS in the future)
         uint32_t soft_float:1;
 
         // If true, compiler will insert a random amount of space in between functions (x86-32 only)
         uint32_t harden_function_alignment:1;
+
+        // If true, compiler will insert randomly chosen no-op instructions at random locations within a compiled method (x86-32 only)
+        uint32_t harden_nop_insertion:1;
     };
 }
 
 #endif // FEATURE_NANOJIT
 #endif // __njconfig_h__
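
The new harden_nop_insertion bit defaults to false (see the njconfig.cpp hunk above), so an embedder opts in by flipping it on the Config object it hands to the JIT. A minimal sketch, assuming the embedder constructs nanojit::Config directly via the umbrella header and that the flag only takes effect on x86-32, where asm_insert_random_nop is implemented:

    #include "nanojit.h"

    nanojit::Config makeHardenedConfig() {
        nanojit::Config cfg;                   // constructor defaults harden_nop_insertion to false
        cfg.harden_nop_insertion = 1;          // enable random no-op insertion (x86-32 only)
        cfg.harden_function_alignment = 1;     // pre-existing hardening knob, also off by default
        return cfg;
    }
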