Bug 784739 - Switch from NULL to nullptr in js/src/jit/ subdirectories; r=ehsan
author Birunthan Mohanathas <birunthan@mohanathas.com>
Fri, 27 Sep 2013 16:30:34 -0400
changeset 149121 bef1a1ec6ea37727c7597ee8f497f19980b6acd4
parent 149120 b7e1f15c2964ce3d7945d0b34815677d3f290cf6
child 149122 e34fa34cc870f2cb62e730c62248369ca8967559
push id 34441
push user eakhgari@mozilla.com
push date Sat, 28 Sep 2013 06:27:19 +0000
treeherder mozilla-inbound@632e32739bc5
reviewers ehsan
bugs 784739
milestone 27.0a1
Bug 784739 - Switch from NULL to nullptr in js/src/jit/ subdirectories; r=ehsan
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/IonFrames-arm.h
js/src/jit/arm/LIR-arm.h
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/Assembler-x86-shared.cpp
js/src/jit/shared/BaselineCompiler-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/IonAssemblerBuffer.h
js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
js/src/jit/shared/IonFrames-x86-shared.h
js/src/jit/shared/LIR-x86-shared.h
js/src/jit/shared/Lowering-shared.cpp
js/src/jit/shared/Lowering-shared.h
js/src/jit/x64/Assembler-x64.cpp
js/src/jit/x64/Trampoline-x64.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/jit/x86/Trampoline-x86.cpp
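
The change is mechanical: uses of the NULL macro in pointer contexts (return values, comparisons, default arguments, and comments) are replaced with the C++11 nullptr keyword. As a minimal sketch of why nullptr is preferred (generic example code, not taken from the patched files), nullptr avoids the integer/pointer ambiguity that NULL can introduce in overload resolution:

    // Illustrative sketch only; these overloads are hypothetical, not from the patch.
    void f(int);
    void f(char *p);

    f(NULL);     // selects f(int) when NULL is defined as 0, or is ambiguous when it is 0L
    f(nullptr);  // always selects f(char *), since nullptr converts only to pointer types

The diffs below apply this substitution throughout js/src/jit/arm/, js/src/jit/shared/, js/src/jit/x64/, and js/src/jit/x86/.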
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -241,39 +241,39 @@ InstDTR::isTHIS(const Instruction &i)
     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
 }
 
 InstDTR *
 InstDTR::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstDTR*)&i;
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstLDR::isTHIS(const Instruction &i)
 {
     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
 }
 
 InstLDR *
 InstLDR::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstLDR*)&i;
-    return NULL;
+    return nullptr;
 }
 
 InstNOP *
 InstNOP::asTHIS(Instruction &i)
 {
     if (isTHIS(i))
         return (InstNOP*) (&i);
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstNOP::isTHIS(const Instruction &i)
 {
     return (i.encode() & 0x0fffffff) == NopInst;
 }
 
@@ -283,17 +283,17 @@ InstBranchReg::isTHIS(const Instruction 
     return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i);
 }
 
 InstBranchReg *
 InstBranchReg::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstBranchReg*)&i;
-    return NULL;
+    return nullptr;
 }
 void
 InstBranchReg::extractDest(Register *dest)
 {
     *dest = toR(*this);
 }
 bool
 InstBranchReg::checkDest(Register dest)
@@ -307,17 +307,17 @@ InstBranchImm::isTHIS(const Instruction 
     return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i);
 }
 
 InstBranchImm *
 InstBranchImm::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstBranchImm*)&i;
-    return NULL;
+    return nullptr;
 }
 
 void
 InstBranchImm::extractImm(BOffImm *dest)
 {
     *dest = BOffImm(*this);
 }
 
@@ -327,71 +327,71 @@ InstBXReg::isTHIS(const Instruction &i)
     return (i.encode() & IsBRegMask) == IsBX;
 }
 
 InstBXReg *
 InstBXReg::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstBXReg*)&i;
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstBLXReg::isTHIS(const Instruction &i)
 {
     return (i.encode() & IsBRegMask) == IsBLX;
 
 }
 InstBLXReg *
 InstBLXReg::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstBLXReg*)&i;
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstBImm::isTHIS(const Instruction &i)
 {
     return (i.encode () & IsBImmMask) == IsB;
 }
 InstBImm *
 InstBImm::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstBImm*)&i;
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstBLImm::isTHIS(const Instruction &i)
 {
     return (i.encode () & IsBImmMask) == IsBL;
 
 }
 InstBLImm *
 InstBLImm::asTHIS(Instruction &i)
 {
     if (isTHIS(i))
         return (InstBLImm*)&i;
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstMovWT::isTHIS(Instruction &i)
 {
     return  InstMovW::isTHIS(i) || InstMovT::isTHIS(i);
 }
 InstMovWT *
 InstMovWT::asTHIS(Instruction &i)
 {
     if (isTHIS(i))
         return (InstMovWT*)&i;
-    return NULL;
+    return nullptr;
 }
 
 void
 InstMovWT::extractImm(Imm16 *imm)
 {
     *imm = Imm16(*this);
 }
 bool
@@ -417,38 +417,38 @@ InstMovW::isTHIS(const Instruction &i)
     return (i.encode() & IsWTMask) == IsW;
 }
 
 InstMovW *
 InstMovW::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstMovW*) (&i);
-    return NULL;
+    return nullptr;
 }
 InstMovT *
 InstMovT::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstMovT*) (&i);
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstMovT::isTHIS(const Instruction &i)
 {
     return (i.encode() & IsWTMask) == IsT;
 }
 
 InstALU *
 InstALU::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstALU*) (&i);
-    return NULL;
+    return nullptr;
 }
 bool
 InstALU::isTHIS(const Instruction &i)
 {
     return (i.encode() & ALUMask) == 0;
 }
 void
 InstALU::extractOp(ALUOp *ret)
@@ -488,31 +488,31 @@ InstALU::extractOp2()
     return Operand2(encode());
 }
 
 InstCMP *
 InstCMP::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstCMP*) (&i);
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstCMP::isTHIS(const Instruction &i)
 {
     return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp);
 }
 
 InstMOV *
 InstMOV::asTHIS(const Instruction &i)
 {
     if (isTHIS(i))
         return (InstMOV*) (&i);
-    return NULL;
+    return nullptr;
 }
 
 bool
 InstMOV::isTHIS(const Instruction &i)
 {
     return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov);
 }
 
@@ -737,17 +737,17 @@ Assembler::getCF32Target(Iter *iter)
 
     MOZ_ASSUME_UNREACHABLE("unsupported branch relocation");
 }
 
 uintptr_t
 Assembler::getPointer(uint8_t *instPtr)
 {
     InstructionIterator iter((Instruction*)instPtr);
-    uintptr_t ret = (uintptr_t)getPtr32Target(&iter, NULL, NULL);
+    uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr);
     return ret;
 }
 
 template<class Iter>
 const uint32_t *
 Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style)
 {
     Instruction *load1 = start->cur();
@@ -1337,26 +1337,26 @@ Assembler::bytesNeeded() const
         dataRelocationTableBytes() +
         preBarrierTableBytes();
 }
 
 // write a blob of binary into the instruction stream
 BufferOffset
 Assembler::writeInst(uint32_t x, uint32_t *dest)
 {
-    if (dest == NULL)
+    if (dest == nullptr)
         return m_buffer.putInt(x);
 
     writeInstStatic(x, dest);
     return BufferOffset();
 }
 void
 Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
 {
-    JS_ASSERT(dest != NULL);
+    JS_ASSERT(dest != nullptr);
     *dest = x;
 }
 
 BufferOffset
 Assembler::align(int alignment)
 {
     BufferOffset ret;
     if (alignment == 8) {
@@ -1811,17 +1811,17 @@ Assembler::patchConstantPoolLoad(void* l
         break;
       case PoolHintData::poolBranch:
         // Either this used to be a poolBranch, and the label was already bound, so it was
         // replaced with a real branch, or this may happen in the future.
         // If this is going to happen in the future, then the actual bits that are written here
         // don't matter (except the condition code, since that is always preserved across
         // patchings) but if it does not get bound later,
         // then we want to make sure this is a load from the pool entry (and the pool entry
-        // should be NULL so it will crash).
+        // should be nullptr so it will crash).
         if (data.isValidPoolHint()) {
             dummy->as_dtr(IsLoad, 32, Offset, pc,
                           DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)),
                           data.getCond(), instAddr);
         }
         break;
       case PoolHintData::poolVDTR: {
         VFPRegister dest = data.getVFPReg();
@@ -2503,17 +2503,17 @@ struct PoolHeader : Instruction {
         Header tmp(this);
         return tmp.isNatural;
     }
     static bool isTHIS(const Instruction &i) {
         return (*i.raw() & 0xffff0000) == 0xffff0000;
     }
     static const PoolHeader *asTHIS(const Instruction &i) {
         if (!isTHIS(i))
-            return NULL;
+            return nullptr;
         return static_cast<const PoolHeader*>(&i);
     }
 };
 
 
 void
 Assembler::writePoolHeader(uint8_t *start, Pool *p, bool isNatural)
 {
@@ -2600,33 +2600,33 @@ Assembler::patchWrite_Imm32(CodeLocation
     *(raw-1) = imm.value;
 }
 
 
 uint8_t *
 Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
 {
     Instruction *inst = reinterpret_cast<Instruction*>(inst_);
-    if (count != NULL)
+    if (count != nullptr)
         *count += sizeof(Instruction);
     return reinterpret_cast<uint8_t*>(inst->next());
 }
 
 static bool
 InstIsGuard(Instruction *inst, const PoolHeader **ph)
 {
     Assembler::Condition c;
     inst->extractCond(&c);
     if (c != Assembler::Always)
         return false;
     if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
         return false;
     // See if the next instruction is a pool header.
     *ph = (inst+1)->as<const PoolHeader>();
-    return *ph != NULL;
+    return *ph != nullptr;
 }
 
 static bool
 InstIsBNop(Instruction *inst) {
     // In some special situations, it is necessary to insert a NOP
     // into the instruction stream that nobody knows about, since nobody should know about
     // it, make sure it gets skipped when Instruction::next() is called.
     // this generates a very specific nop, namely a branch to the next instruction.
@@ -2810,17 +2810,17 @@ AutoFlushCache::~AutoFlushCache()
 {
    if (!runtime_)
         return;
 
     flushAnyway();
     IonSpewCont(IonSpew_CacheFlush, ">", name_);
     if (runtime_->flusher() == this) {
         IonSpewFin(IonSpew_CacheFlush);
-        runtime_->setFlusher(NULL);
+        runtime_->setFlusher(nullptr);
     }
 }
 
 void
 AutoFlushCache::flushAnyway()
 {
     if (!runtime_)
         return;
@@ -2828,21 +2828,21 @@ AutoFlushCache::flushAnyway()
     IonSpewCont(IonSpew_CacheFlush, "|", name_);
 
     if (!used_)
         return;
 
     if (start_) {
         JSC::ExecutableAllocator::cacheFlush((void *)start_, size_t(stop_ - start_ + sizeof(Instruction)));
     } else {
-        JSC::ExecutableAllocator::cacheFlush(NULL, 0xff000000);
+        JSC::ExecutableAllocator::cacheFlush(nullptr, 0xff000000);
     }
     used_ = false;
 }
 InstructionIterator::InstructionIterator(Instruction *i_) : i(i_) {
     const PoolHeader *ph;
     // If this is a guard, and the next instruction is a header, always work around the pool
     // If it isn't a guard, then start looking ahead.
     if (InstIsArtificialGuard(i, &ph)) {
         i = i->next();
     }
 }
-Assembler *Assembler::dummy = NULL;
+Assembler *Assembler::dummy = nullptr;
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1308,17 +1308,17 @@ class Assembler
         new (&pools_[2]) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, true);
         // Set up the backwards 32 bit region
         new (&pools_[3]) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, true, true);
         // Set up the forwards double region
         new (doublePool) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, false, false, &pools_[2]);
         // Set up the forwards 32 bit region
         new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]);
         for (int i = 0; i < 4; i++) {
-            if (pools_[i].poolData == NULL) {
+            if (pools_[i].poolData == nullptr) {
                 m_buffer.fail_oom();
                 return;
             }
         }
     }
 
     static Condition InvertCondition(Condition cond);
 
@@ -1354,17 +1354,17 @@ class Assembler
     // Given the start of a Control Flow sequence, grab the value that is finally branched to
     // given the start of a function that loads an address into a register get the address that
     // ends up in the register.
     template <class Iter>
     static const uint32_t * getCF32Target(Iter *iter);
 
     static uintptr_t getPointer(uint8_t *);
     template <class Iter>
-    static const uint32_t * getPtr32Target(Iter *iter, Register *dest = NULL, RelocStyle *rs = NULL);
+    static const uint32_t * getPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr);
 
     bool oom() const;
 
     void setPrinter(Sprinter *sp) {
     }
 
   private:
     bool isFinished;
@@ -1396,35 +1396,35 @@ class Assembler
     size_t jumpRelocationTableBytes() const;
     size_t dataRelocationTableBytes() const;
     size_t preBarrierTableBytes() const;
 
     // Size of the data table, in bytes.
     size_t bytesNeeded() const;
 
     // Write a blob of binary into the instruction stream *OR*
-    // into a destination address. If dest is NULL (the default), then the
+    // into a destination address. If dest is nullptr (the default), then the
     // instruction gets written into the instruction stream. If dest is not null
     // it is interpreted as a pointer to the location that we want the
     // instruction to be written.
-    BufferOffset writeInst(uint32_t x, uint32_t *dest = NULL);
+    BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr);
     // A static variant for the cases where we don't want to have an assembler
-    // object at all. Normally, you would use the dummy (NULL) object.
+    // object at all. Normally, you would use the dummy (nullptr) object.
     static void writeInstStatic(uint32_t x, uint32_t *dest);
 
   public:
     void writeCodePointer(AbsoluteLabel *label);
 
     BufferOffset align(int alignment);
     BufferOffset as_nop();
     BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
-                ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = NULL);
+                ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
 
     BufferOffset as_mov(Register dest,
-                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = NULL);
+                Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
     BufferOffset as_mvn(Register dest, Operand2 op2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     // logical operations
     BufferOffset as_and(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_bic(Register dest, Register src1,
                 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_eor(Register dest, Register src1,
@@ -1452,18 +1452,18 @@ class Assembler
     BufferOffset as_teq(Register src1, Operand2 op2,
                 Condition c = Always);
     BufferOffset as_tst(Register src1, Operand2 op2,
                 Condition c = Always);
 
     // Not quite ALU worthy, but useful none the less:
     // These also have the isue of these being formatted
     // completly differently from the standard ALU operations.
-    BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = NULL);
-    BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = NULL);
+    BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
+    BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
 
     BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
                    MULOp op, SetCond_ sc, Condition c = Always);
     BufferOffset as_mul(Register dest, Register src1, Register src2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     BufferOffset as_umaal(Register dest1, Register dest2, Register src1, Register src2,
@@ -1481,36 +1481,36 @@ class Assembler
 
     BufferOffset as_sdiv(Register dest, Register num, Register div, Condition c = Always);
     BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always);
 
     // Data transfer instructions: ldr, str, ldrb, strb.
     // Using an int to differentiate between 8 bits and 32 bits is
     // overkill, but meh
     BufferOffset as_dtr(LoadStore ls, int size, Index mode,
-                Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = NULL);
+                Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = nullptr);
     // Handles all of the other integral data transferring functions:
     // ldrsb, ldrsh, ldrd, etc.
     // size is given in bits.
     BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
-                   Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = NULL);
+                   Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = nullptr);
 
     BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask,
                 DTMMode mode, DTMWriteBack wb, Condition c = Always);
     //overwrite a pool entry with new data.
     void as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data);
     // load a 32 bit immediate from a pool into a register
-    BufferOffset as_Imm32Pool(Register dest, uint32_t value, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
+    BufferOffset as_Imm32Pool(Register dest, uint32_t value, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
     // make a patchable jump that can target the entire 32 bit address space.
-    BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
+    BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
 
     // load a 64 bit floating point immediate from a pool into a register
-    BufferOffset as_FImm64Pool(VFPRegister dest, double value, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
+    BufferOffset as_FImm64Pool(VFPRegister dest, double value, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
     // load a 32 bit floating point immediate from a pool into a register
-    BufferOffset as_FImm32Pool(VFPRegister dest, float value, ARMBuffer::PoolEntry *pe = NULL, Condition c = Always);
+    BufferOffset as_FImm32Pool(VFPRegister dest, float value, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
 
     // Control flow stuff:
 
     // bx can *only* branch to a register
     // never to an immediate.
     BufferOffset as_bx(Register r, Condition c = Always, bool isPatchable = false);
 
     // Branch can branch to an immediate *or* to a register.
@@ -1542,17 +1542,17 @@ class Assembler
     // VFP instructions!
   private:
 
     enum vfp_size {
         isDouble = 1 << 8,
         isSingle = 0 << 8
     };
 
-    BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=NULL);
+    BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=nullptr);
     // Unityped variants: all registers hold the same (ieee754 single/double)
     // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
     BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       VFPOp op, Condition c = Always);
 
   public:
     BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                  Condition c = Always);
@@ -1614,17 +1614,17 @@ class Assembler
     BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
                          Condition c = Always);
     // hard coded to a 32 bit fixed width result for now
     BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c = Always);
 
     /* xfer between VFP and memory*/
     BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                  Condition c = Always /* vfp doesn't have a wb option*/,
-                 uint32_t *dest = NULL);
+                 uint32_t *dest = nullptr);
 
     // VFP's ldm/stm work differently from the standard arm ones.
     // You can only transfer a range
 
     BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
                  /*also has update conditions*/Condition c = Always);
 
     BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
@@ -1806,17 +1806,17 @@ class Assembler
     static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
     static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                         ImmPtr expectedValue);
     static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
     static uint32_t alignDoubleArg(uint32_t offset) {
         return (offset+1)&~1;
     }
-    static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = NULL);
+    static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
     // Toggle a jmp or cmp emitted by toggledJump().
 
     static void ToggleToJmp(CodeLocationLabel inst_);
     static void ToggleToCmp(CodeLocationLabel inst_);
 
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
     static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
--- a/js/src/jit/arm/IonFrames-arm.h
+++ b/js/src/jit/arm/IonFrames-arm.h
@@ -75,21 +75,21 @@ class IonJSFrameLayout : public IonEntry
 
     static size_t offsetOfCalleeToken() {
         return offsetof(IonJSFrameLayout, calleeToken_);
     }
     static size_t offsetOfNumActualArgs() {
         return offsetof(IonJSFrameLayout, numActualArgs_);
     }
     static size_t offsetOfThis() {
-        IonJSFrameLayout *base = NULL;
+        IonJSFrameLayout *base = nullptr;
         return reinterpret_cast<size_t>(&base->argv()[0]);
     }
     static size_t offsetOfActualArgs() {
-        IonJSFrameLayout *base = NULL;
+        IonJSFrameLayout *base = nullptr;
         // +1 to skip |this|.
         return reinterpret_cast<size_t>(&base->argv()[1]);
     }
     static size_t offsetOfActualArg(size_t arg) {
         return offsetOfActualArgs() + arg * sizeof(Value);
     }
 
     Value thisv() {
@@ -214,25 +214,25 @@ class IonExitFrameLayout : public IonCom
         uint8_t *sp = reinterpret_cast<uint8_t *>(this);
         return reinterpret_cast<IonExitFooterFrame *>(sp - IonExitFooterFrame::Size());
     }
 
     // argBase targets the point which precedes the exit frame. Arguments of VM
     // each wrapper are pushed before the exit frame.  This correspond exactly
     // to the value of the argBase register of the generateVMWrapper function.
     inline uint8_t *argBase() {
-        JS_ASSERT(footer()->ionCode() != NULL);
+        JS_ASSERT(footer()->ionCode() != nullptr);
         return top();
     }
 
     inline bool isWrapperExit() {
-        return footer()->function() != NULL;
+        return footer()->function() != nullptr;
     }
     inline bool isNativeExit() {
-        return footer()->ionCode() == NULL;
+        return footer()->ionCode() == nullptr;
     }
     inline bool isOOLNativeExit() {
         return footer()->ionCode() == ION_FRAME_OOL_NATIVE;
     }
     inline bool isOOLPropertyOpExit() {
         return footer()->ionCode() == ION_FRAME_OOL_PROPERTY_OP;
     }
     inline bool isOOLProxyExit() {
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -287,17 +287,17 @@ class LTableSwitch : public LInstruction
     const LAllocation *index() {
         return getOperand(0);
     }
     const LAllocation *tempInt() {
         return getTemp(0)->output();
     }
     // This is added to share the same CodeGenerator prefixes.
     const LAllocation *tempPointer() {
-        return NULL;
+        return nullptr;
     }
 };
 
 // Takes a tableswitch with an integer to decide
 class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 2>
 {
   public:
     LIR_HEADER(TableSwitchV);
@@ -318,17 +318,17 @@ class LTableSwitchV : public LInstructio
 
     const LAllocation *tempInt() {
         return getTemp(0)->output();
     }
     const LAllocation *tempFloat() {
         return getTemp(1)->output();
     }
     const LAllocation *tempPointer() {
-        return NULL;
+        return nullptr;
     }
 };
 
 class LGuardShape : public LInstructionHelper<0, 1, 1>
 {
   public:
     LIR_HEADER(GuardShape);
 
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -360,21 +360,21 @@ MacroAssemblerARM::ma_alu(Register src1,
         // then use that
         as_movw(ScratchRegister, imm.value & 0xffff, c);
         if ((imm.value >> 16) != 0)
             as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c);
     } else {
         // Going to have to use a load.  If the operation is a move, then just move it into the
         // destination register
         if (op == op_mov) {
-            as_Imm32Pool(dest, imm.value, NULL, c);
+            as_Imm32Pool(dest, imm.value, nullptr, c);
             return;
         } else {
             // If this isn't just going into a register, then stick it in a temp, and then proceed.
-            as_Imm32Pool(ScratchRegister, imm.value, NULL, c);
+            as_Imm32Pool(ScratchRegister, imm.value, nullptr, c);
         }
     }
     as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
 }
 
 void
 MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
             SetCond_ sc, Assembler::Condition c)
@@ -393,18 +393,18 @@ void
 MacroAssemblerARM::ma_nop()
 {
     as_nop();
 }
 
 Instruction *
 NextInst(Instruction *i)
 {
-    if (i == NULL)
-        return NULL;
+    if (i == nullptr)
+        return nullptr;
     return i->next();
 }
 
 void
 MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c,
                                    RelocStyle rs, Instruction *i)
 {
     int32_t imm = imm_.value;
@@ -414,25 +414,25 @@ MacroAssemblerARM::ma_movPatchable(Imm32
         // The InstructionIterator already does this and handles edge cases,
         // so, just asking an iterator for its current instruction should be
         // enough to make sure we don't accidentally inspect an artificial guard.
         i = InstructionIterator(i).cur();
     }
     switch(rs) {
       case L_MOVWT:
         as_movw(dest, Imm16(imm & 0xffff), c, i);
-        // i can be NULL here.  that just means "insert in the next in sequence."
-        // NextInst is special cased to not do anything when it is passed NULL, so two
-        // consecutive instructions will be inserted.
+        // i can be nullptr here.  that just means "insert in the next in sequence."
+        // NextInst is special cased to not do anything when it is passed nullptr, so
+        // two consecutive instructions will be inserted.
         i = NextInst(i);
         as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i);
         break;
       case L_LDR:
-        if(i == NULL)
-            as_Imm32Pool(dest, imm, NULL, c);
+        if(i == nullptr)
+            as_Imm32Pool(dest, imm, nullptr, c);
         else
             as_WritePoolEntry(i, c, imm);
         break;
     }
 }
 
 void
 MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest,
@@ -1314,21 +1314,21 @@ MacroAssemblerARM::ma_b(void *target, Re
     switch (b_type()) {
       case Assembler::B_MOVWT:
         as_movw(ScratchRegister, Imm16(trg & 0xffff), c);
         as_movt(ScratchRegister, Imm16(trg >> 16), c);
         // this is going to get the branch predictor pissed off.
         as_bx(ScratchRegister, c);
         break;
       case Assembler::B_LDR_BX:
-        as_Imm32Pool(ScratchRegister, trg, NULL, c);
+        as_Imm32Pool(ScratchRegister, trg, nullptr, c);
         as_bx(ScratchRegister, c);
         break;
       case Assembler::B_LDR:
-        as_Imm32Pool(pc, trg, NULL, c);
+        as_Imm32Pool(pc, trg, nullptr, c);
         if (c == Always)
             m_buffer.markGuard();
         break;
       default:
         MOZ_ASSUME_UNREACHABLE("Other methods of generating tracable jumps NYI");
     }
 }
 
@@ -1472,17 +1472,17 @@ MacroAssemblerARM::ma_vimm(double value,
             if (enc.isValid()) {
                 as_vimm(dest, enc, cc);
                 return;
             }
 
         }
     }
     // Fall back to putting the value in a pool.
-    as_FImm64Pool(dest, value, NULL, cc);
+    as_FImm64Pool(dest, value, nullptr, cc);
 }
 
 static inline uint32_t
 Float32Word(const float& value)
 {
     return *reinterpret_cast<const uint32_t*>(&value);
 }
 
@@ -1500,17 +1500,17 @@ MacroAssemblerARM::ma_vimm_f32(float val
 
         VFPImm enc(DoubleHighWord(double(value)));
         if (enc.isValid()) {
             as_vimm(vd, enc, cc);
             return;
         }
     }
     // Fall back to putting the value in a pool.
-    as_FImm32Pool(vd, value, NULL, cc);
+    as_FImm32Pool(vd, value, nullptr, cc);
 }
 
 void
 MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc)
 {
     as_vcmp(VFPRegister(src1), VFPRegister(src2), cc);
 }
 void
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -83,19 +83,19 @@ class MacroAssemblerARM : public Assembl
     void ma_alu(Register src1, Imm32 imm, Register dest,
                 ALUOp op,
                 SetCond_ sc =  NoSetCond, Condition c = Always);
 
     void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
                 SetCond_ sc = NoSetCond, Condition c = Always);
     void ma_nop();
     void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
-                         RelocStyle rs, Instruction *i = NULL);
+                         RelocStyle rs, Instruction *i = nullptr);
     void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
-                         RelocStyle rs, Instruction *i = NULL);
+                         RelocStyle rs, Instruction *i = nullptr);
     // These should likely be wrapped up as a set of macros
     // or something like that.  I cannot think of a good reason
     // to explicitly have all of this code.
     // ALU based ops
     // mov
     void ma_mov(Register src, Register dest,
                 SetCond_ sc = NoSetCond, Condition c = Always);
 
--- a/js/src/jit/arm/Trampoline-arm.cpp
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -467,17 +467,17 @@ IonRuntime::generateArgumentsRectifier(J
     // Construct IonJSFrameLayout.
     masm.ma_push(r0); // actual arguments.
     masm.pushCalleeToken(r1, mode);
     masm.ma_push(r6); // frame descriptor.
 
     // Call the target function.
     // Note that this code assumes the function is JITted.
     masm.ma_ldr(DTRAddr(r1, DtrOffImm(JSFunction::offsetOfNativeOrScript())), r3);
-    masm.loadBaselineOrIonRaw(r3, r3, mode, NULL);
+    masm.loadBaselineOrIonRaw(r3, r3, mode, nullptr);
     masm.ma_callIonHalfPush(r3);
 
     uint32_t returnOffset = masm.currentOffset();
 
     // arg1
     //  ...
     // argN
     // num actual args
@@ -814,22 +814,22 @@ IonRuntime::generateVMWrapper(JSContext 
     masm.leaveExitFrame();
     masm.retn(Imm32(sizeof(IonExitFrameLayout) +
                     f.explicitStackSlots() * sizeof(void *) +
                     f.extraValuesToPop * sizeof(Value)));
 
     Linker linker(masm);
     IonCode *wrapper = linker.newCode(cx, JSC::OTHER_CODE);
     if (!wrapper)
-        return NULL;
+        return nullptr;
 
     // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
     // use relookupOrAdd instead of add.
     if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
-        return NULL;
+        return nullptr;
 
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
 #endif
 
     return wrapper;
 }
 
@@ -885,24 +885,24 @@ IonRuntime::generateDebugTrapHandler(JSC
     Register scratch1 = r0;
     Register scratch2 = r1;
 
     // Load BaselineFrame pointer in scratch1.
     masm.mov(r11, scratch1);
     masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
 
     // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
-    // the stub frame has a NULL ICStub pointer, since this pointer is marked
-    // during GC.
-    masm.movePtr(ImmPtr(NULL), BaselineStubReg);
+    // the stub frame has a nullptr ICStub pointer, since this pointer is
+    // marked during GC.
+    masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
     EmitEnterStubFrame(masm, scratch2);
 
     IonCode *code = cx->runtime()->ionRuntime()->getVMWrapper(HandleDebugTrapInfo);
     if (!code)
-        return NULL;
+        return nullptr;
 
     masm.push(lr);
     masm.push(scratch1);
     EmitCallVM(code, masm);
 
     EmitLeaveStubFrame(masm);
 
     // If the stub returns |true|, we have to perform a forced return
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -105,17 +105,17 @@ struct ImmWord
 };
 
 #ifdef DEBUG
 static inline bool
 IsCompilingAsmJS()
 {
     // asm.js compilation pushes an IonContext with a null JSCompartment.
     IonContext *ictx = MaybeGetIonContext();
-    return ictx && ictx->compartment == NULL;
+    return ictx && ictx->compartment == nullptr;
 }
 #endif
 
 // Pointer to be embedded as an immediate in an instruction.
 struct ImmPtr
 {
     void *value;
 
@@ -165,17 +165,17 @@ struct ImmPtr
 
 // The same as ImmPtr except that the intention is to patch this
 // instruction. The initial value of the immediate is 'addr' and this value is
 // either clobbered or used in the patching process.
 struct PatchedImmPtr {
     void *value;
 
     explicit PatchedImmPtr()
-      : value(NULL)
+      : value(nullptr)
     { }
     explicit PatchedImmPtr(const void *value)
       : value(const_cast<void*>(value))
     { }
 };
 
 // Used for immediates which require relocation.
 struct ImmGCPtr
@@ -227,17 +227,17 @@ struct AbsoluteAddress {
 
 // The same as AbsoluteAddress except that the intention is to patch this
 // instruction. The initial value of the immediate is 'addr' and this value is
 // either clobbered or used in the patching process.
 struct PatchedAbsoluteAddress {
     void *addr;
 
     explicit PatchedAbsoluteAddress()
-      : addr(NULL)
+      : addr(nullptr)
     { }
     explicit PatchedAbsoluteAddress(const void *addr)
       : addr(const_cast<void*>(addr))
     { }
 };
 
 // Specifies an address computed in the form of a register base and a constant,
 // 32-bit offset.
@@ -549,17 +549,17 @@ class CodeLocationJump
 #endif
 
 #ifdef JS_SMALL_BRANCH
     uint8_t *jumpTableEntry_;
 #endif
 
   public:
     CodeLocationJump() {
-        raw_ = NULL;
+        raw_ = nullptr;
         setUninitialized();
 #ifdef JS_SMALL_BRANCH
         jumpTableEntry_ = (uint8_t *) 0xdeadab1e;
 #endif
     }
     CodeLocationJump(IonCode *code, CodeOffsetJump base) {
         *this = base;
         repoint(code);
@@ -568,17 +568,17 @@ class CodeLocationJump
     void operator = (CodeOffsetJump base) {
         raw_ = (uint8_t *) base.offset();
         setRelative();
 #ifdef JS_SMALL_BRANCH
         jumpTableEntry_ = (uint8_t *) base.jumpTableIndex();
 #endif
     }
 
-    void repoint(IonCode *code, MacroAssembler* masm = NULL);
+    void repoint(IonCode *code, MacroAssembler* masm = nullptr);
 
     uint8_t *raw() const {
         JS_ASSERT(state_ == Absolute);
         return raw_;
     }
     uint8_t *offset() const {
         JS_ASSERT(state_ == Relative);
         return raw_;
@@ -613,17 +613,17 @@ class CodeLocationLabel
     void setAbsolute() const {
     }
     void setRelative() const {
     }
 #endif
 
   public:
     CodeLocationLabel() {
-        raw_ = NULL;
+        raw_ = nullptr;
         setUninitialized();
     }
     CodeLocationLabel(IonCode *code, CodeOffsetLabel base) {
         *this = base;
         repoint(code);
     }
     CodeLocationLabel(IonCode *code) {
         raw_ = code->raw();
@@ -637,17 +637,17 @@ class CodeLocationLabel
     void operator = (CodeOffsetLabel base) {
         raw_ = (uint8_t *)base.offset();
         setRelative();
     }
     ptrdiff_t operator - (const CodeLocationLabel &other) {
         return raw_ - other.raw_;
     }
 
-    void repoint(IonCode *code, MacroAssembler *masm = NULL);
+    void repoint(IonCode *code, MacroAssembler *masm = nullptr);
 
 #ifdef DEBUG
     bool isSet() const {
         return state_ != Uninitialized;
     }
 #endif
 
     uint8_t *raw() const {
--- a/js/src/jit/shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/shared/Assembler-x86-shared.cpp
@@ -143,10 +143,10 @@ AutoFlushCache::flushAnyway()
 }
 
 AutoFlushCache::~AutoFlushCache()
 {
     if (!runtime_)
         return;
 
     if (runtime_->flusher() == this)
-        runtime_->setFlusher(NULL);
+        runtime_->setFlusher(nullptr);
 }
--- a/js/src/jit/shared/BaselineCompiler-shared.h
+++ b/js/src/jit/shared/BaselineCompiler-shared.h
@@ -67,21 +67,21 @@ class BaselineCompilerShared
     mozilla::DebugOnly<bool> inCall_;
 
     CodeOffsetLabel spsPushToggleOffset_;
 
     BaselineCompilerShared(JSContext *cx, HandleScript script);
 
     ICEntry *allocateICEntry(ICStub *stub, bool isForOp) {
         if (!stub)
-            return NULL;
+            return nullptr;
 
         // Create the entry and add it to the vector.
         if (!icEntries_.append(ICEntry((uint32_t) (pc - script->code), isForOp)))
-            return NULL;
+            return nullptr;
         ICEntry &vecEntry = icEntries_[icEntries_.length() - 1];
 
         // Set the first stub for the IC entry to the fallback stub
         vecEntry.setFirstStub(stub);
 
         // Return pointer to the IC entry
         return &vecEntry;
     }
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -30,23 +30,23 @@ CodeGeneratorShared::ensureMasm(MacroAss
 {
     if (masmArg)
         return *masmArg;
     maybeMasm_.construct();
     return maybeMasm_.ref();
 }
 
 CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masmArg)
-  : oolIns(NULL),
+  : oolIns(nullptr),
     maybeMasm_(),
     masm(ensureMasm(masmArg)),
     gen(gen),
     graph(*graph),
-    current(NULL),
-    deoptTable_(NULL),
+    current(nullptr),
+    deoptTable_(nullptr),
 #ifdef DEBUG
     pushedArgs_(0),
 #endif
     lastOsiPointOffset_(0),
     sps_(&GetIonContext()->runtime->spsProfiler, &lastPC_),
     osrEntryOffset_(0),
     skipArgCheckEntryOffset_(0),
     frameDepth_(graph->localSlotCount() * sizeof(STACK_SLOT_SIZE) +
@@ -94,32 +94,32 @@ CodeGeneratorShared::generateOutOfLineCo
         lastPC_ = outOfLineCode_[i]->pc();
         sps_.setPushed(outOfLineCode_[i]->script());
         outOfLineCode_[i]->bind(&masm);
 
         oolIns = outOfLineCode_[i];
         if (!outOfLineCode_[i]->generate(this))
             return false;
     }
-    oolIns = NULL;
+    oolIns = nullptr;
 
     return true;
 }
 
 bool
 CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code)
 {
     code->setFramePushed(masm.framePushed());
     // If an OOL instruction adds another OOL instruction, then use the original
     // instruction's script/pc instead of the basic block's that we're on
     // because they're probably not relevant any more.
     if (oolIns)
         code->setSource(oolIns->script(), oolIns->pc());
     else
-        code->setSource(current ? current->mir()->info().script() : NULL, lastPC_);
+        code->setSource(current ? current->mir()->info().script() : nullptr, lastPC_);
     return outOfLineCode_.append(code);
 }
 
 // see OffsetOfFrameSlot
 static inline int32_t
 ToStackIndex(LAllocation *a)
 {
     if (a->isStackSlot()) {
@@ -676,17 +676,17 @@ class OutOfLineTruncateSlow : public Out
     }
 };
 
 OutOfLineCode *
 CodeGeneratorShared::oolTruncateDouble(const FloatRegister &src, const Register &dest)
 {
     OutOfLineTruncateSlow *ool = new OutOfLineTruncateSlow(src, dest);
     if (!addOutOfLineCode(ool))
-        return NULL;
+        return nullptr;
     return ool;
 }
 
 bool
 CodeGeneratorShared::emitTruncateDouble(const FloatRegister &src, const Register &dest)
 {
     OutOfLineCode *ool = oolTruncateDouble(src, dest);
     if (!ool)
@@ -755,17 +755,17 @@ CodeGeneratorShared::markArgumentSlots(L
 }
 
 OutOfLineAbortPar *
 CodeGeneratorShared::oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock,
                                  jsbytecode *bytecode)
 {
     OutOfLineAbortPar *ool = new OutOfLineAbortPar(cause, basicBlock, bytecode);
     if (!ool || !addOutOfLineCode(ool))
-        return NULL;
+        return nullptr;
     return ool;
 }
 
 OutOfLineAbortPar *
 CodeGeneratorShared::oolAbortPar(ParallelBailoutCause cause, LInstruction *lir)
 {
     MDefinition *mir = lir->mirRaw();
     MBasicBlock *block = mir->block();
@@ -779,31 +779,31 @@ CodeGeneratorShared::oolAbortPar(Paralle
     return oolAbortPar(cause, block, pc);
 }
 
 OutOfLinePropagateAbortPar *
 CodeGeneratorShared::oolPropagateAbortPar(LInstruction *lir)
 {
     OutOfLinePropagateAbortPar *ool = new OutOfLinePropagateAbortPar(lir);
     if (!ool || !addOutOfLineCode(ool))
-        return NULL;
+        return nullptr;
     return ool;
 }
 
 bool
 OutOfLineAbortPar::generate(CodeGeneratorShared *codegen)
 {
-    codegen->callTraceLIR(0xDEADBEEF, NULL, "AbortPar");
+    codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
     return codegen->visitOutOfLineAbortPar(this);
 }
 
 bool
 OutOfLinePropagateAbortPar::generate(CodeGeneratorShared *codegen)
 {
-    codegen->callTraceLIR(0xDEADBEEF, NULL, "AbortPar");
+    codegen->callTraceLIR(0xDEADBEEF, nullptr, "AbortPar");
     return codegen->visitOutOfLinePropagateAbortPar(this);
 }
 
 bool
 CodeGeneratorShared::callTraceLIR(uint32_t blockIndex, LInstruction *lir,
                                   const char *bailoutName)
 {
     JS_ASSERT_IF(!lir, bailoutName);
@@ -829,25 +829,25 @@ CodeGeneratorShared::callTraceLIR(uint32
     if (lir) {
         lirIndex = lir->id();
         lirOpName = lir->opName();
         if (MDefinition *mir = lir->mirRaw()) {
             mirOpName = mir->opName();
             script = mir->block()->info().script();
             pc = mir->trackedPc();
         } else {
-            mirOpName = NULL;
-            script = NULL;
-            pc = NULL;
+            mirOpName = nullptr;
+            script = nullptr;
+            pc = nullptr;
         }
     } else {
         blockIndex = lirIndex = 0xDEADBEEF;
         lirOpName = mirOpName = bailoutName;
-        script = NULL;
-        pc = NULL;
+        script = nullptr;
+        pc = nullptr;
     }
 
     masm.store32(Imm32(blockIndex),
                  Address(StackPointer, offsetof(IonLIRTraceData, blockIndex)));
     masm.store32(Imm32(lirIndex),
                  Address(StackPointer, offsetof(IonLIRTraceData, lirIndex)));
     masm.store32(Imm32(execMode),
                  Address(StackPointer, offsetof(IonLIRTraceData, execModeInt)));
@@ -887,22 +887,22 @@ CodeGeneratorShared::labelForBackedgeWit
             if (iter->isLabel() || iter->isMoveGroup()) {
                 // Continue searching for an interrupt check.
             } else if (iter->isInterruptCheckImplicit()) {
                 return iter->toInterruptCheckImplicit()->oolEntry();
             } else {
                 // The interrupt check should be the first instruction in the
                 // loop header other than the initial label and move groups.
                 JS_ASSERT(iter->isInterruptCheck() || iter->isCheckInterruptPar());
-                return NULL;
+                return nullptr;
             }
         }
     }
 
-    return NULL;
+    return nullptr;
 }
 
 void
 CodeGeneratorShared::jumpToBlock(MBasicBlock *mir)
 {
     // No jump necessary if we can fall through to the next block.
     if (isNextBlock(mir->lir()))
         return;
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -359,23 +359,23 @@ class CodeGeneratorShared : public LInst
         masm.storeCallFloatResult(reg);
     }
 
     template <typename T>
     void storeResultValueTo(const T &t) {
         masm.storeCallResultValue(t);
     }
 
-    bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = NULL);
+    bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = nullptr);
 
     template <class ArgSeq, class StoreOutputTo>
     inline OutOfLineCode *oolCallVM(const VMFunction &fun, LInstruction *ins, const ArgSeq &args,
                                     const StoreOutputTo &out);
 
-    bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = NULL) {
+    bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = nullptr) {
         return callVM(f[gen->info().executionMode()], ins, dynStack);
     }
 
     template <class ArgSeq, class StoreOutputTo>
     inline OutOfLineCode *oolCallVM(const VMFunctionsModal &f, LInstruction *ins,
                                     const ArgSeq &args, const StoreOutputTo &out)
     {
         return oolCallVM(f[gen->info().executionMode()], ins, args, out);
@@ -406,17 +406,17 @@ class CodeGeneratorShared : public LInst
 
   public:
     template <class ArgSeq, class StoreOutputTo>
     bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);
 
     bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);
 
   public:
-    bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = NULL);
+    bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = nullptr);
 
     // Parallel aborts:
     //
     //    Parallel aborts work somewhat differently from sequential
     //    bailouts.  When an abort occurs, we first invoke
     //    ReportAbortPar() and then we return JS_ION_ERROR.  Each
     //    call on the stack will check for this error return and
     //    propagate it upwards until the C++ code that invoked the ion
@@ -440,18 +440,18 @@ class OutOfLineCode : public TempObject
     Label rejoin_;
     uint32_t framePushed_;
     jsbytecode *pc_;
     JSScript *script_;
 
   public:
     OutOfLineCode()
       : framePushed_(0),
-        pc_(NULL),
-        script_(NULL)
+        pc_(nullptr),
+        script_(nullptr)
     { }
 
     virtual bool generate(CodeGeneratorShared *codegen) = 0;
 
     Label *entry() {
         return &entry_;
     }
     virtual void bind(MacroAssembler *masm) {
@@ -667,17 +667,17 @@ class OutOfLineCallVM : public OutOfLine
 
 template <class ArgSeq, class StoreOutputTo>
 inline OutOfLineCode *
 CodeGeneratorShared::oolCallVM(const VMFunction &fun, LInstruction *lir, const ArgSeq &args,
                                const StoreOutputTo &out)
 {
     OutOfLineCode *ool = new OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
     if (!addOutOfLineCode(ool))
-        return NULL;
+        return nullptr;
     return ool;
 }
 
 template <class ArgSeq, class StoreOutputTo>
 bool
 CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool)
 {
     LInstruction *lir = ool->lir();
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -790,17 +790,17 @@ CodeGeneratorX86Shared::visitDivI(LDivI 
 
     MDiv *mir = ins->mir();
 
     JS_ASSERT(remainder == edx);
     JS_ASSERT(lhs == eax);
     JS_ASSERT(output == eax);
 
     Label done;
-    ReturnZero *ool = NULL;
+    ReturnZero *ool = nullptr;
 
     // Handle divide by zero.
     if (mir->canBeDivideByZero()) {
         masm.testl(rhs, rhs);
         if (mir->isTruncated()) {
             // Truncated division by zero is zero (Infinity|0 == 0)
             if (!ool)
                 ool = new ReturnZero(output);
@@ -947,18 +947,18 @@ CodeGeneratorX86Shared::visitModI(LModI 
     JS_ASSERT(temp == eax);
 
     if (lhs != temp) {
         masm.mov(lhs, temp);
         lhs = temp;
     }
 
     Label done;
-    ReturnZero *ool = NULL;
-    ModOverflowCheck *overflow = NULL;
+    ReturnZero *ool = nullptr;
+    ModOverflowCheck *overflow = nullptr;
 
     // Prevent divide by zero.
     if (ins->mir()->canBeDivideByZero()) {
         masm.testl(rhs, rhs);
         if (ins->mir()->isTruncated()) {
             if (!ool)
                 ool = new ReturnZero(edx);
             masm.j(Assembler::Zero, ool->entry());
--- a/js/src/jit/shared/IonAssemblerBuffer.h
+++ b/js/src/jit/shared/IonAssemblerBuffer.h
@@ -57,39 +57,39 @@ struct BufferSlice {
     BufferSlice<SliceSize> *prev;
     BufferSlice<SliceSize> *next;
     // How much data has been added to the current node.
     uint32_t nodeSize;
   public:
     BufferSlice *getNext() { return this->next; }
     BufferSlice *getPrev() { return this->prev; }
     void setNext(BufferSlice<SliceSize> *next_) {
-        JS_ASSERT(this->next == NULL);
-        JS_ASSERT(next_->prev == NULL);
+        JS_ASSERT(this->next == nullptr);
+        JS_ASSERT(next_->prev == nullptr);
         this->next = next_;
         next_->prev = this;
     }
 
     uint8_t instructions [SliceSize];
     unsigned int size() {
         return nodeSize;
     }
-    BufferSlice() : prev(NULL), next(NULL), nodeSize(0) {}
+    BufferSlice() : prev(nullptr), next(nullptr), nodeSize(0) {}
     void putBlob(uint32_t instSize, uint8_t* inst) {
-        if (inst != NULL)
+        if (inst != nullptr)
             memcpy(&instructions[size()], inst, instSize);
         nodeSize += instSize;
     }
 };
 
 template<int SliceSize, class Inst>
 struct AssemblerBuffer
 {
   public:
-    AssemblerBuffer() : head(NULL), tail(NULL), m_oom(false), m_bail(false), bufferSize(0), LifoAlloc_(8192) {}
+    AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false), m_bail(false), bufferSize(0), LifoAlloc_(8192) {}
   protected:
     typedef BufferSlice<SliceSize> Slice;
     typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
     Slice *head;
     Slice *tail;
   public:
     bool m_oom;
     bool m_bail;
@@ -100,33 +100,33 @@ struct AssemblerBuffer
         // make sure the requested alignment is a power of two.
         JS_ASSERT((alignment & (alignment-1)) == 0);
         return !(size() & (alignment - 1));
     }
     virtual Slice *newSlice(LifoAlloc &a) {
         Slice *tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
         if (!tmp) {
             m_oom = true;
-            return NULL;
+            return nullptr;
         }
         new (tmp) Slice;
         return tmp;
     }
     bool ensureSpace(int size) {
-        if (tail != NULL && tail->size()+size <= SliceSize)
+        if (tail != nullptr && tail->size()+size <= SliceSize)
             return true;
         Slice *tmp = newSlice(LifoAlloc_);
-        if (tmp == NULL)
+        if (tmp == nullptr)
             return false;
-        if (tail != NULL) {
+        if (tail != nullptr) {
             bufferSize += tail->size();
             tail->setNext(tmp);
         }
         tail = tmp;
-        if (head == NULL) {
+        if (head == nullptr) {
             finger = tmp;
             finger_offset = 0;
             head = tmp;
         }
         return true;
     }
 
     BufferOffset putByte(uint8_t value) {
@@ -144,17 +144,17 @@ struct AssemblerBuffer
         if (!ensureSpace(instSize))
             return BufferOffset();
         BufferOffset ret = nextOffset();
         tail->putBlob(instSize, inst);
         return ret;
     }
     unsigned int size() const {
         int executableSize;
-        if (tail != NULL)
+        if (tail != nullptr)
             executableSize = bufferSize + tail->size();
         else
             executableSize = bufferSize;
         return executableSize;
     }
     unsigned int uncheckedSize() const {
         return size();
     }
@@ -172,17 +172,17 @@ struct AssemblerBuffer
     }
     // finger for speeding up accesses
     Slice *finger;
     unsigned int finger_offset;
     Inst *getInst(BufferOffset off) {
         int local_off = off.getOffset();
         // don't update the structure's finger in place, so there is the option
         // to not update it.
-        Slice *cur = NULL;
+        Slice *cur = nullptr;
         int cur_off;
         // get the offset that we'd be dealing with by walking through backwards
         int end_off = bufferSize - local_off;
         // If end_off is negative, then it is in the last chunk, and there is no
         // real work to be done.
         if (end_off <= 0) {
             return (Inst*)&tail->instructions[-end_off];
         }
@@ -199,46 +199,46 @@ struct AssemblerBuffer
             cur_off = 0;
         } else {
             // it is closest to the end
             cur = tail;
             cur_off = bufferSize;
         }
         int count = 0;
         if (local_off < cur_off) {
-            for (; cur != NULL; cur = cur->getPrev(), cur_off -= cur->size()) {
+            for (; cur != nullptr; cur = cur->getPrev(), cur_off -= cur->size()) {
                 if (local_off >= cur_off) {
                     local_off -= cur_off;
                     break;
                 }
                 count++;
             }
-            JS_ASSERT(cur != NULL);
+            JS_ASSERT(cur != nullptr);
         } else {
-            for (; cur != NULL; cur = cur->getNext()) {
+            for (; cur != nullptr; cur = cur->getNext()) {
                 int cur_size = cur->size();
                 if (local_off < cur_off + cur_size) {
                     local_off -= cur_off;
                     break;
                 }
                 cur_off += cur_size;
                 count++;
             }
-            JS_ASSERT(cur != NULL);
+            JS_ASSERT(cur != nullptr);
         }
         if (count > 2 || used_finger) {
             finger = cur;
             finger_offset = cur_off;
         }
         // the offset within this node should not be larger than the node itself.
         JS_ASSERT(local_off < (int)cur->size());
         return (Inst*)&cur->instructions[local_off];
     }
     BufferOffset nextOffset() const {
-        if (tail != NULL)
+        if (tail != nullptr)
             return BufferOffset(bufferSize + tail->size());
         else
             return BufferOffset(bufferSize);
     }
     BufferOffset prevOffset() const {
         MOZ_ASSUME_UNREACHABLE("Don't current record lastInstSize");
     }
 
--- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -46,17 +46,17 @@ struct Pool
     // until very late.
     // Lastly, it may be beneficial to interleave the pools.  I have absolutely no idea
     // how that will work, but my suspicions are that it will be difficult.
 
     BufferOffset limitingUser;
     int limitingUsee;
 
     Pool(int maxOffset_, int immSize_, int instSize_, int bias_, int alignment_, LifoAlloc &LifoAlloc_,
-         bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = NULL)
+         bool isBackref_ = false, bool canDedup_ = false, Pool *other_ = nullptr)
         : maxOffset(maxOffset_), immSize(immSize_), instSize(instSize_),
           bias(bias_), alignment(alignment_),
           isBackref(isBackref_), canDedup(canDedup_), other(other_),
           poolData(static_cast<uint8_t *>(LifoAlloc_.alloc(8*immSize))), numEntries(0),
           buffSize(8), loadOffsets(), limitingUser(), limitingUsee(INT_MIN)
     {
     }
     static const int garbage=0xa5a5a5a5;
@@ -130,36 +130,36 @@ struct Pool
     }
 
     // By the time this function is called, we'd damn well better know that this is going to succeed.
     uint32_t insertEntry(uint8_t *data, BufferOffset off, LifoAlloc &LifoAlloc_) {
         if (numEntries == buffSize) {
             buffSize <<= 1;
             uint8_t *tmp = static_cast<uint8_t*>(LifoAlloc_.alloc(immSize * buffSize));
             memcpy(tmp, poolData,  immSize * numEntries);
-            if (poolData == NULL) {
+            if (poolData == nullptr) {
                 buffSize = 0;
                 return -1;
             }
             poolData = tmp;
         }
         memcpy(&poolData[numEntries * immSize], data, immSize);
         loadOffsets.append(off.getOffset());
         return numEntries++;
     }
 
     bool reset(LifoAlloc &a) {
         numEntries = 0;
         buffSize = 8;
         poolData = static_cast<uint8_t*>(a.alloc(buffSize * immSize));
-        if (poolData == NULL)
+        if (poolData == nullptr)
             return false;
 
         void *otherSpace = a.alloc(sizeof(Pool));
-        if (otherSpace == NULL)
+        if (otherSpace == nullptr)
             return false;
 
         other = new (otherSpace) Pool(other->maxOffset, other->immSize, other->instSize,
                                       other->bias, other->alignment, a, other->isBackref,
                                       other->canDedup);
         new (&loadOffsets) LoadOffsets;
 
         limitingUser = BufferOffset();
@@ -207,17 +207,17 @@ struct Pool
 template <int SliceSize, int InstBaseSize>
 struct BufferSliceTail : public BufferSlice<SliceSize> {
     Pool *data;
     uint8_t isBranch[(SliceSize + (InstBaseSize * 8 - 1)) / (InstBaseSize * 8)];
     bool isNatural : 1;
     BufferSliceTail *getNext() {
         return (BufferSliceTail *)this->next;
     }
-    BufferSliceTail() : data(NULL), isNatural(true) {
+    BufferSliceTail() : data(nullptr), isNatural(true) {
         memset(isBranch, 0, sizeof(isBranch));
     }
     void markNextAsBranch() {
         int idx = this->nodeSize / InstBaseSize;
         isBranch[idx >> 3] |= 1 << (idx & 0x7);
     }
     bool isNextBranch() {
         if (this->nodeSize == InstBaseSize)
@@ -360,30 +360,30 @@ struct AssemblerBufferWithConstantPool :
     BufferSlice ** getTail() {
         return (BufferSlice**)&this->tail;
     }
 
     virtual BufferSlice *newSlice(LifoAlloc &a) {
         BufferSlice *tmp = static_cast<BufferSlice*>(a.alloc(sizeof(BufferSlice)));
         if (!tmp) {
             this->m_oom = true;
-            return NULL;
+            return nullptr;
         }
         new (tmp) BufferSlice;
         return tmp;
     }
   public:
     AssemblerBufferWithConstantPool(int guardSize_, int headerSize_, int footerSize_, Pool *pools_, int instBufferAlign_)
         : guardSize(guardSize_), headerSize(headerSize_),
           footerSize(footerSize_),
           pools(pools_),
           instBufferAlign(instBufferAlign_), numDumps(0),
-          poolInfo(NULL),
+          poolInfo(nullptr),
           poolSize(0), canNotPlacePool(0), inBackref(false),
-          perforatedNode(NULL), id(-1)
+          perforatedNode(nullptr), id(-1)
     {
         for (int idx = 0; idx < numPoolKinds; idx++) {
             entryCount[idx] = 0;
         }
     }
 
     // We need to wait until an AutoIonContextAlloc is created by the
     // IonMacroAssembler, before allocating any space.
@@ -406,29 +406,29 @@ struct AssemblerBufferWithConstantPool :
             JS_ASSERT(pools[idx].numEntries == 0 && pools[idx].other->numEntries == 0);
         }
         typedef uint8_t Chunk[InstBaseSize];
         mozilla::DebugOnly<Chunk *> start = (Chunk*)dest_;
         Chunk *dest = (Chunk*)(((uint32_t)dest_ + instBufferAlign - 1) & ~(instBufferAlign -1));
         int curIndex = 0;
         int curInstOffset = 0;
         JS_ASSERT(start == dest);
-        for (BufferSlice * cur = *getHead(); cur != NULL; cur = cur->getNext()) {
+        for (BufferSlice * cur = *getHead(); cur != nullptr; cur = cur->getNext()) {
             Chunk *src = (Chunk*)cur->instructions;
             for (unsigned int idx = 0; idx <cur->size()/InstBaseSize;
                  idx++, curInstOffset += InstBaseSize) {
                 // Is the current instruction a branch?
                 if (cur->isBranch[idx >> 3] & (1<<(idx&7))) {
                     // It's a branch. Fix up the branchiness!
                     patchBranch((Inst*)&src[idx], curIndex, BufferOffset(curInstOffset));
                 }
                 memcpy(&dest[idx], &src[idx], sizeof(Chunk));
             }
             dest+=cur->size()/InstBaseSize;
-            if (cur->data != NULL) {
+            if (cur->data != nullptr) {
                 // have the repatcher move on to the next pool
                 curIndex++;
                 // loop over all of the pools, copying them into place.
                 uint8_t *poolDest = (uint8_t*)dest;
                 Asm::writePoolHeader(poolDest, cur->data, cur->isNatural);
                 poolDest += headerSize;
                 for (int idx = 0; idx < numPoolKinds; idx++) {
                     Pool *curPool = &cur->data[idx];
@@ -449,60 +449,60 @@ struct AssemblerBufferWithConstantPool :
                 Asm::writePoolFooter(poolDest, cur->data, cur->isNatural);
                 poolDest += footerSize;
                 // at this point, poolDest had better still be aligned to a chunk boundary.
                 dest = (Chunk*) poolDest;
             }
         }
     }
 
-    BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = NULL) {
+    BufferOffset insertEntry(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data, PoolEntry *pe = nullptr) {
         if (this->oom() && !this->bail())
             return BufferOffset();
         int token;
-        if (p != NULL) {
+        if (p != nullptr) {
             int poolId = p - pools;
             const char sigil = inBackref ? 'B' : 'F';
 
             IonSpew(IonSpew_Pools, "[%d]{%c} Inserting entry into pool %d", id, sigil, poolId);
             IonSpewStart(IonSpew_Pools, "[%d] data is: 0x", id);
             spewEntry(data, p->immSize);
             IonSpewFin(IonSpew_Pools);
         }
         // insert the pool value
         if (inBackref)
             token = insertEntryBackwards(instSize, inst, p, data);
         else
             token = insertEntryForwards(instSize, inst, p, data);
         // now to get an instruction to write
         PoolEntry retPE;
-        if (p != NULL) {
+        if (p != nullptr) {
             if (this->oom())
                 return BufferOffset();
             int poolId = p - pools;
             IonSpew(IonSpew_Pools, "[%d] Entry has token %d, offset ~%d", id, token, size());
             Asm::insertTokenIntoTag(instSize, inst, token);
             JS_ASSERT(poolId < (1 << poolKindBits));
             JS_ASSERT(poolId >= 0);
             // Figure out the offset within like-kinded pool entries
             retPE = PoolEntry(entryCount[poolId], poolId);
             entryCount[poolId]++;
         }
         // Now inst is a valid thing to insert into the instruction stream
-        if (pe != NULL)
+        if (pe != nullptr)
             *pe = retPE;
         return this->putBlob(instSize, inst);
     }
 
     uint32_t insertEntryBackwards(uint32_t instSize, uint8_t *inst, Pool *p, uint8_t *data) {
         // Unlike the forward case, inserting an instruction without inserting
         // anything into a pool after a pool has been placed doesn't affect
         // anything relevant, so we can skip this check entirely!
 
-        if (p == NULL)
+        if (p == nullptr)
             return INT_MIN;
         // TODO: calculating offsets for the alignment requirements is *hard*
         // Instead, assume that we always add the maximum.
         int poolOffset = footerSize;
         Pool *cur, *tmp;
         // NOTE: we want to process the pools from last to first.
         // Since the last pool is pools[0].other, and the first pool
         // is pools[numPoolKinds-1], we actually want to process this
@@ -511,17 +511,17 @@ struct AssemblerBufferWithConstantPool :
             // fetch the pool for the backwards half.
             tmp = cur->other;
             if (p == cur)
                 tmp->updateLimiter(this->nextOffset());
 
             if (tmp->checkFullBackref(poolOffset, perforation.getOffset())) {
                 // uh-oh, the backwards pool is full.  Time to finalize it, and
                 // switch to a new forward pool.
-                if (p != NULL)
+                if (p != nullptr)
                     IonSpew(IonSpew_Pools, "[%d]Inserting pool entry caused a spill", id);
                 else
                     IonSpew(IonSpew_Pools, "[%d]Inserting instruction(%d) caused a spill", id, size());
 
                 this->finishPool();
                 if (this->oom())
                     return uint32_t(-1);
                 return this->insertEntryForwards(instSize, inst, p, data);
@@ -568,37 +568,37 @@ struct AssemblerBufferWithConstantPool :
 
             // If we're at the pool we want to insert into, find a new limiter
             // before we do the range check.
             if (p == tmp) {
                 p->updateLimiter(BufferOffset(nextOffset));
             }
             if (tmp->checkFull(poolOffset)) {
                 // uh-oh. DUMP DUMP DUMP
-                if (p != NULL)
+                if (p != nullptr)
                     IonSpew(IonSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
                 else
                     IonSpew(IonSpew_Pools, "[%d] Inserting instruction(%d) caused a spill", id, size());
 
                 this->dumpPool();
                 return this->insertEntryBackwards(instSize, inst, p, data);
             }
             // include the size of this pool in the running total
             if (p == tmp) {
                 nextOffset += tmp->immSize;
             }
             nextOffset += tmp->immSize * tmp->numEntries;
         }
-        if (p == NULL) {
+        if (p == nullptr) {
             return INT_MIN;
         }
         return p->insertEntry(data, this->nextOffset(), this->LifoAlloc_);
     }
     BufferOffset putInt(uint32_t value) {
-        return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, NULL, NULL);
+        return insertEntry(sizeof(uint32_t) / sizeof(uint8_t), (uint8_t*)&value, nullptr, nullptr);
     }
     // Mark the current section as an area where we can
     // later go to dump a pool
     void perforate() {
         // If we're filling the backreferences, we don't want to start looking for a new dump site.
         if (inBackref)
             return;
         if (canNotPlacePool)
@@ -633,17 +633,17 @@ struct AssemblerBufferWithConstantPool :
         int initOffset = prevEnd + (perfOffset - prevOffset);
         int finOffset = initOffset;
         bool poolIsEmpty = true;
         for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
             if (pools[poolIdx].numEntries != 0) {
                 poolIsEmpty = false;
                 break;
             }
-            if (pools[poolIdx].other != NULL && pools[poolIdx].other->numEntries != 0) {
+            if (pools[poolIdx].other != nullptr && pools[poolIdx].other->numEntries != 0) {
                 poolIsEmpty = false;
                 break;
             }
         }
         if (!poolIsEmpty) {
             finOffset += headerSize;
             for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
                 finOffset = pools[poolIdx].align(finOffset);
@@ -671,27 +671,27 @@ struct AssemblerBufferWithConstantPool :
         // The per-buffer pools need to be reset, and we need to record the size of the pool.
         IonSpew(IonSpew_Pools, "[%d] Finishing pool %d", id, numDumps);
         JS_ASSERT(inBackref);
         PoolInfo newPoolInfo = getPoolData();
         if (newPoolInfo.size == 0) {
             // The code below also creates a new pool, but that is not necessary, since
             // the pools have not been modified at all.
             new (&perforation) BufferOffset();
-            perforatedNode = NULL;
+            perforatedNode = nullptr;
             inBackref = false;
             IonSpew(IonSpew_Pools, "[%d] Aborting because the pool is empty", id);
             // Bail out early, since we don't want to even pretend these pools exist.
             return;
         }
-        JS_ASSERT(perforatedNode != NULL);
+        JS_ASSERT(perforatedNode != nullptr);
         if (numDumps >= (1<<logBasePoolInfo) && (numDumps & (numDumps-1)) == 0) {
             // need to resize.
             PoolInfo *tmp = static_cast<PoolInfo*>(this->LifoAlloc_.alloc(sizeof(PoolInfo) * numDumps * 2));
-            if (tmp == NULL) {
+            if (tmp == nullptr) {
                 this->fail_oom();
                 return;
             }
             memcpy(tmp, poolInfo, sizeof(PoolInfo) * numDumps);
             poolInfo = tmp;
 
         }
 
@@ -707,17 +707,17 @@ struct AssemblerBufferWithConstantPool :
         }
         LoadOffsets outcasts[1 << poolKindBits];
         uint8_t *outcastEntries[1 << poolKindBits];
         // All of the pool loads referred to by this code are going to
         // need fixing up here.
         int skippedBytes = 0;
         for (int poolIdx = numPoolKinds-1; poolIdx >= 0; poolIdx--) {
             Pool *p =  pools[poolIdx].other;
-            JS_ASSERT(p != NULL);
+            JS_ASSERT(p != nullptr);
             unsigned int idx = p->numEntries-1;
             // Allocate space for tracking information that needs to be propagated to the next pool
             // as well as space for quickly updating the pool entries in the current pool to remove
             // the entries that don't actually fit.  I probably should change this over to a vector
             outcastEntries[poolIdx] = new uint8_t[p->getPoolSize()];
             bool *preservedEntries = new bool[p->numEntries];
             // Hacks on top of Hacks!
             // the patching code takes in the address of the instruction to be patched,
@@ -781,17 +781,17 @@ struct AssemblerBufferWithConstantPool :
                 p->numEntries -= numSkips;
             }
             poolOffset += p->numEntries * p->immSize;
             delete[] preservedEntries;
         }
         // bind the current pool to the perforation point.
         Pool **tmp = &perforatedNode->data;
         *tmp = static_cast<Pool*>(this->LifoAlloc_.alloc(sizeof(Pool) * numPoolKinds));
-        if (tmp == NULL) {
+        if (tmp == nullptr) {
             this->fail_oom();
             return;
         }
         // The above operations may have changed the size of pools!
         // recalibrate the size of the pool.
         newPoolInfo = getPoolData();
         poolInfo[numDumps] = newPoolInfo;
         poolSize += poolInfo[numDumps].size;
@@ -802,17 +802,17 @@ struct AssemblerBufferWithConstantPool :
         // reset everything to the state that it was in when we started
         for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
             if (!pools[poolIdx].reset(this->LifoAlloc_)) {
                 this->fail_oom();
                 return;
             }
         }
         new (&perforation) BufferOffset();
-        perforatedNode = NULL;
+        perforatedNode = nullptr;
         inBackref = false;
 
         // Now that the backwards pool has been emptied, and a new forward pool
         // has been allocated, it is time to populate the new forward pool with
         // any entries that couldn't fit in the backwards pool.
         for (int poolIdx = 0; poolIdx < numPoolKinds; poolIdx++) {
             // Technically, the innermost pool will never have this issue, but it is easier
             // to just handle this case.
@@ -863,17 +863,17 @@ struct AssemblerBufferWithConstantPool :
 
         IonSpew(IonSpew_Pools, "[%d] Dumping %d bytes", id, newPoolInfo.size);
         if (!perforation.assigned()) {
             IonSpew(IonSpew_Pools, "[%d] No Perforation point selected, generating a new one", id);
             // There isn't a perforation here, we need to dump the pool with a guard.
             BufferOffset branch = this->nextOffset();
             bool shouldMarkAsBranch = this->isNextBranch();
             this->markNextAsBranch();
-            this->putBlob(guardSize, NULL);
+            this->putBlob(guardSize, nullptr);
             BufferOffset afterPool = this->nextOffset();
             Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
             markGuard();
             perforatedNode->isNatural = false;
             if (shouldMarkAsBranch)
                 this->markNextAsBranch();
         }
 
@@ -1007,21 +1007,21 @@ struct AssemblerBufferWithConstantPool :
             // b next; <= perforation point
             // next:
             // #beginning of no pool zone
             // push pc
             // blx r12
 
             BufferOffset branch = this->nextOffset();
             this->markNextAsBranch();
-            this->putBlob(guardSize, NULL);
+            this->putBlob(guardSize, nullptr);
             BufferOffset afterPool = this->nextOffset();
             Asm::writePoolGuard(branch, this->getInst(branch), afterPool);
             markGuard();
-            if (perforatedNode != NULL)
+            if (perforatedNode != nullptr)
                 perforatedNode->isNatural = false;
         }
         canNotPlacePool++;
     }
     void leaveNoPool() {
         canNotPlacePool--;
     }
     int size() const {
@@ -1030,21 +1030,21 @@ struct AssemblerBufferWithConstantPool :
     Pool *getPool(int idx) {
         return &pools[idx];
     }
     void markNextAsBranch() {
         // If the previous thing inserted was the last instruction of
         // the node, then whoops, we want to mark the first instruction of
         // the next node.
         this->ensureSpace(InstBaseSize);
-        JS_ASSERT(*this->getTail() != NULL);
+        JS_ASSERT(*this->getTail() != nullptr);
         (*this->getTail())->markNextAsBranch();
     }
     bool isNextBranch() {
-        JS_ASSERT(*this->getTail() != NULL);
+        JS_ASSERT(*this->getTail() != nullptr);
         return (*this->getTail())->isNextBranch();
     }
 
     int uncheckedSize() const {
         PoolInfo pi = getPoolData();
         int codeEnd = this->nextOffset().getOffset();
         return (codeEnd - pi.offset) + pi.finalPos;
     }
@@ -1061,51 +1061,51 @@ struct AssemblerBufferWithConstantPool :
         if (cur == 0)
             return 0;
         return poolInfo[cur-1].finalPos - poolInfo[cur-1].offset;
     }
 
   private:
     void getPEPool(PoolEntry pe, Pool **retP, int32_t * retOffset, int32_t *poolNum) const {
         int poolKind = pe.poolKind();
-        Pool *p = NULL;
+        Pool *p = nullptr;
         uint32_t offset = pe.offset() * pools[poolKind].immSize;
         int idx;
         for (idx = 0; idx < numDumps; idx++) {
             p = &poolInfo[idx].slice->data[poolKind];
             if (p->getPoolSize() > offset)
                 break;
             offset -= p->getPoolSize();
             p = p->other;
             if (p->getPoolSize() > offset)
                 break;
             offset -= p->getPoolSize();
-            p = NULL;
+            p = nullptr;
         }
-        if (poolNum != NULL)
+        if (poolNum != nullptr)
             *poolNum = idx;
         // If this offset is contained in any finished pool, forward or backwards, p now
         // points to that pool; if it is not in any pool (it should be in the currently building pool),
-        // then p is NULL.
-        if (p == NULL) {
+        // then p is nullptr.
+        if (p == nullptr) {
             p = &pools[poolKind];
             if (offset >= p->getPoolSize()) {
                 p = p->other;
                 offset -= p->getPoolSize();
             }
         }
-        JS_ASSERT(p != NULL);
+        JS_ASSERT(p != nullptr);
         JS_ASSERT(offset < p->getPoolSize());
         *retP = p;
         *retOffset = offset;
     }
     uint8_t *getPoolEntry(PoolEntry pe) {
         Pool *p;
         int32_t offset;
-        getPEPool(pe, &p, &offset, NULL);
+        getPEPool(pe, &p, &offset, nullptr);
         return &p->poolData[offset];
     }
     size_t getPoolEntrySize(PoolEntry pe) {
         int idx = pe.poolKind();
         return pools[idx].immSize;
     }
 
   public:
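
Pool::insertEntry() and Pool::reset() above grow the pool's backing store out of a LifoAlloc, doubling buffSize whenever the pool fills up and reporting OOM to the caller. The standalone sketch below keeps only that grow-and-copy shape, with plain malloc/free instead of the LifoAlloc and an invented DemoPool type; it deliberately checks the fresh allocation before copying into it. This is a hedged illustration of the pattern, not the SpiderMonkey implementation.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// A growable pool of fixed-size immediates: double the capacity when it
// fills up, copy the old entries across, and signal failure with SIZE_MAX.
struct DemoPool {
    size_t immSize;        // bytes per entry
    size_t numEntries;
    size_t capacity;       // entries the current allocation can hold
    uint8_t *data;

    explicit DemoPool(size_t immSize_)
      : immSize(immSize_), numEntries(0), capacity(8),
        data(static_cast<uint8_t *>(malloc(immSize_ * 8)))
    {}
    ~DemoPool() { free(data); }

    size_t insertEntry(const uint8_t *entry) {
        if (numEntries == capacity) {
            uint8_t *grown = static_cast<uint8_t *>(malloc(immSize * capacity * 2));
            if (grown == nullptr)
                return SIZE_MAX;          // check the new block before copying
            memcpy(grown, data, immSize * numEntries);
            free(data);
            data = grown;
            capacity *= 2;
        }
        memcpy(&data[numEntries * immSize], entry, immSize);
        return numEntries++;
    }
};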
--- a/js/src/jit/shared/IonFrames-x86-shared.h
+++ b/js/src/jit/shared/IonFrames-x86-shared.h
@@ -65,21 +65,21 @@ class IonJSFrameLayout : public IonCommo
 
     static size_t offsetOfCalleeToken() {
         return offsetof(IonJSFrameLayout, calleeToken_);
     }
     static size_t offsetOfNumActualArgs() {
         return offsetof(IonJSFrameLayout, numActualArgs_);
     }
     static size_t offsetOfThis() {
-        IonJSFrameLayout *base = NULL;
+        IonJSFrameLayout *base = nullptr;
         return reinterpret_cast<size_t>(&base->argv()[0]);
     }
     static size_t offsetOfActualArgs() {
-        IonJSFrameLayout *base = NULL;
+        IonJSFrameLayout *base = nullptr;
         // +1 to skip |this|.
         return reinterpret_cast<size_t>(&base->argv()[1]);
     }
     static size_t offsetOfActualArg(size_t arg) {
         return offsetOfActualArgs() + arg * sizeof(Value);
     }
 
     Value thisv() {
@@ -179,25 +179,25 @@ class IonExitFrameLayout : public IonCom
         uint8_t *sp = reinterpret_cast<uint8_t *>(this);
         return reinterpret_cast<IonExitFooterFrame *>(sp - IonExitFooterFrame::Size());
     }
 
     // argBase targets the point which precedes the exit frame. Arguments of
     // each VM wrapper are pushed before the exit frame. This corresponds
     // exactly to the value of the argBase register of generateVMWrapper.
     inline uint8_t *argBase() {
-        JS_ASSERT(footer()->ionCode() != NULL);
+        JS_ASSERT(footer()->ionCode() != nullptr);
         return top();
     }
 
     inline bool isWrapperExit() {
-        return footer()->function() != NULL;
+        return footer()->function() != nullptr;
     }
     inline bool isNativeExit() {
-        return footer()->ionCode() == NULL;
+        return footer()->ionCode() == nullptr;
     }
     inline bool isOOLNativeExit() {
         return footer()->ionCode() == ION_FRAME_OOL_NATIVE;
     }
     inline bool isOOLPropertyOpExit() {
         return footer()->ionCode() == ION_FRAME_OOL_PROPERTY_OP;
     }
     inline bool isOOLProxyExit() {
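
offsetOfThis() and offsetOfActualArgs() above compute member offsets by forming addresses off a nullptr base and casting them, a long-standing trick that relies on compiler tolerance rather than standard-guaranteed behaviour. For a plain standard-layout struct the same result can be written with offsetof, as in this sketch; DemoFrame and its fields are hypothetical stand-ins, not the real IonJSFrameLayout.

#include <cstddef>
#include <cstdint>

// A frame-like struct: a fixed header followed by an argument array.
struct DemoFrame {
    void *returnAddress;
    uintptr_t descriptor;
    uint64_t argv[1];        // |this|, then the actual arguments
};

// Offset of |this| (argv[0]) and of the first actual argument (argv[1]),
// mirroring the "+1 to skip |this|" step in the real code.
static const size_t demoOffsetOfThis = offsetof(DemoFrame, argv);
static const size_t demoOffsetOfActualArgs = demoOffsetOfThis + sizeof(uint64_t);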
--- a/js/src/jit/shared/LIR-x86-shared.h
+++ b/js/src/jit/shared/LIR-x86-shared.h
@@ -27,17 +27,17 @@ class LDivI : public LBinaryMath<1>
                 return mir()->canBeNegativeOverflow()
                        ? "Truncate_NegativeZero_NegativeOverflow"
                        : "Truncate_NegativeZero";
             }
             return mir()->canBeNegativeOverflow() ? "Truncate_NegativeOverflow" : "Truncate";
         }
         if (mir()->canBeNegativeZero())
             return mir()->canBeNegativeOverflow() ? "NegativeZero_NegativeOverflow" : "NegativeZero";
-        return mir()->canBeNegativeOverflow() ? "NegativeOverflow" : NULL;
+        return mir()->canBeNegativeOverflow() ? "NegativeOverflow" : nullptr;
     }
 
     const LDefinition *remainder() {
         return getTemp(0);
     }
     MDiv *mir() const {
         return mir_->toDiv();
     }
@@ -98,17 +98,17 @@ class LModI : public LBinaryMath<1>
 
     LModI(const LAllocation &lhs, const LAllocation &rhs, const LDefinition &temp) {
         setOperand(0, lhs);
         setOperand(1, rhs);
         setTemp(0, temp);
     }
 
     const char *extraName() const {
-        return mir()->isTruncated() ? "Truncated" : NULL;
+        return mir()->isTruncated() ? "Truncated" : nullptr;
     }
 
     const LDefinition *remainder() {
         return getDef(0);
     }
     MMod *mir() const {
         return mir_->toMod();
     }
@@ -276,17 +276,17 @@ class LMulI : public LBinaryMath<0, 1>
         setOperand(0, lhs);
         setOperand(1, rhs);
         setOperand(2, lhsCopy);
     }
 
     const char *extraName() const {
         return (mir()->mode() == MMul::Integer)
                ? "Integer"
-               : (mir()->canBeNegativeZero() ? "CanBeNegativeZero" : NULL);
+               : (mir()->canBeNegativeZero() ? "CanBeNegativeZero" : nullptr);
     }
 
     MMul *mir() const {
         return mir_->toMul();
     }
     const LAllocation *lhsCopy() {
         return this->getOperand(2);
     }
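
The extraName() methods above return either a static description string or nullptr when there is nothing extra to report, so whatever prints LIR node names has to handle both cases. A minimal sketch of that caller-side convention; printNodeName is a made-up helper, not part of the JIT, and it simply appends the suffix when one is present.

#include <cstdio>

static void
printNodeName(const char *opName, const char *extraName)
{
    if (extraName != nullptr)
        printf("%s:%s\n", opName, extraName);
    else
        printf("%s\n", opName);
}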
--- a/js/src/jit/shared/Lowering-shared.cpp
+++ b/js/src/jit/shared/Lowering-shared.cpp
@@ -57,21 +57,21 @@ LIRGeneratorShared::lowerTypedPhiInput(M
 }
 
 #ifdef JS_NUNBOX32
 LSnapshot *
 LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
 {
     LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
     if (!snapshot)
-        return NULL;
+        return nullptr;
 
     FlattenedMResumePointIter iter(rp);
     if (!iter.init())
-        return NULL;
+        return nullptr;
 
     size_t i = 0;
     for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
         MResumePoint *mir = *it;
         for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
             MDefinition *ins = mir->getOperand(j);
 
             LAllocation *type = snapshot->typeOfSlot(i);
@@ -115,21 +115,21 @@ LIRGeneratorShared::buildSnapshot(LInstr
 
 #elif JS_PUNBOX64
 
 LSnapshot *
 LIRGeneratorShared::buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind)
 {
     LSnapshot *snapshot = LSnapshot::New(gen, rp, kind);
     if (!snapshot)
-        return NULL;
+        return nullptr;
 
     FlattenedMResumePointIter iter(rp);
     if (!iter.init())
-        return NULL;
+        return nullptr;
 
     size_t i = 0;
     for (MResumePoint **it = iter.begin(), **end = iter.end(); it != end; ++it) {
         MResumePoint *mir = *it;
         for (size_t j = 0, e = mir->numOperands(); j < e; ++i, ++j) {
             MDefinition *def = mir->getOperand(j);
 
             if (def->isPassArg())
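
Both the NUNBOX32 and PUNBOX64 versions of buildSnapshot() above follow the same fallible-allocation shape: each step that can fail returns nullptr, and the caller checks it immediately and propagates the failure. The sketch below compresses that shape into a few lines using invented DemoSnapshot/DemoIter types and plain nothrow new, rather than the real LSnapshot allocator.

#include <new>

struct DemoSnapshot { /* slots would live here */ };
struct DemoIter {
    bool init() { return true; }   // pretend initialization can fail
};

static DemoSnapshot *
buildDemoSnapshot()
{
    DemoSnapshot *snapshot = new (std::nothrow) DemoSnapshot();
    if (!snapshot)
        return nullptr;            // allocation failed; let the caller bail

    DemoIter iter;
    if (!iter.init()) {
        delete snapshot;
        return nullptr;            // setup failed; clean up and propagate
    }
    return snapshot;
}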
--- a/js/src/jit/shared/Lowering-shared.h
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -33,18 +33,18 @@ class LIRGeneratorShared : public MInstr
     MResumePoint *lastResumePoint_;
     LOsiPoint *osiPoint_;
 
   public:
     LIRGeneratorShared(MIRGenerator *gen, MIRGraph &graph, LIRGraph &lirGraph)
       : gen(gen),
         graph(graph),
         lirGraph_(lirGraph),
-        lastResumePoint_(NULL),
-        osiPoint_(NULL)
+        lastResumePoint_(nullptr),
+        osiPoint_(nullptr)
     { }
 
     MIRGenerator *mir() {
         return gen;
     }
 
   protected:
     // A backend can decide that an instruction should be emitted at its uses,
@@ -138,24 +138,24 @@ class LIRGeneratorShared : public MInstr
     // redefine(), but used when creating new LIR.
     inline bool defineAs(LInstruction *outLir, MDefinition *outMir, MDefinition *inMir);
 
     uint32_t getVirtualRegister() {
         return lirGraph_.getVirtualRegister();
     }
 
     template <typename T> void annotate(T *ins);
-    template <typename T> bool add(T *ins, MInstruction *mir = NULL);
+    template <typename T> bool add(T *ins, MInstruction *mir = nullptr);
 
     void lowerTypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
     bool defineTypedPhi(MPhi *phi, size_t lirIndex);
 
     LOsiPoint *popOsiPoint() {
         LOsiPoint *tmp = osiPoint_;
-        osiPoint_ = NULL;
+        osiPoint_ = nullptr;
         return tmp;
     }
 
     LSnapshot *buildSnapshot(LInstruction *ins, MResumePoint *rp, BailoutKind kind);
     bool assignPostSnapshot(MInstruction *mir, LInstruction *ins);
 
     // Marks this instruction as fallible, meaning that before it performs
     // effects (if any), it may check pre-conditions and bailout if they do not
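
The add() declaration above defaults its MInstruction parameter to nullptr so callers that have no associated MIR node can omit the argument entirely. A tiny sketch of the same convention; demoAdd and both Demo* types are invented for this example.

struct DemoLIRIns {};
struct DemoMIRIns {};

static bool
demoAdd(DemoLIRIns *ins, DemoMIRIns *mir = nullptr)
{
    if (!ins)
        return false;
    if (mir) {
        // A real implementation would record the LIR/MIR association here.
    }
    return true;
}

Call sites can then read either demoAdd(lir) or demoAdd(lir, mir), matching how add() is used from the lowering code.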
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -91,34 +91,34 @@ Assembler::writeRelocation(JmpSrc src, R
         jumpRelocations_.writeUnsigned(src.offset());
         jumpRelocations_.writeUnsigned(jumps_.length());
     }
 }
 
 void
 Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
 {
-    JS_ASSERT(target.value != NULL);
+    JS_ASSERT(target.value != nullptr);
 
     // Emit reloc before modifying the jump table, since it computes a 0-based
     // index. This jump is not patchable at runtime.
     if (reloc == Relocation::IONCODE)
         writeRelocation(src, reloc);
     enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
 }
 
 size_t
 Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
 {
     // This jump is patchable at runtime so we always need to make sure the
     // jump table is emitted.
     writeRelocation(src, reloc);
 
     size_t index = jumps_.length();
-    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), NULL, reloc));
+    enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
     return index;
 }
 
 /* static */
 uint8_t *
 Assembler::PatchableJumpAddress(IonCode *code, size_t index)
 {
     // The assembler stashed the offset into the code of the fragments used
@@ -171,19 +171,19 @@ void
 Assembler::executableCopy(uint8_t *buffer)
 {
     AssemblerX86Shared::executableCopy(buffer);
 
     for (size_t i = 0; i < jumps_.length(); i++) {
         RelativePatch &rp = jumps_[i];
         uint8_t *src = buffer + rp.offset;
         if (!rp.target) {
-            // The patch target is NULL for jumps that have been linked to a
-            // label within the same code block, but may be repatched later to
-            // jump to a different code block.
+            // The patch target is nullptr for jumps that have been linked to
+            // a label within the same code block, but may be repatched later
+            // to jump to a different code block.
             continue;
         }
         if (JSC::X86Assembler::canRelinkJump(src, rp.target)) {
             JSC::X86Assembler::setRel32(src, rp.target);
         } else {
             // An extended jump table must exist, and its offset must be in
             // range.
             JS_ASSERT(extendedJumpTable_);
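
addPatchableJump() above records a jump with a nullptr target, and executableCopy() later skips such records because they were linked to a label inside the same code block and only acquire a real target if they are repatched. A self-contained sketch of that filtering step with invented names (DemoPatch, applyDemoPatches) and no actual instruction rewriting:

#include <cstddef>
#include <cstdint>
#include <vector>

struct DemoPatch {
    size_t offset;       // position of the jump within the code buffer
    uint8_t *target;     // nullptr until (and unless) the jump is repatched
};

static void
applyDemoPatches(uint8_t *buffer, const std::vector<DemoPatch> &patches)
{
    for (size_t i = 0; i < patches.size(); i++) {
        if (patches[i].target == nullptr)
            continue;    // linked to a label in the same block; nothing to do
        // A real assembler would rewrite the relative displacement at
        // buffer + patches[i].offset to reach patches[i].target here.
        (void)buffer;
    }
}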
--- a/js/src/jit/x64/Trampoline-x64.cpp
+++ b/js/src/jit/x64/Trampoline-x64.cpp
@@ -241,17 +241,17 @@ IonRuntime::generateEnterJIT(JSContext *
 
     // Call function.
     masm.call(reg_code);
 
     if (type == EnterJitBaseline) {
         // Baseline OSR will return here.
         masm.bind(returnLabel.src());
         if (!masm.addCodeLabel(returnLabel))
-            return NULL;
+            return nullptr;
     }
 
     // Pop arguments and padding from stack.
     masm.pop(r14);              // Pop and decode descriptor.
     masm.shrq(Imm32(FRAMESIZE_SHIFT), r14);
     masm.addq(r14, rsp);        // Remove arguments.
 
     /*****************************************************************
@@ -404,17 +404,17 @@ IonRuntime::generateArgumentsRectifier(J
     // Construct IonJSFrameLayout.
     masm.push(rdx); // numActualArgs
     masm.pushCalleeToken(rax, mode);
     masm.push(r9); // descriptor
 
     // Call the target function.
     // Note that this code assumes the function is JITted.
     masm.loadPtr(Address(rax, JSFunction::offsetOfNativeOrScript()), rax);
-    masm.loadBaselineOrIonRaw(rax, rax, mode, NULL);
+    masm.loadBaselineOrIonRaw(rax, rax, mode, nullptr);
     masm.call(rax);
     uint32_t returnOffset = masm.currentOffset();
 
     // Remove the rectifier frame.
     masm.pop(r9);             // r9 <- descriptor with FrameType.
     masm.shrq(Imm32(FRAMESIZE_SHIFT), r9);
     masm.pop(r11);            // Discard calleeToken.
     masm.pop(r11);            // Discard numActualArgs.
@@ -670,26 +670,26 @@ IonRuntime::generateVMWrapper(JSContext 
     masm.leaveExitFrame();
     masm.retn(Imm32(sizeof(IonExitFrameLayout) +
                     f.explicitStackSlots() * sizeof(void *) +
                     f.extraValuesToPop * sizeof(Value)));
 
     Linker linker(masm);
     IonCode *wrapper = linker.newCode(cx, JSC::OTHER_CODE);
     if (!wrapper)
-        return NULL;
+        return nullptr;
 
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
 #endif
 
     // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
     // use relookupOrAdd instead of add.
     if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
-        return NULL;
+        return nullptr;
 
     return wrapper;
 }
 
 IonCode *
 IonRuntime::generatePreBarrier(JSContext *cx, MIRType type)
 {
     MacroAssembler masm;
@@ -739,24 +739,24 @@ IonRuntime::generateDebugTrapHandler(JSC
     // Load the return address in scratch1.
     masm.loadPtr(Address(rsp, 0), scratch1);
 
     // Load BaselineFrame pointer in scratch2.
     masm.mov(rbp, scratch2);
     masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
 
     // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
-    // the stub frame has a NULL ICStub pointer, since this pointer is marked
+    // the stub frame has a nullptr ICStub pointer, since this pointer is marked
     // during GC.
-    masm.movePtr(ImmPtr(NULL), BaselineStubReg);
+    masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
     EmitEnterStubFrame(masm, scratch3);
 
     IonCode *code = cx->runtime()->ionRuntime()->getVMWrapper(HandleDebugTrapInfo);
     if (!code)
-        return NULL;
+        return nullptr;
 
     masm.push(scratch1);
     masm.push(scratch2);
     EmitCallVM(code, masm);
 
     EmitLeaveStubFrame(masm);
 
     // If the stub returns |true|, we have to perform a forced return
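
The debug-trap handler above stores ImmPtr(nullptr) into BaselineStubReg so the stub frame's ICStub slot holds a well-defined null pointer, because that slot is traced during GC. A sketch of the same "nullptr means no stub, so the tracer skips it" convention from the tracer's side; DemoStub, DemoStubFrame and traceDemoFrame are invented names.

struct DemoStub { };

struct DemoStubFrame {
    DemoStub *stub;      // nullptr when the frame carries no stub
};

static void
traceDemoFrame(const DemoStubFrame &frame)
{
    if (frame.stub == nullptr)
        return;          // nothing for the GC to mark in this frame
    // A real tracer would visit *frame.stub here.
}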
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -450,17 +450,17 @@ bool
 CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
 {
     const MLoadTypedArrayElementStatic *mir = ins->mir();
     ArrayBufferView::ViewType vt = mir->viewType();
 
     Register ptr = ToRegister(ins->ptr());
     const LDefinition *out = ins->output();
 
-    OutOfLineLoadTypedArrayOutOfBounds *ool = NULL;
+    OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
     if (!mir->fallible()) {
         ool = new OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out));
         if (!addOutOfLineCode(ool))
             return false;
     }
 
     masm.cmpl(ptr, Imm32(mir->length()));
     if (ool)
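
visitLoadTypedArrayElementStatic() above initializes its out-of-line pointer to nullptr, creates the object only on the path that needs it, and checks the pointer before every later use. The sketch below shows that optional-helper shape with invented names, using unique_ptr rather than the code generator's own allocation scheme.

#include <memory>

struct DemoOutOfLinePath {
    void emit() { /* out-of-line code would be generated here */ }
};

static void
demoVisitLoad(bool needsOutOfLinePath)
{
    std::unique_ptr<DemoOutOfLinePath> ool;   // stays null unless created
    if (needsOutOfLinePath)
        ool.reset(new DemoOutOfLinePath());

    // ... the inline fast path would be emitted here ...

    if (ool)
        ool->emit();                          // only reached when it exists
}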
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -19,28 +19,28 @@ using namespace js;
 using namespace js::jit;
 
 MacroAssemblerX86::Double *
 MacroAssemblerX86::getDouble(double d)
 {
     if (!doubleMap_.initialized()) {
         enoughMemory_ &= doubleMap_.init();
         if (!enoughMemory_)
-            return NULL;
+            return nullptr;
     }
     size_t doubleIndex;
     DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d);
     if (p) {
         doubleIndex = p->value;
     } else {
         doubleIndex = doubles_.length();
         enoughMemory_ &= doubles_.append(Double(d));
         enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
         if (!enoughMemory_)
-            return NULL;
+            return nullptr;
     }
     Double &dbl = doubles_[doubleIndex];
     JS_ASSERT(!dbl.uses.bound());
     return &dbl;
 }
 
 void
 MacroAssemblerX86::loadConstantDouble(double d, const FloatRegister &dest)
@@ -65,28 +65,28 @@ MacroAssemblerX86::addConstantDouble(dou
 }
 
 MacroAssemblerX86::Float *
 MacroAssemblerX86::getFloat(float f)
 {
     if (!floatMap_.initialized()) {
         enoughMemory_ &= floatMap_.init();
         if (!enoughMemory_)
-            return NULL;
+            return nullptr;
     }
     size_t floatIndex;
     FloatMap::AddPtr p = floatMap_.lookupForAdd(f);
     if (p) {
         floatIndex = p->value;
     } else {
         floatIndex = floats_.length();
         enoughMemory_ &= floats_.append(Float(f));
         enoughMemory_ &= floatMap_.add(p, f, floatIndex);
         if (!enoughMemory_)
-            return NULL;
+            return nullptr;
     }
     Float &flt = floats_[floatIndex];
     JS_ASSERT(!flt.uses.bound());
     return &flt;
 }
 
 void
 MacroAssemblerX86::loadConstantFloat32(float f, const FloatRegister &dest)
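
getDouble() and getFloat() above lazily initialize a hash map, look the constant up, and either reuse the existing slot or append a new one, returning nullptr only when memory runs out. The sketch below keeps just the deduplication idea, using std::unordered_map and std::vector in place of the js::HashMap and the uses-list bookkeeping; DemoDoubleCache is an invented name and error handling is omitted.

#include <cstddef>
#include <unordered_map>
#include <vector>

class DemoDoubleCache {
    std::unordered_map<double, size_t> indexOf_;
    std::vector<double> pool_;

  public:
    // Returns the pool index for d, inserting it only the first time.
    size_t intern(double d) {
        std::unordered_map<double, size_t>::const_iterator it = indexOf_.find(d);
        if (it != indexOf_.end())
            return it->second;
        size_t index = pool_.size();
        pool_.push_back(d);
        indexOf_[d] = index;
        return index;
    }

    double value(size_t index) const { return pool_[index]; }
};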
--- a/js/src/jit/x86/Trampoline-x86.cpp
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -238,17 +238,17 @@ IonRuntime::generateEnterJIT(JSContext *
         passed in return value pointer
     ***************************************************************/
     masm.call(Operand(ebp, ARG_JITCODE));
 
     if (type == EnterJitBaseline) {
         // Baseline OSR will return here.
         masm.bind(returnLabel.src());
         if (!masm.addCodeLabel(returnLabel))
-            return NULL;
+            return nullptr;
     }
 
     // Pop arguments off the stack.
     // eax <- 8*argc (size of all arguments we pushed on the stack)
     masm.pop(eax);
     masm.shrl(Imm32(FRAMESIZE_SHIFT), eax); // Unmark EntryFrame.
     masm.addl(eax, esp);
 
@@ -408,17 +408,17 @@ IonRuntime::generateArgumentsRectifier(J
     // Construct IonJSFrameLayout.
     masm.push(edx); // number of actual arguments
     masm.pushCalleeToken(eax, mode);
     masm.push(ebx); // descriptor
 
     // Call the target function.
     // Note that this assumes the function is JITted.
     masm.loadPtr(Address(eax, JSFunction::offsetOfNativeOrScript()), eax);
-    masm.loadBaselineOrIonRaw(eax, eax, mode, NULL);
+    masm.loadBaselineOrIonRaw(eax, eax, mode, nullptr);
     masm.call(eax);
     uint32_t returnOffset = masm.currentOffset();
 
     // Remove the rectifier frame.
     masm.pop(ebx);            // ebx <- descriptor with FrameType.
     masm.shrl(Imm32(FRAMESIZE_SHIFT), ebx); // ebx <- descriptor.
     masm.pop(edi);            // Discard calleeToken.
     masm.pop(edi);            // Discard number of actual arguments.
@@ -703,26 +703,26 @@ IonRuntime::generateVMWrapper(JSContext 
     masm.leaveExitFrame();
     masm.retn(Imm32(sizeof(IonExitFrameLayout) +
                     f.explicitStackSlots() * sizeof(void *) +
                     f.extraValuesToPop * sizeof(Value)));
 
     Linker linker(masm);
     IonCode *wrapper = linker.newCode(cx, JSC::OTHER_CODE);
     if (!wrapper)
-        return NULL;
+        return nullptr;
 
 #ifdef JS_ION_PERF
     writePerfSpewerIonCodeProfile(wrapper, "VMWrapper");
 #endif
 
     // linker.newCode may trigger a GC and sweep functionWrappers_ so we have to
     // use relookupOrAdd instead of add.
     if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
-        return NULL;
+        return nullptr;
 
     return wrapper;
 }
 
 IonCode *
 IonRuntime::generatePreBarrier(JSContext *cx, MIRType type)
 {
     MacroAssembler masm;
@@ -779,24 +779,24 @@ IonRuntime::generateDebugTrapHandler(JSC
     // Load the return address in scratch1.
     masm.loadPtr(Address(esp, 0), scratch1);
 
     // Load BaselineFrame pointer in scratch2.
     masm.mov(ebp, scratch2);
     masm.subPtr(Imm32(BaselineFrame::Size()), scratch2);
 
     // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
-    // the stub frame has a NULL ICStub pointer, since this pointer is marked
-    // during GC.
-    masm.movePtr(ImmPtr(NULL), BaselineStubReg);
+    // the stub frame has a nullptr ICStub pointer, since this pointer is
+    // marked during GC.
+    masm.movePtr(ImmPtr(nullptr), BaselineStubReg);
     EmitEnterStubFrame(masm, scratch3);
 
     IonCode *code = cx->runtime()->ionRuntime()->getVMWrapper(HandleDebugTrapInfo);
     if (!code)
-        return NULL;
+        return nullptr;
 
     masm.push(scratch1);
     masm.push(scratch2);
     EmitCallVM(code, masm);
 
     EmitLeaveStubFrame(masm);
 
     // If the stub returns |true|, we have to perform a forced return