Bug 784739 - Switch from NULL to nullptr in mozglue/linker/; r=ehsan
author Birunthan Mohanathas <birunthan@mohanathas.com>
Mon, 11 Nov 2013 14:15:46 -0500
changeset 169003 8c947073f4ead3194bbd9086591229abb39966bf
parent 169002 ccca3c4247e1333b0695825b01bb420b67cf47e6
child 169004 0fc828acb6535ab046b3c2cbae148a178343029f
push id 3224
push user lsblakk@mozilla.com
push date Tue, 04 Feb 2014 01:06:49 +0000
treeherder mozilla-beta@60c04d0987f1 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers ehsan
bugs 784739
milestone 28.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 784739 - Switch from NULL to nullptr in mozglue/linker/; r=ehsan
mozglue/linker/CustomElf.cpp
mozglue/linker/ElfLoader.cpp
mozglue/linker/ElfLoader.h
mozglue/linker/Mappable.cpp
mozglue/linker/SeekableZStream.cpp
mozglue/linker/Utils.h
mozglue/linker/Zip.cpp
mozglue/linker/Zip.h
mozglue/linker/szip.cpp
--- a/mozglue/linker/CustomElf.cpp
+++ b/mozglue/linker/CustomElf.cpp
@@ -23,34 +23,34 @@ extern "C" {
 }
 #else
 #define report_mapping(...)
 #endif
 
 const Ehdr *Ehdr::validate(const void *buf)
 {
   if (!buf || buf == MAP_FAILED)
-    return NULL;
+    return nullptr;
 
   const Ehdr *ehdr = reinterpret_cast<const Ehdr *>(buf);
 
   /* Only support ELF executables or libraries for the host system */
   if (memcmp(ELFMAG, &ehdr->e_ident, SELFMAG) ||
       ehdr->e_ident[EI_CLASS] != ELFCLASS ||
       ehdr->e_ident[EI_DATA] != ELFDATA ||
       ehdr->e_ident[EI_VERSION] != 1 ||
       (ehdr->e_ident[EI_OSABI] != ELFOSABI && ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE) ||
 #ifdef EI_ABIVERSION
       ehdr->e_ident[EI_ABIVERSION] != ELFABIVERSION ||
 #endif
       (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) ||
       ehdr->e_machine != ELFMACHINE ||
       ehdr->e_version != 1 ||
       ehdr->e_phentsize != sizeof(Phdr))
-    return NULL;
+    return nullptr;
 
   return ehdr;
 }
 
 namespace {
 
 void debug_phdr(const char *type, const Phdr *phdr)
 {
@@ -69,17 +69,17 @@ void debug_phdr(const char *type, const 
 /**
  * RAII wrapper for a mapping of the first page off a Mappable object.
  * This calls Mappable::munmap instead of system munmap.
  */
 class Mappable1stPagePtr: public GenericMappedPtr<Mappable1stPagePtr> {
 public:
   Mappable1stPagePtr(Mappable *mappable)
   : GenericMappedPtr<Mappable1stPagePtr>(
-      mappable->mmap(NULL, PageSize(), PROT_READ, MAP_PRIVATE, 0))
+      mappable->mmap(nullptr, PageSize(), PROT_READ, MAP_PRIVATE, 0))
   , mappable(mappable)
   {
     /* Ensure the content of this page */
     mappable->ensure(*this);
   }
 
 private:
   friend class GenericMappedPtr<Mappable1stPagePtr>;
@@ -91,40 +91,40 @@ private:
 };
 
 
 TemporaryRef<LibHandle>
 CustomElf::Load(Mappable *mappable, const char *path, int flags)
 {
   DEBUG_LOG("CustomElf::Load(\"%s\", 0x%x) = ...", path, flags);
   if (!mappable)
-    return NULL;
+    return nullptr;
   /* Keeping a RefPtr of the CustomElf is going to free the appropriate
-   * resources when returning NULL */
+   * resources when returning nullptr */
   RefPtr<CustomElf> elf = new CustomElf(mappable, path);
   /* Map the first page of the Elf object to access Elf and program headers */
   Mappable1stPagePtr ehdr_raw(mappable);
   if (ehdr_raw == MAP_FAILED)
-    return NULL;
+    return nullptr;
 
   const Ehdr *ehdr = Ehdr::validate(ehdr_raw);
   if (!ehdr)
-    return NULL;
+    return nullptr;
 
   /* Scan Elf Program Headers and gather some information about them */
   std::vector<const Phdr *> pt_loads;
   Addr min_vaddr = (Addr) -1; // We want to find the lowest and biggest
   Addr max_vaddr = 0;         // virtual address used by this Elf.
-  const Phdr *dyn = NULL;
+  const Phdr *dyn = nullptr;
 
   const Phdr *first_phdr = reinterpret_cast<const Phdr *>(
                            reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
   const Phdr *end_phdr = &first_phdr[ehdr->e_phnum];
 #ifdef __ARM_EABI__
-  const Phdr *arm_exidx_phdr = NULL;
+  const Phdr *arm_exidx_phdr = nullptr;
 #endif
 
   for (const Phdr *phdr = first_phdr; phdr < end_phdr; phdr++) {
     switch (phdr->p_type) {
       case PT_LOAD:
         debug_phdr("PT_LOAD", phdr);
         pt_loads.push_back(phdr);
         if (phdr->p_vaddr < min_vaddr)
@@ -133,33 +133,33 @@ CustomElf::Load(Mappable *mappable, cons
           max_vaddr = phdr->p_vaddr + phdr->p_memsz;
         break;
       case PT_DYNAMIC:
         debug_phdr("PT_DYNAMIC", phdr);
         if (!dyn) {
           dyn = phdr;
         } else {
           LOG("%s: Multiple PT_DYNAMIC segments detected", elf->GetPath());
-          return NULL;
+          return nullptr;
         }
         break;
       case PT_TLS:
         debug_phdr("PT_TLS", phdr);
         if (phdr->p_memsz) {
           LOG("%s: TLS is not supported", elf->GetPath());
-          return NULL;
+          return nullptr;
         }
         break;
       case PT_GNU_STACK:
         debug_phdr("PT_GNU_STACK", phdr);
 // Skip on Android until bug 706116 is fixed
 #ifndef ANDROID
         if (phdr->p_flags & PF_X) {
           LOG("%s: Executable stack is not supported", elf->GetPath());
-          return NULL;
+          return nullptr;
         }
 #endif
         break;
 #ifdef __ARM_EABI__
       case PT_ARM_EXIDX:
         /* We cannot initialize arm_exidx here
            because we don't have a base yet */
         arm_exidx_phdr = phdr;
@@ -169,61 +169,61 @@ CustomElf::Load(Mappable *mappable, cons
         DEBUG_LOG("%s: Warning: program header type #%d not handled",
                   elf->GetPath(), phdr->p_type);
     }
   }
 
   if (min_vaddr != 0) {
     LOG("%s: Unsupported minimal virtual address: 0x%08" PRIxAddr,
         elf->GetPath(), min_vaddr);
-    return NULL;
+    return nullptr;
   }
   if (!dyn) {
     LOG("%s: No PT_DYNAMIC segment found", elf->GetPath());
-    return NULL;
+    return nullptr;
   }
 
   /* Reserve enough memory to map the complete virtual address space for this
    * library.
    * As we are using the base address from here to mmap something else with
    * MAP_FIXED | MAP_SHARED, we need to make sure these mmaps will work. For
    * instance, on armv6, MAP_SHARED mappings require a 16k alignment, but mmap
    * MAP_PRIVATE only returns a 4k aligned address. So we first get a base
    * address with MAP_SHARED, which guarantees the kernel returns an address
    * that we'll be able to use with MAP_FIXED, and then remap MAP_PRIVATE at
    * the same address, because of some bad side effects of keeping it as
    * MAP_SHARED. */
-  elf->base.Assign(MemoryRange::mmap(NULL, max_vaddr, PROT_NONE,
+  elf->base.Assign(MemoryRange::mmap(nullptr, max_vaddr, PROT_NONE,
                                      MAP_SHARED | MAP_ANONYMOUS, -1, 0));
   if ((elf->base == MAP_FAILED) ||
       (mmap(elf->base, max_vaddr, PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != elf->base)) {
     LOG("%s: Failed to mmap", elf->GetPath());
-    return NULL;
+    return nullptr;
   }
 
   /* Load and initialize library */
   for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
        it < pt_loads.end(); ++it)
     if (!elf->LoadSegment(*it))
-      return NULL;
+      return nullptr;
 
   /* We're not going to mmap anymore */
   mappable->finalize();
 
   report_mapping(const_cast<char *>(elf->GetName()), elf->base,
                  (max_vaddr + PAGE_SIZE - 1) & PAGE_MASK, 0);
 
   elf->l_addr = elf->base;
   elf->l_name = elf->GetPath();
   elf->l_ld = elf->GetPtr<Dyn>(dyn->p_vaddr);
   ElfLoader::Singleton.Register(elf);
 
   if (!elf->InitDyn(dyn))
-    return NULL;
+    return nullptr;
 
 #ifdef __ARM_EABI__
   if (arm_exidx_phdr)
     elf->arm_exidx.InitSize(elf->GetPtr(arm_exidx_phdr->p_vaddr),
                             arm_exidx_phdr->p_memsz);
 #endif
 
   elf->stats("oneLibLoaded");
@@ -270,17 +270,17 @@ CustomElf::GetSymbolPtr(const char *symb
 {
   return GetSymbolPtr(symbol, ElfHash(symbol));
 }
 
 void *
 CustomElf::GetSymbolPtr(const char *symbol, unsigned long hash) const
 {
   const Sym *sym = GetSymbol(symbol, hash);
-  void *ptr = NULL;
+  void *ptr = nullptr;
   if (sym && sym->st_shndx != SHN_UNDEF)
     ptr = GetPtr(sym->st_value);
   DEBUG_LOG("CustomElf::GetSymbolPtr(%p [\"%s\"], \"%s\") = %p",
             reinterpret_cast<const void *>(this), GetPath(), symbol, ptr);
   return ptr;
 }
 
 void *
@@ -347,17 +347,17 @@ CustomElf::GetSymbolPtrInDeps(const char
 #ifndef __GLIBC__
     } else {
       sym = (*it)->GetSymbolPtr(symbol);
 #endif
     }
     if (sym)
       return sym;
   }
-  return NULL;
+  return nullptr;
 }
 
 const Sym *
 CustomElf::GetSymbol(const char *symbol, unsigned long hash) const
 {
   /* Search symbol with the buckets and chains tables.
    * The hash computed from the symbol name gives an index in the buckets
    * table. The corresponding value in the bucket table is an index in the
@@ -367,17 +367,17 @@ CustomElf::GetSymbol(const char *symbol,
    * in both tables, which corresponding symbol is tested and so on and so
    * forth */
   size_t bucket = hash % buckets.numElements();
   for (size_t y = buckets[bucket]; y != STN_UNDEF; y = chains[y]) {
     if (strcmp(symbol, strtab.GetStringAt(symtab[y].st_name)))
       continue;
     return &symtab[y];
   }
-  return NULL;
+  return nullptr;
 }
 
 bool
 CustomElf::Contains(void *addr) const
 {
   return base.Contains(addr);
 }
 
@@ -385,17 +385,17 @@ CustomElf::Contains(void *addr) const
 const void *
 CustomElf::FindExidx(int *pcount) const
 {
   if (arm_exidx) {
     *pcount = arm_exidx.numElements();
     return arm_exidx;
   }
   *pcount = 0;
-  return NULL;
+  return nullptr;
 }
 #endif
 
 void
 CustomElf::stats(const char *when) const
 {
   mappable->stats(when, GetPath());
 }
@@ -666,17 +666,17 @@ CustomElf::InitDyn(const Phdr *pt_dyn)
   return Relocate() && RelocateJumps() && CallInit();
 }
 
 bool
 CustomElf::Relocate()
 {
   DEBUG_LOG("Relocate %s @%p", GetPath(), static_cast<void *>(base));
   uint32_t symtab_index = (uint32_t) -1;
-  void *symptr = NULL;
+  void *symptr = nullptr;
   for (Array<Reloc>::iterator rel = relocations.begin();
        rel < relocations.end(); ++rel) {
     /* Location of the relocation */
     void *ptr = GetPtr(rel->r_offset);
 
     /* R_*_RELATIVE relocations apply directly at the given location */
     if (ELF_R_TYPE(rel->r_info) == R_RELATIVE) {
       *(void **) ptr = GetPtr(rel->GetAddend(base));
@@ -685,22 +685,22 @@ CustomElf::Relocate()
     /* Other relocation types need a symbol resolution */
     /* Avoid symbol resolution when it's the same symbol as last iteration */
     if (symtab_index != ELF_R_SYM(rel->r_info)) {
       symtab_index = ELF_R_SYM(rel->r_info);
       const Sym sym = symtab[symtab_index];
       if (sym.st_shndx != SHN_UNDEF) {
         symptr = GetPtr(sym.st_value);
       } else {
-        /* TODO: handle symbol resolving to NULL vs. being undefined. */
+        /* TODO: handle symbol resolving to nullptr vs. being undefined. */
         symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
       }
     }
 
-    if (symptr == NULL)
+    if (symptr == nullptr)
       LOG("%s: Warning: relocation to NULL @0x%08" PRIxAddr,
           GetPath(), rel->r_offset);
 
     /* Apply relocation */
     switch (ELF_R_TYPE(rel->r_info)) {
     case R_GLOB_DAT:
       /* R_*_GLOB_DAT relocations simply use the symbol value */
       *(void **) ptr = symptr;
@@ -736,17 +736,17 @@ CustomElf::RelocateJumps()
     /* TODO: Avoid code duplication with the relocations above */
     const Sym sym = symtab[ELF_R_SYM(rel->r_info)];
     void *symptr;
     if (sym.st_shndx != SHN_UNDEF)
       symptr = GetPtr(sym.st_value);
     else
       symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
 
-    if (symptr == NULL) {
+    if (symptr == nullptr) {
       LOG("%s: %s: relocation to NULL @0x%08" PRIxAddr " for symbol \"%s\"",
           GetPath(),
           (ELF_ST_BIND(sym.st_info) == STB_WEAK) ? "Warning" : "Error",
           rel->r_offset, strtab.GetStringAt(sym.st_name));
       if (ELF_ST_BIND(sym.st_info) != STB_WEAK)
         return false;
     }
     /* Apply relocation */
@@ -785,13 +785,13 @@ CustomElf::CallFini()
   if (fini)
     CallFunction(fini);
 }
 
 Mappable *
 CustomElf::GetMappable() const
 {
   if (!mappable)
-    return NULL;
+    return nullptr;
   if (mappable->GetKind() == Mappable::MAPPABLE_EXTRACT_FILE)
     return mappable;
   return ElfLoader::GetMappableFromPath(GetPath());
 }
--- a/mozglue/linker/ElfLoader.cpp
+++ b/mozglue/linker/ElfLoader.cpp
@@ -52,26 +52,26 @@ void *
     handle->AddDirectRef();
   return handle;
 }
 
 const char *
 __wrap_dlerror(void)
 {
   const char *error = ElfLoader::Singleton.lastError;
-  ElfLoader::Singleton.lastError = NULL;
+  ElfLoader::Singleton.lastError = nullptr;
   return error;
 }
 
 void *
 __wrap_dlsym(void *handle, const char *symbol)
 {
   if (!handle) {
     ElfLoader::Singleton.lastError = "dlsym(NULL, sym) unsupported";
-    return NULL;
+    return nullptr;
   }
   if (handle != RTLD_DEFAULT && handle != RTLD_NEXT) {
     LibHandle *h = reinterpret_cast<LibHandle *>(handle);
     return h->GetSymbolPtr(symbol);
   }
   return dlsym(handle, symbol);
 }
 
@@ -102,17 +102,17 @@ int
   if (!ElfLoader::Singleton.dbg)
     return -1;
 
   for (ElfLoader::DebuggerHelper::iterator it = ElfLoader::Singleton.dbg.begin();
        it < ElfLoader::Singleton.dbg.end(); ++it) {
     dl_phdr_info info;
     info.dlpi_addr = reinterpret_cast<Elf::Addr>(it->l_addr);
     info.dlpi_name = it->l_name;
-    info.dlpi_phdr = NULL;
+    info.dlpi_phdr = nullptr;
     info.dlpi_phnum = 0;
 
     // Assuming l_addr points to Elf headers (in most cases, this is true),
     // get the Phdr location from there.
     uint8_t mapped;
     // If the page is not mapped, mincore returns an error.
     if (!mincore(const_cast<void*>(it->l_addr), PageSize(), &mapped)) {
       const Elf::Ehdr *ehdr = Elf::Ehdr::validate(it->l_addr);
@@ -135,17 +135,17 @@ const void *
 __wrap___gnu_Unwind_Find_exidx(void *pc, int *pcount)
 {
   RefPtr<LibHandle> handle = ElfLoader::Singleton.GetHandleByPtr(pc);
   if (handle)
     return handle->FindExidx(pcount);
   if (__gnu_Unwind_Find_exidx)
     return __gnu_Unwind_Find_exidx(pc, pcount);
   *pcount = 0;
-  return NULL;
+  return nullptr;
 }
 #endif
 
 /**
  * faulty.lib public API
  */
 
 MFBT_API size_t
@@ -154,17 +154,17 @@ MFBT_API size_t
     return 0;
   return reinterpret_cast<LibHandle *>(handle)->GetMappableLength();
 }
 
 MFBT_API void *
 __dl_mmap(void *handle, void *addr, size_t length, off_t offset)
 {
   if (!handle)
-    return NULL;
+    return nullptr;
   return reinterpret_cast<LibHandle *>(handle)->MappableMMap(addr, length,
                                                              offset);
 }
 
 MFBT_API void
 __dl_munmap(void *handle, void *addr, size_t length)
 {
   if (!handle)
@@ -200,17 +200,17 @@ LeafName(const char *path)
 LibHandle::~LibHandle()
 {
   free(path);
 }
 
 const char *
 LibHandle::GetName() const
 {
-  return path ? LeafName(path) : NULL;
+  return path ? LeafName(path) : nullptr;
 }
 
 size_t
 LibHandle::GetMappableLength() const
 {
   if (!mappable)
     mappable = GetMappable();
   if (!mappable)
@@ -246,29 +246,29 @@ LibHandle::MappableMUnmap(void *addr, si
  * SystemElf
  */
 TemporaryRef<LibHandle>
 SystemElf::Load(const char *path, int flags)
 {
   /* The Android linker returns a handle when the file name matches an
    * already loaded library, even when the full path doesn't exist */
   if (path && path[0] == '/' && (access(path, F_OK) == -1)){
-    DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, (void *)NULL);
-    return NULL;
+    DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, (void *)nullptr);
+    return nullptr;
   }
 
   void *handle = dlopen(path, flags);
   DEBUG_LOG("dlopen(\"%s\", 0x%x) = %p", path, flags, handle);
   ElfLoader::Singleton.lastError = dlerror();
   if (handle) {
     SystemElf *elf = new SystemElf(path, handle);
     ElfLoader::Singleton.Register(elf);
     return elf;
   }
-  return NULL;
+  return nullptr;
 }
 
 SystemElf::~SystemElf()
 {
   if (!dlhandle)
     return;
   DEBUG_LOG("dlclose(%p [\"%s\"])", dlhandle, GetPath());
   dlclose(dlhandle);
@@ -285,17 +285,17 @@ SystemElf::GetSymbolPtr(const char *symb
   return sym;
 }
 
 Mappable *
 SystemElf::GetMappable() const
 {
   const char *path = GetPath();
   if (!path)
-    return NULL;
+    return nullptr;
 #ifdef ANDROID
   /* On Android, if we don't have the full path, try in /system/lib */
   const char *name = LeafName(path);
   std::string systemPath;
   if (name == path) {
     systemPath = "/system/lib/";
     systemPath += path;
     path = systemPath.c_str();
@@ -307,17 +307,17 @@ SystemElf::GetMappable() const
 
 #ifdef __ARM_EABI__
 const void *
 SystemElf::FindExidx(int *pcount) const
 {
   /* TODO: properly implement when ElfLoader::GetHandleByPtr
      does return SystemElf handles */
   *pcount = 0;
-  return NULL;
+  return nullptr;
 }
 #endif
 
 /**
  * ElfLoader
  */
 
 /* Unique ElfLoader instance */
@@ -326,19 +326,19 @@ ElfLoader ElfLoader::Singleton;
 TemporaryRef<LibHandle>
 ElfLoader::Load(const char *path, int flags, LibHandle *parent)
 {
   /* Ensure logging is initialized or refresh if environment changed. */
   Logging::Init();
 
   RefPtr<LibHandle> handle;
 
-  /* Handle dlopen(NULL) directly. */
+  /* Handle dlopen(nullptr) directly. */
   if (!path) {
-    handle = SystemElf::Load(NULL, flags);
+    handle = SystemElf::Load(nullptr, flags);
     return handle;
   }
 
   /* TODO: Handle relative paths correctly */
   const char *name = LeafName(path);
 
   /* Search the list of handles we already have for a match. When the given
    * path is not absolute, compare file names, otherwise compare full paths. */
@@ -347,17 +347,17 @@ ElfLoader::Load(const char *path, int fl
       if ((*it)->GetName() && (strcmp((*it)->GetName(), name) == 0))
         return *it;
   } else {
     for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it)
       if ((*it)->GetPath() && (strcmp((*it)->GetPath(), path) == 0))
         return *it;
   }
 
-  char *abs_path = NULL;
+  char *abs_path = nullptr;
   const char *requested_path = path;
 
   /* When the path is not absolute and the library is being loaded for
    * another, first try to load the library from the directory containing
    * that parent library. */
   if ((name == path) && parent) {
     const char *parentPath = parent->GetPath();
     abs_path = new char[strlen(parentPath) + strlen(path)];
@@ -393,24 +393,24 @@ ElfLoader::Load(const char *path, int fl
 mozilla::TemporaryRef<LibHandle>
 ElfLoader::GetHandleByPtr(void *addr)
 {
   /* Scan the list of handles we already have for a match */
   for (LibHandleList::iterator it = handles.begin(); it < handles.end(); ++it) {
     if ((*it)->Contains(addr))
       return *it;
   }
-  return NULL;
+  return nullptr;
 }
 
 Mappable *
 ElfLoader::GetMappableFromPath(const char *path)
 {
   const char *name = LeafName(path);
-  Mappable *mappable = NULL;
+  Mappable *mappable = nullptr;
   RefPtr<Zip> zip;
   const char *subpath;
   if ((subpath = strchr(path, '!'))) {
     char *zip_path = strndup(path, subpath - path);
     while (*(++subpath) == '/') { }
     zip = ZipCollection::GetZip(zip_path);
     Zip::Stream s;
     if (zip && zip->GetStream(subpath, &s)) {
@@ -552,47 +552,47 @@ ElfLoader::__wrap_cxa_finalize(void *dso
 
 void
 ElfLoader::DestructorCaller::Call()
 {
   if (destructor) {
     DEBUG_LOG("ElfLoader::DestructorCaller::Call(%p, %p, %p)",
               FunctionPtr(destructor), object, dso_handle);
     destructor(object);
-    destructor = NULL;
+    destructor = nullptr;
   }
 }
 
-ElfLoader::DebuggerHelper::DebuggerHelper(): dbg(NULL)
+ElfLoader::DebuggerHelper::DebuggerHelper(): dbg(nullptr)
 {
   /* Find ELF auxiliary vectors.
    *
    * The kernel stores the following data on the stack when starting a
    * program:
    *   argc
    *   argv[0] (pointer into argv strings defined below)
    *   argv[1] (likewise)
    *   ...
    *   argv[argc - 1] (likewise)
-   *   NULL
+   *   nullptr
    *   envp[0] (pointer into environment strings defined below)
    *   envp[1] (likewise)
    *   ...
    *   envp[n] (likewise)
-   *   NULL
+   *   nullptr
    *   ... (more NULLs on some platforms such as Android 4.3)
    *   auxv[0] (first ELF auxiliary vector)
    *   auxv[1] (second ELF auxiliary vector)
    *   ...
    *   auxv[p] (last ELF auxiliary vector)
-   *   (AT_NULL, NULL)
+   *   (AT_NULL, nullptr)
    *   padding
    *   argv strings, separated with '\0'
    *   environment strings, separated with '\0'
-   *   NULL
+   *   nullptr
    *
    * What we are after are the auxv values defined by the following struct.
    */
   struct AuxVector {
     Elf::Addr type;
     Elf::Addr value;
   };
 
@@ -631,17 +631,17 @@ ElfLoader::DebuggerHelper::DebuggerHelpe
     scan++;
 
   AuxVector *auxv = reinterpret_cast<AuxVector *>(scan);
 
   /* The two values of interest in the auxiliary vectors are AT_PHDR and
    * AT_PHNUM, which gives us the the location and size of the ELF program
    * headers. */
   Array<Elf::Phdr> phdrs;
-  char *base = NULL;
+  char *base = nullptr;
   while (auxv->type) {
     if (auxv->type == AT_PHDR) {
       phdrs.Init(reinterpret_cast<Elf::Phdr*>(auxv->value));
       /* Assume the base address is the first byte of the same page */
       base = reinterpret_cast<char *>(PageAlignedPtr(auxv->value));
     }
     if (auxv->type == AT_PHNUM)
       phdrs.Init(auxv->value);
@@ -805,17 +805,17 @@ private:
  */
 void
 ElfLoader::DebuggerHelper::Add(ElfLoader::link_map *map)
 {
   if (!dbg->r_brk)
     return;
   dbg->r_state = r_debug::RT_ADD;
   dbg->r_brk();
-  map->l_prev = NULL;
+  map->l_prev = nullptr;
   map->l_next = dbg->r_map;
   if (!firstAdded) {
     firstAdded = map;
     /* When adding a library for the first time, r_map points to data
      * handled by the system linker, and that data may be read-only */
     EnsureWritable w(&dbg->r_map->l_prev);
     dbg->r_map->l_prev = map;
   } else
@@ -971,93 +971,96 @@ SEGVHandler::SEGVHandler()
   /* Initialize oldStack.ss_flags to an invalid value when used to set
    * an alternative stack, meaning we haven't got information about the
    * original alternative stack and thus don't mean to restore it */
   oldStack.ss_flags = SS_ONSTACK;
   if (!Divert(sigaction, __wrap_sigaction))
     return;
 
   /* Get the current segfault signal handler. */
-  sys_sigaction(SIGSEGV, NULL, &this->action);
+  sys_sigaction(SIGSEGV, nullptr, &this->action);
 
   /* Some devices don't provide useful information to their SIGSEGV handlers,
    * making it impossible for on-demand decompression to work. To check if
    * we're on such a device, setup a temporary handler and deliberately
    * trigger a segfault. The handler will set signalHandlingBroken if the
    * provided information is bogus.
    * Some other devices have a kernel option enabled that makes SIGSEGV handler
    * have an overhead so high that it affects how on-demand decompression
    * performs. The handler will also set signalHandlingSlow if the triggered
    * SIGSEGV took too much time. */
   struct sigaction action;
   action.sa_sigaction = &SEGVHandler::test_handler;
   sigemptyset(&action.sa_mask);
   action.sa_flags = SA_SIGINFO | SA_NODEFER;
-  action.sa_restorer = NULL;
-  if (sys_sigaction(SIGSEGV, &action, NULL))
+  action.sa_restorer = nullptr;
+  if (sys_sigaction(SIGSEGV, &action, nullptr))
     return;
-  stackPtr.Assign(MemoryRange::mmap(NULL, PageSize(), PROT_READ | PROT_WRITE,
+  stackPtr.Assign(MemoryRange::mmap(nullptr, PageSize(),
+                                    PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
   if (stackPtr.get() == MAP_FAILED)
     return;
 
   TmpData *data = reinterpret_cast<TmpData*>(stackPtr.get());
   data->crash_timestamp = ProcessTimeStamp_Now();
   mprotect(stackPtr, stackPtr.GetLength(), PROT_NONE);
   data->crash_int = 123;
   stackPtr.Assign(MAP_FAILED, 0);
   if (signalHandlingBroken || signalHandlingSlow) {
     /* Restore the original segfault signal handler. */
-    sys_sigaction(SIGSEGV, &this->action, NULL);
+    sys_sigaction(SIGSEGV, &this->action, nullptr);
     return;
   }
 
   /* Setup an alternative stack if the already existing one is not big
    * enough, or if there is none. */
-  if (sigaltstack(NULL, &oldStack) == 0) {
+  if (sigaltstack(nullptr, &oldStack) == 0) {
     if (oldStack.ss_flags == SS_ONSTACK)
       oldStack.ss_flags = 0;
     if (!oldStack.ss_sp || oldStack.ss_size < stackSize) {
-      stackPtr.Assign(MemoryRange::mmap(NULL, stackSize, PROT_READ | PROT_WRITE,
+      stackPtr.Assign(MemoryRange::mmap(nullptr, stackSize,
+                                        PROT_READ | PROT_WRITE,
                                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
       if (stackPtr.get() == MAP_FAILED)
         return;
       stack_t stack;
       stack.ss_sp = stackPtr;
       stack.ss_size = stackSize;
       stack.ss_flags = 0;
-      if (sigaltstack(&stack, NULL) != 0)
+      if (sigaltstack(&stack, nullptr) != 0)
         return;
     }
   }
   /* Register our own handler, and store the already registered one in
    * SEGVHandler's struct sigaction member */
   action.sa_sigaction = &SEGVHandler::handler;
   action.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
-  registeredHandler = !sys_sigaction(SIGSEGV, &action, NULL);
+  registeredHandler = !sys_sigaction(SIGSEGV, &action, nullptr);
 }
 
 SEGVHandler::~SEGVHandler()
 {
   /* Restore alternative stack for signals */
   if (oldStack.ss_flags != SS_ONSTACK)
-    sigaltstack(&oldStack, NULL);
+    sigaltstack(&oldStack, nullptr);
   /* Restore original signal handler */
   if (registeredHandler)
-    sys_sigaction(SIGSEGV, &this->action, NULL);
+    sys_sigaction(SIGSEGV, &this->action, nullptr);
 }
 
 /* Test handler for a deliberately triggered SIGSEGV that determines whether
  * useful information is provided to signal handlers, particularly whether
  * si_addr is filled in properly, and whether the segfault handler is called
  * quickly enough. */
 void SEGVHandler::test_handler(int signum, siginfo_t *info, void *context)
 {
   SEGVHandler &that = ElfLoader::Singleton;
-  if (signum != SIGSEGV || info == NULL || info->si_addr != that.stackPtr.get())
+  if (signum != SIGSEGV ||
+      info == nullptr || info->si_addr != that.stackPtr.get())
     that.signalHandlingBroken = true;
   mprotect(that.stackPtr, that.stackPtr.GetLength(), PROT_READ | PROT_WRITE);
   TmpData *data = reinterpret_cast<TmpData*>(that.stackPtr.get());
   uint64_t latency = ProcessTimeStamp_Now() - data->crash_timestamp;
   DEBUG_LOG("SEGVHandler latency: %" PRIu64, latency);
 /* See bug 886736 for timings on different devices, 150 µs is reasonably above
    * the latency on "working" devices and seems to be reasonably fast to incur
    * a huge overhead to on-demand decompression. */
@@ -1088,17 +1091,17 @@ void SEGVHandler::handler(int signum, si
   SEGVHandler &that = ElfLoader::Singleton;
   if (that.action.sa_flags & SA_SIGINFO) {
     DEBUG_LOG("Redispatching to registered handler @%p",
               FunctionPtr(that.action.sa_sigaction));
     that.action.sa_sigaction(signum, info, context);
   } else if (that.action.sa_handler == SIG_DFL) {
     DEBUG_LOG("Redispatching to default handler");
     /* Reset the handler to the default one, and trigger it. */
-    sys_sigaction(signum, &that.action, NULL);
+    sys_sigaction(signum, &that.action, nullptr);
     raise(signum);
   } else if (that.action.sa_handler != SIG_IGN) {
     DEBUG_LOG("Redispatching to registered handler @%p",
               FunctionPtr(that.action.sa_handler));
     that.action.sa_handler(signum);
   } else {
     DEBUG_LOG("Ignoring");
   }
--- a/mozglue/linker/ElfLoader.h
+++ b/mozglue/linker/ElfLoader.h
@@ -91,17 +91,17 @@ template <> inline RefCounted<LibHandle,
 class LibHandle: public mozilla::AtomicRefCounted<LibHandle>
 {
 public:
   /**
    * Constructor. Takes the path of the loaded library and will store a copy
    * of the leaf name.
    */
   LibHandle(const char *path)
-  : directRefCnt(0), path(path ? strdup(path) : NULL), mappable(NULL) { }
+  : directRefCnt(0), path(path ? strdup(path) : nullptr), mappable(nullptr) { }
 
   /**
    * Destructor.
    */
   virtual ~LibHandle();
 
   /**
    * Returns the pointer to the address to which the given symbol resolves
@@ -282,17 +282,17 @@ protected:
   virtual bool IsSystemElf() const { return true; }
 
   /**
    * Remove the reference to the system linker handle. This avoids dlclose()
    * being called when the instance is destroyed.
    */
   void Forget()
   {
-    dlhandle = NULL;
+    dlhandle = nullptr;
   }
 
 private:
   /**
    * Private constructor
    */
   SystemElf(const char *path, void *handle)
   : LibHandle(path), dlhandle(handle) { }
@@ -376,17 +376,17 @@ public:
 
   /**
    * Loads the given library with the given flags. Equivalent to dlopen()
    * The extra "parent" argument optionally gives the handle of the library
    * requesting the given library to be loaded. The loader may look in the
    * directory containing that parent library for the library to load.
    */
   mozilla::TemporaryRef<LibHandle> Load(const char *path, int flags,
-                                        LibHandle *parent = NULL);
+                                        LibHandle *parent = nullptr);
 
   /**
    * Returns the handle of the library containing the given address in
    * its virtual address space, i.e. the library handle for which
    * LibHandle::Contains returns true. Its purpose is to allow to
    * implement dladdr().
    */
   mozilla::TemporaryRef<LibHandle> GetHandleByPtr(void *addr);
@@ -570,36 +570,36 @@ private:
       const link_map &operator ++()
       {
         item = item->l_next;
         return *item;
       }
 
       bool operator<(const iterator &other) const
       {
-        if (other.item == NULL)
+        if (other.item == nullptr)
           return item ? true : false;
         MOZ_CRASH("DebuggerHelper::iterator::operator< called with something else than DebuggerHelper::end()");
       }
     protected:
       friend class DebuggerHelper;
       iterator(const link_map *item): item(item) { }
 
     private:
       const link_map *item;
     };
 
     iterator begin() const
     {
-      return iterator(dbg ? dbg->r_map : NULL);
+      return iterator(dbg ? dbg->r_map : nullptr);
     }
 
     iterator end() const
     {
-      return iterator(NULL);
+      return iterator(nullptr);
     }
 
   private:
     r_debug *dbg;
     link_map *firstAdded;
   };
   friend int __wrap_dl_iterate_phdr(dl_phdr_cb callback, void *data);
   DebuggerHelper dbg;
--- a/mozglue/linker/Mappable.cpp
+++ b/mozglue/linker/Mappable.cpp
@@ -19,17 +19,17 @@
 #include "Logging.h"
 
 Mappable *
 MappableFile::Create(const char *path)
 {
   int fd = open(path, O_RDONLY);
   if (fd != -1)
     return new MappableFile(fd);
-  return NULL;
+  return nullptr;
 }
 
 MemoryRange
 MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                    off_t offset)
 {
   MOZ_ASSERT(fd != -1);
   MOZ_ASSERT(!(flags & MAP_SHARED));
@@ -55,17 +55,17 @@ MappableFile::GetLength() const
 
 Mappable *
 MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
 {
   const char *cachePath = getenv("MOZ_LINKER_CACHE");
   if (!cachePath || !*cachePath) {
     LOG("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
         "not extracting");
-    return NULL;
+    return nullptr;
   }
   mozilla::ScopedDeleteArray<char> path;
   path = new char[strlen(cachePath) + strlen(name) + 2];
   sprintf(path, "%s/%s", cachePath, name);
   struct stat cacheStat;
   if (stat(path, &cacheStat) == 0) {
     struct stat zipStat;
     stat(zip->GetName(), &zipStat);
@@ -75,76 +75,76 @@ MappableExtractFile::Create(const char *
     }
   }
   DEBUG_LOG("Extracting to %s", static_cast<char *>(path));
   AutoCloseFD fd;
   fd = open(path, O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
                   S_IRUSR | S_IWUSR);
   if (fd == -1) {
     LOG("Couldn't open %s to decompress library", path.get());
-    return NULL;
+    return nullptr;
   }
   AutoUnlinkFile file;
   file = path.forget();
   if (stream->GetType() == Zip::Stream::DEFLATE) {
     if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
       LOG("Couldn't ftruncate %s to decompress library", file.get());
-      return NULL;
+      return nullptr;
     }
     /* Map the temporary file for use as inflate buffer */
-    MappedPtr buffer(MemoryRange::mmap(NULL, stream->GetUncompressedSize(),
+    MappedPtr buffer(MemoryRange::mmap(nullptr, stream->GetUncompressedSize(),
                                        PROT_WRITE, MAP_SHARED, fd, 0));
     if (buffer == MAP_FAILED) {
       LOG("Couldn't map %s to decompress library", file.get());
-      return NULL;
+      return nullptr;
     }
 
     z_stream zStream = stream->GetZStream(buffer);
 
     /* Decompress */
     if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
       LOG("inflateInit failed: %s", zStream.msg);
-      return NULL;
+      return nullptr;
     }
     if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
       LOG("inflate failed: %s", zStream.msg);
-      return NULL;
+      return nullptr;
     }
     if (inflateEnd(&zStream) != Z_OK) {
       LOG("inflateEnd failed: %s", zStream.msg);
-      return NULL;
+      return nullptr;
     }
     if (zStream.total_out != stream->GetUncompressedSize()) {
       LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
           static_cast<unsigned int>(stream->GetUncompressedSize()));
-      return NULL;
+      return nullptr;
     }
   } else if (stream->GetType() == Zip::Stream::STORE) {
     SeekableZStream zStream;
     if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
       LOG("Couldn't initialize SeekableZStream for %s", name);
-      return NULL;
+      return nullptr;
     }
     if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
       LOG("Couldn't ftruncate %s to decompress library", file.get());
-      return NULL;
+      return nullptr;
     }
-    MappedPtr buffer(MemoryRange::mmap(NULL, zStream.GetUncompressedSize(),
+    MappedPtr buffer(MemoryRange::mmap(nullptr, zStream.GetUncompressedSize(),
                                        PROT_WRITE, MAP_SHARED, fd, 0));
     if (buffer == MAP_FAILED) {
       LOG("Couldn't map %s to decompress library", file.get());
-      return NULL;
+      return nullptr;
     }
 
     if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
       LOG("%s: failed to decompress", name);
-      return NULL;
+      return nullptr;
     }
   } else {
-    return NULL;
+    return nullptr;
   }
 
   return new MappableExtractFile(fd.forget(), file.forget());
 }
 
 MappableExtractFile::~MappableExtractFile()
 {
   /* When destroying from a forked process, we don't want the file to be
@@ -170,52 +170,53 @@ public:
    */
   static _MappableBuffer *Create(const char *name, size_t length)
   {
     AutoCloseFD fd;
 #ifdef ANDROID
     /* On Android, initialize an ashmem region with the given length */
     fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
     if (fd == -1)
-      return NULL;
+      return nullptr;
     char str[ASHMEM_NAME_LEN];
     strlcpy(str, name, sizeof(str));
     ioctl(fd, ASHMEM_SET_NAME, str);
     if (ioctl(fd, ASHMEM_SET_SIZE, length))
-      return NULL;
+      return nullptr;
 
     /* The Gecko crash reporter is confused by adjacent memory mappings of
      * the same file and chances are we're going to map from the same file
      * descriptor right away. To avoid problems with the crash reporter,
      * create an empty anonymous page before or after the ashmem mapping,
      * depending on how mappings grow in the address space.
      */
 #if defined(__arm__)
-    void *buf = ::mmap(NULL, length + PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    void *buf = ::mmap(nullptr, length + PAGE_SIZE, PROT_READ | PROT_WRITE,
+                       MAP_SHARED, fd, 0);
     if (buf != MAP_FAILED) {
       ::mmap(AlignedEndPtr(reinterpret_cast<char *>(buf) + length, PAGE_SIZE),
              PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                 length, str, buf);
       return new _MappableBuffer(fd.forget(), buf, length);
     }
 #elif defined(__i386__)
     size_t anon_mapping_length = length + PAGE_SIZE;
-    void *buf = ::mmap(NULL, anon_mapping_length, PROT_NONE,
+    void *buf = ::mmap(nullptr, anon_mapping_length, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (buf != MAP_FAILED) {
       char *first_page = reinterpret_cast<char *>(buf);
       char *map_page = first_page + PAGE_SIZE;
 
       void *actual_buf = ::mmap(map_page, length, PROT_READ | PROT_WRITE,
                                 MAP_FIXED | MAP_SHARED, fd, 0);
       if (actual_buf == MAP_FAILED) {
         ::munmap(buf, anon_mapping_length);
         DEBUG_LOG("Fixed allocation of decompression buffer at %p failed", map_page);
-        return NULL;
+        return nullptr;
       }
 
       DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                 length, str, actual_buf);
       return new _MappableBuffer(fd.forget(), actual_buf, length);
     }
 #else
 #error need to add a case for your CPU
@@ -223,28 +224,29 @@ public:
 #else
     /* On Linux, use /dev/shm as base directory for temporary files, assuming
      * it's on tmpfs */
     /* TODO: check that /dev/shm is tmpfs */
     char path[256];
     sprintf(path, "/dev/shm/%s.XXXXXX", name);
     fd = mkstemp(path);
     if (fd == -1)
-      return NULL;
+      return nullptr;
     unlink(path);
     ftruncate(fd, length);
 
-    void *buf = ::mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    void *buf = ::mmap(nullptr, length, PROT_READ | PROT_WRITE,
+                       MAP_SHARED, fd, 0);
     if (buf != MAP_FAILED) {
       DEBUG_LOG("Decompression buffer of size %ld in \"%s\", mapped @%p",
                 length, path, buf);
       return new _MappableBuffer(fd.forget(), buf, length);
     }
 #endif
-    return NULL;
+    return nullptr;
   }
 
   void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
   {
     MOZ_ASSERT(fd != -1);
 #ifdef ANDROID
     /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
      * there is content in the ashmem */
@@ -280,17 +282,17 @@ private:
 
 Mappable *
 MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
 {
   MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
   _MappableBuffer *buf = _MappableBuffer::Create(name, stream->GetUncompressedSize());
   if (buf)
     return new MappableDeflate(buf, zip, stream);
-  return NULL;
+  return nullptr;
 }
 
 MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
                                  Zip::Stream *stream)
 : zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }
 
 MappableDeflate::~MappableDeflate() { }
 
@@ -348,19 +350,19 @@ MappableDeflate::mmap(const void *addr, 
 }
 
 void
 MappableDeflate::finalize()
 {
   /* Free zlib internal buffers */
   inflateEnd(&zStream);
   /* Free decompression buffer */
-  buffer = NULL;
+  buffer = nullptr;
   /* Remove reference to Zip archive */
-  zip = NULL;
+  zip = nullptr;
 }
 
 size_t
 MappableDeflate::GetLength() const
 {
   return buffer->GetLength();
 }
 
@@ -372,25 +374,25 @@ MappableSeekableZStream::Create(const ch
   mozilla::ScopedDeletePtr<MappableSeekableZStream> mappable;
   mappable = new MappableSeekableZStream(zip);
 
   pthread_mutexattr_t recursiveAttr;
   pthread_mutexattr_init(&recursiveAttr);
   pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);
 
   if (pthread_mutex_init(&mappable->mutex, &recursiveAttr))
-    return NULL;
+    return nullptr;
 
   if (!mappable->zStream.Init(stream->GetBuffer(), stream->GetSize()))
-    return NULL;
+    return nullptr;
 
   mappable->buffer = _MappableBuffer::Create(name,
                               mappable->zStream.GetUncompressedSize());
   if (!mappable->buffer)
-    return NULL;
+    return nullptr;
 
   mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
   memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());
 
   return mappable.forget();
 }
 
 MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
--- a/mozglue/linker/SeekableZStream.cpp
+++ b/mozglue/linker/SeekableZStream.cpp
@@ -251,12 +251,12 @@ SeekableZStream::GetFilter(SeekableZStre
   switch (id) {
   case BCJ_THUMB:
     return BCJ_Thumb_filter;
   case BCJ_ARM:
     return BCJ_ARM_filter;
   case BCJ_X86:
     return BCJ_X86_filter;
   default:
-    return NULL;
+    return nullptr;
   }
-  return NULL;
+  return nullptr;
 }
--- a/mozglue/linker/Utils.h
+++ b/mozglue/linker/Utils.h
@@ -91,17 +91,17 @@ struct AutoCloseFDTraits
 typedef mozilla::Scoped<AutoCloseFDTraits> AutoCloseFD;
 
 /**
  * AutoCloseFILE is a RAII wrapper for POSIX streams
  */
 struct AutoCloseFILETraits
 {
   typedef FILE *type;
-  static FILE *empty() { return NULL; }
+  static FILE *empty() { return nullptr; }
   static void release(FILE *f) { if (f) fclose(f); }
 };
 typedef mozilla::Scoped<AutoCloseFILETraits> AutoCloseFILE;
 
 /**
  * Page alignment helpers
  */
 static inline size_t PageSize()
@@ -312,36 +312,36 @@ private:
  * UnsizedArray is a way to access raw arrays of data in memory.
  *
  *   struct S { ... };
  *   UnsizedArray<S> a(buf);
  *   UnsizedArray<S> b; b.Init(buf);
  *
  * This is roughly equivalent to
  *   const S *a = reinterpret_cast<const S *>(buf);
- *   const S *b = NULL; b = reinterpret_cast<const S *>(buf);
+ *   const S *b = nullptr; b = reinterpret_cast<const S *>(buf);
  *
  * An UnsizedArray has no known length, and it's up to the caller to make
  * sure the accessed memory is mapped and makes sense.
  */
 template <typename T>
 class UnsizedArray
 {
 public:
   typedef size_t idx_t;
 
   /**
    * Constructors and Initializers
    */
-  UnsizedArray(): contents(NULL) { }
+  UnsizedArray(): contents(nullptr) { }
   UnsizedArray(const void *buf): contents(reinterpret_cast<const T *>(buf)) { }
 
   void Init(const void *buf)
   {
-    MOZ_ASSERT(contents == NULL);
+    MOZ_ASSERT(contents == nullptr);
     contents = reinterpret_cast<const T *>(buf);
   }
 
   /**
    * Returns the nth element of the array
    */
   const T &operator[](const idx_t index) const
   {
@@ -353,17 +353,17 @@ public:
   {
     return contents;
   }
   /**
    * Returns whether the array points somewhere
    */
   operator bool() const
   {
-    return contents != NULL;
+    return contents != nullptr;
   }
 private:
   const T *contents;
 };
 
 /**
  * Array, like UnsizedArray, is a way to access raw arrays of data in memory.
  * Unlike UnsizedArray, it has a known length, and is enumerable with an
@@ -460,17 +460,17 @@ public:
    *   Array<S> a(buf, len);
    *   for (Array<S>::iterator it = a.begin(); it < a.end(); ++it) {
    *     // Do something with *it.
    *   }
    */
   class iterator
   {
   public:
-    iterator(): item(NULL) { }
+    iterator(): item(nullptr) { }
 
     const T &operator *() const
     {
       return *item;
     }
 
     const T *operator ->() const
     {
@@ -521,17 +521,17 @@ public:
    *   Array<S> a(buf, len);
    *   for (Array<S>::reverse_iterator it = a.rbegin(); it < a.rend(); ++it) {
    *     // Do something with *it.
    *   }
    */
   class reverse_iterator
   {
   public:
-    reverse_iterator(): item(NULL) { }
+    reverse_iterator(): item(nullptr) { }
 
     const T &operator *() const
     {
       const T *tmp = item;
       return *--tmp;
     }
 
     const T *operator ->() const
--- a/mozglue/linker/Zip.cpp
+++ b/mozglue/linker/Zip.cpp
@@ -14,61 +14,61 @@
 
 mozilla::TemporaryRef<Zip>
 Zip::Create(const char *filename)
 {
   /* Open and map the file in memory */
   AutoCloseFD fd(open(filename, O_RDONLY));
   if (fd == -1) {
     LOG("Error opening %s: %s", filename, strerror(errno));
-    return NULL;
+    return nullptr;
   }
   struct stat st;
   if (fstat(fd, &st) == -1) {
     LOG("Error stating %s: %s", filename, strerror(errno));
-    return NULL;
+    return nullptr;
   }
   size_t size = st.st_size;
   if (size <= sizeof(CentralDirectoryEnd)) {
     LOG("Error reading %s: too short", filename);
-    return NULL;
+    return nullptr;
   }
-  void *mapped = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+  void *mapped = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
   if (mapped == MAP_FAILED) {
     LOG("Error mmapping %s: %s", filename, strerror(errno));
-    return NULL;
+    return nullptr;
   }
   DEBUG_LOG("Mapped %s @%p", filename, mapped);
 
   return Create(filename, mapped, size);
 }
 
 mozilla::TemporaryRef<Zip>
 Zip::Create(const char *filename, void *mapped, size_t size)
 {
   mozilla::RefPtr<Zip> zip = new Zip(filename, mapped, size);
 
   // If neither the first Local File entry nor central directory entries
   // have been found, the zip was invalid.
   if (!zip->nextFile && !zip->entries) {
     LOG("%s - Invalid zip", filename);
-    return NULL;
+    return nullptr;
   }
 
   ZipCollection::Singleton.Register(zip);
   return zip;
 }
 
 Zip::Zip(const char *filename, void *mapped, size_t size)
-: name(filename ? strdup(filename) : NULL)
+: name(filename ? strdup(filename) : nullptr)
 , mapped(mapped)
 , size(size)
 , nextFile(LocalFile::validate(mapped)) // first Local File entry
-, nextDir(NULL)
-, entries(NULL)
+, nextDir(nullptr)
+, entries(nullptr)
 {
   // If the first local file entry couldn't be found (which can happen
   // with optimized jars), check the first central directory entry.
   if (!nextFile)
     GetFirstEntry();
 }
 
 Zip::~Zip()
@@ -144,36 +144,36 @@ Zip::GetStream(const char *path, Zip::St
   const char *data = reinterpret_cast<const char *>(nextFile->GetData());
   out->compressedBuf = data;
   out->compressedSize = nextDir->compressedSize;
   out->uncompressedSize = nextDir->uncompressedSize;
   out->type = static_cast<Stream::Type>(uint16_t(nextDir->compression));
 
   /* Store the next directory entry */
   nextDir = nextDir->GetNext();
-  nextFile = NULL;
+  nextFile = nullptr;
   return true;
 }
 
 const Zip::DirectoryEntry *
 Zip::GetFirstEntry() const
 {
   if (entries)
     return entries;
 
-  const CentralDirectoryEnd *end = NULL;
+  const CentralDirectoryEnd *end = nullptr;
   const char *_end = static_cast<const char *>(mapped) + size
                      - sizeof(CentralDirectoryEnd);
 
   /* Scan for the Central Directory End */
   for (; _end > mapped && !end; _end--)
     end = CentralDirectoryEnd::validate(_end);
   if (!end) {
     LOG("%s - Couldn't find end of central directory record", name);
-    return NULL;
+    return nullptr;
   }
 
   entries = DirectoryEntry::validate(static_cast<const char *>(mapped)
                                  + end->offset);
   if (!entries) {
     LOG("%s - Couldn't find central directory record", name);
   }
   return entries;
--- a/mozglue/linker/Zip.h
+++ b/mozglue/linker/Zip.h
@@ -25,26 +25,26 @@ class ZipCollection;
  * code fail in bad ways. However, since the only intended use is to load
  * libraries from Zip archives, there is no interest in making this code
  * safe, since the libraries could contain malicious code anyways.
  */
 class Zip: public mozilla::AtomicRefCounted<Zip>
 {
 public:
   /**
-   * Create a Zip instance for the given file name. Returns NULL in case
+   * Create a Zip instance for the given file name. Returns nullptr in case
    * of failure.
    */
   static mozilla::TemporaryRef<Zip> Create(const char *filename);
 
   /**
    * Create a Zip instance using the given buffer.
    */
   static mozilla::TemporaryRef<Zip> Create(void *buffer, size_t size) {
-    return Create(NULL, buffer, size);
+    return Create(nullptr, buffer, size);
   }
 
 private:
   static mozilla::TemporaryRef<Zip> Create(const char *filename,
                                            void *buffer, size_t size);
 
   /**
    * Private constructor
@@ -69,17 +69,17 @@ public:
     enum Type {
       STORE = 0,
       DEFLATE = 8
     };
 
     /**
      * Constructor
      */
-    Stream(): compressedBuf(NULL), compressedSize(0), uncompressedSize(0)
+    Stream(): compressedBuf(nullptr), compressedSize(0), uncompressedSize(0)
             , type(STORE) { }
 
     /**
      * Getters
      */
     const void *GetBuffer() { return compressedBuf; }
     size_t GetSize() { return compressedSize; }
     size_t GetUncompressedSize() { return uncompressedSize; }
@@ -173,17 +173,17 @@ public:
      * Equivalent to reinterpret_cast<const T *>(buf), with an additional
      * check of the signature.
      */
     static const T *validate(const void *buf)
     {
       const T *ret = static_cast<const T *>(buf);
       if (ret->signature == T::magic)
         return ret;
-      return NULL;
+      return nullptr;
     }
 
     SignedEntity(uint32_t magic): signature(magic) { }
   private:
     le_uint32 signature;
   };
 
 private:
--- a/mozglue/linker/szip.cpp
+++ b/mozglue/linker/szip.cpp
@@ -25,27 +25,27 @@ const char *filterName[] = {
   "thumb",
   "arm",
   "x86",
   "auto"
 };
 
 /* Maximum supported size for chunkSize */
 static const size_t maxChunkSize =
-  1 << (8 * std::min(sizeof(((SeekableZStreamHeader *)NULL)->chunkSize),
-                     sizeof(((SeekableZStreamHeader *)NULL)->lastChunkSize)) - 1);
+  1 << (8 * std::min(sizeof(((SeekableZStreamHeader *)nullptr)->chunkSize),
+                     sizeof(((SeekableZStreamHeader *)nullptr)->lastChunkSize)) - 1);
 
 class Buffer: public MappedPtr
 {
 public:
   virtual ~Buffer() { }
 
   virtual bool Resize(size_t size)
   {
-    MemoryRange buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
+    MemoryRange buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
     if (buf == MAP_FAILED)
       return false;
     if (*this != MAP_FAILED)
       memcpy(buf, *this, std::min(size, GetLength()));
     Assign(buf);
     return true;
   }
@@ -73,17 +73,18 @@ public:
   }
 
   virtual bool Resize(size_t size)
   {
     if (writable) {
       if (ftruncate(fd, size) == -1)
         return false;
     }
-    Assign(MemoryRange::mmap(NULL, size, PROT_READ | (writable ? PROT_WRITE : 0),
+    Assign(MemoryRange::mmap(nullptr, size,
+                             PROT_READ | (writable ? PROT_WRITE : 0),
                              writable ? MAP_SHARED : MAP_PRIVATE, fd, 0));
     return this != MAP_FAILED;
   }
 
   int getFd()
   {
     return fd;
   }
@@ -268,32 +269,32 @@ int SzipCompress::run(const char *name, 
     firstFilter = lastFilter = filter;
     ++lastFilter;
     scanFilters = false;
   }
 
   mozilla::ScopedDeletePtr<Buffer> filteredBuf;
   Buffer *origData;
   for (SeekableZStream::FilterId f = firstFilter; f < lastFilter; ++f) {
-    FilteredBuffer *filteredTmp = NULL;
+    FilteredBuffer *filteredTmp = nullptr;
     Buffer tmpBuf;
     if (f != SeekableZStream::NONE) {
       DEBUG_LOG("Applying filter \"%s\"", filterName[f]);
       filteredTmp = new FilteredBuffer();
       filteredTmp->Filter(origBuf, f, chunkSize);
       origData = filteredTmp;
     } else {
       origData = &origBuf;
     }
     if (dictSize  && !scanFilters) {
       filteredBuf = filteredTmp;
       break;
     }
     DEBUG_LOG("Compressing with no dictionary");
-    if (do_compress(*origData, tmpBuf, NULL, 0, f) == 0) {
+    if (do_compress(*origData, tmpBuf, nullptr, 0, f) == 0) {
       if (tmpBuf.GetLength() < outBuf.GetLength()) {
         outBuf.Fill(tmpBuf);
         compressed = true;
         filter = f;
         filteredBuf = filteredTmp;
         continue;
       }
     }