Bug 882608 - Fix various issues in faulty.lib with incomplete pages and MOZ_LINKER_ONDEMAND=0. r=nfroyd
author: Mike Hommey <mh+mozilla@glandium.org>
Tue, 23 Jul 2013 07:26:07 +0900
changeset 139483 d7a7b56432fbe555968b7417945633b8ab9d3335
parent 139482 1aceeed1147810f8342422b9875e2c7506276411
child 139484 5435370944bf3bc64c3839b2e34c975c13e729e4
push id: 31384
push user: mh@glandium.org
push date: Mon, 22 Jul 2013 22:29:54 +0000
treeherder: mozilla-inbound@dbf147bd0c67 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: nfroyd
bugs: 882608
milestone: 25.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 882608 - Fix various issues in faulty.lib with incomplete pages and MOZ_LINKER_ONDEMAND=0. r=nfroyd
mozglue/linker/CustomElf.cpp
mozglue/linker/Mappable.cpp
mozglue/linker/Mappable.h
--- a/mozglue/linker/CustomElf.cpp
+++ b/mozglue/linker/CustomElf.cpp
@@ -376,19 +376,20 @@ CustomElf::LoadSegment(const Phdr *pt_lo
   }
 
   int prot = ((pt_load->p_flags & PF_X) ? PROT_EXEC : 0) |
              ((pt_load->p_flags & PF_W) ? PROT_WRITE : 0) |
              ((pt_load->p_flags & PF_R) ? PROT_READ : 0);
 
   /* Mmap at page boundary */
   Addr align = PageSize();
+  Addr align_offset;
   void *mapped, *where;
   do {
-    Addr align_offset = pt_load->p_vaddr - AlignedPtr(pt_load->p_vaddr, align);
+    align_offset = pt_load->p_vaddr - AlignedPtr(pt_load->p_vaddr, align);
     where = GetPtr(pt_load->p_vaddr - align_offset);
     DEBUG_LOG("%s: Loading segment @%p %c%c%c", GetPath(), where,
                                                 prot & PROT_READ ? 'r' : '-',
                                                 prot & PROT_WRITE ? 'w' : '-',
                                                 prot & PROT_EXEC ? 'x' : '-');
     mapped = mappable->mmap(where, pt_load->p_filesz + align_offset,
                             prot, MAP_PRIVATE | MAP_FIXED,
                             pt_load->p_offset - align_offset);
@@ -415,32 +416,38 @@ CustomElf::LoadSegment(const Phdr *pt_lo
   }
 
   /* Ensure the availability of all pages within the mapping if on-demand
    * decompression is disabled (MOZ_LINKER_ONDEMAND=0 or signal handler not
    * registered). */
   const char *ondemand = getenv("MOZ_LINKER_ONDEMAND");
   if (!ElfLoader::Singleton.hasRegisteredHandler() ||
       (ondemand && !strncmp(ondemand, "0", 2 /* Including '\0' */))) {
-    for (Addr off = 0; off < pt_load->p_filesz; off += PageSize()) {
+    for (Addr off = 0; off < pt_load->p_filesz + align_offset;
+         off += PageSize()) {
       mappable->ensure(reinterpret_cast<char *>(mapped) + off);
     }
   }
   /* When p_memsz is greater than p_filesz, we need to have nulled out memory
    * after p_filesz and before p_memsz.
-   * Mappable::mmap already guarantees that after p_filesz and up to the end
-   * of the page p_filesz is in, memory is nulled out.
-   * Above the end of that page, and up to p_memsz, we already have nulled out
-   * memory because we mapped anonymous memory on the whole library virtual
+   * Above the end of the last page, and up to p_memsz, we already have nulled
+   * out memory because we mapped anonymous memory on the whole library virtual
    * address space. We just need to adjust this anonymous memory protection
    * flags. */
   if (pt_load->p_memsz > pt_load->p_filesz) {
     Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
     Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
     Addr next_page = PageAlignedEndPtr(file_end);
+    if (next_page > file_end) {
+      /* The library is not registered at this point, so we can't rely on
+       * on-demand decompression to handle missing pages here. */
+      void *ptr = GetPtr(file_end);
+      mappable->ensure(ptr);
+      memset(ptr, 0, next_page - file_end);
+    }
     if (mem_end > next_page) {
       if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
         LOG("%s: Failed to mprotect", GetPath());
         return false;
       }
     }
   }
   return true;
--- a/mozglue/linker/Mappable.cpp
+++ b/mozglue/linker/Mappable.cpp
@@ -30,28 +30,18 @@ MappableFile::Create(const char *path)
 MemoryRange
 MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                    off_t offset)
 {
   MOZ_ASSERT(fd != -1);
   MOZ_ASSERT(!(flags & MAP_SHARED));
   flags |= MAP_PRIVATE;
 
-  MemoryRange mapped = MemoryRange::mmap(const_cast<void *>(addr), length,
-                                         prot, flags, fd, offset);
-  if (mapped == MAP_FAILED)
-    return mapped;
-
-  /* Fill the remainder of the last page with zeroes when the requested
-   * protection has write bits. */
-  if ((mapped != MAP_FAILED) && (prot & PROT_WRITE) &&
-      (PageAlignedSize(length) > length)) {
-    memset(mapped + length, 0, PageAlignedSize(length) - length);
-  }
-  return mapped;
+  return MemoryRange::mmap(const_cast<void *>(addr), length, prot, flags,
+                           fd, offset);
 }
 
 void
 MappableFile::finalize()
 {
   /* Close file ; equivalent to close(fd.forget()) */
   fd = -1;
 }
@@ -448,19 +438,20 @@ MappableSeekableZStream::ensure(const vo
     return false;
 
   /* Find corresponding chunk */
   off_t mapOffset = map->offsetOf(addrPage);
   off_t chunk = mapOffset / zStream.GetChunkSize();
 
   /* In the typical case, we just need to decompress the chunk entirely. But
    * when the current mapping ends in the middle of the chunk, we want to
-   * stop there. However, if another mapping needs the last part of the
-   * chunk, we still need to continue. As mappings are ordered by offset
-   * and length, we don't need to scan the entire list of mappings.
+   * stop at the end of the corresponding page.
+   * However, if another mapping needs the last part of the chunk, we still
+   * need to continue. As mappings are ordered by offset and length, we don't
+   * need to scan the entire list of mappings.
    * It is safe to run through lazyMaps here because the linker is never
    * going to call mmap (which adds lazyMaps) while this function is
    * called. */
   size_t length = zStream.GetChunkSize(chunk);
   off_t chunkStart = chunk * zStream.GetChunkSize();
   off_t chunkEnd = chunkStart + length;
   std::vector<LazyMap>::iterator it;
   for (it = map; it < lazyMaps.end(); ++it) {
@@ -468,16 +459,18 @@ MappableSeekableZStream::ensure(const vo
       break;
   }
   if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
     /* The mapping "it" points at now is past the interesting one */
     --it;
     length = it->endOffset() - chunkStart;
   }
 
+  length = PageAlignedSize(length);
+
   AutoLock lock(&mutex);
 
   /* The very first page is mapped and accessed separately of the rest, and
    * as such, only the first page of the first chunk is decompressed this way.
    * When we fault in the remaining pages of that chunk, we want to decompress
    * the complete chunk again. Short of doing that, we would end up with
    * no data between PageSize() and chunkSize, which would effectively corrupt
    * symbol resolution in the underlying library. */
--- a/mozglue/linker/Mappable.h
+++ b/mozglue/linker/Mappable.h
@@ -11,21 +11,18 @@
 #include "SeekableZStream.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/Scoped.h"
 #include "zlib.h"
 
 /**
  * Abstract class to handle mmap()ing from various kind of entities, such as
  * plain files or Zip entries. The virtual members are meant to act as the
- * equivalent system functions, with a few differences:
- * - mapped memory is always MAP_PRIVATE, even though a given implementation
- *   may use something different internally.
- * - memory after length and up to the end of the corresponding page is nulled
- *   out.
+ * equivalent system functions, except mapped memory is always MAP_PRIVATE,
+ * even though a given implementation may use something different internally.
  */
 class Mappable: public mozilla::RefCounted<Mappable>
 {
 public:
   virtual ~Mappable() { }
 
   virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags,
                            off_t offset) = 0;