Bug 886730 - Add and use a MemoryRange class and helper functions for page alignment in faulty.lib. r=nfroyd
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 27 Jun 2013 09:35:49 +0900
changeset 136636 e307319bf2e4b2354e1e28a10fcd7e87656c30d7
parent 136635 476f3ffebc407a786017f67b7e7fcf214b631d7b
child 136637 ae6f3b0bc23d1ba77365d2ca8510d7150d44e759
push id 24889
push user emorley@mozilla.com
push date Thu, 27 Jun 2013 10:31:05 +0000
treeherder mozilla-central@b8a80bf3f9da
reviewers nfroyd
bugs 886730
milestone 25.0a1
mozglue/linker/CustomElf.cpp
mozglue/linker/ElfLoader.cpp
mozglue/linker/Mappable.cpp
mozglue/linker/Mappable.h
mozglue/linker/SeekableZStream.cpp
mozglue/linker/Utils.h
mozglue/linker/szip.cpp
--- a/mozglue/linker/CustomElf.cpp
+++ b/mozglue/linker/CustomElf.cpp
@@ -8,24 +8,16 @@
 #include <dlfcn.h>
 #include "CustomElf.h"
 #include "Mappable.h"
 #include "Logging.h"
 
 using namespace Elf;
 using namespace mozilla;
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
-#ifndef PAGE_MASK
-#define PAGE_MASK (~ (PAGE_SIZE - 1))
-#endif
-
 /* TODO: Fill ElfLoader::Singleton.lastError on errors. */
 
 /* Function used to report library mappings from the custom linker to Gecko
  * crash reporter */
 #ifdef ANDROID
 extern "C" {
   void report_mapping(char *name, void *base, uint32_t len, uint32_t offset);
 }
@@ -77,17 +69,17 @@ void debug_phdr(const char *type, const 
 /**
  * RAII wrapper for a mapping of the first page off a Mappable object.
  * This calls Mappable::munmap instead of system munmap.
  */
 class Mappable1stPagePtr: public GenericMappedPtr<Mappable1stPagePtr> {
 public:
   Mappable1stPagePtr(Mappable *mappable)
   : GenericMappedPtr<Mappable1stPagePtr>(
-      mappable->mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, 0), PAGE_SIZE)
+      mappable->mmap(NULL, PageSize(), PROT_READ, MAP_PRIVATE, 0))
   , mappable(mappable)
   {
     /* Ensure the content of this page */
     mappable->ensure(*this);
   }
 
 private:
   friend class GenericMappedPtr<Mappable1stPagePtr>;
@@ -184,18 +176,18 @@ CustomElf::Load(Mappable *mappable, cons
    * As we are using the base address from here to mmap something else with
    * MAP_FIXED | MAP_SHARED, we need to make sure these mmaps will work. For
    * instance, on armv6, MAP_SHARED mappings require a 16k alignment, but mmap
    * MAP_PRIVATE only returns a 4k aligned address. So we first get a base
    * address with MAP_SHARED, which guarantees the kernel returns an address
    * that we'll be able to use with MAP_FIXED, and then remap MAP_PRIVATE at
    * the same address, because of some bad side effects of keeping it as
    * MAP_SHARED. */
-  elf->base.Assign(mmap(NULL, max_vaddr, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS,
-                      -1, 0), max_vaddr);
+  elf->base.Assign(MemoryRange::mmap(NULL, max_vaddr, PROT_NONE,
+                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0));
   if ((elf->base == MAP_FAILED) ||
       (mmap(elf->base, max_vaddr, PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != elf->base)) {
     LOG("%s: Failed to mmap", elf->GetPath());
     return NULL;
   }
 
   /* Load and initialize library */
@@ -383,20 +375,20 @@ CustomElf::LoadSegment(const Phdr *pt_lo
     return false;
   }
 
   int prot = ((pt_load->p_flags & PF_X) ? PROT_EXEC : 0) |
              ((pt_load->p_flags & PF_W) ? PROT_WRITE : 0) |
              ((pt_load->p_flags & PF_R) ? PROT_READ : 0);
 
   /* Mmap at page boundary */
-  Addr align = PAGE_SIZE;
+  Addr align = PageSize();
   void *mapped, *where;
   do {
-    Addr align_offset = pt_load->p_vaddr & (align - 1);
+    Addr align_offset = pt_load->p_vaddr - AlignedPtr(pt_load->p_vaddr, align);
     where = GetPtr(pt_load->p_vaddr - align_offset);
     DEBUG_LOG("%s: Loading segment @%p %c%c%c", GetPath(), where,
                                                 prot & PROT_READ ? 'r' : '-',
                                                 prot & PROT_WRITE ? 'w' : '-',
                                                 prot & PROT_EXEC ? 'x' : '-');
     mapped = mappable->mmap(where, pt_load->p_filesz + align_offset,
                             prot, MAP_PRIVATE | MAP_FIXED,
                             pt_load->p_offset - align_offset);
@@ -423,32 +415,32 @@ CustomElf::LoadSegment(const Phdr *pt_lo
   }
 
   /* Ensure the availability of all pages within the mapping if on-demand
    * decompression is disabled (MOZ_LINKER_ONDEMAND=0 or signal handler not
    * registered). */
   const char *ondemand = getenv("MOZ_LINKER_ONDEMAND");
   if (!ElfLoader::Singleton.hasRegisteredHandler() ||
       (ondemand && !strncmp(ondemand, "0", 2 /* Including '\0' */))) {
-    for (Addr off = 0; off < pt_load->p_filesz; off += PAGE_SIZE) {
+    for (Addr off = 0; off < pt_load->p_filesz; off += PageSize()) {
       mappable->ensure(reinterpret_cast<char *>(mapped) + off);
     }
   }
   /* When p_memsz is greater than p_filesz, we need to have nulled out memory
    * after p_filesz and before p_memsz.
    * Mappable::mmap already guarantees that after p_filesz and up to the end
    * of the page p_filesz is in, memory is nulled out.
    * Above the end of that page, and up to p_memsz, we already have nulled out
    * memory because we mapped anonymous memory on the whole library virtual
    * address space. We just need to adjust this anonymous memory protection
    * flags. */
   if (pt_load->p_memsz > pt_load->p_filesz) {
     Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
     Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
-    Addr next_page = (file_end + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+    Addr next_page = PageAlignedEndPtr(file_end);
     if (mem_end > next_page) {
       if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
         LOG("%s: Failed to mprotect", GetPath());
         return false;
       }
     }
   }
   return true;
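
The align_offset arithmetic in LoadSegment above now goes through AlignedPtr instead of masking with (align - 1). A minimal standalone sketch of the same computation, with hypothetical segment values (illustration only, not part of the patch):

  #include <cstddef>
  #include <cstdio>
  #include <stdint.h>

  // Same rounding as AlignedPtr(ptr, alignment) in Utils.h.
  static uintptr_t Aligned(uintptr_t ptr, size_t alignment)
  {
    return ptr & ~(alignment - 1);
  }

  int main()
  {
    // Hypothetical PT_LOAD values: the segment starts 0x234 bytes into a page.
    uintptr_t p_vaddr  = 0x1234;
    uintptr_t p_offset = 0x5234;
    size_t    p_filesz = 0x800;
    size_t    align    = 4096;  // PageSize()

    uintptr_t align_offset = p_vaddr - Aligned(p_vaddr, align);  // 0x234
    // The mapping is issued at the page boundary, with the file offset and
    // the length adjusted by the same amount, so it still covers exactly
    // [p_vaddr, p_vaddr + p_filesz).
    printf("map at 0x%lx, file offset 0x%lx, length 0x%lx\n",
           (unsigned long)(p_vaddr - align_offset),    // 0x1000
           (unsigned long)(p_offset - align_offset),   // 0x5000
           (unsigned long)(p_filesz + align_offset));  // 0xa34
    return 0;
  }
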
--- a/mozglue/linker/ElfLoader.cpp
+++ b/mozglue/linker/ElfLoader.cpp
@@ -29,24 +29,16 @@ inline int sigaltstack(const stack_t *ss
 }
 
 } /* extern "C" */
 #endif /* __ANDROID_API__ */
 #endif /* ANDROID */
 
 using namespace mozilla;
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
-#ifndef PAGE_MASK
-#define PAGE_MASK (~ (PAGE_SIZE - 1))
-#endif
-
 /**
  * dlfcn.h replacements functions
  */
 
 void *
 __wrap_dlopen(const char *path, int flags)
 {
   RefPtr<LibHandle> handle = ElfLoader::Singleton.Load(path, flags);
@@ -111,17 +103,17 @@ int
     info.dlpi_name = it->l_name;
     info.dlpi_phdr = NULL;
     info.dlpi_phnum = 0;
 
     // Assuming l_addr points to Elf headers (in most cases, this is true),
     // get the Phdr location from there.
     uint8_t mapped;
     // If the page is not mapped, mincore returns an error.
-    if (!mincore(const_cast<void*>(it->l_addr), PAGE_SIZE, &mapped)) {
+    if (!mincore(const_cast<void*>(it->l_addr), PageSize(), &mapped)) {
       const Elf::Ehdr *ehdr = Elf::Ehdr::validate(it->l_addr);
       if (ehdr) {
         info.dlpi_phdr = reinterpret_cast<const Elf::Phdr *>(
                          reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
         info.dlpi_phnum = ehdr->e_phnum;
       }
     }
 
@@ -205,17 +197,17 @@ LibHandle::MappableMMap(void *addr, size
 {
   if (!mappable)
     mappable = GetMappable();
   if (!mappable)
     return MAP_FAILED;
   void* mapped = mappable->mmap(addr, length, PROT_READ, MAP_PRIVATE, offset);
   if (mapped != MAP_FAILED) {
     /* Ensure the availability of all pages within the mapping */
-    for (size_t off = 0; off < length; off += PAGE_SIZE) {
+    for (size_t off = 0; off < length; off += PageSize()) {
       mappable->ensure(reinterpret_cast<char *>(mapped) + off);
     }
   }
   return mapped;
 }
 
 void
 LibHandle::MappableMUnmap(void *addr, size_t length) const
@@ -597,43 +589,44 @@ ElfLoader::DebuggerHelper::DebuggerHelpe
   * AT_PHNUM, which gives us the location and size of the ELF program
    * headers. */
   Array<Elf::Phdr> phdrs;
   char *base = NULL;
   while (auxv->type) {
     if (auxv->type == AT_PHDR) {
       phdrs.Init(reinterpret_cast<Elf::Phdr*>(auxv->value));
       /* Assume the base address is the first byte of the same page */
-      base = reinterpret_cast<char *>(auxv->value & PAGE_MASK);
+      base = reinterpret_cast<char *>(PageAlignedPtr(auxv->value));
     }
     if (auxv->type == AT_PHNUM)
       phdrs.Init(auxv->value);
     auxv++;
   }
 
   if (!phdrs) {
     DEBUG_LOG("Couldn't find program headers");
     return;
   }
 
   /* In some cases, the address for the program headers we get from the
    * auxiliary vectors is not mapped, because of the PT_LOAD segments
    * definitions in the program executable. Trying to map anonymous memory
    * with a hint giving the base address will return a different address
    * if something is mapped there, and the base address otherwise. */
-  MappedPtr mem(mmap(base, PAGE_SIZE, PROT_NONE,
-                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0), PAGE_SIZE);
+  MappedPtr mem(MemoryRange::mmap(base, PageSize(), PROT_NONE,
+                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
   if (mem == base) {
     /* If program headers aren't mapped, try to map them */
     int fd = open("/proc/self/exe", O_RDONLY);
     if (fd == -1) {
       DEBUG_LOG("Failed to open /proc/self/exe");
       return;
     }
-    mem.Assign(mmap(base, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0), PAGE_SIZE);
+    mem.Assign(MemoryRange::mmap(base, PageSize(), PROT_READ, MAP_PRIVATE,
+                                 fd, 0));
     /* If we don't manage to map at the right address, just give up. */
     if (mem != base) {
       DEBUG_LOG("Couldn't read program headers");
       return;
     }
   }
   /* Sanity check: the first bytes at the base address should be an ELF
    * header. */
@@ -677,34 +670,34 @@ ElfLoader::DebuggerHelper::DebuggerHelpe
  * restored to their original value when the instance is destroyed.
  */
 class EnsureWritable
 {
 public:
   template <typename T>
   EnsureWritable(T *ptr, size_t length_ = sizeof(T))
   {
-    MOZ_ASSERT(length_ < PAGE_SIZE);
+    MOZ_ASSERT(length_ < PageSize());
     prot = -1;
     page = MAP_FAILED;
 
-    uintptr_t firstPage = reinterpret_cast<uintptr_t>(ptr) & PAGE_MASK;
-    length = (reinterpret_cast<uintptr_t>(ptr) + length_ - firstPage
-              + PAGE_SIZE - 1) & PAGE_MASK;
-
+    char *firstPage = PageAlignedPtr(reinterpret_cast<char *>(ptr));
+    char *lastPageEnd = PageAlignedEndPtr(reinterpret_cast<char *>(ptr) + length_);
+    length = lastPageEnd - firstPage;
+    uintptr_t start = reinterpret_cast<uintptr_t>(firstPage);
     uintptr_t end;
 
-    prot = getProt(firstPage, &end);
-    if (prot == -1 || (firstPage + length) > end)
+    prot = getProt(start, &end);
+    if (prot == -1 || (start + length) > end)
       MOZ_CRASH();
 
     if (prot & PROT_WRITE)
       return;
 
-    page = reinterpret_cast<void *>(firstPage);
+    page = firstPage;
     mprotect(page, length, prot | PROT_WRITE);
   }
 
   ~EnsureWritable()
   {
     if (page != MAP_FAILED) {
       mprotect(page, length, prot);
 }
@@ -903,18 +896,18 @@ SEGVHandler::SEGVHandler()
 : registeredHandler(false)
 {
   if (!Divert(sigaction, __wrap_sigaction))
     return;
   /* Setup an alternative stack if the already existing one is not big
    * enough, or if there is none. */
   if (sigaltstack(NULL, &oldStack) == -1 || !oldStack.ss_sp ||
       oldStack.ss_size < stackSize) {
-    stackPtr.Assign(mmap(NULL, stackSize, PROT_READ | PROT_WRITE,
-                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0), stackSize);
+    stackPtr.Assign(MemoryRange::mmap(NULL, stackSize, PROT_READ | PROT_WRITE,
+                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
     stack_t stack;
     stack.ss_sp = stackPtr;
     stack.ss_size = stackSize;
     stack.ss_flags = 0;
     sigaltstack(&stack, NULL);
   }
   /* Register our own handler, and store the already registered one in
    * SEGVHandler's struct sigaction member */
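
EnsureWritable above derives its page range from PageAlignedPtr and PageAlignedEndPtr rather than the hand-rolled PAGE_MASK arithmetic it replaces. A small sketch of that computation with hypothetical values (kPage stands in for the 4096-byte PageSize()):

  #include <cassert>
  #include <cstddef>
  #include <stdint.h>

  static const size_t kPage = 4096;  // stands in for PageSize()

  static uintptr_t AlignDown(uintptr_t p) { return p & ~(kPage - 1); }        // PageAlignedPtr
  static uintptr_t AlignUp(uintptr_t p) { return AlignDown(p + kPage - 1); }  // PageAlignedEndPtr

  int main()
  {
    // A 16-byte object straddling a page boundary...
    uintptr_t ptr = 0x7ff8;
    size_t length = 16;
    uintptr_t firstPage   = AlignDown(ptr);         // 0x7000
    uintptr_t lastPageEnd = AlignUp(ptr + length);  // 0x9000
    // ...requires two pages to be made writable, which is the length that
    // EnsureWritable passes to mprotect.
    assert(lastPageEnd - firstPage == 2 * kPage);
    return 0;
  }
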
--- a/mozglue/linker/Mappable.cpp
+++ b/mozglue/linker/Mappable.cpp
@@ -13,52 +13,43 @@
 #ifdef ANDROID
 #include <linux/ashmem.h>
 #endif
 #include <sys/stat.h>
 #include "ElfLoader.h"
 #include "SeekableZStream.h"
 #include "Logging.h"
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
-#ifndef PAGE_MASK
-#define PAGE_MASK (~ (PAGE_SIZE - 1))
-#endif
-
 Mappable *
 MappableFile::Create(const char *path)
 {
   int fd = open(path, O_RDONLY);
   if (fd != -1)
     return new MappableFile(fd);
   return NULL;
 }
 
-void *
+MemoryRange
 MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                    off_t offset)
 {
   MOZ_ASSERT(fd != -1);
   MOZ_ASSERT(!(flags & MAP_SHARED));
   flags |= MAP_PRIVATE;
 
-  void *mapped = ::mmap(const_cast<void *>(addr), length, prot, flags,
-                        fd, offset);
+  MemoryRange mapped = MemoryRange::mmap(const_cast<void *>(addr), length,
+                                         prot, flags, fd, offset);
   if (mapped == MAP_FAILED)
     return mapped;
 
   /* Fill the remainder of the last page with zeroes when the requested
    * protection has write bits. */
   if ((mapped != MAP_FAILED) && (prot & PROT_WRITE) &&
-      (length & (PAGE_SIZE - 1))) {
-    memset(reinterpret_cast<char *>(mapped) + length, 0,
-           PAGE_SIZE - (length & ~(PAGE_MASK)));
+      (PageAlignedSize(length) > length)) {
+    memset(mapped + length, 0, PageAlignedSize(length) - length);
   }
   return mapped;
 }
 
 void
 MappableFile::finalize()
 {
   /* Close file ; equivalent to close(fd.forget()) */
@@ -104,18 +95,18 @@ MappableExtractFile::Create(const char *
   AutoUnlinkFile file;
   file = path.forget();
   if (stream->GetType() == Zip::Stream::DEFLATE) {
     if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
       LOG("Couldn't ftruncate %s to decompress library", file.get());
       return NULL;
     }
     /* Map the temporary file for use as inflate buffer */
-    MappedPtr buffer(::mmap(NULL, stream->GetUncompressedSize(), PROT_WRITE,
-                            MAP_SHARED, fd, 0), stream->GetUncompressedSize());
+    MappedPtr buffer(MemoryRange::mmap(NULL, stream->GetUncompressedSize(),
+                                       PROT_WRITE, MAP_SHARED, fd, 0));
     if (buffer == MAP_FAILED) {
       LOG("Couldn't map %s to decompress library", file.get());
       return NULL;
     }
 
     z_stream zStream = stream->GetZStream(buffer);
 
     /* Decompress */
@@ -141,18 +132,18 @@ MappableExtractFile::Create(const char *
     if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
       LOG("Couldn't initialize SeekableZStream for %s", name);
       return NULL;
     }
     if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
       LOG("Couldn't ftruncate %s to decompress library", file.get());
       return NULL;
     }
-    MappedPtr buffer(::mmap(NULL, zStream.GetUncompressedSize(), PROT_WRITE,
-                            MAP_SHARED, fd, 0), zStream.GetUncompressedSize());
+    MappedPtr buffer(MemoryRange::mmap(NULL, zStream.GetUncompressedSize(),
+                                       PROT_WRITE, MAP_SHARED, fd, 0));
     if (buffer == MAP_FAILED) {
       LOG("Couldn't map %s to decompress library", file.get());
       return NULL;
     }
 
     if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
       LOG("%s: failed to decompress", name);
       return NULL;
@@ -280,67 +271,67 @@ MappableDeflate::Create(const char *name
 }
 
 MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
                                  Zip::Stream *stream)
 : zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }
 
 MappableDeflate::~MappableDeflate() { }
 
-void *
+MemoryRange
 MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
 {
   MOZ_ASSERT(buffer);
   MOZ_ASSERT(!(flags & MAP_SHARED));
   flags |= MAP_PRIVATE;
 
   /* The deflate stream is uncompressed up to the required offset + length, if
    * it hasn't previously been uncompressed */
   ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
   if (missing > 0) {
     uInt avail_out = zStream.avail_out;
     zStream.avail_out = missing;
     if ((*buffer == zStream.next_out) &&
         (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
       LOG("inflateInit failed: %s", zStream.msg);
-      return MAP_FAILED;
+      return MemoryRange(MAP_FAILED, 0);
     }
     int ret = inflate(&zStream, Z_SYNC_FLUSH);
     if (ret < 0) {
       LOG("inflate failed: %s", zStream.msg);
-      return MAP_FAILED;
+      return MemoryRange(MAP_FAILED, 0);
     }
     if (ret == Z_NEED_DICT) {
       LOG("zstream requires a dictionary. %s", zStream.msg);
-      return MAP_FAILED;
+      return MemoryRange(MAP_FAILED, 0);
     }
     zStream.avail_out = avail_out - missing + zStream.avail_out;
     if (ret == Z_STREAM_END) {
       if (inflateEnd(&zStream) != Z_OK) {
         LOG("inflateEnd failed: %s", zStream.msg);
-        return MAP_FAILED;
+        return MemoryRange(MAP_FAILED, 0);
       }
       if (zStream.total_out != buffer->GetLength()) {
         LOG("File not fully uncompressed! %ld / %d", zStream.total_out,
             static_cast<unsigned int>(buffer->GetLength()));
-        return MAP_FAILED;
+        return MemoryRange(MAP_FAILED, 0);
       }
     }
   }
 #if defined(ANDROID) && defined(__arm__)
   if (prot & PROT_EXEC) {
     /* We just extracted data that may be executed in the future.
      * We thus need to ensure Instruction and Data cache coherency. */
     DEBUG_LOG("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
     cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
                reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
   }
 #endif
 
-  return buffer->mmap(addr, length, prot, flags, offset);
+  return MemoryRange(buffer->mmap(addr, length, prot, flags, offset), length);
 }
 
 void
 MappableDeflate::finalize()
 {
   /* Free zlib internal buffers */
   inflateEnd(&zStream);
   /* Free decompression buffer */
@@ -383,36 +374,36 @@ MappableSeekableZStream::Create(const ch
 MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
 : zip(zip), chunkAvailNum(0) { }
 
 MappableSeekableZStream::~MappableSeekableZStream()
 {
   pthread_mutex_destroy(&mutex);
 }
 
-void *
+MemoryRange
 MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                               int flags, off_t offset)
 {
   /* Map with PROT_NONE so that accessing the mapping would segfault, and
    * bring us to ensure() */
   void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
   if (res == MAP_FAILED)
-    return MAP_FAILED;
+    return MemoryRange(MAP_FAILED, 0);
 
   /* Store the mapping, ordered by offset and length */
   std::vector<LazyMap>::reverse_iterator it;
   for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
     if ((it->offset < offset) ||
         ((it->offset == offset) && (it->length < length)))
       break;
   }
   LazyMap map = { res, length, prot, offset };
   lazyMaps.insert(it.base(), map);
-  return res;
+  return MemoryRange(res, length);
 }
 
 void
 MappableSeekableZStream::munmap(void *addr, size_t length)
 {
   std::vector<LazyMap>::iterator it;
   for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
     if ((it->addr == addr) && (it->length == length)) {
@@ -441,18 +432,17 @@ public:
 private:
   pthread_mutex_t *mutex;
 };
 
 bool
 MappableSeekableZStream::ensure(const void *addr)
 {
   DEBUG_LOG("ensure @%p", addr);
-  void *addrPage = reinterpret_cast<void *>
-                   (reinterpret_cast<uintptr_t>(addr) & PAGE_MASK);
+  const void *addrPage = PageAlignedPtr(addr);
   /* Find the mapping corresponding to the given page */
   std::vector<LazyMap>::iterator map;
   for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
     if (map->Contains(addrPage))
       break;
   }
   if (map == lazyMaps.end())
     return false;
@@ -484,36 +474,36 @@ MappableSeekableZStream::ensure(const vo
   }
 
   AutoLock lock(&mutex);
 
   /* The very first page is mapped and accessed separately of the rest, and
    * as such, only the first page of the first chunk is decompressed this way.
    * When we fault in the remaining pages of that chunk, we want to decompress
    * the complete chunk again. Short of doing that, we would end up with
-   * no data between PAGE_SIZE and chunkSize, which would effectively corrupt
+   * no data between PageSize() and chunkSize, which would effectively corrupt
    * symbol resolution in the underlying library. */
-  if (chunkAvail[chunk] < (length + PAGE_SIZE - 1) / PAGE_SIZE) {
+  if (chunkAvail[chunk] < PageNumber(length)) {
     if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
       return false;
 
 #if defined(ANDROID) && defined(__arm__)
     if (map->prot & PROT_EXEC) {
       /* We just extracted data that may be executed in the future.
        * We thus need to ensure Instruction and Data cache coherency. */
       DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
       cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                  reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
     }
 #endif
     /* Only count if we haven't already decompressed parts of the chunk */
     if (chunkAvail[chunk] == 0)
       chunkAvailNum++;
 
-    chunkAvail[chunk] = (length + PAGE_SIZE - 1) / PAGE_SIZE;
+    chunkAvail[chunk] = PageNumber(length);
   }
 
   /* Flip the chunk mapping protection to the recorded flags. We could
    * also flip the protection for other mappings of the same chunk,
    * but it's easier to skip that and let further segfaults call
    * ensure again. */
   const void *chunkAddr = reinterpret_cast<const void *>
                           (reinterpret_cast<uintptr_t>(addrPage)
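
MappableFile::mmap above now expresses the zero-fill of the last partial page through PageAlignedSize, and MappableSeekableZStream::ensure counts available pages with PageNumber. A worked sketch of the size arithmetic with hypothetical lengths (kPage stands in for PageSize(); illustration only):

  #include <cassert>
  #include <cstddef>

  static const size_t kPage = 4096;  // stands in for PageSize()

  // Same rounding as PageAlignedSize() and PageNumber() in Utils.h.
  static size_t PageAligned(size_t size) { return (size + kPage - 1) & ~(kPage - 1); }
  static size_t Pages(size_t size) { return (size + kPage - 1) / kPage; }

  int main()
  {
    // A 5000-byte mapping ends mid-page: PageAlignedSize(length) - length
    // trailing bytes remain on the last page, and that is exactly what the
    // memset in MappableFile::mmap clears when PROT_WRITE is requested.
    size_t length = 5000;
    assert(PageAligned(length) == 8192);
    assert(PageAligned(length) - length == 3192);
    // An exactly page-sized request leaves nothing to clear, so the
    // (PageAlignedSize(length) > length) guard skips the memset.
    assert(PageAligned(kPage) == kPage);
    // PageNumber counts the pages covering a chunk, as used for chunkAvail.
    assert(Pages(length) == 2);
    assert(Pages(kPage) == 1);
    return 0;
  }
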
--- a/mozglue/linker/Mappable.h
+++ b/mozglue/linker/Mappable.h
@@ -22,18 +22,18 @@
  * - memory after length and up to the end of the corresponding page is nulled
  *   out.
  */
 class Mappable: public mozilla::RefCounted<Mappable>
 {
 public:
   virtual ~Mappable() { }
 
-  virtual void *mmap(const void *addr, size_t length, int prot, int flags,
-                     off_t offset) = 0;
+  virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags,
+                           off_t offset) = 0;
 
   enum Kind {
     MAPPABLE_FILE,
     MAPPABLE_EXTRACT_FILE,
     MAPPABLE_DEFLATE,
     MAPPABLE_SEEKABLE_ZSTREAM
   };
 
@@ -87,17 +87,17 @@ public:
   ~MappableFile() { }
 
   /**
    * Create a MappableFile instance for the given file path.
    */
   static Mappable *Create(const char *path);
 
   /* Inherited from Mappable */
-  virtual void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+  virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
   virtual void finalize();
   virtual size_t GetLength() const;
 
   virtual Kind GetKind() const { return MAPPABLE_FILE; };
 protected:
   MappableFile(int fd): fd(fd) { }
 
 private:
@@ -165,17 +165,17 @@ public:
   /**
    * Create a MappableDeflate instance for the given Zip stream. The name
    * argument is used for an appropriately named temporary file, and the Zip
    * instance is given for the MappableDeflate to keep a reference of it.
    */
   static Mappable *Create(const char *name, Zip *zip, Zip::Stream *stream);
 
   /* Inherited from Mappable */
-  virtual void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+  virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
   virtual void finalize();
   virtual size_t GetLength() const;
 
   virtual Kind GetKind() const { return MAPPABLE_DEFLATE; };
 private:
   MappableDeflate(_MappableBuffer *buf, Zip *zip, Zip::Stream *stream);
 
   /* Zip reference */
@@ -202,17 +202,17 @@ public:
    * name argument is used for an appropriately named temporary file, and the
    * Zip instance is given for the MappableSeekableZStream to keep a reference
    * of it.
    */
   static Mappable *Create(const char *name, Zip *zip,
                                          Zip::Stream *stream);
 
   /* Inherited from Mappable */
-  virtual void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
+  virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
   virtual void munmap(void *addr, size_t length);
   virtual void finalize();
   virtual bool ensure(const void *addr);
   virtual void stats(const char *when, const char *name) const;
   virtual size_t GetLength() const;
 
   virtual Kind GetKind() const { return MAPPABLE_SEEKABLE_ZSTREAM; };
 private:
--- a/mozglue/linker/SeekableZStream.cpp
+++ b/mozglue/linker/SeekableZStream.cpp
@@ -1,24 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <algorithm>
 #include "SeekableZStream.h"
 #include "Logging.h"
 
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
-#ifndef PAGE_MASK
-#define PAGE_MASK (~ (PAGE_SIZE - 1))
-#endif
-
 bool
 SeekableZStream::Init(const void *buf, size_t length)
 {
   const SeekableZStreamHeader *header = SeekableZStreamHeader::validate(buf);
   if (!header) {
     LOG("Not a seekable zstream");
     return false;
   }
@@ -30,18 +22,18 @@ SeekableZStream::Init(const void *buf, s
   windowBits = header->windowBits;
   dictionary.Init(buffer + sizeof(SeekableZStreamHeader), header->dictSize);
   offsetTable.Init(buffer + sizeof(SeekableZStreamHeader) + header->dictSize,
                    header->nChunks);
   filter = GetFilter(header->filter);
 
   /* Sanity check */
   if ((chunkSize == 0) ||
-      (chunkSize % PAGE_SIZE) ||
-      (chunkSize > 8 * PAGE_SIZE) ||
+      (!IsPageAlignedSize(chunkSize)) ||
+      (chunkSize > 8 * PageSize()) ||
       (offsetTable.numElements() < 1) ||
       (lastChunkSize == 0) ||
       (lastChunkSize > chunkSize) ||
       (length < totalSize)) {
     LOG("Malformed or broken seekable zstream");
     return false;
   }
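
With the helpers, the sanity check above reads: chunkSize must be a non-zero, page-aligned size of at most eight pages. A small sketch with hypothetical values (kPage stands in for PageSize()):

  #include <cassert>
  #include <cstddef>

  static const size_t kPage = 4096;  // stands in for PageSize()

  // Mirrors the chunkSize part of the sanity check above.
  static bool ChunkSizeOk(size_t chunkSize)
  {
    return chunkSize != 0 &&            // (chunkSize == 0)
           chunkSize % kPage == 0 &&    // IsPageAlignedSize(chunkSize)
           chunkSize <= 8 * kPage;      // (chunkSize > 8 * PageSize())
  }

  int main()
  {
    assert(ChunkSizeOk(16384));   // 4 pages: accepted
    assert(!ChunkSizeOk(5000));   // not page-aligned: rejected
    assert(!ChunkSizeOk(65536));  // 16 pages: too large
    return 0;
  }
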
 
--- a/mozglue/linker/Utils.h
+++ b/mozglue/linker/Utils.h
@@ -97,40 +97,122 @@ struct AutoCloseFILETraits
 {
   typedef FILE *type;
   static FILE *empty() { return NULL; }
   static void release(FILE *f) { if (f) fclose(f); }
 };
 typedef mozilla::Scoped<AutoCloseFILETraits> AutoCloseFILE;
 
 /**
- * MappedPtr is a RAII wrapper for mmap()ed memory. It can be used as
- * a simple void * or unsigned char *.
- *
- * It is defined as a derivative of a template that allows to use a
- * different unmapping strategy.
+ * Page alignment helpers
  */
+static inline size_t PageSize()
+{
+  return 4096;
+}
+
+static inline uintptr_t AlignedPtr(uintptr_t ptr, size_t alignment)
+{
+  return ptr & ~(alignment - 1);
+}
+
 template <typename T>
-class GenericMappedPtr
+static inline T *AlignedPtr(T *ptr, size_t alignment)
+{
+  return reinterpret_cast<T *>(
+         AlignedPtr(reinterpret_cast<uintptr_t>(ptr), alignment));
+}
+
+template <typename T>
+static inline T PageAlignedPtr(T ptr)
+{
+  return AlignedPtr(ptr, PageSize());
+}
+
+static inline uintptr_t AlignedEndPtr(uintptr_t ptr, size_t alignment)
+{
+  return AlignedPtr(ptr + alignment - 1, alignment);
+}
+
+template <typename T>
+static inline T *AlignedEndPtr(T *ptr, size_t alignment)
+{
+  return reinterpret_cast<T *>(
+         AlignedEndPtr(reinterpret_cast<uintptr_t>(ptr), alignment));
+}
+
+template <typename T>
+static inline T PageAlignedEndPtr(T ptr)
+{
+  return AlignedEndPtr(ptr, PageSize());
+}
+
+static inline size_t AlignedSize(size_t size, size_t alignment)
+{
+  return (size + alignment - 1) & ~(alignment - 1);
+}
+
+static inline size_t PageAlignedSize(size_t size)
+{
+  return AlignedSize(size, PageSize());
+}
+
+static inline bool IsAlignedPtr(uintptr_t ptr, size_t alignment)
+{
+  return ptr % alignment == 0;
+}
+
+template <typename T>
+static inline bool IsAlignedPtr(T *ptr, size_t alignment)
+{
+  return IsAlignedPtr(reinterpret_cast<uintptr_t>(ptr), alignment);
+}
+
+template <typename T>
+static inline bool IsPageAlignedPtr(T ptr)
+{
+  return IsAlignedPtr(ptr, PageSize());
+}
+
+static inline bool IsAlignedSize(size_t size, size_t alignment)
+{
+  return size % alignment == 0;
+}
+
+static inline bool IsPageAlignedSize(size_t size)
+{
+  return IsAlignedSize(size, PageSize());
+}
+
+static inline size_t PageNumber(size_t size)
+{
+  return (size + PageSize() - 1) / PageSize();
+}
+
+/**
+ * MemoryRange stores a pointer, size pair.
+ */
+class MemoryRange
 {
 public:
-  GenericMappedPtr(void *buf, size_t length): buf(buf), length(length) { }
-  GenericMappedPtr(): buf(MAP_FAILED), length(0) { }
+  MemoryRange(void *buf, size_t length): buf(buf), length(length) { }
 
   void Assign(void *b, size_t len) {
-    if (buf != MAP_FAILED)
-      static_cast<T *>(this)->munmap(buf, length);
     buf = b;
     length = len;
   }
 
-  ~GenericMappedPtr()
+  void Assign(const MemoryRange& other) {
+    buf = other.buf;
+    length = other.length;
+  }
+
+  void *get() const
   {
-    if (buf != MAP_FAILED)
-      static_cast<T *>(this)->munmap(buf, length);
+    return buf;
   }
 
   operator void *() const
   {
     return buf;
   }
 
   operator unsigned char *() const
@@ -162,25 +244,65 @@ public:
   /**
    * Returns the length of the mapped range
    */
   size_t GetLength() const
   {
     return length;
   }
 
+  static MemoryRange mmap(void *addr, size_t length, int prot, int flags,
+                          int fd, off_t offset) {
+    return MemoryRange(::mmap(addr, length, prot, flags, fd, offset), length);
+  }
+
 private:
   void *buf;
   size_t length;
 };
 
+/**
+ * MappedPtr is a RAII wrapper for mmap()ed memory. It can be used as
+ * a simple void * or unsigned char *.
+ *
+ * It is defined as a derivative of a template that allows to use a
+ * different unmapping strategy.
+ */
+template <typename T>
+class GenericMappedPtr: public MemoryRange
+{
+public:
+  GenericMappedPtr(void *buf, size_t length): MemoryRange(buf, length) { }
+  GenericMappedPtr(const MemoryRange& other): MemoryRange(other) { }
+  GenericMappedPtr(): MemoryRange(MAP_FAILED, 0) { }
+
+  void Assign(void *b, size_t len) {
+    if (get() != MAP_FAILED)
+      static_cast<T *>(this)->munmap(get(), GetLength());
+    MemoryRange::Assign(b, len);
+  }
+
+  void Assign(const MemoryRange& other) {
+    Assign(other.get(), other.GetLength());
+  }
+
+  ~GenericMappedPtr()
+  {
+    if (get() != MAP_FAILED)
+      static_cast<T *>(this)->munmap(get(), GetLength());
+  }
+
+};
+
 struct MappedPtr: public GenericMappedPtr<MappedPtr>
 {
   MappedPtr(void *buf, size_t length)
   : GenericMappedPtr<MappedPtr>(buf, length) { }
+  MappedPtr(const MemoryRange& other)
+  : GenericMappedPtr<MappedPtr>(other) { }
   MappedPtr(): GenericMappedPtr<MappedPtr>() { }
 
 private:
   friend class GenericMappedPtr<MappedPtr>;
   void munmap(void *buf, size_t length)
   {
     ::munmap(buf, length);
   }
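
A minimal usage sketch of the new Utils.h helpers and the MemoryRange::mmap factory. It assumes compilation inside mozglue/linker so that the patched Utils.h is on the include path, and the concrete numbers rely on the hard-coded 4096-byte PageSize() above:

  #include <cassert>
  #include <cstddef>
  #include <stdint.h>
  #include <sys/mman.h>
  #include "Utils.h"

  int main()
  {
    // Size helpers: round up to page granularity and count pages.
    assert(PageAlignedSize(1) == 4096);   // (1 + 4095) & ~4095
    assert(PageNumber(4097) == 2);        // two pages cover 4097 bytes
    assert(IsPageAlignedSize(8192));

    // Pointer helpers: round down / up to the enclosing page boundaries.
    uintptr_t p = 0x12345;
    assert(PageAlignedPtr(p) == 0x12000);
    assert(PageAlignedEndPtr(p) == 0x13000);

    // MemoryRange couples a pointer with its length, so callers no longer
    // pass the size separately. The static mmap() wrapper forwards to
    // ::mmap and records the requested length.
    MemoryRange range = MemoryRange::mmap(NULL, PageSize(),
                                          PROT_READ | PROT_WRITE,
                                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (range != MAP_FAILED) {
      assert(range.GetLength() == PageSize());
      ::munmap(range, range.GetLength());  // MemoryRange itself never unmaps
    }
    return 0;
  }
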
--- a/mozglue/linker/szip.cpp
+++ b/mozglue/linker/szip.cpp
@@ -33,23 +33,23 @@ static const size_t maxChunkSize =
 
 class Buffer: public MappedPtr
 {
 public:
   virtual ~Buffer() { }
 
   virtual bool Resize(size_t size)
   {
-    void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
-                     MAP_PRIVATE | MAP_ANON, -1, 0);
+    MemoryRange buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
+                           MAP_PRIVATE | MAP_ANON, -1, 0);
     if (buf == MAP_FAILED)
       return false;
     if (*this != MAP_FAILED)
       memcpy(buf, *this, std::min(size, GetLength()));
-    Assign(buf, size);
+    Assign(buf);
     return true;
   }
 
   bool Fill(Buffer &other)
   {
     size_t size = other.GetLength();
     if (!size || !Resize(size))
       return false;
@@ -71,18 +71,18 @@ public:
   }
 
   virtual bool Resize(size_t size)
   {
     if (writable) {
       if (ftruncate(fd, size) == -1)
         return false;
     }
-    Assign(mmap(NULL, size, PROT_READ | (writable ? PROT_WRITE : 0),
-                writable ? MAP_SHARED : MAP_PRIVATE, fd, 0), size);
+    Assign(MemoryRange::mmap(NULL, size, PROT_READ | (writable ? PROT_WRITE : 0),
+                             writable ? MAP_SHARED : MAP_PRIVATE, fd, 0));
     return *this != MAP_FAILED;
   }
 
   int getFd()
   {
     return fd;
   }
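
Buffer::Resize above now hands Assign a single MemoryRange instead of a separate pointer and size, so the mapping and its length can no longer drift apart. A minimal sketch of that pattern using MappedPtr from the patched Utils.h (same build assumptions as the previous sketch):

  #include <cstddef>
  #include <sys/mman.h>
  #include "Utils.h"

  int main()
  {
    MappedPtr ptr;  // starts out as MAP_FAILED with length 0
    // One call produces the pointer and the length together...
    MemoryRange range = MemoryRange::mmap(NULL, 2 * PageSize(),
                                          PROT_READ | PROT_WRITE,
                                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (range == MAP_FAILED)
      return 1;
    // ...and Assign adopts both, after unmapping whatever was held before.
    ptr.Assign(range);
    return 0;  // ~MappedPtr munmaps the two pages
  }
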