Bug 920558 - map anonymous pages differently on ARM and x86; r=glandium
author Nathan Froyd <froydnj@mozilla.com>
Thu, 26 Sep 2013 11:17:05 -0400
changeset 148921 a1ea604002b659472abbb678e7c1b2c81d2d5e13
parent 148920 fdc6054e33e4f9b045196401947c13df186fb37a
child 148922 7ccdb7bec8b2630e5eb68d0f369d09c6c0866aa6
push id 34381
push user nfroyd@mozilla.com
push date Fri, 27 Sep 2013 01:50:30 +0000
reviewers glandium
bugs 920558
milestone 27.0a1
Bug 920558 - map anonymous pages differently on ARM and x86; r=glandium
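For readers outside the linker, here is a minimal standalone sketch of the layout the x86 branch of the patch sets up, with an unlinked temporary file standing in for the Android ashmem descriptor (the file name, buffer size, and error handling are illustrative assumptions, not part of the patch): reserve the buffer plus one extra page as an anonymous PROT_NONE region, then overlay the real mapping with MAP_FIXED so a single inaccessible guard page remains adjacent to the buffer.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main()
{
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t length = 4 * page;   // stands in for the decompression buffer size

  // Stand-in for the ashmem fd used by the patch: an unlinked temporary file.
  char path[] = "/tmp/guard-demo-XXXXXX";
  int fd = mkstemp(path);
  if (fd == -1 || unlink(path) == -1 ||
      ftruncate(fd, static_cast<off_t>(length)) == -1) {
    perror("setup");
    return 1;
  }

  // Reserve the buffer plus one extra page as an anonymous, inaccessible region.
  const size_t reserved = length + page;
  void *base = ::mmap(nullptr, reserved, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    perror("mmap reserve");
    return 1;
  }

  // Overlay the file mapping one page in, leaving the first page as a
  // PROT_NONE guard page below the buffer (the x86 layout in the patch;
  // the ARM branch instead places the guard page above the buffer).
  void *buf = ::mmap(reinterpret_cast<char *>(base) + page, length,
                     PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, fd, 0);
  if (buf == MAP_FAILED) {
    perror("mmap overlay");
    ::munmap(base, reserved);
    close(fd);
    return 1;
  }

  memset(buf, 0xab, length);   // the buffer itself is fully usable
  printf("guard @%p, buffer @%p\n", base, buf);
  // Touching the first page of the reservation would fault: it stays PROT_NONE.

  ::munmap(base, reserved);    // one munmap of the reservation frees guard and buffer
  close(fd);
  return 0;
}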
mozglue/linker/Mappable.cpp
--- a/mozglue/linker/Mappable.cpp
+++ b/mozglue/linker/Mappable.cpp
@@ -180,44 +180,52 @@ public:
     strlcpy(str, name, sizeof(str));
     ioctl(fd, ASHMEM_SET_NAME, str);
     if (ioctl(fd, ASHMEM_SET_SIZE, length))
       return NULL;
 
     /* The Gecko crash reporter is confused by adjacent memory mappings of
      * the same file and chances are we're going to map from the same file
      * descriptor right away. To avoid problems with the crash reporter,
-     * create an empty anonymous page before and after the ashmem mapping.
-     * We create two anonymous pages because on some platforms, subsequent
-     * mappings are placed at adjacent, increasing memory addresses (ARM)
-     * and on others, such mappings are placed at adjacent, decreasing
-     * memory addresses (x86). It is more convenient to just do two pages
-     * everywhere than to twiddle with platform-specific #defines.
+     * create an empty anonymous page before or after the ashmem mapping,
+     * depending on how mappings grow in the address space.
      */
-    size_t anon_mapping_length = length + 2 * PAGE_SIZE;
+#if defined(__arm__)
+    void *buf = ::mmap(NULL, length + PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    if (buf != MAP_FAILED) {
+      ::mmap(AlignedEndPtr(reinterpret_cast<char *>(buf) + length, PAGE_SIZE),
+             PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+      DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
+                length, str, buf);
+      return new _MappableBuffer(fd.forget(), buf, length);
+    }
+#elif defined(__i386__)
+    size_t anon_mapping_length = length + PAGE_SIZE;
     void *buf = ::mmap(NULL, anon_mapping_length, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (buf != MAP_FAILED) {
       char *first_page = reinterpret_cast<char *>(buf);
       char *map_page = first_page + PAGE_SIZE;
-      char *last_page = map_page + ((length + PAGE_SIZE - 1) & PAGE_MASK);
 
-      void *actual_buf = ::mmap(map_page, last_page - map_page, PROT_READ | PROT_WRITE,
+      void *actual_buf = ::mmap(map_page, length, PROT_READ | PROT_WRITE,
                                 MAP_FIXED | MAP_SHARED, fd, 0);
       if (actual_buf == MAP_FAILED) {
         ::munmap(buf, anon_mapping_length);
         DEBUG_LOG("Fixed allocation of decompression buffer at %p failed", map_page);
         return NULL;
       }
 
       DEBUG_LOG("Decompression buffer of size 0x%x in ashmem \"%s\", mapped @%p",
                 length, str, actual_buf);
       return new _MappableBuffer(fd.forget(), actual_buf, length);
     }
 #else
+#error need to add a case for your CPU
+#endif
+#else
     /* On Linux, use /dev/shm as base directory for temporary files, assuming
      * it's on tmpfs */
     /* TODO: check that /dev/shm is tmpfs */
     char path[256];
     sprintf(path, "/dev/shm/%s.XXXXXX", name);
     fd = mkstemp(path);
     if (fd == -1)
       return NULL;
@@ -245,18 +253,24 @@ public:
       flags |= MAP_SHARED;
     }
 #endif
     return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
   }
 
 #ifdef ANDROID
   ~_MappableBuffer() {
-    /* Free the additional pages we allocated. See _MappableBuffer::Create */
-    ::munmap(*this - PAGE_SIZE, GetLength() + 2 * PAGE_SIZE);
+    /* Free the additional page we allocated. See _MappableBuffer::Create */
+#if defined(__arm__)
+    ::munmap(AlignedEndPtr(*this + GetLength(), PAGE_SIZE), PAGE_SIZE);
+#elif defined(__i386__)
+    ::munmap(*this - PAGE_SIZE, GetLength() + PAGE_SIZE);
+#else
+#error need to add a case for your CPU
+#endif
   }
 #endif
 
 private:
   _MappableBuffer(int fd, void *buf, size_t length)
   : MappedPtr(buf, length), fd(fd) { }
 
   /* File descriptor for the temporary file or ashmem */
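The ARM branch of the patch takes the opposite layout: it maps the decompression buffer first and then pins a PROT_NONE page immediately after the buffer's page-aligned end, since subsequent mappings on that platform are handed out at increasing addresses. A sketch of that variant follows, again with an unlinked temporary file as a stand-in for ashmem and a hand-rolled AlignUp() helper standing in for the linker's AlignedEndPtr(); both helpers and all names here are illustrative only.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

// Hypothetical stand-in for AlignedEndPtr(): round p up to the next multiple
// of align (align must be a power of two).
static char *AlignUp(char *p, size_t align)
{
  return reinterpret_cast<char *>(
      (reinterpret_cast<uintptr_t>(p) + align - 1) & ~(align - 1));
}

int main()
{
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t length = 3 * page + 123;   // deliberately not page-aligned

  char path[] = "/tmp/guard-demo-XXXXXX";
  int fd = mkstemp(path);
  if (fd == -1 || unlink(path) == -1 ||
      ftruncate(fd, static_cast<off_t>(length)) == -1) {
    perror("setup");
    return 1;
  }

  // Map the buffer first; the kernel chooses the address.
  void *buf = ::mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (buf == MAP_FAILED) {
    perror("mmap buffer");
    return 1;
  }

  // Pin one PROT_NONE page right after the buffer's page-aligned end, so the
  // next mapping placed at an increasing address cannot sit adjacent to it.
  char *guard = AlignUp(reinterpret_cast<char *>(buf) + length, page);
  if (::mmap(guard, page, PROT_NONE,
             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) {
    perror("mmap guard");
  }

  printf("buffer @%p (0x%zx bytes), guard @%p\n",
         buf, length, static_cast<void *>(guard));

  // Teardown: the guard page came from its own mmap, so it is unmapped
  // separately from the buffer, as in the patched ARM destructor.
  ::munmap(guard, page);
  ::munmap(buf, length);
  close(fd);
  return 0;
}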