Bug 805855 - Add an extra function to jemalloc to free dirty unused pages. r=glandium
author       Gabriele Svelto <gsvelto@mozilla.com>
date         Thu, 08 Nov 2012 20:06:50 +0100
changeset    113168 8bdc6ccc3705491adeaeb7d7d3cec439925dd058
parent       113167 93a465ae3e4d054e05dd867e364c43930bde5f94
child        113169 2351a225d4db7e78c37febd6b5c146dff90681fb
push id      23859
push user    emorley@mozilla.com
push date    Wed, 14 Nov 2012 14:36:31 +0000
treeherder   mozilla-central@87928cd21b40
reviewers    glandium
bugs         805855
milestone    19.0a1
memory/mozjemalloc/jemalloc.c
memory/mozjemalloc/jemalloc.h
mozglue/build/mozglue.def.in
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -1380,17 +1380,17 @@ static void	chunk_dealloc(void *chunk, s
 static arena_t	*choose_arena_hard(void);
 #endif
 static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
     bool large, bool zero);
 static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
 static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
 static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
     size_t size, bool large, bool zero);
-static void	arena_purge(arena_t *arena);
+static void	arena_purge(arena_t *arena, bool all);
 static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
 static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
     arena_run_t *run, size_t oldsize, size_t newsize);
 static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
     arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
 static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
@@ -3589,41 +3589,43 @@ arena_run_alloc(arena_t *arena, arena_bi
 		    (arena_chunk_header_npages << pagesize_2pow));
 	}
 	/* Update page map. */
 	arena_run_split(arena, run, size, large, zero);
 	return (run);
 }
 
 static void
-arena_purge(arena_t *arena)
+arena_purge(arena_t *arena, bool all)
 {
 	arena_chunk_t *chunk;
 	size_t i, npages;
+	/* If "all" is set, purge all dirty pages. */
+	size_t dirty_max = all ? 1 : opt_dirty_max;
 #ifdef MALLOC_DEBUG
 	size_t ndirty = 0;
 	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
 	    chunk) {
 		ndirty += chunk->ndirty;
 	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
 	assert(ndirty == arena->ndirty);
 #endif
-	RELEASE_ASSERT(arena->ndirty > opt_dirty_max);
+	RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
 
 #ifdef MALLOC_STATS
 	arena->stats.npurge++;
 #endif
 
 	/*
 	 * Iterate downward through chunks until enough dirty memory has been
 	 * purged.  Terminate as soon as possible in order to minimize the
 	 * number of system calls, even if a chunk has only been partially
 	 * purged.
 	 */
-	while (arena->ndirty > (opt_dirty_max >> 1)) {
+	while (arena->ndirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
 		bool madvised = false;
 #endif
 		chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
 		RELEASE_ASSERT(chunk != NULL);
 
 		for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
 			RELEASE_ASSERT(i >= arena_chunk_header_npages);
@@ -3670,17 +3672,17 @@ arena_purge(arena_t *arena)
 #  ifdef MALLOC_DOUBLE_PURGE
 				madvised = true;
 #  endif
 #endif
 #ifdef MALLOC_STATS
 				arena->stats.nmadvise++;
 				arena->stats.purged += npages;
 #endif
-				if (arena->ndirty <= (opt_dirty_max >> 1))
+				if (arena->ndirty <= (dirty_max >> 1))
 					break;
 			}
 		}
 
 		if (chunk->ndirty == 0) {
 			arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
 			    chunk);
 		}
@@ -3795,17 +3797,17 @@ arena_run_dalloc(arena_t *arena, arena_r
 
 	/* Deallocate chunk if it is now completely unused. */
 	if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
 	    CHUNK_MAP_ALLOCATED)) == arena_maxclass)
 		arena_chunk_dealloc(arena, chunk);
 
 	/* Enforce opt_dirty_max. */
 	if (arena->ndirty > opt_dirty_max)
-		arena_purge(arena);
+		arena_purge(arena, false);
 }
 
 static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
@@ -6849,16 +6851,31 @@ void*
 size_t
 _msize(const void *ptr)
 {
 
 	return malloc_usable_size(ptr);
 }
 #endif
 
+void
+jemalloc_free_dirty_pages(void)
+{
+	size_t i;
+	for (i = 0; i < narenas; i++) {
+		arena_t *arena = arenas[i];
+
+		if (arena != NULL) {
+			malloc_spin_lock(&arena->lock);
+			arena_purge(arena, true);
+			malloc_spin_unlock(&arena->lock);
+		}
+	}
+}
+
 /*
  * End non-standard functions.
  */
 /******************************************************************************/
 /*
  * Begin library-private functions, used by threading libraries for protection
  * of malloc during fork().  These functions are only called if the program is
  * running in threaded mode, so there is no need to check whether the program
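
The heart of the jemalloc.c change is the new `all' argument to arena_purge(): when it is true, the local threshold dirty_max becomes 1, so the loop condition `arena->ndirty > (dirty_max >> 1)' reduces to `ndirty > 0' and every dirty page is returned to the OS, instead of stopping once the count falls to half of opt_dirty_max. Below is a minimal standalone sketch of that threshold policy, with hypothetical names throughout; unlike the real code, which coalesces runs of contiguous dirty pages into a single madvise() call to minimize syscalls, this purges one page at a time.

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    #define TOY_PAGE_SIZE 4096

    /* Hypothetical bookkeeping for one arena-like region. */
    typedef struct {
    	void	*pages;		/* base address of the mapping */
    	size_t	npages;		/* total number of pages */
    	bool	*dirty;		/* dirty[i] is true if page i is dirty */
    	size_t	ndirty;		/* number of dirty pages */
    } toy_arena_t;

    void
    toy_purge(toy_arena_t *arena, bool all, size_t opt_dirty_max)
    {
    	/* If "all" is set, purge every dirty page: 1 >> 1 == 0. */
    	size_t dirty_max = all ? 1 : opt_dirty_max;
    	size_t i = arena->npages;

    	/*
    	 * Walk backwards, releasing dirty pages until at most
    	 * dirty_max / 2 of them remain (none when "all" is set).
    	 */
    	while (arena->ndirty > (dirty_max >> 1) && i > 0) {
    		i--;
    		if (!arena->dirty[i])
    			continue;
    		/*
    		 * Give the page back to the OS; the mapping stays valid
    		 * and an anonymous page refaults as zeroed memory.
    		 */
    		madvise((char *)arena->pages + i * TOY_PAGE_SIZE,
    		    TOY_PAGE_SIZE, MADV_DONTNEED);
    		arena->dirty[i] = false;
    		arena->ndirty--;
    	}
    }
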
--- a/memory/mozjemalloc/jemalloc.h
+++ b/memory/mozjemalloc/jemalloc.h
@@ -111,15 +111,29 @@ static inline size_t je_malloc_usable_si
  * If MALLOC_DOUBLE_PURGE is not defined, this function does nothing.
  */
 #if defined(MOZ_MEMORY_LINUX) || defined(MOZ_JEMALLOC)
 static inline void jemalloc_purge_freed_pages() { }
 #else
 void    jemalloc_purge_freed_pages();
 #endif
 
+/*
+ * Free all unused dirty pages in all arenas. Calling this function will slow
+ * down subsequent allocations, so it is recommended to use it only when
+ * memory needs to be reclaimed at all costs (see bug 805855). This function
+ * provides functionality similar to mallctl("arenas.purge") in jemalloc 3.
+ */
+
+#if !defined(MOZ_NATIVE_JEMALLOC)
+#if defined(MOZ_MEMORY_LINUX) || defined(MOZ_MEMORY_BSD)
+__attribute__((weak))
+#endif /* defined(MOZ_MEMORY_LINUX) || defined(MOZ_MEMORY_BSD) */
+void    jemalloc_free_dirty_pages();
+#endif /* !defined(MOZ_NATIVE_JEMALLOC) */
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
 
 #undef wrap
 
 #endif /* _JEMALLOC_H_ */
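
On Linux and BSD the new prototype is declared __attribute__((weak)), so the symbol resolves to a null address in builds where no implementation is linked in. A hypothetical caller-side sketch follows; on_memory_pressure is an illustrative name, not part of this patch.

    #include "jemalloc.h"

    /* Illustrative memory-pressure hook, not part of the patch. */
    void
    on_memory_pressure(void)
    {
    	/*
    	 * Where the declaration is weak (Linux/BSD), the function's
    	 * address is null if no implementation was linked in, so this
    	 * test guards the call; elsewhere it is always true.
    	 */
    	if (jemalloc_free_dirty_pages)
    		jemalloc_free_dirty_pages();
    }
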
--- a/mozglue/build/mozglue.def.in
+++ b/mozglue/build/mozglue.def.in
@@ -19,11 +19,12 @@ EXPORTS
   _wcsdup=je_wcsdup
   malloc_usable_size=je_malloc_usable_size
 #ifdef MOZ_JEMALLOC
   je_nallocm
 #else
   je_malloc_good_size
 #endif
   jemalloc_stats
+  jemalloc_free_dirty_pages
   ; A hack to work around the CRT (see giant comment in Makefile.in)
   frex=je_dumb_free_thunk
 #endif
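
On Windows the mozglue.def.in entry is what actually exposes jemalloc_free_dirty_pages from mozglue.dll. A hypothetical sketch of resolving the export at runtime, assuming mozglue.dll is already loaded in the process (the helper name is illustrative):

    #include <windows.h>

    typedef void (*jemalloc_free_dirty_pages_fn)(void);

    /*
     * Hypothetical helper: look up the export from an already-loaded
     * mozglue.dll and invoke it if present.
     */
    static void
    purge_dirty_pages_via_mozglue(void)
    {
    	HMODULE mozglue = GetModuleHandleW(L"mozglue.dll");
    	if (mozglue != NULL) {
    		jemalloc_free_dirty_pages_fn fn =
    		    (jemalloc_free_dirty_pages_fn)GetProcAddress(
    		        mozglue, "jemalloc_free_dirty_pages");
    		if (fn != NULL)
    			fn();
    	}
    }
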