--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -1380,17 +1380,17 @@ static void chunk_dealloc(void *chunk, s
static arena_t *choose_arena_hard(void);
#endif
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, bool zero);
static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
size_t size, bool large, bool zero);
-static void arena_purge(arena_t *arena);
+static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
@@ -3589,41 +3589,43 @@ arena_run_alloc(arena_t *arena, arena_bi
(arena_chunk_header_npages << pagesize_2pow));
}
/* Update page map. */
arena_run_split(arena, run, size, large, zero);
return (run);
}
static void
-arena_purge(arena_t *arena)
+arena_purge(arena_t *arena, bool all)
{
arena_chunk_t *chunk;
size_t i, npages;
+ /* If all is set, purge all dirty pages. */
+ size_t dirty_max = all ? 1 : opt_dirty_max;
#ifdef MALLOC_DEBUG
size_t ndirty = 0;
rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
chunk) {
ndirty += chunk->ndirty;
} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
assert(ndirty == arena->ndirty);
#endif
- RELEASE_ASSERT(arena->ndirty > opt_dirty_max);
+ RELEASE_ASSERT(all || (arena->ndirty > opt_dirty_max));
#ifdef MALLOC_STATS
arena->stats.npurge++;
#endif
/*
* Iterate downward through chunks until enough dirty memory has been
* purged. Terminate as soon as possible in order to minimize the
* number of system calls, even if a chunk has only been partially
* purged.
*/
- while (arena->ndirty > (opt_dirty_max >> 1)) {
+ while (arena->ndirty > (dirty_max >> 1)) {
#ifdef MALLOC_DOUBLE_PURGE
bool madvised = false;
#endif
chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
RELEASE_ASSERT(chunk != NULL);
for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
RELEASE_ASSERT(i >= arena_chunk_header_npages);
@@ -3670,17 +3672,17 @@ arena_purge(arena_t *arena)
# ifdef MALLOC_DOUBLE_PURGE
madvised = true;
# endif
#endif
#ifdef MALLOC_STATS
arena->stats.nmadvise++;
arena->stats.purged += npages;
#endif
- if (arena->ndirty <= (opt_dirty_max >> 1))
+ if (arena->ndirty <= (dirty_max >> 1))
break;
}
}
if (chunk->ndirty == 0) {
arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
chunk);
}
@@ -3795,17 +3797,17 @@ arena_run_dalloc(arena_t *arena, arena_r
/* Deallocate chunk if it is now completely unused. */
if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
CHUNK_MAP_ALLOCATED)) == arena_maxclass)
arena_chunk_dealloc(arena, chunk);
/* Enforce opt_dirty_max. */
if (arena->ndirty > opt_dirty_max)
- arena_purge(arena);
+ arena_purge(arena, false);
}
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
{
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
@@ -6849,16 +6851,31 @@ void*
size_t
_msize(const void *ptr)
{
return malloc_usable_size(ptr);
}
#endif
+void
+jemalloc_free_dirty_pages(void)
+{
+ size_t i;
+ for (i = 0; i < narenas; i++) {
+ arena_t *arena = arenas[i];
+
+ if (arena != NULL) {
+ malloc_spin_lock(&arena->lock);
+ arena_purge(arena, true);
+ malloc_spin_unlock(&arena->lock);
+ }
+ }
+}
+
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* Begin library-private functions, used by threading libraries for protection
* of malloc during fork(). These functions are only called if the program is
* running in threaded mode, so there is no need to check whether the program