--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -930,17 +930,17 @@ struct arena_chunk_s {
/* Map of pages within chunk that keeps track of free/large/small. */
arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
typedef struct arena_run_s arena_run_t;
struct arena_run_s {
-#ifdef MALLOC_DEBUG
+#if defined(MALLOC_DEBUG) || defined(MOZ_TEMP_INVESTIGATION)
uint32_t magic;
# define ARENA_RUN_MAGIC 0x384adf93
#endif
/* Bin this run is associated with. */
arena_bin_t *bin;
/* Index of first element that might have a free region. */
@@ -986,17 +986,17 @@ struct arena_bin_s {
#ifdef MALLOC_STATS
/* Bin statistics. */
malloc_bin_stats_t stats;
#endif
};
struct arena_s {
-#ifdef MALLOC_DEBUG
+#if defined(MALLOC_DEBUG) || defined(MOZ_TEMP_INVESTIGATION)
uint32_t magic;
# define ARENA_MAGIC 0x947d3d24
#endif
/* All operations on this arena require that lock be locked. */
#ifdef MOZ_MEMORY
malloc_spinlock_t lock;
#else
@@ -1547,16 +1547,31 @@ void (*_malloc_message)(const char *p1,
_malloc_message("\"", #e, "\"\n", ""); \
abort(); \
} \
} while (0)
#else
#define assert(e)
#endif
+/* See bug 764192 for details on what we're hoping to see with these
+ * RELEASE_ASSERTs and the other code ifdef'ed by MOZ_TEMP_INVESTIGATION. */
+
+#include <mozilla/Assertions.h>
+
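+/* Note (descriptive comment, not from the original patch): RELEASE_ASSERT
+ * crashes via MOZ_CRASH() from mozilla/Assertions.h even in release builds
+ * when MOZ_TEMP_INVESTIGATION is defined; otherwise it falls back to the
+ * ordinary assert(), which compiles away unless MALLOC_DEBUG is set. */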
+#if defined(MOZ_TEMP_INVESTIGATION)
+# define RELEASE_ASSERT(assertion) do { \
+ if (!(assertion)) { \
+ MOZ_CRASH(); \
+ } \
+} while (0)
+#else
+# define RELEASE_ASSERT(assertion) assert(assertion)
+#endif
+
/******************************************************************************/
/*
* Begin mutex. We can't use normal pthread mutexes in all places, because
* they require malloc()ed memory, which causes bootstrapping issues in some
* cases.
*/
static bool
@@ -3187,17 +3202,17 @@ arena_run_reg_alloc(arena_run_t *run, ar
* contains a free region.
*/
run->regs_minelm = i; /* Low payoff: + (mask == 0); */
return (ret);
}
}
/* Not reached. */
- assert(0);
+ RELEASE_ASSERT(0);
return (NULL);
}
static inline void
arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
{
/*
* To divide by a number D that is not a power of two we multiply
@@ -3280,24 +3295,24 @@ arena_run_reg_dalloc(arena_run_t *run, a
/*
* size_invs isn't large enough to handle this size class, so
* calculate regind using actual division. This only happens
* if the user increases small_max via the 'S' runtime
* configuration option.
*/
regind = diff / size;
};
- assert(diff == regind * size);
- assert(regind < bin->nregs);
+ RELEASE_ASSERT(diff == regind * size);
+ RELEASE_ASSERT(regind < bin->nregs);
elm = regind >> (SIZEOF_INT_2POW + 3);
if (elm < run->regs_minelm)
run->regs_minelm = elm;
bit = regind - (elm << (SIZEOF_INT_2POW + 3));
- assert((run->regs_mask[elm] & (1U << bit)) == 0);
+ RELEASE_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
run->regs_mask[elm] |= (1U << bit);
#undef SIZE_INV
#undef SIZE_INV_SHIFT
}
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
bool zero)
@@ -3678,30 +3693,30 @@ static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
arena_chunk_t *chunk;
size_t size, run_ind, run_pages;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
>> pagesize_2pow);
- assert(run_ind >= arena_chunk_header_npages);
- assert(run_ind < chunk_npages);
+ RELEASE_ASSERT(run_ind >= arena_chunk_header_npages);
+ RELEASE_ASSERT(run_ind < chunk_npages);
if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
size = chunk->map[run_ind].bits & ~pagesize_mask;
else
size = run->bin->run_size;
run_pages = (size >> pagesize_2pow);
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
size_t i;
for (i = 0; i < run_pages; i++) {
- assert((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
+ RELEASE_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
== 0);
chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
}
if (chunk->ndirty == 0) {
arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
chunk);
}
@@ -3731,17 +3746,17 @@ arena_run_dalloc(arena_t *arena, arena_r
* inserted later.
*/
arena_avail_tree_remove(&arena->runs_avail,
&chunk->map[run_ind+run_pages]);
size += nrun_size;
run_pages = size >> pagesize_2pow;
- assert((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
+ RELEASE_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
== nrun_size);
chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
pagesize_mask);
chunk->map[run_ind+run_pages-1].bits = size |
(chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
}
/* Try to coalesce backward. */
@@ -3756,17 +3771,17 @@ arena_run_dalloc(arena_t *arena, arena_r
* inserted later.
*/
arena_avail_tree_remove(&arena->runs_avail,
&chunk->map[run_ind]);
size += prun_size;
run_pages = size >> pagesize_2pow;
- assert((chunk->map[run_ind].bits & ~pagesize_mask) ==
+ RELEASE_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
prun_size);
chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
pagesize_mask);
chunk->map[run_ind+run_pages-1].bits = size |
(chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
}
/* Insert into runs_avail, now that coalescing is complete. */
@@ -3871,17 +3886,17 @@ arena_bin_nonfull_run_get(arena_t *arena
/* The last element has spare bits that need to be unset. */
run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
- remainder));
}
run->regs_minelm = 0;
run->nfree = bin->nregs;
-#ifdef MALLOC_DEBUG
+#if defined(MALLOC_DEBUG) || defined(MOZ_TEMP_INVESTIGATION)
run->magic = ARENA_RUN_MAGIC;
#endif
#ifdef MALLOC_STATS
bin->stats.nruns++;
bin->stats.curruns++;
if (bin->stats.curruns > bin->stats.highruns)
bin->stats.highruns = bin->stats.curruns;
@@ -3890,36 +3905,36 @@ arena_bin_nonfull_run_get(arena_t *arena
}
/* bin->runcur must have space available before this function is called. */
static inline void *
arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
{
void *ret;
- assert(run->magic == ARENA_RUN_MAGIC);
- assert(run->nfree > 0);
+ RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
+ RELEASE_ASSERT(run->nfree > 0);
ret = arena_run_reg_alloc(run, bin);
- assert(ret != NULL);
+ RELEASE_ASSERT(ret != NULL);
run->nfree--;
return (ret);
}
/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
bin->runcur = arena_bin_nonfull_run_get(arena, bin);
if (bin->runcur == NULL)
return (NULL);
- assert(bin->runcur->magic == ARENA_RUN_MAGIC);
- assert(bin->runcur->nfree > 0);
+ RELEASE_ASSERT(bin->runcur->magic == ARENA_RUN_MAGIC);
+ RELEASE_ASSERT(bin->runcur->nfree > 0);
return (arena_bin_malloc_easy(arena, bin, bin->runcur));
}
/*
* Calculate bin->run_size such that it meets the following constraints:
*
* *) bin->run_size >= min_run_size
@@ -4086,17 +4101,17 @@ arena_malloc_small(arena_t *arena, size_
bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
- 1];
} else {
/* Sub-page. */
size = pow2_ceil(size);
bin = &arena->bins[ntbins + nqbins
+ (ffs((int)(size >> opt_small_max_2pow)) - 2)];
}
- assert(size == bin->reg_size);
+ RELEASE_ASSERT(size == bin->reg_size);
#ifdef MALLOC_BALANCE
arena_lock_balance(arena);
#else
malloc_spin_lock(&arena->lock);
#endif
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_bin_malloc_easy(arena, bin, run);
@@ -4121,16 +4136,22 @@ arena_malloc_small(arena_t *arena, size_
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
} else
memset(ret, 0, size);
+#ifdef MOZ_TEMP_INVESTIGATION
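+	/* Tag freshly allocated 72-byte regions with a recognizable fill
+	 * pattern as part of the bug 764192 investigation. */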
+ if (size == 72) {
+ memset(ret, 0xe5, size);
+ }
+#endif
+
return (ret);
}
static void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
@@ -4165,17 +4186,17 @@ arena_malloc_large(arena_t *arena, size_
return (ret);
}
static inline void *
arena_malloc(arena_t *arena, size_t size, bool zero)
{
assert(arena != NULL);
- assert(arena->magic == ARENA_MAGIC);
+ RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
assert(size != 0);
assert(QUANTUM_CEILING(size) <= arena_maxclass);
if (size <= bin_maxclass) {
return (arena_malloc_small(arena, size, zero));
} else
return (arena_malloc_large(arena, size, zero));
}
@@ -4469,24 +4490,29 @@ static inline void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm)
{
arena_run_t *run;
arena_bin_t *bin;
size_t size;
run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
- assert(run->magic == ARENA_RUN_MAGIC);
+ RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
size = bin->reg_size;
#ifdef MALLOC_FILL
if (opt_junk)
memset(ptr, 0x5a, size);
#endif
+#ifdef MOZ_TEMP_INVESTIGATION
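+	/* Tag freed 72-byte regions with a fill pattern distinct from the
+	 * allocation-time one, again for the bug 764192 investigation. */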
+ if (size == 72) {
+ memset(ptr, 0x75, size);
+ }
+#endif
arena_run_reg_dalloc(run, bin, ptr, size);
run->nfree++;
if (run->nfree == bin->nregs) {
/* Deallocate run. */
if (run == bin->runcur)
bin->runcur = NULL;
@@ -4495,21 +4521,21 @@ arena_dalloc_small(arena_t *arena, arena
(uintptr_t)chunk)) >> pagesize_2pow;
arena_chunk_map_t *run_mapelm =
&chunk->map[run_pageind];
/*
* This block's conditional is necessary because if the
* run only contains one region, then it never gets
* inserted into the non-full runs tree.
*/
- assert(arena_run_tree_search(&bin->runs, run_mapelm) ==
- run_mapelm);
+ RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
+ run_mapelm);
arena_run_tree_remove(&bin->runs, run_mapelm);
}
-#ifdef MALLOC_DEBUG
+#if defined(MALLOC_DEBUG) || defined(MOZ_TEMP_INVESTIGATION)
run->magic = 0;
#endif
VALGRIND_FREELIKE_BLOCK(run, 0);
arena_run_dalloc(arena, run, true);
#ifdef MALLOC_STATS
bin->stats.curruns--;
#endif
} else if (run->nfree == 1 && run != bin->runcur) {
@@ -4526,29 +4552,29 @@ arena_dalloc_small(arena_t *arena, arena
(arena_chunk_t*)CHUNK_ADDR2BASE(bin->runcur);
size_t runcur_pageind =
(((uintptr_t)bin->runcur -
(uintptr_t)runcur_chunk)) >> pagesize_2pow;
arena_chunk_map_t *runcur_mapelm =
&runcur_chunk->map[runcur_pageind];
/* Insert runcur. */
- assert(arena_run_tree_search(&bin->runs,
+ RELEASE_ASSERT(arena_run_tree_search(&bin->runs,
runcur_mapelm) == NULL);
arena_run_tree_insert(&bin->runs,
runcur_mapelm);
}
bin->runcur = run;
} else {
size_t run_pageind = (((uintptr_t)run -
(uintptr_t)chunk)) >> pagesize_2pow;
arena_chunk_map_t *run_mapelm =
&chunk->map[run_pageind];
- assert(arena_run_tree_search(&bin->runs, run_mapelm) ==
+ RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
NULL);
arena_run_tree_insert(&bin->runs, run_mapelm);
}
}
#ifdef MALLOC_STATS
arena->stats.allocated_small -= size;
arena->stats.ndalloc_small++;
#endif
@@ -4598,21 +4624,21 @@ arena_dalloc(void *ptr, size_t offset)
assert(ptr != NULL);
assert(offset != 0);
assert(CHUNK_ADDR2OFFSET(ptr) == offset);
chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
arena = chunk->arena;
assert(arena != NULL);
- assert(arena->magic == ARENA_MAGIC);
+ RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
pageind = offset >> pagesize_2pow;
mapelm = &chunk->map[pageind];
- assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
+ RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
/* Small allocation. */
malloc_spin_lock(&arena->lock);
arena_dalloc_small(arena, chunk, ptr, mapelm);
malloc_spin_unlock(&arena->lock);
} else
arena_dalloc_large(arena, chunk, ptr);
VALGRIND_FREELIKE_BLOCK(ptr, 0);
@@ -4915,17 +4941,17 @@ arena_new(arena_t *arena)
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
#ifdef MALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
}
-#ifdef MALLOC_DEBUG
+#if defined(MALLOC_DEBUG) || defined(MOZ_TEMP_INVESTIGATION)
arena->magic = ARENA_MAGIC;
#endif
return (false);
}
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *