Bug 1401099 - Use Gecko style names for arena_t members. r=njn
author: Mike Hommey <mh+mozilla@glandium.org>
date: Fri, 15 Sep 2017 17:20:01 +0900
changeset 382249 1252fd56a1c9aec98f1b52dd14fd35d6145751f0
parent 382248 84188868fc418b5d79c218a6abc33296f57a5ad6
child 382250 cab516c56578a04f8d10ac59d2965482fc2015c5
push id: 32551
push user: kwierso@gmail.com
push date: Thu, 21 Sep 2017 23:29:53 +0000
treeherder: mozilla-central@d6d6fd889f7b
reviewers: njn
bugs: 1401099
milestone: 58.0a1
Bug 1401099 - Use Gecko style names for arena_t members. r=njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -516,17 +516,17 @@ struct malloc_rtree_t {
 struct arena_t;
 struct arena_bin_t;
 
 /* Each element of the chunk map corresponds to one page within the chunk. */
 struct arena_chunk_map_t {
 	/*
 	 * Linkage for run trees.  There are two disjoint uses:
 	 *
-	 * 1) arena_t's runs_avail tree.
+	 * 1) arena_t's tree of available runs.
 	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
 	 *    runs, rather than directly embedding linkage.
 	 */
 	rb_node(arena_chunk_map_t)	link;
 
 	/*
 	 * Run address (or size) and various flags are stored together.  The bit
 	 * layout looks like (assuming 32-bit system):
@@ -599,17 +599,17 @@ struct arena_chunk_map_t {
 typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
 typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
 
 /* Arena chunk header. */
 struct arena_chunk_t {
 	/* Arena that owns the chunk. */
 	arena_t		*arena;
 
-	/* Linkage for the arena's chunks_dirty tree. */
+	/* Linkage for the arena's tree of dirty chunks. */
 	rb_node(arena_chunk_t) link_dirty;
 
 #ifdef MALLOC_DOUBLE_PURGE
 	/* If we're double-purging, we maintain a linked list of chunks which
 	 * have pages which have been madvise(MADV_FREE)'d but not explicitly
 	 * purged.
 	 *
 	 * We're currently lazy and don't remove a chunk from this list when
@@ -691,88 +691,88 @@ struct arena_bin_t {
 	uint32_t	reg0_offset;
 
 	/* Bin statistics. */
 	malloc_bin_stats_t stats;
 };
 
 struct arena_t {
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-	uint32_t		magic;
+  uint32_t mMagic;
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
-	/* All operations on this arena require that lock be locked. */
-	malloc_spinlock_t	lock;
-
-	arena_stats_t		stats;
-
-	/* Tree of dirty-page-containing chunks this arena manages. */
-	arena_chunk_tree_t	chunks_dirty;
+  /* All operations on this arena require that lock be locked. */
+  malloc_spinlock_t mLock;
+
+  arena_stats_t mStats;
+
+  /* Tree of dirty-page-containing chunks this arena manages. */
+  arena_chunk_tree_t mChunksDirty;
 
 #ifdef MALLOC_DOUBLE_PURGE
-	/* Head of a linked list of MADV_FREE'd-page-containing chunks this
-	 * arena manages. */
-	mozilla::DoublyLinkedList<arena_chunk_t> chunks_madvised;
+  /* Head of a linked list of MADV_FREE'd-page-containing chunks this
+   * arena manages. */
+  mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
 #endif
 
-	/*
-	 * In order to avoid rapid chunk allocation/deallocation when an arena
-	 * oscillates right on the cusp of needing a new chunk, cache the most
-	 * recently freed chunk.  The spare is left in the arena's chunk trees
-	 * until it is deleted.
-	 *
-	 * There is one spare chunk per arena, rather than one spare total, in
-	 * order to avoid interactions between multiple threads that could make
-	 * a single spare inadequate.
-	 */
-	arena_chunk_t		*spare;
-
-	/*
-	 * Current count of pages within unused runs that are potentially
-	 * dirty, and for which madvise(... MADV_FREE) has not been called.  By
-	 * tracking this, we can institute a limit on how much dirty unused
-	 * memory is mapped for each arena.
-	 */
-	size_t			ndirty;
-	/*
-	 * Maximum value allowed for ndirty.
-	 */
-	size_t			dirty_max;
-
-	/*
-	 * Size/address-ordered tree of this arena's available runs.  This tree
-	 * is used for first-best-fit run allocation.
-	 */
-	arena_avail_tree_t	runs_avail;
-
-	/*
-	 * bins is used to store rings of free regions of the following sizes,
-	 * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
-	 *
-	 *   bins[i] | size |
-	 *   --------+------+
-	 *        0  |    2 |
-	 *        1  |    4 |
-	 *        2  |    8 |
-	 *   --------+------+
-	 *        3  |   16 |
-	 *        4  |   32 |
-	 *        5  |   48 |
-	 *        6  |   64 |
-	 *           :      :
-	 *           :      :
-	 *       33  |  496 |
-	 *       34  |  512 |
-	 *   --------+------+
-	 *       35  | 1024 |
-	 *       36  | 2048 |
-	 *   --------+------+
-	 */
-	arena_bin_t		bins[1]; /* Dynamically sized. */
+  /*
+   * In order to avoid rapid chunk allocation/deallocation when an arena
+   * oscillates right on the cusp of needing a new chunk, cache the most
+   * recently freed chunk.  The spare is left in the arena's chunk trees
+   * until it is deleted.
+   *
+   * There is one spare chunk per arena, rather than one spare total, in
+   * order to avoid interactions between multiple threads that could make
+   * a single spare inadequate.
+   */
+  arena_chunk_t* mSpare;
+
+  /*
+   * Current count of pages within unused runs that are potentially
+   * dirty, and for which madvise(... MADV_FREE) has not been called.  By
+   * tracking this, we can institute a limit on how much dirty unused
+   * memory is mapped for each arena.
+   */
+  size_t mNumDirty;
+  /*
+   * Maximum value allowed for mNumDirty.
+   */
+  size_t mMaxDirty;
+
+  /*
+   * Size/address-ordered tree of this arena's available runs.  This tree
+   * is used for first-best-fit run allocation.
+   */
+  arena_avail_tree_t mRunsAvail;
+
+  /*
+   * mBins is used to store rings of free regions of the following sizes,
+   * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
+   *
+   *   mBins[i] | size |
+   *   --------+------+
+   *        0  |    2 |
+   *        1  |    4 |
+   *        2  |    8 |
+   *   --------+------+
+   *        3  |   16 |
+   *        4  |   32 |
+   *        5  |   48 |
+   *        6  |   64 |
+   *           :      :
+   *           :      :
+   *       33  |  496 |
+   *       34  |  512 |
+   *   --------+------+
+   *       35  | 1024 |
+   *       36  | 2048 |
+   *   --------+------+
+   */
+  arena_bin_t mBins[1]; /* Dynamically sized. */
 };
 
 /******************************************************************************/
 /*
  * Data.
  */
 
 /*
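
[Editorial note, not part of the patch.] For reference, here is a minimal standalone sketch of the size-to-bin mapping documented by the mBins table above, under the same assumed configuration (16-byte quantum, 4kB pagesize, default MALLOC_OPTIONS). The constants and helpers (kQuantum, kNumTiny, Pow2Ceil, BinIndex) are hypothetical stand-ins; the allocator's own computation lives in arena_malloc_small further down in this patch and uses ffs() bit tricks instead.

// Illustrative sketch only -- not part of the patch.
// Mirrors the mBins index table: bins 0..2 are tiny (2, 4, 8 bytes),
// bins 3..34 are quantum-spaced (16..512 bytes), bins 35..36 are
// sub-page (1024, 2048 bytes). All names here are hypothetical.
#include <cassert>
#include <cstddef>

static constexpr size_t kQuantum  = 16;  // assumed quantum
static constexpr size_t kNumTiny  = 3;   // bins 0..2: 2, 4, 8 bytes
static constexpr size_t kNumQuant = 32;  // bins 3..34: 16..512 bytes
static constexpr size_t kSmallMax = 512; // largest quantum-spaced size

// Round a size up to the next power of two (for sizes >= 1).
static size_t Pow2Ceil(size_t aSize) {
  size_t p = 1;
  while (p < aSize) {
    p <<= 1;
  }
  return p;
}

// Map a requested size to a bin index, following the table above.
static size_t BinIndex(size_t aSize) {
  if (aSize <= 8) {
    // Tiny bins: 2 -> 0, 4 -> 1, 8 -> 2.
    size_t s = Pow2Ceil(aSize < 2 ? 2 : aSize);
    size_t idx = 0;
    while ((size_t(2) << idx) < s) {
      idx++;
    }
    return idx;
  }
  if (aSize <= kSmallMax) {
    // Quantum-spaced bins: 16 -> 3, 32 -> 4, ..., 512 -> 34.
    size_t s = (aSize + kQuantum - 1) & ~(kQuantum - 1);
    return kNumTiny + (s / kQuantum) - 1;
  }
  // Sub-page bins: 1024 -> 35, 2048 -> 36.
  return kNumTiny + kNumQuant + (Pow2Ceil(aSize) == 1024 ? 0 : 1);
}

int main() {
  assert(BinIndex(3) == 1);     // rounds up to 4
  assert(BinIndex(48) == 5);    // quantum-spaced
  assert(BinIndex(513) == 35);  // rounds up to 1024
  return 0;
}
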
@@ -2549,27 +2549,27 @@ arena_run_split(arena_t *arena, arena_ru
 	    >> pagesize_2pow);
 	total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
 	    pagesize_2pow;
 	need_pages = (size >> pagesize_2pow);
 	MOZ_ASSERT(need_pages > 0);
 	MOZ_ASSERT(need_pages <= total_pages);
 	rem_pages = total_pages - need_pages;
 
-	arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
+	arena_avail_tree_remove(&arena->mRunsAvail, &chunk->map[run_ind]);
 
 	/* Keep track of trailing unused pages for later use. */
 	if (rem_pages > 0) {
 		chunk->map[run_ind+need_pages].bits = (rem_pages <<
 		    pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
 		    pagesize_mask);
 		chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
 		    pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
 		    pagesize_mask);
-		arena_avail_tree_insert(&arena->runs_avail,
+		arena_avail_tree_insert(&arena->mRunsAvail,
 		    &chunk->map[run_ind+need_pages]);
 	}
 
 	for (i = 0; i < need_pages; i++) {
 		/*
 		 * Commit decommitted pages if necessary.  If a decommitted
 		 * page is encountered, commit all needed adjacent decommitted
 		 * pages in one operation, in order to reduce system call
@@ -2593,17 +2593,17 @@ arena_run_split(arena_t *arena, arena_ru
 				    ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
 			}
 
 #  ifdef MALLOC_DECOMMIT
 			pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
 			    << pagesize_2pow)), (j << pagesize_2pow));
 #  endif
 
-			arena->stats.committed += j;
+			arena->mStats.committed += j;
 
 #  ifndef MALLOC_DECOMMIT
                 }
 #  else
 		} else /* No need to zero since commit zeros. */
 #  endif
 
 		/* Zero if necessary. */
@@ -2614,17 +2614,17 @@ arena_run_split(arena_t *arena, arena_ru
 				    + i) << pagesize_2pow)), 0, pagesize);
 				/* CHUNK_MAP_ZEROED is cleared below. */
 			}
 		}
 
 		/* Update dirty page accounting. */
 		if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
 			chunk->ndirty--;
-			arena->ndirty--;
+			arena->mNumDirty--;
 			/* CHUNK_MAP_DIRTY is cleared below. */
 		}
 
 		/* Initialize the chunk map. */
 		if (large) {
 			chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE
 			    | CHUNK_MAP_ALLOCATED;
 		} else {
@@ -2638,17 +2638,17 @@ arena_run_split(arena_t *arena, arena_ru
 	 * primarily a debugging aid, since the lack of size info for trailing
 	 * pages only matters if the application tries to operate on an
 	 * interior pointer.
 	 */
 	if (large)
 		chunk->map[run_ind].bits |= size;
 
 	if (chunk->ndirty == 0 && old_ndirty > 0)
-		arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
+		arena_chunk_tree_dirty_remove(&arena->mChunksDirty, chunk);
 }
 
 static void
 arena_chunk_init(arena_t *arena, arena_chunk_t *chunk, bool zeroed)
 {
 	size_t i;
 	/* WARNING: The following relies on !zeroed meaning "used to be an arena
          * chunk".
@@ -2658,17 +2658,17 @@ arena_chunk_init(arena_t *arena, arena_c
          * all it can contain is an arena chunk header (which we're overwriting),
          * and zeroed or poisoned memory (because a recycled arena chunk will
          * have been emptied before being recycled). In that case, we can get
          * away with reusing the chunk as-is, marking all runs as madvised.
          */
 	size_t flags = zeroed ? CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED
 	                      : CHUNK_MAP_MADVISED;
 
-	arena->stats.mapped += chunksize;
+	arena->mStats.mapped += chunksize;
 
 	chunk->arena = arena;
 
 	/*
 	 * Claim that no pages are in use, since the header is merely overhead.
 	 */
 	chunk->ndirty = 0;
 
@@ -2688,95 +2688,95 @@ arena_chunk_init(arena_t *arena, arena_c
 
 #ifdef MALLOC_DECOMMIT
 	/*
 	 * Start out decommitted, in order to force a closer correspondence
 	 * between dirty pages and committed untouched pages.
 	 */
 	pages_decommit(run, arena_maxclass);
 #endif
-	arena->stats.committed += arena_chunk_header_npages;
-
-	/* Insert the run into the runs_avail tree. */
-	arena_avail_tree_insert(&arena->runs_avail,
+	arena->mStats.committed += arena_chunk_header_npages;
+
+	/* Insert the run into the tree of available runs. */
+	arena_avail_tree_insert(&arena->mRunsAvail,
 	    &chunk->map[arena_chunk_header_npages]);
 
 #ifdef MALLOC_DOUBLE_PURGE
 	new (&chunk->chunks_madvised_elem) mozilla::DoublyLinkedListElement<arena_chunk_t>();
 #endif
 }
 
 static void
 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
 
-	if (arena->spare) {
-		if (arena->spare->ndirty > 0) {
+	if (arena->mSpare) {
+		if (arena->mSpare->ndirty > 0) {
 			arena_chunk_tree_dirty_remove(
-			    &chunk->arena->chunks_dirty, arena->spare);
-			arena->ndirty -= arena->spare->ndirty;
-			arena->stats.committed -= arena->spare->ndirty;
+			    &chunk->arena->mChunksDirty, arena->mSpare);
+			arena->mNumDirty -= arena->mSpare->ndirty;
+			arena->mStats.committed -= arena->mSpare->ndirty;
 		}
 
 #ifdef MALLOC_DOUBLE_PURGE
-		if (arena->chunks_madvised.ElementProbablyInList(arena->spare)) {
-			arena->chunks_madvised.remove(arena->spare);
+		if (arena->mChunksMAdvised.ElementProbablyInList(arena->mSpare)) {
+			arena->mChunksMAdvised.remove(arena->mSpare);
 		}
 #endif
 
-		chunk_dealloc((void *)arena->spare, chunksize, ARENA_CHUNK);
-		arena->stats.mapped -= chunksize;
-		arena->stats.committed -= arena_chunk_header_npages;
+		chunk_dealloc((void *)arena->mSpare, chunksize, ARENA_CHUNK);
+		arena->mStats.mapped -= chunksize;
+		arena->mStats.committed -= arena_chunk_header_npages;
 	}
 
 	/*
-	 * Remove run from runs_avail, so that the arena does not use it.
-	 * Dirty page flushing only uses the chunks_dirty tree, so leaving this
+	 * Remove run from the tree of available runs, so that the arena does not use it.
+	 * Dirty page flushing only uses the tree of dirty chunks, so leaving this
 	 * chunk in the chunks_* trees is sufficient for that purpose.
 	 */
-	arena_avail_tree_remove(&arena->runs_avail,
+	arena_avail_tree_remove(&arena->mRunsAvail,
 	    &chunk->map[arena_chunk_header_npages]);
 
-	arena->spare = chunk;
+	arena->mSpare = chunk;
 }
 
 static arena_run_t *
 arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
     bool zero)
 {
 	arena_run_t *run;
 	arena_chunk_map_t *mapelm, key;
 
 	MOZ_ASSERT(size <= arena_maxclass);
 	MOZ_ASSERT((size & pagesize_mask) == 0);
 
 	/* Search the arena's chunks for the lowest best fit. */
 	key.bits = size | CHUNK_MAP_KEY;
-	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
+	mapelm = arena_avail_tree_nsearch(&arena->mRunsAvail, &key);
 	if (mapelm) {
 		arena_chunk_t *chunk =
 		    (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
 		size_t pageind = ((uintptr_t)mapelm -
 		    (uintptr_t)chunk->map) /
 		    sizeof(arena_chunk_map_t);
 
 		run = (arena_run_t *)((uintptr_t)chunk + (pageind
 		    << pagesize_2pow));
 		arena_run_split(arena, run, size, large, zero);
 		return (run);
 	}
 
-	if (arena->spare) {
+	if (arena->mSpare) {
 		/* Use the spare. */
-		arena_chunk_t *chunk = arena->spare;
-		arena->spare = nullptr;
+		arena_chunk_t *chunk = arena->mSpare;
+		arena->mSpare = nullptr;
 		run = (arena_run_t *)((uintptr_t)chunk +
 		    (arena_chunk_header_npages << pagesize_2pow));
-		/* Insert the run into the runs_avail tree. */
-		arena_avail_tree_insert(&arena->runs_avail,
+		/* Insert the run into the tree of available runs. */
+		arena_avail_tree_insert(&arena->mRunsAvail,
 		    &chunk->map[arena_chunk_header_npages]);
 		arena_run_split(arena, run, size, large, zero);
 		return (run);
 	}
 
 	/*
 	 * No usable runs.  Create a new chunk from which to allocate
 	 * the run.
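
[Editorial note, not part of the patch.] The following standalone sketch models the "lowest best fit" lookup that arena_run_alloc performs on mRunsAvail above: the tree is ordered by (size, address), and an nsearch with key.bits = size | CHUNK_MAP_KEY returns the smallest available run that fits, at the lowest address; if nothing fits, the caller falls back to mSpare and then to a fresh chunk. std::set with lower_bound stands in for the red-black tree here, and Run/FirstBestFit are hypothetical names.

// Illustrative sketch only -- not part of the patch.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <set>

struct Run {
  size_t size;       // usable size of the available run, in bytes
  uintptr_t address; // start address, used as a tiebreaker
};

struct BySizeThenAddress {
  bool operator()(const Run& a, const Run& b) const {
    return a.size != b.size ? a.size < b.size : a.address < b.address;
  }
};

using AvailTree = std::set<Run, BySizeThenAddress>;

// Analogue of arena_avail_tree_nsearch with key.bits = size | CHUNK_MAP_KEY:
// find the first run whose size is >= the request.
const Run* FirstBestFit(const AvailTree& aAvail, size_t aSize) {
  auto it = aAvail.lower_bound(Run{aSize, 0});
  return it == aAvail.end() ? nullptr : &*it;
}

int main() {
  AvailTree avail{{4096, 0x30000}, {8192, 0x10000}, {8192, 0x20000}};
  const Run* run = FirstBestFit(avail, 5000);
  assert(run && run->size == 8192 && run->address == 0x10000);
  assert(!FirstBestFit(avail, 1 << 20)); // nothing fits: the caller then
                                         // tries mSpare, then a new chunk.
  return 0;
}
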
@@ -2798,38 +2798,38 @@ arena_run_alloc(arena_t *arena, arena_bi
 }
 
 static void
 arena_purge(arena_t *arena, bool all)
 {
 	arena_chunk_t *chunk;
 	size_t i, npages;
 	/* If all is set purge all dirty pages. */
-	size_t dirty_max = all ? 1 : arena->dirty_max;
+	size_t dirty_max = all ? 1 : arena->mMaxDirty;
 #ifdef MOZ_DEBUG
 	size_t ndirty = 0;
-	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
+	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->mChunksDirty,
 	    chunk) {
 		ndirty += chunk->ndirty;
-	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
-	MOZ_ASSERT(ndirty == arena->ndirty);
+	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->mChunksDirty, chunk)
+	MOZ_ASSERT(ndirty == arena->mNumDirty);
 #endif
-	MOZ_DIAGNOSTIC_ASSERT(all || (arena->ndirty > arena->dirty_max));
+	MOZ_DIAGNOSTIC_ASSERT(all || (arena->mNumDirty > arena->mMaxDirty));
 
 	/*
 	 * Iterate downward through chunks until enough dirty memory has been
 	 * purged.  Terminate as soon as possible in order to minimize the
 	 * number of system calls, even if a chunk has only been partially
 	 * purged.
 	 */
-	while (arena->ndirty > (dirty_max >> 1)) {
+	while (arena->mNumDirty > (dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
 		bool madvised = false;
 #endif
-		chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
+		chunk = arena_chunk_tree_dirty_last(&arena->mChunksDirty);
 		MOZ_DIAGNOSTIC_ASSERT(chunk);
 
 		for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
 			MOZ_DIAGNOSTIC_ASSERT(i >= arena_chunk_header_npages);
 
 			if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
 				const size_t free_operation = CHUNK_MAP_DECOMMITTED;
@@ -2845,50 +2845,50 @@ arena_purge(arena_t *arena, bool all)
 				       (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
 				     npages++) {
 					i--;
 					MOZ_ASSERT((chunk->map[i].bits &
 					            CHUNK_MAP_MADVISED_OR_DECOMMITTED) == 0);
 					chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
 				}
 				chunk->ndirty -= npages;
-				arena->ndirty -= npages;
+				arena->mNumDirty -= npages;
 
 #ifdef MALLOC_DECOMMIT
 				pages_decommit((void *)((uintptr_t)
 				    chunk + (i << pagesize_2pow)),
 				    (npages << pagesize_2pow));
 #endif
-				arena->stats.committed -= npages;
+				arena->mStats.committed -= npages;
 
 #ifndef MALLOC_DECOMMIT
 				madvise((void *)((uintptr_t)chunk + (i <<
 				    pagesize_2pow)), (npages << pagesize_2pow),
 				    MADV_FREE);
 #  ifdef MALLOC_DOUBLE_PURGE
 				madvised = true;
 #  endif
 #endif
-				if (arena->ndirty <= (dirty_max >> 1))
+				if (arena->mNumDirty <= (dirty_max >> 1))
 					break;
 			}
 		}
 
 		if (chunk->ndirty == 0) {
-			arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
+			arena_chunk_tree_dirty_remove(&arena->mChunksDirty,
 			    chunk);
 		}
 #ifdef MALLOC_DOUBLE_PURGE
 		if (madvised) {
 			/* The chunk might already be in the list, but this
 			 * makes sure it's at the front. */
-			if (arena->chunks_madvised.ElementProbablyInList(chunk)) {
-				arena->chunks_madvised.remove(chunk);
+			if (arena->mChunksMAdvised.ElementProbablyInList(chunk)) {
+				arena->mChunksMAdvised.remove(chunk);
 			}
-			arena->chunks_madvised.pushFront(chunk);
+			arena->mChunksMAdvised.pushFront(chunk);
 		}
 #endif
 	}
 }
 
 static void
 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 {
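
[Editorial note, not part of the patch.] This standalone sketch captures the dirty-page hysteresis that arena_purge (above) and arena_run_dalloc (continued below) implement together with mNumDirty/mMaxDirty: purging only starts once the dirty count exceeds the limit, and then continues down to half the limit (or to zero when "all" is requested). Arena and PurgeSomePages are hypothetical stand-ins.

// Illustrative sketch only -- not part of the patch.
#include <cassert>
#include <cstddef>

struct Arena {
  size_t mNumDirty = 0;
  size_t mMaxDirty = 128; // assumed limit, in pages
};

// Stand-in for madvise()/decommit of one run of dirty pages.
static size_t PurgeSomePages(Arena& aArena) {
  size_t purged = aArena.mNumDirty < 16 ? aArena.mNumDirty : 16;
  aArena.mNumDirty -= purged;
  return purged;
}

static void MaybePurge(Arena& aArena) {
  if (aArena.mNumDirty <= aArena.mMaxDirty) {
    return; // below the limit: keep dirty pages around for reuse
  }
  // Mirror of arena_purge(arena, /* all */ false): stop at half the limit.
  while (aArena.mNumDirty > (aArena.mMaxDirty >> 1)) {
    PurgeSomePages(aArena);
  }
}

int main() {
  Arena arena;
  arena.mNumDirty = 200; // above mMaxDirty (128): triggers a purge
  MaybePurge(arena);
  // Purging stops once the count drops to half the limit, not at zero.
  assert(arena.mNumDirty <= (arena.mMaxDirty >> 1));
  return 0;
}
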
@@ -2912,21 +2912,21 @@ arena_run_dalloc(arena_t *arena, arena_r
 
 		for (i = 0; i < run_pages; i++) {
 			MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
 			    == 0);
 			chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
 		}
 
 		if (chunk->ndirty == 0) {
-			arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
+			arena_chunk_tree_dirty_insert(&arena->mChunksDirty,
 			    chunk);
 		}
 		chunk->ndirty += run_pages;
-		arena->ndirty += run_pages;
+		arena->mNumDirty += run_pages;
 	} else {
 		size_t i;
 
 		for (i = 0; i < run_pages; i++) {
 			chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
 			    CHUNK_MAP_ALLOCATED);
 		}
 	}
@@ -2937,20 +2937,20 @@ arena_run_dalloc(arena_t *arena, arena_r
 
 	/* Try to coalesce forward. */
 	if (run_ind + run_pages < chunk_npages &&
 	    (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
 		size_t nrun_size = chunk->map[run_ind+run_pages].bits &
 		    ~pagesize_mask;
 
 		/*
-		 * Remove successor from runs_avail; the coalesced run is
+		 * Remove successor from tree of available runs; the coalesced run is
 		 * inserted later.
 		 */
-		arena_avail_tree_remove(&arena->runs_avail,
+		arena_avail_tree_remove(&arena->mRunsAvail,
 		    &chunk->map[run_ind+run_pages]);
 
 		size += nrun_size;
 		run_pages = size >> pagesize_2pow;
 
 		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
 		    == nrun_size);
 		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
@@ -2962,43 +2962,43 @@ arena_run_dalloc(arena_t *arena, arena_r
 	/* Try to coalesce backward. */
 	if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
 	    CHUNK_MAP_ALLOCATED) == 0) {
 		size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
 
 		run_ind -= prun_size >> pagesize_2pow;
 
 		/*
-		 * Remove predecessor from runs_avail; the coalesced run is
+		 * Remove predecessor from tree of available runs; the coalesced run is
 		 * inserted later.
 		 */
-		arena_avail_tree_remove(&arena->runs_avail,
+		arena_avail_tree_remove(&arena->mRunsAvail,
 		    &chunk->map[run_ind]);
 
 		size += prun_size;
 		run_pages = size >> pagesize_2pow;
 
 		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
 		    prun_size);
 		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
 		    pagesize_mask);
 		chunk->map[run_ind+run_pages-1].bits = size |
 		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
 	}
 
-	/* Insert into runs_avail, now that coalescing is complete. */
-	arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
+	/* Insert into tree of available runs, now that coalescing is complete. */
+	arena_avail_tree_insert(&arena->mRunsAvail, &chunk->map[run_ind]);
 
 	/* Deallocate chunk if it is now completely unused. */
 	if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
 	    CHUNK_MAP_ALLOCATED)) == arena_maxclass)
 		arena_chunk_dealloc(arena, chunk);
 
-	/* Enforce dirty_max. */
-	if (arena->ndirty > arena->dirty_max)
+	/* Enforce mMaxDirty. */
+	if (arena->mNumDirty > arena->mMaxDirty)
 		arena_purge(arena, false);
 }
 
 static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
@@ -3210,51 +3210,51 @@ arena_malloc_small(arena_t *arena, size_
 {
 	void *ret;
 	arena_bin_t *bin;
 	arena_run_t *run;
 
 	if (size < small_min) {
 		/* Tiny. */
 		size = pow2_ceil(size);
-		bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
+		bin = &arena->mBins[ffs((int)(size >> (TINY_MIN_2POW +
 		    1)))];
 		/*
 		 * Bin calculation is always correct, but we may need
 		 * to fix size for the purposes of assertions and/or
 		 * stats accuracy.
 		 */
 		if (size < (1U << TINY_MIN_2POW))
 			size = (1U << TINY_MIN_2POW);
 	} else if (size <= small_max) {
 		/* Quantum-spaced. */
 		size = QUANTUM_CEILING(size);
-		bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
+		bin = &arena->mBins[ntbins + (size >> opt_quantum_2pow)
 		    - 1];
 	} else {
 		/* Sub-page. */
 		size = pow2_ceil(size);
-		bin = &arena->bins[ntbins + nqbins
+		bin = &arena->mBins[ntbins + nqbins
 		    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
 	}
 	MOZ_DIAGNOSTIC_ASSERT(size == bin->reg_size);
 
-	malloc_spin_lock(&arena->lock);
+	malloc_spin_lock(&arena->mLock);
 	if ((run = bin->runcur) && run->nfree > 0)
 		ret = arena_bin_malloc_easy(arena, bin, run);
 	else
 		ret = arena_bin_malloc_hard(arena, bin);
 
 	if (!ret) {
-		malloc_spin_unlock(&arena->lock);
+		malloc_spin_unlock(&arena->mLock);
 		return nullptr;
 	}
 
-	arena->stats.allocated_small += size;
-	malloc_spin_unlock(&arena->lock);
+	arena->mStats.allocated_small += size;
+	malloc_spin_unlock(&arena->mLock);
 
 	if (zero == false) {
 		if (opt_junk)
 			memset(ret, kAllocJunk, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 	} else
 		memset(ret, 0, size);
@@ -3264,41 +3264,41 @@ arena_malloc_small(arena_t *arena, size_
 
 static void *
 arena_malloc_large(arena_t *arena, size_t size, bool zero)
 {
 	void *ret;
 
 	/* Large allocation. */
 	size = PAGE_CEILING(size);
-	malloc_spin_lock(&arena->lock);
+	malloc_spin_lock(&arena->mLock);
 	ret = (void *)arena_run_alloc(arena, nullptr, size, true, zero);
 	if (!ret) {
-		malloc_spin_unlock(&arena->lock);
+		malloc_spin_unlock(&arena->mLock);
 		return nullptr;
 	}
-	arena->stats.allocated_large += size;
-	malloc_spin_unlock(&arena->lock);
+	arena->mStats.allocated_large += size;
+	malloc_spin_unlock(&arena->mLock);
 
 	if (zero == false) {
 		if (opt_junk)
 			memset(ret, kAllocJunk, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 	}
 
 	return (ret);
 }
 
 static inline void *
 arena_malloc(arena_t *arena, size_t size, bool zero)
 {
 
 	MOZ_ASSERT(arena);
-	MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
+	MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 	MOZ_ASSERT(size != 0);
 	MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
 
 	if (size <= bin_maxclass) {
 		return (arena_malloc_small(arena, size, zero));
 	} else
 		return (arena_malloc_large(arena, size, zero));
 }
@@ -3331,20 +3331,20 @@ arena_palloc(arena_t *arena, size_t alig
 {
 	void *ret;
 	size_t offset;
 	arena_chunk_t *chunk;
 
 	MOZ_ASSERT((size & pagesize_mask) == 0);
 	MOZ_ASSERT((alignment & pagesize_mask) == 0);
 
-	malloc_spin_lock(&arena->lock);
+	malloc_spin_lock(&arena->mLock);
 	ret = (void *)arena_run_alloc(arena, nullptr, alloc_size, true, false);
 	if (!ret) {
-		malloc_spin_unlock(&arena->lock);
+		malloc_spin_unlock(&arena->mLock);
 		return nullptr;
 	}
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
 
 	offset = (uintptr_t)ret & (alignment - 1);
 	MOZ_ASSERT((offset & pagesize_mask) == 0);
 	MOZ_ASSERT(offset < alloc_size);
@@ -3364,18 +3364,18 @@ arena_palloc(arena_t *arena, size_t alig
 		if (trailsize != 0) {
 			/* Trim trailing space. */
 			MOZ_ASSERT(trailsize < alloc_size);
 			arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
 			    size, false);
 		}
 	}
 
-	arena->stats.allocated_large += size;
-	malloc_spin_unlock(&arena->lock);
+	arena->mStats.allocated_large += size;
+	malloc_spin_unlock(&arena->mLock);
 
 	if (opt_junk)
 		memset(ret, kAllocJunk, size);
 	else if (opt_zero)
 		memset(ret, 0, size);
 	return (ret);
 }
 
@@ -3522,17 +3522,17 @@ isalloc_validate(const void* ptr)
     return 0;
   }
 
   if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
     return 0;
   }
 
   if (chunk != ptr) {
-    MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+    MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
     return arena_salloc(ptr);
   } else {
     size_t ret;
     extent_node_t* node;
     extent_node_t key;
 
     /* Chunk. */
     key.addr = (void*)chunk;
@@ -3553,17 +3553,17 @@ isalloc(const void *ptr)
 	size_t ret;
 	arena_chunk_t *chunk;
 
 	MOZ_ASSERT(ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr) {
 		/* Region. */
-		MOZ_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+		MOZ_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
 		ret = arena_salloc(ptr);
 	} else {
 		extent_node_t *node, key;
 
 		/* Chunk (huge allocation). */
 
 		malloc_mutex_lock(&huge_mtx);
@@ -3609,17 +3609,17 @@ MozJemalloc::jemalloc_ptr_info(const voi
   }
 
   // It's not a huge allocation. Check if we have a known chunk.
   if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
-  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
 
   // Get the page number within the chunk.
   size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> pagesize_2pow);
   if (pageind < arena_chunk_header_npages) {
     // Within the chunk header.
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
@@ -3781,28 +3781,28 @@ arena_dalloc_small(arena_t *arena, arena
 			arena_chunk_map_t *run_mapelm =
 			    &chunk->map[run_pageind];
 
 			MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
 			    nullptr);
 			arena_run_tree_insert(&bin->runs, run_mapelm);
 		}
 	}
-	arena->stats.allocated_small -= size;
+	arena->mStats.allocated_small -= size;
 }
 
 static void
 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 	    pagesize_2pow;
 	size_t size = chunk->map[pageind].bits & ~pagesize_mask;
 
 	memset(ptr, kAllocPoison, size);
-	arena->stats.allocated_large -= size;
+	arena->mStats.allocated_large -= size;
 
 	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
 }
 
 static inline void
 arena_dalloc(void *ptr, size_t offset)
 {
 	arena_chunk_t *chunk;
@@ -3812,30 +3812,30 @@ arena_dalloc(void *ptr, size_t offset)
 
 	MOZ_ASSERT(ptr);
 	MOZ_ASSERT(offset != 0);
 	MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
 
 	chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
 	arena = chunk->arena;
 	MOZ_ASSERT(arena);
-	MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
-
-	malloc_spin_lock(&arena->lock);
+	MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
+
+	malloc_spin_lock(&arena->mLock);
 	pageind = offset >> pagesize_2pow;
 	mapelm = &chunk->map[pageind];
 	MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
 		arena_dalloc_small(arena, chunk, ptr, mapelm);
 	} else {
 		/* Large allocation. */
 		arena_dalloc_large(arena, chunk, ptr);
 	}
-	malloc_spin_unlock(&arena->lock);
+	malloc_spin_unlock(&arena->mLock);
 }
 
 static inline void
 idalloc(void *ptr)
 {
 	size_t offset;
 
 	MOZ_ASSERT(ptr);
@@ -3853,31 +3853,31 @@ arena_ralloc_large_shrink(arena_t *arena
 {
 
 	MOZ_ASSERT(size < oldsize);
 
 	/*
 	 * Shrink the run, and make trailing pages available for other
 	 * allocations.
 	 */
-	malloc_spin_lock(&arena->lock);
+	malloc_spin_lock(&arena->mLock);
 	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
 	    true);
-	arena->stats.allocated_large -= oldsize - size;
-	malloc_spin_unlock(&arena->lock);
+	arena->mStats.allocated_large -= oldsize - size;
+	malloc_spin_unlock(&arena->mLock);
 }
 
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = oldsize >> pagesize_2pow;
 
-	malloc_spin_lock(&arena->lock);
+	malloc_spin_lock(&arena->mLock);
 	MOZ_DIAGNOSTIC_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
 
 	/* Try to extend the run. */
 	MOZ_ASSERT(size > oldsize);
 	if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
 	    & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
 	    ~pagesize_mask) >= size - oldsize) {
 		/*
@@ -3889,21 +3889,21 @@ arena_ralloc_large_grow(arena_t *arena, 
 		    ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
 		    false);
 
 		chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
 		    CHUNK_MAP_ALLOCATED;
 		chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
 		    CHUNK_MAP_ALLOCATED;
 
-		arena->stats.allocated_large += size - oldsize;
-		malloc_spin_unlock(&arena->lock);
+		arena->mStats.allocated_large += size - oldsize;
+		malloc_spin_unlock(&arena->mLock);
 		return (false);
 	}
-	malloc_spin_unlock(&arena->lock);
+	malloc_spin_unlock(&arena->mLock);
 
 	return (true);
 }
 
 /*
  * Try to resize a large allocation, in order to avoid copying.  This will
  * always fail if growing an object, and the following run is already in use.
  */
@@ -3921,17 +3921,17 @@ arena_ralloc_large(void *ptr, size_t siz
 		}
 		return (false);
 	} else {
 		arena_chunk_t *chunk;
 		arena_t *arena;
 
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
-		MOZ_DIAGNOSTIC_ASSERT(arena->magic == ARENA_MAGIC);
+		MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
 			/* Fill before shrinking in order avoid a race. */
 			memset((void *)((uintptr_t)ptr + size), kAllocPoison,
 			    oldsize - size);
 			arena_ralloc_large_shrink(arena, chunk, ptr, psize,
 			    oldsize);
 			return (false);
@@ -4019,79 +4019,79 @@ iralloc(void *ptr, size_t size)
 
 static bool
 arena_new(arena_t *arena)
 {
 	unsigned i;
 	arena_bin_t *bin;
 	size_t prev_run_size;
 
-	if (malloc_spin_init(&arena->lock))
+	if (malloc_spin_init(&arena->mLock))
 		return (true);
 
-	memset(&arena->stats, 0, sizeof(arena_stats_t));
+	memset(&arena->mStats, 0, sizeof(arena_stats_t));
 
 	/* Initialize chunks. */
-	arena_chunk_tree_dirty_new(&arena->chunks_dirty);
+	arena_chunk_tree_dirty_new(&arena->mChunksDirty);
 #ifdef MALLOC_DOUBLE_PURGE
-	new (&arena->chunks_madvised) mozilla::DoublyLinkedList<arena_chunk_t>();
+	new (&arena->mChunksMAdvised) mozilla::DoublyLinkedList<arena_chunk_t>();
 #endif
-	arena->spare = nullptr;
-
-	arena->ndirty = 0;
+	arena->mSpare = nullptr;
+
+	arena->mNumDirty = 0;
 	// Reduce the maximum amount of dirty pages we allow to be kept on
 	// thread local arenas. TODO: make this more flexible.
-	arena->dirty_max = opt_dirty_max >> 3;
-
-	arena_avail_tree_new(&arena->runs_avail);
+	arena->mMaxDirty = opt_dirty_max >> 3;
+
+	arena_avail_tree_new(&arena->mRunsAvail);
 
 	/* Initialize bins. */
 	prev_run_size = pagesize;
 
 	/* (2^n)-spaced tiny bins. */
 	for (i = 0; i < ntbins; i++) {
-		bin = &arena->bins[i];
+		bin = &arena->mBins[i];
 		bin->runcur = nullptr;
 		arena_run_tree_new(&bin->runs);
 
 		bin->reg_size = (1ULL << (TINY_MIN_2POW + i));
 
 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
 
 	/* Quantum-spaced bins. */
 	for (; i < ntbins + nqbins; i++) {
-		bin = &arena->bins[i];
+		bin = &arena->mBins[i];
 		bin->runcur = nullptr;
 		arena_run_tree_new(&bin->runs);
 
 		bin->reg_size = quantum * (i - ntbins + 1);
 
 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
 
 	/* (2^n)-spaced sub-page bins. */
 	for (; i < ntbins + nqbins + nsbins; i++) {
-		bin = &arena->bins[i];
+		bin = &arena->mBins[i];
 		bin->runcur = nullptr;
 		arena_run_tree_new(&bin->runs);
 
 		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
 
 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
 	}
 
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-	arena->magic = ARENA_MAGIC;
+	arena->mMagic = ARENA_MAGIC;
 #endif
 
 	return (false);
 }
 
 static inline arena_t *
 arenas_fallback()
 {
@@ -4627,17 +4627,17 @@ MALLOC_OUT:
   if (!arenas || !arenas[0]) {
 #ifndef XP_WIN
     malloc_mutex_unlock(&init_lock);
 #endif
     return true;
   }
   /* arena_new() sets this to a lower value for thread local arenas;
    * reset to the default value for the main arenas */
-  arenas[0]->dirty_max = opt_dirty_max;
+  arenas[0]->mMaxDirty = opt_dirty_max;
 
 #ifndef NO_TLS
   /*
    * Assign the initial arena to the initial thread.
    */
   thread_arena.set(arenas[0]);
 #endif
 
@@ -4968,46 +4968,46 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 
     if (!arena) {
       continue;
     }
 
     arena_headers = 0;
     arena_unused = 0;
 
-    malloc_spin_lock(&arena->lock);
-
-    arena_mapped = arena->stats.mapped;
+    malloc_spin_lock(&arena->mLock);
+
+    arena_mapped = arena->mStats.mapped;
 
     /* "committed" counts dirty and allocated memory. */
-    arena_committed = arena->stats.committed << pagesize_2pow;
-
-    arena_allocated = arena->stats.allocated_small +
-                      arena->stats.allocated_large;
-
-    arena_dirty = arena->ndirty << pagesize_2pow;
+    arena_committed = arena->mStats.committed << pagesize_2pow;
+
+    arena_allocated = arena->mStats.allocated_small +
+                      arena->mStats.allocated_large;
+
+    arena_dirty = arena->mNumDirty << pagesize_2pow;
 
     for (j = 0; j < ntbins + nqbins + nsbins; j++) {
-      arena_bin_t* bin = &arena->bins[j];
+      arena_bin_t* bin = &arena->mBins[j];
       size_t bin_unused = 0;
 
       rb_foreach_begin(arena_chunk_map_t, link, &bin->runs, mapelm) {
         run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
         bin_unused += run->nfree * bin->reg_size;
       } rb_foreach_end(arena_chunk_map_t, link, &bin->runs, mapelm)
 
       if (bin->runcur) {
         bin_unused += bin->runcur->nfree * bin->reg_size;
       }
 
       arena_unused += bin_unused;
       arena_headers += bin->stats.curruns * bin->reg0_offset;
     }
 
-    malloc_spin_unlock(&arena->lock);
+    malloc_spin_unlock(&arena->mLock);
 
     MOZ_ASSERT(arena_mapped >= arena_committed);
     MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
 
     /* "waste" is committed memory that is neither dirty nor
      * allocated. */
     aStats->mapped += arena_mapped;
     aStats->allocated += arena_allocated;
@@ -5062,24 +5062,24 @@ hard_purge_chunk(arena_chunk_t *chunk)
 		i += npages;
 	}
 }
 
 /* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
 static void
 hard_purge_arena(arena_t *arena)
 {
-	malloc_spin_lock(&arena->lock);
-
-	while (!arena->chunks_madvised.isEmpty()) {
-		arena_chunk_t *chunk = arena->chunks_madvised.popFront();
+	malloc_spin_lock(&arena->mLock);
+
+	while (!arena->mChunksMAdvised.isEmpty()) {
+		arena_chunk_t *chunk = arena->mChunksMAdvised.popFront();
 		hard_purge_chunk(chunk);
 	}
 
-	malloc_spin_unlock(&arena->lock);
+	malloc_spin_unlock(&arena->mLock);
 }
 
 template<> inline void
 MozJemalloc::jemalloc_purge_freed_pages()
 {
   size_t i;
   malloc_spin_lock(&arenas_lock);
   for (i = 0; i < narenas; i++) {
@@ -5106,19 +5106,19 @@ template<> inline void
 MozJemalloc::jemalloc_free_dirty_pages(void)
 {
   size_t i;
   malloc_spin_lock(&arenas_lock);
   for (i = 0; i < narenas; i++) {
     arena_t* arena = arenas[i];
 
     if (arena) {
-      malloc_spin_lock(&arena->lock);
+      malloc_spin_lock(&arena->mLock);
       arena_purge(arena, true);
-      malloc_spin_unlock(&arena->lock);
+      malloc_spin_unlock(&arena->mLock);
     }
   }
   malloc_spin_unlock(&arenas_lock);
 }
 
 /*
  * End non-standard functions.
  */
@@ -5138,17 +5138,17 @@ void
 {
 	unsigned i;
 
 	/* Acquire all mutexes in a safe order. */
 
 	malloc_spin_lock(&arenas_lock);
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i])
-			malloc_spin_lock(&arenas[i]->lock);
+			malloc_spin_lock(&arenas[i]->mLock);
 	}
 
 	malloc_mutex_lock(&base_mtx);
 
 	malloc_mutex_lock(&huge_mtx);
 }
 
 #ifndef XP_DARWIN
@@ -5162,17 +5162,17 @@ void
 	/* Release all mutexes, now that fork() has completed. */
 
 	malloc_mutex_unlock(&huge_mtx);
 
 	malloc_mutex_unlock(&base_mtx);
 
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i])
-			malloc_spin_unlock(&arenas[i]->lock);
+			malloc_spin_unlock(&arenas[i]->mLock);
 	}
 	malloc_spin_unlock(&arenas_lock);
 }
 
 #ifndef XP_DARWIN
 static
 #endif
 void
@@ -5183,17 +5183,17 @@ void
 	/* Reinitialize all mutexes, now that fork() has completed. */
 
 	malloc_mutex_init(&huge_mtx);
 
 	malloc_mutex_init(&base_mtx);
 
 	for (i = 0; i < narenas; i++) {
 		if (arenas[i])
-			malloc_spin_init(&arenas[i]->lock);
+			malloc_spin_init(&arenas[i]->mLock);
 	}
 	malloc_spin_init(&arenas_lock);
 }
 
 /*
  * End library-private functions.
  */
 /******************************************************************************/