Bug 1411786 - clang-format chunk_{recycle,record,alloc,dealloc}. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 26 Oct 2017 08:36:26 +0900
changeset 686643 37bf3caa9a0e60f4b50d10992961365411a71761
parent 686642 6cb10204ba6a1e1209faa4920038e817b844d943
child 686644 5020684d66ccfe6dd3ef4fe0a23403f551146a80
push id 86234
push user bmo:mh+mozilla@glandium.org
push date Thu, 26 Oct 2017 05:06:52 +0000
reviewers njn
bugs 1411786
milestone 58.0a1
Bug 1411786 - clang-format chunk_{recycle,record,alloc,dealloc}. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -1922,96 +1922,96 @@ pages_purge(void *addr, size_t length, b
 	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
 	return JEMALLOC_MADV_ZEROS && err == 0;
 #    undef JEMALLOC_MADV_PURGE
 #    undef JEMALLOC_MADV_ZEROS
 #  endif
 #endif
 }
 
-static void *
-chunk_recycle(size_t size, size_t alignment, bool *zeroed)
+static void*
+chunk_recycle(size_t size, size_t alignment, bool* zeroed)
 {
-	void *ret;
-	extent_node_t *node;
-	extent_node_t key;
-	size_t alloc_size, leadsize, trailsize;
-	ChunkType chunk_type;
-
-	alloc_size = size + alignment - chunksize;
-	/* Beware size_t wrap-around. */
-	if (alloc_size < size)
-		return nullptr;
-	key.addr = nullptr;
-	key.size = alloc_size;
-	chunks_mtx.Lock();
-	node = gChunksBySize.SearchOrNext(&key);
-	if (!node) {
-		chunks_mtx.Unlock();
-		return nullptr;
-	}
-	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
-	    (uintptr_t)node->addr;
-	MOZ_ASSERT(node->size >= leadsize + size);
-	trailsize = node->size - leadsize - size;
-	ret = (void *)((uintptr_t)node->addr + leadsize);
-	chunk_type = node->chunk_type;
-	if (zeroed) {
-		*zeroed = (chunk_type == ZEROED_CHUNK);
-	}
-	/* Remove node from the tree. */
-	gChunksBySize.Remove(node);
-	gChunksByAddress.Remove(node);
-	if (leadsize != 0) {
-		/* Insert the leading space as a smaller chunk. */
-		node->size = leadsize;
-		gChunksBySize.Insert(node);
-		gChunksByAddress.Insert(node);
-		node = nullptr;
-	}
-	if (trailsize != 0) {
-		/* Insert the trailing space as a smaller chunk. */
-		if (!node) {
-			/*
+  void* ret;
+  extent_node_t* node;
+  extent_node_t key;
+  size_t alloc_size, leadsize, trailsize;
+  ChunkType chunk_type;
+
+  alloc_size = size + alignment - chunksize;
+  /* Beware size_t wrap-around. */
+  if (alloc_size < size)
+    return nullptr;
+  key.addr = nullptr;
+  key.size = alloc_size;
+  chunks_mtx.Lock();
+  node = gChunksBySize.SearchOrNext(&key);
+  if (!node) {
+    chunks_mtx.Unlock();
+    return nullptr;
+  }
+  leadsize =
+    ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) - (uintptr_t)node->addr;
+  MOZ_ASSERT(node->size >= leadsize + size);
+  trailsize = node->size - leadsize - size;
+  ret = (void*)((uintptr_t)node->addr + leadsize);
+  chunk_type = node->chunk_type;
+  if (zeroed) {
+    *zeroed = (chunk_type == ZEROED_CHUNK);
+  }
+  /* Remove node from the tree. */
+  gChunksBySize.Remove(node);
+  gChunksByAddress.Remove(node);
+  if (leadsize != 0) {
+    /* Insert the leading space as a smaller chunk. */
+    node->size = leadsize;
+    gChunksBySize.Insert(node);
+    gChunksByAddress.Insert(node);
+    node = nullptr;
+  }
+  if (trailsize != 0) {
+    /* Insert the trailing space as a smaller chunk. */
+    if (!node) {
+      /*
 			 * An additional node is required, but
 			 * base_node_alloc() can cause a new base chunk to be
 			 * allocated.  Drop chunks_mtx in order to avoid
 			 * deadlock, and if node allocation fails, deallocate
 			 * the result before returning an error.
 			 */
-			chunks_mtx.Unlock();
-			node = base_node_alloc();
-			if (!node) {
-				chunk_dealloc(ret, size, chunk_type);
-				return nullptr;
-			}
-			chunks_mtx.Lock();
-		}
-		node->addr = (void *)((uintptr_t)(ret) + size);
-		node->size = trailsize;
-		node->chunk_type = chunk_type;
-		gChunksBySize.Insert(node);
-		gChunksByAddress.Insert(node);
-		node = nullptr;
-	}
-
-	recycled_size -= size;
-
-	chunks_mtx.Unlock();
-
-	if (node)
-		base_node_dealloc(node);
+      chunks_mtx.Unlock();
+      node = base_node_alloc();
+      if (!node) {
+        chunk_dealloc(ret, size, chunk_type);
+        return nullptr;
+      }
+      chunks_mtx.Lock();
+    }
+    node->addr = (void*)((uintptr_t)(ret) + size);
+    node->size = trailsize;
+    node->chunk_type = chunk_type;
+    gChunksBySize.Insert(node);
+    gChunksByAddress.Insert(node);
+    node = nullptr;
+  }
+
+  recycled_size -= size;
+
+  chunks_mtx.Unlock();
+
+  if (node)
+    base_node_dealloc(node);
 #ifdef MALLOC_DECOMMIT
-	pages_commit(ret, size);
-	// pages_commit is guaranteed to zero the chunk.
-	if (zeroed) {
-		*zeroed = true;
-	}
+  pages_commit(ret, size);
+  // pages_commit is guaranteed to zero the chunk.
+  if (zeroed) {
+    *zeroed = true;
+  }
 #endif
-	return (ret);
+  return (ret);
 }
 
 #ifdef XP_WIN
 /*
  * On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
  * awkward to recycle allocations of varying sizes. Therefore we only allow
  * recycling when the size equals the chunksize, unless deallocation is entirely
  * disabled.
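
The hunk above is the core of chunk_recycle: a cached extent of at least size + alignment - chunksize bytes is looked up by size, an aligned range of `size` bytes is carved out of it, and any leading or trailing remainder is re-inserted into the trees as smaller chunks. The standalone sketch below is not part of the patch; the constants, the extent address and the AlignmentCeiling helper are simplified stand-ins for the real ALIGNMENT_CEILING macro and the chunk-aligned extents tracked by gChunksBySize.

#include <cassert>
#include <cstdint>
#include <cstdio>

static uintptr_t
AlignmentCeiling(uintptr_t aAddr, size_t aAlignment)
{
  // Round aAddr up to the next multiple of aAlignment (a power of two).
  return (aAddr + (aAlignment - 1)) & ~(uintptr_t(aAlignment) - 1);
}

int main()
{
  const size_t kChunkSize = size_t(1) << 20; // hypothetical 1 MiB chunks
  const size_t size = 2 * kChunkSize;        // requested, chunk-multiple size
  const size_t alignment = 2 * kChunkSize;   // requested alignment
  // Worst case: an aligned sub-range of `size` bytes must fit somewhere in
  // the extent, so the tree is asked for size + alignment - chunksize bytes.
  const size_t alloc_size = size + alignment - kChunkSize;

  // Pretend SearchOrNext found a cached, chunk-aligned extent that is a bit
  // bigger than strictly necessary.
  const uintptr_t extent_addr = 3 * kChunkSize;
  const size_t extent_size = alloc_size + kChunkSize;

  const size_t leadsize =
    AlignmentCeiling(extent_addr, alignment) - extent_addr;
  assert(extent_size >= leadsize + size);
  const size_t trailsize = extent_size - leadsize - size;
  const uintptr_t ret = extent_addr + leadsize;

  // Non-zero lead and trail go back into the trees as smaller cached chunks;
  // only [ret, ret + size) is handed out to the caller.
  printf("ret=%#zx lead=%zu trail=%zu\n", size_t(ret), leadsize, trailsize);
  return 0;
}

With these made-up numbers the sketch prints a 1 MiB lead and a 1 MiB trail around the 2 MiB aligned range, which is exactly the split that chunk_recycle would re-insert as smaller chunks.
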
@@ -2023,53 +2023,53 @@ chunk_recycle(size_t size, size_t alignm
 
 /* Allocates `size` bytes of system memory aligned for `alignment`.
  * `base` indicates whether the memory will be used for the base allocator
  * (e.g. base_alloc).
  * `zeroed` is an outvalue that returns whether the allocated memory is
  * guaranteed to be full of zeroes. It can be omitted when the caller doesn't
  * care about the result.
  */
-static void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zeroed)
+static void*
+chunk_alloc(size_t size, size_t alignment, bool base, bool* zeroed)
 {
-	void *ret;
-
-	MOZ_ASSERT(size != 0);
-	MOZ_ASSERT((size & chunksize_mask) == 0);
-	MOZ_ASSERT(alignment != 0);
-	MOZ_ASSERT((alignment & chunksize_mask) == 0);
-
-	// Base allocations can't be fulfilled by recycling because of
-	// possible deadlock or infinite recursion.
-	if (CAN_RECYCLE(size) && !base) {
-		ret = chunk_recycle(size, alignment, zeroed);
-		if (ret)
-			goto RETURN;
-	}
-	ret = chunk_alloc_mmap(size, alignment);
-	if (zeroed)
-		*zeroed = true;
-	if (ret) {
-		goto RETURN;
-	}
-
-	/* All strategies for allocation failed. */
-	ret = nullptr;
+  void* ret;
+
+  MOZ_ASSERT(size != 0);
+  MOZ_ASSERT((size & chunksize_mask) == 0);
+  MOZ_ASSERT(alignment != 0);
+  MOZ_ASSERT((alignment & chunksize_mask) == 0);
+
+  // Base allocations can't be fulfilled by recycling because of
+  // possible deadlock or infinite recursion.
+  if (CAN_RECYCLE(size) && !base) {
+    ret = chunk_recycle(size, alignment, zeroed);
+    if (ret)
+      goto RETURN;
+  }
+  ret = chunk_alloc_mmap(size, alignment);
+  if (zeroed)
+    *zeroed = true;
+  if (ret) {
+    goto RETURN;
+  }
+
+  /* All strategies for allocation failed. */
+  ret = nullptr;
 RETURN:
 
-	if (ret && base == false) {
-		if (!gChunkRTree.Set(ret, ret)) {
-			chunk_dealloc(ret, size, UNKNOWN_CHUNK);
-			return nullptr;
-		}
-	}
-
-	MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
-	return (ret);
+  if (ret && base == false) {
+    if (!gChunkRTree.Set(ret, ret)) {
+      chunk_dealloc(ret, size, UNKNOWN_CHUNK);
+      return nullptr;
+    }
+  }
+
+  MOZ_ASSERT(CHUNK_ADDR2BASE(ret) == ret);
+  return (ret);
 }
 
 static void
 chunk_ensure_zero(void* aPtr, size_t aSize, bool aZeroed)
 {
   if (aZeroed == false) {
     memset(aPtr, 0, aSize);
   }
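
For reference while reading the hunk above: chunk_alloc tries the recycle cache first (never for base allocations, to avoid deadlock or recursion), falls back to a fresh mapping that is known to be zeroed, and registers non-base chunks in gChunkRTree. The sketch below is not part of the patch; it restates that strategy without the goto, and TryRecycle, FreshMapping, RegisterChunk and ReleaseChunk are hypothetical stand-ins for chunk_recycle, chunk_alloc_mmap, gChunkRTree.Set and pages_unmap, stubbed out only so the sketch compiles and runs.

#include <cstdlib>
#include <cstring>

static void*
TryRecycle(size_t, size_t, bool*)
{
  return nullptr; // stub: pretend the recycle cache is empty
}

static void*
FreshMapping(size_t aSize, size_t /* aAlignment */)
{
  // Stub: the real chunk_alloc_mmap returns a chunk-aligned mapping; here we
  // just malloc and zero the block to mimic mmap's zero-fill guarantee.
  void* p = std::malloc(aSize);
  if (p) {
    std::memset(p, 0, aSize);
  }
  return p;
}

static bool
RegisterChunk(void*)
{
  return true; // stub for gChunkRTree.Set
}

static void
ReleaseChunk(void* aChunk, size_t)
{
  std::free(aChunk); // stub for chunk_dealloc / pages_unmap
}

static void*
AllocChunk(size_t aSize, size_t aAlignment, bool aBase, bool* aZeroed)
{
  // Base allocations never recycle: recycling can call back into the base
  // allocator, which could deadlock or recurse.
  void* ret = aBase ? nullptr : TryRecycle(aSize, aAlignment, aZeroed);
  if (!ret) {
    ret = FreshMapping(aSize, aAlignment);
    if (aZeroed) {
      *aZeroed = true; // a fresh mapping is known to be zero-filled
    }
  }
  // Non-base chunks are registered so later ownership lookups can find them;
  // if registration fails, the chunk is released rather than leaked.
  if (ret && !aBase && !RegisterChunk(ret)) {
    ReleaseChunk(ret, aSize);
    return nullptr;
  }
  return ret;
}

int main()
{
  bool zeroed = false;
  void* chunk = AllocChunk(1 << 20, 1 << 20, false, &zeroed);
  if (chunk) {
    ReleaseChunk(chunk, 1 << 20);
  }
  return 0;
}

The `zeroed` outvalue is what lets callers such as chunk_ensure_zero, just below, skip the memset when the memory is already known to be zero-filled.
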
@@ -2081,140 +2081,139 @@ chunk_ensure_zero(void* aPtr, size_t aSi
     for (i = 0; i < aSize / sizeof(size_t); i++) {
       MOZ_ASSERT(p[i] == 0);
     }
   }
 #endif
 }
 
 static void
-chunk_record(void *chunk, size_t size, ChunkType chunk_type)
+chunk_record(void* chunk, size_t size, ChunkType chunk_type)
 {
-	extent_node_t *xnode, *node, *prev, *xprev, key;
-
-	if (chunk_type != ZEROED_CHUNK) {
-		if (pages_purge(chunk, size, chunk_type == HUGE_CHUNK)) {
-			chunk_type = ZEROED_CHUNK;
-		}
-	}
-
-	/*
+  extent_node_t *xnode, *node, *prev, *xprev, key;
+
+  if (chunk_type != ZEROED_CHUNK) {
+    if (pages_purge(chunk, size, chunk_type == HUGE_CHUNK)) {
+      chunk_type = ZEROED_CHUNK;
+    }
+  }
+
+  /*
 	 * Allocate a node before acquiring chunks_mtx even though it might not
 	 * be needed, because base_node_alloc() may cause a new base chunk to
 	 * be allocated, which could cause deadlock if chunks_mtx were already
 	 * held.
 	 */
-	xnode = base_node_alloc();
-	/* Use xprev to implement conditional deferred deallocation of prev. */
-	xprev = nullptr;
-
-	chunks_mtx.Lock();
-	key.addr = (void *)((uintptr_t)chunk + size);
-	node = gChunksByAddress.SearchOrNext(&key);
-	/* Try to coalesce forward. */
-	if (node && node->addr == key.addr) {
-		/*
+  xnode = base_node_alloc();
+  /* Use xprev to implement conditional deferred deallocation of prev. */
+  xprev = nullptr;
+
+  chunks_mtx.Lock();
+  key.addr = (void*)((uintptr_t)chunk + size);
+  node = gChunksByAddress.SearchOrNext(&key);
+  /* Try to coalesce forward. */
+  if (node && node->addr == key.addr) {
+    /*
 		 * Coalesce chunk with the following address range.  This does
 		 * not change the position within chunks_ad, so only
 		 * remove/insert from/into chunks_szad.
 		 */
-		gChunksBySize.Remove(node);
-		node->addr = chunk;
-		node->size += size;
-		if (node->chunk_type != chunk_type) {
-			node->chunk_type = RECYCLED_CHUNK;
-		}
-		gChunksBySize.Insert(node);
-	} else {
-		/* Coalescing forward failed, so insert a new node. */
-		if (!xnode) {
-			/*
+    gChunksBySize.Remove(node);
+    node->addr = chunk;
+    node->size += size;
+    if (node->chunk_type != chunk_type) {
+      node->chunk_type = RECYCLED_CHUNK;
+    }
+    gChunksBySize.Insert(node);
+  } else {
+    /* Coalescing forward failed, so insert a new node. */
+    if (!xnode) {
+      /*
 			 * base_node_alloc() failed, which is an exceedingly
 			 * unlikely failure.  Leak chunk; its pages have
 			 * already been purged, so this is only a virtual
 			 * memory leak.
 			 */
-			goto label_return;
-		}
-		node = xnode;
-		xnode = nullptr; /* Prevent deallocation below. */
-		node->addr = chunk;
-		node->size = size;
-		node->chunk_type = chunk_type;
-		gChunksByAddress.Insert(node);
-		gChunksBySize.Insert(node);
-	}
-
-	/* Try to coalesce backward. */
-	prev = gChunksByAddress.Prev(node);
-	if (prev && (void *)((uintptr_t)prev->addr + prev->size) ==
-	    chunk) {
-		/*
+      goto label_return;
+    }
+    node = xnode;
+    xnode = nullptr; /* Prevent deallocation below. */
+    node->addr = chunk;
+    node->size = size;
+    node->chunk_type = chunk_type;
+    gChunksByAddress.Insert(node);
+    gChunksBySize.Insert(node);
+  }
+
+  /* Try to coalesce backward. */
+  prev = gChunksByAddress.Prev(node);
+  if (prev && (void*)((uintptr_t)prev->addr + prev->size) == chunk) {
+    /*
 		 * Coalesce chunk with the previous address range.  This does
 		 * not change the position within chunks_ad, so only
 		 * remove/insert node from/into chunks_szad.
 		 */
-		gChunksBySize.Remove(prev);
-		gChunksByAddress.Remove(prev);
-
-		gChunksBySize.Remove(node);
-		node->addr = prev->addr;
-		node->size += prev->size;
-		if (node->chunk_type != prev->chunk_type) {
-			node->chunk_type = RECYCLED_CHUNK;
-		}
-		gChunksBySize.Insert(node);
-
-		xprev = prev;
-	}
-
-	recycled_size += size;
+    gChunksBySize.Remove(prev);
+    gChunksByAddress.Remove(prev);
+
+    gChunksBySize.Remove(node);
+    node->addr = prev->addr;
+    node->size += prev->size;
+    if (node->chunk_type != prev->chunk_type) {
+      node->chunk_type = RECYCLED_CHUNK;
+    }
+    gChunksBySize.Insert(node);
+
+    xprev = prev;
+  }
+
+  recycled_size += size;
 
 label_return:
-	chunks_mtx.Unlock();
-	/*
+  chunks_mtx.Unlock();
+  /*
 	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
 	 * avoid potential deadlock.
 	 */
-	if (xnode)
-		base_node_dealloc(xnode);
-	if (xprev)
-		base_node_dealloc(xprev);
+  if (xnode)
+    base_node_dealloc(xnode);
+  if (xprev)
+    base_node_dealloc(xprev);
 }
 
 static void
-chunk_dealloc(void *chunk, size_t size, ChunkType type)
+chunk_dealloc(void* chunk, size_t size, ChunkType type)
 {
 
-	MOZ_ASSERT(chunk);
-	MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
-	MOZ_ASSERT(size != 0);
-	MOZ_ASSERT((size & chunksize_mask) == 0);
-
-	gChunkRTree.Unset(chunk);
-
-	if (CAN_RECYCLE(size)) {
-		size_t recycled_so_far = load_acquire_z(&recycled_size);
-		// In case some race condition put us above the limit.
-		if (recycled_so_far < recycle_limit) {
-			size_t recycle_remaining = recycle_limit - recycled_so_far;
-			size_t to_recycle;
-			if (size > recycle_remaining) {
-				to_recycle = recycle_remaining;
-				// Drop pages that would overflow the recycle limit
-				pages_trim(chunk, size, 0, to_recycle);
-			} else {
-				to_recycle = size;
-			}
-			chunk_record(chunk, to_recycle, type);
-			return;
-		}
-	}
-
-	pages_unmap(chunk, size);
+  MOZ_ASSERT(chunk);
+  MOZ_ASSERT(CHUNK_ADDR2BASE(chunk) == chunk);
+  MOZ_ASSERT(size != 0);
+  MOZ_ASSERT((size & chunksize_mask) == 0);
+
+  gChunkRTree.Unset(chunk);
+
+  if (CAN_RECYCLE(size)) {
+    size_t recycled_so_far = load_acquire_z(&recycled_size);
+    // In case some race condition put us above the limit.
+    if (recycled_so_far < recycle_limit) {
+      size_t recycle_remaining = recycle_limit - recycled_so_far;
+      size_t to_recycle;
+      if (size > recycle_remaining) {
+        to_recycle = recycle_remaining;
+        // Drop pages that would overflow the recycle limit
+        pages_trim(chunk, size, 0, to_recycle);
+      } else {
+        to_recycle = size;
+      }
+      chunk_record(chunk, to_recycle, type);
+      return;
+    }
+  }
+
+  pages_unmap(chunk, size);
 }
 
 #undef CAN_RECYCLE
 
 /*
  * End chunk management functions.
  */
 /******************************************************************************/
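
Finally, a small sketch (not part of the patch) of the recycle-limit arithmetic in chunk_dealloc above: when caching the whole chunk would push recycled_size past recycle_limit, only the portion that still fits under the cap is handed to chunk_record and the excess is trimmed back to the OS. The figures below are made up for illustration; in the real code recycled_size is read with acquire semantics and the trimming is done by pages_trim.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main()
{
  const size_t recycle_limit = 128 * 1024 * 1024;   // hypothetical cap
  const size_t recycled_so_far = 120 * 1024 * 1024; // already cached
  const size_t size = 16 * 1024 * 1024;             // chunk being deallocated

  if (recycled_so_far < recycle_limit) {
    const size_t recycle_remaining = recycle_limit - recycled_so_far;
    // Only what fits under the cap is kept for reuse (chunk_record); the
    // excess is what pages_trim drops back to the OS in the real code.
    const size_t to_recycle = std::min(size, recycle_remaining);
    std::printf("recycle %zu bytes, release %zu bytes\n",
                to_recycle, size - to_recycle);
  } else {
    // Already at or over the limit (e.g. after a race): unmap everything.
    std::printf("over the limit: unmap all %zu bytes\n", size);
  }
  return 0;
}

With these numbers, 8 MiB of the 16 MiB chunk is recycled and the remaining 8 MiB is released immediately.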