Bug 1520366 - Update LifoAlloc growth heuristics to track smallAllocsSize. r=nbp
authorTed Campbell <tcampbell@mozilla.com>
Tue, 22 Jan 2019 00:54:03 -0500
changeset 514901 acd0dadf80139d92223cf7506618621919418797
parent 514900 13e451a690836e7b38a6bdb7677e9247fdbb6fc3
child 514902 e2775f83185e13bc4eb050b8182fd59d00ca7867
push id: 1953
push user: ffxbld-merge
push date: Mon, 11 Mar 2019 12:10:20 +0000
treeherder: mozilla-release@9c35dcbaa899 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: nbp
bugs: 1520366
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1520366 - Update LifoAlloc growth heuristics to track smallAllocsSize. r=nbp - Replace oversizeSize with smallAllocsSize. This will track sizes of non-transferred small allocation chunks. It excludes unused chunks, oversize chunks and chunks transferred from other LifoAlloc. This new counter is used to determine chunk size growth heuristics. This aims to reduce memory spikes due to transferFrom allocation patterns that we see in the wild. - Also fix a pre-existing typo in LifoAlloc::reset
js/src/ds/LifoAlloc.cpp
js/src/ds/LifoAlloc.h
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -99,47 +99,50 @@ void BumpChunk::setReadWrite() {
 
 void LifoAlloc::reset(size_t defaultChunkSize) {
   MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));
 
   while (!chunks_.empty()) {
     chunks_.popFirst();
   }
   while (!oversize_.empty()) {
-    chunks_.popFirst();
+    oversize_.popFirst();
   }
   while (!unused_.empty()) {
     unused_.popFirst();
   }
   defaultChunkSize_ = defaultChunkSize;
   oversizeThreshold_ = defaultChunkSize;
   markCount = 0;
   curSize_ = 0;
-  oversizeSize_ = 0;
+  smallAllocsSize_ = 0;
 }
 
 void LifoAlloc::freeAll() {
+  // When free-ing all chunks, we can no longer determine which chunks were
+  // transferred and which were not, so simply clear the heuristic to zero
+  // right away.
+  smallAllocsSize_ = 0;
+
   while (!chunks_.empty()) {
     UniqueBumpChunk bc = chunks_.popFirst();
     decrementCurSize(bc->computedSizeOfIncludingThis());
   }
   while (!oversize_.empty()) {
     UniqueBumpChunk bc = oversize_.popFirst();
     decrementCurSize(bc->computedSizeOfIncludingThis());
-    oversizeSize_ -= bc->computedSizeOfIncludingThis();
   }
   while (!unused_.empty()) {
     UniqueBumpChunk bc = unused_.popFirst();
     decrementCurSize(bc->computedSizeOfIncludingThis());
   }
 
   // Nb: maintaining curSize_ correctly isn't easy.  Fortunately, this is an
   // excellent sanity check.
   MOZ_ASSERT(curSize_ == 0);
-  MOZ_ASSERT(oversizeSize_ == 0);
 }
 
 // Round at the same page granularity used by malloc.
 static size_t MallocGoodSize(size_t aSize) {
 #if defined(MOZ_MEMORY)
   return malloc_good_size(aSize);
 #else
   return aSize;
@@ -171,21 +174,24 @@ LifoAlloc::UniqueBumpChunk LifoAlloc::ne
   // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.
 
   size_t minSize;
   if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
                    (minSize & (size_t(1) << (BitSize<size_t>::value - 1))))) {
     return nullptr;
   }
 
-  MOZ_ASSERT(curSize_ >= oversizeSize_);
+  // Note: When computing chunkSize growth, we only are interested in chunks
+  // used for small allocations. This excludes unused chunks, oversized chunks,
+  // and chunks transferred in from another LifoAlloc.
+  MOZ_ASSERT(curSize_ >= smallAllocsSize_);
   const size_t chunkSize =
       (oversize || minSize > defaultChunkSize_)
           ? MallocGoodSize(minSize)
-          : NextSize(defaultChunkSize_, curSize_ - oversizeSize_);
+          : NextSize(defaultChunkSize_, smallAllocsSize_);
 
   // Create a new BumpChunk, and allocate space for it.
   UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
   if (!result) {
     return nullptr;
   }
   MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
   return result;
@@ -214,43 +220,44 @@ LifoAlloc::UniqueBumpChunk LifoAlloc::ge
     }
   }
 
   // Allocate a new BumpChunk with enough space for the next allocation.
   UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
   if (!newChunk) {
     return newChunk;
   }
-  size_t size = newChunk->computedSizeOfIncludingThis();
-  incrementCurSize(size);
+  incrementCurSize(newChunk->computedSizeOfIncludingThis());
   return newChunk;
 }
 
 void* LifoAlloc::allocImplColdPath(size_t n) {
   void* result;
   UniqueBumpChunk newChunk = getOrCreateChunk(n);
   if (!newChunk) {
     return nullptr;
   }
 
+  // This new chunk is about to be used for small allocations.
+  smallAllocsSize_ += newChunk->computedSizeOfIncludingThis();
+
   // Since we just created a large enough chunk, this can't fail.
   chunks_.append(std::move(newChunk));
   result = chunks_.last()->tryAlloc(n);
   MOZ_ASSERT(result);
   return result;
 }
 
 void* LifoAlloc::allocImplOversize(size_t n) {
   void* result;
   UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
   if (!newChunk) {
     return nullptr;
   }
   incrementCurSize(newChunk->computedSizeOfIncludingThis());
-  oversizeSize_ += newChunk->computedSizeOfIncludingThis();
 
   // Since we just created a large enough chunk, this can't fail.
   oversize_.append(std::move(newChunk));
   result = oversize_.last()->tryAlloc(n);
   MOZ_ASSERT(result);
   return result;
 }
 
@@ -261,19 +268,18 @@ bool LifoAlloc::ensureUnusedApproximateC
       return true;
     }
   }
 
   UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
   if (!newChunk) {
     return false;
   }
-  size_t size = newChunk->computedSizeOfIncludingThis();
+  incrementCurSize(newChunk->computedSizeOfIncludingThis());
   unused_.pushFront(std::move(newChunk));
-  incrementCurSize(size);
   return true;
 }
 
 LifoAlloc::Mark LifoAlloc::mark() {
   markCount++;
   Mark res;
   if (!chunks_.empty()) {
     res.chunk = chunks_.last()->mark();
@@ -320,25 +326,28 @@ void LifoAlloc::release(Mark mark) {
     }
   };
 
   // Release the content of all the blocks which are after the marks, and keep
   // blocks as unused.
   cutAtMark(mark.chunk, chunks_);
   for (detail::BumpChunk& bc : released) {
     bc.release();
+
+    // Chunks moved from (after a mark) in chunks_ to unused_ are no longer
+    // considered small allocations.
+    smallAllocsSize_ -= bc.computedSizeOfIncludingThis();
   }
   unused_.appendAll(std::move(released));
 
   // Free the content of all the blocks which are after the marks.
   cutAtMark(mark.oversize, oversize_);
   while (!released.empty()) {
     UniqueBumpChunk bc = released.popFirst();
     decrementCurSize(bc->computedSizeOfIncludingThis());
-    oversizeSize_ -= bc->computedSizeOfIncludingThis();
   }
 }
 
 void LifoAlloc::steal(LifoAlloc* other) {
   MOZ_ASSERT(!other->markCount);
   MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
   MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
   MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());
@@ -348,35 +357,41 @@ void LifoAlloc::steal(LifoAlloc* other) 
   chunks_ = std::move(other->chunks_);
   oversize_ = std::move(other->oversize_);
   unused_ = std::move(other->unused_);
   markCount = other->markCount;
   defaultChunkSize_ = other->defaultChunkSize_;
   oversizeThreshold_ = other->oversizeThreshold_;
   curSize_ = other->curSize_;
   peakSize_ = Max(peakSize_, other->peakSize_);
-  oversizeSize_ = other->oversizeSize_;
+  smallAllocsSize_ = other->smallAllocsSize_;
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
   fallibleScope_ = other->fallibleScope_;
 #endif
 
   other->reset(defaultChunkSize_);
 }
 
 void LifoAlloc::transferFrom(LifoAlloc* other) {
   MOZ_ASSERT(!markCount);
   MOZ_ASSERT(!other->markCount);
 
+  // Transferred chunks are not counted as part of |smallAllocsSize| as this
+  // could introduce bias in the |NextSize| heuristics, leading to
+  // over-allocations in *this* LifoAlloc. As well, to avoid interference with
+  // small allocations made with |this|, the last chunk of the |chunks_| list
+  // should remain the last chunk. Therefore, the transferred chunks are
+  // prepended to the |chunks_| list.
   incrementCurSize(other->curSize_);
-  oversizeSize_ += other->oversizeSize_;
+
   appendUnused(std::move(other->unused_));
   chunks_.prependAll(std::move(other->chunks_));
   oversize_.prependAll(std::move(other->oversize_));
   other->curSize_ = 0;
-  other->oversizeSize_ = 0;
+  other->smallAllocsSize_ = 0;
 }
 
 void LifoAlloc::transferUnusedFrom(LifoAlloc* other) {
   MOZ_ASSERT(!markCount);
 
   size_t size = 0;
   for (detail::BumpChunk& bc : other->unused_) {
     size += bc.computedSizeOfIncludingThis();
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -511,31 +511,38 @@ class LifoAlloc {
   // List of chunks containing allocated data of size smaller than the default
   // chunk size. In the common case, the last chunk of this list is always
   // used to perform the allocations. When the allocation cannot be performed,
   // we move a Chunk from the unused set to the list of used chunks.
   BumpChunkList chunks_;
 
   // List of chunks containing allocated data where each allocation is larger
   // than the oversize threshold. Each chunk contains exactly on allocation.
-  // This reduces wasted space in the normal chunk list.
+  // This reduces wasted space in the chunk list.
   //
   // Oversize chunks are allocated on demand and freed as soon as they are
   // released, instead of being pushed to the unused list.
   BumpChunkList oversize_;
 
   // Set of unused chunks, which can be reused for future allocations.
   BumpChunkList unused_;
 
   size_t markCount;
   size_t defaultChunkSize_;
   size_t oversizeThreshold_;
+
+  // Size of all chunks in chunks_, oversize_, unused_ lists.
   size_t curSize_;
   size_t peakSize_;
-  size_t oversizeSize_;
+
+  // Size of all chunks containing small bump allocations. This heuristic is
+  // used to compute growth rate while ignoring chunks such as oversized,
+  // now-unused, or transferred (which followed their own growth patterns).
+  size_t smallAllocsSize_;
+
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
   bool fallibleScope_;
 #endif
 
   void operator=(const LifoAlloc&) = delete;
   LifoAlloc(const LifoAlloc&) = delete;
 
   // Return a BumpChunk that can perform an allocation of at least size |n|.
@@ -568,16 +575,17 @@ class LifoAlloc {
     curSize_ += size;
     if (curSize_ > peakSize_) {
       peakSize_ = curSize_;
     }
   }
   void decrementCurSize(size_t size) {
     MOZ_ASSERT(curSize_ >= size);
     curSize_ -= size;
+    MOZ_ASSERT(curSize_ >= smallAllocsSize_);
   }
 
   void* allocImplColdPath(size_t n);
   void* allocImplOversize(size_t n);
 
   MOZ_ALWAYS_INLINE
   void* allocImpl(size_t n) {
     void* result;
@@ -766,26 +774,32 @@ class LifoAlloc {
   void release(Mark mark);
 
  private:
   void cancelMark(Mark mark) { markCount--; }
 
  public:
   void releaseAll() {
     MOZ_ASSERT(!markCount);
+
+    // When releasing all chunks, we can no longer determine which chunks were
+    // transferred and which were not, so simply clear the heuristic to zero
+    // right away.
+    smallAllocsSize_ = 0;
+
     for (detail::BumpChunk& bc : chunks_) {
       bc.release();
     }
     unused_.appendAll(std::move(chunks_));
+
     // On release, we free any oversize allocations instead of keeping them
     // in unused chunks.
     while (!oversize_.empty()) {
       UniqueBumpChunk bc = oversize_.popFirst();
       decrementCurSize(bc->computedSizeOfIncludingThis());
-      oversizeSize_ -= bc->computedSizeOfIncludingThis();
     }
   }
 
   // Protect the content of the LifoAlloc chunks.
 #ifdef LIFO_CHUNK_PROTECT
   void setReadOnly();
   void setReadWrite();
 #else