Bug 1472638 - Only protect the last chunk when we append a new chunk. r=tcampbell
author: Nicolas B. Pierron <nicolas.b.pierron@gmail.com>
Mon, 02 Jul 2018 12:41:18 +0000
changeset 425327 b0328b70458abdb81d3cd40de14b411be0117193
parent 425326 cf8b3521ee576e2b834937bbbea0fce96f1c56aa
child 425328 aee8a4964b591817e92a80a8a46d7e18dbf8bc5c
push id: 105024
push user: npierron@mozilla.com
push date: Fri, 06 Jul 2018 12:09:09 +0000
treeherder: mozilla-inbound@b0328b70458a [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: tcampbell
bugs: 1472638
milestone: 63.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1472638 - Only protect the last chunk when we append a new chunk. r=tcampbell
js/src/ds/LifoAlloc.cpp
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -201,51 +201,56 @@ LifoAlloc::newChunkWithCapacity(size_t n
 }
 
 bool
 LifoAlloc::getOrCreateChunk(size_t n)
 {
     // This function is adding a new BumpChunk in which all upcoming allocation
     // would be made. Thus, we protect against out-of-bounds the last chunk in
     // which we did our previous allocations.
-    if (!chunks_.empty())
-        chunks_.last()->setRWUntil(Loc::Reserved);
+    auto protectLast = [&]() {
+        if (!chunks_.empty())
+            chunks_.last()->setRWUntil(Loc::Reserved);
+    };
 
     // Look for existing unused BumpChunks to satisfy the request, and pick the
     // first one which is large enough, and move it into the list of used
     // chunks.
     if (!unused_.empty()) {
         if (unused_.begin()->canAlloc(n)) {
+            protectLast();
             chunks_.append(unused_.popFirst());
             chunks_.last()->setRWUntil(Loc::End);
             return true;
         }
 
         BumpChunkList::Iterator e(unused_.end());
         for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get(); ++i) {
             detail::BumpChunk* elem = i->next();
             MOZ_ASSERT(elem->empty());
             if (elem->canAlloc(n)) {
                 BumpChunkList temp = unused_.splitAfter(i.get());
+                protectLast();
                 chunks_.append(temp.popFirst());
                 unused_.appendAll(std::move(temp));
                 chunks_.last()->setRWUntil(Loc::End);
                 return true;
             }
         }
     }
 
     // Allocate a new BumpChunk with enough space for the next allocation.
     BumpChunk newChunk = newChunkWithCapacity(n);
     if (!newChunk)
         return false;
     size_t size = newChunk->computedSizeOfIncludingThis();
     // The last chunk in which allocations are performed should be protected
     // with setRWUntil(Loc::End), but this is not necessary here because any new
     // allocation should be protected as RW already.
+    protectLast();
     chunks_.append(std::move(newChunk));
     incrementCurSize(size);
     return true;
 }
 
 void
 LifoAlloc::transferFrom(LifoAlloc* other)
 {