Bug 1472638 - Only protect the last chunk when we append a new chunk. r=tcampbell, a=lizzard
author: Nicolas B. Pierron <nicolas.b.pierron@gmail.com>
date: Mon, 02 Jul 2018 12:41:18 +0000
changeset 477911 bf0bb0c068781fe2aeef71dea5162bb4da6a9833
parent 477910 1a3e6da8c4dfed54c83d5f7b5a6176aa5d09e6ce
child 477912 36d63961e4f89660ec4268de764873fce274ab91
push id: 9462
push user: ryanvm@gmail.com
push date: Thu, 12 Jul 2018 00:35:00 +0000
treeherder: mozilla-beta@bf0bb0c06878 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: tcampbell, lizzard
bugs: 1472638
milestone: 62.0
Bug 1472638 - Only protect the last chunk when we append a new chunk. r=tcampbell, a=lizzard
js/src/ds/LifoAlloc.cpp
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -201,51 +201,56 @@ LifoAlloc::newChunkWithCapacity(size_t n
 }
 
 bool
 LifoAlloc::getOrCreateChunk(size_t n)
 {
     // This function is adding a new BumpChunk in which all upcoming allocation
     // would be made. Thus, we protect against out-of-bounds the last chunk in
     // which we did our previous allocations.
-    if (!chunks_.empty())
-        chunks_.last()->setRWUntil(Loc::Reserved);
+    auto protectLast = [&]() {
+        if (!chunks_.empty())
+            chunks_.last()->setRWUntil(Loc::Reserved);
+    };
 
     // Look for existing unused BumpChunks to satisfy the request, and pick the
     // first one which is large enough, and move it into the list of used
     // chunks.
     if (!unused_.empty()) {
         if (unused_.begin()->canAlloc(n)) {
+            protectLast();
             chunks_.append(unused_.popFirst());
             chunks_.last()->setRWUntil(Loc::End);
             return true;
         }
 
         BumpChunkList::Iterator e(unused_.end());
         for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get(); ++i) {
             detail::BumpChunk* elem = i->next();
             MOZ_ASSERT(elem->empty());
             if (elem->canAlloc(n)) {
                 BumpChunkList temp = unused_.splitAfter(i.get());
+                protectLast();
                 chunks_.append(temp.popFirst());
                 unused_.appendAll(std::move(temp));
                 chunks_.last()->setRWUntil(Loc::End);
                 return true;
             }
         }
     }
 
     // Allocate a new BumpChunk with enough space for the next allocation.
     BumpChunk newChunk = newChunkWithCapacity(n);
     if (!newChunk)
         return false;
     size_t size = newChunk->computedSizeOfIncludingThis();
     // The last chunk in which allocations are performed should be protected
     // with setRWUntil(Loc::End), but this is not necessary here because any new
     // allocation should be protected as RW already.
+    protectLast();
     chunks_.append(std::move(newChunk));
     incrementCurSize(size);
     return true;
 }
 
 void
 LifoAlloc::transferFrom(LifoAlloc* other)
 {