Bug 1489572 - LifoAlloc: Move cold code to LifoAlloc.cpp. r=tcampbell
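
Split the cold paths of allocImpl() and ensureUnusedApproximate() into
out-of-line allocImplColdPath() and ensureUnusedApproximateColdPath(), and
move them, together with mark(), release(), steal(), setReadOnly() and
setReadWrite(), from LifoAlloc.h to LifoAlloc.cpp. Only the hot fast paths
remain inline in the header, which keeps the code expanded at allocation
sites small. Also rename allocInSize() to newWithSize() and update its
caller in Parser.cpp.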
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -198,16 +198,107 @@ LifoAlloc::getOrCreateChunk(size_t n)
return false;
}
size_t size = newChunk->computedSizeOfIncludingThis();
chunks_.append(std::move(newChunk));
incrementCurSize(size);
return true;
}
+void*
+LifoAlloc::allocImplColdPath(size_t n)
+{
+ void* result;
+ if (!getOrCreateChunk(n)) {
+ return nullptr;
+ }
+
+ // Since we just created a large enough chunk, this can't fail.
+ result = chunks_.last()->tryAlloc(n);
+ MOZ_ASSERT(result);
+ return result;
+}
+
+bool
+LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total)
+{
+ for (detail::BumpChunk& bc : unused_) {
+ total += bc.unused();
+ if (total >= n) {
+ return true;
+ }
+ }
+
+ UniqueBumpChunk newChunk = newChunkWithCapacity(n);
+ if (!newChunk) {
+ return false;
+ }
+ size_t size = newChunk->computedSizeOfIncludingThis();
+ unused_.pushFront(std::move(newChunk));
+ incrementCurSize(size);
+ return true;
+}
+
+LifoAlloc::Mark
+LifoAlloc::mark()
+{
+ markCount++;
+ if (chunks_.empty()) {
+ return Mark();
+ }
+ return chunks_.last()->mark();
+}
+
+void
+LifoAlloc::release(Mark mark)
+{
+ markCount--;
+
+ // Move the blocks which are after the mark to the set of unused chunks.
+ BumpChunkList released;
+ if (!mark.markedChunk()) {
+ released = std::move(chunks_);
+ } else {
+ released = chunks_.splitAfter(mark.markedChunk());
+ }
+
+ // Release the content of all the blocks which are after the mark.
+ for (detail::BumpChunk& bc : released) {
+ bc.release();
+ }
+ unused_.appendAll(std::move(released));
+
+ // Release everything which follows the mark in the last chunk.
+ if (!chunks_.empty()) {
+ chunks_.last()->release(mark);
+ }
+}
+
+void
+LifoAlloc::steal(LifoAlloc* other)
+{
+ MOZ_ASSERT(!other->markCount);
+ MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
+ MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
+
+ // Copy everything from |other| to |this| except for |peakSize_|, which
+ // requires some care.
+ chunks_ = std::move(other->chunks_);
+ unused_ = std::move(other->unused_);
+ markCount = other->markCount;
+ defaultChunkSize_ = other->defaultChunkSize_;
+ curSize_ = other->curSize_;
+ peakSize_ = Max(peakSize_, other->peakSize_);
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ fallibleScope_ = other->fallibleScope_;
+#endif
+
+ other->reset(defaultChunkSize_);
+}
+
void
LifoAlloc::transferFrom(LifoAlloc* other)
{
MOZ_ASSERT(!markCount);
MOZ_ASSERT(!other->markCount);
incrementCurSize(other->curSize_);
appendUnused(std::move(other->unused_));
@@ -224,8 +315,32 @@ LifoAlloc::transferUnusedFrom(LifoAlloc*
for (detail::BumpChunk& bc : other->unused_) {
size += bc.computedSizeOfIncludingThis();
}
appendUnused(std::move(other->unused_));
incrementCurSize(size);
other->decrementCurSize(size);
}
+
+#ifdef LIFO_CHUNK_PROTECT
+void
+LifoAlloc::setReadOnly()
+{
+ for (detail::BumpChunk& bc : chunks_) {
+ bc.setReadOnly();
+ }
+ for (detail::BumpChunk& bc : unused_) {
+ bc.setReadOnly();
+ }
+}
+
+void
+LifoAlloc::setReadWrite()
+{
+ for (detail::BumpChunk& bc : chunks_) {
+ bc.setReadWrite();
+ }
+ for (detail::BumpChunk& bc : unused_) {
+ bc.setReadWrite();
+ }
+}
+#endif
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -579,63 +579,44 @@ class LifoAlloc
peakSize_ = curSize_;
}
}
void decrementCurSize(size_t size) {
MOZ_ASSERT(curSize_ >= size);
curSize_ -= size;
}
+ // Fall-back path of allocImpl, taken when the last chunk cannot satisfy
+ // the request: get or create a chunk which can hold |n| bytes.
+ void* allocImplColdPath(size_t n);
+
MOZ_ALWAYS_INLINE
void* allocImpl(size_t n) {
void* result;
if (!chunks_.empty() && (result = chunks_.last()->tryAlloc(n))) {
return result;
}
-
- if (!getOrCreateChunk(n)) {
- return nullptr;
- }
+ return allocImplColdPath(n);
+ }
- // Since we just created a large enough chunk, this can't fail.
- result = chunks_.last()->tryAlloc(n);
- MOZ_ASSERT(result);
- return result;
- }
+ // Check for space in unused chunks or allocate a new unused chunk.
+ MOZ_MUST_USE bool ensureUnusedApproximateColdPath(size_t n, size_t total);
public:
explicit LifoAlloc(size_t defaultChunkSize)
: peakSize_(0)
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
, fallibleScope_(true)
#endif
{
reset(defaultChunkSize);
}
// Steal allocated chunks from |other|.
- void steal(LifoAlloc* other) {
- MOZ_ASSERT(!other->markCount);
- MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
- MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
-
- // Copy everything from |other| to |this| except for |peakSize_|, which
- // requires some care.
- chunks_ = std::move(other->chunks_);
- unused_ = std::move(other->unused_);
- markCount = other->markCount;
- defaultChunkSize_ = other->defaultChunkSize_;
- curSize_ = other->curSize_;
- peakSize_ = Max(peakSize_, other->peakSize_);
-#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
- fallibleScope_ = other->fallibleScope_;
-#endif
-
- other->reset(defaultChunkSize_);
- }
+ void steal(LifoAlloc* other);
// Append all chunks from |other|. They are removed from |other|.
void transferFrom(LifoAlloc* other);
// Append unused chunks from |other|. They are removed from |other|.
void transferUnusedFrom(LifoAlloc* other);
~LifoAlloc() { freeAll(); }
@@ -661,17 +640,17 @@ class LifoAlloc
JS_OOM_POSSIBLY_FAIL();
}
#endif
return allocImpl(n);
}
template<typename T, typename... Args>
MOZ_ALWAYS_INLINE T*
- allocInSize(size_t n, Args&&... args)
+ newWithSize(size_t n, Args&&... args)
{
MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
"LifoAlloc must provide enough alignment to store T");
void* ptr = alloc(n);
if (!ptr) {
return nullptr;
}
@@ -698,31 +677,17 @@ class LifoAlloc
size_t total = 0;
if (!chunks_.empty()) {
total += chunks_.last()->unused();
if (total >= n) {
return true;
}
}
- for (detail::BumpChunk& bc : unused_) {
- total += bc.unused();
- if (total >= n) {
- return true;
- }
- }
-
- UniqueBumpChunk newChunk = newChunkWithCapacity(n);
- if (!newChunk) {
- return false;
- }
- size_t size = newChunk->computedSizeOfIncludingThis();
- unused_.pushFront(std::move(newChunk));
- incrementCurSize(size);
- return true;
+ return ensureUnusedApproximateColdPath(n, total);
}
MOZ_ALWAYS_INLINE
void setAsInfallibleByDefault() {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
fallibleScope_ = false;
#endif
}
@@ -765,74 +730,31 @@ class LifoAlloc
size_t bytes;
if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
return nullptr;
}
return static_cast<T*>(alloc(bytes));
}
using Mark = detail::BumpChunk::Mark;
-
- Mark mark() {
- markCount++;
- if (chunks_.empty()) {
- return Mark();
- }
- return chunks_.last()->mark();
- }
-
- void release(Mark mark) {
- markCount--;
-
- // Move the blocks which are after the mark to the set of unused chunks.
- BumpChunkList released;
- if (!mark.markedChunk()) {
- released = std::move(chunks_);
- } else {
- released = chunks_.splitAfter(mark.markedChunk());
- }
-
- // Release the content of all the blocks which are after the marks.
- for (detail::BumpChunk& bc : released) {
- bc.release();
- }
- unused_.appendAll(std::move(released));
-
- // Release everything which follows the mark in the last chunk.
- if (!chunks_.empty()) {
- chunks_.last()->release(mark);
- }
- }
+ Mark mark();
+ void release(Mark mark);
void releaseAll() {
MOZ_ASSERT(!markCount);
for (detail::BumpChunk& bc : chunks_) {
bc.release();
}
unused_.appendAll(std::move(chunks_));
}
// Protect the content of the LifoAlloc chunks.
#ifdef LIFO_CHUNK_PROTECT
- void setReadOnly() {
- for (detail::BumpChunk& bc : chunks_) {
- bc.setReadOnly();
- }
- for (detail::BumpChunk& bc : unused_) {
- bc.setReadOnly();
- }
- }
- void setReadWrite() {
- for (detail::BumpChunk& bc : chunks_) {
- bc.setReadWrite();
- }
- for (detail::BumpChunk& bc : unused_) {
- bc.setReadWrite();
- }
- }
+ void setReadOnly();
+ void setReadWrite();
#else
void setReadOnly() const {}
void setReadWrite() const {}
#endif
// Get the total "used" (occupied bytes) count for the arena chunks.
size_t used() const {
size_t accum = 0;
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -1933,17 +1933,17 @@ Parser<FullParseHandler, Unit>::checkSta
}
template <typename Scope>
typename Scope::Data*
NewEmptyBindingData(JSContext* cx, LifoAlloc& alloc, uint32_t numBindings)
{
using Data = typename Scope::Data;
size_t allocSize = SizeOfData<typename Scope::Data>(numBindings);
- auto* bindings = alloc.allocInSize<Data>(allocSize, numBindings);
+ auto* bindings = alloc.newWithSize<Data>(allocSize, numBindings);
if (!bindings) {
ReportOutOfMemory(cx);
}
return bindings;
}
/**
* Copy-construct |BindingName|s from |bindings| into |cursor|, then return