Bug 1467116 - Add a red-zone for each LifoAlloc allocation. r=tcampbell
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -36,25 +36,16 @@ BumpChunk::newWithCapacity(size_t size,
// We assume that LIFO_ALLOC_ALIGN is no greater than the alignment provided
// by the underlying memory allocator -- creating a new BumpChunk should
// always satisfy the LIFO_ALLOC_ALIGN alignment constraint.
MOZ_ASSERT(AlignPtr(result->begin()) == result->begin());
return result;
}
-bool
-BumpChunk::canAlloc(size_t n)
-{
- uint8_t* aligned = AlignPtr(bump_);
- uint8_t* newBump = aligned + n;
- // bump_ <= newBump, is necessary to catch overflow.
- return bump_ <= newBump && newBump <= capacity_;
-}
-
#ifdef LIFO_CHUNK_PROTECT
static const uint8_t*
AlignPtrUp(const uint8_t* ptr, uintptr_t align) {
MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
uintptr_t uptr = uintptr_t(ptr);
uintptr_t diff = uptr & (align - 1);
diff = (align - diff) & (align - 1);
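    // For example, with align == 8 and a pointer at 0x1003: diff is first
    // 0x1003 & 7 == 3, then (8 - 3) & 7 == 5, so the pointer is rounded
    // up to 0x1008.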
@@ -181,34 +172,34 @@ LifoAlloc::freeAll()
}
LifoAlloc::UniqueBumpChunk
LifoAlloc::newChunkWithCapacity(size_t n)
{
MOZ_ASSERT(fallibleScope_, "[OOM] Cannot allocate a new chunk in an infallible scope.");
// Compute the size which should be requested in order to be able to fit |n|
- // bytes in the newly allocated chunk, or default the |defaultChunkSize_|.
- size_t defaultChunkFreeSpace = defaultChunkSize_ - detail::BumpChunkReservedSpace;
- size_t chunkSize;
- if (n > defaultChunkFreeSpace) {
- MOZ_ASSERT(defaultChunkFreeSpace < defaultChunkSize_);
- size_t allocSizeWithCanaries = n + (defaultChunkSize_ - defaultChunkFreeSpace);
+ // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.
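+ // Simulate a chunk laid out from a null base pointer: the reserved
+ // chunk header comes first, followed by the aligned allocation and its
+ // trailing red zone.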
+ uint8_t* u8begin = nullptr;
+ uint8_t* u8end = u8begin + detail::BumpChunkReservedSpace;
+ u8end = detail::BumpChunk::nextAllocEnd(detail::BumpChunk::nextAllocBase(u8end), n);
+ size_t allocSizeWithCanaries = u8end - u8begin;
- // Guard for overflow.
- if (allocSizeWithCanaries < n ||
- (allocSizeWithCanaries & (size_t(1) << (BitSize<size_t>::value - 1))))
- {
- return nullptr;
- }
+ // Guard for overflow.
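+ // (Checking the top bit also ensures that the RoundUpPow2 call below
+ // cannot overflow.)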
+ if (allocSizeWithCanaries < n ||
+ (allocSizeWithCanaries & (size_t(1) << (BitSize<size_t>::value - 1))))
+ {
+ return nullptr;
+ }
+ size_t chunkSize;
+ if (allocSizeWithCanaries > defaultChunkSize_)
chunkSize = RoundUpPow2(allocSizeWithCanaries);
- } else {
+ else
chunkSize = defaultChunkSize_;
- }
bool protect = false;
#ifdef LIFO_CHUNK_PROTECT
protect = protect_;
// In a few cases where we keep adding memory protection, we might OOM while
// doing an mprotect / VirtualProtect due to the consumption of space in the
// page table reserved by the system. This error appears as an OOM on Linux,
// as an invalid-parameter error on Windows, and as a crash on OS X. This code caps
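
For illustration, a rough walk-through of the size computation above, under
assumed values (BumpChunkReservedSpace == 32, RedZoneSize == 16, and an 8-byte
LIFO_ALLOC_ALIGN; the real constants are defined in LifoAlloc.h):

    // newChunkWithCapacity(n = 100), under the assumptions above:
    //   u8end = nullptr + 32                // BumpChunkReservedSpace
    //   u8end = AlignPtr(u8end) + 100 + 16  // nextAllocBase / nextAllocEnd
    //   allocSizeWithCanaries == 32 + 100 + 16 == 148
    // 148 <= defaultChunkSize_, so the chunk keeps the default size.
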
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -219,17 +219,17 @@ class BumpChunk : public SingleLinkedLis
uint8_t* const capacity_;
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
// Magic number used to check against poisoned values.
const uintptr_t magic_ : 24;
static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966);
#endif
-#if defined(DEBUG) || defined(MOZ_ASAN)
+#if defined(DEBUG)
# define LIFO_CHUNK_PROTECT 1
#endif
#ifdef LIFO_CHUNK_PROTECT
// Constant used to know if the current chunk should be protected. This is
// mainly used to prevent deadlocks in the MemoryProtectionExceptionHandler
// methods.
const uintptr_t protect_ : 1;
@@ -245,16 +245,17 @@ class BumpChunk : public SingleLinkedLis
static constexpr int undefinedChunkMemory = 0xcd;
// Byte used for poisoning uninitialized memory after reserving memory.
static constexpr int uninitializedChunkMemory = 0xce;
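+ // Note: a region which is already flagged as no-access must be marked
+ // undefined again before memset can write the poison pattern into it;
+ // hence the MOZ_MAKE_MEM_UNDEFINED call in the macro below.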
# define LIFO_MAKE_MEM_NOACCESS(addr, size) \
do { \
uint8_t* base = (addr); \
size_t sz = (size); \
+ MOZ_MAKE_MEM_UNDEFINED(base, sz); \
memset(base, undefinedChunkMemory, sz); \
MOZ_MAKE_MEM_NOACCESS(base, sz); \
} while (0)
# define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
do { \
uint8_t* base = (addr); \
size_t sz = (size); \
@@ -264,16 +265,23 @@ class BumpChunk : public SingleLinkedLis
} while(0)
#elif defined(MOZ_HAVE_MEM_CHECKS)
# define LIFO_HAVE_MEM_CHECKS 1
# define LIFO_MAKE_MEM_NOACCESS(addr, size) MOZ_MAKE_MEM_NOACCESS((addr), (size))
# define LIFO_MAKE_MEM_UNDEFINED(addr, size) MOZ_MAKE_MEM_UNDEFINED((addr), (size))
#endif
+#ifdef LIFO_HAVE_MEM_CHECKS
+ // Red zone reserved after each allocation.
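+ // The red zone is kept poisoned so that memory checkers report reads
+ // and writes which run past the end of an allocation.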
+ static constexpr size_t RedZoneSize = 16;
+#else
+ static constexpr size_t RedZoneSize = 0;
+#endif
+
void assertInvariants() {
MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
MOZ_ASSERT(begin() <= end());
MOZ_ASSERT(end() <= capacity_);
}
BumpChunk& operator=(const BumpChunk&) = delete;
BumpChunk(const BumpChunk&) = delete;
@@ -314,20 +322,25 @@ class BumpChunk : public SingleLinkedLis
// The memory is flagged as undefined when the bump pointer is moving
// forward.
void setBump(uint8_t* newBump) {
assertInvariants();
MOZ_ASSERT(begin() <= newBump);
MOZ_ASSERT(newBump <= capacity_);
#if defined(LIFO_HAVE_MEM_CHECKS)
// Poison/unpoison memory that we just freed/allocated.
- if (bump_ > newBump)
+ if (bump_ > newBump) {
LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
- else if (newBump > bump_)
- LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - bump_);
+ } else if (newBump > bump_) {
+ MOZ_ASSERT(newBump - RedZoneSize >= bump_);
+ LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - RedZoneSize - bump_);
+ // The area [newBump - RedZoneSize, newBump) is already flagged as
+ // no-access, either by the shrinking branch above or by the
+ // BumpChunk constructor. No need to mark it twice.
+ }
#endif
bump_ = newBump;
}
public:
~BumpChunk() {
release();
removeMProtectHandler();
@@ -412,33 +425,47 @@ class BumpChunk : public SingleLinkedLis
// Release the memory allocated in this chunk since the corresponding mark
// got created. This function does not call any of the destructors.
void release(Mark m) {
MOZ_RELEASE_ASSERT(contains(m));
setBump(m.bump_);
}
+ // Given a pointer into a bump chunk, compute the base and end pointers
+ // of the next allocation. This keeps allocations consistent and makes
+ // it possible to iterate over allocations of known sizes.
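+ //
+ // For an allocation of |n| bytes starting at base |b|:
+ //
+ //   [b, b + n)                    user data
+ //   [b + n, b + n + RedZoneSize)  poisoned red zone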
+ static uint8_t* nextAllocBase(uint8_t* e) {
+ return detail::AlignPtr(e);
+ }
+ static uint8_t* nextAllocEnd(uint8_t* b, size_t n) {
+ return b + n + RedZoneSize;
+ }
+
// Returns true if the unused space is large enough for an allocation of
// |n| bytes.
- bool canAlloc(size_t n);
+ bool canAlloc(size_t n) const {
+ uint8_t* newBump = nextAllocEnd(nextAllocBase(end()), n);
+ // The |bump_ <= newBump| check is necessary to catch overflow.
+ return bump_ <= newBump && newBump <= capacity_;
+ }
// Space remaining in the current chunk.
size_t unused() const {
- uint8_t* aligned = AlignPtr(end());
+ uint8_t* aligned = nextAllocBase(end());
if (aligned < capacity_)
return capacity_ - aligned;
return 0;
}
// Try to perform an allocation of size |n|; returns nullptr if not possible.
MOZ_ALWAYS_INLINE
void* tryAlloc(size_t n) {
- uint8_t* aligned = AlignPtr(end());
- uint8_t* newBump = aligned + n;
+ uint8_t* aligned = nextAllocBase(end());
+ uint8_t* newBump = nextAllocEnd(aligned, n);
if (newBump > capacity_)
return nullptr;
// Check for overflow.
if (MOZ_UNLIKELY(newBump < bump_))
return nullptr;
@@ -898,25 +925,25 @@ class LifoAlloc
// Read head (must be within chunk_).
uint8_t* head_;
// If there is not enough room in the remaining block for |size|,
// advance to the next block and update the position.
uint8_t* seekBaseAndAdvanceBy(size_t size) {
MOZ_ASSERT(!empty());
- uint8_t* aligned = detail::AlignPtr(head_);
- if (aligned + size > chunkIt_->end()) {
+ uint8_t* aligned = detail::BumpChunk::nextAllocBase(head_);
+ if (detail::BumpChunk::nextAllocEnd(aligned, size) > chunkIt_->end()) {
++chunkIt_;
aligned = chunkIt_->begin();
// The current code assumes that if we have a chunk, then we
// have allocated something in it.
MOZ_ASSERT(!chunkIt_->empty());
}
- head_ = aligned + size;
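+ // Skip both the allocation and its trailing red zone.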
+ head_ = detail::BumpChunk::nextAllocEnd(aligned, size);
MOZ_ASSERT(head_ <= chunkIt_->end());
return aligned;
}
public:
explicit Enum(LifoAlloc& alloc)
: chunkIt_(alloc.chunks_.begin()),
chunkEnd_(alloc.chunks_.end()),
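
As a hypothetical illustration (not part of this patch; the usage below is a
sketch and assumes a fallible LifoAlloc::alloc), the red zone turns a silent
linear overflow into an error that a memory checker reports:

    // Sketch: with red zones in place, Valgrind/ASan flag the
    // out-of-bounds write below, which previously could scribble over
    // the bytes of the next allocation unnoticed.
    js::LifoAlloc lifo(4096);  // 4 KiB default chunk size
    char* p = static_cast<char*>(lifo.alloc(8));
    if (p)
        p[8] = 0;  // one byte past the allocation: lands in the red zone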