Bug 1437600 - Use mprotect to prevent mutations of inaccessible regions. r=luke
author: Nicolas B. Pierron <nicolas.b.pierron@gmail.com>
date: Wed, 14 Feb 2018 17:12:00 +0000
changeset 423157 44a69a4ebc090ab49ed9872cb331c3de9749a025
parent 423156 7af2b112f3758a63801689d185eea7d03f3a1be0
child 423158 0c4e97614565cb461db8a4eba531978b8ff45908
push id: 34164
push user: csabou@mozilla.com
push date: Thu, 21 Jun 2018 01:17:13 +0000
treeherder: mozilla-central@d231a3231680
reviewers: luke
bugs: 1437600
milestone: 62.0a1
js/src/ds/LifoAlloc.cpp
js/src/ds/LifoAlloc.h
js/src/ds/MemoryProtectionExceptionHandler.cpp
js/src/vm/HelperThreads.cpp
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -3,35 +3,41 @@
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ds/LifoAlloc.h"
 
 #include "mozilla/MathAlgorithms.h"
 
+#include "ds/MemoryProtectionExceptionHandler.h"
+
+#ifdef LIFO_CHUNK_PROTECT
+# include "gc/Memory.h"
+#endif
+
 using namespace js;
 
 using mozilla::RoundUpPow2;
 using mozilla::tl::BitSize;
 
 namespace js {
 namespace detail {
 
 /* static */
 UniquePtr<BumpChunk>
-BumpChunk::newWithCapacity(size_t size)
+BumpChunk::newWithCapacity(size_t size, bool protect)
 {
     MOZ_ASSERT(RoundUpPow2(size) == size);
     MOZ_ASSERT(size >= sizeof(BumpChunk));
     void* mem = js_malloc(size);
     if (!mem)
         return nullptr;
 
-    UniquePtr<BumpChunk> result(new (mem) BumpChunk(size));
+    UniquePtr<BumpChunk> result(new (mem) BumpChunk(size, protect));
 
     // We assume that the alignment of LIFO_ALLOC_ALIGN is less than that of the
     // underlying memory allocator -- creating a new BumpChunk should always
     // satisfy the LIFO_ALLOC_ALIGN alignment constraint.
     MOZ_ASSERT(AlignPtr(result->begin()) == result->begin());
     return result;
 }
 
@@ -39,27 +45,119 @@ bool
 BumpChunk::canAlloc(size_t n)
 {
     uint8_t* aligned = AlignPtr(bump_);
     uint8_t* newBump = aligned + n;
    // bump_ <= newBump is necessary to catch overflow.
     return bump_ <= newBump && newBump <= capacity_;
 }
 
+#ifdef LIFO_CHUNK_PROTECT
+
+static const uint8_t*
+AlignPtrUp(const uint8_t* ptr, uintptr_t align) {
+    MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+    uintptr_t uptr = uintptr_t(ptr);
+    uintptr_t diff = uptr & (align - 1);
+    diff = (align - diff) & (align - 1);
+    uptr = uptr + diff;
+    return (uint8_t*) uptr;
+}
+
+static const uint8_t*
+AlignPtrDown(const uint8_t* ptr, uintptr_t align) {
+    MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+    uintptr_t uptr = uintptr_t(ptr);
+    uptr = uptr & ~(align - 1);
+    return (uint8_t*) uptr;
+}
+
+void
+BumpChunk::setRWUntil(Loc loc) const
+{
+    if (!protect_)
+        return;
+
+    uintptr_t pageSize = gc::SystemPageSize();
+    // The allocated chunks might not be aligned on page boundaries. This
+    // code ensures that we only change the memory protection of pages which
+    // are fully contained within the range of the BumpChunk, or that the
+    // range formed by [b .. e] is empty.
+    const uint8_t* b = base();
+    const uint8_t* e = capacity_;
+    b = AlignPtrUp(b, pageSize);
+    e = AlignPtrDown(e, pageSize);
+    if (e < b)
+        e = b;
+    // The mid-point is aligned up to the next page, and clamped to the
+    // end-point to ensure that it remains in the [b .. e] range.
+    const uint8_t* m = nullptr;
+    switch (loc) {
+      case Loc::Header:
+        m = b;
+        break;
+      case Loc::Allocated:
+        m = begin();
+        break;
+      case Loc::Reserved:
+        m = end();
+        break;
+      case Loc::End:
+        m = e;
+        break;
+    }
+    m = AlignPtrUp(m, pageSize);
+    if (e < m)
+        m = e;
+
+    if (b < m)
+        gc::UnprotectPages(const_cast<uint8_t*>(b), m - b);
+    // Note: We could use no-access protection for everything after begin(),
+    // but we need read access to the bump_ / capacity_ fields from this
+    // function in order to unprotect the memory later.
+    if (m < e)
+        gc::MakePagesReadOnly(const_cast<uint8_t*>(m), e - m);
+}
+
+// The memory protection handler catches memory access errors on the regions
+// registered with it. Instead of registering the sub-ranges manipulated by
+// setRWUntil, these methods register the full range of the BumpChunk, and
+// let the MemoryProtectionExceptionHandler report bad memory accesses while
+// parts of the chunk are protected by setRWUntil.
+void
+BumpChunk::addMProtectHandler() const
+{
+    if (!protect_)
+        return;
+    js::MemoryProtectionExceptionHandler::addRegion(const_cast<uint8_t*>(base()), capacity_ - base());
+}
+
+void
+BumpChunk::removeMProtectHandler() const
+{
+    if (!protect_)
+        return;
+    js::MemoryProtectionExceptionHandler::removeRegion(const_cast<uint8_t*>(base()));
+}
+
+#endif
+
 } // namespace detail
 } // namespace js
 
 void
 LifoAlloc::freeAll()
 {
     while (!chunks_.empty()) {
+        chunks_.begin()->setRWUntil(Loc::End);
         BumpChunk bc = chunks_.popFirst();
         decrementCurSize(bc->computedSizeOfIncludingThis());
     }
     while (!unused_.empty()) {
+        unused_.begin()->setRWUntil(Loc::End);
         BumpChunk bc = unused_.popFirst();
         decrementCurSize(bc->computedSizeOfIncludingThis());
     }
 
     // Nb: maintaining curSize_ correctly isn't easy.  Fortunately, this is an
     // excellent sanity check.
     MOZ_ASSERT(curSize_ == 0);
 }
@@ -85,53 +183,64 @@ LifoAlloc::newChunkWithCapacity(size_t n
         }
 
         chunkSize = RoundUpPow2(allocSizeWithCanaries);
     } else {
         chunkSize = defaultChunkSize_;
     }
 
     // Create a new BumpChunk, and allocate space for it.
-    BumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
+    BumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize, protect_);
     if (!result)
         return nullptr;
     MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
     return result;
 }
 
 bool
 LifoAlloc::getOrCreateChunk(size_t n)
 {
+    // This function adds a new BumpChunk in which all upcoming allocations
+    // will be made. Thus, we protect the last chunk, in which the previous
+    // allocations were made, against out-of-bounds accesses.
+    if (!chunks_.empty())
+        chunks_.last()->setRWUntil(Loc::Reserved);
+
     // Look for existing unused BumpChunks to satisfy the request, and pick the
     // first one which is large enough, and move it into the list of used
     // chunks.
     if (!unused_.empty()) {
         if (unused_.begin()->canAlloc(n)) {
             chunks_.append(unused_.popFirst());
+            chunks_.last()->setRWUntil(Loc::End);
             return true;
         }
 
         BumpChunkList::Iterator e(unused_.end());
         for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get(); ++i) {
             detail::BumpChunk* elem = i->next();
             MOZ_ASSERT(elem->empty());
             if (elem->canAlloc(n)) {
                 BumpChunkList temp = unused_.splitAfter(i.get());
                 chunks_.append(temp.popFirst());
                 unused_.appendAll(std::move(temp));
+                chunks_.last()->setRWUntil(Loc::End);
                 return true;
             }
         }
     }
 
     // Allocate a new BumpChunk with enough space for the next allocation.
     BumpChunk newChunk = newChunkWithCapacity(n);
     if (!newChunk)
         return false;
     size_t size = newChunk->computedSizeOfIncludingThis();
+    // The last chunk, in which allocations are performed, would normally be
+    // made writable with setRWUntil(Loc::End), but this is not necessary here
+    // because a newly allocated chunk is already fully read-write.
     chunks_.append(std::move(newChunk));
     incrementCurSize(size);
     return true;
 }
 
 void
 LifoAlloc::transferFrom(LifoAlloc* other)
 {
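
The subtle part of setRWUntil above is the page rounding: malloc'ed chunks are
rarely page-aligned, so only the pages fully contained within the chunk may
change protection, and a chunk smaller than one page must degenerate to an
empty range. Below is a minimal standalone sketch of that arithmetic, assuming
a 4 KiB page size (the real code queries gc::SystemPageSize() at runtime) and
using the simpler power-of-two rounding formulas, which compute the same
results as AlignPtrUp/AlignPtrDown:

    #include <cassert>
    #include <cstdint>

    static uintptr_t AlignUp(uintptr_t p, uintptr_t align) {
        // align must be a power of two.
        return (p + align - 1) & ~(align - 1);
    }

    static uintptr_t AlignDown(uintptr_t p, uintptr_t align) {
        return p & ~(align - 1);
    }

    int main() {
        const uintptr_t pageSize = 0x1000;
        // A chunk spanning [0x1010 .. 0x3ff0) fully covers only the pages in
        // [0x2000 .. 0x3000); those are the only bytes we may protect.
        assert(AlignUp(0x1010, pageSize) == 0x2000);
        assert(AlignDown(0x3ff0, pageSize) == 0x3000);
        // A chunk smaller than a page yields an empty [b .. e] range once e
        // is clamped to b, exactly as setRWUntil does.
        uintptr_t b = AlignUp(0x1010, pageSize);   // 0x2000
        uintptr_t e = AlignDown(0x1ff0, pageSize); // 0x1000
        if (e < b)
            e = b;
        assert(b == e); // nothing to protect
        return 0;
    }
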
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -211,24 +211,31 @@ AlignPtr(uint8_t* orig) {
 // and the deallocation.
 //
 // This structure is only move-able, but not copyable.
 class BumpChunk : public SingleLinkedListElement<BumpChunk>
 {
   private:
     // Pointer to the last byte allocated in this chunk.
     uint8_t* bump_;
-    // Pointer to the last byte available in this chunk.
-    const uint8_t* capacity_;
+    // Pointer to the first byte after this chunk.
+    uint8_t* const capacity_;
 
 #ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
     // Magic number used to check against poisoned values.
-    const uintptr_t magic_;
-    static constexpr uintptr_t magicNumber =
-        sizeof(uintptr_t) == 4 ? uintptr_t(0x4c69666f) : uintptr_t(0x4c69666f42756d70);
+    const uintptr_t magic_ : 24;
+    static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966);
+#endif
+
+#if defined(DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
+# define LIFO_CHUNK_PROTECT 1
+    // Flag used to know whether the current chunk should be protected. This
+    // is mainly used to prevent deadlocks in the
+    // MemoryProtectionExceptionHandler methods.
+    const uintptr_t protect_ : 1;
 #endif
 
     // Poison the memory with memset, in order to catch errors due to
     // use-after-free, with undefinedChunkMemory pattern, or to catch
     // use-before-init with uninitializedChunkMemory.
 #if defined(DEBUG)
 # define LIFO_HAVE_MEM_CHECKS 1
 
@@ -264,22 +271,25 @@ class BumpChunk : public SingleLinkedLis
         MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
         MOZ_ASSERT(begin() <= end());
         MOZ_ASSERT(end() <= capacity_);
     }
 
     BumpChunk& operator=(const BumpChunk&) = delete;
     BumpChunk(const BumpChunk&) = delete;
 
-    explicit BumpChunk(uintptr_t capacity)
+    explicit BumpChunk(uintptr_t capacity, bool protect)
       : bump_(begin()),
         capacity_(base() + capacity)
 #ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
       , magic_(magicNumber)
 #endif
+#ifdef LIFO_CHUNK_PROTECT
+      , protect_(protect ? 1 : 0)
+#endif
     {
         // We cannot bake this value inside the BumpChunk class, because
         // sizeof(BumpChunk) can only be computed after the closing brace of the
         // BumpChunk class, or within one of its methods. As a work-around, the
         // reservedSpace value is baked in, and we check that it indeed matches
         // with the space taken by the data of the BumpChunk class, and the
         // alignment of a pointer.
         MOZ_ASSERT(BumpChunk::reservedSpace == AlignBytes(sizeof(BumpChunk), LIFO_ALLOC_ALIGN),
@@ -287,16 +297,17 @@ class BumpChunk : public SingleLinkedLis
 
         assertInvariants();
 #if defined(LIFO_HAVE_MEM_CHECKS)
         // The memory is freshly allocated and marked as undefined by the
         // allocator of the BumpChunk. Instead, we mark this memory as
         // no-access, as it has not been allocated within the BumpChunk.
         LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
 #endif
+        addMProtectHandler();
     }
 
     // Cast |this| into a uint8_t* pointer.
     //
     // Warning: Are you sure you do not want to use begin() instead?
     const uint8_t* base() const { return reinterpret_cast<const uint8_t*>(this); }
     uint8_t* base() { return reinterpret_cast<uint8_t*>(this); }
 
@@ -321,16 +332,17 @@ class BumpChunk : public SingleLinkedLis
             LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - bump_);
 #endif
         bump_ = newBump;
     }
 
   public:
     ~BumpChunk() {
         release();
+        removeMProtectHandler();
     }
 
     // Space reserved for the BumpChunk internal data, and the alignment of the
     // first allocation content.  This can be used to ensure there is enough
     // space for the next allocation. (see LifoAlloc::newChunkWithCapacity)
     static constexpr size_t reservedSpace = 4 * sizeof(uintptr_t);
 
     // Returns true if this chunk contains no allocated content.
@@ -345,17 +357,20 @@ class BumpChunk : public SingleLinkedLis
     // LifoAlloc::Enum)
     const uint8_t* begin() const { return base() + reservedSpace; }
     uint8_t* begin() { return base() + reservedSpace; }
     uint8_t* end() const { return bump_; }
 
     // This function is the only way to allocate and construct a chunk. It
     // returns a UniquePtr to the newly allocated chunk.  The size given as
     // argument includes the space needed for the header of the chunk.
-    static UniquePtr<BumpChunk> newWithCapacity(size_t size);
+    //
+    // The protect boolean indicates whether the BumpChunk memory should be
+    // registered with the MemoryProtectionExceptionHandler.
+    static UniquePtr<BumpChunk> newWithCapacity(size_t size, bool protect);
 
     // Report allocation.
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return mallocSizeOf(this);
     }
 
     // Report allocation size.
     size_t computedSizeOfIncludingThis() const {
@@ -438,26 +453,65 @@ class BumpChunk : public SingleLinkedLis
         // Check for overflow.
         if (MOZ_UNLIKELY(newBump < bump_))
             return nullptr;
 
         MOZ_ASSERT(canAlloc(n)); // Ensure consistency between "can" and "try".
         setBump(newBump);
         return aligned;
     }
+
+    // These locations are approximate: each boundary is rounded up to the
+    // nearest page boundary.
+    enum class Loc {
+        // Refers to the chunk header, which holds the inherited linked list;
+        // this covers any allocated and reserved bytes, from base() to capacity_.
+        //
+        // This is used when freezing a LifoAlloc, such as when moving a
+        // LifoAlloc to another thread.
+        Header    = 0,
+        // Refers to the set of allocated and reserved bytes, from
+        // PageRoundup(begin()) to capacity_.
+        //
+        // This is used when a BumpChunk is moved to the list of unused chunks,
+        // as we want the header to remain mutable.
+        Allocated = 1,
+        // Refers to the set of reserved bytes, from PageRoundup(end()) to
+        // capacity_.
+        //
+        // This is used when a BumpChunk is no longer used for allocation,
+        // while still containing live data. This should catch out-of-bounds
+        // accesses within the LifoAlloc content.
+        Reserved  = 2,
+        // Refers to the end of the BumpChunk.
+        //
+        // This is used when a BumpChunk is being used for allocation, as
+        // re-protecting the memory on each setBump would be too costly.
+        End       = 3
+    };
+#ifdef LIFO_CHUNK_PROTECT
+    void setRWUntil(Loc loc) const;
+    void addMProtectHandler() const;
+    void removeMProtectHandler() const;
+#else
+    void setRWUntil(Loc loc) const {}
+    void addMProtectHandler() const {}
+    void removeMProtectHandler() const {}
+#endif
 };
 
 } // namespace detail
 
 // LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
 //
 // Note: We leave BumpChunks latent in the set of unused chunks after they've
 // been released to avoid thrashing before a GC.
 class LifoAlloc
 {
+    using Loc = detail::BumpChunk::Loc;
     using BumpChunk = js::UniquePtr<detail::BumpChunk>;
     using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;
 
     // List of chunks containing allocated data. In the common case, the last
     // chunk of this list is always used to perform the allocations. When the
     // allocation cannot be performed, we move a Chunk from the unused set to
     // the list of used chunks.
     BumpChunkList chunks_;
@@ -467,33 +521,40 @@ class LifoAlloc
 
     size_t      markCount;
     size_t      defaultChunkSize_;
     size_t      curSize_;
     size_t      peakSize_;
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
     bool        fallibleScope_;
 #endif
+#ifdef LIFO_CHUNK_PROTECT
+    const bool  protect_;
+#endif
 
     void operator=(const LifoAlloc&) = delete;
     LifoAlloc(const LifoAlloc&) = delete;
 
     // Return a BumpChunk that can perform an allocation of at least size |n|.
     BumpChunk newChunkWithCapacity(size_t n);
 
     // Reuse or allocate a BumpChunk that can perform an allocation of at least
     // size |n|; if successful, it is placed at the end of the list of |chunks_|.
     MOZ_MUST_USE bool getOrCreateChunk(size_t n);
 
     void reset(size_t defaultChunkSize) {
         MOZ_ASSERT(mozilla::RoundUpPow2(defaultChunkSize) == defaultChunkSize);
-        while (!chunks_.empty())
+        while (!chunks_.empty()) {
+            chunks_.begin()->setRWUntil(Loc::End);
             chunks_.popFirst();
-        while (!unused_.empty())
+        }
+        while (!unused_.empty()) {
+            unused_.begin()->setRWUntil(Loc::End);
             unused_.popFirst();
+        }
         defaultChunkSize_ = defaultChunkSize;
         markCount = 0;
         curSize_ = 0;
     }
 
     // Append unused chunks to the end of this LifoAlloc.
     void appendUnused(BumpChunkList&& otherUnused) {
 #ifdef DEBUG
@@ -531,21 +592,24 @@ class LifoAlloc
 
         // Since we just created a large enough chunk, this can't fail.
         result = chunks_.last()->tryAlloc(n);
         MOZ_ASSERT(result);
         return result;
     }
 
   public:
-    explicit LifoAlloc(size_t defaultChunkSize)
+    explicit LifoAlloc(size_t defaultChunkSize, bool protect = true)
       : peakSize_(0)
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
       , fallibleScope_(true)
 #endif
+#ifdef LIFO_CHUNK_PROTECT
+      , protect_(protect)
+#endif
     {
         reset(defaultChunkSize);
     }
 
     // Steal allocated chunks from |other|.
     void steal(LifoAlloc* other) {
         MOZ_ASSERT(!other->markCount);
         MOZ_ASSERT(chunks_.empty());
@@ -636,16 +700,17 @@ class LifoAlloc
             if (total >= n)
                 return true;
         }
 
         BumpChunk newChunk = newChunkWithCapacity(n);
         if (!newChunk)
             return false;
         size_t size = newChunk->computedSizeOfIncludingThis();
+        newChunk->setRWUntil(Loc::Allocated);
         unused_.pushFront(std::move(newChunk));
         incrementCurSize(size);
         return true;
     }
 
     MOZ_ALWAYS_INLINE
     void setAsInfallibleByDefault() {
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
@@ -709,32 +774,59 @@ class LifoAlloc
         // Move the blocks which are after the mark to the set of unused chunks.
         BumpChunkList released;
         if (!mark.markedChunk())
             released = std::move(chunks_);
         else
             released = chunks_.splitAfter(mark.markedChunk());
 
         // Release the content of all the blocks which are after the marks.
-        for (detail::BumpChunk& bc : released)
+        for (detail::BumpChunk& bc : released) {
             bc.release();
+            bc.setRWUntil(Loc::Allocated);
+        }
         unused_.appendAll(std::move(released));
 
         // Release everything which follows the mark in the last chunk.
-        if (!chunks_.empty())
+        if (!chunks_.empty()) {
+            chunks_.last()->setRWUntil(Loc::End);
             chunks_.last()->release(mark);
+        }
     }
 
     void releaseAll() {
         MOZ_ASSERT(!markCount);
-        for (detail::BumpChunk& bc : chunks_)
+        for (detail::BumpChunk& bc : chunks_) {
             bc.release();
+            bc.setRWUntil(Loc::Allocated);
+        }
         unused_.appendAll(std::move(chunks_));
     }
 
+    // Protect the content of the LifoAlloc chunks.
+    void setReadOnly() {
+#ifdef LIFO_CHUNK_PROTECT
+        for (detail::BumpChunk& bc : chunks_)
+            bc.setRWUntil(Loc::Header);
+        for (detail::BumpChunk& bc : unused_)
+            bc.setRWUntil(Loc::Header);
+#endif
+    }
+    void setReadWrite() {
+#ifdef LIFO_CHUNK_PROTECT
+        BumpChunkList::Iterator e(chunks_.last());
+        for (BumpChunkList::Iterator i(chunks_.begin()); i != e; ++i)
+            i->setRWUntil(Loc::Reserved);
+        if (!chunks_.empty())
+            chunks_.last()->setRWUntil(Loc::End);
+        for (detail::BumpChunk& bc : unused_)
+            bc.setRWUntil(Loc::Allocated);
+#endif
+    }
+
     // Get the total "used" (occupied bytes) count for the arena chunks.
     size_t used() const {
         size_t accum = 0;
         for (const detail::BumpChunk& chunk : chunks_)
             accum += chunk.used();
         return accum;
     }
 
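
Each Loc value above names the boundary past which setRWUntil leaves the chunk
read-only: everything before it stays read-write. Below is a standalone sketch
of that mapping, using hypothetical byte offsets in place of begin(), end()
and capacity_, and ignoring the page rounding performed by the real
implementation:

    #include <cassert>
    #include <cstddef>

    enum class Loc { Header, Allocated, Reserved, End };

    struct ChunkLayout {
        size_t headerEnd; // begin():   end of the BumpChunk header
        size_t bump;      // end():     end of the allocated bytes
        size_t capacity;  // capacity_: end of the chunk
    };

    // First offset which becomes read-only after setRWUntil(loc); bytes
    // before it stay read-write. Mirrors the switch in setRWUntil.
    static size_t firstReadOnly(const ChunkLayout& c, Loc loc) {
        switch (loc) {
          case Loc::Header:    return 0;           // freeze the whole chunk
          case Loc::Allocated: return c.headerEnd; // header stays mutable
          case Loc::Reserved:  return c.bump;      // freeze the unused tail
          case Loc::End:       return c.capacity;  // nothing is frozen
        }
        return c.capacity;
    }

    int main() {
        // Hypothetical chunk: 64-byte header, 512 allocated bytes, 4096 total.
        ChunkLayout c{64, 64 + 512, 4096};
        assert(firstReadOnly(c, Loc::Header) == 0);      // moved to another thread
        assert(firstReadOnly(c, Loc::Allocated) == 64);  // parked on the unused list
        assert(firstReadOnly(c, Loc::Reserved) == 576);  // live data, no new allocations
        assert(firstReadOnly(c, Loc::End) == 4096);      // active allocation chunk
        return 0;
    }
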
--- a/js/src/ds/MemoryProtectionExceptionHandler.cpp
+++ b/js/src/ds/MemoryProtectionExceptionHandler.cpp
@@ -60,17 +60,19 @@ class ProtectedRegionTree
 
     Mutex lock;
     LifoAlloc alloc;
     SplayTree<Region, Region> tree;
 
   public:
     ProtectedRegionTree()
       : lock(mutexid::ProtectedRegionTree),
-        alloc(4096),
+        // Here "false" is used to not use the memory protection mechanism of
+        // LifoAlloc in order to prevent dead-locks.
+        alloc(4096, false),
         tree(&alloc)
     {
         sProtectedRegionsInit = true;
     }
 
     ~ProtectedRegionTree() {
         // See Bug 1445619: Currently many users of the JS engine are leaking
         // the world, unfortunately LifoAlloc owned by JSRuntimes have
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -186,16 +186,20 @@ js::CancelOffThreadWasmTier2Generator()
 }
 
 bool
 js::StartOffThreadIonCompile(jit::IonBuilder* builder, const AutoLockHelperThreadState& lock)
 {
     if (!HelperThreadState().ionWorklist(lock).append(builder))
         return false;
 
+    // The builder is moving off-thread. Freeze the LifoAlloc to prevent any
+    // unwanted mutations.
+    builder->alloc().lifoAlloc()->setReadOnly();
+
     HelperThreadState().notifyOne(GlobalHelperThreadState::PRODUCER, lock);
     return true;
 }
 
 bool
 js::StartOffThreadIonFree(jit::IonBuilder* builder, const AutoLockHelperThreadState& lock)
 {
     MOZ_ASSERT(CanUseExtraThreads());
@@ -1992,16 +1996,20 @@ HelperThread::handleIonWorkload(AutoLock
 {
     MOZ_ASSERT(HelperThreadState().canStartIonCompile(locked));
     MOZ_ASSERT(idle());
 
     // Find the IonBuilder in the worklist with the highest priority, and
     // remove it from the worklist.
     jit::IonBuilder* builder = HelperThreadState().highestPriorityPendingIonCompile(locked);
 
+    // The builder is taken by this helper thread. Unfreeze the LifoAlloc to
+    // allow mutations.
+    builder->alloc().lifoAlloc()->setReadWrite();
+
     currentTask.emplace(builder);
 
     JSRuntime* rt = builder->script()->compartment()->runtimeFromAnyThread();
 
     {
         AutoUnlockHelperThreadState unlock(locked);
 
         TraceLoggerThread* logger = TraceLoggerForCurrentThread();
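
Together, the two HelperThreads.cpp changes form a freeze/thaw hand-off around
the Ion worklist: the publishing thread freezes the builder's LifoAlloc, and
the helper thread thaws it once it takes ownership, so any stray write in
between faults immediately instead of racing. Below is a hedged sketch of that
protocol; Builder, LifoAllocStub and the queue are hypothetical stand-ins for
jit::IonBuilder, LifoAlloc and the helper-thread worklist, and only the
setReadOnly/setReadWrite pairing mirrors the patch:

    #include <cstdio>
    #include <queue>

    struct LifoAllocStub {
        // In the real LifoAlloc these walk the chunk lists and adjust the
        // page protections via setRWUntil.
        void setReadOnly()  { std::puts("chunks frozen (read-only)"); }
        void setReadWrite() { std::puts("chunks thawed (read-write)"); }
    };

    struct Builder { LifoAllocStub alloc; };

    std::queue<Builder*> worklist; // stand-in for ionWorklist

    void startOffThreadCompile(Builder* b) {
        worklist.push(b);
        // The builder is moving off-thread: freeze its LifoAlloc so any
        // accidental mutation from the publishing thread faults immediately.
        b->alloc.setReadOnly();
    }

    void handleWorkload() {
        Builder* b = worklist.front();
        worklist.pop();
        // The helper thread now owns the builder: thaw the LifoAlloc before
        // compiling with it.
        b->alloc.setReadWrite();
    }

    int main() {
        Builder b;
        startOffThreadCompile(&b);
        handleWorkload();
        return 0;
    }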