Backed out changeset 58dd47c5aa51 (bug 1573111) for build bustages at TestBaseProfiler.cpp:875:51. CLOSED TREE
author: Brindusan Cristian <cbrindusan@mozilla.com>
date: Tue, 13 Aug 2019 06:21:16 +0300
changeset: 487571:2813bdfb93a35a385de03731279471e82f2ea8c6
parent: 487570:58dd47c5aa51acc3095c623148df986354dfbf72
child: 487572:8a759cc608d256e8346f618d34b54dfeb9908502
push id: 92342
push user: cbrindusan@mozilla.com
push date: Tue, 13 Aug 2019 03:36:01 +0000
treeherder: autoland@2813bdfb93a3
bugs: 1573111
milestone: 70.0a1
backs out: 58dd47c5aa51acc3095c623148df986354dfbf72
Backed out changeset 58dd47c5aa51 (bug 1573111) for build bustages at TestBaseProfiler.cpp:875:51. CLOSED TREE
mozglue/baseprofiler/public/BlocksRingBuffer.h
mozglue/tests/TestBaseProfiler.cpp
--- a/mozglue/baseprofiler/public/BlocksRingBuffer.h
+++ b/mozglue/baseprofiler/public/BlocksRingBuffer.h
@@ -13,23 +13,19 @@
 
 #include "mozilla/Maybe.h"
 
 #include <functional>
 #include <utility>
 
 namespace mozilla {
 
-// Thread-safe Ring buffer that can store blocks of different sizes during
-// defined sessions.
+// Thread-safe Ring buffer that can store blocks of different sizes.
 // Each *block* contains an *entry* and the entry size:
 // [ entry_size | entry ] [ entry_size | entry ] ...
-// *In-session* is a period of time during which `BlocksRingBuffer` allows
-// reading and writing. *Out-of-session*, the `BlocksRingBuffer` object is
-// still valid, but contains no data, and gracefully denies accesses.
 //
 // To write an entry, the buffer reserves a block of sufficient size (to contain
 // user data of predetermined size), writes the entry size, and lets the caller
 // fill the entry contents using ModuloBuffer::Iterator APIs and a few entry-
 // specific APIs. E.g.:
 // ```
 // BlocksRingBuffer brb(PowerOfTwo<BlocksRingBuffer::Length>(1024));
 // brb.Put([&](BlocksRingBuffer::EntryReserver aER) {
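For orientation, here is a minimal usage sketch of the API as restored by this backout, modelled on the calls in TestBaseProfiler.cpp further down. The include path, the buffer size, and the `SketchBasicUsage` name are illustrative assumptions; this snippet is not part of the changeset.

```
// Sketch only: assumes the mozglue BaseProfiler headers are on the include path.
#include "BlocksRingBuffer.h"  // mozglue/baseprofiler/public/ (assumed include path)

#include <cstdint>
#include <cstdio>

void SketchBasicUsage() {
  using namespace mozilla;
  // A 1 KiB ring buffer; with this backout there is no out-of-session state.
  BlocksRingBuffer rb(MakePowerOfTwo<BlocksRingBuffer::Length, 1024>());

  // Write a trivially-copyable object; the returned BlockIndex identifies the block.
  BlocksRingBuffer::BlockIndex bi = rb.PutObject(uint32_t(42));

  // Visit every live entry.
  rb.ReadEach([](BlocksRingBuffer::EntryReader& aReader) {
    printf("entry: %u\n", unsigned(aReader.ReadObject<uint32_t>()));
  });

  // Re-read the block written above; the Maybe is Nothing if it has since
  // been overwritten.
  rb.ReadAt(bi, [](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeReader) {
    if (aMaybeReader.isSome()) {
      printf("still there: %u\n", unsigned(aMaybeReader->ReadObject<uint32_t>()));
    }
  });
}
```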
@@ -125,145 +121,72 @@ class BlocksRingBuffer {
     // and `Index`.
     friend class BlocksRingBuffer;
     explicit BlockIndex(Index aBlockIndex) : mBlockIndex(aBlockIndex) {}
     explicit operator Index() const { return mBlockIndex; }
 
     Index mBlockIndex;
   };
 
-  // Default constructor starts out-of-session (nothing to read or write).
-  BlocksRingBuffer() = default;
-
   // Constructors with no entry destructor; the oldest entries will be silently
   // overwritten/destroyed.
 
   // Create a buffer of the given length.
-  explicit BlocksRingBuffer(PowerOfTwo<Length> aLength)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(aLength))) {}
+  explicit BlocksRingBuffer(PowerOfTwo<Length> aLength) : mBuffer(aLength) {}
 
   // Take ownership of an existing buffer.
   BlocksRingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
                    PowerOfTwo<Length> aLength)
-      : mMaybeUnderlyingBuffer(
-            Some(UnderlyingBuffer(std::move(aExistingBuffer), aLength))) {}
+      : mBuffer(std::move(aExistingBuffer), aLength) {}
 
   // Use an externally-owned buffer.
   BlocksRingBuffer(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength)
-      : mMaybeUnderlyingBuffer(
-            Some(UnderlyingBuffer(aExternalBuffer, aLength))) {}
+      : mBuffer(aExternalBuffer, aLength) {}
 
   // Constructors with an entry destructor, which will be called with an
   // `EntryReader` before the oldest entries get overwritten/destroyed.
   // Note that this entry destructor may be invoked from another caller's
   // function that writes/clears data; be aware of this re-entrancy! (Details
   // above class.)
 
   // Create a buffer of the given length.
   template <typename EntryDestructor>
   explicit BlocksRingBuffer(PowerOfTwo<Length> aLength,
                             EntryDestructor&& aEntryDestructor)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
-            aLength, std::forward<EntryDestructor>(aEntryDestructor)))) {}
+      : mBuffer(aLength),
+        mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
 
   // Take ownership of an existing buffer.
   template <typename EntryDestructor>
   explicit BlocksRingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
                             PowerOfTwo<Length> aLength,
                             EntryDestructor&& aEntryDestructor)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
-            std::move(aExistingBuffer), aLength,
-            std::forward<EntryDestructor>(aEntryDestructor)))) {}
+      : mBuffer(std::move(aExistingBuffer), aLength),
+        mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
 
   // Use an externally-owned buffer.
   template <typename EntryDestructor>
   explicit BlocksRingBuffer(Buffer::Byte* aExternalBuffer,
                             PowerOfTwo<Length> aLength,
                             EntryDestructor&& aEntryDestructor)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
-            aExternalBuffer, aLength,
-            std::forward<EntryDestructor>(aEntryDestructor)))) {}
+      : mBuffer(aExternalBuffer, aLength),
+        mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
 
   // Destructor explicitly destroys all remaining entries; this may invoke the
   // caller-provided entry destructor.
   ~BlocksRingBuffer() {
 #ifdef DEBUG
     // Needed because of lock DEBUG-check in `DestroyAllEntries()`.
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
 #endif  // DEBUG
     DestroyAllEntries();
   }
 
-  // Remove underlying buffer, if any.
-  void Reset() {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-  }
-
-  // Create a buffer of the given length.
-  void Set(PowerOfTwo<Length> aLength) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(aLength);
-  }
-
-  // Take ownership of an existing buffer.
-  void Set(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-           PowerOfTwo<Length> aLength) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(std::move(aExistingBuffer), aLength);
-  }
-
-  // Use an externally-owned buffer.
-  void Set(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(aExternalBuffer, aLength);
-  }
-
-  // Create a buffer of the given length, with entry destructor.
-  template <typename EntryDestructor>
-  void Set(PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(
-        aLength, std::forward<EntryDestructor>(aEntryDestructor));
-  }
-
-  // Take ownership of an existing buffer, with entry destructor.
-  template <typename EntryDestructor>
-  void Set(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-           PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(
-        std::move(aExistingBuffer), aLength,
-        std::forward<EntryDestructor>(aEntryDestructor));
-  }
-
-  // Use an externally-owned buffer, with entry destructor.
-  template <typename EntryDestructor>
-  void Set(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength,
-           EntryDestructor&& aEntryDestructor) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(
-        aExternalBuffer, aLength,
-        std::forward<EntryDestructor>(aEntryDestructor));
-  }
-
-  // Buffer length in bytes.
-  Maybe<PowerOfTwo<Length>> BufferLength() const {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    return mMaybeUnderlyingBuffer.map([](const UnderlyingBuffer& aBuffer) {
-      return aBuffer.mBuffer.BufferLength();
-    });
-    ;
-  }
+  // Buffer length, constant. No need for locking.
+  PowerOfTwo<Length> BufferLength() const { return mBuffer.BufferLength(); }
 
   // Snapshot of the buffer state.
   struct State {
     // Index to the first block.
     BlockIndex mRangeStart;
 
     // Index past the last block. Equals mRangeStart if empty.
     BlockIndex mRangeEnd;
@@ -272,27 +195,22 @@ class BlocksRingBuffer {
     uint64_t mPushedBlockCount = 0;
 
     // Number of blocks that have been removed from this buffer.
     // Note: Live entries = pushed - cleared.
     uint64_t mClearedBlockCount = 0;
   };
 
   // Get a snapshot of the current state.
-  // When out-of-session, mFirstReadIndex==mNextWriteIndex, and
-  // mPushedBlockCount==mClearedBlockCount==0.
   // Note that these may change right after this thread-safe call, so they
   // should only be used for statistical purposes.
   State GetState() const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    return {
-        mFirstReadIndex, mNextWriteIndex,
-        mMaybeUnderlyingBuffer ? mMaybeUnderlyingBuffer->mPushedBlockCount : 0,
-        mMaybeUnderlyingBuffer ? mMaybeUnderlyingBuffer->mClearedBlockCount
-                               : 0};
+    return {mFirstReadIndex, mNextWriteIndex, mPushedBlockCount,
+            mClearedBlockCount};
   }
 
   // Iterator-like class used to read from an entry.
   // Created through `BlockIterator`, or a `GetEntryAt()` function, lives
   // within a lock guard lifetime.
   class EntryReader : public BufferReader {
    public:
     // Allow move-construction.
@@ -370,18 +288,17 @@ class BlocksRingBuffer {
       return Nothing();
     }
 
    private:
     // Only a BlocksRingBuffer can instantiate an EntryReader.
     friend class BlocksRingBuffer;
 
     explicit EntryReader(const BlocksRingBuffer& aRing, BlockIndex aBlockIndex)
-        : BufferReader(aRing.mMaybeUnderlyingBuffer->mBuffer.ReaderAt(
-              Index(aBlockIndex))),
+        : BufferReader(aRing.mBuffer.ReaderAt(Index(aBlockIndex))),
           mRing(aRing),
           mEntryBytes(BufferReader::ReadULEB128<Length>()),
           mEntryStart(CurrentIndex()) {
       // No EntryReader should live outside of a mutexed call.
       mRing.mMutex.AssertCurrentThreadOwns();
     }
 
     // Using a non-null pointer instead of a reference, to allow copying.
@@ -433,18 +350,17 @@ class BlocksRingBuffer {
     }
 
     // Can be used as reference to come back to this entry with `GetEntryAt()`.
     BlockIndex CurrentBlockIndex() const { return mBlockIndex; }
 
     // Index past the end of this block, which is the start of the next block.
     BlockIndex NextBlockIndex() const {
       MOZ_ASSERT(!IsAtEnd());
-      BufferReader reader =
-          mRing->mMaybeUnderlyingBuffer->mBuffer.ReaderAt(Index(mBlockIndex));
+      BufferReader reader = mRing->mBuffer.ReaderAt(Index(mBlockIndex));
       Length entrySize = reader.ReadULEB128<Length>();
       return BlockIndex(reader.CurrentIndex() + entrySize);
     }
 
     // Index of the first block in the whole buffer.
     BlockIndex BufferRangeStart() const { return mRing->mFirstReadIndex; }
 
     // Index past the last block in the whole buffer.
@@ -513,58 +429,50 @@ class BlocksRingBuffer {
     }
 
     // Using a non-null pointer instead of a reference, to allow copying.
     // This Reader should only live inside one of the thread-safe
     // BlocksRingBuffer functions, for this reference to stay valid.
     NotNull<const BlocksRingBuffer*> mRing;
   };
 
-  // Call `aCallback(Maybe<BlocksRingBuffer::Reader>&&)`, and return whatever
-  // `aCallback` returns. `Maybe` may be `Nothing` when out-of-session.
-  // Callback should not store `Reader`, because it may become invalid after
-  // this call.
+  // Call `aCallback(BlocksRingBuffer::Reader)` with temporary Reader, and
+  // return whatever `aCallback` returns.
+  // Callback should not store `Reader`, as it may become invalid after this
+  // call.
   template <typename Callback>
   auto Read(Callback&& aCallback) const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    Maybe<Reader> maybeReader;
-    if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
-      maybeReader.emplace(Reader(*this));
-    }
-    return std::forward<Callback>(aCallback)(std::move(maybeReader));
+    return std::forward<Callback>(aCallback)(Reader(*this));
   }
 
   // Call `aCallback(BlocksRingBuffer::EntryReader&)` on each item.
-  // Callback should not store `EntryReader`, because it may become invalid
-  // after this call.
+  // Callback should not store `EntryReader`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   void ReadEach(Callback&& aCallback) const {
-    Read([&](Maybe<Reader>&& aMaybeReader) {
-      if (MOZ_LIKELY(aMaybeReader)) {
-        std::move(aMaybeReader)->ForEach(aCallback);
-      }
-    });
+    Read([&](const Reader& aReader) { aReader.ForEach(aCallback); });
   }
 
   // Call `aCallback(Maybe<BlocksRingBuffer::EntryReader>&&)` on the entry at
-  // the given BlockIndex; The `Maybe` will be `Nothing` if out-of-session, or
-  // if that entry doesn't exist anymore, or if we've reached just past the
-  // last entry. Return whatever `aCallback` returns. Callback should not
-  // store `EntryReader`, because it may become invalid after this call.
+  // the given BlockIndex; The `Maybe` will be `Nothing` if that entry doesn't
+  // exist anymore, or if we've reached just past the last entry. Return
+  // whatever `aCallback` returns.
+  // Callback should not store `EntryReader`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   auto ReadAt(BlockIndex aBlockIndex, Callback&& aCallback) const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
     MOZ_ASSERT(aBlockIndex <= mNextWriteIndex);
-    Maybe<EntryReader> maybeEntryReader;
-    if (MOZ_LIKELY(mMaybeUnderlyingBuffer) && aBlockIndex >= mFirstReadIndex &&
-        aBlockIndex < mNextWriteIndex) {
+    Maybe<EntryReader> maybeReader;
+    if (aBlockIndex >= mFirstReadIndex && aBlockIndex < mNextWriteIndex) {
       AssertBlockIndexIsValid(aBlockIndex);
-      maybeEntryReader.emplace(ReaderInBlockAt(aBlockIndex));
+      maybeReader.emplace(ReaderInBlockAt(aBlockIndex));
     }
-    return std::forward<Callback>(aCallback)(std::move(maybeEntryReader));
+    return std::forward<Callback>(aCallback)(std::move(maybeReader));
   }
 
   class EntryReserver;
 
   // Class used to write an entry contents.
   // Created through `EntryReserver`, lives within a lock guard lifetime.
   class EntryWriter : public BufferWriter {
    public:
@@ -654,18 +562,17 @@ class BlocksRingBuffer {
     // `aEntryBytes`.
     static Length BlockSizeForEntrySize(Length aEntryBytes) {
       return aEntryBytes +
              static_cast<Length>(BufferWriter::ULEB128Size(aEntryBytes));
     }
 
     EntryWriter(BlocksRingBuffer& aRing, BlockIndex aBlockIndex,
                 Length aEntryBytes)
-        : BufferWriter(aRing.mMaybeUnderlyingBuffer->mBuffer.WriterAt(
-              Index(aBlockIndex))),
+        : BufferWriter(aRing.mBuffer.WriterAt(Index(aBlockIndex))),
           mRing(aRing),
           mEntryBytes(aEntryBytes),
           mEntryStart([&]() {
             // BufferWriter is at `aBlockIndex`. Write the entry size...
             BufferWriter::WriteULEB128(aEntryBytes);
             // ... BufferWriter now at start of entry section.
             return CurrentIndex();
           }()) {
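The `[ entry_size | entry ]` layout above means a block costs the entry bytes plus the ULEB128-encoded size. A quick, self-contained sanity check of that arithmetic follows; the `ULEB128Size` helper here only mirrors what `BufferWriter::ULEB128Size` is assumed to compute and is not the real implementation.

```
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed to mirror BufferWriter::ULEB128Size: bytes needed to encode aValue
// as unsigned LEB128 (7 value bits per byte).
static size_t ULEB128Size(uint32_t aValue) {
  size_t bytes = 1;
  while (aValue >= 0x80u) {
    aValue >>= 7;
    ++bytes;
  }
  return bytes;
}

int main() {
  // A 4-byte entry needs one ULEB128 byte for its size, so the whole block is
  // 5 bytes -- matching the "[4 |    int(1)    ]" diagrams in TestBaseProfiler.cpp.
  const uint32_t entryBytes = sizeof(uint32_t);
  const size_t blockBytes = entryBytes + ULEB128Size(entryBytes);
  printf("block bytes for a 4-byte entry: %zu\n", blockBytes);  // prints 5
  return 0;
}
```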
@@ -695,43 +602,39 @@ class BlocksRingBuffer {
     // return whatever `aCallback` returns.
     // Callback should not store `EntryWriter`, as it may become invalid after
     // this thread-safe call.
     template <typename Callback>
     auto Reserve(Length aBytes, Callback&& aCallback) {
       // Don't allow even half of the buffer length. More than that would
       // probably be unreasonable, and much more would risk having an entry
       // wrapping around and overwriting itself!
-      MOZ_RELEASE_ASSERT(
-          aBytes <
-          mRing->mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() / 2);
+      MOZ_RELEASE_ASSERT(aBytes < mRing->BufferLength().Value() / 2);
       // Compute block size from the requested entry size.
       const Length blockBytes = EntryWriter::BlockSizeForEntrySize(aBytes);
       // We will put this new block at the end of the current buffer.
       const BlockIndex blockIndex = mRing->mNextWriteIndex;
       // Compute the end of this new block...
       const Index blockEnd = Index(blockIndex) + blockBytes;
       // ... which is where the following block will go.
       mRing->mNextWriteIndex = BlockIndex(blockEnd);
-      while (
-          blockEnd >
-          Index(mRing->mFirstReadIndex) +
-              mRing->mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value()) {
+      while (blockEnd >
+             Index(mRing->mFirstReadIndex) + mRing->BufferLength().Value()) {
         // About to trample on an old block.
         EntryReader reader = mRing->ReaderInBlockAt(mRing->mFirstReadIndex);
         // Call provided entry destructor for that entry.
-        if (mRing->mMaybeUnderlyingBuffer->mEntryDestructor) {
-          mRing->mMaybeUnderlyingBuffer->mEntryDestructor(reader);
+        if (mRing->mEntryDestructor) {
+          mRing->mEntryDestructor(reader);
         }
-        mRing->mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
+        mRing->mClearedBlockCount += 1;
         MOZ_ASSERT(reader.CurrentIndex() <= Index(reader.NextBlockIndex()));
         // Move the buffer reading start past this cleared block.
         mRing->mFirstReadIndex = reader.NextBlockIndex();
       }
-      mRing->mMaybeUnderlyingBuffer->mPushedBlockCount += 1;
+      mRing->mPushedBlockCount += 1;
       // Finally, let aCallback write into the entry.
       return std::forward<Callback>(aCallback)(
           EntryWriter(*mRing, blockIndex, aBytes));
     }
 
     // Write a new entry copied from the given buffer, return block index.
     BlockIndex Write(const void* aSrc, Length aBytes) {
       return Reserve(aBytes, [&](EntryWriter aEW) {
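The `while` loop above is where the caller-provided entry destructor fires: once the new block would overlap the oldest live block, that block is read, passed to the destructor, counted as cleared, and the read index moves past it. A hedged sketch of that behavior; the buffer size, include path, and `SketchOverwrite` name are illustrative assumptions.

```
#include "BlocksRingBuffer.h"  // assumed include path

#include <cstdint>
#include <cstdio>

void SketchOverwrite() {
  using namespace mozilla;
  int destroyedCount = 0;
  // Tiny 32-byte buffer with an entry destructor, so writes quickly wrap
  // around and destroy the oldest entries.
  BlocksRingBuffer rb(MakePowerOfTwo<BlocksRingBuffer::Length, 32>(),
                      [&](BlocksRingBuffer::EntryReader& aReader) {
                        // The about-to-be-trampled entry can still be read here.
                        (void)aReader.ReadObject<uint32_t>();
                        ++destroyedCount;
                      });
  for (uint32_t i = 0; i < 100; ++i) {
    rb.PutObject(i);  // each 4-byte entry occupies a 5-byte block
  }
  // Only a handful of 5-byte blocks fit in 32 bytes, so most of the 100
  // entries were destroyed along the way.
  printf("destroyed: %d\n", destroyedCount);
}
```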
@@ -779,154 +682,120 @@ class BlocksRingBuffer {
     }
 
     // Using a non-null pointer instead of a reference, to allow copying.
     // This EntryReserver should only live inside one of the thread-safe
     // BlocksRingBuffer functions, for this reference to stay valid.
     NotNull<BlocksRingBuffer*> mRing;
   };
 
-  // Main function to write entries.
-  // Call `aCallback(Maybe<BlocksRingBuffer::EntryReserver>&&)`, and return
-  // whatever `aCallback` returns. `Maybe` may be `Nothing` when out-of-session.
-  // Callback should not store `EntryReserver`, because it may become invalid
-  // after this call. The `EntryReserver` can then be used to reserve one or
-  // more entries; another callback can then fill each.
+  // Add a new entry, call `aCallback` with a temporary EntryReserver (so that
+  // `aCallback` can reserve an entry or just write something), and return
+  // whatever `aCallback` returns.
+  // Callback should not store `EntryReserver`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   auto Put(Callback&& aCallback) {
     // Implementation note: We are locking during the whole operation (reserving
     // and writing entry), which means slow writers could block the buffer for a
     // while. It should be possible to only lock when reserving the space, and
     // then letting the callback write the entry without a need for the lock, as
     // it's the only thread that should be accessing this particular entry.
     // Extra safety would be necessary to ensure the entry cannot be read, and
     // fast writers going around the ring cannot trample on this entry until it
     // is fully written.
     // TODO: Investigate this potential improvement as part of bug 1562604.
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    Maybe<EntryReserver> maybeEntryReserver;
-    if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
-      maybeEntryReserver.emplace(EntryReserver(*this));
-    }
-    return std::forward<Callback>(aCallback)(std::move(maybeEntryReserver));
+    return std::forward<Callback>(aCallback)(EntryReserver(*this));
   }
 
   // Add a new entry of known size, call `aCallback` with a temporary
-  // EntryWriter, and return whatever `aCallback` returns. Callback should not
-  // store `EntryWriter`, as it may become invalid after this thread-safe call.
+  // EntryWriter, and return whatever `aCallback` returns.
+  // Callback should not store `EntryWriter`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   auto Put(Length aLength, Callback&& aCallback) {
-    return Put([&](Maybe<EntryReserver>&& aER) {
-      if (MOZ_LIKELY(aER)) {
-        // We are in-session, with an EntryReserver at the ready.
-        // Reserve the requested space, then invoke the callback with the given
-        // EntryWriter inserted into a Maybe.
-        return aER->Reserve(aLength, [&](EntryWriter aEW) {
-          return std::forward<Callback>(aCallback)(Some(std::move(aEW)));
-        });
-      }
-      // Out-of-session, just invoke the callback with Nothing.
-      return std::forward<Callback>(aCallback)(Maybe<EntryWriter>{});
+    return Put([&](EntryReserver aER) {
+      return aER.Reserve(aLength, std::forward<Callback>(aCallback));
     });
   }
 
   // Add a new entry copied from the given buffer, return block index.
   BlockIndex PutFrom(const void* aSrc, Length aBytes) {
-    return Put([&](Maybe<EntryReserver>&& aER) {
-      if (MOZ_LIKELY(aER)) {
-        return std::move(aER)->Write(aSrc, aBytes);
-      }
-      // Out-of-session, return "empty" BlockIndex.
-      return BlockIndex{};
-    });
+    return Put([&](EntryReserver aER) { return aER.Write(aSrc, aBytes); });
   }
 
   // Add a new entry copied from the given object, return block index.
   // Restricted to trivially-copyable types.
   // TODO: Allow more types (follow-up patches in progress, see bug 1562604).
   template <typename T>
   BlockIndex PutObject(const T& aOb) {
-    return Put([&](Maybe<EntryReserver>&& aER) {
-      if (MOZ_LIKELY(aER)) {
-        return std::move(aER)->WriteObject<T>(aOb);
-      }
-      // Out-of-session, return "empty" BlockIndex.
-      return BlockIndex{};
-    });
+    return Put([&](EntryReserver aER) { return aER.WriteObject<T>(aOb); });
   }
 
   // Clear all entries, calling entry destructor (if any), and move read index
   // to the end so that these entries cannot be read anymore.
   void Clear() {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
     ClearAllEntries();
   }
 
   // Clear all entries strictly before aBlockIndex, calling the entry
   // destructor (if any), and move the read index so that these entries
   // cannot be read anymore.
   void ClearBefore(BlockIndex aBlockIndex) {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
     // Don't accept a not-yet-written index. One-past-the-end is ok.
     MOZ_ASSERT(aBlockIndex <= mNextWriteIndex);
     if (aBlockIndex <= mFirstReadIndex) {
       // Already cleared.
       return;
     }
     if (aBlockIndex == mNextWriteIndex) {
       // Right past the end, just clear everything.
       ClearAllEntries();
       return;
     }
     // Otherwise we need to clear a subset of entries.
     AssertBlockIndexIsValid(aBlockIndex);
-    if (mMaybeUnderlyingBuffer->mEntryDestructor) {
+    if (mEntryDestructor) {
       // We have an entry destructor, destroy entries before aBlockIndex.
       Reader reader(*this);
       BlockIterator it = reader.begin();
       for (; it.CurrentBlockIndex() < aBlockIndex; ++it) {
         MOZ_ASSERT(it.CurrentBlockIndex() < reader.end().CurrentBlockIndex());
         EntryReader reader = *it;
-        mMaybeUnderlyingBuffer->mEntryDestructor(reader);
-        mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
+        mEntryDestructor(reader);
+        mClearedBlockCount += 1;
       }
       MOZ_ASSERT(it.CurrentBlockIndex() == aBlockIndex);
     } else {
       // No entry destructor, just count skipped entries.
       Reader reader(*this);
       BlockIterator it = reader.begin();
       for (; it.CurrentBlockIndex() < aBlockIndex; ++it) {
         MOZ_ASSERT(it.CurrentBlockIndex() < reader.end().CurrentBlockIndex());
-        mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
+        mClearedBlockCount += 1;
       }
       MOZ_ASSERT(it.CurrentBlockIndex() == aBlockIndex);
     }
     // Move read index to given index, so there's effectively no more entries
     // before.
     mFirstReadIndex = aBlockIndex;
   }
 
 #ifdef DEBUG
   void Dump() const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    if (!mMaybeUnderlyingBuffer) {
-      printf("empty BlocksRingBuffer\n");
-      return;
-    }
     using ULL = unsigned long long;
     printf("start=%llu (%llu) end=%llu (%llu) - ", ULL(Index(mFirstReadIndex)),
-           ULL(Index(mFirstReadIndex) &
-               (mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() - 1)),
+           ULL(Index(mFirstReadIndex) & (BufferLength().Value() - 1)),
            ULL(Index(mNextWriteIndex)),
-           ULL(Index(mNextWriteIndex) &
-               (mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() - 1)));
-    mMaybeUnderlyingBuffer->mBuffer.Dump();
+           ULL(Index(mNextWriteIndex) & (BufferLength().Value() - 1)));
+    mBuffer.Dump();
   }
 #endif  // DEBUG
 
  private:
   // In DEBUG mode, assert that `aBlockIndex` is a valid index for a live block.
   // (Not just in range, but points exactly at the start of a block.)
   // Slow, so avoid it for internal checks; this is more to check what callers
   // provide us.
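`ClearBefore()` pairs naturally with a `BlockIndex` kept from an earlier `Put`/`PutObject`. A hedged sketch under the same assumptions as the earlier snippets (include paths, sizes, and the `SketchClearBefore` name are illustrative):

```
#include "BlocksRingBuffer.h"    // assumed include path
#include "mozilla/Assertions.h"  // for MOZ_RELEASE_ASSERT

#include <cstdint>

void SketchClearBefore() {
  using namespace mozilla;
  BlocksRingBuffer rb(MakePowerOfTwo<BlocksRingBuffer::Length, 64>());

  rb.PutObject(uint32_t(1));
  rb.PutObject(uint32_t(2));
  // Keep the index of the third entry.
  BlocksRingBuffer::BlockIndex bi3 = rb.PutObject(uint32_t(3));

  // Destroy everything strictly before the third entry; entries 1 and 2 can
  // no longer be read, entry 3 remains.
  rb.ClearBefore(bi3);

  rb.ReadEach([](BlocksRingBuffer::EntryReader& aReader) {
    MOZ_RELEASE_ASSERT(aReader.ReadObject<uint32_t>() == 3);
  });
}
```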
@@ -934,23 +803,21 @@ class BlocksRingBuffer {
     mMutex.AssertCurrentThreadOwns();
 #ifdef DEBUG
     MOZ_ASSERT(aBlockIndex >= mFirstReadIndex);
     MOZ_ASSERT(aBlockIndex < mNextWriteIndex);
     // Quick check (default), or slow check (change '1' to '0') below:
 #  if 1
     // Quick check that this looks like a valid block start.
     // Read the entry size at the start of the block.
-    BufferReader br =
-        mMaybeUnderlyingBuffer->mBuffer.ReaderAt(Index(aBlockIndex));
+    BufferReader br = mBuffer.ReaderAt(Index(aBlockIndex));
     Length entryBytes = br.ReadULEB128<Length>();
     // It should be between 1 and half of the buffer length max.
     MOZ_ASSERT(entryBytes > 0);
-    MOZ_ASSERT(entryBytes <
-               mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() / 2);
+    MOZ_ASSERT(entryBytes < BufferLength().Value() / 2);
     // The end of the block should be inside the live buffer range.
     MOZ_ASSERT(Index(aBlockIndex) + BufferReader::ULEB128Size(entryBytes) +
                    entryBytes <=
                Index(mNextWriteIndex));
 #  else
     // Slow check that the index is really the start of the block.
     // This kills performance, as it reads from the first index until
     // aBlockIndex. Only use to debug issues locally.
@@ -972,124 +839,49 @@ class BlocksRingBuffer {
     return EntryReader(*this, aBlockIndex);
   }
 
   // Call entry destructor (if any) on all entries.
   // Note: The read index is not moved; this should only be called from the
   // destructor or ClearAllEntries.
   void DestroyAllEntries() {
     mMutex.AssertCurrentThreadOwns();
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
-    if (mMaybeUnderlyingBuffer->mEntryDestructor) {
+    if (mEntryDestructor) {
       // We have an entry destructor, destroy all the things!
-      Reader(*this).ForEach([this](EntryReader& aReader) {
-        mMaybeUnderlyingBuffer->mEntryDestructor(aReader);
-      });
+      Reader(*this).ForEach(
+          [this](EntryReader& aReader) { mEntryDestructor(aReader); });
     }
-    mMaybeUnderlyingBuffer->mClearedBlockCount =
-        mMaybeUnderlyingBuffer->mPushedBlockCount;
+    mClearedBlockCount = mPushedBlockCount;
   }
 
   // Clear all entries, calling entry destructor (if any), and move read index
   // to the end so that these entries cannot be read anymore.
   void ClearAllEntries() {
     mMutex.AssertCurrentThreadOwns();
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
     DestroyAllEntries();
     // Move read index to write index, so there's effectively no more entries
     // that can be read. (Not setting both to 0, in case user is keeping
     // `BlockIndex`'es to old entries.)
     mFirstReadIndex = mNextWriteIndex;
   }
 
-  // If there is an underlying buffer (with optional entry destructor), destroy
-  // all entries, move read index to the end, and discard the buffer and entry
-  // destructor. This BlocksRingBuffer will now gracefully reject all API calls,
-  // and is in a state where a new underlying buffer&entry deleter may be
-  // installed.
-  void ResetUnderlyingBuffer() {
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
-    ClearAllEntries();
-    mMaybeUnderlyingBuffer.reset();
-  }
-
   // Mutex guarding the following members.
   mutable baseprofiler::detail::BaseProfilerMutex mMutex;
 
-  struct UnderlyingBuffer {
-    // Create a buffer of the given length.
-    explicit UnderlyingBuffer(PowerOfTwo<Length> aLength) : mBuffer(aLength) {}
-
-    // Take ownership of an existing buffer.
-    UnderlyingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-                     PowerOfTwo<Length> aLength)
-        : mBuffer(std::move(aExistingBuffer), aLength) {}
-
-    // Use an externally-owned buffer.
-    UnderlyingBuffer(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength)
-        : mBuffer(aExternalBuffer, aLength) {}
-
-    // Create a buffer of the given length.
-    template <typename EntryDestructor>
-    explicit UnderlyingBuffer(PowerOfTwo<Length> aLength,
-                              EntryDestructor&& aEntryDestructor)
-        : mBuffer(aLength),
-          mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
-
-    // Take ownership of an existing buffer.
-    template <typename EntryDestructor>
-    explicit UnderlyingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-                              PowerOfTwo<Length> aLength,
-                              EntryDestructor&& aEntryDestructor)
-        : mBuffer(std::move(aExistingBuffer), aLength),
-          mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
-
-    // Use an externally-owned buffer.
-    template <typename EntryDestructor>
-    explicit UnderlyingBuffer(Buffer::Byte* aExternalBuffer,
-                              PowerOfTwo<Length> aLength,
-                              EntryDestructor&& aEntryDestructor)
-        : mBuffer(aExternalBuffer, aLength),
-          mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
-
-    // Only allow move-construction.
-    UnderlyingBuffer(UnderlyingBuffer&&) = default;
-
-    // Copies and move-assignment are explictly disallowed.
-    UnderlyingBuffer(const UnderlyingBuffer&) = delete;
-    UnderlyingBuffer& operator=(const UnderlyingBuffer&) = delete;
-    UnderlyingBuffer& operator=(UnderlyingBuffer&&) = delete;
-
-    // Underlying circular byte buffer.
-    Buffer mBuffer;
-    // If set, function to call for each entry that is about to be destroyed.
-    std::function<void(EntryReader&)> mEntryDestructor;
-
-    // Statistics.
-    uint64_t mPushedBlockCount = 0;
-    uint64_t mClearedBlockCount = 0;
-  };
-
-  // Underlying buffer, with entry destructor and stats.
-  // Only valid during in-session period.
-  Maybe<UnderlyingBuffer> mMaybeUnderlyingBuffer;
-
+  // Underlying circular byte buffer.
+  Buffer mBuffer;
   // Index to the first block to be read (or cleared). Initialized to 1 because
-  // 0 is reserved for the "empty" BlockIndex value. Kept between sessions, so
-  // that stored indices from one session will be gracefully denied in future
-  // sessions.
+  // 0 is reserved for the "empty" BlockIndex value.
   BlockIndex mFirstReadIndex = BlockIndex(Index(1));
   // Index where the next new block should be allocated. Initialized to 1
-  // because 0 is reserved for the "empty" BlockIndex value. Kept between
-  // sessions, so that stored indices from one session will be gracefully denied
-  // in future sessions.
+  // because 0 is reserved for the "empty" BlockIndex value.
   BlockIndex mNextWriteIndex = BlockIndex(Index(1));
+  // If set, function to call for each entry that is about to be destroyed.
+  std::function<void(EntryReader&)> mEntryDestructor;
+
+  // Statistics.
+  uint64_t mPushedBlockCount = 0;
+  uint64_t mClearedBlockCount = 0;
 };
 
 }  // namespace mozilla
 
 #endif  // BlocksRingBuffer_h
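The test updates below boil down to the two restored `Put` shapes, condensed here into one hedged sketch for reference (same environment assumptions as above; `SketchPutOverloads` is an illustrative name).

```
#include "BlocksRingBuffer.h"  // assumed include path

#include <cstdint>

void SketchPutOverloads(mozilla::BlocksRingBuffer& rb) {
  using namespace mozilla;

  // Put() hands the callback an EntryReserver, which can write directly...
  BlocksRingBuffer::BlockIndex bi =
      rb.Put([](BlocksRingBuffer::EntryReserver aER) {
        return aER.WriteObject(uint32_t(2));
      });
  (void)bi;

  // ...or reserve a sized entry and fill it through an EntryWriter.
  rb.Put(sizeof(uint32_t), [](BlocksRingBuffer::EntryWriter aEW) {
    aEW.WriteObject(uint32_t(3));
  });
}
```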
--- a/mozglue/tests/TestBaseProfiler.cpp
+++ b/mozglue/tests/TestBaseProfiler.cpp
@@ -520,19 +520,18 @@ void TestBlocksRingBufferAPI() {
 
     // Push `1` directly.
     MOZ_RELEASE_ASSERT(ExtractBlockIndex(rb.PutObject(uint32_t(1))) == 1);
     //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     //   - S[4 |    int(1)    ]E
     VERIFY_START_END_DESTROYED(1, 6, 0);
 
     // Push `2` through EntryReserver, check output BlockIndex.
-    auto bi2 = rb.Put([](Maybe<BlocksRingBuffer::EntryReserver>&& aER) {
-      MOZ_RELEASE_ASSERT(aER.isSome());
-      return aER->WriteObject(uint32_t(2));
+    auto bi2 = rb.Put([](BlocksRingBuffer::EntryReserver aER) {
+      return aER.WriteObject(uint32_t(2));
     });
     static_assert(
         std::is_same<decltype(bi2), BlocksRingBuffer::BlockIndex>::value,
         "All index-returning functions should return a "
         "BlocksRingBuffer::BlockIndex");
     MOZ_RELEASE_ASSERT(ExtractBlockIndex(bi2) == 6);
     //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     //   - S[4 |    int(1)    ] [4 |    int(2)    ]E
@@ -597,19 +596,18 @@ void TestBlocksRingBufferAPI() {
     MOZ_RELEASE_ASSERT(bi2Next > bi2);
     MOZ_RELEASE_ASSERT(bi2Next >= bi2);
     MOZ_RELEASE_ASSERT(!(bi2Next == bi2));
     MOZ_RELEASE_ASSERT(!(bi2Next < bi2));
     MOZ_RELEASE_ASSERT(!(bi2Next <= bi2));
 
     // Push `3` through EntryReserver and then EntryWriter, check writer output
     // is returned to the initial caller.
-    auto put3 = rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aER) {
-      MOZ_RELEASE_ASSERT(aER.isSome());
-      return aER->Reserve(
+    auto put3 = rb.Put([&](BlocksRingBuffer::EntryReserver aER) {
+      return aER.Reserve(
           sizeof(uint32_t), [&](BlocksRingBuffer::EntryWriter aEW) {
             aEW.WriteObject(uint32_t(3));
             return float(ExtractBlockIndex(aEW.CurrentBlockIndex()));
           });
     });
     static_assert(std::is_same<decltype(put3), float>::value,
                   "Expect float as returned by callback.");
     MOZ_RELEASE_ASSERT(put3 == 11.0);
@@ -665,19 +663,18 @@ void TestBlocksRingBufferAPI() {
     rb.ReadEach([&](BlocksRingBuffer::EntryReader& aReader) {
       MOZ_RELEASE_ASSERT(aReader.ReadObject<uint32_t>() == ++count);
     });
     MOZ_RELEASE_ASSERT(count == 4);
 
     // Push 5 through EntryReserver then EntryWriter, no returns.
     // This will destroy the second entry.
     // Check that the EntryWriter can access bi4 but not bi2.
-    auto bi5_6 = rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aER) {
-      MOZ_RELEASE_ASSERT(aER.isSome());
-      return aER->Reserve(
+    auto bi5_6 = rb.Put([&](BlocksRingBuffer::EntryReserver aER) {
+      return aER.Reserve(
           sizeof(uint32_t), [&](BlocksRingBuffer::EntryWriter aEW) {
             aEW.WriteObject(uint32_t(5));
             MOZ_RELEASE_ASSERT(aEW.GetEntryAt(bi2).isNothing());
             MOZ_RELEASE_ASSERT(aEW.GetEntryAt(bi4).isSome());
             MOZ_RELEASE_ASSERT(aEW.GetEntryAt(bi4)->CurrentBlockIndex() == bi4);
             MOZ_RELEASE_ASSERT(aEW.GetEntryAt(bi4)->ReadObject<uint32_t>() ==
                                4);
             return MakePair(aEW.CurrentBlockIndex(), aEW.BlockEndIndex());
@@ -772,227 +769,16 @@ void TestBlocksRingBufferAPI() {
   }
   for (size_t i = MBSize * 2; i < MBSize * 3; ++i) {
     MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
   }
 
   printf("TestBlocksRingBufferAPI done\n");
 }
 
-void TestBlocksRingBufferUnderlyingBufferChanges() {
-  printf("TestBlocksRingBufferUnderlyingBufferChanges...\n");
-
-  // Out-of-session BlocksRingBuffer to start with.
-  BlocksRingBuffer rb;
-
-  // Block index to read at. Initially "null", but may be changed below.
-  BlocksRingBuffer::BlockIndex bi;
-
-  // Test all rb APIs when rb is out-of-session and therefore doesn't have an
-  // underlying buffer.
-  auto testOutOfSession = [&]() {
-    MOZ_RELEASE_ASSERT(rb.BufferLength().isNothing());
-    BlocksRingBuffer::State state = rb.GetState();
-    // When out-of-session, range start and ends are the same, and there are no
-    // pushed&cleared blocks.
-    MOZ_RELEASE_ASSERT(state.mRangeStart == state.mRangeEnd);
-    MOZ_RELEASE_ASSERT(state.mPushedBlockCount == 0);
-    MOZ_RELEASE_ASSERT(state.mClearedBlockCount == 0);
-    // `Put()` functions run the callback with `Nothing`.
-    int32_t ran = 0;
-    rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aMaybeEntryReserver) {
-      MOZ_RELEASE_ASSERT(aMaybeEntryReserver.isNothing());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.Put(1, [&](Maybe<BlocksRingBuffer::EntryWriter>&& aMaybeEntryWriter) {
-      MOZ_RELEASE_ASSERT(aMaybeEntryWriter.isNothing());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    // `PutFrom` won't do anything, and returns the null BlockIndex.
-    MOZ_RELEASE_ASSERT(rb.PutFrom(&ran, sizeof(ran)) ==
-                       BlocksRingBuffer::BlockIndex{});
-    MOZ_RELEASE_ASSERT(rb.PutObject(ran) == BlocksRingBuffer::BlockIndex{});
-    // `Read()` functions run the callback with `Nothing`.
-    ran = 0;
-    rb.Read([&](Maybe<BlocksRingBuffer::Reader>&& aMaybeReader) {
-      MOZ_RELEASE_ASSERT(aMaybeReader.isNothing());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadAt(BlocksRingBuffer::BlockIndex{},
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing());
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadAt(bi,
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing());
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    // `ReadEach` shouldn't run the callback (nothing to read).
-    rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-  };
-
-  // As `testOutOfSession()` attempts to modify the buffer, we run it twice to
-  // make sure one run doesn't influence the next one.
-  testOutOfSession();
-  testOutOfSession();
-
-  rb.ClearBefore(bi);
-  testOutOfSession();
-  testOutOfSession();
-
-  rb.Clear();
-  testOutOfSession();
-  testOutOfSession();
-
-  rb.Reset();
-  testOutOfSession();
-  testOutOfSession();
-
-  constexpr uint32_t MBSize = 32;
-
-  rb.Set(MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>());
-
-  constexpr bool EMPTY = true;
-  constexpr bool NOT_EMPTY = false;
-  // Test all rb APIs when rb has an underlying buffer.
-  auto testInSession = [&](bool aExpectEmpty) {
-    MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-    BlocksRingBuffer::State state = rb.GetState();
-    if (aExpectEmpty) {
-      MOZ_RELEASE_ASSERT(state.mRangeStart == state.mRangeEnd);
-      MOZ_RELEASE_ASSERT(state.mPushedBlockCount == 0);
-      MOZ_RELEASE_ASSERT(state.mClearedBlockCount == 0);
-    } else {
-      MOZ_RELEASE_ASSERT(state.mRangeStart < state.mRangeEnd);
-      MOZ_RELEASE_ASSERT(state.mPushedBlockCount > 0);
-      MOZ_RELEASE_ASSERT(state.mClearedBlockCount >= 0);
-    }
-    int32_t ran = 0;
-    rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aMaybeEntryReserver) {
-      MOZ_RELEASE_ASSERT(aMaybeEntryReserver.isSome());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    // The following three `Put...` will write three int32_t of value 1.
-    bi = rb.Put(sizeof(ran),
-                [&](Maybe<BlocksRingBuffer::EntryWriter>&& aMaybeEntryWriter) {
-                  MOZ_RELEASE_ASSERT(aMaybeEntryWriter.isSome());
-                  ++ran;
-                  aMaybeEntryWriter->WriteObject(ran);
-                  return aMaybeEntryWriter->CurrentBlockIndex();
-                });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    MOZ_RELEASE_ASSERT(rb.PutFrom(&ran, sizeof(ran)) !=
-                       BlocksRingBuffer::BlockIndex{});
-    MOZ_RELEASE_ASSERT(rb.PutObject(ran) != BlocksRingBuffer::BlockIndex{});
-    ran = 0;
-    rb.Read([&](Maybe<BlocksRingBuffer::Reader>&& aMaybeReader) {
-      MOZ_RELEASE_ASSERT(aMaybeReader.isSome());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadEach([&](BlocksRingBuffer::EntryReader& aEntryReader) {
-      MOZ_RELEASE_ASSERT(aEntryReader.RemainingBytes() == sizeof(ran));
-      MOZ_RELEASE_ASSERT(aEntryReader.ReadObject<decltype(ran)>() == 1);
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran >= 3);
-    ran = 0;
-    rb.ReadAt(BlocksRingBuffer::BlockIndex{},
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing());
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadAt(bi,
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing() == !bi);
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-  };
-
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-
-  rb.Set(MakePowerOfTwo<BlocksRingBuffer::Length, 32>());
-  MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-  rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-
-  rb.Reset();
-  testOutOfSession();
-  testOutOfSession();
-
-  uint8_t buffer[MBSize * 3];
-  for (size_t i = 0; i < MBSize * 3; ++i) {
-    buffer[i] = uint8_t('A' + i);
-  }
-
-  rb.Set(&buffer[MBSize], MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>());
-  MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-  rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-
-  rb.Reset();
-  testOutOfSession();
-  testOutOfSession();
-
-  int cleared = 0;
-  rb.Set(&buffer[MBSize], MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>(),
-         [&](auto&&) { ++cleared; });
-  MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-  rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-
-  // Remove the current underlying buffer, this should clear all entries.
-  rb.Reset();
-  // The above should clear all entries (2 tests, three entries each).
-  MOZ_RELEASE_ASSERT(cleared == 2 * 3);
-
-  // Check that only the provided stack-based sub-buffer was modified.
-  uint32_t changed = 0;
-  for (size_t i = MBSize; i < MBSize * 2; ++i) {
-    changed += (buffer[i] == uint8_t('A' + i)) ? 0 : 1;
-  }
-  // Expect at least 75% changes.
-  MOZ_RELEASE_ASSERT(changed >= MBSize * 6 / 8);
-
-  // Everything around the sub-buffer should be unchanged.
-  for (size_t i = 0; i < MBSize; ++i) {
-    MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
-  }
-  for (size_t i = MBSize * 2; i < MBSize * 3; ++i) {
-    MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
-  }
-
-  testOutOfSession();
-  testOutOfSession();
-
-  printf("TestBlocksRingBufferUnderlyingBufferChanges done\n");
-}
-
 void TestBlocksRingBufferThreading() {
   printf("TestBlocksRingBufferThreading...\n");
 
   // Entry destructor will store about-to-be-cleared value in `lastDestroyed`.
   std::atomic<int> lastDestroyed{0};
 
   constexpr uint32_t MBSize = 8192;
   uint8_t buffer[MBSize * 3];
@@ -1036,20 +822,19 @@ void TestBlocksRingBufferThreading() {
     threads[threadNo] = std::thread(
         [&](int aThreadNo) {
           ::SleepMilli(1);
           constexpr int pushCount = 1024;
           for (int push = 0; push < pushCount; ++push) {
             // Reserve as many bytes as the thread number (but at least enough
             // to store an int), and write an increasing int.
             rb.Put(std::max(aThreadNo, int(sizeof(push))),
-                   [&](Maybe<BlocksRingBuffer::EntryWriter>&& aEW) {
-                     MOZ_RELEASE_ASSERT(aEW.isSome());
-                     aEW->WriteObject(aThreadNo * 1000000 + push);
-                     *aEW += aEW->RemainingBytes();
+                   [&](BlocksRingBuffer::EntryWriter aEW) {
+                     aEW.WriteObject(aThreadNo * 1000000 + push);
+                     aEW += aEW.RemainingBytes();
                    });
           }
         },
         threadNo);
   }
 
   // Wait for all writer threads to die.
   for (auto&& thread : threads) {
@@ -1112,17 +897,16 @@ void TestProfiler() {
   // ::SleepMilli(10000);
 
   // Test dependencies.
   TestPowerOfTwoMask();
   TestPowerOfTwo();
   TestLEB128();
   TestModuloBuffer();
   TestBlocksRingBufferAPI();
-  TestBlocksRingBufferUnderlyingBufferChanges();
   TestBlocksRingBufferThreading();
 
   {
     printf("profiler_init()...\n");
     AUTO_BASE_PROFILER_INIT;
 
     MOZ_RELEASE_ASSERT(!baseprofiler::profiler_is_active());
     MOZ_RELEASE_ASSERT(!baseprofiler::profiler_thread_is_being_profiled());