Bug 1341006 - Use a sparse representation for atom mark bitmaps in zones, r=jonco.
authorBrian Hackett <bhackett1024@gmail.com>
Thu, 23 Feb 2017 08:13:33 -0700
changeset 344637 5b49024a794b6e3148fa6aa1d92101f9e0f37015
parent 344636 25ce82ad3b4ba1d6f8c72526f18c58b95b304bd8
child 344638 37a4221a05122c908f37c24f40bc7bc4946a151f
push id31414
push usercbook@mozilla.com
push dateFri, 24 Feb 2017 10:47:41 +0000
treeherdermozilla-central@be661bae6cb9 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjonco
bugs1341006
milestone54.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1341006 - Use a sparse representation for atom mark bitmaps in zones, r=jonco.
js/src/ds/Bitmap.cpp
js/src/ds/Bitmap.h
js/src/gc/AtomMarking.cpp
js/src/gc/AtomMarking.h
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/jsgc.cpp
js/src/moz.build
new file mode 100644
--- /dev/null
+++ b/js/src/ds/Bitmap.cpp
@@ -0,0 +1,130 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ds/Bitmap.h"
+
+using namespace js;
+
+// Free every heap-allocated BitBlock owned by this bitmap. The
+// initialized() check guards against a bitmap whose init() was never run.
+SparseBitmap::~SparseBitmap()
+{
+    if (data.initialized()) {
+        for (Data::Range r(data.all()); !r.empty(); r.popFront())
+            js_delete(r.front().value());
+    }
+}
+
+// Measure malloc'ed memory held by this bitmap: the hash table storage
+// plus each heap-allocated BitBlock it owns.
+size_t
+SparseBitmap::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf)
+{
+    size_t size = data.sizeOfExcludingThis(mallocSizeOf);
+    for (Data::Range r(data.all()); !r.empty(); r.popFront())
+        size += mallocSizeOf(r.front().value());
+    return size;
+}
+
+// Return the block with index |blockId|, creating and zeroing it if it does
+// not exist yet. Allocation failure crashes (OOM-unsafe region) rather than
+// propagating an error to callers.
+SparseBitmap::BitBlock&
+SparseBitmap::getOrCreateBlock(size_t blockId)
+{
+    Data::AddPtr p = data.lookupForAdd(blockId);
+    if (!p) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        BitBlock* block = js_new<BitBlock>();
+        if (!block || !data.add(p, blockId, block))
+            oomUnsafe.crash("Bitmap OOM");
+        // mozilla::Array does not zero-initialize its storage.
+        PodZero(block);
+    }
+    return *p->value();
+}
+
+// Set a single bit, allocating its containing block on demand (crashes on
+// OOM via getOrCreateBlock).
+void
+SparseBitmap::setBit(size_t bit)
+{
+    size_t word = bit / JS_BITS_PER_WORD;
+    size_t blockWord = blockStartWord(word);
+    BitBlock& block = getOrCreateBlock(blockWord / WordsInBlock);
+    block[word - blockWord] |= uintptr_t(1) << (bit % JS_BITS_PER_WORD);
+}
+
+// Look up an existing block by index; returns null if no bit in the block
+// has ever been set.
+SparseBitmap::BitBlock*
+SparseBitmap::getBlock(size_t blockId) const
+{
+    Data::Ptr p = data.lookup(blockId);
+    return p ? p->value() : nullptr;
+}
+
+// Test a single bit. Bits in blocks that were never allocated read as zero.
+bool
+SparseBitmap::getBit(size_t bit) const
+{
+    size_t word = bit / JS_BITS_PER_WORD;
+    size_t blockWord = blockStartWord(word);
+
+    BitBlock* block = getBlock(blockWord / WordsInBlock);
+    if (block)
+        return (*block)[word - blockWord] & (uintptr_t(1) << (bit % JS_BITS_PER_WORD));
+    return false;
+}
+
+// Intersect this bitmap with the dense bitmap |other|, deleting any block
+// whose intersected words all become zero.
+// NOTE(review): words of a block lying past the end of |other| are neither
+// cleared nor folded into |anySet|; this assumes callers never set bits
+// beyond |other|'s length (bitwiseOrInto asserts the same invariant in
+// debug builds) — confirm for any new caller.
+void
+SparseBitmap::bitwiseAndWith(const DenseBitmap& other)
+{
+    for (Data::Enum e(data); !e.empty(); e.popFront()) {
+        BitBlock& block = *e.front().value();
+        size_t blockWord = e.front().key() * WordsInBlock;
+        bool anySet = false;
+        size_t numWords = wordIntersectCount(blockWord, other);
+        for (size_t i = 0; i < numWords; i++) {
+            block[i] &= other.word(blockWord + i);
+            anySet |= !!block[i];
+        }
+        if (!anySet) {
+            js_delete(&block);
+            e.removeFront();
+        }
+    }
+}
+
+// Union another sparse bitmap into this one, allocating blocks as needed
+// (crashes on OOM via getOrCreateBlock).
+void
+SparseBitmap::bitwiseOrWith(const SparseBitmap& other)
+{
+    for (Data::Range r(other.data.all()); !r.empty(); r.popFront()) {
+        const BitBlock& otherBlock = *r.front().value();
+        BitBlock& block = getOrCreateBlock(r.front().key());
+        for (size_t i = 0; i < WordsInBlock; i++)
+            block[i] |= otherBlock[i];
+    }
+}
+
+// Union this bitmap into the dense bitmap |other|. Only words inside
+// |other|'s range are written; debug builds assert that no bit is set past
+// that range, so nothing is silently dropped.
+void
+SparseBitmap::bitwiseOrInto(DenseBitmap& other) const
+{
+    for (Data::Range r(data.all()); !r.empty(); r.popFront()) {
+        BitBlock& block = *r.front().value();
+        size_t blockWord = r.front().key() * WordsInBlock;
+        size_t numWords = wordIntersectCount(blockWord, other);
+#ifdef DEBUG
+        // Any words out of range in other should be zero in this bitmap.
+        for (size_t i = numWords; i < WordsInBlock; i++)
+            MOZ_ASSERT(!block[i]);
+#endif
+        for (size_t i = 0; i < numWords; i++)
+            other.word(blockWord + i) |= block[i];
+    }
+}
+
+// OR |numWords| words of this bitmap, starting at |wordStart|, into
+// |target|. The whole range must fall within one BitBlock; an absent block
+// contributes only zeros, so |target| is left untouched.
+void
+SparseBitmap::bitwiseOrRangeInto(size_t wordStart, size_t numWords, uintptr_t* target) const
+{
+    size_t blockWord = blockStartWord(wordStart);
+
+    // We only support using a single bit block in this API.
+    MOZ_ASSERT(numWords && (blockWord == blockStartWord(wordStart + numWords - 1)));
+
+    BitBlock* block = getBlock(blockWord / WordsInBlock);
+    if (block) {
+        for (size_t i = 0; i < numWords; i++)
+            target[i] |= (*block)[wordStart - blockWord + i];
+    }
+}
new file mode 100644
--- /dev/null
+++ b/js/src/ds/Bitmap.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_Bitmap_h
+#define ds_Bitmap_h
+
+#include <algorithm>
+
+#include "jsalloc.h"
+
+#include "js/HashTable.h"
+#include "js/Vector.h"
+
+// This file provides two classes for representing bitmaps.
+//
+// DenseBitmap is an array of words of bits, with size linear in the maximum
+// bit which has been set on it.
+//
+// SparseBitmap provides a reasonably simple, reasonably efficient (in time and
+// space) implementation of a sparse bitmap. The basic representation is a hash
+// table whose entries are fixed length malloc'ed blocks of bits.
+
+namespace js {
+
+// A dense bitmap: a flat vector of words whose size is fixed up front by
+// ensureSpace().
+class DenseBitmap
+{
+    typedef Vector<uintptr_t, 0, SystemAllocPolicy> Data;
+    Data data;
+
+  public:
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+        return data.sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    // Allocate |numWords| zeroed words. May only be called on an empty
+    // bitmap; returns false on allocation failure.
+    bool ensureSpace(size_t numWords) {
+        MOZ_ASSERT(data.empty());
+        return data.appendN(0, numWords);
+    }
+
+    size_t numWords() const { return data.length(); }
+    uintptr_t word(size_t i) const { return data[i]; }
+    uintptr_t& word(size_t i) { return data[i]; }
+
+    // Overwrite |numWords| words starting at |wordStart| with |source|.
+    // The range must already be within the allocated words.
+    void copyBitsFrom(size_t wordStart, size_t numWords, uintptr_t* source) {
+        MOZ_ASSERT(wordStart + numWords <= data.length());
+        mozilla::PodCopy(&data[wordStart], source, numWords);
+    }
+
+    // OR |numWords| words starting at |wordStart| into |target|. The caller
+    // guarantees the range is within this bitmap.
+    void bitwiseOrRangeInto(size_t wordStart, size_t numWords, uintptr_t* target) const {
+        for (size_t i = 0; i < numWords; i++)
+            target[i] |= data[wordStart + i];
+    }
+};
+
+// A sparse bitmap: a hash table from block indexes to fixed-size,
+// heap-allocated blocks of bits. Only blocks containing a set bit (or that
+// once did) occupy memory.
+class SparseBitmap
+{
+    // The number of words of bits to use for each block mainly affects the
+    // memory usage of the bitmap. To minimize overhead, bitmaps which are
+    // expected to be fairly dense should have a large block size, and bitmaps
+    // which are expected to be very sparse should have a small block size.
+    static const size_t WordsInBlock = 4096 / sizeof(uintptr_t);
+
+    typedef mozilla::Array<uintptr_t, WordsInBlock> BitBlock;
+    typedef HashMap<size_t, BitBlock*, DefaultHasher<size_t>, SystemAllocPolicy> Data;
+    Data data;
+
+    // Return the first word of the block containing |word|. WordsInBlock is
+    // a power of two, so this is a simple mask.
+    static size_t blockStartWord(size_t word) {
+        return word & ~(WordsInBlock - 1);
+    }
+
+    // Return the number of words in a BitBlock starting at |blockWord| which
+    // are in |other|.
+    static size_t wordIntersectCount(size_t blockWord, const DenseBitmap& other) {
+        // Guard the subtraction explicitly: if |blockWord| is at or past the
+        // end of |other| the unsigned subtraction would underflow, and the
+        // conversion of that huge size_t to a (hopefully negative) long is
+        // implementation-defined. The cast on WordsInBlock also avoids
+        // odr-using the in-class constant through std::min's references.
+        if (blockWord >= other.numWords())
+            return 0;
+        return std::min((size_t)WordsInBlock, other.numWords() - blockWord);
+    }
+
+    BitBlock* getBlock(size_t blockId) const;
+    BitBlock& getOrCreateBlock(size_t blockId);
+
+  public:
+    bool init() { return data.init(); }
+    ~SparseBitmap();
+
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+    // Set or test a single bit.
+    void setBit(size_t bit);
+    bool getBit(size_t bit) const;
+
+    void bitwiseAndWith(const DenseBitmap& other);
+    void bitwiseOrWith(const SparseBitmap& other);
+    void bitwiseOrInto(DenseBitmap& other) const;
+
+    // Currently, this API only supports a range of words that is in a single bit block.
+    void bitwiseOrRangeInto(size_t wordStart, size_t numWords, uintptr_t* target) const;
+};
+
+} // namespace js
+
+#endif // ds_Bitmap_h
--- a/js/src/gc/AtomMarking.cpp
+++ b/js/src/gc/AtomMarking.cpp
@@ -39,39 +39,16 @@ namespace gc {
 //
 // Each arena in the atoms zone has an atomBitmapStart() value indicating the
 // word index into the bitmap of the first thing in the arena. Each arena uses
 // ArenaBitmapWords of data to store its bitmap, which uses the same
 // representation as chunk mark bitmaps: one bit is allocated per Cell, with
 // bits for space between things being unused when things are larger than a
 // single Cell.
 
-static inline void
-SetBit(uintptr_t* bitmap, size_t bit)
-{
-    bitmap[bit / JS_BITS_PER_WORD] |= uintptr_t(1) << (bit % JS_BITS_PER_WORD);
-}
-
-static inline bool
-GetBit(uintptr_t* bitmap, size_t bit)
-{
-    return bitmap[bit / JS_BITS_PER_WORD] & (uintptr_t(1) << (bit % JS_BITS_PER_WORD));
-}
-
-static inline bool
-EnsureBitmapLength(AtomMarkingRuntime::Bitmap& bitmap, size_t nwords)
-{
-    if (nwords > bitmap.length()) {
-        size_t needed = nwords - bitmap.length();
-        if (needed)
-            return bitmap.appendN(0, needed);
-    }
-    return true;
-}
-
 void
 AtomMarkingRuntime::registerArena(Arena* arena)
 {
     MOZ_ASSERT(arena->getThingSize() != 0);
     MOZ_ASSERT(arena->getThingSize() % CellSize == 0);
     MOZ_ASSERT(arena->zone->isAtomsZone());
     MOZ_ASSERT(arena->zone->runtimeFromAnyThread()->currentThreadHasExclusiveAccess());
 
@@ -93,101 +70,83 @@ AtomMarkingRuntime::unregisterArena(Aren
 {
     MOZ_ASSERT(arena->zone->isAtomsZone());
 
     // Leak these atom bits if we run out of memory.
     mozilla::Unused << freeArenaIndexes.ref().emplaceBack(arena->atomBitmapStart());
 }
 
 bool
-AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap)
+AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime, DenseBitmap& bitmap)
 {
     MOZ_ASSERT(runtime->currentThreadHasExclusiveAccess());
 
-    MOZ_ASSERT(bitmap.empty());
-    if (!EnsureBitmapLength(bitmap, allocatedWords))
+    if (!bitmap.ensureSpace(allocatedWords))
         return false;
 
     Zone* atomsZone = runtime->unsafeAtomsCompartment()->zone();
     for (auto thingKind : AllAllocKinds()) {
         for (ArenaIter aiter(atomsZone, thingKind); !aiter.done(); aiter.next()) {
             Arena* arena = aiter.get();
             uintptr_t* chunkWords = arena->chunk()->bitmap.arenaBits(arena);
-            uintptr_t* bitmapWords = &bitmap[arena->atomBitmapStart()];
-            mozilla::PodCopy(bitmapWords, chunkWords, ArenaBitmapWords);
+            bitmap.copyBitsFrom(arena->atomBitmapStart(), ArenaBitmapWords, chunkWords);
         }
     }
 
     return true;
 }
 
 void
-AtomMarkingRuntime::updateZoneBitmap(Zone* zone, const Bitmap& bitmap)
+AtomMarkingRuntime::updateZoneBitmap(Zone* zone, const DenseBitmap& bitmap)
 {
     if (zone->isAtomsZone())
         return;
 
-    // |bitmap| was produced by computeBitmapFromChunkMarkBits, so it should
-    // have the maximum possible size.
-    MOZ_ASSERT(zone->markedAtoms().length() <= bitmap.length());
-
     // Take the bitwise and between the two mark bitmaps to get the best new
     // overapproximation we can. |bitmap| might include bits that are not in
     // the zone's mark bitmap, if additional zones were collected by the GC.
-    for (size_t i = 0; i < zone->markedAtoms().length(); i++)
-        zone->markedAtoms()[i] &= bitmap[i];
+    zone->markedAtoms().bitwiseAndWith(bitmap);
 }
 
 // Set any bits in the chunk mark bitmaps for atoms which are marked in bitmap.
+template <typename Bitmap>
 static void
-AddBitmapToChunkMarkBits(JSRuntime* runtime, AtomMarkingRuntime::Bitmap& bitmap)
+AddBitmapToChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap)
 {
     // Make sure that by copying the mark bits for one arena in word sizes we
     // do not affect the mark bits for other arenas.
     static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
                   "ArenaBitmapWords must evenly divide ArenaBitmapBits");
 
     Zone* atomsZone = runtime->unsafeAtomsCompartment()->zone();
     for (auto thingKind : AllAllocKinds()) {
         for (ArenaIter aiter(atomsZone, thingKind); !aiter.done(); aiter.next()) {
             Arena* arena = aiter.get();
             uintptr_t* chunkWords = arena->chunk()->bitmap.arenaBits(arena);
-
-            // The bitmap might not be long enough, in which case remaining
-            // bits are implicitly zero.
-            if (bitmap.length() <= arena->atomBitmapStart())
-                continue;
-            MOZ_ASSERT(bitmap.length() >= arena->atomBitmapStart() + ArenaBitmapWords);
-
-            uintptr_t* bitmapWords = &bitmap[arena->atomBitmapStart()];
-            for (size_t i = 0; i < ArenaBitmapWords; i++)
-                chunkWords[i] |= bitmapWords[i];
+            bitmap.bitwiseOrRangeInto(arena->atomBitmapStart(), ArenaBitmapWords, chunkWords);
         }
     }
 }
 
 void
 AtomMarkingRuntime::updateChunkMarkBits(JSRuntime* runtime)
 {
     MOZ_ASSERT(runtime->currentThreadHasExclusiveAccess());
 
     // Try to compute a simple union of the zone atom bitmaps before updating
     // the chunk mark bitmaps. If this allocation fails then fall back to
     // updating the chunk mark bitmaps separately for each zone.
-    Bitmap markedUnion;
-    if (EnsureBitmapLength(markedUnion, allocatedWords)) {
+    DenseBitmap markedUnion;
+    if (markedUnion.ensureSpace(allocatedWords)) {
         for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
             // We only need to update the chunk mark bits for zones which were
             // not collected in the current GC. Atoms which are referenced by
             // collected zones have already been marked.
-            if (!zone->isCollectingFromAnyThread()) {
-                MOZ_ASSERT(zone->markedAtoms().length() <= allocatedWords);
-                for (size_t i = 0; i < zone->markedAtoms().length(); i++)
-                    markedUnion[i] |= zone->markedAtoms()[i];
-            }
+            if (!zone->isCollectingFromAnyThread())
+                zone->markedAtoms().bitwiseOrInto(markedUnion);
         }
         AddBitmapToChunkMarkBits(runtime, markedUnion);
     } else {
         for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
             if (!zone->isCollectingFromAnyThread())
                 AddBitmapToChunkMarkBits(runtime, zone->markedAtoms());
         }
     }
@@ -220,24 +179,19 @@ AtomMarkingRuntime::markAtom(JSContext* 
     if (!thing || !cx->zone())
         return;
     MOZ_ASSERT(!cx->zone()->isAtomsZone());
 
     if (ThingIsPermanent(thing) || !thing->zoneFromAnyThread()->isAtomsZone())
         return;
 
     size_t bit = GetAtomBit(thing);
+    MOZ_ASSERT(bit / JS_BITS_PER_WORD < allocatedWords);
 
-    {
-        AutoEnterOOMUnsafeRegion oomUnsafe;
-        if (!EnsureBitmapLength(cx->zone()->markedAtoms(), allocatedWords))
-            oomUnsafe.crash("Atom bitmap OOM");
-    }
-
-    SetBit(cx->zone()->markedAtoms().begin(), bit);
+    cx->zone()->markedAtoms().setBit(bit);
 
     if (!cx->helperThread()) {
         // Trigger a read barrier on the atom, in case there is an incremental
         // GC in progress. This is necessary if the atom is being marked
         // because a reference to it was obtained from another zone which is
         // not being collected by the incremental GC.
         TenuredCell::readBarrier(thing);
     }
@@ -267,28 +221,17 @@ AtomMarkingRuntime::markAtomValue(JSCont
             markAtom(cx, &thing->asTenured());
     }
 }
 
 void
 AtomMarkingRuntime::adoptMarkedAtoms(Zone* target, Zone* source)
 {
     MOZ_ASSERT(target->runtimeFromAnyThread()->currentThreadHasExclusiveAccess());
-
-    Bitmap* targetBitmap = &target->markedAtoms();
-    Bitmap* sourceBitmap = &source->markedAtoms();
-    if (targetBitmap->length() < sourceBitmap->length())
-        std::swap(targetBitmap, sourceBitmap);
-    for (size_t i = 0; i < sourceBitmap->length(); i++)
-        (*targetBitmap)[i] |= (*sourceBitmap)[i];
-
-    if (targetBitmap != &target->markedAtoms())
-        target->markedAtoms() = Move(source->markedAtoms());
-    else
-        source->markedAtoms().clear();
+    target->markedAtoms().bitwiseOrWith(source->markedAtoms());
 }
 
 #ifdef DEBUG
 
 bool
 AtomMarkingRuntime::atomIsMarked(Zone* zone, Cell* thingArg)
 {
     if (!thingArg || IsInsideNursery(thingArg))
@@ -304,19 +247,17 @@ AtomMarkingRuntime::atomIsMarked(Zone* z
     JS::TraceKind kind = thing->getTraceKind();
     if (kind == JS::TraceKind::String) {
         JSAtom* atom = static_cast<JSAtom*>(thing);
         if (AtomIsPinnedInRuntime(zone->runtimeFromAnyThread(), atom))
             return true;
     }
 
     size_t bit = GetAtomBit(thing);
-    if (bit >= zone->markedAtoms().length() * JS_BITS_PER_WORD)
-        return false;
-    return GetBit(zone->markedAtoms().begin(), bit);
+    return zone->markedAtoms().getBit(bit);
 }
 
 bool
 AtomMarkingRuntime::idIsMarked(Zone* zone, jsid id)
 {
     if (JSID_IS_GCTHING(id))
         return atomIsMarked(zone, JSID_TO_GCTHING(id).asCell());
     return true;
--- a/js/src/gc/AtomMarking.h
+++ b/js/src/gc/AtomMarking.h
@@ -3,54 +3,53 @@
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef gc_AtomMarking_h
 #define gc_AtomMarking_h
 
 #include "NamespaceImports.h"
+#include "ds/Bitmap.h"
 #include "gc/Heap.h"
 #include "threading/ProtectedData.h"
 
 namespace js {
 namespace gc {
 
 // This class manages state used for marking atoms during GCs.
 // See AtomMarking.cpp for details.
 class AtomMarkingRuntime
 {
     // Unused arena atom bitmap indexes. Protected by the GC lock.
     js::ExclusiveAccessLockOrGCTaskData<Vector<size_t, 0, SystemAllocPolicy>> freeArenaIndexes;
 
+  public:
     // The extent of all allocated and free words in atom mark bitmaps.
     // This monotonically increases and may be read from without locking.
     mozilla::Atomic<size_t> allocatedWords;
 
-  public:
-    typedef Vector<uintptr_t, 0, SystemAllocPolicy> Bitmap;
-
     AtomMarkingRuntime()
       : allocatedWords(0)
     {}
 
     // Mark an arena as holding things in the atoms zone.
     void registerArena(Arena* arena);
 
     // Mark an arena as no longer holding things in the atoms zone.
     void unregisterArena(Arena* arena);
 
     // Fill |bitmap| with an atom marking bitmap based on the things that are
     // currently marked in the chunks used by atoms zone arenas. This returns
     // false on an allocation failure (but does not report an exception).
-    bool computeBitmapFromChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap);
+    bool computeBitmapFromChunkMarkBits(JSRuntime* runtime, DenseBitmap& bitmap);
 
     // Update the atom marking bitmap in |zone| according to another
     // overapproximation of the reachable atoms in |bitmap|.
-    void updateZoneBitmap(Zone* zone, const Bitmap& bitmap);
+    void updateZoneBitmap(Zone* zone, const DenseBitmap& bitmap);
 
     // Set any bits in the chunk mark bitmaps for atoms which are marked in any
     // zone in the runtime.
     void updateChunkMarkBits(JSRuntime* runtime);
 
     // Mark an atom or id as being newly reachable by the context's zone.
     void markAtom(JSContext* cx, TenuredCell* thing);
     void markId(JSContext* cx, jsid id);
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -86,17 +86,18 @@ Zone::~Zone()
 }
 
 bool Zone::init(bool isSystemArg)
 {
     isSystem = isSystemArg;
     return uniqueIds().init() &&
            gcZoneGroupEdges().init() &&
            gcWeakKeys().init() &&
-           typeDescrObjects().init();
+           typeDescrObjects().init() &&
+           markedAtoms().init();
 }
 
 void
 Zone::setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit)
 {
     if (updateJit == UpdateJit && needs != jitUsingBarriers_) {
         jit::ToggleBarriers(this, needs);
         jitUsingBarriers_ = needs;
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -420,19 +420,19 @@ struct Zone : public JS::shadow::Zone,
     }
     bool isTooMuchMalloc() const {
         return gcMallocCounter.isTooMuchMalloc() ||
                jitCodeCounter.isTooMuchMalloc();
     }
 
   private:
     // Bitmap of atoms marked by this zone.
-    js::ZoneGroupOrGCTaskData<js::gc::AtomMarkingRuntime::Bitmap> markedAtoms_;
+    js::ZoneGroupOrGCTaskData<js::SparseBitmap> markedAtoms_;
   public:
-    js::gc::AtomMarkingRuntime::Bitmap& markedAtoms() { return markedAtoms_.ref(); }
+    js::SparseBitmap& markedAtoms() { return markedAtoms_.ref(); }
 
     // Track heap usage under this Zone.
     js::gc::HeapUsage usage;
 
     // Thresholds used to trigger GC.
     js::gc::ZoneHeapThreshold threshold;
 
     // Amount of data to allocate before triggering a new incremental slice for
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -4892,17 +4892,17 @@ MAKE_GC_SWEEP_TASK(SweepInitialShapesTas
 MAKE_GC_SWEEP_TASK(SweepObjectGroupsTask);
 MAKE_GC_SWEEP_TASK(SweepRegExpsTask);
 MAKE_GC_SWEEP_TASK(SweepMiscTask);
 #undef MAKE_GC_SWEEP_TASK
 
 /* virtual */ void
 SweepAtomsTask::run()
 {
-    AtomMarkingRuntime::Bitmap marked;
+    DenseBitmap marked;
     if (runtime()->gc.atomMarking.computeBitmapFromChunkMarkBits(runtime(), marked)) {
         for (GCZonesIter zone(runtime()); !zone.done(); zone.next())
             runtime()->gc.atomMarking.updateZoneBitmap(zone, marked);
     } else {
         // Ignore OOM in computeBitmapFromChunkMarkBits. The updateZoneBitmap
         // call can only remove atoms from the zone bitmap, so it is
         // conservative to just not call it.
     }
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -171,16 +171,17 @@ UNIFIED_SOURCES += [
     'builtin/ReflectParse.cpp',
     'builtin/SIMD.cpp',
     'builtin/SymbolObject.cpp',
     'builtin/TestingFunctions.cpp',
     'builtin/TypedObject.cpp',
     'builtin/WeakMapObject.cpp',
     'builtin/WeakSetObject.cpp',
     'devtools/sharkctl.cpp',
+    'ds/Bitmap.cpp',
     'ds/LifoAlloc.cpp',
     'ds/MemoryProtectionExceptionHandler.cpp',
     'frontend/BytecodeCompiler.cpp',
     'frontend/BytecodeEmitter.cpp',
     'frontend/FoldConstants.cpp',
     'frontend/NameFunctions.cpp',
     'frontend/ParseNode.cpp',
     'frontend/TokenStream.cpp',