Bug 1362977 - Rename CellSize to CellAlignBytes and improve the naming of related constants r=sfink
author      Jon Coppeard <jcoppeard@mozilla.com>
date        Tue, 09 May 2017 11:38:32 +0100
changeset   405443 ad92edd96284d2828e300770641a9c3912a6a8aa
parent      405442 a47b645bacdca7cf4ef2d96d80e4712f46c09f1f
child       405444 15d644ff559fac529ea115f4c7c32e01a86a0268
push id     7391
push user   mtabara@mozilla.com
push date   Mon, 12 Jun 2017 13:08:53 +0000
reviewers   sfink
bugs        1362977
milestone   55.0a1
js/public/HeapAPI.h
js/public/RootingAPI.h
js/src/gc/Allocator.cpp
js/src/gc/AtomMarking-inl.h
js/src/gc/AtomMarking.cpp
js/src/gc/Heap.h
js/src/gc/Marking.cpp
js/src/gc/Marking.h
js/src/gc/Nursery.cpp
js/src/gc/StoreBuffer-inl.h
js/src/gc/StoreBuffer.h
js/src/gc/Verifier.cpp
js/src/jit/Ion.cpp
js/src/jit/MacroAssembler.cpp
js/src/jsgc.cpp
js/src/jspropertytree.cpp
js/src/jsscript.h
js/src/vm/Shape.h
js/src/vm/String.h
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -31,19 +31,21 @@ const size_t ArenaMask = ArenaSize - 1;
 #ifdef JS_GC_SMALL_CHUNK_SIZE
 const size_t ChunkShift = 18;
 #else
 const size_t ChunkShift = 20;
 #endif
 const size_t ChunkSize = size_t(1) << ChunkShift;
 const size_t ChunkMask = ChunkSize - 1;
 
-const size_t CellShift = 3;
-const size_t CellSize = size_t(1) << CellShift;
-const size_t CellMask = CellSize - 1;
+const size_t CellAlignShift = 3;
+const size_t CellAlignBytes = size_t(1) << CellAlignShift;
+const size_t CellAlignMask = CellAlignBytes - 1;
+
+const size_t CellBytesPerMarkBit = CellAlignBytes;
 
 /* These are magic constants derived from actual offsets in gc/Heap.h. */
 #ifdef JS_GC_SMALL_CHUNK_SIZE
 const size_t ChunkMarkBitmapOffset = 258104;
 const size_t ChunkMarkBitmapBits = 31744;
 #else
 const size_t ChunkMarkBitmapOffset = 1032352;
 const size_t ChunkMarkBitmapBits = 129024;
@@ -298,17 +300,17 @@ GetGCThingMarkBitmap(const uintptr_t add
     return reinterpret_cast<uintptr_t*>(bmap_addr);
 }
 
 static MOZ_ALWAYS_INLINE void
 GetGCThingMarkWordAndMask(const uintptr_t addr, uint32_t color,
                           uintptr_t** wordp, uintptr_t* maskp)
 {
     MOZ_ASSERT(addr);
-    const size_t bit = (addr & js::gc::ChunkMask) / js::gc::CellSize + color;
+    const size_t bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit + color;
     MOZ_ASSERT(bit < js::gc::ChunkMarkBitmapBits);
     uintptr_t* bitmap = GetGCThingMarkBitmap(addr);
     const uintptr_t nbits = sizeof(*bitmap) * CHAR_BIT;
     *maskp = uintptr_t(1) << (bit % nbits);
     *wordp = &bitmap[bit / nbits];
 }
 
 static MOZ_ALWAYS_INLINE JS::Zone*
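The renamed constants and the lookup above are easiest to follow with concrete numbers. Below is a minimal standalone sketch of the same word-and-mask computation, assuming the default (non-JS_GC_SMALL_CHUNK_SIZE) chunk size; ExampleMarkWordAndMask is a hypothetical name, not part of the API.

    // Standalone sketch of GetGCThingMarkWordAndMask with the renamed
    // constants; mirrors the default (1 MiB chunk) configuration.
    #include <cassert>
    #include <climits>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t ChunkShift = 20;
    constexpr size_t ChunkMask = (size_t(1) << ChunkShift) - 1;
    constexpr size_t CellAlignShift = 3;
    constexpr size_t CellBytesPerMarkBit = size_t(1) << CellAlignShift;

    // Hypothetical helper: map a cell address to its mark word and in-word mask.
    void ExampleMarkWordAndMask(uintptr_t addr, uint32_t color,
                                size_t* wordp, uintptr_t* maskp)
    {
        const size_t bit = (addr & ChunkMask) / CellBytesPerMarkBit + color;
        const size_t nbits = sizeof(uintptr_t) * CHAR_BIT;
        *maskp = uintptr_t(1) << (bit % nbits);
        *wordp = bit / nbits;
    }

    int main()
    {
        size_t w0, w1;
        uintptr_t m0, m1;
        // Two adjacent 8-byte-aligned cells map to adjacent mark bits.
        ExampleMarkWordAndMask(0x100000, 0, &w0, &m0);
        ExampleMarkWordAndMask(0x100008, 0, &w1, &m1);
        assert(w0 == w1 && m1 == m0 << 1);
        return 0;
    }
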
--- a/js/public/RootingAPI.h
+++ b/js/public/RootingAPI.h
@@ -722,17 +722,17 @@ namespace js {
 template <typename T>
 class alignas(8) DispatchWrapper
 {
     static_assert(JS::MapTypeToRootKind<T>::kind == JS::RootKind::Traceable,
                   "DispatchWrapper is intended only for usage with a Traceable");
 
     using TraceFn = void (*)(JSTracer*, T*, const char*);
     TraceFn tracer;
-    alignas(gc::CellSize) T storage;
+    alignas(gc::CellAlignBytes) T storage;
 
   public:
     template <typename U>
     MOZ_IMPLICIT DispatchWrapper(U&& initial)
       : tracer(&JS::GCPolicy<T>::trace),
         storage(mozilla::Forward<U>(initial))
     { }
 
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -29,17 +29,17 @@ js::Allocate(JSContext* cx, AllocKind ki
              const Class* clasp)
 {
     static_assert(mozilla::IsConvertible<T*, JSObject*>::value, "must be JSObject derived");
     MOZ_ASSERT(IsObjectAllocKind(kind));
     size_t thingSize = Arena::thingSize(kind);
 
     MOZ_ASSERT(thingSize == Arena::thingSize(kind));
     MOZ_ASSERT(thingSize >= sizeof(JSObject_Slots0));
-    static_assert(sizeof(JSObject_Slots0) >= CellSize,
+    static_assert(sizeof(JSObject_Slots0) >= MinCellSize,
                   "All allocations must be at least the allocator-imposed minimum size.");
 
     MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative() || clasp->isProxy());
 
     // Off-thread alloc cannot trigger GC or make runtime assertions.
     if (cx->helperThread()) {
         JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
         if (MOZ_UNLIKELY(allowGC && !obj))
@@ -126,17 +126,17 @@ GCRuntime::tryNewTenuredObject(JSContext
     return obj;
 }
 
 template <typename T, AllowGC allowGC /* = CanGC */>
 T*
 js::Allocate(JSContext* cx)
 {
     static_assert(!mozilla::IsConvertible<T*, JSObject*>::value, "must not be JSObject derived");
-    static_assert(sizeof(T) >= CellSize,
+    static_assert(sizeof(T) >= MinCellSize,
                   "All allocations must be at least the allocator-imposed minimum size.");
 
     AllocKind kind = MapTypeToFinalizeKind<T>::kind;
     size_t thingSize = sizeof(T);
     MOZ_ASSERT(thingSize == Arena::thingSize(kind));
 
     if (!cx->helperThread()) {
         if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind))
--- a/js/src/gc/AtomMarking-inl.h
+++ b/js/src/gc/AtomMarking-inl.h
@@ -13,17 +13,17 @@
 namespace js {
 namespace gc {
 
 inline size_t
 GetAtomBit(TenuredCell* thing)
 {
     MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
     Arena* arena = thing->arena();
-    size_t arenaBit = (reinterpret_cast<uintptr_t>(thing) - arena->address()) / CellSize;
+    size_t arenaBit = (reinterpret_cast<uintptr_t>(thing) - arena->address()) / CellBytesPerMarkBit;
     return arena->atomBitmapStart() * JS_BITS_PER_WORD + arenaBit;
 }
 
 inline bool
 ThingIsPermanent(JSAtom* atom)
 {
     return atom->isPermanentAtom();
 }
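To make the atom-bit arithmetic concrete: with one mark bit per CellBytesPerMarkBit (8) bytes and 64-bit bitmap words, a thing at a given byte offset inside an arena maps to a global bit index as sketched below. The offset and bitmap-start values are made up for illustration.

    // Hedged arithmetic sketch of GetAtomBit; all inputs are hypothetical.
    #include <cstddef>

    constexpr size_t CellBytesPerMarkBit = 8;
    constexpr size_t BitsPerWord = 64;          // JS_BITS_PER_WORD assumed

    // A thing at byte offset 48 in an arena whose atom bitmap starts at word 3:
    constexpr size_t arenaBit = 48 / CellBytesPerMarkBit;     // 6
    constexpr size_t atomBit = 3 * BitsPerWord + arenaBit;    // 198

    static_assert(atomBit == 198, "bit 198 of the atoms bitmap marks this thing");
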
--- a/js/src/gc/AtomMarking.cpp
+++ b/js/src/gc/AtomMarking.cpp
@@ -43,17 +43,17 @@ namespace gc {
 // representation as chunk mark bitmaps: one bit is allocated per Cell, with
 // bits for space between things being unused when things are larger than a
 // single Cell.
 
 void
 AtomMarkingRuntime::registerArena(Arena* arena)
 {
     MOZ_ASSERT(arena->getThingSize() != 0);
-    MOZ_ASSERT(arena->getThingSize() % CellSize == 0);
+    MOZ_ASSERT(arena->getThingSize() % CellAlignBytes == 0);
     MOZ_ASSERT(arena->zone->isAtomsZone());
     MOZ_ASSERT(arena->zone->runtimeFromAnyThread()->currentThreadHasExclusiveAccess());
 
     // We need to find a range of bits from the atoms bitmap for this arena.
 
     // Look for a free range of bits compatible with this arena.
     if (freeArenaIndexes.ref().length()) {
         arena->atomBitmapStart() = freeArenaIndexes.ref().popCopy();
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -320,34 +320,49 @@ class TenuredCell : public Cell
     // Default implementation for kinds that don't require fixup.
     void fixupAfterMovingGC() {}
 
 #ifdef DEBUG
     inline bool isAligned() const;
 #endif
 };
 
-/* Cells are aligned to CellShift, so the largest tagged null pointer is: */
-const uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;
+/* Cells are aligned to CellAlignShift, so the largest tagged null pointer is: */
+const uintptr_t LargestTaggedNullCellPointer = (1 << CellAlignShift) - 1;
+
+/*
+ * The minimum cell size ends up as twice the cell alignment because the mark
+ * bitmap contains one bit per CellBytesPerMarkBit bytes (which is equal to
+ * CellAlignBytes) and we need two mark bits per cell.
+ */
+const size_t MarkBitsPerCell = 2;
+const size_t MinCellSize = CellBytesPerMarkBit * MarkBitsPerCell;
 
 constexpr size_t
 DivideAndRoundUp(size_t numerator, size_t divisor) {
     return (numerator + divisor - 1) / divisor;
 }
 
-const size_t ArenaCellCount = ArenaSize / CellSize;
-static_assert(ArenaSize % CellSize == 0, "Arena size must be a multiple of cell size");
+static_assert(ArenaSize % CellAlignBytes == 0,
+              "Arena size must be a multiple of cell alignment");
 
 /*
- * The mark bitmap has one bit per each GC cell. For multi-cell GC things this
- * wastes space but allows to avoid expensive devisions by thing's size when
- * accessing the bitmap. In addition this allows to use some bits for colored
- * marking during the cycle GC.
+ * We sometimes use an index to refer to a cell in an arena. The index for a
+ * cell is found by dividing by the cell alignment, so not all indices refer
+ * to valid cells.
  */
-const size_t ArenaBitmapBits = ArenaCellCount;
+const size_t ArenaCellIndexBytes = CellAlignBytes;
+const size_t MaxArenaCellIndex = ArenaSize / CellAlignBytes;
+
+/*
+ * The mark bitmap has one bit for each possible cell start position. This
+ * wastes some space for larger GC things but allows us to avoid division by the
+ * cell's size when accessing the bitmap.
+ */
+const size_t ArenaBitmapBits = ArenaSize / CellBytesPerMarkBit;
 const size_t ArenaBitmapBytes = DivideAndRoundUp(ArenaBitmapBits, 8);
 const size_t ArenaBitmapWords = DivideAndRoundUp(ArenaBitmapBits, JS_BITS_PER_WORD);
 
 /*
  * A FreeSpan represents a contiguous sequence of free cells in an Arena. It
  * can take two forms.
  *
  * - In an empty span, |first| and |last| are both zero.
@@ -1114,17 +1129,17 @@ Arena::chunk() const
     return Chunk::fromAddress(address());
 }
 
 static void
 AssertValidColor(const TenuredCell* thing, uint32_t color)
 {
 #ifdef DEBUG
     Arena* arena = thing->arena();
-    MOZ_ASSERT(color < arena->getThingSize() / CellSize);
+    MOZ_ASSERT(color < arena->getThingSize() / CellBytesPerMarkBit);
 #endif
 }
 
 MOZ_ALWAYS_INLINE const TenuredCell&
 Cell::asTenured() const
 {
     MOZ_ASSERT(isTenured());
     return *static_cast<const TenuredCell*>(this);
@@ -1150,26 +1165,26 @@ Cell::runtimeFromAnyThread() const
 {
     return chunk()->trailer.runtime;
 }
 
 inline uintptr_t
 Cell::address() const
 {
     uintptr_t addr = uintptr_t(this);
-    MOZ_ASSERT(addr % CellSize == 0);
+    MOZ_ASSERT(addr % CellAlignBytes == 0);
     MOZ_ASSERT(Chunk::withinValidRange(addr));
     return addr;
 }
 
 Chunk*
 Cell::chunk() const
 {
     uintptr_t addr = uintptr_t(this);
-    MOZ_ASSERT(addr % CellSize == 0);
+    MOZ_ASSERT(addr % CellAlignBytes == 0);
     addr &= ~ChunkMask;
     return reinterpret_cast<Chunk*>(addr);
 }
 
 inline StoreBuffer*
 Cell::storeBuffer() const
 {
     return chunk()->trailer.storeBuffer;
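The relationships among the new constants in this file can be summarized in one self-checking snippet. This is a standalone recap of the values the patch defines, not part of the headers:

    // Recap of the renamed constants and the MinCellSize derivation.
    #include <cstddef>

    constexpr size_t CellAlignShift = 3;
    constexpr size_t CellAlignBytes = size_t(1) << CellAlignShift;   // 8
    constexpr size_t CellBytesPerMarkBit = CellAlignBytes;           // 8
    constexpr size_t MarkBitsPerCell = 2;                            // e.g. black + gray
    constexpr size_t MinCellSize = CellBytesPerMarkBit * MarkBitsPerCell;

    // Two mark bits per cell at one bit per 8 bytes forces cells >= 16 bytes.
    static_assert(MinCellSize == 2 * CellAlignBytes,
                  "minimum cell size is twice the cell alignment");
    static_assert(MinCellSize == 16, "smallest tenured GC thing is 16 bytes");
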
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -1940,17 +1940,17 @@ CheckSavedValueArray(const MarkStack::Sa
     MOZ_ASSERT(array.kind == HeapSlot::Slot || array.kind == HeapSlot::Element);
 }
 
 inline
 MarkStack::TaggedPtr::TaggedPtr(Tag tag, Cell* ptr)
   : bits(tag | uintptr_t(ptr))
 {
     MOZ_ASSERT(tag <= LastTag);
-    MOZ_ASSERT((uintptr_t(ptr) & CellMask) == 0);
+    MOZ_ASSERT((uintptr_t(ptr) & CellAlignMask) == 0);
 }
 
 inline MarkStack::Tag
 MarkStack::TaggedPtr::tag() const
 {
     auto tag = Tag(bits & TagMask);
     MOZ_ASSERT(tag <= LastTag);
     return tag;
@@ -2730,19 +2730,19 @@ TraceWholeCell(TenuringTracer& mover, ji
 {
     jitcode->traceChildren(&mover);
 }
 
 template <typename T>
 static void
 TraceBufferedCells(TenuringTracer& mover, Arena* arena, ArenaCellSet* cells)
 {
-    for (size_t i = 0; i < ArenaCellCount; i++) {
+    for (size_t i = 0; i < MaxArenaCellIndex; i++) {
         if (cells->hasCell(i)) {
-            auto cell = reinterpret_cast<T*>(uintptr_t(arena) + CellSize * i);
+            auto cell = reinterpret_cast<T*>(uintptr_t(arena) + ArenaCellIndexBytes * i);
             TraceWholeCell(mover, cell);
         }
     }
 }
 
 void
 js::gc::StoreBuffer::traceWholeCells(TenuringTracer& mover)
 {
--- a/js/src/gc/Marking.h
+++ b/js/src/gc/Marking.h
@@ -74,17 +74,17 @@ class MarkStack
         ScriptTag,
         TempRopeTag,
 
         LastTag = TempRopeTag
     };
 
     static const uintptr_t TagMask = 7;
     static_assert(TagMask >= uintptr_t(LastTag), "The tag mask must subsume the tags.");
-    static_assert(TagMask <= gc::CellMask, "The tag mask must be embeddable in a Cell*.");
+    static_assert(TagMask <= gc::CellAlignMask, "The tag mask must be embeddable in a Cell*.");
 
     class TaggedPtr
     {
         uintptr_t bits;
 
         Cell* ptr() const;
 
       public:
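The assertion above is what makes pointer tagging on the mark stack safe: cell alignment guarantees that the low CellAlignShift bits of a Cell* are zero, so a 3-bit tag fits there. A hedged standalone sketch of that packing, with hypothetical values:

    // Tag packing enabled by 8-byte cell alignment; illustrative only.
    #include <cstdint>

    constexpr uintptr_t TagMask = 7;
    constexpr uintptr_t CellAlignMask = 7;
    static_assert(TagMask <= CellAlignMask, "tags must fit in the alignment bits");

    // Pack a tag into the low bits of an aligned address, then recover both.
    constexpr uintptr_t tagBits(uintptr_t cellAddr, uintptr_t tag) {
        return cellAddr | tag;  // low bits of cellAddr are zero by alignment
    }
    static_assert((tagBits(0x1000, 5) & TagMask) == 5, "tag recovered");
    static_assert((tagBits(0x1000, 5) & ~TagMask) == 0x1000, "pointer recovered");
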
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -299,21 +299,21 @@ js::Nursery::allocateObject(JSContext* c
 
 void*
 js::Nursery::allocate(size_t size)
 {
     MOZ_ASSERT(isEnabled());
     MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
     MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
-    MOZ_ASSERT(position() % gc::CellSize == 0);
-    MOZ_ASSERT(size % gc::CellSize == 0);
+    MOZ_ASSERT(position() % CellAlignBytes == 0);
+    MOZ_ASSERT(size % CellAlignBytes == 0);
 
 #ifdef JS_GC_ZEAL
-    static const size_t CanarySize = (sizeof(Nursery::Canary) + CellSize - 1) & ~CellMask;
+    static const size_t CanarySize = (sizeof(Nursery::Canary) + CellAlignBytes - 1) & ~CellAlignMask;
     if (runtime()->gc.hasZealMode(ZealMode::CheckNursery))
         size += CanarySize;
 #endif
 
     if (currentEnd() < position() + size) {
         if (currentChunk_ + 1 == numChunks())
             return nullptr;
         setCurrentChunk(currentChunk_ + 1);
@@ -1012,18 +1012,18 @@ js::Nursery::updateNumChunksLocked(unsig
         chunks_[i] = NurseryChunk::fromChunk(newChunk);
         chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
     }
 }
 
 void
 js::Nursery::queueSweepAction(SweepThunk thunk, void* data)
 {
-    static_assert(sizeof(SweepAction) % CellSize == 0,
-                  "SweepAction size must be a multiple of cell size");
+    static_assert(sizeof(SweepAction) % CellAlignBytes == 0,
+                  "SweepAction size must be a multiple of cell alignment");
 
     MOZ_ASSERT(isEnabled());
 
     AutoEnterOOMUnsafeRegion oomUnsafe;
     auto action = reinterpret_cast<SweepAction*>(allocate(sizeof(SweepAction)));
     if (!action)
         oomUnsafe.crash("Nursery::queueSweepAction");
 
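The CanarySize expression above is the standard align-up idiom: add alignment minus one, then clear the low bits. A hedged standalone demonstration with CellAlignBytes == 8:

    #include <cstddef>

    constexpr size_t CellAlignBytes = 8;
    constexpr size_t CellAlignMask = CellAlignBytes - 1;

    // Round size up to the next multiple of the cell alignment.
    constexpr size_t AlignUp(size_t size) {
        return (size + CellAlignBytes - 1) & ~CellAlignMask;
    }

    static_assert(AlignUp(1) == 8,   "1 rounds up to 8");
    static_assert(AlignUp(8) == 8,   "aligned sizes are unchanged");
    static_assert(AlignUp(13) == 16, "13 rounds up to 16");
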
--- a/js/src/gc/StoreBuffer-inl.h
+++ b/js/src/gc/StoreBuffer-inl.h
@@ -14,37 +14,38 @@
 #include "gc/Heap-inl.h"
 
 namespace js {
 namespace gc {
 
 inline /* static */ size_t
 ArenaCellSet::getCellIndex(const TenuredCell* cell)
 {
-    MOZ_ASSERT((uintptr_t(cell) & ~ArenaMask) % CellSize == 0);
-    return (uintptr_t(cell) & ArenaMask) / CellSize;
+    uintptr_t cellOffset = uintptr_t(cell) & ArenaMask;
+    MOZ_ASSERT(cellOffset % ArenaCellIndexBytes == 0);
+    return cellOffset / ArenaCellIndexBytes;
 }
 
 inline /* static */ void
 ArenaCellSet::getWordIndexAndMask(size_t cellIndex, size_t* wordp, uint32_t* maskp)
 {
-    BitArray<ArenaCellCount>::getIndexAndMask(cellIndex, wordp, maskp);
+    BitArray<MaxArenaCellIndex>::getIndexAndMask(cellIndex, wordp, maskp);
 }
 
 inline bool
 ArenaCellSet::hasCell(size_t cellIndex) const
 {
-    MOZ_ASSERT(cellIndex < ArenaCellCount);
+    MOZ_ASSERT(cellIndex < MaxArenaCellIndex);
     return bits.get(cellIndex);
 }
 
 inline void
 ArenaCellSet::putCell(size_t cellIndex)
 {
-    MOZ_ASSERT(cellIndex < ArenaCellCount);
+    MOZ_ASSERT(cellIndex < MaxArenaCellIndex);
     bits.set(cellIndex);
 }
 
 inline void
 ArenaCellSet::check() const
 {
 #ifdef DEBUG
     bool bitsZero = bits.isAllClear();
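Concretely, getCellIndex and getWordIndexAndMask above split an arena-relative byte offset into a cell index, then a word index and in-word mask. The sketch below assumes a 4 KiB arena and 32-bit bitmap words (matching the uint32_t mask in the signature); the actual BitArray word width is an assumption here.

    // Hypothetical worked example of the cell-index split.
    #include <cstddef>
    #include <cstdint>

    constexpr size_t ArenaMask = 4096 - 1;      // assumed 4 KiB arenas
    constexpr size_t ArenaCellIndexBytes = 8;

    constexpr size_t cellOffset = 0x040;        // arena-relative byte offset
    constexpr size_t cellIndex = cellOffset / ArenaCellIndexBytes;  // 8
    constexpr size_t word = cellIndex / 32;                         // 0
    constexpr uint32_t mask = uint32_t(1) << (cellIndex % 32);      // 0x100

    static_assert(word == 0 && mask == 0x100,
                  "cell at offset 64 is bit 8 of word 0");
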
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -445,17 +445,17 @@ class ArenaCellSet
 
     // The arena this relates to.
     Arena* arena;
 
     // Pointer to next set forming a linked list.
     ArenaCellSet* next;
 
     // Bit vector for each possible cell start position.
-    BitArray<ArenaCellCount> bits;
+    BitArray<MaxArenaCellIndex> bits;
 
   public:
     explicit ArenaCellSet(Arena* arena);
 
     bool hasCell(const TenuredCell* cell) const {
         return hasCell(getCellIndex(cell));
     }
 
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -585,17 +585,17 @@ class CheckHeapTracer final : public Hea
 
 CheckHeapTracer::CheckHeapTracer(JSRuntime* rt)
   : HeapCheckTracerBase(rt, TraceWeakMapKeysValues)
 {}
 
 inline static bool
 IsValidGCThingPointer(Cell* cell)
 {
-    return (uintptr_t(cell) & CellMask) == 0;
+    return (uintptr_t(cell) & CellAlignMask) == 0;
 }
 
 void
 CheckHeapTracer::checkCell(Cell* cell)
 {
     if (!IsValidGCThingPointer(cell) || !IsGCThingValidAfterMovingGC(cell)) {
         failures++;
         fprintf(stderr, "Bad pointer %p\n", cell);
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -63,17 +63,17 @@
 #include "vm/Debugger-inl.h"
 #include "vm/EnvironmentObject-inl.h"
 #include "vm/Stack-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 // Assert that JitCode is gc::Cell aligned.
-JS_STATIC_ASSERT(sizeof(JitCode) % gc::CellSize == 0);
+JS_STATIC_ASSERT(sizeof(JitCode) % gc::CellAlignBytes == 0);
 
 static MOZ_THREAD_LOCAL(JitContext*) TlsJitContext;
 
 static JitContext*
 CurrentJitContext()
 {
     if (!TlsJitContext.init())
         return nullptr;
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -783,17 +783,17 @@ MacroAssembler::nurseryAllocate(Register
         return;
     }
 
     // No explicit check for nursery.isEnabled() is needed, as the comparison
     // with the nursery's end will always fail in such cases.
     CompileZone* zone = GetJitContext()->compartment->zone();
     int thingSize = int(gc::Arena::thingSize(allocKind));
     int totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
-    MOZ_ASSERT(totalSize % gc::CellSize == 0);
+    MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
     loadPtr(AbsoluteAddress(zone->addressOfNurseryPosition()), result);
     computeEffectiveAddress(Address(result, totalSize), temp);
     branchPtr(Assembler::Below, AbsoluteAddress(zone->addressOfNurseryCurrentEnd()), temp, fail);
     storePtr(temp, AbsoluteAddress(zone->addressOfNurseryPosition()));
 
     if (nDynamicSlots) {
         computeEffectiveAddress(Address(result, thingSize), temp);
         storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -275,18 +275,20 @@ const AllocKind gc::slotsToThingKind[] =
 static_assert(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
               "We have defined a slot count for each kind.");
 
 #define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType) \
     static_assert(sizeof(sizedType) >= SortedArenaList::MinThingSize, \
                   #sizedType " is smaller than SortedArenaList::MinThingSize!"); \
     static_assert(sizeof(sizedType) >= sizeof(FreeSpan), \
                   #sizedType " is smaller than FreeSpan"); \
-    static_assert(sizeof(sizedType) % CellSize == 0, \
-                  "Size of " #sizedType " is not a multiple of CellSize");
+    static_assert(sizeof(sizedType) % CellAlignBytes == 0, \
+                  "Size of " #sizedType " is not a multiple of CellAlignBytes"); \
+    static_assert(sizeof(sizedType) >= MinCellSize, \
+                  "Size of " #sizedType " is smaller than the minimum size");
 FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
 #undef CHECK_THING_SIZE
 
 const uint32_t Arena::ThingSizes[] = {
 #define EXPAND_THING_SIZE(allocKind, traceKind, type, sizedType) \
     sizeof(sizedType),
 FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
 #undef EXPAND_THING_SIZE
@@ -474,17 +476,18 @@ Arena::staticAsserts()
                   "We haven't defined all counts.");
 }
 
 template<typename T>
 inline size_t
 Arena::finalize(FreeOp* fop, AllocKind thingKind, size_t thingSize)
 {
     /* Enforce requirements on size of T. */
-    MOZ_ASSERT(thingSize % CellSize == 0);
+    MOZ_ASSERT(thingSize % CellAlignBytes == 0);
+    MOZ_ASSERT(thingSize >= MinCellSize);
     MOZ_ASSERT(thingSize <= 255);
 
     MOZ_ASSERT(allocated());
     MOZ_ASSERT(thingKind == getAllocKind());
     MOZ_ASSERT(thingSize == getThingSize());
     MOZ_ASSERT(!hasDelayedMarking);
     MOZ_ASSERT(!markOverflow);
     MOZ_ASSERT(!allocatedDuringIncremental);
--- a/js/src/jspropertytree.cpp
+++ b/js/src/jspropertytree.cpp
@@ -232,17 +232,17 @@ Shape::fixupDictionaryShapeAfterMovingGC
     // last property and not otherwise.
     bool listpPointsIntoShape = !MaybeForwarded(base())->isOwned();
 
 #ifdef DEBUG
     // Check that we got this right by interrogating the arena.
     // We use a fake cell pointer for this: it might not point to the beginning
     // of a cell, but will point into the right arena and will have the right
     // alignment.
-    Cell* cell = reinterpret_cast<Cell*>(uintptr_t(listp) & ~CellMask);
+    Cell* cell = reinterpret_cast<Cell*>(uintptr_t(listp) & ~CellAlignMask);
     AllocKind kind = TenuredCell::fromPointer(cell)->getAllocKind();
     MOZ_ASSERT_IF(listpPointsIntoShape, IsShapeAllocKind(kind));
     MOZ_ASSERT_IF(!listpPointsIntoShape, IsObjectAllocKind(kind));
 #endif
 
     if (listpPointsIntoShape) {
         // listp points to the parent field of the next shape.
         Shape* next = reinterpret_cast<Shape*>(uintptr_t(listp) - offsetof(Shape, parent));
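The fake-cell computation above is the matching align-down idiom: clearing the low bits with ~CellAlignMask yields a cell-aligned address within the same arena. A minimal sketch:

    #include <cstdint>

    constexpr uintptr_t CellAlignMask = 7;

    // Round an address down to the previous cell-aligned boundary.
    constexpr uintptr_t AlignDown(uintptr_t addr) {
        return addr & ~CellAlignMask;
    }

    static_assert(AlignDown(0x1003) == 0x1000, "low bits cleared");
    static_assert(AlignDown(0x1008) == 0x1008, "aligned addresses unchanged");
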
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -971,17 +971,17 @@ class JSScript : public js::gc::TenuredC
     uint32_t        sourceEnd_;
     uint32_t        toStringStart_;
     uint32_t        toStringEnd_;
 
 #ifdef MOZ_VTUNE
     // Unique Method ID passed to the VTune profiler, or 0 if unset.
     // Allows attribution of different jitcode to the same source script.
     uint32_t        vtuneMethodId_;
-    // Extra padding to maintain JSScript as a multiple of gc::CellSize.
+    // Extra padding to maintain JSScript as a multiple of gc::CellAlignBytes.
     uint32_t        __vtune_unused_padding_;
 #endif
 
     // Number of times the script has been called or has had backedges taken.
     // When running in ion, also increased for any inlined scripts. Reset if
     // the script's JIT code is forcibly discarded.
     mozilla::Atomic<uint32_t, mozilla::Relaxed> warmUpCount;
 
@@ -2047,18 +2047,18 @@ class JSScript : public js::gc::TenuredC
 
       private:
         void holdScript(JS::HandleFunction fun);
         void dropScript();
     };
 };
 
 /* If this fails, add/remove padding within JSScript. */
-static_assert(sizeof(JSScript) % js::gc::CellSize == 0,
-              "Size of JSScript must be an integral multiple of js::gc::CellSize");
+static_assert(sizeof(JSScript) % js::gc::CellAlignBytes == 0,
+              "Size of JSScript must be an integral multiple of js::gc::CellAlignBytes");
 
 namespace js {
 
 // Information about a script which may be (or has been) lazily compiled to
 // bytecode from its source.
 class LazyScript : public gc::TenuredCell
 {
   private:
@@ -2390,18 +2390,18 @@ class LazyScript : public gc::TenuredCel
     }
 
     uint64_t packedFields() const {
         return packedFields_;
     }
 };
 
 /* If this fails, add/remove padding within LazyScript. */
-static_assert(sizeof(LazyScript) % js::gc::CellSize == 0,
-              "Size of LazyScript must be an integral multiple of js::gc::CellSize");
+static_assert(sizeof(LazyScript) % js::gc::CellAlignBytes == 0,
+              "Size of LazyScript must be an integral multiple of js::gc::CellAlignBytes");
 
 struct ScriptAndCounts
 {
     /* This structure is stored and marked from the JSRuntime. */
     JSScript* script;
     ScriptCounts scriptCounts;
 
     inline explicit ScriptAndCounts(JSScript* script);
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -417,17 +417,17 @@ class BaseShape : public gc::TenuredCell
 
     /* For owned BaseShapes, the canonical unowned BaseShape. */
     GCPtrUnownedBaseShape unowned_;
 
     /* For owned BaseShapes, the shape's shape table. */
     ShapeTable*      table_;
 
 #if JS_BITS_PER_WORD == 32
-    // Ensure sizeof(BaseShape) is a multiple of gc::CellSize.
+    // Ensure sizeof(BaseShape) is a multiple of gc::CellAlignBytes.
     uint32_t padding_;
 #endif
 
     BaseShape(const BaseShape& base) = delete;
     BaseShape& operator=(const BaseShape& other) = delete;
 
   public:
     void finalize(FreeOp* fop);
@@ -500,19 +500,19 @@ class BaseShape : public gc::TenuredCell
 
 #ifdef DEBUG
     bool canSkipMarkingShapeTable(Shape* lastShape);
 #endif
 
   private:
     static void staticAsserts() {
         JS_STATIC_ASSERT(offsetof(BaseShape, clasp_) == offsetof(js::shadow::BaseShape, clasp_));
-        static_assert(sizeof(BaseShape) % gc::CellSize == 0,
+        static_assert(sizeof(BaseShape) % gc::CellAlignBytes == 0,
                       "Things inheriting from gc::Cell must have a size that's "
-                      "a multiple of gc::CellSize");
+                      "a multiple of gc::CellAlignBytes");
     }
 
     void traceShapeTable(JSTracer* trc);
 };
 
 class UnownedBaseShape : public BaseShape {};
 
 UnownedBaseShape*
--- a/js/src/vm/String.h
+++ b/js/src/vm/String.h
@@ -956,17 +956,17 @@ class JSFatInlineString : public JSInlin
     template<typename CharT>
     static bool lengthFits(size_t length);
 
     /* Only called by the GC for strings with the AllocKind::FAT_INLINE_STRING kind. */
 
     MOZ_ALWAYS_INLINE void finalize(js::FreeOp* fop);
 };
 
-static_assert(sizeof(JSFatInlineString) % js::gc::CellSize == 0,
+static_assert(sizeof(JSFatInlineString) % js::gc::CellAlignBytes == 0,
               "fat inline strings shouldn't waste space up to the next cell "
               "boundary");
 
 class JSExternalString : public JSLinearString
 {
     void init(const char16_t* chars, size_t length, const JSStringFinalizer* fin);
 
     /* Vacuous and therefore unimplemented. */
@@ -1052,50 +1052,50 @@ static_assert(sizeof(JSAtom) == sizeof(J
               "string subclasses must be binary-compatible with JSString");
 
 namespace js {
 
 class NormalAtom : public JSAtom
 {
   protected: // Silence Clang unused-field warning.
     HashNumber hash_;
-    uint32_t padding_; // Ensure the size is a multiple of gc::CellSize.
+    uint32_t padding_; // Ensure the size is a multiple of gc::CellAlignBytes.
 
   public:
     HashNumber hash() const {
         return hash_;
     }
     void initHash(HashNumber hash) {
         hash_ = hash;
     }
 };
 
 static_assert(sizeof(NormalAtom) == sizeof(JSString) + sizeof(uint64_t),
               "NormalAtom must have size of a string + HashNumber, "
-              "aligned to gc::CellSize");
+              "aligned to gc::CellAlignBytes");
 
 class FatInlineAtom : public JSAtom
 {
   protected: // Silence Clang unused-field warning.
     char inlineStorage_[sizeof(JSFatInlineString) - sizeof(JSString)];
     HashNumber hash_;
-    uint32_t padding_; // Ensure the size is a multiple of gc::CellSize.
+    uint32_t padding_; // Ensure the size is a multiple of gc::CellAlignBytes.
 
   public:
     HashNumber hash() const {
         return hash_;
     }
     void initHash(HashNumber hash) {
         hash_ = hash;
     }
 };
 
 static_assert(sizeof(FatInlineAtom) == sizeof(JSFatInlineString) + sizeof(uint64_t),
               "FatInlineAtom must have size of a fat inline string + HashNumber, "
-              "aligned to gc::CellSize");
+              "aligned to gc::CellAlignBytes");
 
 } // namespace js
 
 inline js::HashNumber
 JSAtom::hash() const
 {
     if (isFatInline())
         return static_cast<const js::FatInlineAtom*>(this)->hash();
@@ -1476,33 +1476,33 @@ JSThinInlineString::lengthFits<char16_t>
 {
     return length <= MAX_LENGTH_TWO_BYTE;
 }
 
 template<>
 MOZ_ALWAYS_INLINE bool
 JSFatInlineString::lengthFits<JS::Latin1Char>(size_t length)
 {
-    static_assert((INLINE_EXTENSION_CHARS_LATIN1 * sizeof(char)) % js::gc::CellSize == 0,
+    static_assert((INLINE_EXTENSION_CHARS_LATIN1 * sizeof(char)) % js::gc::CellAlignBytes == 0,
                   "fat inline strings' Latin1 characters don't exactly "
                   "fill subsequent cells and thus are wasteful");
     static_assert(MAX_LENGTH_LATIN1 + 1 ==
                   (sizeof(JSFatInlineString) -
                    offsetof(JSFatInlineString, d.inlineStorageLatin1)) / sizeof(char),
                   "MAX_LENGTH_LATIN1 must be one less than inline Latin1 "
                   "storage count");
 
     return length <= MAX_LENGTH_LATIN1;
 }
 
 template<>
 MOZ_ALWAYS_INLINE bool
 JSFatInlineString::lengthFits<char16_t>(size_t length)
 {
-    static_assert((INLINE_EXTENSION_CHARS_TWO_BYTE * sizeof(char16_t)) % js::gc::CellSize == 0,
+    static_assert((INLINE_EXTENSION_CHARS_TWO_BYTE * sizeof(char16_t)) % js::gc::CellAlignBytes == 0,
                   "fat inline strings' char16_t characters don't exactly "
                   "fill subsequent cells and thus are wasteful");
     static_assert(MAX_LENGTH_TWO_BYTE + 1 ==
                   (sizeof(JSFatInlineString) -
                    offsetof(JSFatInlineString, d.inlineStorageTwoByte)) / sizeof(char16_t),
                   "MAX_LENGTH_TWO_BYTE must be one less than inline "
                   "char16_t storage count");