Bug 1531716 - Part 4: Replace jstypes macros with constexpr functions. r=jonco
author André Bargull <andre.bargull@gmail.com>
Mon, 04 Nov 2019 14:04:35 +0000
changeset 500384 3a9b6b73cab7bb07ceea6a599911c668f3b735dc
parent 500383 93e5393aa92b81452cdb11066587e3298aace039
child 500385 345aa7ad3051a3c4c74dab8f6505dc8c8d1248a7
push id 36763
push user rmaries@mozilla.com
push date Mon, 04 Nov 2019 21:44:06 +0000
treeherder mozilla-central@75a7a3400888
reviewers jonco
bugs 1531716
milestone 72.0a1
JS_BIT and JS_BITMASK are only used in contexts where uint32_t is expected, so their replacements (js::Bit and js::BitMask) accept and return uint32_t. JS_HOWMANY and the three rounding macros (JS_ROUNDUP, JS_ROUNDDOWN, JS_ROUND) are only used with size_t inputs, so their replacements (js::HowMany, js::RoundUp, js::RoundDown, js::Round) accept and return size_t.

Differential Revision: https://phabricator.services.mozilla.com/D51142
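
For orientation before the per-file hunks, here is a minimal sketch (illustrative only, not part of the changeset) of how the new uint32_t-typed bit helpers behave, assuming the definitions this patch adds to js/src/jstypes.h:

  // Compile-time checks mirroring call sites touched below; these
  // static_asserts are illustrative and do not appear in the patch.
  static_assert(js::Bit(1) == 2, "Bit(n) computes uint32_t(1) << n");
  static_assert(js::Bit(4) == 16, "so Bit(4) sets only bit 4");
  static_assert(js::BitMask(4) == 0xf, "BitMask(n) is Bit(n) - 1: the low n bits");
  static_assert(js::BitMask(9) == 0x1ff, "e.g. the 9 string type-flag bits");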
js/public/Class.h
js/public/HeapAPI.h
js/src/builtin/MapObject.cpp
js/src/ds/LifoAlloc.cpp
js/src/frontend/BytecodeEmitter.cpp
js/src/frontend/SourceNotes.h
js/src/frontend/SwitchEmitter.cpp
js/src/fuzz-tests/testStructuredCloneReader.cpp
js/src/gc/Cell.h
js/src/gc/GC.cpp
js/src/gc/Nursery-inl.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/jit/MacroAssembler.cpp
js/src/jit/shared/AtomicOperations-shared-jit.cpp
js/src/jstypes.h
js/src/vm/ArrayBufferObject.cpp
js/src/vm/BigIntType.h
js/src/vm/RegExpObject.h
js/src/vm/Shape-inl.h
js/src/vm/Shape.cpp
js/src/vm/Shape.h
js/src/vm/StringType.h
js/src/vm/StructuredClone.cpp
js/src/vm/TypedArrayObject.cpp
js/src/wasm/WasmCode.cpp
js/xpconnect/src/xpcprivate.h
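
The size_t-typed rounding helpers are exercised throughout the GC and allocator hunks that follow; again a minimal sketch, illustrative only and assuming the jstypes.h definitions from this patch:

  // Worked values for the four rounding helpers; not part of the patch.
  static_assert(js::HowMany(10, 4) == 3, "HowMany(x, y): quotient rounded up");
  static_assert(js::RoundUp(10, 4) == 12, "RoundUp(x, y) == HowMany(x, y) * y");
  static_assert(js::RoundDown(10, 4) == 8, "RoundDown truncates to a multiple of y");
  static_assert(js::Round(9, 4) == 8, "Round picks the nearest multiple...");
  static_assert(js::Round(10, 4) == 12, "...rounding half-way cases up");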
--- a/js/public/Class.h
+++ b/js/public/Class.h
@@ -696,17 +696,17 @@ static const uint32_t JSCLASS_USERBIT1 =
 // is a constant in [1, 255]. Reserved slots are indexed from 0 to n-1.
 
 // Room for 8 flags below ...
 static const uintptr_t JSCLASS_RESERVED_SLOTS_SHIFT = 8;
 // ... and 16 above this field.
 static const uint32_t JSCLASS_RESERVED_SLOTS_WIDTH = 8;
 
 static const uint32_t JSCLASS_RESERVED_SLOTS_MASK =
-    JS_BITMASK(JSCLASS_RESERVED_SLOTS_WIDTH);
+    js::BitMask(JSCLASS_RESERVED_SLOTS_WIDTH);
 
 static constexpr uint32_t JSCLASS_HAS_RESERVED_SLOTS(uint32_t n) {
   return (n & JSCLASS_RESERVED_SLOTS_MASK) << JSCLASS_RESERVED_SLOTS_SHIFT;
 }
 
 static constexpr uint32_t JSCLASS_HIGH_FLAGS_SHIFT =
     JSCLASS_RESERVED_SLOTS_SHIFT + JSCLASS_RESERVED_SLOTS_WIDTH;
 
@@ -754,17 +754,17 @@ static constexpr uint32_t JSCLASS_GLOBAL
 }
 
 static constexpr uint32_t JSCLASS_GLOBAL_FLAGS =
     JSCLASS_GLOBAL_FLAGS_WITH_SLOTS(0);
 
 // Fast access to the original value of each standard class's prototype.
 static const uint32_t JSCLASS_CACHED_PROTO_SHIFT = JSCLASS_HIGH_FLAGS_SHIFT + 9;
 static const uint32_t JSCLASS_CACHED_PROTO_MASK =
-    JS_BITMASK(js::JSCLASS_CACHED_PROTO_WIDTH);
+    js::BitMask(js::JSCLASS_CACHED_PROTO_WIDTH);
 
 static_assert(JSProto_LIMIT <= (JSCLASS_CACHED_PROTO_MASK + 1),
               "JSProtoKey must not exceed the maximum cacheable proto-mask");
 
 static constexpr uint32_t JSCLASS_HAS_CACHED_PROTO(JSProtoKey key) {
   return uint32_t(key) << JSCLASS_CACHED_PROTO_SHIFT;
 }
 
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -225,24 +225,25 @@ struct Zone {
   }
 
   static MOZ_ALWAYS_INLINE JS::shadow::Zone* from(JS::Zone* zone) {
     return reinterpret_cast<JS::shadow::Zone*>(zone);
   }
 };
 
 struct String {
-  static const uint32_t NON_ATOM_BIT = JS_BIT(1);
-  static const uint32_t LINEAR_BIT = JS_BIT(4);
-  static const uint32_t INLINE_CHARS_BIT = JS_BIT(6);
-  static const uint32_t LATIN1_CHARS_BIT = JS_BIT(9);
-  static const uint32_t EXTERNAL_FLAGS = LINEAR_BIT | NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t TYPE_FLAGS_MASK = JS_BITMASK(9) - JS_BIT(2) - JS_BIT(0);
-  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t PERMANENT_ATOM_FLAGS = JS_BIT(8);
+  static const uint32_t NON_ATOM_BIT = js::Bit(1);
+  static const uint32_t LINEAR_BIT = js::Bit(4);
+  static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
+  static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
+  static const uint32_t EXTERNAL_FLAGS = LINEAR_BIT | NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t TYPE_FLAGS_MASK =
+      js::BitMask(9) - js::Bit(2) - js::Bit(0);
+  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t PERMANENT_ATOM_FLAGS = js::Bit(8);
 
   uintptr_t flags_;
 #if JS_BITS_PER_WORD == 32
   uint32_t length_;
 #endif
 
   union {
     const JS::Latin1Char* nonInlineCharsLatin1;
--- a/js/src/builtin/MapObject.cpp
+++ b/js/src/builtin/MapObject.cpp
@@ -214,17 +214,17 @@ MapIteratorObject* MapIteratorObject::cr
     if (!iterobj) {
       return nullptr;
     }
 
     iterobj->setSlot(TargetSlot, ObjectValue(*mapobj));
     iterobj->setSlot(RangeSlot, PrivateValue(nullptr));
     iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
 
-    const size_t size = JS_ROUNDUP(sizeof(ValueMap::Range), gc::CellAlignBytes);
+    const size_t size = RoundUp(sizeof(ValueMap::Range), gc::CellAlignBytes);
     buffer = nursery.allocateBufferSameLocation(iterobj, size);
     if (buffer) {
       break;
     }
 
     if (!IsInsideNursery(iterobj)) {
       ReportOutOfMemory(cx);
       return nullptr;
@@ -987,17 +987,17 @@ SetIteratorObject* SetIteratorObject::cr
     if (!iterobj) {
       return nullptr;
     }
 
     iterobj->setSlot(TargetSlot, ObjectValue(*setobj));
     iterobj->setSlot(RangeSlot, PrivateValue(nullptr));
     iterobj->setSlot(KindSlot, Int32Value(int32_t(kind)));
 
-    const size_t size = JS_ROUNDUP(sizeof(ValueSet::Range), gc::CellAlignBytes);
+    const size_t size = RoundUp(sizeof(ValueSet::Range), gc::CellAlignBytes);
     buffer = nursery.allocateBufferSameLocation(iterobj, size);
     if (buffer) {
       break;
     }
 
     if (!IsInsideNursery(iterobj)) {
       ReportOutOfMemory(cx);
       return nullptr;
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -157,17 +157,17 @@ static size_t NextSize(size_t start, siz
   const size_t mb = 1 * 1024 * 1024;
   if (used < mb) {
     return Max(start, used);
   }
 
   // After 1 MB, grow more gradually, to waste less memory.
   // The sequence (in megabytes) begins:
   // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
-  return JS_ROUNDUP(used / 8, mb);
+  return RoundUp(used / 8, mb);
 }
 
 LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
                                                            bool oversize) {
   MOZ_ASSERT(fallibleScope_,
              "[OOM] Cannot allocate a new chunk in an infallible scope.");
 
   // Compute the size which should be requested in order to be able to fit |n|
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -472,17 +472,17 @@ bool BytecodeEmitter::emitDupAt(unsigned
   if (slotFromTop == 0 && count == 1) {
     return emit1(JSOP_DUP);
   }
 
   if (slotFromTop == 1 && count == 2) {
     return emit1(JSOP_DUP2);
   }
 
-  if (slotFromTop >= JS_BIT(24)) {
+  if (slotFromTop >= Bit(24)) {
     reportError(nullptr, JSMSG_TOO_MANY_LOCALS);
     return false;
   }
 
   for (unsigned i = 0; i < count; i++) {
     BytecodeOffset off;
     if (!emitN(JSOP_DUPAT, 3, &off)) {
       return false;
@@ -2064,21 +2064,21 @@ bool BytecodeEmitter::emitNumberOp(doubl
     if (ival == 1) {
       return emit1(JSOP_ONE);
     }
     if ((int)(int8_t)ival == ival) {
       return emit2(JSOP_INT8, uint8_t(int8_t(ival)));
     }
 
     uint32_t u = uint32_t(ival);
-    if (u < JS_BIT(16)) {
+    if (u < Bit(16)) {
       if (!emitUint16Operand(JSOP_UINT16, u)) {
         return false;
       }
-    } else if (u < JS_BIT(24)) {
+    } else if (u < Bit(24)) {
       BytecodeOffset off;
       if (!emitN(JSOP_UINT24, 3, &off)) {
         return false;
       }
       SET_UINT24(bytecodeSection().code(off), u);
     } else {
       BytecodeOffset off;
       if (!emitN(JSOP_INT32, 4, &off)) {
@@ -2267,17 +2267,17 @@ bool BytecodeEmitter::isRunOnceLambda() 
   }
 
   return parent && parent->emittingRunOnceLambda &&
          !sc->asFunctionBox()->shouldSuppressRunOnce();
 }
 
 bool BytecodeEmitter::allocateResumeIndex(BytecodeOffset offset,
                                           uint32_t* resumeIndex) {
-  static constexpr uint32_t MaxResumeIndex = JS_BITMASK(24);
+  static constexpr uint32_t MaxResumeIndex = BitMask(24);
 
   static_assert(
       MaxResumeIndex < uint32_t(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
       "resumeIndex should not include magic AbstractGeneratorObject "
       "resumeIndex values");
   static_assert(
       MaxResumeIndex <= INT32_MAX / sizeof(uintptr_t),
       "resumeIndex * sizeof(uintptr_t) must fit in an int32. JIT code relies "
--- a/js/src/frontend/SourceNotes.h
+++ b/js/src/frontend/SourceNotes.h
@@ -209,19 +209,19 @@ inline void SN_MAKE_TERMINATOR(jssrcnote
 
 inline bool SN_IS_TERMINATOR(jssrcnote* sn) { return *sn == SRC_NULL; }
 
 }  // namespace js
 
 #define SN_TYPE_BITS 5
 #define SN_DELTA_BITS 3
 #define SN_XDELTA_BITS 6
-#define SN_TYPE_MASK (JS_BITMASK(SN_TYPE_BITS) << SN_DELTA_BITS)
-#define SN_DELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_DELTA_BITS))
-#define SN_XDELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_XDELTA_BITS))
+#define SN_TYPE_MASK (js::BitMask(SN_TYPE_BITS) << SN_DELTA_BITS)
+#define SN_DELTA_MASK ((ptrdiff_t)js::BitMask(SN_DELTA_BITS))
+#define SN_XDELTA_MASK ((ptrdiff_t)js::BitMask(SN_XDELTA_BITS))
 
 #define SN_MAKE_NOTE(sn, t, d) \
   (*(sn) = (jssrcnote)(((t) << SN_DELTA_BITS) | ((d)&SN_DELTA_MASK)))
 #define SN_MAKE_XDELTA(sn, d) \
   (*(sn) = (jssrcnote)((SRC_XDELTA << SN_DELTA_BITS) | ((d)&SN_XDELTA_MASK)))
 
 #define SN_IS_XDELTA(sn) ((*(sn) >> SN_DELTA_BITS) >= SRC_XDELTA)
 #define SN_TYPE(sn) \
@@ -230,18 +230,18 @@ inline bool SN_IS_TERMINATOR(jssrcnote* 
 #define SN_IS_GETTABLE(sn) (SN_TYPE(sn) <= SRC_LAST_GETTABLE)
 
 #define SN_DELTA(sn) \
   ((ptrdiff_t)(SN_IS_XDELTA(sn) ? *(sn)&SN_XDELTA_MASK : *(sn)&SN_DELTA_MASK))
 #define SN_SET_DELTA(sn, delta)                 \
   (SN_IS_XDELTA(sn) ? SN_MAKE_XDELTA(sn, delta) \
                     : SN_MAKE_NOTE(sn, SN_TYPE(sn), delta))
 
-#define SN_DELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_DELTA_BITS))
-#define SN_XDELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_XDELTA_BITS))
+#define SN_DELTA_LIMIT ((ptrdiff_t)js::Bit(SN_DELTA_BITS))
+#define SN_XDELTA_LIMIT ((ptrdiff_t)js::Bit(SN_XDELTA_BITS))
 
 /*
  * Offset fields follow certain notes and are frequency-encoded: an offset in
  * [0,0x7f] consumes one byte, an offset in [0x80,0x7fffffff] takes four, and
  * the high bit of the first byte is set.
  */
 #define SN_4BYTE_OFFSET_FLAG 0x80
 #define SN_4BYTE_OFFSET_MASK 0x7f
--- a/js/src/frontend/SwitchEmitter.cpp
+++ b/js/src/frontend/SwitchEmitter.cpp
@@ -27,33 +27,33 @@ using namespace js::frontend;
 
 using mozilla::Maybe;
 
 bool SwitchEmitter::TableGenerator::addNumber(int32_t caseValue) {
   if (isInvalid()) {
     return true;
   }
 
-  if (unsigned(caseValue + int(JS_BIT(15))) >= unsigned(JS_BIT(16))) {
+  if (unsigned(caseValue + int(Bit(15))) >= unsigned(Bit(16))) {
     setInvalid();
     return true;
   }
 
   if (intmap_.isNothing()) {
     intmap_.emplace();
   }
 
   low_ = std::min(low_, caseValue);
   high_ = std::max(high_, caseValue);
 
   // Check for duplicates, which require a JSOP_CONDSWITCH.
   // We bias caseValue by 65536 if it's negative, and hope that's a rare case
   // (because it requires a malloc'd bitmap).
   if (caseValue < 0) {
-    caseValue += JS_BIT(16);
+    caseValue += Bit(16);
   }
   if (caseValue >= intmapBitLength_) {
     size_t newLength = NumWordsForBitArrayOfLength(caseValue + 1);
     if (!intmap_->resize(newLength)) {
       ReportOutOfMemory(bce_->cx);
       return false;
     }
     intmapBitLength_ = newLength * BitArrayElementBits;
@@ -82,17 +82,17 @@ void SwitchEmitter::TableGenerator::fini
     low_ = 0;
     high_ = -1;
     return;
   }
 
   // Compute table length and select condswitch instead if overlarge
   // or more than half-sparse.
   tableLength_ = uint32_t(high_ - low_ + 1);
-  if (tableLength_ >= JS_BIT(16) || tableLength_ > 2 * caseCount) {
+  if (tableLength_ >= Bit(16) || tableLength_ > 2 * caseCount) {
     setInvalid();
   }
 }
 
 uint32_t SwitchEmitter::TableGenerator::toCaseIndex(int32_t caseValue) const {
   MOZ_ASSERT(finished_);
   MOZ_ASSERT(isValid());
   uint32_t caseIndex = uint32_t(caseValue - low_);
@@ -134,17 +134,17 @@ bool SwitchEmitter::emitLexical(Handle<L
   }
 
   state_ = State::Lexical;
   return true;
 }
 
 bool SwitchEmitter::validateCaseCount(uint32_t caseCount) {
   MOZ_ASSERT(state_ == State::Discriminant || state_ == State::Lexical);
-  if (caseCount > JS_BIT(16)) {
+  if (caseCount > Bit(16)) {
     bce_->reportError(switchPos_, JSMSG_TOO_MANY_CASES);
     return false;
   }
   caseCount_ = caseCount;
 
   state_ = State::CaseCount;
   return true;
 }
--- a/js/src/fuzz-tests/testStructuredCloneReader.cpp
+++ b/js/src/fuzz-tests/testStructuredCloneReader.cpp
@@ -28,17 +28,17 @@ static int testStructuredCloneReaderFuzz
     JS::PrepareForFullGC(gCx);
     JS::NonIncrementalGC(gCx, GC_NORMAL, JS::GCReason::API);
   });
 
   if (!size) return 0;
 
   // Make sure to pad the buffer to a multiple of kSegmentAlignment
   const size_t kSegmentAlignment = 8;
-  size_t buf_size = JS_ROUNDUP(size, kSegmentAlignment);
+  size_t buf_size = RoundUp(size, kSegmentAlignment);
 
   JS::StructuredCloneScope scope = JS::StructuredCloneScope::DifferentProcess;
 
   auto clonebuf = MakeUnique<JSStructuredCloneData>(scope);
   if (!clonebuf || !clonebuf->Init(buf_size)) {
     ReportOutOfMemory(gCx);
     return 0;
   }
--- a/js/src/gc/Cell.h
+++ b/js/src/gc/Cell.h
@@ -64,26 +64,26 @@ extern bool CurrentThreadIsGCMarking();
 //
 // During moving GC operation a Cell may be marked as forwarded. This indicates
 // that a gc::RelocationOverlay is currently stored in the Cell's memory and
 // should be used to find the new location of the Cell.
 struct alignas(gc::CellAlignBytes) Cell {
  public:
   // The low bits of the first word of each Cell are reserved for GC flags.
   static constexpr int ReservedBits = 2;
-  static constexpr uintptr_t RESERVED_MASK = JS_BITMASK(ReservedBits);
+  static constexpr uintptr_t RESERVED_MASK = BitMask(ReservedBits);
 
   // Indicates if the cell is currently a RelocationOverlay
-  static constexpr uintptr_t FORWARD_BIT = JS_BIT(0);
+  static constexpr uintptr_t FORWARD_BIT = Bit(0);
 
   // When a Cell is in the nursery, this will indicate if it is a JSString (1)
   // or JSObject (0). When not in nursery, this bit is still reserved for
   // JSString to use as JSString::NON_ATOM bit. This may be removed by Bug
   // 1376646.
-  static constexpr uintptr_t JSSTRING_BIT = JS_BIT(1);
+  static constexpr uintptr_t JSSTRING_BIT = Bit(1);
 
   MOZ_ALWAYS_INLINE bool isTenured() const { return !IsInsideNursery(this); }
   MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;
   MOZ_ALWAYS_INLINE TenuredCell& asTenured();
 
   MOZ_ALWAYS_INLINE bool isMarkedAny() const;
   MOZ_ALWAYS_INLINE bool isMarkedBlack() const;
   MOZ_ALWAYS_INLINE bool isMarkedGray() const;
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -281,24 +281,24 @@ const AllocKind gc::slotsToThingKind[] =
     /*  8 */ AllocKind::OBJECT8,  AllocKind::OBJECT12, AllocKind::OBJECT12, AllocKind::OBJECT12,
     /* 12 */ AllocKind::OBJECT12, AllocKind::OBJECT16, AllocKind::OBJECT16, AllocKind::OBJECT16,
     /* 16 */ AllocKind::OBJECT16
     // clang-format on
 };
 
 // Check that reserved bits of a Cell are compatible with our typical allocators
 // since most derived classes will store a pointer in the first word.
-static_assert(js::detail::LIFO_ALLOC_ALIGN > JS_BITMASK(Cell::ReservedBits),
+static_assert(js::detail::LIFO_ALLOC_ALIGN > BitMask(Cell::ReservedBits),
               "Cell::ReservedBits should support LifoAlloc");
-static_assert(CellAlignBytes > JS_BITMASK(Cell::ReservedBits),
+static_assert(CellAlignBytes > BitMask(Cell::ReservedBits),
               "Cell::ReservedBits should support gc::Cell");
 static_assert(
-    sizeof(uintptr_t) > JS_BITMASK(Cell::ReservedBits),
+    sizeof(uintptr_t) > BitMask(Cell::ReservedBits),
     "Cell::ReservedBits should support small malloc / aligned globals");
-static_assert(js::jit::CodeAlignment > JS_BITMASK(Cell::ReservedBits),
+static_assert(js::jit::CodeAlignment > BitMask(Cell::ReservedBits),
               "Cell::ReservedBits should support JIT code");
 
 static_assert(mozilla::ArrayLength(slotsToThingKind) ==
                   SLOTS_TO_THING_KIND_LIMIT,
               "We have defined a slot count for each kind.");
 
 #define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal,       \
                          nursery, compact)                                     \
--- a/js/src/gc/Nursery-inl.h
+++ b/js/src/gc/Nursery-inl.h
@@ -95,31 +95,31 @@ inline void js::Nursery::setDirectForwar
 namespace js {
 
 // The allocation methods below will not run the garbage collector. If the
 // nursery cannot accomodate the allocation, the malloc heap will be used
 // instead.
 
 template <typename T>
 static inline T* AllocateObjectBuffer(JSContext* cx, uint32_t count) {
-  size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
   T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), nbytes));
   if (!buffer) {
     ReportOutOfMemory(cx);
   }
   return buffer;
 }
 
 template <typename T>
 static inline T* AllocateObjectBuffer(JSContext* cx, JSObject* obj,
                                       uint32_t count) {
   if (cx->isHelperThreadContext()) {
     return cx->pod_malloc<T>(count);
   }
-  size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+  size_t nbytes = RoundUp(count * sizeof(T), sizeof(Value));
   T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(obj, nbytes));
   if (!buffer) {
     ReportOutOfMemory(cx);
   }
   return buffer;
 }
 
 // If this returns null then the old buffer will be left alone.
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -135,18 +135,18 @@ void js::NurseryDecommitTask::queueChunk
 
 void js::NurseryDecommitTask::queueRange(
     size_t newCapacity, NurseryChunk& newChunk,
     const AutoLockHelperThreadState& lock) {
   MOZ_ASSERT(!partialChunk || partialChunk == &newChunk);
 
   // Only save this to decommit later if there's at least one page to
   // decommit.
-  if (JS_ROUNDUP(newCapacity, SystemPageSize()) >=
-      JS_ROUNDDOWN(Nursery::NurseryChunkUsableSize, SystemPageSize())) {
+  if (RoundUp(newCapacity, SystemPageSize()) >=
+      RoundDown(Nursery::NurseryChunkUsableSize, SystemPageSize())) {
     // Clear the existing decommit request because it may be a larger request
     // for the same chunk.
     partialChunk = nullptr;
     return;
   }
   partialChunk = &newChunk;
   partialCapacity = newCapacity;
 }
@@ -379,17 +379,17 @@ void js::Nursery::enterZealMode() {
       }
 
       // It'd be simpler to poison the whole chunk, but we can't do that
       // because the nursery might be partially used.
       chunk(0).poisonRange(capacity_, NurseryChunkUsableSize - capacity_,
                            JS_FRESH_NURSERY_PATTERN,
                            MemCheckKind::MakeUndefined);
     }
-    capacity_ = JS_ROUNDUP(tunables().gcMaxNurseryBytes(), ChunkSize);
+    capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);
     setCurrentEnd();
   }
 }
 
 void js::Nursery::leaveZealMode() {
   if (isEnabled()) {
     MOZ_ASSERT(isEmpty());
     setCurrentChunk(0);
@@ -440,18 +440,17 @@ JSObject* js::Nursery::allocateObject(JS
   return obj;
 }
 
 Cell* js::Nursery::allocateString(Zone* zone, size_t size, AllocKind kind) {
   // Ensure there's enough space to replace the contents with a
   // RelocationOverlay.
   MOZ_ASSERT(size >= sizeof(RelocationOverlay));
 
-  size_t allocSize =
-      JS_ROUNDUP(sizeof(StringLayout) - 1 + size, CellAlignBytes);
+  size_t allocSize = RoundUp(sizeof(StringLayout) - 1 + size, CellAlignBytes);
   auto header = static_cast<StringLayout*>(allocate(allocSize));
   if (!header) {
     return nullptr;
   }
   header->zone = zone;
 
   auto cell = reinterpret_cast<Cell*>(&header->cell);
   gcTracer.traceNurseryAlloc(cell, kind);
@@ -1322,17 +1321,17 @@ MOZ_ALWAYS_INLINE void js::Nursery::setC
 bool js::Nursery::allocateNextChunk(const unsigned chunkno,
                                     AutoLockGCBgAlloc& lock) {
   const unsigned priorCount = allocatedChunkCount();
   const unsigned newCount = priorCount + 1;
 
   MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
              (chunkno == 0 && allocatedChunkCount() == 0));
   MOZ_ASSERT(chunkno == allocatedChunkCount());
-  MOZ_ASSERT(chunkno < JS_HOWMANY(capacity(), ChunkSize));
+  MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
 
   if (!chunks_.resize(newCount)) {
     return false;
   }
 
   Chunk* newChunk;
   newChunk = gc->getOrAllocChunk(lock);
   if (!newChunk) {
@@ -1437,20 +1436,20 @@ bool js::Nursery::maybeResizeExact(JS::G
     return true;
   }
 
   return false;
 }
 
 size_t js::Nursery::roundSize(size_t size) {
   if (size >= ChunkSize) {
-    size = JS_ROUND(size, ChunkSize);
+    size = Round(size, ChunkSize);
   } else {
-    size = Min(JS_ROUND(size, SubChunkStep),
-               JS_ROUNDDOWN(NurseryChunkUsableSize, SubChunkStep));
+    size = Min(Round(size, SubChunkStep),
+               RoundDown(NurseryChunkUsableSize, SubChunkStep));
   }
   MOZ_ASSERT(size >= ArenaSize);
   return size;
 }
 
 void js::Nursery::growAllocableSpace(size_t newCapacity) {
   MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk_ * ChunkSize);
   MOZ_ASSERT(newCapacity <= roundSize(tunables().gcMaxNurseryBytes()));
@@ -1524,17 +1523,17 @@ void js::Nursery::shrinkAllocableSpace(s
   // clamping in maybeResizeNursery().
   MOZ_ASSERT(newCapacity != 0);
   // Don't attempt to shrink it to the same size.
   if (newCapacity == capacity_) {
     return;
   }
   MOZ_ASSERT(newCapacity < capacity_);
 
-  unsigned newCount = JS_HOWMANY(newCapacity, ChunkSize);
+  unsigned newCount = HowMany(newCapacity, ChunkSize);
   if (newCount < allocatedChunkCount()) {
     freeChunksFrom(newCount);
   }
 
   size_t oldCapacity = capacity_;
   capacity_ = newCapacity;
 
   setCurrentEnd();
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -189,17 +189,17 @@ class Nursery {
   unsigned allocatedChunkCount() const { return chunks_.length(); }
 
   // Total number of chunks and the capacity of the nursery. Chunks will be
   // lazilly allocated and added to the chunks array up to this limit, after
   // that the nursery must be collected, this limit may be raised during
   // collection.
   unsigned maxChunkCount() const {
     MOZ_ASSERT(capacity());
-    return JS_HOWMANY(capacity(), gc::ChunkSize);
+    return HowMany(capacity(), gc::ChunkSize);
   }
 
   void enable();
   void disable();
   bool isEnabled() const { return capacity() != 0; }
 
   void enableStrings();
   void disableStrings();
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -922,19 +922,19 @@ static void AllocateAndInitTypedArrayBuf
     obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(0));
     return;
   }
 
   obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(count));
 
   size_t nbytes = count * obj->bytesPerElement();
   MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-             "JS_ROUNDUP must not overflow");
-
-  nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+             "RoundUp must not overflow");
+
+  nbytes = RoundUp(nbytes, sizeof(Value));
   void* buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                  js::ArrayBufferContentsArena);
   if (buf) {
     InitObjectPrivate(obj, buf, nbytes, MemoryUse::TypedArrayElements);
   }
 }
 
 void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
--- a/js/src/jit/shared/AtomicOperations-shared-jit.cpp
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
@@ -654,18 +654,17 @@ void AtomicMemcpyDownUnsynchronized(uint
   // preprocessing then that is better than unaligned copying on a platform
   // that supports it.
 
   if (nbytes >= WORDSIZE) {
     void (*copyBlock)(uint8_t * dest, const uint8_t* src);
     void (*copyWord)(uint8_t * dest, const uint8_t* src);
 
     if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
-      const uint8_t* cutoff =
-          (const uint8_t*)JS_ROUNDUP(uintptr_t(src), WORDSIZE);
+      const uint8_t* cutoff = (const uint8_t*)RoundUp(uintptr_t(src), WORDSIZE);
       MOZ_ASSERT(cutoff <= lim);  // because nbytes >= WORDSIZE
       while (src < cutoff) {
         AtomicCopyByteUnsynchronized(dest++, src++);
       }
       copyBlock = AtomicCopyBlockDownUnsynchronized;
       copyWord = AtomicCopyWordUnsynchronized;
     } else if (UnalignedAccessesAreOK()) {
       copyBlock = AtomicCopyBlockDownUnsynchronized;
@@ -856,17 +855,17 @@ bool InitializeJittedAtomics() {
 
   masm.finish();
   if (masm.oom()) {
     return false;
   }
 
   // Allocate executable memory.
   uint32_t codeLength = masm.bytesNeeded();
-  size_t roundedCodeLength = JS_ROUNDUP(codeLength, ExecutableCodePageSize);
+  size_t roundedCodeLength = RoundUp(codeLength, ExecutableCodePageSize);
   uint8_t* code = (uint8_t*)AllocateExecutableMemory(
       roundedCodeLength, ProtectionSetting::Writable,
       MemCheckKind::MakeUndefined);
   if (!code) {
     return false;
   }
 
   // Zero the padding.
--- a/js/src/jstypes.h
+++ b/js/src/jstypes.h
@@ -20,16 +20,19 @@
 
 #ifndef jstypes_h
 #define jstypes_h
 
 #include "mozilla/Attributes.h"
 #include "mozilla/Casting.h"
 #include "mozilla/Types.h"
 
+#include <stddef.h>
+#include <stdint.h>
+
 // jstypes.h is (or should be!) included by every file in SpiderMonkey.
 // js-config.h also should be included by every file. So include it here.
 // XXX: including it in js/RequiredDefines.h should be a better option, since
 // that is by definition the header file that should be included in all
 // SpiderMonkey code.  However, Gecko doesn't do this!  See bug 909576.
 #include "js-config.h"
 
 /*
@@ -72,34 +75,44 @@
 **      behave syntactically more like functions when called.
 ***********************************************************************/
 #define JS_BEGIN_MACRO do {
 #define JS_END_MACRO \
   }                  \
   while (0)
 
 /***********************************************************************
-** MACROS:      JS_BIT
-**              JS_BITMASK
+** FUNCTIONS:   Bit
+**              BitMask
 ** DESCRIPTION:
-** Bit masking macros.  XXX n must be <= 31 to be portable
+** Bit masking functions.  XXX n must be <= 31 to be portable
 ***********************************************************************/
-#define JS_BIT(n) ((uint32_t)1 << (n))
-#define JS_BITMASK(n) (JS_BIT(n) - 1)
+namespace js {
+constexpr uint32_t Bit(uint32_t n) { return uint32_t(1) << n; }
+
+constexpr uint32_t BitMask(uint32_t n) { return Bit(n) - 1; }
+}  // namespace js
 
 /***********************************************************************
-** MACROS:      JS_HOWMANY
-**              JS_ROUNDUP
+** FUNCTIONS:   HowMany
+**              RoundUp
+**              RoundDown
+**              Round
 ** DESCRIPTION:
-**      Commonly used macros for operations on compatible types.
+**      Commonly used functions for operations on compatible types.
 ***********************************************************************/
-#define JS_HOWMANY(x, y) (((x) + (y)-1) / (y))
-#define JS_ROUNDUP(x, y) (JS_HOWMANY(x, y) * (y))
-#define JS_ROUNDDOWN(x, y) (((x) / (y)) * (y))
-#define JS_ROUND(x, y) ((((x) + (y) / 2) / (y)) * (y))
+namespace js {
+constexpr size_t HowMany(size_t x, size_t y) { return (x + y - 1) / y; }
+
+constexpr size_t RoundUp(size_t x, size_t y) { return HowMany(x, y) * y; }
+
+constexpr size_t RoundDown(size_t x, size_t y) { return (x / y) * y; }
+
+constexpr size_t Round(size_t x, size_t y) { return ((x + y / 2) / y) * y; }
+}  // namespace js
 
 #if defined(JS_64BIT)
 #  define JS_BITS_PER_WORD 64
 #else
 #  define JS_BITS_PER_WORD 32
 #endif
 
 /***********************************************************************
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -754,32 +754,32 @@ static bool CreateSpecificWasmBuffer(
       wasm::Log(cx, "new Memory({initial=%u bytes}) failed", initialSize);
       ReportOutOfMemory(cx);
       return false;
     }
 
     uint32_t cur = clampedMaxSize.value() / 2;
 
     for (; cur > initialSize; cur /= 2) {
-      uint32_t clampedMaxSize = JS_ROUNDUP(cur, wasm::PageSize);
+      uint32_t clampedMaxSize = RoundUp(cur, wasm::PageSize);
       buffer = RawbufT::Allocate(initialSize, Some(clampedMaxSize), mappedSize);
       if (buffer) {
         break;
       }
     }
 
     if (!buffer) {
       wasm::Log(cx, "new Memory({initial=%u bytes}) failed", initialSize);
       ReportOutOfMemory(cx);
       return false;
     }
 
     // Try to grow our chunk as much as possible.
     for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) {
-      buffer->tryGrowMaxSizeInPlace(JS_ROUNDUP(d, wasm::PageSize));
+      buffer->tryGrowMaxSizeInPlace(RoundUp(d, wasm::PageSize));
     }
   }
 
   // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case
   // of failure.
   RootedArrayBufferObjectMaybeShared object(
       cx, ObjT::createFromNewRawBuffer(cx, buffer, initialSize));
   if (!object) {
@@ -969,17 +969,17 @@ void ArrayBufferObject::setDataPointer(B
 uint32_t ArrayBufferObject::byteLength() const {
   return getFixedSlot(BYTE_LENGTH_SLOT).toInt32();
 }
 
 inline size_t ArrayBufferObject::associatedBytes() const {
   if (bufferKind() == MALLOCED) {
     return byteLength();
   } else if (bufferKind() == MAPPED) {
-    return JS_ROUNDUP(byteLength(), js::gc::SystemPageSize());
+    return RoundUp(byteLength(), js::gc::SystemPageSize());
   } else {
     MOZ_CRASH("Unexpected buffer kind");
   }
 }
 
 void ArrayBufferObject::setByteLength(uint32_t length) {
   MOZ_ASSERT(length <= INT32_MAX);
   setFixedSlot(BYTE_LENGTH_SLOT, Int32Value(length));
@@ -1163,25 +1163,25 @@ ArrayBufferObject* ArrayBufferObject::cr
 
   size_t nAllocated = 0;
   size_t nslots = reservedSlots;
   if (contents.kind() == USER_OWNED) {
     // No accounting to do in this case.
   } else if (contents.kind() == EXTERNAL) {
     // Store the FreeInfo in the inline data slots so that we
     // don't use up slots for it in non-refcounted array buffers.
-    size_t freeInfoSlots = JS_HOWMANY(sizeof(FreeInfo), sizeof(Value));
+    size_t freeInfoSlots = HowMany(sizeof(FreeInfo), sizeof(Value));
     MOZ_ASSERT(reservedSlots + freeInfoSlots <= NativeObject::MAX_FIXED_SLOTS,
                "FreeInfo must fit in inline slots");
     nslots += freeInfoSlots;
   } else {
     // The ABO is taking ownership, so account the bytes against the zone.
     nAllocated = nbytes;
     if (contents.kind() == MAPPED) {
-      nAllocated = JS_ROUNDUP(nbytes, js::gc::SystemPageSize());
+      nAllocated = RoundUp(nbytes, js::gc::SystemPageSize());
     } else {
       MOZ_ASSERT(contents.kind() == MALLOCED,
                  "should have handled all possible callers' kinds");
     }
   }
 
   MOZ_ASSERT(!(class_.flags & JSCLASS_HAS_PRIVATE));
   gc::AllocKind allocKind = GetArrayBufferGCObjectKind(nslots);
@@ -1215,17 +1215,17 @@ ArrayBufferObject* ArrayBufferObject::cr
   }
 
   // Try fitting the data inline with the object by repurposing fixed-slot
   // storage.  Add extra fixed slots if necessary to accomplish this, but don't
   // exceed the maximum number of fixed slots!
   size_t nslots = JSCLASS_RESERVED_SLOTS(&class_);
   uint8_t* data;
   if (nbytes <= MaxInlineBytes) {
-    int newSlots = JS_HOWMANY(nbytes, sizeof(Value));
+    int newSlots = HowMany(nbytes, sizeof(Value));
     MOZ_ASSERT(int(nbytes) <= newSlots * int(sizeof(Value)));
 
     nslots += newSlots;
     data = nullptr;
   } else {
     data = AllocateArrayBufferContents(cx, nbytes);
     if (!data) {
       return nullptr;
--- a/js/src/vm/BigIntType.h
+++ b/js/src/vm/BigIntType.h
@@ -43,17 +43,17 @@ class BigInt final
     : public js::gc::CellWithLengthAndFlags<js::gc::TenuredCell> {
   using Base = js::gc::CellWithLengthAndFlags<js::gc::TenuredCell>;
 
  public:
   using Digit = uintptr_t;
 
  private:
   // The low NumFlagBitsReservedForGC flag bits are reserved.
-  static constexpr uintptr_t SignBit = JS_BIT(Base::NumFlagBitsReservedForGC);
+  static constexpr uintptr_t SignBit = js::Bit(Base::NumFlagBitsReservedForGC);
   static constexpr size_t InlineDigitsLength =
       (js::gc::MinCellSize - sizeof(Base)) / sizeof(Digit);
 
   // Note: 32-bit length and flags fields are inherited from
   // CellWithLengthAndFlags.
 
   // The digit storage starts with the least significant digit (little-endian
   // digit order).  Byte order within a digit is of course native endian.
--- a/js/src/vm/RegExpObject.h
+++ b/js/src/vm/RegExpObject.h
@@ -40,16 +40,23 @@ namespace js {
 struct MatchPair;
 class MatchPairs;
 class RegExpStatics;
 
 namespace frontend {
 class TokenStreamAnyChars;
 }
 
+// Temporary definitions until irregexp is updated from upstream.
+namespace irregexp {
+constexpr size_t JS_HOWMANY(size_t x, size_t y) { return (x + y - 1) / y; }
+
+constexpr size_t JS_ROUNDUP(size_t x, size_t y) { return JS_HOWMANY(x, y) * y; }
+}  // namespace irregexp
+
 extern RegExpObject* RegExpAlloc(JSContext* cx, NewObjectKind newKind,
                                  HandleObject proto = nullptr);
 
 extern JSObject* CloneRegExpObject(JSContext* cx, Handle<RegExpObject*> regex);
 
 class RegExpObject : public NativeObject {
   static const unsigned LAST_INDEX_SLOT = 0;
   static const unsigned SOURCE_SLOT = 1;
--- a/js/src/vm/Shape-inl.h
+++ b/js/src/vm/Shape-inl.h
@@ -286,17 +286,17 @@ MOZ_ALWAYS_INLINE ShapeTable::Entry& Sha
   Shape* shape = entry->shape();
   if (shape && shape->propidRaw() == id) {
     return *entry;
   }
 
   /* Collision: double hash. */
   uint32_t sizeLog2 = HASH_BITS - hashShift_;
   HashNumber hash2 = Hash2(hash0, sizeLog2, hashShift_);
-  uint32_t sizeMask = JS_BITMASK(sizeLog2);
+  uint32_t sizeMask = BitMask(sizeLog2);
 
   /* Save the first removed entry pointer so we can recycle it if adding. */
   Entry* firstRemoved;
   if (Adding == MaybeAdding::Adding) {
     if (entry->isRemoved()) {
       firstRemoved = entry;
     } else {
       firstRemoved = nullptr;
--- a/js/src/vm/Shape.cpp
+++ b/js/src/vm/Shape.cpp
@@ -41,25 +41,25 @@ Shape* const ShapeTable::Entry::SHAPE_RE
 bool ShapeIC::init(JSContext* cx) {
   size_ = MAX_SIZE;
   entries_.reset(cx->pod_calloc<Entry>(size_));
   return (!entries_) ? false : true;
 }
 
 bool ShapeTable::init(JSContext* cx, Shape* lastProp) {
   uint32_t sizeLog2 = CeilingLog2Size(entryCount_);
-  uint32_t size = JS_BIT(sizeLog2);
+  uint32_t size = Bit(sizeLog2);
   if (entryCount_ >= size - (size >> 2)) {
     sizeLog2++;
   }
   if (sizeLog2 < MIN_SIZE_LOG2) {
     sizeLog2 = MIN_SIZE_LOG2;
   }
 
-  size = JS_BIT(sizeLog2);
+  size = Bit(sizeLog2);
   entries_.reset(cx->pod_calloc<Entry>(size));
   if (!entries_) {
     return false;
   }
 
   MOZ_ASSERT(sizeLog2 <= HASH_BITS);
   hashShift_ = HASH_BITS - sizeLog2;
 
@@ -220,18 +220,18 @@ bool ShapeTable::change(JSContext* cx, i
   MOZ_ASSERT(entries_);
   MOZ_ASSERT(-1 <= log2Delta && log2Delta <= 1);
 
   /*
    * Grow, shrink, or compress by changing this->entries_.
    */
   uint32_t oldLog2 = HASH_BITS - hashShift_;
   uint32_t newLog2 = oldLog2 + log2Delta;
-  uint32_t oldSize = JS_BIT(oldLog2);
-  uint32_t newSize = JS_BIT(newLog2);
+  uint32_t oldSize = Bit(oldLog2);
+  uint32_t newSize = Bit(newLog2);
   Entry* newTable = cx->maybe_pod_calloc<Entry>(newSize);
   if (!newTable) {
     return false;
   }
 
   /* Now that we have newTable allocated, update members. */
   MOZ_ASSERT(newLog2 <= HASH_BITS);
   hashShift_ = HASH_BITS - newLog2;
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -202,18 +202,18 @@ class PropertyTree {
 };
 
 class TenuringTracer;
 
 typedef JSGetterOp GetterOp;
 typedef JSSetterOp SetterOp;
 
 /* Limit on the number of slotful properties in an object. */
-static const uint32_t SHAPE_INVALID_SLOT = JS_BIT(24) - 1;
-static const uint32_t SHAPE_MAXIMUM_SLOT = JS_BIT(24) - 2;
+static const uint32_t SHAPE_INVALID_SLOT = Bit(24) - 1;
+static const uint32_t SHAPE_MAXIMUM_SLOT = Bit(24) - 2;
 
 enum class MaybeAdding { Adding = true, NotAdding = false };
 
 class AutoKeepShapeCaches;
 
 /*
  * ShapeIC uses a small array that is linearly searched.
  */
@@ -332,17 +332,17 @@ class ShapeTable {
   };
 
  private:
   static const uint32_t HASH_BITS = mozilla::tl::BitSize<HashNumber>::value;
 
   // This value is low because it's common for a ShapeTable to be created
   // with an entryCount of zero.
   static const uint32_t MIN_SIZE_LOG2 = 2;
-  static const uint32_t MIN_SIZE = JS_BIT(MIN_SIZE_LOG2);
+  static const uint32_t MIN_SIZE = Bit(MIN_SIZE_LOG2);
 
   uint32_t hashShift_; /* multiplicative hash shift */
 
   uint32_t entryCount_;   /* number of entries in table */
   uint32_t removedCount_; /* removed entry sentinels in table */
 
   uint32_t freeList_; /* SHAPE_INVALID_SLOT or head of slot
                          freelist in owning dictionary-mode
@@ -410,17 +410,17 @@ class ShapeTable {
     MOZ_ASSERT(entryCount_ + removedCount_ <= capacity());
   }
   void incRemovedCount() {
     removedCount_++;
     MOZ_ASSERT(entryCount_ + removedCount_ <= capacity());
   }
 
   // By definition, hashShift = HASH_BITS - log2(capacity).
-  uint32_t capacity() const { return JS_BIT(HASH_BITS - hashShift_); }
+  uint32_t capacity() const { return Bit(HASH_BITS - hashShift_); }
 
   // Whether we need to grow.  We want to do this if the load factor
   // is >= 0.75
   bool needsToGrow() const {
     uint32_t size = capacity();
     return entryCount_ + removedCount_ >= size - (size >> 2);
   }
 
@@ -898,17 +898,17 @@ class Shape : public gc::TenuredCell {
   // Flags that are not modified after the Shape is created. Off-thread Ion
   // compilation can access the immutableFlags word, so we don't want any
   // mutable state here to avoid (TSan) races.
   enum ImmutableFlags : uint32_t {
     // Mask to get the index in object slots for isDataProperty() shapes.
     // For other shapes in the property tree with a parent, stores the
     // parent's slot index (which may be invalid), and invalid for all
     // other shapes.
-    SLOT_MASK = JS_BIT(24) - 1,
+    SLOT_MASK = BitMask(24),
 
     // Number of fixed slots in objects with this shape.
     // FIXED_SLOTS_MAX is the biggest count of fixed slots a Shape can store.
     FIXED_SLOTS_MAX = 0x1f,
     FIXED_SLOTS_SHIFT = 24,
     FIXED_SLOTS_MASK = uint32_t(FIXED_SLOTS_MAX << FIXED_SLOTS_SHIFT),
 
     // Property stored in per-object dictionary, not shared property tree.
--- a/js/src/vm/StringType.h
+++ b/js/src/vm/StringType.h
@@ -257,47 +257,47 @@ class JSString : public js::gc::CellWith
    * If the INDEX_VALUE_BIT is set, flags will also hold an integer index.
    */
 
   // The low bits of flag word are reserved by GC.
   static_assert(Base::NumFlagBitsReservedForGC <= 3,
                 "JSString::flags must reserve enough bits for Cell");
 
   static const uint32_t NON_ATOM_BIT = js::gc::Cell::JSSTRING_BIT;
-  static const uint32_t LINEAR_BIT = JS_BIT(4);
-  static const uint32_t DEPENDENT_BIT = JS_BIT(5);
-  static const uint32_t INLINE_CHARS_BIT = JS_BIT(6);
+  static const uint32_t LINEAR_BIT = js::Bit(4);
+  static const uint32_t DEPENDENT_BIT = js::Bit(5);
+  static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
 
   static const uint32_t EXTENSIBLE_FLAGS =
-      NON_ATOM_BIT | LINEAR_BIT | JS_BIT(7);
-  static const uint32_t EXTERNAL_FLAGS = NON_ATOM_BIT | LINEAR_BIT | JS_BIT(8);
+      NON_ATOM_BIT | LINEAR_BIT | js::Bit(7);
+  static const uint32_t EXTERNAL_FLAGS = NON_ATOM_BIT | LINEAR_BIT | js::Bit(8);
 
-  static const uint32_t FAT_INLINE_MASK = INLINE_CHARS_BIT | JS_BIT(7);
-  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | JS_BIT(8);
-  static const uint32_t PERMANENT_ATOM_FLAGS = JS_BIT(8);
+  static const uint32_t FAT_INLINE_MASK = INLINE_CHARS_BIT | js::Bit(7);
+  static const uint32_t PERMANENT_ATOM_MASK = NON_ATOM_BIT | js::Bit(8);
+  static const uint32_t PERMANENT_ATOM_FLAGS = js::Bit(8);
 
   /* Initial flags for thin inline and fat inline strings. */
   static const uint32_t INIT_THIN_INLINE_FLAGS =
       NON_ATOM_BIT | LINEAR_BIT | INLINE_CHARS_BIT;
   static const uint32_t INIT_FAT_INLINE_FLAGS =
       NON_ATOM_BIT | LINEAR_BIT | FAT_INLINE_MASK;
   static const uint32_t INIT_ROPE_FLAGS = NON_ATOM_BIT;
   static const uint32_t INIT_LINEAR_FLAGS = NON_ATOM_BIT | LINEAR_BIT;
   static const uint32_t INIT_DEPENDENT_FLAGS =
       NON_ATOM_BIT | LINEAR_BIT | DEPENDENT_BIT;
 
   static const uint32_t TYPE_FLAGS_MASK =
-      JS_BITMASK(9) - JS_BITMASK(3) + js::gc::Cell::JSSTRING_BIT;
+      js::BitMask(9) - js::BitMask(3) + js::gc::Cell::JSSTRING_BIT;
 
-  static const uint32_t LATIN1_CHARS_BIT = JS_BIT(9);
+  static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
 
-  static const uint32_t INDEX_VALUE_BIT = JS_BIT(10);
+  static const uint32_t INDEX_VALUE_BIT = js::Bit(10);
   static const uint32_t INDEX_VALUE_SHIFT = 16;
 
-  static const uint32_t PINNED_ATOM_BIT = JS_BIT(11);
+  static const uint32_t PINNED_ATOM_BIT = js::Bit(11);
 
   static const uint32_t MAX_LENGTH = js::MaxStringLength;
 
   static const JS::Latin1Char MAX_LATIN1_CHAR = 0xff;
 
   /*
    * Helper function to validate that a string of a given length is
    * representable by a JSString. An allocation overflow is reported if false
--- a/js/src/vm/StructuredClone.cpp
+++ b/js/src/vm/StructuredClone.cpp
@@ -2031,24 +2031,24 @@ JSString* JSStructuredCloneReader::readS
   if (!chars.maybeAlloc(context(), nchars) ||
       !in.readChars(chars.get(), nchars)) {
     return nullptr;
   }
   return chars.toStringDontDeflate(context(), nchars);
 }
 
 JSString* JSStructuredCloneReader::readString(uint32_t data) {
-  uint32_t nchars = data & JS_BITMASK(31);
+  uint32_t nchars = data & BitMask(31);
   bool latin1 = data & (1 << 31);
   return latin1 ? readStringImpl<Latin1Char>(nchars)
                 : readStringImpl<char16_t>(nchars);
 }
 
 BigInt* JSStructuredCloneReader::readBigInt(uint32_t data) {
-  size_t length = data & JS_BITMASK(31);
+  size_t length = data & BitMask(31);
   bool isNegative = data & (1 << 31);
   if (length == 0) {
     return BigInt::zero(context());
   }
   RootedBigInt result(
       context(), BigInt::createUninitialized(context(), length, isNegative));
   if (!result) {
     return nullptr;
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -119,17 +119,17 @@ bool TypedArrayObject::ensureHasBuffer(J
   MOZ_ALWAYS_TRUE(buffer->addView(cx, tarray));
 
   // tarray is not shared, because if it were it would have a buffer.
   memcpy(buffer->dataPointer(), tarray->dataPointerUnshared(),
          tarray->byteLength());
 
   // If the object is in the nursery, the buffer will be freed by the next
   // nursery GC. Free the data slot pointer if the object has no inline data.
-  size_t nbytes = JS_ROUNDUP(tarray->byteLength(), sizeof(Value));
+  size_t nbytes = RoundUp(tarray->byteLength(), sizeof(Value));
   Nursery& nursery = cx->nursery();
   if (tarray->isTenured() && !tarray->hasInlineElements() &&
       !nursery.isInside(tarray->elements())) {
     js_free(tarray->elements());
     RemoveCellMemory(tarray, nbytes, MemoryUse::TypedArrayElements);
   }
 
   tarray->setPrivate(buffer->dataPointer());
@@ -165,17 +165,17 @@ void TypedArrayObject::finalize(JSFreeOp
 
   // Typed arrays with a buffer object do not need to be free'd
   if (curObj->hasBuffer()) {
     return;
   }
 
   // Free the data slot pointer if it does not point into the old JSObject.
   if (!curObj->hasInlineElements()) {
-    size_t nbytes = JS_ROUNDUP(curObj->byteLength(), sizeof(Value));
+    size_t nbytes = RoundUp(curObj->byteLength(), sizeof(Value));
     fop->free_(obj, curObj->elements(), nbytes, MemoryUse::TypedArrayElements);
   }
 }
 
 /* static */
 size_t TypedArrayObject::objectMoved(JSObject* obj, JSObject* old) {
   TypedArrayObject* newObj = &obj->as<TypedArrayObject>();
   const TypedArrayObject* oldObj = &old->as<TypedArrayObject>();
@@ -202,17 +202,17 @@ size_t TypedArrayObject::objectMoved(JSO
   // have any data to move.
   if (!buf) {
     return 0;
   }
 
   Nursery& nursery = obj->runtimeFromMainThread()->gc.nursery();
   if (!nursery.isInside(buf)) {
     nursery.removeMallocedBuffer(buf);
-    size_t nbytes = JS_ROUNDUP(newObj->byteLength(), sizeof(Value));
+    size_t nbytes = RoundUp(newObj->byteLength(), sizeof(Value));
     AddCellMemory(newObj, nbytes, MemoryUse::TypedArrayElements);
     return 0;
   }
 
   // Determine if we can use inline data for the target array. If this is
   // possible, the nursery will have picked an allocation size that is large
   // enough.
   size_t nbytes = oldObj->byteLength();
@@ -231,20 +231,20 @@ size_t TypedArrayObject::objectMoved(JSO
       uint8_t* output = newObj->fixedData(TypedArrayObject::FIXED_DATA_START);
       output[0] = ZeroLengthArrayData;
     }
 #endif
     newObj->setInlineElements();
   } else {
     MOZ_ASSERT(!oldObj->hasInlineElements());
     MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-               "JS_ROUNDUP must not overflow");
+               "RoundUp must not overflow");
 
     AutoEnterOOMUnsafeRegion oomUnsafe;
-    nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+    nbytes = RoundUp(nbytes, sizeof(Value));
     void* data = newObj->zone()->pod_arena_malloc<uint8_t>(
         js::ArrayBufferContentsArena, nbytes);
     if (!data) {
       oomUnsafe.crash(
           "Failed to allocate typed array elements while tenuring.");
     }
     MOZ_ASSERT(!nursery.isInside(data));
     InitObjectPrivate(newObj, data, nbytes, MemoryUse::TypedArrayElements);
@@ -561,19 +561,19 @@ class TypedArrayObjectTemplate : public 
     }
 
     initTypedArraySlots(obj, len);
 
     void* buf = nullptr;
     if (!fitsInline) {
       MOZ_ASSERT(len > 0);
       MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
-                 "JS_ROUNDUP must not overflow");
-
-      nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
+                 "RoundUp must not overflow");
+
+      nbytes = RoundUp(nbytes, sizeof(Value));
       buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                js::ArrayBufferContentsArena);
       if (!buf) {
         ReportOutOfMemory(cx);
         return nullptr;
       }
     }
 
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -102,17 +102,17 @@ const uint8_t* LinkData::deserialize(con
 CodeSegment::~CodeSegment() {
   if (unregisterOnDestroy_) {
     UnregisterCodeSegment(this);
   }
 }
 
 static uint32_t RoundupCodeLength(uint32_t codeLength) {
   // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
-  return JS_ROUNDUP(codeLength, ExecutableCodePageSize);
+  return RoundUp(codeLength, ExecutableCodePageSize);
 }
 
 /* static */
 UniqueCodeBytes CodeSegment::AllocateCodeBytes(uint32_t codeLength) {
   if (codeLength > MaxCodeBytesPerProcess) {
     return nullptr;
   }
 
--- a/js/xpconnect/src/xpcprivate.h
+++ b/js/xpconnect/src/xpcprivate.h
@@ -1548,17 +1548,17 @@ class XPCWrappedNative final : public ns
                    already_AddRefed<XPCNativeSet>&& aSet);
 
   virtual ~XPCWrappedNative();
   void Destroy();
 
  private:
   enum {
     // Flags bits for mFlatJSObject:
-    FLAT_JS_OBJECT_VALID = JS_BIT(0)
+    FLAT_JS_OBJECT_VALID = js::Bit(0)
   };
 
   bool Init(JSContext* cx, nsIXPCScriptable* scriptable);
   bool FinishInit(JSContext* cx);
 
   bool ExtendSet(JSContext* aCx, XPCNativeInterface* aInterface);
 
   nsresult InitTearOff(JSContext* cx, XPCWrappedNativeTearOff* aTearOff,