Bug 1488698 - Always use braces for if/for/while statements in js/src/gc, part 1. r=sfink
author:      Jan de Mooij <jdemooij@mozilla.com>
date:        Thu, 06 Sep 2018 10:36:51 +0200
changeset:   435719 3ba56e06507e0c78c679261c3d0424decb3f7b29
parent:      435718 9f7beee7269b10833e0ca8446195cc354dac63c3
child:       435720 d27cd3305320b12ad93f0837090a0913d0f31e76
push id:     34618
push user:   btara@mozilla.com
push date:   Tue, 11 Sep 2018 22:13:11 +0000
treeherder:  mozilla-central@1169e8a4ca2b
reviewers:   sfink
bugs:        1488698
milestone:   64.0a1
js/src/gc/Allocator.cpp
js/src/gc/ArenaList-inl.h
js/src/gc/AtomMarking-inl.h
js/src/gc/AtomMarking.cpp
js/src/gc/AtomMarking.h
js/src/gc/Barrier.cpp
js/src/gc/Barrier.h
js/src/gc/Cell.h
js/src/gc/FindSCCs.h
js/src/gc/FreeOp.h
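
The change itself is mechanical: every if/for/while whose body is a single unbraced statement gains braces. Below is a minimal standalone sketch of the rule being enforced, using placeholder names (firstNonNull, items, count) rather than code from this patch:

#include <cstddef>

// Pre-patch style would have written the two bodies below without braces
// (e.g. "if (items[i]) return items[i];" on an indented line). The rule
// this patch enforces is that even single-statement bodies take braces.
static int*
firstNonNull(int** items, std::size_t count)
{
    for (std::size_t i = 0; i < count; i++) {
        if (items[i]) {
            return items[i];
        }
    }
    return nullptr;
}
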
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -40,37 +40,41 @@ js::Allocate(JSContext* cx, AllocKind ki
                   "All allocations must be at least the allocator-imposed minimum size.");
 
     MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative());
 
     // We cannot trigger GC or make runtime assertions when nursery allocation
     // is suppressed, either explicitly or because we are off-thread.
     if (cx->isNurseryAllocSuppressed()) {
         JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
-        if (MOZ_UNLIKELY(allowGC && !obj))
+        if (MOZ_UNLIKELY(allowGC && !obj)) {
             ReportOutOfMemory(cx);
+        }
         return obj;
     }
 
     JSRuntime* rt = cx->runtime();
-    if (!rt->gc.checkAllocatorState<allowGC>(cx, kind))
+    if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
         return nullptr;
+    }
 
     if (cx->nursery().isEnabled() && heap != TenuredHeap) {
         JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(cx, thingSize, nDynamicSlots, clasp);
-        if (obj)
+        if (obj) {
             return obj;
+        }
 
         // Our most common non-jit allocation path is NoGC; thus, if we fail the
         // alloc and cannot GC, we *must* return nullptr here so that the caller
         // will do a CanGC allocation to clear the nursery. Failing to do so will
         // cause all allocations on this path to land in Tenured, and we will not
         // get the benefit of the nursery.
-        if (!allowGC)
+        if (!allowGC) {
             return nullptr;
+        }
     }
 
     return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize, nDynamicSlots);
 }
 template JSObject* js::Allocate<JSObject, NoGC>(JSContext* cx, gc::AllocKind kind,
                                                 size_t nDynamicSlots, gc::InitialHeap heap,
                                                 const Class* clasp);
 template JSObject* js::Allocate<JSObject, CanGC>(JSContext* cx, gc::AllocKind kind,
@@ -86,50 +90,54 @@ GCRuntime::tryNewNurseryObject(JSContext
 {
     MOZ_RELEASE_ASSERT(!cx->helperThread());
 
     MOZ_ASSERT(cx->isNurseryAllocAllowed());
     MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
     MOZ_ASSERT(!cx->zone()->isAtomsZone());
 
     JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
-    if (obj)
+    if (obj) {
         return obj;
+    }
 
     if (allowGC && !cx->suppressGC) {
         cx->runtime()->gc.minorGC(JS::gcreason::OUT_OF_NURSERY);
 
         // Exceeding gcMaxBytes while tenuring can disable the Nursery.
-        if (cx->nursery().isEnabled())
+        if (cx->nursery().isEnabled()) {
             return cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
+        }
     }
     return nullptr;
 }
 
 template <AllowGC allowGC>
 JSObject*
 GCRuntime::tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
                                size_t nDynamicSlots)
 {
     HeapSlot* slots = nullptr;
     if (nDynamicSlots) {
         slots = cx->maybe_pod_malloc<HeapSlot>(nDynamicSlots);
         if (MOZ_UNLIKELY(!slots)) {
-            if (allowGC)
+            if (allowGC) {
                 ReportOutOfMemory(cx);
+            }
             return nullptr;
         }
         Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
     }
 
     JSObject* obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);
 
     if (obj) {
-        if (nDynamicSlots)
+        if (nDynamicSlots) {
             static_cast<NativeObject*>(obj)->initSlots(slots);
+        }
     } else {
         js_free(slots);
     }
 
     return obj;
 }
 
 // Attempt to allocate a new string out of the nursery. If there is not enough
@@ -140,26 +148,28 @@ GCRuntime::tryNewNurseryString(JSContext
 {
     MOZ_ASSERT(IsNurseryAllocable(kind));
     MOZ_ASSERT(cx->isNurseryAllocAllowed());
     MOZ_ASSERT(!cx->helperThread());
     MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
     MOZ_ASSERT(!cx->zone()->isAtomsZone());
 
     Cell* cell = cx->nursery().allocateString(cx->zone(), thingSize, kind);
-    if (cell)
+    if (cell) {
         return static_cast<JSString*>(cell);
+    }
 
     if (allowGC && !cx->suppressGC) {
         cx->runtime()->gc.minorGC(JS::gcreason::OUT_OF_NURSERY);
 
         // Exceeding gcMaxBytes while tenuring can disable the Nursery, and
         // other heuristics can disable nursery strings for this zone.
-        if (cx->nursery().isEnabled() && cx->zone()->allocNurseryStrings)
+        if (cx->nursery().isEnabled() && cx->zone()->allocNurseryStrings) {
             return static_cast<JSString*>(cx->nursery().allocateString(cx->zone(), thingSize, kind));
+        }
     }
     return nullptr;
 }
 
 template <typename StringAllocT, AllowGC allowGC /* = CanGC */>
 StringAllocT*
 js::AllocateString(JSContext* cx, InitialHeap heap)
 {
@@ -168,41 +178,45 @@ js::AllocateString(JSContext* cx, Initia
     AllocKind kind = MapTypeToFinalizeKind<StringAllocT>::kind;
     size_t size = sizeof(StringAllocT);
     MOZ_ASSERT(size == Arena::thingSize(kind));
     MOZ_ASSERT(size == sizeof(JSString) || size == sizeof(JSFatInlineString));
 
     // Off-thread alloc cannot trigger GC or make runtime assertions.
     if (cx->isNurseryAllocSuppressed()) {
         StringAllocT* str = GCRuntime::tryNewTenuredThing<StringAllocT, NoGC>(cx, kind, size);
-        if (MOZ_UNLIKELY(allowGC && !str))
+        if (MOZ_UNLIKELY(allowGC && !str)) {
             ReportOutOfMemory(cx);
+        }
         return str;
     }
 
     JSRuntime* rt = cx->runtime();
-    if (!rt->gc.checkAllocatorState<allowGC>(cx, kind))
+    if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
         return nullptr;
+    }
 
     if (cx->nursery().isEnabled() &&
         heap != TenuredHeap &&
         cx->nursery().canAllocateStrings() &&
         cx->zone()->allocNurseryStrings)
     {
         auto str = static_cast<StringAllocT*>(rt->gc.tryNewNurseryString<allowGC>(cx, size, kind));
-        if (str)
+        if (str) {
             return str;
+        }
 
         // Our most common non-jit allocation path is NoGC; thus, if we fail the
         // alloc and cannot GC, we *must* return nullptr here so that the caller
         // will do a CanGC allocation to clear the nursery. Failing to do so will
         // cause all allocations on this path to land in Tenured, and we will not
         // get the benefit of the nursery.
-        if (!allowGC)
+        if (!allowGC) {
             return nullptr;
+        }
     }
 
     return GCRuntime::tryNewTenuredThing<StringAllocT, allowGC>(cx, kind, size);
 }
 
 #define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, bgfinal, nursery, compact) \
     template type* js::AllocateString<type, NoGC>(JSContext* cx, InitialHeap heap);\
     template type* js::AllocateString<type, CanGC>(JSContext* cx, InitialHeap heap);
@@ -217,18 +231,19 @@ js::Allocate(JSContext* cx)
     static_assert(sizeof(T) >= MinCellSize,
                   "All allocations must be at least the allocator-imposed minimum size.");
 
     AllocKind kind = MapTypeToFinalizeKind<T>::kind;
     size_t thingSize = sizeof(T);
     MOZ_ASSERT(thingSize == Arena::thingSize(kind));
 
     if (!cx->helperThread()) {
-        if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind))
+        if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind)) {
             return nullptr;
+        }
     }
 
     return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
 }
 
 #define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, bgFinal, nursery, compact) \
     template type* js::Allocate<type, NoGC>(JSContext* cx);\
     template type* js::Allocate<type, CanGC>(JSContext* cx);
@@ -253,79 +268,85 @@ GCRuntime::tryNewTenuredThing(JSContext*
                 // all-compartments, non-incremental, shrinking GC and wait for
                 // sweeping to finish.
                 JS::PrepareForFullGC(cx);
                 cx->runtime()->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
                 cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();
 
                 t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
             }
-            if (!t)
+            if (!t) {
                 ReportOutOfMemory(cx);
+            }
         }
     }
 
     checkIncrementalZoneState(cx, t);
     gcTracer.traceTenuredAlloc(t, kind);
     // We count this regardless of the profiler's state, assuming that it costs just as much to
     // count it, as to check the profiler's state and decide not to count it.
     cx->noteTenuredAlloc();
     return t;
 }
 
 template <AllowGC allowGC>
 bool
 GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind)
 {
     if (allowGC) {
-        if (!gcIfNeededAtAllocation(cx))
+        if (!gcIfNeededAtAllocation(cx)) {
             return false;
+        }
     }
 
 #if defined(JS_GC_ZEAL) || defined(DEBUG)
     MOZ_ASSERT_IF(cx->zone()->isAtomsZone(),
                   kind == AllocKind::ATOM ||
                   kind == AllocKind::FAT_INLINE_ATOM ||
                   kind == AllocKind::SYMBOL ||
                   kind == AllocKind::JITCODE ||
                   kind == AllocKind::SCOPE);
     MOZ_ASSERT_IF(!cx->zone()->isAtomsZone(),
                   kind != AllocKind::ATOM &&
                   kind != AllocKind::FAT_INLINE_ATOM);
     MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
 #endif
 
     // Crash if we perform a GC action when it is not safe.
-    if (allowGC && !cx->suppressGC)
+    if (allowGC && !cx->suppressGC) {
         cx->verifyIsSafeToGC();
+    }
 
     // For testing out of memory conditions
     if (js::oom::ShouldFailWithOOM()) {
         // If we are doing a fallible allocation, percolate up the OOM
         // instead of reporting it.
-        if (allowGC)
+        if (allowGC) {
             ReportOutOfMemory(cx);
+        }
         return false;
     }
 
     return true;
 }
 
 bool
 GCRuntime::gcIfNeededAtAllocation(JSContext* cx)
 {
 #ifdef JS_GC_ZEAL
-    if (needZealousGC())
+    if (needZealousGC()) {
         runDebugGC();
+    }
 #endif
 
     // Invoking the interrupt callback can fail and we can't usefully
     // handle that here. Just check in case we need to collect instead.
-    if (cx->hasAnyPendingInterrupt())
+    if (cx->hasAnyPendingInterrupt()) {
         gcIfRequested();
+    }
 
     // If we have grown past our GC heap threshold while in the middle of
     // an incremental GC, we're growing faster than we're GCing, so stop
     // the world and do a full, non-incremental GC right now, if possible.
     if (isIncrementalGCInProgress() &&
         cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
     {
         PrepareZoneForGC(cx->zone());
@@ -335,65 +356,70 @@ GCRuntime::gcIfNeededAtAllocation(JSCont
     return true;
 }
 
 template <typename T>
 /* static */ void
 GCRuntime::checkIncrementalZoneState(JSContext* cx, T* t)
 {
 #ifdef DEBUG
-    if (cx->helperThread() || !t)
+    if (cx->helperThread() || !t) {
         return;
+    }
 
     TenuredCell* cell = &t->asTenured();
     Zone* zone = cell->zone();
-    if (zone->isGCMarking() || zone->isGCSweeping())
+    if (zone->isGCMarking() || zone->isGCSweeping()) {
         MOZ_ASSERT(cell->isMarkedBlack());
-    else
+    } else {
         MOZ_ASSERT(!cell->isMarkedAny());
+    }
 #endif
 }
 
 TenuredCell*
 js::gc::AllocateCellInGC(Zone* zone, AllocKind thingKind)
 {
     void* cell = zone->arenas.allocateFromFreeList(thingKind);
     if (!cell) {
         AutoEnterOOMUnsafeRegion oomUnsafe;
         cell = GCRuntime::refillFreeListInGC(zone, thingKind);
-        if (!cell)
+        if (!cell) {
             oomUnsafe.crash(ChunkSize, "Failed not allocate new chunk during GC");
+        }
     }
     return TenuredCell::fromPointer(cell);
 }
 
 
 // ///////////  Arena -> Thing Allocator  //////////////////////////////////////
 
 bool
 GCRuntime::startBackgroundAllocTaskIfIdle()
 {
     AutoLockHelperThreadState helperLock;
-    if (allocTask.isRunningWithLockHeld(helperLock))
+    if (allocTask.isRunningWithLockHeld(helperLock)) {
         return true;
+    }
 
     // Join the previous invocation of the task. This will return immediately
     // if the thread has never been started.
     allocTask.joinWithLockHeld(helperLock);
 
     return allocTask.startWithLockHeld(helperLock);
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind)
 {
     MOZ_ASSERT(cx->freeLists().isEmpty(thingKind));
 
-    if (!cx->helperThread())
+    if (!cx->helperThread()) {
         return refillFreeListFromMainThread(cx, thingKind);
+    }
 
     return refillFreeListFromHelperThread(cx, thingKind);
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind)
 {
     // It should not be possible to allocate on the main thread while we are
@@ -434,63 +460,69 @@ ArenaLists::refillFreeListAndAllocate(Fr
 {
     MOZ_ASSERT(freeLists.isEmpty(thingKind));
 
     JSRuntime* rt = runtimeFromAnyThread();
 
     mozilla::Maybe<AutoLockGCBgAlloc> maybeLock;
 
     // See if we can proceed without taking the GC lock.
-    if (concurrentUse(thingKind) != ConcurrentUse::None)
+    if (concurrentUse(thingKind) != ConcurrentUse::None) {
         maybeLock.emplace(rt);
+    }
 
     ArenaList& al = arenaLists(thingKind);
     Arena* arena = al.takeNextArena();
     if (arena) {
         // Empty arenas should be immediately freed.
         MOZ_ASSERT(!arena->isEmpty());
 
         return freeLists.setArenaAndAllocate(arena, thingKind);
     }
 
     // Parallel threads have their own ArenaLists, but chunks are shared;
     // if we haven't already, take the GC lock now to avoid racing.
-    if (maybeLock.isNothing())
+    if (maybeLock.isNothing()) {
         maybeLock.emplace(rt);
+    }
 
     Chunk* chunk = rt->gc.pickChunk(maybeLock.ref());
-    if (!chunk)
+    if (!chunk) {
         return nullptr;
+    }
 
     // Although our chunk should definitely have enough space for another arena,
     // there are other valid reasons why Chunk::allocateArena() may fail.
     arena = rt->gc.allocateArena(chunk, zone_, thingKind, checkThresholds, maybeLock.ref());
-    if (!arena)
+    if (!arena) {
         return nullptr;
+    }
 
     MOZ_ASSERT(al.isCursorAtEnd());
     al.insertBeforeCursor(arena);
 
     return freeLists.setArenaAndAllocate(arena, thingKind);
 }
 
 inline TenuredCell*
 FreeLists::setArenaAndAllocate(Arena* arena, AllocKind kind)
 {
 #ifdef DEBUG
     auto old = freeLists_[kind];
-    if (!old->isEmpty())
+    if (!old->isEmpty()) {
         old->getArena()->checkNoMarkedFreeCells();
+    }
 #endif
 
     FreeSpan* span = arena->getFirstFreeSpan();
     freeLists_[kind] = span;
 
-    if (MOZ_UNLIKELY(arena->zone->wasGCStarted()))
+    if (MOZ_UNLIKELY(arena->zone->wasGCStarted())) {
         arena->arenaAllocatedDuringGC();
+    }
 
     TenuredCell* thing = span->allocate(Arena::thingSize(kind));
     MOZ_ASSERT(thing); // This allocation is infallible.
 
     return thing;
 }
 
 void
@@ -559,18 +591,19 @@ GCRuntime::allocateArena(Chunk* chunk, Z
     if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
         (usage.gcBytes() >= tunables.gcMaxBytes()))
         return nullptr;
 
     Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
     zone->usage.addGCArena();
 
     // Trigger an incremental slice if needed.
-    if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds)
+    if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
         maybeAllocTriggerZoneGC(zone, lock);
+    }
 
     return arena;
 }
 
 Arena*
 Chunk::allocateArena(JSRuntime* rt, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
 {
     Arena* arena = info.numArenasFreeCommitted > 0
@@ -627,63 +660,69 @@ Chunk::fetchNextDecommittedArena()
  * it to the most recently freed arena when we free, and forcing it to
  * the last alloc + 1 when we allocate.
  */
 uint32_t
 Chunk::findDecommittedArenaOffset()
 {
     /* Note: lastFreeArenaOffset can be past the end of the list. */
     for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++) {
-        if (decommittedArenas.get(i))
+        if (decommittedArenas.get(i)) {
             return i;
+        }
     }
     for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++) {
-        if (decommittedArenas.get(i))
+        if (decommittedArenas.get(i)) {
             return i;
+        }
     }
     MOZ_CRASH("No decommitted arenas found.");
 }
 
 
 // ///////////  System -> Chunk Allocator  /////////////////////////////////////
 
 Chunk*
 GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock)
 {
     Chunk* chunk = emptyChunks(lock).pop();
     if (!chunk) {
         chunk = Chunk::allocate(rt);
-        if (!chunk)
+        if (!chunk) {
             return nullptr;
+        }
         MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
     }
 
-    if (wantBackgroundAllocation(lock))
+    if (wantBackgroundAllocation(lock)) {
         lock.tryToStartBackgroundAllocation();
+    }
 
     return chunk;
 }
 
 void
 GCRuntime::recycleChunk(Chunk* chunk, const AutoLockGC& lock)
 {
     AlwaysPoison(&chunk->trailer, JS_FREED_CHUNK_PATTERN, sizeof(ChunkTrailer),
                  MemCheckKind::MakeNoAccess);
     emptyChunks(lock).push(chunk);
 }
 
 Chunk*
 GCRuntime::pickChunk(AutoLockGCBgAlloc& lock)
 {
-    if (availableChunks(lock).count())
+    if (availableChunks(lock).count()) {
         return availableChunks(lock).head();
+    }
 
     Chunk* chunk = getOrAllocChunk(lock);
-    if (!chunk)
+    if (!chunk) {
         return nullptr;
+    }
 
     chunk->init(rt);
     MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
     MOZ_ASSERT(chunk->unused());
     MOZ_ASSERT(!fullChunks(lock).contains(chunk));
     MOZ_ASSERT(!availableChunks(lock).contains(chunk));
 
     chunkAllocationSinceLastGC = true;
@@ -707,30 +746,32 @@ BackgroundAllocTask::run()
     AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);
 
     AutoLockGC lock(runtime());
     while (!cancel_ && runtime()->gc.wantBackgroundAllocation(lock)) {
         Chunk* chunk;
         {
             AutoUnlockGC unlock(lock);
             chunk = Chunk::allocate(runtime());
-            if (!chunk)
+            if (!chunk) {
                 break;
+            }
             chunk->init(runtime());
         }
         chunkPool_.ref().push(chunk);
     }
 }
 
 /* static */ Chunk*
 Chunk::allocate(JSRuntime* rt)
 {
     Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
-    if (!chunk)
+    if (!chunk) {
         return nullptr;
+    }
     rt->gc.stats().count(gcstats::STAT_NEW_CHUNK);
     return chunk;
 }
 
 void
 Chunk::init(JSRuntime* rt)
 {
     /* The chunk may still have some regions marked as no-access. */
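
The comments in the hunks above describe the allocation contract these branches implement: a NoGC attempt that fails must return nullptr so the caller can retry with CanGC, which is allowed to run a minor GC and clear the nursery. Below is a hedged, caller-side sketch of that retry pattern built on the js::Allocate instantiations declared above; the wrapper name AllocateObjectWithFallback and the exact namespace qualifiers are illustrative assumptions, not code from the tree:

// Illustrative sketch only (assumes the usual js/src includes, e.g.
// gc/Allocator.h). Fast path: a NoGC allocation never collects and simply
// returns nullptr when the nursery is full. Slow path: CanGC may run a
// minor GC, evict the nursery, and retry or tenure the allocation.
static JSObject*
AllocateObjectWithFallback(JSContext* cx, js::gc::AllocKind kind,
                           size_t nDynamicSlots, const js::Class* clasp)
{
    if (JSObject* obj = js::Allocate<JSObject, js::NoGC>(
            cx, kind, nDynamicSlots, js::gc::DefaultHeap, clasp)) {
        return obj;
    }
    return js::Allocate<JSObject, js::CanGC>(
        cx, kind, nDynamicSlots, js::gc::DefaultHeap, clasp);
}
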
--- a/js/src/gc/ArenaList-inl.h
+++ b/js/src/gc/ArenaList-inl.h
@@ -112,49 +112,52 @@ js::gc::ArenaList::isCursorAtEnd() const
 {
     check();
     return !*cursorp_;
 }
 
 void
 js::gc::ArenaList::moveCursorToEnd()
 {
-    while (!isCursorAtEnd())
+    while (!isCursorAtEnd()) {
         cursorp_ = &(*cursorp_)->next;
+    }
 }
 
 js::gc::Arena*
 js::gc::ArenaList::arenaAfterCursor() const
 {
     check();
     return *cursorp_;
 }
 
 js::gc::Arena*
 js::gc::ArenaList::takeNextArena()
 {
     check();
     Arena* arena = *cursorp_;
-    if (!arena)
+    if (!arena) {
         return nullptr;
+    }
     cursorp_ = &arena->next;
     check();
     return arena;
 }
 
 void
 js::gc::ArenaList::insertAtCursor(Arena* a)
 {
     check();
     a->next = *cursorp_;
     *cursorp_ = a;
     // At this point, the cursor is sitting before |a|. Move it after |a|
     // if necessary.
-    if (!a->hasFreeThings())
+    if (!a->hasFreeThings()) {
         cursorp_ = &a->next;
+    }
     check();
 }
 
 void
 js::gc::ArenaList::insertBeforeCursor(Arena* a)
 {
     check();
     a->next = *cursorp_;
@@ -164,18 +167,19 @@ js::gc::ArenaList::insertBeforeCursor(Ar
 }
 
 js::gc::ArenaList&
 js::gc::ArenaList::insertListWithCursorAtEnd(const ArenaList& other)
 {
     check();
     other.check();
     MOZ_ASSERT(other.isCursorAtEnd());
-    if (other.isCursorAtHead())
+    if (other.isCursorAtHead()) {
         return *this;
+    }
     // Insert the full arenas of |other| after those of |this|.
     *other.cursorp_ = *cursorp_;
     *cursorp_ = other.head_;
     cursorp_ = other.cursorp_;
     check();
     return *this;
 }
 
@@ -191,18 +195,19 @@ js::gc::SortedArenaList::setThingsPerAre
     thingsPerArena_ = thingsPerArena;
 }
 
 void
 js::gc::SortedArenaList::reset(size_t thingsPerArena)
 {
     setThingsPerArena(thingsPerArena);
     // Initialize the segments.
-    for (size_t i = 0; i <= thingsPerArena; ++i)
+    for (size_t i = 0; i <= thingsPerArena; ++i) {
         segments[i].clear();
+    }
 }
 
 void
 js::gc::SortedArenaList::insertAt(Arena* arena, size_t nfree)
 {
     MOZ_ASSERT(nfree <= thingsPerArena_);
     segments[nfree].append(arena);
 }
@@ -238,18 +243,19 @@ js::gc::SortedArenaList::toArenaList()
 }
 
 #ifdef DEBUG
 
 bool
 js::gc::FreeLists::allEmpty() const
 {
     for (auto i : AllAllocKinds()) {
-        if (!isEmpty(i))
+        if (!isEmpty(i)) {
             return false;
+        }
     }
     return true;
 }
 
 bool
 js::gc::FreeLists::isEmpty(AllocKind kind) const
 {
     return freeLists_[kind]->isEmpty();
@@ -258,35 +264,37 @@ js::gc::FreeLists::isEmpty(AllocKind kin
 #endif
 
 void
 js::gc::FreeLists::clear()
 {
     for (auto i : AllAllocKinds()) {
 #ifdef DEBUG
         auto old = freeLists_[i];
-        if (!old->isEmpty())
+        if (!old->isEmpty()) {
             old->getArena()->checkNoMarkedFreeCells();
+        }
 #endif
         freeLists_[i] = &emptySentinel;
     }
 }
 
 js::gc::TenuredCell*
 js::gc::FreeLists::allocate(AllocKind kind)
 {
     return freeLists_[kind]->allocate(Arena::thingSize(kind));
 }
 
 void
 js::gc::FreeLists::unmarkPreMarkedFreeCells(AllocKind kind)
 {
     FreeSpan* freeSpan = freeLists_[kind];
-    if (!freeSpan->isEmpty())
+    if (!freeSpan->isEmpty()) {
         freeSpan->getArena()->unmarkPreMarkedFreeCells();
+    }
 }
 
 JSRuntime*
 js::gc::ArenaLists::runtime()
 {
     return zone_->runtimeFromMainThread();
 }
 
@@ -306,18 +314,19 @@ js::gc::Arena*
 js::gc::ArenaLists::getFirstArenaToSweep(AllocKind thingKind) const
 {
     return arenaListsToSweep(thingKind);
 }
 
 js::gc::Arena*
 js::gc::ArenaLists::getFirstSweptArena(AllocKind thingKind) const
 {
-    if (thingKind != incrementalSweptArenaKind.ref())
+    if (thingKind != incrementalSweptArenaKind.ref()) {
         return nullptr;
+    }
     return incrementalSweptArenas.ref().head();
 }
 
 js::gc::Arena*
 js::gc::ArenaLists::getArenaAfterCursor(AllocKind thingKind) const
 {
     return arenaLists(thingKind).arenaAfterCursor();
 }
@@ -325,32 +334,35 @@ js::gc::ArenaLists::getArenaAfterCursor(
 bool
 js::gc::ArenaLists::arenaListsAreEmpty() const
 {
     for (auto i : AllAllocKinds()) {
         /*
          * The arena cannot be empty if the background finalization is not yet
          * done.
          */
-        if (concurrentUse(i) == ConcurrentUse::BackgroundFinalize)
+        if (concurrentUse(i) == ConcurrentUse::BackgroundFinalize) {
             return false;
-        if (!arenaLists(i).isEmpty())
+        }
+        if (!arenaLists(i).isEmpty()) {
             return false;
+        }
     }
     return true;
 }
 
 void
 js::gc::ArenaLists::unmarkAll()
 {
     for (auto i : AllAllocKinds()) {
         /* The background finalization must have stopped at this point. */
         MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
-        for (Arena* arena = arenaLists(i).head(); arena; arena = arena->next)
+        for (Arena* arena = arenaLists(i).head(); arena; arena = arena->next) {
             arena->unmarkAll();
+        }
     }
 }
 
 bool
 js::gc::ArenaLists::doneBackgroundFinalize(AllocKind kind) const
 {
     return concurrentUse(kind) != ConcurrentUse::BackgroundFinalize;
 }
@@ -371,32 +383,34 @@ MOZ_ALWAYS_INLINE js::gc::TenuredCell*
 js::gc::ArenaLists::allocateFromFreeList(AllocKind thingKind)
 {
     return freeLists().allocate(thingKind);
 }
 
 void
 js::gc::ArenaLists::unmarkPreMarkedFreeCells()
 {
-    for (auto i : AllAllocKinds())
+    for (auto i : AllAllocKinds()) {
         freeLists().unmarkPreMarkedFreeCells(i);
+    }
 }
 
 void
 js::gc::ArenaLists::checkEmptyFreeLists()
 {
     MOZ_ASSERT(freeLists().allEmpty());
 }
 
 bool
 js::gc::ArenaLists::checkEmptyArenaLists()
 {
     bool empty = true;
 #ifdef DEBUG
     for (auto i : AllAllocKinds()) {
-        if (!checkEmptyArenaList(i))
+        if (!checkEmptyArenaList(i)) {
             empty = false;
+        }
     }
 #endif
     return empty;
 }
 
 #endif // gc_ArenaList_inl_h
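
ArenaList (above) tracks its insertion point with a pointer-to-pointer cursor (cursorp_): inserting before the cursor splices a node in without walking the list, and taking the next arena just follows one next link. A minimal standalone sketch of that idiom, with hypothetical Node/CursorList names:

// Hypothetical sketch of the cursorp_ idiom, not code from the tree.
struct Node {
    Node* next = nullptr;
};

struct CursorList {
    Node* head = nullptr;
    Node** cursorp = &head;     // The link the cursor currently sits on.

    // Splice |n| in before the cursor, then keep the cursor after |n|,
    // mirroring ArenaList::insertBeforeCursor above.
    void insertBeforeCursor(Node* n) {
        n->next = *cursorp;
        *cursorp = n;
        cursorp = &n->next;
    }

    // Hand out the node after the cursor and advance past it, mirroring
    // ArenaList::takeNextArena above.
    Node* takeNextNode() {
        Node* n = *cursorp;
        if (!n) {
            return nullptr;
        }
        cursorp = &n->next;
        return n;
    }
};
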
--- a/js/src/gc/AtomMarking-inl.h
+++ b/js/src/gc/AtomMarking-inl.h
@@ -42,22 +42,24 @@ AtomMarkingRuntime::inlinedMarkAtom(JSCo
                   mozilla::IsSame<T, JS::Symbol>::value,
                   "Should only be called with JSAtom* or JS::Symbol* argument");
 
     MOZ_ASSERT(thing);
     js::gc::TenuredCell* cell = &thing->asTenured();
     MOZ_ASSERT(cell->zoneFromAnyThread()->isAtomsZone());
 
     // The context's zone will be null during initialization of the runtime.
-    if (!cx->zone())
+    if (!cx->zone()) {
         return;
+    }
     MOZ_ASSERT(!cx->zone()->isAtomsZone());
 
-    if (ThingIsPermanent(thing))
+    if (ThingIsPermanent(thing)) {
         return;
+    }
 
     size_t bit = GetAtomBit(cell);
     MOZ_ASSERT(bit / JS_BITS_PER_WORD < allocatedWords);
 
     cx->zone()->markedAtoms().setBit(bit);
 
     if (!cx->helperThread()) {
         // Trigger a read barrier on the atom, in case there is an incremental
--- a/js/src/gc/AtomMarking.cpp
+++ b/js/src/gc/AtomMarking.cpp
@@ -75,18 +75,19 @@ AtomMarkingRuntime::unregisterArena(Aren
 }
 
 bool
 AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime, DenseBitmap& bitmap)
 {
     MOZ_ASSERT(CurrentThreadIsPerformingGC());
     MOZ_ASSERT(!runtime->hasHelperThreadZones());
 
-    if (!bitmap.ensureSpace(allocatedWords))
+    if (!bitmap.ensureSpace(allocatedWords)) {
         return false;
+    }
 
     Zone* atomsZone = runtime->unsafeAtomsZone();
     for (auto thingKind : AllAllocKinds()) {
         for (ArenaIter aiter(atomsZone, thingKind); !aiter.done(); aiter.next()) {
             Arena* arena = aiter.get();
             uintptr_t* chunkWords = arena->chunk()->bitmap.arenaBits(arena);
             bitmap.copyBitsFrom(arena->atomBitmapStart(), ArenaBitmapWords, chunkWords);
         }
@@ -95,18 +96,19 @@ AtomMarkingRuntime::computeBitmapFromChu
     return true;
 }
 
 void
 AtomMarkingRuntime::refineZoneBitmapForCollectedZone(Zone* zone, const DenseBitmap& bitmap)
 {
     MOZ_ASSERT(zone->isCollectingFromAnyThread());
 
-    if (zone->isAtomsZone())
+    if (zone->isAtomsZone()) {
         return;
+    }
 
     // Take the bitwise and between the two mark bitmaps to get the best new
     // overapproximation we can. |bitmap| might include bits that are not in
     // the zone's mark bitmap, if additional zones were collected by the GC.
     zone->markedAtoms().bitwiseAndWith(bitmap);
 }
 
 // Set any bits in the chunk mark bitmaps for atoms which are marked in bitmap.
@@ -139,24 +141,26 @@ AtomMarkingRuntime::markAtomsUsedByUncol
     // the chunk mark bitmaps. If this allocation fails then fall back to
     // updating the chunk mark bitmaps separately for each zone.
     DenseBitmap markedUnion;
     if (markedUnion.ensureSpace(allocatedWords)) {
         for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
             // We only need to update the chunk mark bits for zones which were
             // not collected in the current GC. Atoms which are referenced by
             // collected zones have already been marked.
-            if (!zone->isCollectingFromAnyThread())
+            if (!zone->isCollectingFromAnyThread()) {
                 zone->markedAtoms().bitwiseOrInto(markedUnion);
+            }
         }
         BitwiseOrIntoChunkMarkBits(runtime, markedUnion);
     } else {
         for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
-            if (!zone->isCollectingFromAnyThread())
+            if (!zone->isCollectingFromAnyThread()) {
                 BitwiseOrIntoChunkMarkBits(runtime, zone->markedAtoms());
+            }
         }
     }
 }
 
 template <typename T>
 void
 AtomMarkingRuntime::markAtom(JSContext* cx, T* thing)
 {
@@ -179,18 +183,19 @@ AtomMarkingRuntime::markId(JSContext* cx
     }
     MOZ_ASSERT(!JSID_IS_GCTHING(id));
 }
 
 void
 AtomMarkingRuntime::markAtomValue(JSContext* cx, const Value& value)
 {
     if (value.isString()) {
-        if (value.toString()->isAtom())
+        if (value.toString()->isAtom()) {
             markAtom(cx, &value.toString()->asAtom());
+        }
         return;
     }
     if (value.isSymbol()) {
         markAtom(cx, value.toSymbol());
         return;
     }
     MOZ_ASSERT_IF(value.isGCThing(),
                   value.isObject() ||
@@ -214,73 +219,82 @@ AtomMarkingRuntime::atomIsMarked(Zone* z
     static_assert(mozilla::IsSame<T, JSAtom>::value ||
                   mozilla::IsSame<T, JS::Symbol>::value,
                   "Should only be called with JSAtom* or JS::Symbol* argument");
 
     MOZ_ASSERT(thing);
     MOZ_ASSERT(!IsInsideNursery(thing));
     MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
 
-    if (!zone->runtimeFromAnyThread()->permanentAtomsPopulated())
+    if (!zone->runtimeFromAnyThread()->permanentAtomsPopulated()) {
         return true;
+    }
 
-    if (ThingIsPermanent(thing))
+    if (ThingIsPermanent(thing)) {
         return true;
+    }
 
     size_t bit = GetAtomBit(&thing->asTenured());
     return zone->markedAtoms().getBit(bit);
 }
 
 template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JSAtom* thing);
 template bool AtomMarkingRuntime::atomIsMarked(Zone* zone, JS::Symbol* thing);
 
 template<>
 bool
 AtomMarkingRuntime::atomIsMarked(Zone* zone, TenuredCell* thing)
 {
-    if (!thing)
+    if (!thing) {
         return true;
+    }
 
     if (thing->is<JSString>()) {
         JSString* str = thing->as<JSString>();
-        if (!str->isAtom())
+        if (!str->isAtom()) {
             return true;
+        }
         return atomIsMarked(zone, &str->asAtom());
     }
 
-    if (thing->is<JS::Symbol>())
+    if (thing->is<JS::Symbol>()) {
         return atomIsMarked(zone, thing->as<JS::Symbol>());
+    }
 
     return true;
 }
 
 bool
 AtomMarkingRuntime::idIsMarked(Zone* zone, jsid id)
 {
-    if (JSID_IS_ATOM(id))
+    if (JSID_IS_ATOM(id)) {
         return atomIsMarked(zone, JSID_TO_ATOM(id));
+    }
 
-    if (JSID_IS_SYMBOL(id))
+    if (JSID_IS_SYMBOL(id)) {
         return atomIsMarked(zone, JSID_TO_SYMBOL(id));
+    }
 
     MOZ_ASSERT(!JSID_IS_GCTHING(id));
     return true;
 }
 
 bool
 AtomMarkingRuntime::valueIsMarked(Zone* zone, const Value& value)
 {
     if (value.isString()) {
-        if (value.toString()->isAtom())
+        if (value.toString()->isAtom()) {
             return atomIsMarked(zone, &value.toString()->asAtom());
+        }
         return true;
     }
 
-    if (value.isSymbol())
+    if (value.isSymbol()) {
         return atomIsMarked(zone, value.toSymbol());
+    }
 
     MOZ_ASSERT_IF(value.isGCThing(),
                   value.isObject() ||
                   value.isPrivateGCThing() ||
                   IF_BIGINT(value.isBigInt(), false));
     return true;
 }
 
--- a/js/src/gc/AtomMarking.h
+++ b/js/src/gc/AtomMarking.h
@@ -22,18 +22,19 @@ class Arena;
 class AtomMarkingRuntime
 {
     // Unused arena atom bitmap indexes. Protected by the GC lock.
     js::GCLockData<Vector<size_t, 0, SystemAllocPolicy>> freeArenaIndexes;
 
     void markChildren(JSContext* cx, JSAtom*) {}
 
     void markChildren(JSContext* cx, JS::Symbol* symbol) {
-        if (JSAtom* description = symbol->description())
+        if (JSAtom* description = symbol->description()) {
             markAtom(cx, description);
+        }
     }
 
   public:
     // The extent of all allocated and free words in atom mark bitmaps.
     // This monotonically increases and may be read from without locking.
     mozilla::Atomic<size_t, mozilla::SequentiallyConsistent,
                     mozilla::recordreplay::Behavior::DontPreserve> allocatedWords;
 
--- a/js/src/gc/Barrier.cpp
+++ b/js/src/gc/Barrier.cpp
@@ -35,18 +35,19 @@ bool
 IsMarkedBlack(JSObject* obj)
 {
     return obj->isMarkedBlack();
 }
 
 bool
 HeapSlot::preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot) const
 {
-    if (kind == Slot)
+    if (kind == Slot) {
         return &owner->getSlotRef(slot) == this;
+    }
 
     uint32_t numShifted = owner->getElementsHeader()->numShiftedElements();
     MOZ_ASSERT(slot >= numShifted);
     return &owner->getDenseElement(slot - numShifted) == (const Value*)this;
 }
 
 void
 HeapSlot::assertPreconditionForWriteBarrierPost(NativeObject* obj, Kind kind, uint32_t slot,
@@ -133,39 +134,42 @@ JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
 
 template void PreBarrierFunctor<jsid>::operator()<JS::Symbol>(JS::Symbol*);
 template void PreBarrierFunctor<jsid>::operator()<JSString>(JSString*);
 
 template <typename T>
 /* static */ bool
 MovableCellHasher<T>::hasHash(const Lookup& l)
 {
-    if (!l)
+    if (!l) {
         return true;
+    }
 
     return l->zoneFromAnyThread()->hasUniqueId(l);
 }
 
 template <typename T>
 /* static */ bool
 MovableCellHasher<T>::ensureHash(const Lookup& l)
 {
-    if (!l)
+    if (!l) {
         return true;
+    }
 
     uint64_t unusedId;
     return l->zoneFromAnyThread()->getOrCreateUniqueId(l, &unusedId);
 }
 
 template <typename T>
 /* static */ HashNumber
 MovableCellHasher<T>::hash(const Lookup& l)
 {
-    if (!l)
+    if (!l) {
         return 0;
+    }
 
     // We have to access the zone from-any-thread here: a worker thread may be
     // cloning a self-hosted object from the main runtime's self- hosting zone
     // into another runtime. The zone's uid lock will protect against multiple
     // workers doing this simultaneously.
     MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
                l->zoneFromAnyThread()->isSelfHostingZone() ||
                CurrentThreadIsPerformingGC());
@@ -173,29 +177,32 @@ MovableCellHasher<T>::hash(const Lookup&
     return l->zoneFromAnyThread()->getHashCodeInfallible(l);
 }
 
 template <typename T>
 /* static */ bool
 MovableCellHasher<T>::match(const Key& k, const Lookup& l)
 {
     // Return true if both are null or false if only one is null.
-    if (!k)
+    if (!k) {
         return !l;
-    if (!l)
+    }
+    if (!l) {
         return false;
+    }
 
     MOZ_ASSERT(k);
     MOZ_ASSERT(l);
     MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
                l->zoneFromAnyThread()->isSelfHostingZone());
 
     Zone* zone = k->zoneFromAnyThread();
-    if (zone != l->zoneFromAnyThread())
+    if (zone != l->zoneFromAnyThread()) {
         return false;
+    }
 
 #ifdef DEBUG
     // Incremental table sweeping means that existing table entries may no
     // longer have unique IDs. We fail the match in that case and the entry is
     // removed from the table later on.
     if (!zone->hasUniqueId(k)) {
         Key key = k;
         MOZ_ASSERT(IsAboutToBeFinalizedUnbarriered(&key));
--- a/js/src/gc/Barrier.h
+++ b/js/src/gc/Barrier.h
@@ -298,24 +298,26 @@ struct InternalBarrierMethods<Value>
 
         // If the target needs an entry, add it.
         js::gc::StoreBuffer* sb;
         if ((next.isObject() || next.isString()) && (sb = next.toGCThing()->storeBuffer())) {
             // If we know that the prev has already inserted an entry, we can
             // skip doing the lookup to add the new entry. Note that we cannot
             // safely assert the presence of the entry because it may have been
             // added via a different store buffer.
-            if ((prev.isObject() || prev.isString()) && prev.toGCThing()->storeBuffer())
+            if ((prev.isObject() || prev.isString()) && prev.toGCThing()->storeBuffer()) {
                 return;
+            }
             sb->putValue(vp);
             return;
         }
         // Remove the prev entry if the new value does not need it.
-        if ((prev.isObject() || prev.isString()) && (sb = prev.toGCThing()->storeBuffer()))
+        if ((prev.isObject() || prev.isString()) && (sb = prev.toGCThing()->storeBuffer())) {
             sb->unputValue(vp);
+        }
     }
 
     static void readBarrier(const Value& v) {
         DispatchTyped(ReadBarrierFunctor<Value>(), v);
     }
 
 #ifdef DEBUG
     static bool thingIsNotGray(const Value& v) { return JS::ValueIsNotGray(v); }
@@ -642,18 +644,19 @@ class ReadBarriered : public ReadBarrier
         CheckTargetIsNotGray(v.value);
         T prior = this->value;
         this->value = v.value;
         this->post(prior, v.value);
         return *this;
     }
 
     const T& get() const {
-        if (InternalBarrierMethods<T>::isMarkable(this->value))
+        if (InternalBarrierMethods<T>::isMarkable(this->value)) {
             this->read();
+        }
         return this->value;
     }
 
     const T& unbarrieredGet() const {
         return this->value;
     }
 
     explicit operator bool() const {
@@ -716,18 +719,19 @@ class HeapSlot : public WriteBarrieredBa
 
   private:
     void post(NativeObject* owner, Kind kind, uint32_t slot, const Value& target) {
 #ifdef DEBUG
         assertPreconditionForWriteBarrierPost(owner, kind, slot, target);
 #endif
         if (this->value.isObject() || this->value.isString()) {
             gc::Cell* cell = this->value.toGCThing();
-            if (cell->storeBuffer())
+            if (cell->storeBuffer()) {
                 cell->storeBuffer()->putSlot(owner, kind, slot, 1);
+            }
         }
     }
 };
 
 class HeapSlotArray
 {
     HeapSlot* array;
 
--- a/js/src/gc/Cell.h
+++ b/js/src/gc/Cell.h
@@ -284,20 +284,22 @@ inline StoreBuffer*
 Cell::storeBuffer() const
 {
     return chunk()->trailer.storeBuffer;
 }
 
 inline JS::TraceKind
 Cell::getTraceKind() const
 {
-    if (isTenured())
+    if (isTenured()) {
         return asTenured().getTraceKind();
-    if (nurseryCellIsString())
+    }
+    if (nurseryCellIsString()) {
         return JS::TraceKind::String;
+    }
     return JS::TraceKind::Object;
 }
 
 /* static */ MOZ_ALWAYS_INLINE bool
 Cell::needWriteBarrierPre(JS::Zone* zone) {
     return JS::shadow::Zone::asShadowZone(zone)->needsIncrementalBarrier();
 }
 
@@ -424,30 +426,32 @@ TenuredCell::readBarrier(TenuredCell* th
         Cell* tmp = thing;
         TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "read barrier");
         MOZ_ASSERT(tmp == thing);
     }
 
     if (thing->isMarkedGray()) {
         // There shouldn't be anything marked grey unless we're on the main thread.
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()));
-        if (!JS::RuntimeHeapIsCollecting())
+        if (!JS::RuntimeHeapIsCollecting()) {
             JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr(thing, thing->getTraceKind()));
+        }
     }
 }
 
 void
 AssertSafeToSkipBarrier(TenuredCell* thing);
 
 /* static */ MOZ_ALWAYS_INLINE void
 TenuredCell::writeBarrierPre(TenuredCell* thing)
 {
     MOZ_ASSERT(!CurrentThreadIsIonCompiling());
-    if (!thing)
+    if (!thing) {
         return;
+    }
 
 #ifdef JS_GC_ZEAL
     // When verifying pre barriers we need to switch on all barriers, even
     // those on the Atoms Zone. Normally, we never enter a parse task when
     // collecting in the atoms zone, so will filter out atoms below.
     // Unfortuantely, If we try that when verifying pre-barriers, we'd never be
     // able to handle off thread parse tasks at all as we switch on the verifier any
     // time we're not doing GC. This would cause us to deadlock, as off thread parsing
@@ -489,18 +493,19 @@ TenuredCell::writeBarrierPost(void* cell
 Cell::thingIsNotGray(Cell* cell)
 {
     return JS::CellIsNotGray(cell);
 }
 
 bool
 Cell::isAligned() const
 {
-    if (!isTenured())
+    if (!isTenured()) {
         return true;
+    }
     return asTenured().isAligned();
 }
 
 bool
 TenuredCell::isAligned() const
 {
     return Arena::isAligned(address(), arena()->getThingSize());
 }
--- a/js/src/gc/FindSCCs.h
+++ b/js/src/gc/FindSCCs.h
@@ -27,18 +27,19 @@ struct GraphNodeBase
       : gcNextGraphNode(nullptr),
         gcNextGraphComponent(nullptr),
         gcDiscoveryTime(0),
         gcLowLink(0) {}
 
     ~GraphNodeBase() {}
 
     Node* nextNodeInGroup() const {
-        if (gcNextGraphNode && gcNextGraphNode->gcNextGraphComponent == gcNextGraphComponent)
+        if (gcNextGraphNode && gcNextGraphNode->gcNextGraphComponent == gcNextGraphComponent) {
             return gcNextGraphNode;
+        }
         return nullptr;
     }
 
     Node* nextGroup() const {
         return gcNextGraphComponent;
     }
 };
 
@@ -123,18 +124,19 @@ class ComponentFinder
             v->gcDiscoveryTime = Undefined;
             v->gcLowLink = Undefined;
         }
 
         return result;
     }
 
     static void mergeGroups(Node* first) {
-        for (Node* v = first; v; v = v->gcNextGraphNode)
+        for (Node* v = first; v; v = v->gcNextGraphNode) {
             v->gcNextGraphComponent = nullptr;
+        }
     }
 
   public:
     /* Call from implementation of GraphNodeBase::findOutgoingEdges(). */
     void addEdgeTo(Node* w) {
         if (w->gcDiscoveryTime == Undefined) {
             processNode(w);
             cur->gcLowLink = Min(cur->gcLowLink, w->gcLowLink);
@@ -164,18 +166,19 @@ class ComponentFinder
             return;
         }
 
         Node* old = cur;
         cur = v;
         cur->findOutgoingEdges(*static_cast<Derived*>(this));
         cur = old;
 
-        if (stackFull)
+        if (stackFull) {
             return;
+        }
 
         if (v->gcLowLink == v->gcDiscoveryTime) {
             Node* nextComponent = firstComponent;
             Node* w;
             do {
                 MOZ_ASSERT(stack);
                 w = stack;
                 stack = w->gcNextGraphNode;
--- a/js/src/gc/FreeOp.h
+++ b/js/src/gc/FreeOp.h
@@ -57,18 +57,19 @@ class FreeOp : public JSFreeOp
     }
 
     void freeLater(void* p) {
         // FreeOps other than the defaultFreeOp() are constructed on the stack,
         // and won't hold onto the pointers to free indefinitely.
         MOZ_ASSERT(!isDefaultFreeOp());
 
         AutoEnterOOMUnsafeRegion oomUnsafe;
-        if (!freeLaterList.append(p))
+        if (!freeLaterList.append(p)) {
             oomUnsafe.crash("FreeOp::freeLater");
+        }
     }
 
     bool appendJitPoisonRange(const jit::JitPoisonRange& range) {
         // FreeOps other than the defaultFreeOp() are constructed on the stack,
         // and won't hold onto the pointers to free indefinitely.
         MOZ_ASSERT(!isDefaultFreeOp());
 
         return jitPoisonRanges.append(range);