Bug 1488698 - Always use braces for if/for/while statements in js/src/jit, part 2. r=mgaudet
author Jan de Mooij <jdemooij@mozilla.com>
Thu, 06 Sep 2018 11:51:36 +0200
changeset 435744 6a52f517bdb44b42b677c7c50a67d9306b3ed935
parent 435743 817b7142d4d965f9ab591a33268e2a59d29e4aed
child 435745 19ae9ff093caf9f9071277549c5aac056db90587
push id 34618
push user btara@mozilla.com
push date Tue, 11 Sep 2018 22:13:11 +0000
treeherder mozilla-central@1169e8a4ca2b
reviewers mgaudet
bugs 1488698
milestone 64.0a1
Bug 1488698 - Always use braces for if/for/while statements in js/src/jit, part 2. r=mgaudet
js/src/jit/AliasAnalysis.cpp
js/src/jit/AlignmentMaskAnalysis.cpp
js/src/jit/BacktrackingAllocator.cpp
js/src/jit/BacktrackingAllocator.h
js/src/jit/Bailouts.cpp
js/src/jit/BitSet.cpp
js/src/jit/BitSet.h
js/src/jit/BytecodeAnalysis.cpp
js/src/jit/BytecodeAnalysis.h
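For readers skimming the diff, the following is a minimal, self-contained C++ sketch of the rule the patch enforces; the function and variable names (SumPositive, values, length) are hypothetical, and only the brace style reflects the actual changes below.

#include <cstddef>

// Before the patch, single-statement bodies commonly omitted braces:
//
//     if (!values)
//         return 0;
//     for (size_t i = 0; i < length; i++)
//         total += values[i];
//
// After the patch, every if/for/while body is braced, even one-liners:
static int SumPositive(const int* values, size_t length)
{
    if (!values) {
        return 0;
    }

    int total = 0;
    for (size_t i = 0; i < length; i++) {
        if (values[i] <= 0) {
            continue;
        }
        total += values[i];
    }
    return total;
}
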
--- a/js/src/jit/AliasAnalysis.cpp
+++ b/js/src/jit/AliasAnalysis.cpp
@@ -63,20 +63,22 @@ AliasAnalysis::spewDependencyList()
         JitSpewHeader(JitSpew_AliasSummaries);
         print.printf("Dependency list for other passes:\n");
 
         for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
             for (MInstructionIterator def(block->begin()), end(block->begin(block->lastIns()));
                  def != end;
                  ++def)
             {
-                if (!def->dependency())
+                if (!def->dependency()) {
                     continue;
-                if (!def->getAliasSet().isLoad())
+                }
+                if (!def->getAliasSet().isLoad()) {
                     continue;
+                }
 
                 JitSpewHeader(JitSpew_AliasSummaries);
                 print.printf(" ");
                 MDefinition::PrintOpcodeName(print, def->op());
                 print.printf("%d marked depending on ", def->id());
                 MDefinition::PrintOpcodeName(print, def->dependency()->op());
                 print.printf("%d\n", def->dependency()->id());
             }
@@ -90,33 +92,37 @@ static inline const MDefinition*
 MaybeUnwrap(const MDefinition* object)
 {
 
     while (object->isSlots() || object->isElements() || object->isConvertElementsToDoubles()) {
         MOZ_ASSERT(object->numOperands() == 1);
         object = object->getOperand(0);
     }
 
-    if (object->isTypedArrayElements())
+    if (object->isTypedArrayElements()) {
+        return nullptr;
+    }
+    if (object->isTypedObjectElements()) {
         return nullptr;
-    if (object->isTypedObjectElements())
+    }
+    if (object->isConstantElements()) {
         return nullptr;
-    if (object->isConstantElements())
-        return nullptr;
+    }
 
     return object;
 }
 
 // Get the object of any load/store. Returns nullptr if not tied to
 // an object.
 static inline const MDefinition*
 GetObject(const MDefinition* ins)
 {
-    if (!ins->getAliasSet().isStore() && !ins->getAliasSet().isLoad())
+    if (!ins->getAliasSet().isStore() && !ins->getAliasSet().isLoad()) {
         return nullptr;
+    }
 
     // Note: only return the object if that object owns that property.
     // I.e. the property isn't on the prototype chain.
     const MDefinition* object = nullptr;
     switch (ins->op()) {
       case MDefinition::Opcode::InitializedLength:
       case MDefinition::Opcode::LoadElement:
       case MDefinition::Opcode::LoadUnboxedScalar:
@@ -189,18 +195,19 @@ GetObject(const MDefinition* ins)
       case MDefinition::Opcode::WasmLoadGlobalCell:
       case MDefinition::Opcode::WasmStoreGlobalVar:
       case MDefinition::Opcode::WasmStoreGlobalCell:
       case MDefinition::Opcode::ArrayJoin:
         return nullptr;
       default:
 #ifdef DEBUG
         // Crash when the default aliasSet is overridden but not added to the list above.
-        if (!ins->getAliasSet().isStore() || ins->getAliasSet().flags() != AliasSet::Flag::Any)
+        if (!ins->getAliasSet().isStore() || ins->getAliasSet().flags() != AliasSet::Flag::Any) {
             MOZ_CRASH("Overridden getAliasSet without updating AliasAnalysis GetObject");
+        }
 #endif
 
         return nullptr;
     }
 
     MOZ_ASSERT(!ins->getAliasSet().isStore() || ins->getAliasSet().flags() != AliasSet::Flag::Any);
     object = MaybeUnwrap(object);
     MOZ_ASSERT_IF(object, object->type() == MIRType::Object);
@@ -208,75 +215,82 @@ GetObject(const MDefinition* ins)
 }
 
 // Generic comparing if a load aliases a store using TI information.
 MDefinition::AliasType
 AliasAnalysis::genericMightAlias(const MDefinition* load, const MDefinition* store)
 {
     const MDefinition* loadObject = GetObject(load);
     const MDefinition* storeObject = GetObject(store);
-    if (!loadObject || !storeObject)
+    if (!loadObject || !storeObject) {
         return MDefinition::AliasType::MayAlias;
+    }
 
-    if (!loadObject->resultTypeSet() || !storeObject->resultTypeSet())
+    if (!loadObject->resultTypeSet() || !storeObject->resultTypeSet()) {
         return MDefinition::AliasType::MayAlias;
+    }
 
-    if (loadObject->resultTypeSet()->objectsIntersect(storeObject->resultTypeSet()))
+    if (loadObject->resultTypeSet()->objectsIntersect(storeObject->resultTypeSet())) {
         return MDefinition::AliasType::MayAlias;
+    }
 
     return MDefinition::AliasType::NoAlias;
 }
 
 // Whether there might be a path from src to dest, excluding loop backedges. This is
 // approximate and really ought to depend on precomputed reachability information.
 static inline bool
 BlockMightReach(MBasicBlock* src, MBasicBlock* dest)
 {
     while (src->id() <= dest->id()) {
-        if (src == dest)
+        if (src == dest) {
             return true;
+        }
         switch (src->numSuccessors()) {
           case 0:
             return false;
           case 1: {
             MBasicBlock* successor = src->getSuccessor(0);
-            if (successor->id() <= src->id())
+            if (successor->id() <= src->id()) {
                 return true; // Don't iloop.
+            }
             src = successor;
             break;
           }
           default:
             return true;
         }
     }
     return false;
 }
 
 static void
 IonSpewDependency(MInstruction* load, MInstruction* store, const char* verb, const char* reason)
 {
 #ifdef JS_JITSPEW
-    if (!JitSpewEnabled(JitSpew_Alias))
+    if (!JitSpewEnabled(JitSpew_Alias)) {
         return;
+    }
 
     Fprinter& out = JitSpewPrinter();
     out.printf("Load ");
     load->printName(out);
     out.printf(" %s on store ", verb);
     store->printName(out);
     out.printf(" (%s)\n", reason);
 #endif
 }
 
 static void
 IonSpewAliasInfo(const char* pre, MInstruction* ins, const char* post)
 {
 #ifdef JS_JITSPEW
-    if (!JitSpewEnabled(JitSpew_Alias))
+    if (!JitSpewEnabled(JitSpew_Alias)) {
         return;
+    }
 
     Fprinter& out = JitSpewPrinter();
     out.printf("%s ", pre);
     ins->printName(out);
     out.printf(" %s\n", post);
 #endif
 }
 
@@ -300,60 +314,68 @@ bool
 AliasAnalysis::analyze()
 {
     Vector<MInstructionVector, AliasSet::NumCategories, JitAllocPolicy> stores(alloc());
 
     // Initialize to the first instruction.
     MInstruction* firstIns = *graph_.entryBlock()->begin();
     for (unsigned i = 0; i < AliasSet::NumCategories; i++) {
         MInstructionVector defs(alloc());
-        if (!defs.append(firstIns))
+        if (!defs.append(firstIns)) {
             return false;
-        if (!stores.append(std::move(defs)))
+        }
+        if (!stores.append(std::move(defs))) {
             return false;
+        }
     }
 
     // Type analysis may have inserted new instructions. Since this pass depends
     // on the instruction number ordering, all instructions are renumbered.
     uint32_t newId = 0;
 
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-        if (mir->shouldCancel("Alias Analysis (main loop)"))
+        if (mir->shouldCancel("Alias Analysis (main loop)")) {
             return false;
+        }
 
         if (block->isLoopHeader()) {
             JitSpew(JitSpew_Alias, "Processing loop header %d", block->id());
             loop_ = new(alloc().fallible()) LoopAliasInfo(alloc(), loop_, *block);
-            if (!loop_)
+            if (!loop_) {
                 return false;
+            }
         }
 
-        for (MPhiIterator def(block->phisBegin()), end(block->phisEnd()); def != end; ++def)
+        for (MPhiIterator def(block->phisBegin()), end(block->phisEnd()); def != end; ++def) {
             def->setId(newId++);
+        }
 
         for (MInstructionIterator def(block->begin()), end(block->begin(block->lastIns()));
              def != end;
              ++def)
         {
             def->setId(newId++);
 
             AliasSet set = def->getAliasSet();
-            if (set.isNone())
+            if (set.isNone()) {
                 continue;
+            }
 
             // For the purposes of alias analysis, all recoverable operations
             // are treated as effect free as the memory represented by these
             // operations cannot be aliased by others.
-            if (def->canRecoverOnBailout())
+            if (def->canRecoverOnBailout()) {
                 continue;
+            }
 
             if (set.isStore()) {
                 for (AliasSetIterator iter(set); iter; iter++) {
-                    if (!stores[*iter].append(*def))
+                    if (!stores[*iter].append(*def)) {
                         return false;
+                    }
                 }
 
 #ifdef JS_JITSPEW
                 if (JitSpewEnabled(JitSpew_Alias)) {
                     Fprinter& out = JitSpewPrinter();
                     out.printf("Processing store ");
                     def->printName(out);
                     out.printf(" (flags %x)\n", set.flags());
@@ -366,32 +388,34 @@ AliasAnalysis::analyze()
                 for (AliasSetIterator iter(set); iter; iter++) {
                     MInstructionVector& aliasedStores = stores[*iter];
                     for (int i = aliasedStores.length() - 1; i >= 0; i--) {
                         MInstruction* store = aliasedStores[i];
                         if (genericMightAlias(*def, store) != MDefinition::AliasType::NoAlias &&
                             def->mightAlias(store) != MDefinition::AliasType::NoAlias &&
                             BlockMightReach(store->block(), *block))
                         {
-                            if (lastStore->id() < store->id())
+                            if (lastStore->id() < store->id()) {
                                 lastStore = store;
+                            }
                             break;
                         }
                     }
                 }
 
                 def->setDependency(lastStore);
                 IonSpewDependency(*def, lastStore, "depends", "");
 
                 // If the last store was before the current loop, we assume this load
                 // is loop invariant. If a later instruction writes to the same location,
                 // we will fix this at the end of the loop.
                 if (loop_ && lastStore->id() < loop_->firstInstruction()->id()) {
-                    if (!loop_->addInvariantLoad(*def))
+                    if (!loop_->addInvariantLoad(*def)) {
                         return false;
+                    }
                 }
             }
         }
 
         // Renumber the last instruction, as the analysis depends on this and the order.
         block->lastIns()->setId(newId++);
 
         if (block->isLoopBackedge()) {
@@ -408,44 +432,47 @@ AliasAnalysis::analyze()
                 AliasSet set = ins->getAliasSet();
                 MOZ_ASSERT(set.isLoad());
 
                 bool hasAlias = false;
                 for (AliasSetIterator iter(set); iter; iter++) {
                     MInstructionVector& aliasedStores = stores[*iter];
                     for (int i = aliasedStores.length() - 1;; i--) {
                         MInstruction* store = aliasedStores[i];
-                        if (store->id() < firstLoopIns->id())
+                        if (store->id() < firstLoopIns->id()) {
                             break;
+                        }
                         if (genericMightAlias(ins, store) != MDefinition::AliasType::NoAlias &&
                             ins->mightAlias(store) != MDefinition::AliasType::NoAlias)
                         {
                             hasAlias = true;
                             IonSpewDependency(ins, store, "aliases", "store in loop body");
                             break;
                         }
                     }
-                    if (hasAlias)
+                    if (hasAlias) {
                         break;
+                    }
                 }
 
                 if (hasAlias) {
                     // This instruction depends on stores inside the loop body. Mark it as having a
                     // dependency on the last instruction of the loop header. The last instruction is a
                     // control instruction and these are never hoisted.
                     MControlInstruction* controlIns = loop_->loopHeader()->lastIns();
                     IonSpewDependency(ins, controlIns, "depends", "due to stores in loop body");
                     ins->setDependency(controlIns);
                 } else {
                     IonSpewAliasInfo("Load", ins, "does not depend on any stores in this loop");
 
                     if (outerLoop && ins->dependency()->id() < outerLoop->firstInstruction()->id()) {
                         IonSpewAliasInfo("Load", ins, "may be invariant in outer loop");
-                        if (!outerLoop->addInvariantLoad(ins))
+                        if (!outerLoop->addInvariantLoad(ins)) {
                             return false;
+                        }
                     }
                 }
             }
             loop_ = loop_->outer();
         }
     }
 
     spewDependencyList();
--- a/js/src/jit/AlignmentMaskAnalysis.cpp
+++ b/js/src/jit/AlignmentMaskAnalysis.cpp
@@ -37,58 +37,66 @@ AnalyzeAsmHeapAddress(MDefinition* ptr, 
     // Putting the add on the outside might seem like it exposes other users of
     // the expression to the possibility of i32 overflow, if we aren't in wasm
     // and they aren't naturally truncating. However, since we use MAdd::New
     // with MIRType::Int32, we make sure that the value is truncated, just as it
     // would be by the MBitAnd.
 
     MOZ_ASSERT(IsCompilingWasm());
 
-    if (!ptr->isBitAnd())
+    if (!ptr->isBitAnd()) {
         return;
+    }
 
     MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
     MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
-    if (lhs->isConstant())
+    if (lhs->isConstant()) {
         mozilla::Swap(lhs, rhs);
-    if (!lhs->isAdd() || !rhs->isConstant())
+    }
+    if (!lhs->isAdd() || !rhs->isConstant()) {
         return;
+    }
 
     MDefinition* op0 = lhs->toAdd()->getOperand(0);
     MDefinition* op1 = lhs->toAdd()->getOperand(1);
-    if (op0->isConstant())
+    if (op0->isConstant()) {
         mozilla::Swap(op0, op1);
-    if (!op1->isConstant())
+    }
+    if (!op1->isConstant()) {
         return;
+    }
 
     uint32_t i = op1->toConstant()->toInt32();
     uint32_t m = rhs->toConstant()->toInt32();
-    if (!IsAlignmentMask(m) || (i & m) != i)
+    if (!IsAlignmentMask(m) || (i & m) != i) {
         return;
+    }
 
     // The pattern was matched! Produce the replacement expression.
     MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
     ptr->block()->insertBefore(ptr->toBitAnd(), and_);
     MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
     ptr->block()->insertBefore(ptr->toBitAnd(), add);
     ptr->replaceAllUsesWith(add);
     ptr->block()->discard(ptr->toBitAnd());
 }
 
 bool
 AlignmentMaskAnalysis::analyze()
 {
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
         for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
-            if (!graph_.alloc().ensureBallast())
+            if (!graph_.alloc().ensureBallast()) {
                 return false;
+            }
 
             // Note that we don't check for MWasmCompareExchangeHeap
             // or MWasmAtomicBinopHeap, because the backend and the OOB
             // mechanism don't support non-zero offsets for them yet.
-            if (i->isAsmJSLoadHeap())
+            if (i->isAsmJSLoadHeap()) {
                 AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
-            else if (i->isAsmJSStoreHeap())
+            } else if (i->isAsmJSStoreHeap()) {
                 AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
+            }
         }
     }
     return true;
 }
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -50,47 +50,51 @@ InsertSortedList(InlineForwardList<T> &l
 
     if (SortBefore(list.back(), value)) {
         list.pushBack(value);
         return;
     }
 
     T* prev = nullptr;
     for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
-        if (SortBefore(value, *iter))
+        if (SortBefore(value, *iter)) {
             break;
+        }
         prev = *iter;
     }
 
-    if (prev)
+    if (prev) {
         list.insertAfter(prev, value);
-    else
+    } else {
         list.pushFront(value);
+    }
 }
 
 /////////////////////////////////////////////////////////////////////
 // LiveRange
 /////////////////////////////////////////////////////////////////////
 
 inline void
 LiveRange::noteAddedUse(UsePosition* use)
 {
     LUse::Policy policy = use->usePolicy();
     usesSpillWeight_ += BacktrackingAllocator::SpillWeightFromUsePolicy(policy);
-    if (policy == LUse::FIXED)
+    if (policy == LUse::FIXED) {
         ++numFixedUses_;
+    }
 }
 
 inline void
 LiveRange::noteRemovedUse(UsePosition* use)
 {
     LUse::Policy policy = use->usePolicy();
     usesSpillWeight_ -= BacktrackingAllocator::SpillWeightFromUsePolicy(policy);
-    if (policy == LUse::FIXED)
+    if (policy == LUse::FIXED) {
         --numFixedUses_;
+    }
     MOZ_ASSERT_IF(!hasUses(), !usesSpillWeight_ && !numFixedUses_);
 }
 
 void
 LiveRange::addUse(UsePosition* use)
 {
     MOZ_ASSERT(covers(use->pos));
     InsertSortedList(uses_, use);
@@ -119,18 +123,19 @@ LiveRange::distributeUses(LiveRange* oth
             noteRemovedUse(use);
             other->addUse(use);
         } else {
             iter++;
         }
     }
 
     // Distribute the definition to |other| as well, if possible.
-    if (hasDefinition() && from() == other->from())
+    if (hasDefinition() && from() == other->from()) {
         other->setHasDefinition();
+    }
 }
 
 bool
 LiveRange::contains(LiveRange* other) const
 {
     return from() <= other->from() && to() >= other->to();
 }
 
@@ -154,18 +159,19 @@ LiveRange::intersect(LiveRange* other, R
         if (from() >= other->to()) {
             *post = range_;
             return;
         }
         *post = Range(other->to(), to());
         innerTo = other->to();
     }
 
-    if (innerFrom != innerTo)
+    if (innerFrom != innerTo) {
         *inside = Range(innerFrom, innerTo);
+    }
 }
 
 bool
 LiveRange::intersects(LiveRange* other) const
 {
     Range pre, inside, post;
     intersect(other, &pre, &inside, &post);
     return !inside.empty();
@@ -173,82 +179,88 @@ LiveRange::intersects(LiveRange* other) 
 
 /////////////////////////////////////////////////////////////////////
 // SpillSet
 /////////////////////////////////////////////////////////////////////
 
 void
 SpillSet::setAllocation(LAllocation alloc)
 {
-    for (size_t i = 0; i < numSpilledBundles(); i++)
+    for (size_t i = 0; i < numSpilledBundles(); i++) {
         spilledBundle(i)->setAllocation(alloc);
+    }
 }
 
 /////////////////////////////////////////////////////////////////////
 // LiveBundle
 /////////////////////////////////////////////////////////////////////
 
 #ifdef DEBUG
 size_t
 LiveBundle::numRanges() const
 {
     size_t count = 0;
-    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++)
+    for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
         count++;
+    }
     return count;
 }
 #endif // DEBUG
 
 LiveRange*
 LiveBundle::rangeFor(CodePosition pos) const
 {
     for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
-        if (range->covers(pos))
+        if (range->covers(pos)) {
             return range;
+        }
     }
     return nullptr;
 }
 
 void
 LiveBundle::addRange(LiveRange* range)
 {
     MOZ_ASSERT(!range->bundle());
     range->setBundle(this);
     InsertSortedList(ranges_, &range->bundleLink);
 }
 
 bool
 LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to)
 {
     LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to);
-    if (!range)
+    if (!range) {
         return false;
+    }
     addRange(range);
     return true;
 }
 
 bool
 LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
                                       CodePosition from, CodePosition to)
 {
     LiveRange* range = LiveRange::FallibleNew(alloc, oldRange->vreg(), from, to);
-    if (!range)
+    if (!range) {
         return false;
+    }
     addRange(range);
     oldRange->distributeUses(range);
     return true;
 }
 
 LiveRange*
 LiveBundle::popFirstRange()
 {
     LiveRange::BundleLinkIterator iter = rangesBegin();
-    if (!iter)
+    if (!iter) {
         return nullptr;
+    }
 
     LiveRange* range = LiveRange::get(*iter);
     ranges_.removeAt(iter);
 
     range->setBundle(nullptr);
     return range;
 }
 
@@ -300,49 +312,54 @@ VirtualRegister::addInitialRange(TempAll
             break;
         }
 
         if (!merged) {
             // This is the first old range we've found that overlaps the new
             // range. Extend this one to cover its union with the new range.
             merged = existing;
 
-            if (from < existing->from())
+            if (from < existing->from()) {
                 existing->setFrom(from);
-            if (to > existing->to())
+            }
+            if (to > existing->to()) {
                 existing->setTo(to);
+            }
 
             // Continue searching to see if any other old ranges can be
             // coalesced with the new merged range.
             iter++;
             continue;
         }
 
         // Coalesce this range into the previous range we merged into.
         MOZ_ASSERT(existing->from() >= merged->from());
-        if (existing->to() > merged->to())
+        if (existing->to() > merged->to()) {
             merged->setTo(existing->to());
+        }
 
         MOZ_ASSERT(!existing->hasDefinition());
         existing->distributeUses(merged);
         MOZ_ASSERT(!existing->hasUses());
 
         ranges_.removeAndIncrement(iter);
     }
 
     if (!merged) {
         // The new range does not overlap any existing range for the vreg.
         LiveRange* range = LiveRange::FallibleNew(alloc, vreg(), from, to);
-        if (!range)
+        if (!range) {
             return false;
-
-        if (prev)
+        }
+
+        if (prev) {
             ranges_.insertAfter(&prev->registerLink, &range->registerLink);
-        else
+        } else {
             ranges_.pushFront(&range->registerLink);
+        }
 
         (*numRanges)++;
     }
 
     return true;
 }
 
 void
@@ -362,20 +379,22 @@ VirtualRegister::setInitialDefinition(Co
 
 LiveRange*
 VirtualRegister::rangeFor(CodePosition pos, bool preferRegister /* = false */) const
 {
     LiveRange* found = nullptr;
     for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
         if (range->covers(pos)) {
-            if (!preferRegister || range->bundle()->allocation().isRegister())
+            if (!preferRegister || range->bundle()->allocation().isRegister()) {
                 return range;
-            if (!found)
+            }
+            if (!found) {
                 found = range;
+            }
         }
     }
     return found;
 }
 
 void
 VirtualRegister::addRange(LiveRange* range)
 {
@@ -399,50 +418,58 @@ VirtualRegister::removeRange(LiveRange* 
 // BacktrackingAllocator
 /////////////////////////////////////////////////////////////////////
 
 // This function pre-allocates and initializes as much global state as possible
 // to avoid littering the algorithms with memory management cruft.
 bool
 BacktrackingAllocator::init()
 {
-    if (!RegisterAllocator::init())
+    if (!RegisterAllocator::init()) {
         return false;
+    }
 
     liveIn = mir->allocate<BitSet>(graph.numBlockIds());
-    if (!liveIn)
+    if (!liveIn) {
         return false;
+    }
 
     size_t numVregs = graph.numVirtualRegisters();
-    if (!vregs.init(mir->alloc(), numVregs))
+    if (!vregs.init(mir->alloc(), numVregs)) {
         return false;
-    for (uint32_t i = 0; i < numVregs; i++)
+    }
+    for (uint32_t i = 0; i < numVregs; i++) {
         new(&vregs[i]) VirtualRegister();
+    }
 
     // Build virtual register objects.
     for (size_t i = 0; i < graph.numBlocks(); i++) {
-        if (mir->shouldCancel("Create data structures (main loop)"))
+        if (mir->shouldCancel("Create data structures (main loop)")) {
             return false;
+        }
 
         LBlock* block = graph.getBlock(i);
         for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
-            if (mir->shouldCancel("Create data structures (inner loop 1)"))
+            if (mir->shouldCancel("Create data structures (inner loop 1)")) {
                 return false;
+            }
 
             for (size_t j = 0; j < ins->numDefs(); j++) {
                 LDefinition* def = ins->getDef(j);
-                if (def->isBogusTemp())
+                if (def->isBogusTemp()) {
                     continue;
+                }
                 vreg(def).init(*ins, def, /* isTemp = */ false);
             }
 
             for (size_t j = 0; j < ins->numTemps(); j++) {
                 LDefinition* def = ins->getTemp(j);
-                if (def->isBogusTemp())
+                if (def->isBogusTemp()) {
                     continue;
+                }
                 vreg(def).init(*ins, def, /* isTemp = */ true);
             }
         }
         for (size_t j = 0; j < block->numPhis(); j++) {
             LPhi* phi = block->getPhi(j);
             LDefinition* def = phi->getDef(0);
             vreg(def).init(phi, def, /* isTemp = */ false);
         }
@@ -474,25 +501,27 @@ BacktrackingAllocator::init()
 
     LBlock* backedge = nullptr;
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock* block = graph.getBlock(i);
 
         // If we see a loop header, mark the backedge so we know when we have
         // hit the end of the loop. Don't process the loop immediately, so that
         // if there is an inner loop we will ignore the outer backedge.
-        if (block->mir()->isLoopHeader())
+        if (block->mir()->isLoopHeader()) {
             backedge = block->mir()->backedge()->lir();
+        }
 
         if (block == backedge) {
             LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
             LiveRange* range = LiveRange::FallibleNew(alloc(), 0, entryOf(header),
                                                       exitOf(block).next());
-            if (!range || !hotcode.insert(range))
+            if (!range || !hotcode.insert(range)) {
                 return false;
+            }
         }
     }
 
     return true;
 }
 
 bool
 BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to)
@@ -546,39 +575,43 @@ IsInputReused(LInstruction* ins, LUse* u
  */
 bool
 BacktrackingAllocator::buildLivenessInfo()
 {
     JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
 
     Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
     BitSet loopDone(graph.numBlockIds());
-    if (!loopDone.init(alloc()))
+    if (!loopDone.init(alloc())) {
         return false;
+    }
 
     size_t numRanges = 0;
 
     for (size_t i = graph.numBlocks(); i > 0; i--) {
-        if (mir->shouldCancel("Build Liveness Info (main loop)"))
+        if (mir->shouldCancel("Build Liveness Info (main loop)")) {
             return false;
+        }
 
         LBlock* block = graph.getBlock(i - 1);
         MBasicBlock* mblock = block->mir();
 
         BitSet& live = liveIn[mblock->id()];
         new (&live) BitSet(graph.numVirtualRegisters());
-        if (!live.init(alloc()))
+        if (!live.init(alloc())) {
             return false;
+        }
 
         // Propagate liveIn from our successors to us.
         for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
             MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
             // Skip backedges, as we fix them up at the loop header.
-            if (mblock->id() < successor->id())
+            if (mblock->id() < successor->id()) {
                 live.insertAll(liveIn[successor->id()]);
+            }
         }
 
         // Add successor phis.
         if (mblock->successorWithPhis()) {
             LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
             for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                 LPhi* phi = phiSuccessor->getPhi(j);
                 LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
@@ -609,84 +642,92 @@ BacktrackingAllocator::buildLivenessInfo
                             found = true;
                             break;
                         }
                     }
                     // If this register doesn't have an explicit def above, mark
                     // it as clobbered by the call unless it is actually
                     // call-preserved.
                     if (!found && !ins->isCallPreserved(*iter)) {
-                        if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next()))
+                        if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next())) {
                             return false;
+                        }
                     }
                 }
 
                 CallRange* callRange =
                     new(alloc().fallible()) CallRange(outputOf(*ins), outputOf(*ins).next());
-                if (!callRange)
+                if (!callRange) {
                     return false;
+                }
 
                 callRangesList.pushFront(callRange);
-                if (!callRanges.insert(callRange))
+                if (!callRanges.insert(callRange)) {
                     return false;
+                }
             }
 
             for (size_t i = 0; i < ins->numDefs(); i++) {
                 LDefinition* def = ins->getDef(i);
-                if (def->isBogusTemp())
+                if (def->isBogusTemp()) {
                     continue;
+                }
 
                 CodePosition from = outputOf(*ins);
 
                 if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                     // MUST_REUSE_INPUT is implemented by allocating an output
                     // register and moving the input to it. Register hints are
                     // used to avoid unnecessary moves. We give the input an
                     // LUse::ANY policy to avoid allocating a register for the
                     // input.
                     LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
                     MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
                     MOZ_ASSERT(inputUse->usedAtStart());
                     *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                 }
 
-                if (!vreg(def).addInitialRange(alloc(), from, from.next(), &numRanges))
+                if (!vreg(def).addInitialRange(alloc(), from, from.next(), &numRanges)) {
                     return false;
+                }
                 vreg(def).setInitialDefinition(from);
                 live.remove(def->virtualRegister());
             }
 
             for (size_t i = 0; i < ins->numTemps(); i++) {
                 LDefinition* temp = ins->getTemp(i);
-                if (temp->isBogusTemp())
+                if (temp->isBogusTemp()) {
                     continue;
+                }
 
                 // Normally temps are considered to cover both the input
                 // and output of the associated instruction. In some cases
                 // though we want to use a fixed register as both an input
                 // and clobbered register in the instruction, so watch for
                 // this and shorten the temp to cover only the output.
                 CodePosition from = inputOf(*ins);
                 if (temp->policy() == LDefinition::FIXED) {
                     AnyRegister reg = temp->output()->toRegister();
                     for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                         if (alloc->isUse()) {
                             LUse* use = alloc->toUse();
                             if (use->isFixedRegister()) {
-                                if (GetFixedRegister(vreg(use).def(), use) == reg)
+                                if (GetFixedRegister(vreg(use).def(), use) == reg) {
                                     from = outputOf(*ins);
+                                }
                             }
                         }
                     }
                 }
 
                 CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
 
-                if (!vreg(temp).addInitialRange(alloc(), from, to, &numRanges))
+                if (!vreg(temp).addInitialRange(alloc(), from, to, &numRanges)) {
                     return false;
+                }
                 vreg(temp).setInitialDefinition(from);
             }
 
             DebugOnly<bool> hasUseRegister = false;
             DebugOnly<bool> hasUseRegisterAtStart = false;
 
             for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                 if (inputAlloc->isUse()) {
@@ -709,44 +750,49 @@ BacktrackingAllocator::buildLivenessInfo
                         }
                     }
 
                     // If there are both useRegisterAtStart(x) and useRegister(y)
                     // uses, we may assign the same register to both operands
                     // (bug 772830). Don't allow this for now.
                     if (use->policy() == LUse::REGISTER) {
                         if (use->usedAtStart()) {
-                            if (!IsInputReused(*ins, use))
+                            if (!IsInputReused(*ins, use)) {
                                 hasUseRegisterAtStart = true;
+                            }
                         } else {
                             hasUseRegister = true;
                         }
                     }
                     MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
 #endif
 
                     // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
-                    if (use->policy() == LUse::RECOVERED_INPUT)
+                    if (use->policy() == LUse::RECOVERED_INPUT) {
                         continue;
+                    }
 
                     CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                     if (use->isFixedRegister()) {
                         LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                         for (size_t i = 0; i < ins->numDefs(); i++) {
                             LDefinition* def = ins->getDef(i);
-                            if (def->policy() == LDefinition::FIXED && *def->output() == reg)
+                            if (def->policy() == LDefinition::FIXED && *def->output() == reg) {
                                 to = inputOf(*ins);
+                            }
                         }
                     }
 
-                    if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next(), &numRanges))
+                    if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next(), &numRanges)) {
                         return false;
+                    }
                     UsePosition* usePosition = new(alloc().fallible()) UsePosition(use, to);
-                    if (!usePosition)
+                    if (!usePosition) {
                         return false;
+                    }
                     vreg(use).addInitialUse(usePosition);
                     live.insert(use->virtualRegister());
                 }
             }
         }
 
         // Phis have simultaneous assignment semantics at block begin, so at
         // the beginning of the block we can be sure that liveIn does not
@@ -754,18 +800,19 @@ BacktrackingAllocator::buildLivenessInfo
         for (unsigned int i = 0; i < block->numPhis(); i++) {
             LDefinition* def = block->getPhi(i)->getDef(0);
             if (live.contains(def->virtualRegister())) {
                 live.remove(def->virtualRegister());
             } else {
                 // This is a dead phi, so add a dummy range over all phis. This
                 // can go away if we have an earlier dead code elimination pass.
                 CodePosition entryPos = entryOf(block);
-                if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next(), &numRanges))
+                if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next(), &numRanges)) {
                     return false;
+                }
             }
         }
 
         if (mblock->isLoopHeader()) {
             // A divergence from the published algorithm is required here, as
             // our block order does not guarantee that blocks of a loop are
             // contiguous. As a result, a single live range spanning the
             // loop is not possible. Additionally, we require liveIn in a later
@@ -775,49 +822,54 @@ BacktrackingAllocator::buildLivenessInfo
                 // Blocks must already have been visited to have a liveIn set.
                 MOZ_ASSERT(loopBlock->id() >= mblock->id());
 
                 // Add a range for this entire loop block
                 CodePosition from = entryOf(loopBlock->lir());
                 CodePosition to = exitOf(loopBlock->lir()).next();
 
                 for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
-                    if (!vregs[*liveRegId].addInitialRange(alloc(), from, to, &numRanges))
+                    if (!vregs[*liveRegId].addInitialRange(alloc(), from, to, &numRanges)) {
                         return false;
+                    }
                 }
 
                 // Fix up the liveIn set.
                 liveIn[loopBlock->id()].insertAll(live);
 
                 // Make sure we don't visit this node again
                 loopDone.insert(loopBlock->id());
 
                 // If this is the loop header, any predecessors are either the
                 // backedge or out of the loop, so skip any predecessors of
                 // this block
                 if (loopBlock != mblock) {
                     for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                         MBasicBlock* pred = loopBlock->getPredecessor(i);
-                        if (loopDone.contains(pred->id()))
+                        if (loopDone.contains(pred->id())) {
                             continue;
-                        if (!loopWorkList.append(pred))
+                        }
+                        if (!loopWorkList.append(pred)) {
                             return false;
+                        }
                     }
                 }
 
                 // Terminate loop if out of work.
-                if (loopWorkList.empty())
+                if (loopWorkList.empty()) {
                     break;
+                }
 
                 // Grab the next block off the work list, skipping any OSR block.
                 MBasicBlock* osrBlock = graph.mir().osrBlock();
                 while (!loopWorkList.empty()) {
                     loopBlock = loopWorkList.popCopy();
-                    if (loopBlock != osrBlock)
+                    if (loopBlock != osrBlock) {
                         break;
+                    }
                 }
 
                 // If end is reached without finding a non-OSR block, then no more work items were found.
                 if (loopBlock == osrBlock) {
                     MOZ_ASSERT(loopWorkList.empty());
                     break;
                 }
             }
@@ -826,77 +878,92 @@ BacktrackingAllocator::buildLivenessInfo
             loopDone.clear();
         }
 
         MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
     }
 
     JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
 
-    if (JitSpewEnabled(JitSpew_RegAlloc))
+    if (JitSpewEnabled(JitSpew_RegAlloc)) {
         dumpInstructions();
+    }
 
     return true;
 }
 
 bool
 BacktrackingAllocator::go()
 {
     JitSpew(JitSpew_RegAlloc, "Beginning register allocation");
 
-    if (!init())
+    if (!init()) {
+        return false;
+    }
+
+    if (!buildLivenessInfo()) {
         return false;
-
-    if (!buildLivenessInfo())
+    }
+
+    if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2)) {
         return false;
-
-    if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
-        return false;
+    }
 
     JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
-    if (!mergeAndQueueRegisters())
+    if (!mergeAndQueueRegisters()) {
         return false;
-
-    if (JitSpewEnabled(JitSpew_RegAlloc))
+    }
+
+    if (JitSpewEnabled(JitSpew_RegAlloc)) {
         dumpVregs();
+    }
 
     JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");
 
     // Allocate, spill and split bundles until finished.
     while (!allocationQueue.empty()) {
-        if (mir->shouldCancel("Backtracking Allocation"))
+        if (mir->shouldCancel("Backtracking Allocation")) {
             return false;
+        }
 
         QueueItem item = allocationQueue.removeHighest();
-        if (!processBundle(mir, item.bundle))
+        if (!processBundle(mir, item.bundle)) {
             return false;
+        }
     }
 
     JitSpew(JitSpew_RegAlloc, "Main allocation loop complete");
 
-    if (!tryAllocatingRegistersForSpillBundles())
+    if (!tryAllocatingRegistersForSpillBundles()) {
         return false;
-
-    if (!pickStackSlots())
+    }
+
+    if (!pickStackSlots()) {
         return false;
-
-    if (JitSpewEnabled(JitSpew_RegAlloc))
+    }
+
+    if (JitSpewEnabled(JitSpew_RegAlloc)) {
         dumpAllocations();
-
-    if (!resolveControlFlow())
+    }
+
+    if (!resolveControlFlow()) {
         return false;
-
-    if (!reifyAllocations())
+    }
+
+    if (!reifyAllocations()) {
         return false;
-
-    if (!populateSafepoints())
+    }
+
+    if (!populateSafepoints()) {
         return false;
-
-    if (!annotateMoveGroups())
+    }
+
+    if (!annotateMoveGroups()) {
         return false;
+    }
 
     return true;
 }
 
 static bool
 IsArgumentSlotDefinition(LDefinition* def)
 {
     return def->policy() == LDefinition::FIXED && def->output()->isArgument();
@@ -908,70 +975,77 @@ IsThisSlotDefinition(LDefinition* def)
     return IsArgumentSlotDefinition(def) &&
         def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
 }
 
 bool
 BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1)
 {
     // See if bundle0 and bundle1 can be merged together.
-    if (bundle0 == bundle1)
+    if (bundle0 == bundle1) {
         return true;
+    }
 
     // Get a representative virtual register from each bundle.
     VirtualRegister& reg0 = vregs[bundle0->firstRange()->vreg()];
     VirtualRegister& reg1 = vregs[bundle1->firstRange()->vreg()];
 
-    if (!reg0.isCompatible(reg1))
+    if (!reg0.isCompatible(reg1)) {
         return true;
+    }
 
     // Registers which might spill to the frame's |this| slot can only be
     // grouped with other such registers. The frame's |this| slot must always
     // hold the |this| value, as required by JitFrame tracing and by the Ion
     // constructor calling convention.
     if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
-        if (*reg0.def()->output() != *reg1.def()->output())
+        if (*reg0.def()->output() != *reg1.def()->output()) {
             return true;
+        }
     }
 
     // Registers which might spill to the frame's argument slots can only be
     // grouped with other such registers if the frame might access those
     // arguments through a lazy arguments object or rest parameter.
     if (IsArgumentSlotDefinition(reg0.def()) || IsArgumentSlotDefinition(reg1.def())) {
         if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
-            if (*reg0.def()->output() != *reg1.def()->output())
+            if (*reg0.def()->output() != *reg1.def()->output()) {
                 return true;
+            }
         }
     }
 
     // Limit the number of times we compare ranges if there are many ranges in
     // one of the bundles, to avoid quadratic behavior.
     static const size_t MAX_RANGES = 200;
 
     // Make sure that ranges in the bundles do not overlap.
     LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(), iter1 = bundle1->rangesBegin();
     size_t count = 0;
     while (iter0 && iter1) {
-        if (++count >= MAX_RANGES)
+        if (++count >= MAX_RANGES) {
             return true;
+        }
 
         LiveRange* range0 = LiveRange::get(*iter0);
         LiveRange* range1 = LiveRange::get(*iter1);
 
-        if (range0->from() >= range1->to())
+        if (range0->from() >= range1->to()) {
             iter1++;
-        else if (range1->from() >= range0->to())
+        } else if (range1->from() >= range0->to()) {
             iter0++;
-        else
+        } else {
             return true;
+        }
     }
 
     // Move all ranges from bundle1 into bundle0.
-    while (LiveRange* range = bundle1->popFirstRange())
+    while (LiveRange* range = bundle1->popFirstRange()) {
         bundle0->addRange(range);
+    }
 
     return true;
 }
 
 static inline LDefinition*
 FindReusingDefOrTemp(LNode* node, LAllocation* alloc)
 {
     if (node->isPhi()) {
@@ -1002,18 +1076,19 @@ FindReusingDefOrTemp(LNode* node, LAlloc
 }
 
 static inline size_t
 NumReusingDefs(LInstruction* ins)
 {
     size_t num = 0;
     for (size_t i = 0; i < ins->numDefs(); i++) {
         LDefinition* def = ins->getDef(i);
-        if (def->policy() == LDefinition::MUST_REUSE_INPUT)
+        if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
             num++;
+        }
     }
     return num;
 }
 
 bool
 BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input)
 {
     // def is a vreg which reuses input for its output physical register. Try
@@ -1064,42 +1139,45 @@ BacktrackingAllocator::tryMergeReusedReg
     // memory uses after the def won't help.
     if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
         def.setMustCopyInput();
         return true;
     }
 
     // The input cannot have register or reused uses after the definition.
     for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
-        if (iter->pos <= inputOf(def.ins()))
+        if (iter->pos <= inputOf(def.ins())) {
             continue;
+        }
 
         LUse* use = iter->use();
         if (FindReusingDefOrTemp(insData[iter->pos], use)) {
             def.setMustCopyInput();
             return true;
         }
         if (iter->usePolicy() != LUse::ANY && iter->usePolicy() != LUse::KEEPALIVE) {
             def.setMustCopyInput();
             return true;
         }
     }
 
     LiveRange* preRange = LiveRange::FallibleNew(alloc(), input.vreg(),
                                                  inputRange->from(), outputOf(def.ins()));
-    if (!preRange)
+    if (!preRange) {
         return false;
+    }
 
     // The new range starts at reg's input position, which means it overlaps
     // with the old range at one position. This is what we want, because we
     // need to copy the input before the instruction.
     LiveRange* postRange = LiveRange::FallibleNew(alloc(), input.vreg(),
                                                   inputOf(def.ins()), inputRange->to());
-    if (!postRange)
+    if (!postRange) {
         return false;
+    }
 
     inputRange->distributeUses(preRange);
     inputRange->distributeUses(postRange);
     MOZ_ASSERT(!inputRange->hasUses());
 
     JitSpew(JitSpew_RegAlloc, "  splitting reused input at %u to try to help grouping",
             inputOf(def.ins()).bits());
 
@@ -1109,37 +1187,40 @@ BacktrackingAllocator::tryMergeReusedReg
     input.addRange(postRange);
 
     firstBundle->removeRange(inputRange);
     firstBundle->addRange(preRange);
 
     // The new range goes in a separate bundle, where it will be spilled during
     // allocation.
     LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
-    if (!secondBundle)
+    if (!secondBundle) {
         return false;
+    }
     secondBundle->addRange(postRange);
 
     return tryMergeBundles(def.firstBundle(), input.firstBundle());
 }
 
 bool
 BacktrackingAllocator::mergeAndQueueRegisters()
 {
     MOZ_ASSERT(!vregs[0u].hasRanges());
 
     // Create a bundle for each register containing all its ranges.
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
-        if (!reg.hasRanges())
+        if (!reg.hasRanges()) {
             continue;
+        }
 
         LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
-        if (!bundle)
+        if (!bundle) {
             return false;
+        }
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
             bundle->addRange(range);
         }
     }
 
     // If there is an OSR block, merge parameters in that block with the
     // corresponding parameters in the initial block.
@@ -1149,74 +1230,81 @@ BacktrackingAllocator::mergeAndQueueRegi
             if (iter->isParameter()) {
                 for (size_t i = 0; i < iter->numDefs(); i++) {
                     DebugOnly<bool> found = false;
                     VirtualRegister& paramVreg = vreg(iter->getDef(i));
                     for (; original < paramVreg.vreg(); original++) {
                         VirtualRegister& originalVreg = vregs[original];
                         if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
                             MOZ_ASSERT(originalVreg.ins()->isParameter());
-                            if (!tryMergeBundles(originalVreg.firstBundle(), paramVreg.firstBundle()))
+                            if (!tryMergeBundles(originalVreg.firstBundle(), paramVreg.firstBundle())) {
                                 return false;
+                            }
                             found = true;
                             break;
                         }
                     }
                     MOZ_ASSERT(found);
                 }
             }
         }
     }
 
     // Try to merge registers with their reused inputs.
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
-        if (!reg.hasRanges())
+        if (!reg.hasRanges()) {
             continue;
+        }
 
         if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
             LUse* use =
                 reg.ins()->toInstruction()->getOperand(reg.def()->getReusedInput())->toUse();
-            if (!tryMergeReusedRegister(reg, vreg(use)))
+            if (!tryMergeReusedRegister(reg, vreg(use))) {
                 return false;
+            }
         }
     }
 
     // Try to merge phis with their inputs.
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         LBlock* block = graph.getBlock(i);
         for (size_t j = 0; j < block->numPhis(); j++) {
             LPhi* phi = block->getPhi(j);
             VirtualRegister& outputVreg = vreg(phi->getDef(0));
             for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
                 VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
-                if (!tryMergeBundles(inputVreg.firstBundle(), outputVreg.firstBundle()))
+                if (!tryMergeBundles(inputVreg.firstBundle(), outputVreg.firstBundle())) {
                     return false;
+                }
             }
         }
     }
 
     // Add all bundles to the allocation queue, and create spill sets for them.
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
             LiveBundle* bundle = range->bundle();
             if (range == bundle->firstRange()) {
-                if (!alloc().ensureBallast())
+                if (!alloc().ensureBallast()) {
                     return false;
+                }
 
                 SpillSet* spill = SpillSet::New(alloc());
-                if (!spill)
+                if (!spill) {
                     return false;
+                }
                 bundle->setSpillSet(spill);
 
                 size_t priority = computePriority(bundle);
-                if (!allocationQueue.insert(QueueItem(bundle, priority)))
+                if (!allocationQueue.insert(QueueItem(bundle, priority))) {
                     return false;
+                }
             }
         }
     }
 
     return true;
 }
 
 static const size_t MAX_ATTEMPTS = 2;
@@ -1246,48 +1334,54 @@ BacktrackingAllocator::tryAllocateNonFix
 {
     // If we want, but do not require a bundle to be in a specific register,
     // only look at that register for allocating and evict or spill if it is
     // not available. Picking a separate register may be even worse than
     // spilling, as it will still necessitate moves and will tie up more
     // registers than if we spilled.
     if (hint.kind() == Requirement::FIXED) {
         AnyRegister reg = hint.allocation().toRegister();
-        if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting))
+        if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting)) {
             return false;
-        if (*success)
+        }
+        if (*success) {
             return true;
+        }
     }
 
     // Spill bundles which have no hint or register requirement.
     if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) {
         JitSpew(JitSpew_RegAlloc, "  postponed spill (no hint or register requirement)");
-        if (!spilledBundles.append(bundle))
+        if (!spilledBundles.append(bundle)) {
             return false;
+        }
         *success = true;
         return true;
     }
 
     if (conflicting.empty() || minimalBundle(bundle)) {
         // Search for any available register which the bundle can be
         // allocated to.
         for (size_t i = 0; i < AnyRegister::Total; i++) {
-            if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting))
+            if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting)) {
                 return false;
-            if (*success)
+            }
+            if (*success) {
                 return true;
+            }
         }
     }
 
     // Spill bundles which have no register requirement if they didn't get
     // allocated.
     if (requirement.kind() == Requirement::NONE) {
         JitSpew(JitSpew_RegAlloc, "  postponed spill (no register requirement)");
-        if (!spilledBundles.append(bundle))
+        if (!spilledBundles.append(bundle)) {
             return false;
+        }
         *success = true;
         return true;
     }
 
     // We failed to allocate this bundle.
     MOZ_ASSERT(!*success);
     return true;
 }
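
Aside for readers of this hunk: the comments in tryAllocateNonFixed describe a three-step policy (try the hinted register, postpone bundles with no register requirement, then scan every register). The standalone sketch below is not part of the patch; it restates that order with hypothetical stand-ins (Bundle, tryAssign) instead of the real allocator types, and it simplifies the hint/requirement interplay.

    // Editorial sketch only; names here are hypothetical.
    #include <cstddef>
    #include <functional>
    #include <optional>

    struct Bundle {
        bool hasRegisterRequirement = false;
    };

    std::optional<size_t>
    allocateNonFixed(Bundle& bundle,
                     std::optional<size_t> hintedReg,
                     size_t numRegisters,
                     const std::function<bool(Bundle&, size_t)>& tryAssign)
    {
        // A hinted register is tried first: picking some other register can
        // be worse than spilling, because it still requires moves.
        if (hintedReg && tryAssign(bundle, *hintedReg)) {
            return hintedReg;
        }

        // Bundles with no register requirement are postponed and handled
        // later by the spill path.
        if (!bundle.hasRegisterRequirement) {
            return std::nullopt;
        }

        // Otherwise, scan all registers for a conflict-free assignment.
        for (size_t r = 0; r < numRegisters; r++) {
            if (tryAssign(bundle, r)) {
                return r;
            }
        }
        return std::nullopt;  // the caller may evict conflicting bundles or split
    }
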
@@ -1323,47 +1417,52 @@ BacktrackingAllocator::processBundle(MIR
     // for higher weight bundles.
 
     Requirement requirement, hint;
     bool canAllocate = computeRequirement(bundle, &requirement, &hint);
 
     bool fixed;
     LiveBundleVector conflicting;
     for (size_t attempt = 0;; attempt++) {
-        if (mir->shouldCancel("Backtracking Allocation (processBundle loop)"))
+        if (mir->shouldCancel("Backtracking Allocation (processBundle loop)")) {
             return false;
+        }
 
         if (canAllocate) {
             bool success = false;
             fixed = false;
             conflicting.clear();
 
             // Ok, let's try allocating for this bundle.
             if (requirement.kind() == Requirement::FIXED) {
-                if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting))
+                if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting)) {
                     return false;
+                }
             } else {
-                if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting))
+                if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting)) {
                     return false;
+                }
             }
 
             // If that worked, we're done!
-            if (success)
+            if (success) {
                 return true;
+            }
 
             // If that didn't work, but we have one or more non-fixed bundles
             // known to be conflicting, maybe we can evict them and try again.
             if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) &&
                 !fixed &&
                 !conflicting.empty() &&
                 maximumSpillWeight(conflicting) < computeSpillWeight(bundle))
                 {
                     for (size_t i = 0; i < conflicting.length(); i++) {
-                        if (!evictBundle(conflicting[i]))
+                        if (!evictBundle(conflicting[i])) {
                             return false;
+                        }
                     }
                     continue;
                 }
         }
 
         // A minimal bundle cannot be split any further. If we try to split it
         // at this point we will just end up with the same bundle and will
         // enter an infinite loop. Weights and the initial live ranges must
@@ -1389,90 +1488,99 @@ BacktrackingAllocator::computeRequiremen
 
         if (range->hasDefinition()) {
             // Deal with any definition constraints/hints.
             LDefinition::Policy policy = reg.def()->policy();
             if (policy == LDefinition::FIXED) {
                 // Fixed policies get a FIXED requirement.
                 JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
                         reg.def()->output()->toString().get());
-                if (!requirement->merge(Requirement(*reg.def()->output())))
+                if (!requirement->merge(Requirement(*reg.def()->output()))) {
                     return false;
+                }
             } else if (reg.ins()->isPhi()) {
                 // Phis don't have any requirements, but they should prefer their
                 // input allocations. This is captured by the group hints above.
             } else {
                 // Non-phis get a REGISTER requirement.
-                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                if (!requirement->merge(Requirement(Requirement::REGISTER))) {
                     return false;
+                }
             }
         }
 
         // Search uses for requirements.
         for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
             LUse::Policy policy = iter->usePolicy();
             if (policy == LUse::FIXED) {
                 AnyRegister required = GetFixedRegister(reg.def(), iter->use());
 
                 JitSpew(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
                         required.name(), iter->pos.bits());
 
                 // If there are multiple fixed registers which the bundle is
                 // required to use, fail. The bundle will need to be split before
                 // it can be allocated.
-                if (!requirement->merge(Requirement(LAllocation(required))))
+                if (!requirement->merge(Requirement(LAllocation(required)))) {
                     return false;
+                }
             } else if (policy == LUse::REGISTER) {
-                if (!requirement->merge(Requirement(Requirement::REGISTER)))
+                if (!requirement->merge(Requirement(Requirement::REGISTER))) {
                     return false;
+                }
             } else if (policy == LUse::ANY) {
                 // ANY differs from KEEPALIVE by actively preferring a register.
-                if (!hint->merge(Requirement(Requirement::REGISTER)))
+                if (!hint->merge(Requirement(Requirement::REGISTER))) {
                     return false;
+                }
             }
         }
     }
 
     return true;
 }
 
 bool
 BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
                                            bool* success, bool* pfixed, LiveBundleVector& conflicting)
 {
     *success = false;
 
-    if (!r.allocatable)
+    if (!r.allocatable) {
         return true;
+    }
 
     LiveBundleVector aliasedConflicting;
 
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
         VirtualRegister& reg = vregs[range->vreg()];
 
-        if (!reg.isCompatible(r.reg))
+        if (!reg.isCompatible(r.reg)) {
             return true;
+        }
 
         for (size_t a = 0; a < r.reg.numAliased(); a++) {
             PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
             LiveRange* existing;
-            if (!rAlias.allocations.contains(range, &existing))
+            if (!rAlias.allocations.contains(range, &existing)) {
                 continue;
+            }
             if (existing->hasVreg()) {
                 MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
                 bool duplicate = false;
                 for (size_t i = 0; i < aliasedConflicting.length(); i++) {
                     if (aliasedConflicting[i] == existing->bundle()) {
                         duplicate = true;
                         break;
                     }
                 }
-                if (!duplicate && !aliasedConflicting.append(existing->bundle()))
+                if (!duplicate && !aliasedConflicting.append(existing->bundle())) {
                     return false;
+                }
             } else {
                 JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
                         rAlias.reg.name(), existing->toString().get());
                 *pfixed = true;
                 return true;
             }
         }
     }
@@ -1497,36 +1605,40 @@ BacktrackingAllocator::tryAllocateRegist
                     JitSpew(JitSpew_RegAlloc, "      %s [weight %zu]",
                             existing->toString().get(), computeSpillWeight(existing));
                 }
             }
         }
 #endif
 
         if (conflicting.empty()) {
-            if (!conflicting.appendAll(aliasedConflicting))
+            if (!conflicting.appendAll(aliasedConflicting)) {
                 return false;
+            }
         } else {
             if (maximumSpillWeight(aliasedConflicting) < maximumSpillWeight(conflicting)) {
                 conflicting.clear();
-                if (!conflicting.appendAll(aliasedConflicting))
+                if (!conflicting.appendAll(aliasedConflicting)) {
                     return false;
+                }
             }
         }
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  allocated to %s", r.reg.name());
 
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
-        if (!alloc().ensureBallast())
+        if (!alloc().ensureBallast()) {
             return false;
-        if (!r.allocations.insert(range))
+        }
+        if (!r.allocations.insert(range)) {
             return false;
+        }
     }
 
     bundle->setAllocation(LAllocation(r.reg));
     *success = true;
     return true;
 }
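
For context on the conflict tracking in tryAllocateRegister: when a register is blocked only by bundles that have virtual registers, the cheapest candidate conflict set is remembered, judged by the spill weight of its most expensive member. The sketch below is not part of the patch and uses hypothetical types; it isolates that comparison.

    // Editorial sketch only; names here are hypothetical.
    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct BundleInfo {
        size_t spillWeight;
    };

    // The cost of evicting a set is dominated by its most expensive bundle.
    size_t maximumSpillWeight(const std::vector<BundleInfo>& bundles)
    {
        size_t maxWeight = 0;
        for (const BundleInfo& b : bundles) {
            maxWeight = std::max(maxWeight, b.spillWeight);
        }
        return maxWeight;
    }

    // Keep the candidate set only if nothing has been recorded yet or it is
    // cheaper to evict than the best set seen so far.
    void keepCheapestConflictSet(std::vector<BundleInfo>& conflicting,
                                 const std::vector<BundleInfo>& candidate)
    {
        if (conflicting.empty() ||
            maximumSpillWeight(candidate) < maximumSpillWeight(conflicting))
        {
            conflicting = candidate;
        }
    }
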
 
 bool
@@ -1553,18 +1665,19 @@ BacktrackingAllocator::evictBundle(LiveB
 }
 
 bool
 BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle,
                                               const LiveBundleVector& newBundles)
 {
     if (JitSpewEnabled(JitSpew_RegAlloc)) {
         JitSpew(JitSpew_RegAlloc, "    splitting bundle %s into:", bundle->toString().get());
-        for (size_t i = 0; i < newBundles.length(); i++)
+        for (size_t i = 0; i < newBundles.length(); i++) {
             JitSpew(JitSpew_RegAlloc, "      %s", newBundles[i]->toString().get());
+        }
     }
 
     // Remove all ranges in the old bundle from their register's list.
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
         vregs[range->vreg()].removeRange(range);
     }
 
@@ -1576,18 +1689,19 @@ BacktrackingAllocator::splitAndRequeueBu
             vregs[range->vreg()].addRange(range);
         }
     }
 
     // Queue the new bundles for register assignment.
     for (size_t i = 0; i < newBundles.length(); i++) {
         LiveBundle* newBundle = newBundles[i];
         size_t priority = computePriority(newBundle);
-        if (!allocationQueue.insert(QueueItem(newBundle, priority)))
+        if (!allocationQueue.insert(QueueItem(newBundle, priority))) {
             return false;
+        }
     }
 
     return true;
 }
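
A note on the requeueing step above: each new bundle goes back into the allocation queue with a freshly computed priority. The sketch below is not part of the patch; it restates the idea with std::priority_queue and made-up types, on the assumption that higher-priority (longer-lived) bundles are popped first.

    // Editorial sketch only; names here are hypothetical.
    #include <cstddef>
    #include <queue>
    #include <vector>

    struct FakeBundle {
        size_t totalRangeLength;  // stand-in for the bundle's live extent
    };

    struct QueueEntry {
        FakeBundle* bundle;
        size_t priority;
        bool operator<(const QueueEntry& other) const {
            // std::priority_queue is a max-heap: larger priorities pop first.
            return priority < other.priority;
        }
    };

    size_t computePriority(const FakeBundle& bundle)
    {
        return bundle.totalRangeLength;
    }

    void requeueSplitBundles(std::priority_queue<QueueEntry>& queue,
                             const std::vector<FakeBundle*>& newBundles)
    {
        for (FakeBundle* bundle : newBundles) {
            queue.push(QueueEntry{bundle, computePriority(*bundle)});
        }
    }
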
 
 bool
 BacktrackingAllocator::spill(LiveBundle* bundle)
 {
@@ -1615,55 +1729,62 @@ bool
 BacktrackingAllocator::tryAllocatingRegistersForSpillBundles()
 {
     for (auto it = spilledBundles.begin(); it != spilledBundles.end(); it++) {
         LiveBundle* bundle = *it;
         LiveBundleVector conflicting;
         bool fixed = false;
         bool success = false;
 
-        if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles"))
+        if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles")) {
             return false;
-
-        if (JitSpewEnabled(JitSpew_RegAlloc))
+        }
+
+        if (JitSpewEnabled(JitSpew_RegAlloc)) {
             JitSpew(JitSpew_RegAlloc, "Spill or allocate %s", bundle->toString().get());
+        }
 
         // Search for any available register which the bundle can be
         // allocated to.
         for (size_t i = 0; i < AnyRegister::Total; i++) {
-            if (!tryAllocateRegister(registers[i], bundle, &success, &fixed, conflicting))
+            if (!tryAllocateRegister(registers[i], bundle, &success, &fixed, conflicting)) {
                 return false;
-            if (success)
+            }
+            if (success) {
                 break;
+            }
         }
 
         // If the bundle still has no register, spill the bundle.
-        if (!success && !spill(bundle))
+        if (!success && !spill(bundle)) {
             return false;
+        }
     }
 
     return true;
 }
 
 bool
 BacktrackingAllocator::pickStackSlots()
 {
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
 
-        if (mir->shouldCancel("Backtracking Pick Stack Slots"))
+        if (mir->shouldCancel("Backtracking Pick Stack Slots")) {
             return false;
+        }
 
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
             LiveBundle* bundle = range->bundle();
 
             if (bundle->allocation().isBogus()) {
-                if (!pickStackSlot(bundle->spillSet()))
+                if (!pickStackSlot(bundle->spillSet())) {
                     return false;
+                }
                 MOZ_ASSERT(!bundle->allocation().isBogus());
             }
         }
     }
 
     return true;
 }
 
@@ -1723,127 +1844,141 @@ BacktrackingAllocator::pickStackSlot(Spi
             for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
                 LiveRange* range = LiveRange::get(*iter);
                 LiveRange* existing;
                 if (spillSlot->allocated.contains(range, &existing)) {
                     success = false;
                     break;
                 }
             }
-            if (!success)
+            if (!success) {
                 break;
+            }
         }
         if (success) {
             // We can reuse this physical stack slot for the new bundles.
             // Update the allocated ranges for the slot.
             for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
                 LiveBundle* bundle = spillSet->spilledBundle(i);
-                if (!insertAllRanges(spillSlot->allocated, bundle))
+                if (!insertAllRanges(spillSlot->allocated, bundle)) {
                     return false;
+                }
             }
             spillSet->setAllocation(spillSlot->alloc);
             return true;
         }
 
         // On a miss, move the spill to the end of the list. This will cause us
         // to make fewer attempts to allocate from slots with a large and
         // highly contended range.
         slotList->popFront();
         slotList->pushBack(spillSlot);
 
-        if (++searches == MAX_SEARCH_COUNT)
+        if (++searches == MAX_SEARCH_COUNT) {
             break;
+        }
     }
 
     // We need a new physical stack slot.
     uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);
 
     SpillSlot* spillSlot = new(alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc());
-    if (!spillSlot)
+    if (!spillSlot) {
         return false;
+    }
 
     for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
         LiveBundle* bundle = spillSet->spilledBundle(i);
-        if (!insertAllRanges(spillSlot->allocated, bundle))
+        if (!insertAllRanges(spillSlot->allocated, bundle)) {
             return false;
+        }
     }
 
     spillSet->setAllocation(spillSlot->alloc);
 
     slotList->pushFront(spillSlot);
     return true;
 }
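
For the slot-reuse loop in pickStackSlot: misses are rotated to the back of the slot list, and the search gives up after MAX_SEARCH_COUNT attempts, at which point a new physical slot is allocated. The sketch below is not part of the patch; it replays that heuristic against a std::deque with hypothetical types, where overlaps() stands in for the live-range intersection test.

    // Editorial sketch only; names here are hypothetical.
    #include <cstddef>
    #include <deque>

    struct Slot {
        int id;
    };

    template <typename OverlapFn>
    bool pickReusableSlot(std::deque<Slot>& slots, OverlapFn overlaps,
                          size_t maxSearchCount, Slot* chosen)
    {
        for (size_t searches = 0;
             !slots.empty() && searches < maxSearchCount;
             searches++) {
            if (!overlaps(slots.front())) {
                *chosen = slots.front();  // reuse this physical slot
                return true;
            }
            // On a miss, rotate the slot to the back so that large, highly
            // contended slots are examined less often.
            Slot missed = slots.front();
            slots.pop_front();
            slots.push_back(missed);
        }
        return false;  // the caller allocates a fresh stack slot
    }
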
 
 bool
 BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, LiveBundle* bundle)
 {
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
-        if (!alloc().ensureBallast())
+        if (!alloc().ensureBallast()) {
             return false;
-        if (!set.insert(range))
+        }
+        if (!set.insert(range)) {
             return false;
+        }
     }
     return true;
 }
 
 bool
 BacktrackingAllocator::deadRange(LiveRange* range)
 {
     // Check for direct uses of this range.
-    if (range->hasUses() || range->hasDefinition())
+    if (range->hasUses() || range->hasDefinition()) {
         return false;
+    }
 
     CodePosition start = range->from();
     LNode* ins = insData[start];
-    if (start == entryOf(ins->block()))
+    if (start == entryOf(ins->block())) {
         return false;
+    }
 
     VirtualRegister& reg = vregs[range->vreg()];
 
     // Check if there are later ranges for this vreg.
     LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range);
     for (iter++; iter; iter++) {
         LiveRange* laterRange = LiveRange::get(*iter);
-        if (laterRange->from() > range->from())
+        if (laterRange->from() > range->from()) {
             return false;
+        }
     }
 
     // Check if this range ends at a loop backedge.
     LNode* last = insData[range->to().previous()];
-    if (last->isGoto() && last->toGoto()->target()->id() < last->block()->mir()->id())
+    if (last->isGoto() && last->toGoto()->target()->id() < last->block()->mir()->id()) {
         return false;
+    }
 
     // Check if there are phis which this vreg flows to.
-    if (reg.usedByPhi())
+    if (reg.usedByPhi()) {
         return false;
+    }
 
     return true;
 }
 
 bool
 BacktrackingAllocator::resolveControlFlow()
 {
     // Add moves to handle changing assignments for vregs over their lifetime.
     JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)");
 
     // Look for places where a register's assignment changes in the middle of a
     // basic block.
     MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
 
-        if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg outer loop)"))
+        if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg outer loop)")) {
             return false;
+        }
 
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; ) {
             LiveRange* range = LiveRange::get(*iter);
 
-            if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg inner loop)"))
+            if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg inner loop)")) {
                 return false;
+            }
 
             // Remove ranges which will never be used.
             if (deadRange(range)) {
                 reg.removeRangeAndIncrement(iter);
                 continue;
             }
 
             // The range which defines the register does not have a predecessor
@@ -1878,42 +2013,47 @@ BacktrackingAllocator::resolveControlFlo
                     break;
                 }
             }
             if (skip) {
                 iter++;
                 continue;
             }
 
-            if (!alloc().ensureBallast())
+            if (!alloc().ensureBallast()) {
                 return false;
+            }
 
             LiveRange* predecessorRange = reg.rangeFor(start.previous(), /* preferRegister = */ true);
             if (start.subpos() == CodePosition::INPUT) {
-                if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type()))
+                if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type())) {
                     return false;
+                }
             } else {
-                if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type()))
+                if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type())) {
                     return false;
+                }
             }
 
             iter++;
         }
     }
 
     JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)");
 
     for (size_t i = 0; i < graph.numBlocks(); i++) {
-        if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
+        if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)")) {
             return false;
+        }
 
         LBlock* successor = graph.getBlock(i);
         MBasicBlock* mSuccessor = successor->mir();
-        if (mSuccessor->numPredecessors() < 1)
+        if (mSuccessor->numPredecessors() < 1) {
             continue;
+        }
 
         // Resolve phis to moves.
         for (size_t j = 0; j < successor->numPhis(); j++) {
             LPhi* phi = successor->getPhi(j);
             MOZ_ASSERT(phi->numDefs() == 1);
             LDefinition* def = phi->getDef(0);
             VirtualRegister& reg = vreg(def);
             LiveRange* to = reg.rangeFor(entryOf(successor));
@@ -1922,72 +2062,82 @@ BacktrackingAllocator::resolveControlFlo
             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                 LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
                 MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
 
                 LAllocation* input = phi->getOperand(k);
                 LiveRange* from = vreg(input).rangeFor(exitOf(predecessor), /* preferRegister = */ true);
                 MOZ_ASSERT(from);
 
-                if (!alloc().ensureBallast())
+                if (!alloc().ensureBallast()) {
                     return false;
-                if (!moveAtExit(predecessor, from, to, def->type()))
+                }
+                if (!moveAtExit(predecessor, from, to, def->type())) {
                     return false;
+                }
             }
         }
     }
 
     // Add moves to resolve graph edges with different allocations at their
     // source and target.
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
             LiveRange* targetRange = LiveRange::get(*iter);
 
             size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id();
-            if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId))))
+            if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId)))) {
                 firstBlockId++;
+            }
             for (size_t id = firstBlockId; id < graph.numBlocks(); id++) {
                 LBlock* successor = graph.getBlock(id);
-                if (!targetRange->covers(entryOf(successor)))
+                if (!targetRange->covers(entryOf(successor))) {
                     break;
+                }
 
                 BitSet& live = liveIn[id];
-                if (!live.contains(i))
+                if (!live.contains(i)) {
                     continue;
+                }
 
                 for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) {
                     LBlock* predecessor = successor->mir()->getPredecessor(j)->lir();
-                    if (targetRange->covers(exitOf(predecessor)))
+                    if (targetRange->covers(exitOf(predecessor))) {
                         continue;
-
-                    if (!alloc().ensureBallast())
+                    }
+
+                    if (!alloc().ensureBallast()) {
                         return false;
+                    }
                     LiveRange* from = reg.rangeFor(exitOf(predecessor), true);
                     if (successor->mir()->numPredecessors() > 1) {
                         MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
-                        if (!moveAtExit(predecessor, from, targetRange, reg.type()))
+                        if (!moveAtExit(predecessor, from, targetRange, reg.type())) {
                             return false;
+                        }
                     } else {
-                        if (!moveAtEntry(successor, from, targetRange, reg.type()))
+                        if (!moveAtEntry(successor, from, targetRange, reg.type())) {
                             return false;
+                        }
                     }
                 }
             }
         }
     }
 
     return true;
 }
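
On the block loop in resolveControlFlow: a move is only needed on an edge whose source and target allocations differ, and its placement depends on whether the successor has several predecessors (in which case the predecessor, which then has a single successor, takes the move at its exit). The sketch below is not part of the patch and uses hypothetical simplifications of the LIR structures.

    // Editorial sketch only; names here are hypothetical.
    #include <cstddef>
    #include <string>

    struct Allocation {
        std::string name;  // e.g. "rax" or "stack:8"
    };

    enum class MovePlacement { None, AtPredecessorExit, AtSuccessorEntry };

    MovePlacement resolveEdge(const Allocation& atPredecessorExit,
                              const Allocation& atSuccessorEntry,
                              size_t successorPredecessorCount)
    {
        // Same allocation on both sides of the edge: nothing to do.
        if (atPredecessorExit.name == atSuccessorEntry.name) {
            return MovePlacement::None;
        }
        // With several incoming edges the move must live in the predecessor,
        // which after critical-edge splitting has a single successor;
        // otherwise it can sit at the successor's entry.
        return successorPredecessorCount > 1 ? MovePlacement::AtPredecessorExit
                                             : MovePlacement::AtSuccessorEntry;
    }
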
 
 bool
 BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins, bool considerCopy)
 {
-    if (LDefinition* def = FindReusingDefOrTemp(ins, use))
+    if (LDefinition* def = FindReusingDefOrTemp(ins, use)) {
         return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
+    }
     return false;
 }
 
 bool
 BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy)
 {
     switch (use->usePolicy()) {
       case LUse::ANY:
@@ -2000,52 +2150,57 @@ BacktrackingAllocator::isRegisterUse(Use
       default:
         return false;
     }
 }
 
 bool
 BacktrackingAllocator::isRegisterDefinition(LiveRange* range)
 {
-    if (!range->hasDefinition())
+    if (!range->hasDefinition()) {
         return false;
+    }
 
     VirtualRegister& reg = vregs[range->vreg()];
-    if (reg.ins()->isPhi())
+    if (reg.ins()->isPhi()) {
         return false;
-
-    if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister())
+    }
+
+    if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister()) {
         return false;
+    }
 
     return true;
 }
 
 bool
 BacktrackingAllocator::reifyAllocations()
 {
     JitSpew(JitSpew_RegAlloc, "Reifying Allocations");
 
     MOZ_ASSERT(!vregs[0u].hasRanges());
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
 
-        if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
+        if (mir->shouldCancel("Backtracking Reify Allocations (main loop)")) {
             return false;
+        }
 
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
 
             if (range->hasDefinition()) {
                 reg.def()->setOutput(range->bundle()->allocation());
                 if (reg.ins()->recoversInput()) {
                     LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
                     for (size_t i = 0; i < snapshot->numEntries(); i++) {
                         LAllocation* entry = snapshot->getEntry(i);
-                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
+                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT) {
                             *entry = *reg.def()->output();
+                        }
                     }
                 }
             }
 
             for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
                 LAllocation* alloc = iter->use();
                 *alloc = range->bundle()->allocation();
 
@@ -2053,26 +2208,29 @@ BacktrackingAllocator::reifyAllocations(
                 // add copies if the use and def have different allocations.
                 LNode* ins = insData[iter->pos];
                 if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) {
                     LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
                     LAllocation res = outputRange->bundle()->allocation();
                     LAllocation sourceAlloc = range->bundle()->allocation();
 
                     if (res != *alloc) {
-                        if (!this->alloc().ensureBallast())
+                        if (!this->alloc().ensureBallast()) {
                             return false;
+                        }
                         if (NumReusingDefs(ins->toInstruction()) <= 1) {
                             LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
-                            if (!group->addAfter(sourceAlloc, res, reg.type()))
+                            if (!group->addAfter(sourceAlloc, res, reg.type())) {
                                 return false;
+                            }
                         } else {
                             LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction());
-                            if (!group->add(sourceAlloc, res, reg.type()))
+                            if (!group->add(sourceAlloc, res, reg.type())) {
                                 return false;
+                            }
                         }
                         *alloc = res;
                     }
                 }
             }
 
             addLiveRegistersForRange(reg, range);
         }
@@ -2083,63 +2241,68 @@ BacktrackingAllocator::reifyAllocations(
 }
 
 size_t
 BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from)
 {
     size_t i = 0;
     for (; i < graph.numNonCallSafepoints(); i++) {
         const LInstruction* ins = graph.getNonCallSafepoint(i);
-        if (from <= inputOf(ins))
+        if (from <= inputOf(ins)) {
             break;
+        }
     }
     return i;
 }
 
 void
 BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range)
 {
     // Fill in the live register sets for all non-call safepoints.
     LAllocation a = range->bundle()->allocation();
-    if (!a.isRegister())
+    if (!a.isRegister()) {
         return;
+    }
 
     // Don't add output registers to the safepoint.
     CodePosition start = range->from();
     if (range->hasDefinition() && !reg.isTemp()) {
 #ifdef CHECK_OSIPOINT_REGISTERS
         // We don't add the output register to the safepoint,
         // but it still might get added as one of the inputs.
         // So eagerly add this reg to the safepoint clobbered registers.
         if (reg.ins()->isInstruction()) {
-            if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint())
+            if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint()) {
                 safepoint->addClobberedRegister(a.toRegister());
+            }
         }
 #endif
         start = start.next();
     }
 
     size_t i = findFirstNonCallSafepoint(start);
     for (; i < graph.numNonCallSafepoints(); i++) {
         LInstruction* ins = graph.getNonCallSafepoint(i);
         CodePosition pos = inputOf(ins);
 
         // Safepoints are sorted, so we can shortcut out of this loop
         // if we go out of range.
-        if (range->to() <= pos)
+        if (range->to() <= pos) {
             break;
+        }
 
         MOZ_ASSERT(range->covers(pos));
 
         LSafepoint* safepoint = ins->safepoint();
         safepoint->addLiveRegister(a.toRegister());
 
 #ifdef CHECK_OSIPOINT_REGISTERS
-        if (reg.isTemp())
+        if (reg.isTemp()) {
             safepoint->addClobberedRegister(a.toRegister());
+        }
 #endif
     }
 }
 
 static inline bool
 IsNunbox(VirtualRegister& reg)
 {
 #ifdef JS_NUNBOX32
@@ -2154,64 +2317,70 @@ static inline bool
 IsSlotsOrElements(VirtualRegister& reg)
 {
     return reg.type() == LDefinition::SLOTS;
 }
 
 static inline bool
 IsTraceable(VirtualRegister& reg)
 {
-    if (reg.type() == LDefinition::OBJECT)
+    if (reg.type() == LDefinition::OBJECT) {
         return true;
+    }
 #ifdef JS_PUNBOX64
-    if (reg.type() == LDefinition::BOX)
+    if (reg.type() == LDefinition::BOX) {
         return true;
+    }
 #endif
     return false;
 }
 
 size_t
 BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom)
 {
     size_t i = startFrom;
     for (; i < graph.numSafepoints(); i++) {
         LInstruction* ins = graph.getSafepoint(i);
-        if (pos <= inputOf(ins))
+        if (pos <= inputOf(ins)) {
             break;
+        }
     }
     return i;
 }
 
 bool
 BacktrackingAllocator::populateSafepoints()
 {
     JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
 
     size_t firstSafepoint = 0;
 
     MOZ_ASSERT(!vregs[0u].def());
     for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
 
-        if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
+        if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg))) {
             continue;
+        }
 
         firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
-        if (firstSafepoint >= graph.numSafepoints())
+        if (firstSafepoint >= graph.numSafepoints()) {
             break;
+        }
 
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
 
             for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
                 LInstruction* ins = graph.getSafepoint(j);
 
                 if (!range->covers(inputOf(ins))) {
-                    if (inputOf(ins) >= range->to())
+                    if (inputOf(ins) >= range->to()) {
                         break;
+                    }
                     continue;
                 }
 
                 // Include temps but not instruction outputs. Also make sure
                 // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
                 // we would have to add the input reg to this safepoint.
                 if (ins == reg.ins() && !reg.isTemp()) {
                     DebugOnly<LDefinition*> def = reg.def();
@@ -2221,41 +2390,47 @@ BacktrackingAllocator::populateSafepoint
                                   def->type() == LDefinition::FLOAT32 ||
                                   def->type() == LDefinition::DOUBLE);
                     continue;
                 }
 
                 LSafepoint* safepoint = ins->safepoint();
 
                 LAllocation a = range->bundle()->allocation();
-                if (a.isGeneralReg() && ins->isCall())
+                if (a.isGeneralReg() && ins->isCall()) {
                     continue;
+                }
 
                 switch (reg.type()) {
                   case LDefinition::OBJECT:
-                    if (!safepoint->addGcPointer(a))
+                    if (!safepoint->addGcPointer(a)) {
                         return false;
+                    }
                     break;
                   case LDefinition::SLOTS:
-                    if (!safepoint->addSlotsOrElementsPointer(a))
+                    if (!safepoint->addSlotsOrElementsPointer(a)) {
                         return false;
+                    }
                     break;
 #ifdef JS_NUNBOX32
                   case LDefinition::TYPE:
-                    if (!safepoint->addNunboxType(i, a))
+                    if (!safepoint->addNunboxType(i, a)) {
                         return false;
+                    }
                     break;
                   case LDefinition::PAYLOAD:
-                    if (!safepoint->addNunboxPayload(i, a))
+                    if (!safepoint->addNunboxPayload(i, a)) {
                         return false;
+                    }
                     break;
 #else
                   case LDefinition::BOX:
-                    if (!safepoint->addBoxedValue(a))
+                    if (!safepoint->addBoxedValue(a)) {
                         return false;
+                    }
                     break;
 #endif
                   default:
                     MOZ_CRASH("Bad register type");
                 }
             }
         }
     }
@@ -2267,46 +2442,50 @@ bool
 BacktrackingAllocator::annotateMoveGroups()
 {
     // Annotate move groups in the LIR graph with any register that is not
     // allocated at that point and can be used as a scratch register. This is
     // only required for x86, as other platforms always have scratch registers
     // available for use.
 #ifdef JS_CODEGEN_X86
     LiveRange* range = LiveRange::FallibleNew(alloc(), 0, CodePosition(), CodePosition().next());
-    if (!range)
+    if (!range) {
         return false;
+    }
 
     for (size_t i = 0; i < graph.numBlocks(); i++) {
-        if (mir->shouldCancel("Backtracking Annotate Move Groups"))
+        if (mir->shouldCancel("Backtracking Annotate Move Groups")) {
             return false;
+        }
 
         LBlock* block = graph.getBlock(i);
         LInstruction* last = nullptr;
         for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
             if (iter->isMoveGroup()) {
                 CodePosition from = last ? outputOf(last) : entryOf(block);
                 range->setTo(from.next());
                 range->setFrom(from);
 
                 for (size_t i = 0; i < AnyRegister::Total; i++) {
                     PhysicalRegister& reg = registers[i];
-                    if (reg.reg.isFloat() || !reg.allocatable)
+                    if (reg.reg.isFloat() || !reg.allocatable) {
                         continue;
+                    }
 
                     // This register is unavailable for use if (a) it is in use
                     // by some live range immediately before the move group,
                     // or (b) it is an operand in one of the group's moves. The
                     // latter case handles live ranges which end immediately
                     // before the move group or start immediately after.
                     // For (b) we need to consider move groups immediately
                     // preceding or following this one.
 
-                    if (iter->toMoveGroup()->uses(reg.reg.gpr()))
+                    if (iter->toMoveGroup()->uses(reg.reg.gpr())) {
                         continue;
+                    }
                     bool found = false;
                     LInstructionIterator niter(iter);
                     for (niter++; niter != block->end(); niter++) {
                         if (niter->isMoveGroup()) {
                             if (niter->toMoveGroup()->uses(reg.reg.gpr())) {
                                 found = true;
                                 break;
                             }
@@ -2325,18 +2504,19 @@ BacktrackingAllocator::annotateMoveGroup
                                 }
                             } else {
                                 break;
                             }
                         } while (riter != block->begin());
                     }
 
                     LiveRange* existing;
-                    if (found || reg.allocations.contains(range, &existing))
+                    if (found || reg.allocations.contains(range, &existing)) {
                         continue;
+                    }
 
                     iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
                     break;
                 }
             } else {
                 last = *iter;
             }
         }
@@ -2354,27 +2534,31 @@ BacktrackingAllocator::annotateMoveGroup
 
 UniqueChars
 LiveRange::toString() const
 {
     AutoEnterOOMUnsafeRegion oomUnsafe;
 
     UniqueChars buf = JS_smprintf("v%u [%u,%u)", hasVreg() ? vreg() : 0, from().bits(), to().bits());
 
-    if (buf && bundle() && !bundle()->allocation().isBogus())
+    if (buf && bundle() && !bundle()->allocation().isBogus()) {
         buf = JS_sprintf_append(std::move(buf), " %s", bundle()->allocation().toString().get());
-
-    if (buf && hasDefinition())
+    }
+
+    if (buf && hasDefinition()) {
         buf = JS_sprintf_append(std::move(buf), " (def)");
-
-    for (UsePositionIterator iter = usesBegin(); buf && iter; iter++)
+    }
+
+    for (UsePositionIterator iter = usesBegin(); buf && iter; iter++) {
         buf = JS_sprintf_append(std::move(buf), " %s@%u", iter->use()->toString().get(), iter->pos.bits());
-
-    if (!buf)
+    }
+
+    if (!buf) {
         oomUnsafe.crash("LiveRange::toString()");
+    }
 
     return buf;
 }
 
 UniqueChars
 LiveBundle::toString() const
 {
     AutoEnterOOMUnsafeRegion oomUnsafe;
@@ -2383,18 +2567,19 @@ LiveBundle::toString() const
     UniqueChars buf = JS_smprintf("%s", "");
 
     for (LiveRange::BundleLinkIterator iter = rangesBegin(); buf && iter; iter++) {
         buf = JS_sprintf_append(std::move(buf), "%s %s",
                                 (iter == rangesBegin()) ? "" : " ##",
                                 LiveRange::get(*iter)->toString().get());
     }
 
-    if (!buf)
+    if (!buf) {
         oomUnsafe.crash("LiveBundle::toString()");
+    }
 
     return buf;
 }
 
 #endif // JS_JITSPEW
 
 void
 BacktrackingAllocator::dumpVregs()
@@ -2403,35 +2588,37 @@ BacktrackingAllocator::dumpVregs()
     MOZ_ASSERT(!vregs[0u].hasRanges());
 
     fprintf(stderr, "Live ranges by virtual register:\n");
 
     for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
         fprintf(stderr, "  ");
         VirtualRegister& reg = vregs[i];
         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
-            if (iter != reg.rangesBegin())
+            if (iter != reg.rangesBegin()) {
                 fprintf(stderr, " ## ");
+            }
             fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get());
         }
         fprintf(stderr, "\n");
     }
 
     fprintf(stderr, "\nLive ranges by bundle:\n");
 
     for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister& reg = vregs[i];
         for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter; baseIter++) {
             LiveRange* range = LiveRange::get(*baseIter);
             LiveBundle* bundle = range->bundle();
             if (range == bundle->firstRange()) {
                 fprintf(stderr, "  ");
                 for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
-                    if (iter != bundle->rangesBegin())
+                    if (iter != bundle->rangesBegin()) {
                         fprintf(stderr, " ## ");
+                    }
                     fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get());
                 }
                 fprintf(stderr, "\n");
             }
         }
     }
 #endif
 }
@@ -2440,20 +2627,21 @@ BacktrackingAllocator::dumpVregs()
 struct BacktrackingAllocator::PrintLiveRange
 {
     bool& first_;
 
     explicit PrintLiveRange(bool& first) : first_(first) {}
 
     void operator()(const LiveRange* range)
     {
-        if (first_)
+        if (first_) {
             first_ = false;
-        else
+        } else {
             fprintf(stderr, " /");
+        }
         fprintf(stderr, " %s", range->toString().get());
     }
 };
 #endif
 
 void
 BacktrackingAllocator::dumpAllocations()
 {
@@ -2522,69 +2710,78 @@ BacktrackingAllocator::minimalBundle(Liv
 
     if (!range->hasVreg()) {
         *pfixed = true;
         return true;
     }
 
     // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
     // each range into a separate bundle.
-    if (++iter)
+    if (++iter) {
         return false;
+    }
 
     if (range->hasDefinition()) {
         VirtualRegister& reg = vregs[range->vreg()];
-        if (pfixed)
+        if (pfixed) {
             *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister();
+        }
         return minimalDef(range, reg.ins());
     }
 
     bool fixed = false, minimal = false, multiple = false;
 
     for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
-        if (iter != range->usesBegin())
+        if (iter != range->usesBegin()) {
             multiple = true;
+        }
 
         switch (iter->usePolicy()) {
           case LUse::FIXED:
-            if (fixed)
+            if (fixed) {
                 return false;
+            }
             fixed = true;
-            if (minimalUse(range, *iter))
+            if (minimalUse(range, *iter)) {
                 minimal = true;
+            }
             break;
 
           case LUse::REGISTER:
-            if (minimalUse(range, *iter))
+            if (minimalUse(range, *iter)) {
                 minimal = true;
+            }
             break;
 
           default:
             break;
         }
     }
 
     // If a range contains a fixed use and at least one other use,
     // splitAtAllRegisterUses will split each use into a different bundle.
-    if (multiple && fixed)
+    if (multiple && fixed) {
         minimal = false;
-
-    if (pfixed)
+    }
+
+    if (pfixed) {
         *pfixed = fixed;
+    }
     return minimal;
 }
 
 size_t
 BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle)
 {
     // Minimal bundles have an extremely high spill weight, to ensure they
     // can evict any other bundles and be allocated to a register.
     bool fixed;
-    if (minimalBundle(bundle, &fixed))
+    if (minimalBundle(bundle, &fixed)) {
         return fixed ? 2000000 : 1000000;
+    }
 
     size_t usesTotal = 0;
     fixed = false;
 
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
 
         if (range->hasDefinition()) {
@@ -2593,52 +2790,56 @@ BacktrackingAllocator::computeSpillWeigh
                 usesTotal += 2000;
                 fixed = true;
             } else if (!reg.ins()->isPhi()) {
                 usesTotal += 2000;
             }
         }
 
         usesTotal += range->usesSpillWeight();
-        if (range->numFixedUses() > 0)
+        if (range->numFixedUses() > 0) {
             fixed = true;
+        }
     }
 
     // Bundles with fixed uses are given a higher spill weight, since they must
     // be allocated to a specific register.
-    if (testbed && fixed)
+    if (testbed && fixed) {
         usesTotal *= 2;
+    }
 
     // Compute spill weight as a use density, lowering the weight for long
     // lived bundles with relatively few uses.
     size_t lifetimeTotal = computePriority(bundle);
     return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
 }
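
A small worked example of the use-density formula that closes computeSpillWeight. It is not part of the patch, and the per-use weights are illustrative only (the 2000 for register definitions appears above; the 1000 per use is assumed here just for the arithmetic).

    // Editorial sketch only; the constants are illustrative assumptions.
    #include <cassert>
    #include <cstddef>

    size_t spillWeight(size_t usesTotal, size_t lifetimeTotal)
    {
        // Long-lived bundles with relatively few uses get a low weight, so
        // they are evicted or spilled before short, densely used bundles.
        return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
    }

    int main()
    {
        // A register definition (2000) plus two uses assumed at 1000 each,
        // live across 8 code positions...
        assert(spillWeight(2000 + 1000 + 1000, 8) == 500);
        // ...far outweighs the same uses spread across 100 positions.
        assert(spillWeight(2000 + 1000 + 1000, 100) == 40);
        return 0;
    }
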
 
 size_t
 BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles)
 {
     size_t maxWeight = 0;
-    for (size_t i = 0; i < bundles.length(); i++)
+    for (size_t i = 0; i < bundles.length(); i++) {
         maxWeight = Max(maxWeight, computeSpillWeight(bundles[i]));
+    }
     return maxWeight;
 }
 
 bool
 BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success)
 {
     // If this bundle has portions that are hot and portions that are cold,
     // split it at the boundaries between hot and cold code.
 
     LiveRange* hotRange = nullptr;
 
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
-        if (hotcode.contains(range, &hotRange))
+        if (hotcode.contains(range, &hotRange)) {
             break;
+        }
     }
 
     // Don't split if there is no hot code in the bundle.
     if (!hotRange) {
         JitSpew(JitSpew_RegAlloc, "  bundle does not contain hot code");
         return true;
     }
 
@@ -2659,98 +2860,112 @@ BacktrackingAllocator::trySplitAcrossHot
     JitSpew(JitSpew_RegAlloc, "  split across hot range %s", hotRange->toString().get());
 
     // Tweak the splitting method when compiling wasm code to look at actual
     // uses within the hot/cold code. This heuristic is in place as the below
     // mechanism regresses several asm.js tests. Hopefully this will be fixed
     // soon and this special case removed. See bug 948838.
     if (compilingWasm()) {
         SplitPositionVector splitPositions;
-        if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
+        if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to())) {
             return false;
+        }
         *success = true;
         return splitAt(bundle, splitPositions);
     }
 
     LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                     bundle->spillParent());
-    if (!hotBundle)
+    if (!hotBundle) {
         return false;
+    }
     LiveBundle* preBundle = nullptr;
     LiveBundle* postBundle = nullptr;
     LiveBundle* coldBundle = nullptr;
 
     if (testbed) {
         coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), bundle->spillParent());
-        if (!coldBundle)
+        if (!coldBundle) {
             return false;
+        }
     }
 
     // Accumulate the ranges of hot and cold code in the bundle. Note that
     // we are only comparing with the single hot range found, so the cold code
     // may contain separate hot ranges.
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
         LiveRange::Range hot, coldPre, coldPost;
         range->intersect(hotRange, &coldPre, &hot, &coldPost);
 
         if (!hot.empty()) {
-            if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to))
+            if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to)) {
                 return false;
+            }
         }
 
         if (!coldPre.empty()) {
             if (testbed) {
-                if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
+                if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to)) {
                     return false;
+                }
             } else {
                 if (!preBundle) {
                     preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                         bundle->spillParent());
-                    if (!preBundle)
+                    if (!preBundle) {
                         return false;
+                    }
                 }
-                if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
+                if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to)) {
                     return false;
+                }
             }
         }
 
         if (!coldPost.empty()) {
             if (testbed) {
-                if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
+                if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to)) {
                     return false;
+                }
             } else {
                 if (!postBundle) {
                     postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                          bundle->spillParent());
-                    if (!postBundle)
+                    if (!postBundle) {
                         return false;
+                    }
                 }
-                if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
+                if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to)) {
                     return false;
+                }
             }
         }
     }
 
     MOZ_ASSERT(hotBundle->numRanges() != 0);
 
     LiveBundleVector newBundles;
-    if (!newBundles.append(hotBundle))
+    if (!newBundles.append(hotBundle)) {
         return false;
+    }
 
     if (testbed) {
         MOZ_ASSERT(coldBundle->numRanges() != 0);
-        if (!newBundles.append(coldBundle))
+        if (!newBundles.append(coldBundle)) {
             return false;
+        }
     } else {
         MOZ_ASSERT(preBundle || postBundle);
-        if (preBundle && !newBundles.append(preBundle))
+        if (preBundle && !newBundles.append(preBundle)) {
             return false;
-        if (postBundle && !newBundles.append(postBundle))
+        }
+        if (postBundle && !newBundles.append(postBundle)) {
             return false;
+        }
     }
 
     *success = true;
     return splitAndRequeueBundles(bundle, newBundles);
 }
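
The loop in trySplitAcrossHotcode carves each range into cold-pre, hot, and cold-post pieces via LiveRange::intersect. The standalone sketch below is not part of the patch and uses a hypothetical half-open Range type; it shows the interval arithmetic such an intersection boils down to.

    // Editorial sketch only; Range is a hypothetical stand-in.
    #include <algorithm>
    #include <cstdint>

    struct Range {
        uint32_t from;
        uint32_t to;  // half-open: [from, to)
        bool empty() const { return from >= to; }
    };

    void intersectWithHot(const Range& range, const Range& hot,
                          Range* coldPre, Range* hotPart, Range* coldPost)
    {
        // Cold code before the hot range.
        *coldPre = Range{range.from, std::min(range.to, hot.from)};
        // Overlap with the hot range.
        *hotPart = Range{std::max(range.from, hot.from),
                         std::min(range.to, hot.to)};
        // Cold code after the hot range.
        *coldPost = Range{std::max(range.from, hot.to), range.to};
    }
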
 
 bool
 BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
@@ -2800,18 +3015,19 @@ BacktrackingAllocator::trySplitAfterLast
         JitSpew(JitSpew_RegAlloc, "  bundle's last use is a register use");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split after last register use at %u",
             lastRegisterTo.bits());
 
     SplitPositionVector splitPositions;
-    if (!splitPositions.append(lastRegisterTo))
+    if (!splitPositions.append(lastRegisterTo)) {
         return false;
+    }
     *success = true;
     return splitAt(bundle, splitPositions);
 }
 
 bool
 BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success)
 {
     // If this bundle's earlier uses do not require it to be in a register,
@@ -2828,18 +3044,19 @@ BacktrackingAllocator::trySplitBeforeFir
     }
 
     CodePosition firstRegisterFrom;
 
     CodePosition conflictEnd;
     if (conflict) {
         for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
-            if (range->to() > conflictEnd)
+            if (range->to() > conflictEnd) {
                 conflictEnd = range->to();
+            }
         }
     }
 
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
 
         if (!conflict || range->from() > conflictEnd) {
             if (range->hasDefinition() && isRegisterDefinition(range)) {
@@ -2853,32 +3070,34 @@ BacktrackingAllocator::trySplitBeforeFir
 
             if (!conflict || outputOf(ins) >= conflictEnd) {
                 if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
                     firstRegisterFrom = inputOf(ins);
                     break;
                 }
             }
         }
-        if (firstRegisterFrom.bits())
+        if (firstRegisterFrom.bits()) {
             break;
+        }
     }
 
     if (!firstRegisterFrom.bits()) {
         // Can't trim non-register uses off the beginning by splitting.
         JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
         return true;
     }
 
     JitSpew(JitSpew_RegAlloc, "  split before first register use at %u",
             firstRegisterFrom.bits());
 
     SplitPositionVector splitPositions;
-    if (!splitPositions.append(firstRegisterFrom))
+    if (!splitPositions.append(firstRegisterFrom)) {
         return false;
+    }
     *success = true;
     return splitAt(bundle, splitPositions);
 }
 
 // When splitting a bundle according to a list of split positions, return
 // whether a use or range at |pos| should use a different bundle than the last
 // position this was called for.
 static bool
@@ -2912,110 +3131,123 @@ UseNewBundle(const SplitPositionVector& 
 
 static bool
 HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
 {
     MOZ_ASSERT(range->bundle() == bundle);
 
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* prevRange = LiveRange::get(*iter);
-        if (prevRange == range)
+        if (prevRange == range) {
             return false;
-        if (prevRange->vreg() == range->vreg())
+        }
+        if (prevRange->vreg() == range->vreg()) {
             return true;
+        }
     }
 
     MOZ_CRASH();
 }
 
 static bool
 HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
 {
     MOZ_ASSERT(range->bundle() == bundle);
 
     bool foundRange = false;
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* prevRange = LiveRange::get(*iter);
-        if (foundRange && prevRange->vreg() == range->vreg())
+        if (foundRange && prevRange->vreg() == range->vreg()) {
             return true;
-        if (prevRange == range)
+        }
+        if (prevRange == range) {
             foundRange = true;
+        }
     }
 
     MOZ_ASSERT(foundRange);
     return false;
 }
 
 bool
 BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions)
 {
     // Split the bundle at the given split points. Register uses which have no
     // intervening split points are consolidated into the same bundle. If the
     // list of split points is empty, then all register uses are placed in
     // minimal bundles.
 
     // splitPositions should be sorted.
-    for (size_t i = 1; i < splitPositions.length(); ++i)
+    for (size_t i = 1; i < splitPositions.length(); ++i) {
         MOZ_ASSERT(splitPositions[i-1] < splitPositions[i]);
+    }
 
     // We don't need to create a new spill bundle if there already is one.
     bool spillBundleIsNew = false;
     LiveBundle* spillBundle = bundle->spillParent();
     if (!spillBundle) {
         spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr);
-        if (!spillBundle)
+        if (!spillBundle) {
             return false;
+        }
         spillBundleIsNew = true;
 
         for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
             LiveRange* range = LiveRange::get(*iter);
 
             CodePosition from = range->from();
-            if (isRegisterDefinition(range))
+            if (isRegisterDefinition(range)) {
                 from = minimalDefEnd(insData[from]).next();
+            }
 
             if (from < range->to()) {
-                if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to()))
+                if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to())) {
                     return false;
-
-                if (range->hasDefinition() && !isRegisterDefinition(range))
+                }
+
+                if (range->hasDefinition() && !isRegisterDefinition(range)) {
                     spillBundle->lastRange()->setHasDefinition();
+                }
             }
         }
     }
 
     LiveBundleVector newBundles;
 
     // The bundle that ranges are currently being added to.
     LiveBundle* activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
-    if (!activeBundle || !newBundles.append(activeBundle))
+    if (!activeBundle || !newBundles.append(activeBundle)) {
         return false;
+    }
 
     // State for use by UseNewBundle.
     size_t activeSplitPosition = 0;
 
     // Make new bundles according to the split positions, and distribute ranges
     // and uses to them.
     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
         LiveRange* range = LiveRange::get(*iter);
 
         if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
             activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
-            if (!activeBundle || !newBundles.append(activeBundle))
+            if (!activeBundle || !newBundles.append(activeBundle)) {
                 return false;
+            }
         }
 
         LiveRange* activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
                                                         range->from(), range->to());
-        if (!activeRange)
+        if (!activeRange) {
             return false;
+        }
         activeBundle->addRange(activeRange);
 
-        if (isRegisterDefinition(range))
+        if (isRegisterDefinition(range)) {
             activeRange->setHasDefinition();
+        }
 
         while (range->hasUses()) {
             UsePosition* use = range->popUse();
             LNode* ins = insData[use->pos];
 
             // Any uses of a register that appear before its definition has
             // finished must be associated with the range for that definition.
             if (isRegisterDefinition(range) && use->pos <= minimalDefEnd(insData[range->from()])) {
@@ -3031,22 +3263,24 @@ BacktrackingAllocator::splitAt(LiveBundl
                 if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
                     (!activeRange->hasUses() ||
                      activeRange->usesBegin()->pos != use->pos ||
                      activeRange->usesBegin()->usePolicy() == LUse::FIXED ||
                      use->usePolicy() == LUse::FIXED))
                 {
                     activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
                                                            spillBundle);
-                    if (!activeBundle || !newBundles.append(activeBundle))
+                    if (!activeBundle || !newBundles.append(activeBundle)) {
                         return false;
+                    }
                     activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
                                                          range->from(), range->to());
-                    if (!activeRange)
+                    if (!activeRange) {
                         return false;
+                    }
                     activeBundle->addRange(activeRange);
                 }
 
                 activeRange->addUse(use);
             } else {
                 MOZ_ASSERT(spillBundleIsNew);
                 spillBundle->rangeFor(use->pos)->addUse(use);
             }
@@ -3085,22 +3319,24 @@ BacktrackingAllocator::splitAt(LiveBundl
                     bundle->removeRangeAndIncrementIterator(iter);
                     continue;
                 }
             }
 
             iter++;
         }
 
-        if (bundle->hasRanges() && !filteredBundles.append(bundle))
+        if (bundle->hasRanges() && !filteredBundles.append(bundle)) {
             return false;
+        }
     }
 
-    if (spillBundleIsNew && !filteredBundles.append(spillBundle))
+    if (spillBundleIsNew && !filteredBundles.append(spillBundle)) {
         return false;
+    }
 
     return splitAndRequeueBundles(bundle, filteredBundles);
 }
 
 bool
 BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle)
 {
     // Split the bundle to separate register uses and non-register uses and
@@ -3120,70 +3356,81 @@ BacktrackingAllocator::splitAcrossCalls(
 
         // The search above returns an arbitrary call within the range. Walk
         // backwards to find the first call in the range.
         for (CallRangeList::reverse_iterator riter = callRangesList.rbegin(callRange);
              riter != callRangesList.rend();
              ++riter)
         {
             CodePosition pos = riter->range.from;
-            if (range->covers(pos))
+            if (range->covers(pos)) {
                 callRange = *riter;
-            else
+            } else {
                 break;
+            }
         }
 
         // Add all call positions within the range, by walking forwards.
         for (CallRangeList::iterator iter = callRangesList.begin(callRange);
              iter != callRangesList.end();
              ++iter)
         {
             CodePosition pos = iter->range.from;
-            if (!range->covers(pos))
+            if (!range->covers(pos)) {
                 break;
+            }
 
             // Calls at the beginning of the range are ignored; there is no splitting to do.
             if (range->covers(pos.previous())) {
                 MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back());
-                if (!callPositions.append(pos))
+                if (!callPositions.append(pos)) {
                     return false;
+                }
             }
         }
     }
     MOZ_ASSERT(callPositions.length());
 
 #ifdef JS_JITSPEW
     JitSpewStart(JitSpew_RegAlloc, "  split across calls at ");
-    for (size_t i = 0; i < callPositions.length(); ++i)
+    for (size_t i = 0; i < callPositions.length(); ++i) {
         JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits());
+    }
     JitSpewFin(JitSpew_RegAlloc);
 #endif
 
     return splitAt(bundle, callPositions);
 }
 
 bool
 BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict)
 {
     bool success = false;
 
-    if (!trySplitAcrossHotcode(bundle, &success))
+    if (!trySplitAcrossHotcode(bundle, &success)) {
         return false;
-    if (success)
+    }
+    if (success) {
         return true;
-
-    if (fixed)
+    }
+
+    if (fixed) {
         return splitAcrossCalls(bundle);
-
-    if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success))
+    }
+
+    if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success)) {
         return false;
-    if (success)
+    }
+    if (success) {
         return true;
-
-    if (!trySplitAfterLastRegisterUse(bundle, conflict, &success))
+    }
+
+    if (!trySplitAfterLastRegisterUse(bundle, conflict, &success)) {
         return false;
-    if (success)
+    }
+    if (success) {
         return true;
+    }
 
     // Split at all register uses.
     SplitPositionVector emptyPositions;
     return splitAt(bundle, emptyPositions);
 }
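
chooseBundleSplit above tries progressively coarser strategies (split across hot code, across calls, before the first or after the last register use) and falls back to splitting at every register use. The splitAt routine they all feed into consolidates register uses with no intervening split position into the same bundle. A minimal standalone sketch of that consolidation rule, assuming both input lists are sorted ascending and using a hypothetical GroupUsesBySplits helper rather than the allocator's LiveRange machinery:

    #include <cstdint>
    #include <vector>

    // Group sorted use positions so that uses separated by a split position
    // land in different groups, mirroring how splitAt() starts a new bundle.
    std::vector<std::vector<uint32_t>>
    GroupUsesBySplits(const std::vector<uint32_t>& uses,
                      const std::vector<uint32_t>& splits)
    {
        std::vector<std::vector<uint32_t>> groups(1);
        size_t splitIndex = 0;
        for (uint32_t pos : uses) {
            // Advance past every split position at or before |pos|.
            bool crossedSplit = false;
            while (splitIndex < splits.size() && pos >= splits[splitIndex]) {
                crossedSplit = true;
                splitIndex++;
            }
            // Only open a new group if the current one already holds uses.
            if (crossedSplit && !groups.back().empty()) {
                groups.emplace_back();
            }
            groups.back().push_back(pos);
        }
        return groups;
    }

For example, uses {5, 12, 14, 25} with split positions {10, 20} yield the groups {5}, {12, 14} and {25}. With an empty split list the sketch keeps everything in one group, whereas the real splitAt places each register use in a minimal bundle in that case.
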
--- a/js/src/jit/BacktrackingAllocator.h
+++ b/js/src/jit/BacktrackingAllocator.h
@@ -104,25 +104,27 @@ class Requirement
     int priority() const;
 
     MOZ_MUST_USE bool merge(const Requirement& newRequirement) {
         // Merge newRequirement with any existing requirement, returning false
         // if the new and old requirements conflict.
         MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
 
         if (newRequirement.kind() == Requirement::FIXED) {
-            if (kind() == Requirement::FIXED)
+            if (kind() == Requirement::FIXED) {
                 return newRequirement.allocation() == allocation();
+            }
             *this = newRequirement;
             return true;
         }
 
         MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
-        if (kind() == Requirement::FIXED)
+        if (kind() == Requirement::FIXED) {
             return allocation().isRegister();
+        }
 
         *this = newRequirement;
         return true;
     }
 
     void dump() const;
 
   private:
@@ -376,20 +378,22 @@ class LiveRange : public TempObject
 #ifdef JS_JITSPEW
     // Return a string describing this range.
     UniqueChars toString() const;
 #endif
 
     // Comparator for use in range splay trees.
     static int compare(LiveRange* v0, LiveRange* v1) {
         // LiveRange includes 'from' but excludes 'to'.
-        if (v0->to() <= v1->from())
+        if (v0->to() <= v1->from()) {
             return -1;
-        if (v0->from() >= v1->to())
+        }
+        if (v0->from() >= v1->to()) {
             return 1;
+        }
         return 0;
     }
 };
 
 // Tracks information about bundles that should all be spilled to the same
 // physical location. At the beginning of allocation, each bundle has its own
 // spill set. As bundles are split, the new smaller bundles continue to use the
 // same spill set.
@@ -666,20 +670,22 @@ class BacktrackingAllocator : protected 
         LiveRange::Range range;
 
         CallRange(CodePosition from, CodePosition to)
           : range(from, to)
         {}
 
         // Comparator for use in splay tree.
         static int compare(CallRange* v0, CallRange* v1) {
-            if (v0->range.to <= v1->range.from)
+            if (v0->range.to <= v1->range.from) {
                 return -1;
-            if (v0->range.from >= v1->range.to)
+            }
+            if (v0->range.from >= v1->range.to) {
                 return 1;
+            }
             return 0;
         }
     };
 
     // Ranges where all registers must be spilled due to call instructions.
     typedef InlineList<CallRange> CallRangeList;
     CallRangeList callRangesList;
     SplayTree<CallRange*, CallRange> callRanges;
@@ -785,42 +791,46 @@ class BacktrackingAllocator : protected 
         LAllocation fromAlloc = from->bundle()->allocation();
         LAllocation toAlloc = to->bundle()->allocation();
         MOZ_ASSERT(fromAlloc != toAlloc);
         return moves->add(fromAlloc, toAlloc, type);
     }
 
     MOZ_MUST_USE bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to,
                                 LDefinition::Type type) {
-        if (from->bundle()->allocation() == to->bundle()->allocation())
+        if (from->bundle()->allocation() == to->bundle()->allocation()) {
             return true;
+        }
         LMoveGroup* moves = getInputMoveGroup(ins);
         return addMove(moves, from, to, type);
     }
 
     MOZ_MUST_USE bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to,
                                 LDefinition::Type type) {
-        if (from->bundle()->allocation() == to->bundle()->allocation())
+        if (from->bundle()->allocation() == to->bundle()->allocation()) {
             return true;
+        }
         LMoveGroup* moves = getMoveGroupAfter(ins);
         return addMove(moves, from, to, type);
     }
 
     MOZ_MUST_USE bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to,
                                  LDefinition::Type type) {
-        if (from->bundle()->allocation() == to->bundle()->allocation())
+        if (from->bundle()->allocation() == to->bundle()->allocation()) {
             return true;
+        }
         LMoveGroup* moves = block->getExitMoveGroup(alloc());
         return addMove(moves, from, to, type);
     }
 
     MOZ_MUST_USE bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to,
                                   LDefinition::Type type) {
-        if (from->bundle()->allocation() == to->bundle()->allocation())
+        if (from->bundle()->allocation() == to->bundle()->allocation()) {
             return true;
+        }
         LMoveGroup* moves = block->getEntryMoveGroup(alloc());
         return addMove(moves, from, to, type);
     }
 
     // Debugging methods.
     void dumpAllocations();
 
     struct PrintLiveRange;
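
The compare functions on LiveRange and CallRange above both encode the half-open [from, to) convention: disjoint ranges order by position, and any overlap compares as 0, so a splay-tree lookup keyed on a single position finds a range covering it. A minimal standalone sketch of that rule, using a hypothetical Range struct instead of the allocator's types:

    #include <cstdint>

    struct Range {
        uint32_t from;  // inclusive
        uint32_t to;    // exclusive
    };

    int CompareRanges(const Range& a, const Range& b)
    {
        if (a.to <= b.from) {
            return -1;  // a ends before b starts
        }
        if (a.from >= b.to) {
            return 1;   // a starts after b ends
        }
        return 0;       // the ranges overlap
    }

    // A point query is just a degenerate one-position range.
    bool Covers(const Range& r, uint32_t pos)
    {
        return CompareRanges(r, Range{pos, pos + 1}) == 0;
    }

Treating overlap as equality is what lets a containment query reuse the ordinary ordered-tree search instead of needing a separate interval structure.
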
--- a/js/src/jit/Bailouts.cpp
+++ b/js/src/jit/Bailouts.cpp
@@ -68,18 +68,19 @@ jit::Bailout(BailoutStack* sp, BaselineB
     // This condition was wrong when we entered this bailout function, but it
     // might be true now. A GC might have reclaimed all the Jit code and
     // invalidated all frames which are currently on the stack. As we are
     // already in a bailout, we cannot switch to an invalidation
     // bailout. When the code of an IonScript which is on the stack is
     // invalidated (see InvalidateActivation), we remove references to it and
     // increment the reference counter for each activation that appears on the
     // stack. As the bailed frame is one of them, we have to decrement it now.
-    if (frame.ionScript()->invalidated())
+    if (frame.ionScript()->invalidated()) {
         frame.ionScript()->decrementInvalidationCount(cx->runtime()->defaultFreeOp());
+    }
 
     // NB: Commentary on how |lastProfilingFrame| is set from bailouts.
     //
     // Once we return to jitcode, any following frames might get clobbered,
     // but the current frame will not (as it will be clobbered "in-place"
     // with a baseline frame that will share the same frame prefix).
     // However, there may be multiple baseline frames unpacked from this
     // single Ion frame, which means we will need to once again reset
@@ -88,18 +89,19 @@ jit::Bailout(BailoutStack* sp, BaselineB
     //
     // In the case of error, the jitcode will jump immediately to an
     // exception handler, which will unwind the frames and properly set
     // the |lastProfilingFrame| to point to the frame being resumed into
     // (see |AutoResetLastProfilerFrameOnReturnFromException|).
     //
     // In both cases, we want to temporarily set the |lastProfilingFrame|
     // to the current frame being bailed out, and then fix it up later.
-    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) {
         cx->jitActivation->setLastProfilingFrame(currentFramePtr);
+    }
 
     return retval;
 }
 
 uint32_t
 jit::InvalidationBailout(InvalidationBailoutStack* sp, size_t* frameSizeOut,
                          BaselineBailoutInfo** bailoutInfo)
 {
@@ -158,18 +160,19 @@ jit::InvalidationBailout(InvalidationBai
         JitSpew(JitSpew_IonInvalidate, "   frameSize %u", unsigned(layout->prevFrameLocalSize()));
         JitSpew(JitSpew_IonInvalidate, "   ra %p", (void*) layout->returnAddress());
 #endif
     }
 
     frame.ionScript()->decrementInvalidationCount(cx->runtime()->defaultFreeOp());
 
     // Make the frame being bailed out the top profiled frame.
-    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) {
         cx->jitActivation->setLastProfilingFrame(currentFramePtr);
+    }
 
     return retval;
 }
 
 BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
                                    const JSJitFrameIter& frame)
   : machine_(frame.machineState())
 {
@@ -210,53 +213,57 @@ jit::ExceptionHandlerBailout(JSContext* 
 
     {
         // Currently we do not tolerate OOM here so as not to complicate the
         // exception handling code further.
         AutoEnterOOMUnsafeRegion oomUnsafe;
 
         retval = BailoutIonToBaseline(cx, bailoutData.activation(), frameView, true,
                                       &bailoutInfo, &excInfo);
-        if (retval == BAILOUT_RETURN_FATAL_ERROR && cx->isThrowingOutOfMemory())
+        if (retval == BAILOUT_RETURN_FATAL_ERROR && cx->isThrowingOutOfMemory()) {
             oomUnsafe.crash("ExceptionHandlerBailout");
+        }
     }
 
     if (retval == BAILOUT_RETURN_OK) {
         MOZ_ASSERT(bailoutInfo);
 
         // Overwrite the kind so HandleException after the bailout returns
         // false, jumping directly to the exception tail.
-        if (excInfo.propagatingIonExceptionForDebugMode())
+        if (excInfo.propagatingIonExceptionForDebugMode()) {
             bailoutInfo->bailoutKind = Bailout_IonExceptionDebugMode;
+        }
 
         rfe->kind = ResumeFromException::RESUME_BAILOUT;
         rfe->target = cx->runtime()->jitRuntime()->getBailoutTail().value;
         rfe->bailoutInfo = bailoutInfo;
     } else {
         // Bailout failed. If the overrecursion check failed, clear the
         // exception to turn this into an uncatchable error, continue popping
         // all inline frames and have the caller report the error.
         MOZ_ASSERT(!bailoutInfo);
 
         if (retval == BAILOUT_RETURN_OVERRECURSED) {
             *overrecursed = true;
-            if (!excInfo.propagatingIonExceptionForDebugMode())
+            if (!excInfo.propagatingIonExceptionForDebugMode()) {
                 cx->clearPendingException();
+            }
         } else {
             MOZ_ASSERT(retval == BAILOUT_RETURN_FATAL_ERROR);
 
             // Crash for now so as not to complicate the exception handling code
             // further.
             MOZ_CRASH();
         }
     }
 
     // Make the frame being bailed out the top profiled frame.
-    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
+    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) {
         cx->jitActivation->setLastProfilingFrame(currentFramePtr);
+    }
 
     return retval;
 }
 
 // Initialize the decl env Object, call object, and any arguments obj of the
 // current frame.
 bool
 jit::EnsureHasEnvironmentObjects(JSContext* cx, AbstractFramePtr fp)
@@ -265,18 +272,19 @@ jit::EnsureHasEnvironmentObjects(JSConte
     MOZ_ASSERT(!fp.isEvalFrame());
 
     if (fp.isFunctionFrame()) {
         // Ion does not handle extra var environments due to parameter
         // expressions yet.
         MOZ_ASSERT(!fp.callee()->needsExtraBodyVarEnvironment());
 
         if (!fp.hasInitialEnvironment() && fp.callee()->needsFunctionEnvironmentObjects()) {
-            if (!fp.initFunctionEnvironmentObjects(cx))
+            if (!fp.initFunctionEnvironmentObjects(cx)) {
                 return false;
+            }
         }
     }
 
     return true;
 }
 
 void
 jit::CheckFrequentBailouts(JSContext* cx, JSScript* script, BailoutKind bailoutKind)
@@ -287,18 +295,19 @@ jit::CheckFrequentBailouts(JSContext* cx
         IonScript* ionScript = script->ionScript();
 
         if (ionScript->bailoutExpected()) {
             // If we bail out because of the first execution of a basic block,
             // then we should record which basic block we are returning in,
             // which should prevent this from happening again.  Also note that
             // the first execution bailout can be related to an inlined script,
             // so there is no need to penalize the caller.
-            if (bailoutKind != Bailout_FirstExecution && !script->hadFrequentBailouts())
+            if (bailoutKind != Bailout_FirstExecution && !script->hadFrequentBailouts()) {
                 script->setHadFrequentBailouts();
+            }
 
             JitSpew(JitSpew_IonInvalidate, "Invalidating due to too many bailouts");
 
             Invalidate(cx, script);
         }
     }
 }
 
--- a/js/src/jit/BitSet.cpp
+++ b/js/src/jit/BitSet.cpp
@@ -10,73 +10,78 @@ using namespace js;
 using namespace js::jit;
 
 bool
 BitSet::init(TempAllocator& alloc)
 {
     size_t sizeRequired = numWords() * sizeof(*bits_);
 
     bits_ = (uint32_t*)alloc.allocate(sizeRequired);
-    if (!bits_)
+    if (!bits_) {
         return false;
+    }
 
     memset(bits_, 0, sizeRequired);
 
     return true;
 }
 
 bool
 BitSet::empty() const
 {
     MOZ_ASSERT(bits_);
     const uint32_t* bits = bits_;
     for (unsigned int i = 0, e = numWords(); i < e; i++) {
-        if (bits[i])
+        if (bits[i]) {
             return false;
+        }
     }
     return true;
 }
 
 void
 BitSet::insertAll(const BitSet& other)
 {
     MOZ_ASSERT(bits_);
     MOZ_ASSERT(other.numBits_ == numBits_);
     MOZ_ASSERT(other.bits_);
 
     uint32_t* bits = bits_;
     const uint32_t* otherBits = other.bits_;
-    for (unsigned int i = 0, e = numWords(); i < e; i++)
+    for (unsigned int i = 0, e = numWords(); i < e; i++) {
         bits[i] |= otherBits[i];
+    }
 }
 
 void
 BitSet::removeAll(const BitSet& other)
 {
     MOZ_ASSERT(bits_);
     MOZ_ASSERT(other.numBits_ == numBits_);
     MOZ_ASSERT(other.bits_);
 
     uint32_t* bits = bits_;
     const uint32_t* otherBits = other.bits_;
-    for (unsigned int i = 0, e = numWords(); i < e; i++)
+    for (unsigned int i = 0, e = numWords(); i < e; i++) {
         bits[i] &= ~otherBits[i];
+    }
 }
 
 void
 BitSet::intersect(const BitSet& other)
 {
     MOZ_ASSERT(bits_);
     MOZ_ASSERT(other.numBits_ == numBits_);
     MOZ_ASSERT(other.bits_);
 
     uint32_t* bits = bits_;
     const uint32_t* otherBits = other.bits_;
-    for (unsigned int i = 0, e = numWords(); i < e; i++)
+    for (unsigned int i = 0, e = numWords(); i < e; i++) {
         bits[i] &= otherBits[i];
+    }
 }
 
 // Returns true if the intersection caused the contents of the set to change.
 bool
 BitSet::fixedPointIntersect(const BitSet& other)
 {
     MOZ_ASSERT(bits_);
     MOZ_ASSERT(other.numBits_ == numBits_);
@@ -85,31 +90,34 @@ BitSet::fixedPointIntersect(const BitSet
     bool changed = false;
 
     uint32_t* bits = bits_;
     const uint32_t* otherBits = other.bits_;
     for (unsigned int i = 0, e = numWords(); i < e; i++) {
         uint32_t old = bits[i];
         bits[i] &= otherBits[i];
 
-        if (!changed && old != bits[i])
+        if (!changed && old != bits[i]) {
             changed = true;
+        }
     }
     return changed;
 }
 
 void
 BitSet::complement()
 {
     MOZ_ASSERT(bits_);
     uint32_t* bits = bits_;
-    for (unsigned int i = 0, e = numWords(); i < e; i++)
+    for (unsigned int i = 0, e = numWords(); i < e; i++) {
         bits[i] = ~bits[i];
+    }
 }
 
 void
 BitSet::clear()
 {
     MOZ_ASSERT(bits_);
     uint32_t* bits = bits_;
-    for (unsigned int i = 0, e = numWords(); i < e; i++)
+    for (unsigned int i = 0, e = numWords(); i < e; i++) {
         bits[i] = 0;
+    }
 }
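
All of the BitSet operations above work one 32-bit word at a time. A minimal sketch of the word/bit arithmetic they rely on, with hypothetical free functions standing in for BitSet's members (the real class stores numBits_ and derives the word count from it):

    #include <cstdint>

    constexpr unsigned BitsPerWord = 32;

    // Round the bit count up to whole 32-bit words.
    unsigned NumWords(unsigned numBits)
    {
        return (numBits + BitsPerWord - 1) / BitsPerWord;
    }

    // Bit i lives in word i / 32 at position i % 32.
    bool Contains(const uint32_t* bits, unsigned bit)
    {
        return (bits[bit / BitsPerWord] >> (bit % BitsPerWord)) & 1;
    }

    void Insert(uint32_t* bits, unsigned bit)
    {
        bits[bit / BitsPerWord] |= uint32_t(1) << (bit % BitsPerWord);
    }

With this layout, set union, difference and intersection reduce to the per-word |=, &= ~ and &= loops shown in insertAll, removeAll and intersect.
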
--- a/js/src/jit/BitSet.h
+++ b/js/src/jit/BitSet.h
@@ -122,18 +122,19 @@ class BitSet::Iterator
     uint32_t value_;
 
     void skipEmpty() {
         // Skip words containing only zeros.
         unsigned numWords = set_.numWords();
         const uint32_t* bits = set_.bits_;
         while (value_ == 0) {
             word_++;
-            if (word_ == numWords)
+            if (word_ == numWords) {
                 return;
+            }
 
             index_ = word_ * BitSet::BitsPerWord;
             value_ = bits[word_];
         }
 
         // Be careful: the result of CountTrailingZeroes32 is undefined if the
         // input is 0.
         int numZeros = mozilla::CountTrailingZeroes32(value_);
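
skipEmpty above walks past all-zero words and then uses CountTrailingZeroes32 to locate the lowest set bit of a nonzero word; the guard matters because the result is undefined for a zero input. A standalone sketch of the same iteration idiom, substituting the GCC/Clang builtin __builtin_ctz for mozilla::CountTrailingZeroes32:

    #include <cstdint>
    #include <cstdio>

    void ForEachSetBit(const uint32_t* bits, unsigned numWords)
    {
        for (unsigned word = 0; word < numWords; word++) {
            uint32_t value = bits[word];
            while (value != 0) {                      // never take ctz of 0
                unsigned bit = __builtin_ctz(value);  // index of lowest set bit
                printf("set bit %u\n", word * 32 + bit);
                value &= value - 1;                   // clear that lowest bit
            }
        }
    }
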
--- a/js/src/jit/BytecodeAnalysis.cpp
+++ b/js/src/jit/BytecodeAnalysis.cpp
@@ -38,18 +38,19 @@ struct CatchFinallyRange
     bool contains(uint32_t offset) const {
         return start <= offset && offset < end;
     }
 };
 
 bool
 BytecodeAnalysis::init(TempAllocator& alloc, GSNCache& gsn)
 {
-    if (!infos_.growByUninitialized(script_->length()))
+    if (!infos_.growByUninitialized(script_->length())) {
         return false;
+    }
 
     // Initialize the env chain slot if either the function needs some
     // EnvironmentObject (like a CallObject) or the script uses the env
     // chain. The latter case is handled below.
     usesEnvironmentChain_ = script_->module() || script_->initialEnvironmentShape() ||
                             (script_->functionDelazifying() &&
                              script_->functionDelazifying()->needsSomeEnvironmentObject());
 
@@ -66,23 +67,25 @@ BytecodeAnalysis::init(TempAllocator& al
         JSOp op = JSOp(*pc);
         nextpc = pc + GetBytecodeLength(pc);
         unsigned offset = script_->pcToOffset(pc);
 
         JitSpew(JitSpew_BaselineOp, "Analyzing op @ %d (end=%d): %s",
                 int(script_->pcToOffset(pc)), int(script_->length()), CodeName[op]);
 
         // If this bytecode info has not yet been initialized, it's not reachable.
-        if (!infos_[offset].initialized)
+        if (!infos_[offset].initialized) {
             continue;
+        }
 
         unsigned stackDepth = infos_[offset].stackDepth;
 #ifdef DEBUG
-        for (jsbytecode* chkpc = pc + 1; chkpc < (pc + GetBytecodeLength(pc)); chkpc++)
+        for (jsbytecode* chkpc = pc + 1; chkpc < (pc + GetBytecodeLength(pc)); chkpc++) {
             MOZ_ASSERT(!infos_[script_->pcToOffset(chkpc)].initialized);
+        }
 #endif
 
         unsigned nuses = GetUseCount(pc);
         unsigned ndefs = GetDefCount(pc);
 
         MOZ_ASSERT(stackDepth >= nuses);
         stackDepth -= nuses;
         stackDepth += ndefs;
@@ -141,29 +144,32 @@ BytecodeAnalysis::init(TempAllocator& al
 
             // Ensure the code following the try-block is always marked as
             // reachable, to simplify Ion's ControlFlowGenerator.
             uint32_t afterTryOffset = script_->pcToOffset(afterTry);
             infos_[afterTryOffset].init(stackDepth);
             infos_[afterTryOffset].jumpTarget = true;
 
             // Pop CatchFinallyRanges that are no longer needed.
-            while (!catchFinallyRanges.empty() && catchFinallyRanges.back().end <= offset)
+            while (!catchFinallyRanges.empty() && catchFinallyRanges.back().end <= offset) {
                 catchFinallyRanges.popBack();
+            }
 
             CatchFinallyRange range(script_->pcToOffset(endOfTry), script_->pcToOffset(afterTry));
-            if (!catchFinallyRanges.append(range))
+            if (!catchFinallyRanges.append(range)) {
                 return false;
+            }
             break;
           }
 
           case JSOP_LOOPENTRY:
             for (size_t i = 0; i < catchFinallyRanges.length(); i++) {
-                if (catchFinallyRanges[i].contains(offset))
+                if (catchFinallyRanges[i].contains(offset)) {
                     infos_[offset].loopEntryInCatchOrFinally = true;
+                }
             }
             break;
 
           case JSOP_GETNAME:
           case JSOP_BINDNAME:
           case JSOP_BINDVAR:
           case JSOP_SETNAME:
           case JSOP_STRICTSETNAME:
@@ -179,51 +185,55 @@ BytecodeAnalysis::init(TempAllocator& al
           case JSOP_IMPLICITTHIS:
             usesEnvironmentChain_ = true;
             break;
 
           case JSOP_GETGNAME:
           case JSOP_SETGNAME:
           case JSOP_STRICTSETGNAME:
           case JSOP_GIMPLICITTHIS:
-            if (script_->hasNonSyntacticScope())
+            if (script_->hasNonSyntacticScope()) {
                 usesEnvironmentChain_ = true;
+            }
             break;
 
           default:
             break;
         }
 
         bool jump = IsJumpOpcode(op);
         if (jump) {
             // Case instructions do not push the lvalue back when branching.
             unsigned newStackDepth = stackDepth;
-            if (op == JSOP_CASE)
+            if (op == JSOP_CASE) {
                 newStackDepth--;
+            }
 
             unsigned targetOffset = offset + GET_JUMP_OFFSET(pc);
 
             // If this is a backedge to an un-analyzed segment, analyze from there.
             bool jumpBack = (targetOffset < offset) && !infos_[targetOffset].initialized;
 
             infos_[targetOffset].init(newStackDepth);
             infos_[targetOffset].jumpTarget = true;
 
-            if (jumpBack)
+            if (jumpBack) {
                 nextpc = script_->offsetToPC(targetOffset);
+            }
         }
 
         // Handle any fallthrough from this opcode.
         if (BytecodeFallsThrough(op)) {
             jsbytecode* fallthrough = pc + GetBytecodeLength(pc);
             MOZ_ASSERT(fallthrough < end);
             unsigned fallthroughOffset = script_->pcToOffset(fallthrough);
 
             infos_[fallthroughOffset].init(stackDepth);
 
             // Treat the fallthrough of a branch instruction as a jump target.
-            if (jump)
+            if (jump) {
                 infos_[fallthroughOffset].jumpTarget = true;
+            }
         }
     }
 
     return true;
 }
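
BytecodeAnalysis::init above tracks an abstract stack depth per bytecode offset: each opcode pops GetUseCount(pc) values and pushes GetDefCount(pc), and the resulting depth seeds the BytecodeInfo of jump targets and fallthrough offsets. A minimal sketch of that bookkeeping, assuming a hypothetical OpInfo pair in place of the real opcode tables:

    #include <cassert>

    struct OpInfo {
        unsigned nuses;  // values the op pops
        unsigned ndefs;  // values the op pushes
    };

    unsigned DepthAfter(unsigned depthBefore, const OpInfo& op)
    {
        assert(depthBefore >= op.nuses);
        return depthBefore - op.nuses + op.ndefs;
    }

    // An ADD-like op pops two operands and pushes one result, so
    // DepthAfter(5, OpInfo{2, 1}) == 4.
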
--- a/js/src/jit/BytecodeAnalysis.h
+++ b/js/src/jit/BytecodeAnalysis.h
@@ -47,18 +47,19 @@ class BytecodeAnalysis
     MOZ_MUST_USE bool init(TempAllocator& alloc, GSNCache& gsn);
 
     BytecodeInfo& info(jsbytecode* pc) {
         MOZ_ASSERT(infos_[script_->pcToOffset(pc)].initialized);
         return infos_[script_->pcToOffset(pc)];
     }
 
     BytecodeInfo* maybeInfo(jsbytecode* pc) {
-        if (infos_[script_->pcToOffset(pc)].initialized)
+        if (infos_[script_->pcToOffset(pc)].initialized) {
             return &infos_[script_->pcToOffset(pc)];
+        }
         return nullptr;
     }
 
     bool usesEnvironmentChain() const {
         return usesEnvironmentChain_;
     }
 
     bool hasTryFinally() const {