Bug 1519636 - Reformat recent changes to the Google coding style r=Ehsan
author Sylvestre Ledru <sledru@mozilla.com>
Thu, 24 Jan 2019 08:11:00 +0000
changeset 455235 08b686c04a013ca91738d80f6f34b92a86c130eb
parent 455234 a3bfdc6d05af5dbf4b09451b477a402b4b3b813a
child 455236 3692b1c3df6076cb3c07b102726e17cd0acce818
push id 76688
push user sledru@mozilla.com
push date Thu, 24 Jan 2019 08:12:11 +0000
treeherder autoland@08b686c04a01 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers Ehsan
bugs 1519636
milestone 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1519636 - Reformat recent changes to the Google coding style r=Ehsan # ignore-this-changeset Depends on D17388 Differential Revision: https://phabricator.services.mozilla.com/D17389
dom/base/nsContentUtils.cpp
dom/base/nsJSUtils.cpp
dom/webauthn/PublicKeyCredential.cpp
dom/webauthn/WebAuthnManager.cpp
dom/webauthn/WebAuthnTransactionParent.cpp
dom/webauthn/WinWebAuthnManager.cpp
embedding/ios/GeckoEmbed/GeckoEmbed/AppDelegate.m
embedding/ios/GeckoEmbed/GeckoEmbed/ViewController.m
embedding/ios/GeckoEmbed/js/dirs.m
extensions/cookie/nsPermissionManager.cpp
image/imgFrame.cpp
js/src/ds/LifoAlloc.cpp
js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
js/src/jit/shared/AtomicOperations-feeling-lucky.h
js/src/jit/shared/AtomicOperations-shared-jit.cpp
js/src/jit/shared/AtomicOperations-shared-jit.h
layout/build/nsLayoutStatics.cpp
security/manager/ssl/TransportSecurityInfo.cpp
widget/cocoa/nsNativeThemeCocoa.mm
widget/cocoa/nsTouchBar.mm
widget/cocoa/nsTouchBarUpdater.mm
xpcom/threads/nsThreadManager.cpp
--- a/dom/base/nsContentUtils.cpp
+++ b/dom/base/nsContentUtils.cpp
@@ -10433,18 +10433,19 @@ static bool JSONCreator(const char16_t* 
   NS_ENSURE_TRUE(JS_Stringify(aCx, &value, nullptr, JS::NullHandleValue,
                               JSONCreator, &serializedValue),
                  false);
   aOutStr = serializedValue;
   return true;
 }
 
 /* static */
-bool nsContentUtils::HighPriorityEventPendingForTopLevelDocumentBeforeContentfulPaint(
-    Document* aDocument) {
+bool nsContentUtils::
+    HighPriorityEventPendingForTopLevelDocumentBeforeContentfulPaint(
+        Document* aDocument) {
   if (!aDocument || aDocument->IsLoadedAsData()) {
     return false;
   }
 
   Document* topLevel = aDocument->GetTopLevelContentDocument();
   return topLevel && topLevel->GetShell() &&
          topLevel->GetShell()->GetPresContext() &&
          !topLevel->GetShell()->GetPresContext()->HadContentfulPaint() &&
--- a/dom/base/nsJSUtils.cpp
+++ b/dom/base/nsJSUtils.cpp
@@ -372,19 +372,17 @@ JSScript* nsJSUtils::ExecutionContext::G
   MOZ_ASSERT(!mSkip);
   MOZ_ASSERT(mScript);
   mScriptUsed = true;
 #endif
 
   return MaybeGetScript();
 }
 
-JSScript* nsJSUtils::ExecutionContext::MaybeGetScript() {
-  return mScript;
-}
+JSScript* nsJSUtils::ExecutionContext::MaybeGetScript() { return mScript; }
 
 nsresult nsJSUtils::ExecutionContext::ExecScript() {
   if (mSkip) {
     return mRv;
   }
 
   MOZ_ASSERT(mScript);
 
--- a/dom/webauthn/PublicKeyCredential.cpp
+++ b/dom/webauthn/PublicKeyCredential.cpp
@@ -5,17 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/dom/Promise.h"
 #include "mozilla/dom/PublicKeyCredential.h"
 #include "mozilla/dom/WebAuthenticationBinding.h"
 #include "nsCycleCollectionParticipant.h"
 
 #ifdef OS_WIN
-#include "WinWebAuthnManager.h"
+#  include "WinWebAuthnManager.h"
 #endif
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(PublicKeyCredential)
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(PublicKeyCredential, Credential)
   tmp->mRawIdCachedObj = nullptr;
--- a/dom/webauthn/WebAuthnManager.cpp
+++ b/dom/webauthn/WebAuthnManager.cpp
@@ -14,17 +14,17 @@
 #include "mozilla/dom/PWebAuthnTransaction.h"
 #include "mozilla/dom/WebAuthnManager.h"
 #include "mozilla/dom/WebAuthnTransactionChild.h"
 #include "mozilla/dom/WebAuthnUtil.h"
 #include "mozilla/ipc/BackgroundChild.h"
 #include "mozilla/ipc/PBackgroundChild.h"
 
 #ifdef OS_WIN
-#include "WinWebAuthnManager.h"
+#  include "WinWebAuthnManager.h"
 #endif
 
 using namespace mozilla::ipc;
 
 namespace mozilla {
 namespace dom {
 
 /***********************************************************************
--- a/dom/webauthn/WebAuthnTransactionParent.cpp
+++ b/dom/webauthn/WebAuthnTransactionParent.cpp
@@ -5,17 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/dom/WebAuthnTransactionParent.h"
 #include "mozilla/dom/U2FTokenManager.h"
 #include "mozilla/ipc/PBackgroundParent.h"
 #include "mozilla/ipc/BackgroundParent.h"
 
 #ifdef OS_WIN
-#include "WinWebAuthnManager.h"
+#  include "WinWebAuthnManager.h"
 #endif
 
 namespace mozilla {
 namespace dom {
 
 mozilla::ipc::IPCResult WebAuthnTransactionParent::RecvRequestRegister(
     const uint64_t& aTransactionId,
     const WebAuthnMakeCredentialInfo& aTransactionInfo) {
--- a/dom/webauthn/WinWebAuthnManager.cpp
+++ b/dom/webauthn/WinWebAuthnManager.cpp
@@ -8,17 +8,17 @@
 #include "mozilla/MozPromise.h"
 #include "mozilla/ipc/BackgroundParent.h"
 #include "mozilla/ClearOnShutdown.h"
 #include "mozilla/Unused.h"
 #include "nsTextFormatter.h"
 #include "winwebauthn/webauthn.h"
 
 #ifdef OS_WIN
-#include "WinWebAuthnManager.h"
+#  include "WinWebAuthnManager.h"
 #endif
 
 namespace mozilla {
 namespace dom {
 
 namespace {
 static mozilla::LazyLogModule gWinWebAuthnManagerLog("winwebauthnkeymanager");
 StaticAutoPtr<WinWebAuthnManager> gWinWebAuthnManager;
--- a/embedding/ios/GeckoEmbed/GeckoEmbed/AppDelegate.m
+++ b/embedding/ios/GeckoEmbed/GeckoEmbed/AppDelegate.m
@@ -9,37 +9,45 @@
 #import "AppDelegate.h"
 
 @interface AppDelegate ()
 
 @end
 
 @implementation AppDelegate
 
-
-- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
-    // Override point for customization after application launch.
-    return YES;
+- (BOOL)application:(UIApplication *)application
+    didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
+  // Override point for customization after application launch.
+  return YES;
 }
 
 - (void)applicationWillResignActive:(UIApplication *)application {
-    // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state.
-    // Use this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates. Games should use this method to pause the game.
+  // Sent when the application is about to move from active to inactive state. This can occur for
+  // certain types of temporary interruptions (such as an incoming phone call or SMS message) or
+  // when the user quits the application and it begins the transition to the background state. Use
+  // this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates.
+  // Games should use this method to pause the game.
 }
 
 - (void)applicationDidEnterBackground:(UIApplication *)application {
-    // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later.
-    // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits.
+  // Use this method to release shared resources, save user data, invalidate timers, and store
+  // enough application state information to restore your application to its current state in case
+  // it is terminated later. If your application supports background execution, this method is
+  // called instead of applicationWillTerminate: when the user quits.
 }
 
 - (void)applicationWillEnterForeground:(UIApplication *)application {
-    // Called as part of the transition from the background to the inactive state; here you can undo many of the changes made on entering the background.
+  // Called as part of the transition from the background to the inactive state; here you can undo
+  // many of the changes made on entering the background.
 }
 
 - (void)applicationDidBecomeActive:(UIApplication *)application {
-    // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface.
+  // Restart any tasks that were paused (or not yet started) while the application was inactive. If
+  // the application was previously in the background, optionally refresh the user interface.
 }
 
 - (void)applicationWillTerminate:(UIApplication *)application {
-    // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:.
+  // Called when the application is about to terminate. Save data if appropriate. See also
+  // applicationDidEnterBackground:.
 }
 
 @end
--- a/embedding/ios/GeckoEmbed/GeckoEmbed/ViewController.m
+++ b/embedding/ios/GeckoEmbed/GeckoEmbed/ViewController.m
@@ -10,18 +10,18 @@
 
 @interface ViewController ()
 
 @end
 
 @implementation ViewController
 
 - (void)viewDidLoad {
-    [super viewDidLoad];
-    // Do any additional setup after loading the view, typically from a nib.
+  [super viewDidLoad];
+  // Do any additional setup after loading the view, typically from a nib.
 }
 
 - (void)didReceiveMemoryWarning {
-    [super didReceiveMemoryWarning];
-    // Dispose of any resources that can be recreated.
+  [super didReceiveMemoryWarning];
+  // Dispose of any resources that can be recreated.
 }
 
 @end
--- a/embedding/ios/GeckoEmbed/js/dirs.m
+++ b/embedding/ios/GeckoEmbed/js/dirs.m
@@ -1,12 +1,11 @@
 #import <Foundation/Foundation.h>
 
-bool GetDocumentsDirectory(char* dir)
-{
-    NSSearchPathDirectory directory = NSDocumentDirectory;
-    NSArray* paths = NSSearchPathForDirectoriesInDomains(directory, NSUserDomainMask, YES);
-    if ([paths count] == 0) {
-        return false;
-    }
-    strcpy(dir, [[paths objectAtIndex:0] UTF8String]);
-    return true;
+bool GetDocumentsDirectory(char* dir) {
+  NSSearchPathDirectory directory = NSDocumentDirectory;
+  NSArray* paths = NSSearchPathForDirectoriesInDomains(directory, NSUserDomainMask, YES);
+  if ([paths count] == 0) {
+    return false;
+  }
+  strcpy(dir, [[paths objectAtIndex:0] UTF8String]);
+  return true;
 }
--- a/extensions/cookie/nsPermissionManager.cpp
+++ b/extensions/cookie/nsPermissionManager.cpp
@@ -1866,20 +1866,18 @@ nsresult nsPermissionManager::AddInterna
 
   switch (op) {
     case eOperationNone: {
       // nothing to do
       return NS_OK;
     }
 
     case eOperationAdding: {
-      UpdateAutoplayTelemetry(aType,
-                              nsIPermissionManager::UNKNOWN_ACTION,
-                              aPermission,
-                              aExpireType);
+      UpdateAutoplayTelemetry(aType, nsIPermissionManager::UNKNOWN_ACTION,
+                              aPermission, aExpireType);
       if (aDBOperation == eWriteToDB) {
         // we'll be writing to the database - generate a known unique id
         id = ++mLargestID;
       } else {
         // we're reading from the database - use the id already assigned
         id = aID;
       }
 
@@ -1913,18 +1911,17 @@ nsresult nsPermissionManager::AddInterna
 
       // If the type we want to remove is EXPIRE_POLICY, we need to reject
       // attempts to change the permission.
       if (entry->GetPermissions()[index].mExpireType == EXPIRE_POLICY) {
         NS_WARNING("Attempting to remove EXPIRE_POLICY permission");
         break;
       }
 
-      UpdateAutoplayTelemetry(aType,
-                              oldPermissionEntry.mPermission,
+      UpdateAutoplayTelemetry(aType, oldPermissionEntry.mPermission,
                               nsIPermissionManager::UNKNOWN_ACTION,
                               aExpireType);
       entry->GetPermissions().RemoveElementAt(index);
 
       // Record a count of the number of preload permissions present in the
       // content process.
       if (IsPreloadPermission(mTypeArray[typeIndex].get())) {
         sPreloadPermissionCount--;
@@ -1956,20 +1953,18 @@ nsresult nsPermissionManager::AddInterna
 
       // If the existing type is EXPIRE_POLICY, we need to reject attempts to
       // change the permission.
       if (entry->GetPermissions()[index].mExpireType == EXPIRE_POLICY) {
         NS_WARNING("Attempting to modify EXPIRE_POLICY permission");
         break;
       }
 
-      UpdateAutoplayTelemetry(aType,
-                              entry->GetPermissions()[index].mPermission,
-                              aPermission,
-                              aExpireType);
+      UpdateAutoplayTelemetry(aType, entry->GetPermissions()[index].mPermission,
+                              aPermission, aExpireType);
 
       // If the new expireType is EXPIRE_SESSION, then we have to keep a
       // copy of the previous permission/expireType values. This cached value
       // will be used when restoring the permissions of an app.
       if (entry->GetPermissions()[index].mExpireType !=
               nsIPermissionManager::EXPIRE_SESSION &&
           aExpireType == nsIPermissionManager::EXPIRE_SESSION) {
         entry->GetPermissions()[index].mNonSessionPermission =
--- a/image/imgFrame.cpp
+++ b/image/imgFrame.cpp
@@ -505,17 +505,19 @@ nsresult imgFrame::Optimize(DrawTarget* 
   // XXX(seth): It's currently unclear if there's any reason why we can't
   // optimize non-premult surfaces. We should look into removing this.
   if (mNonPremult) {
     return NS_OK;
   }
   if (!gfxVars::UseWebRender()) {
     mOptSurface = aTarget->OptimizeSourceSurface(mLockedSurface);
   } else {
-    mOptSurface = gfxPlatform::GetPlatform()->ScreenReferenceDrawTarget()->OptimizeSourceSurface(mLockedSurface);
+    mOptSurface = gfxPlatform::GetPlatform()
+                      ->ScreenReferenceDrawTarget()
+                      ->OptimizeSourceSurface(mLockedSurface);
   }
   if (mOptSurface == mLockedSurface) {
     mOptSurface = nullptr;
   }
 
   if (mOptSurface) {
     // There's no reason to keep our original surface around if we have an
     // optimized surface. Release our reference to it. This will leave
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -178,20 +178,19 @@ LifoAlloc::UniqueBumpChunk LifoAlloc::ne
                    (minSize & (size_t(1) << (BitSize<size_t>::value - 1))))) {
     return nullptr;
   }
 
   // Note: When computing chunkSize growth, we only are interested in chunks
   // used for small allocations. This excludes unused chunks, oversized chunks,
   // and chunks transferred in from another LifoAlloc.
   MOZ_ASSERT(curSize_ >= smallAllocsSize_);
-  const size_t chunkSize =
-      (oversize || minSize > defaultChunkSize_)
-          ? MallocGoodSize(minSize)
-          : NextSize(defaultChunkSize_, smallAllocsSize_);
+  const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
+                               ? MallocGoodSize(minSize)
+                               : NextSize(defaultChunkSize_, smallAllocsSize_);
 
   // Create a new BumpChunk, and allocate space for it.
   UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
   if (!result) {
     return nullptr;
   }
   MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
   return result;
--- a/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
@@ -115,150 +115,150 @@ inline bool js::jit::AtomicOperations::I
 inline void js::jit::AtomicOperations::ShutDown() {
   // Nothing
 }
 
 // When compiling with Clang on 32-bit linux it will be necessary to link with
 // -latomic to get the proper 64-bit intrinsics.
 
 inline bool js::jit::AtomicOperations::hasAtomic8() {
-#  if defined(HAS_64BIT_ATOMICS)
+#if defined(HAS_64BIT_ATOMICS)
   return true;
-#  else
+#else
   return false;
-#  endif
+#endif
 }
 
 inline bool js::jit::AtomicOperations::isLockfree8() {
-#  if defined(HAS_64BIT_LOCKFREE)
+#if defined(HAS_64BIT_LOCKFREE)
   return true;
-#  else
+#else
   return false;
-#  endif
+#endif
 }
 
 inline void js::jit::AtomicOperations::fenceSeqCst() {
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   __sync_synchronize();
-#  else
+#else
   __atomic_thread_fence(__ATOMIC_SEQ_CST);
-#  endif
+#endif
 }
 
 template <typename T>
 inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   __sync_synchronize();
   T v = *addr;
   __sync_synchronize();
-#  else
+#else
   T v;
   __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-#  endif
+#endif
   return v;
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::loadSeqCst(int64_t* addr) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline uint64_t AtomicOperations::loadSeqCst(uint64_t* addr) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   __sync_synchronize();
   *addr = val;
   __sync_synchronize();
-#  else
+#else
   __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline void AtomicOperations::storeSeqCst(int64_t* addr, int64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline void AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   T v;
   __sync_synchronize();
   do {
     v = *addr;
   } while (__sync_val_compare_and_swap(addr, v, val) != v);
   return v;
-#  else
+#else
   T v;
   __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
   return v;
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline uint64_t AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
                                                           T newval) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   return __sync_val_compare_and_swap(addr, oldval, newval);
-#  else
+#else
   __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
                             __ATOMIC_SEQ_CST);
   return oldval;
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::compareExchangeSeqCst(int64_t* addr,
                                                        int64_t oldval,
                                                        int64_t newval) {
   MOZ_CRASH("No 64-bit atomics");
@@ -268,157 +268,157 @@ template <>
 inline uint64_t AtomicOperations::compareExchangeSeqCst(uint64_t* addr,
                                                         uint64_t oldval,
                                                         uint64_t newval) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   return __sync_fetch_and_add(addr, val);
-#  else
+#else
   return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::fetchAddSeqCst(int64_t* addr, int64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline uint64_t AtomicOperations::fetchAddSeqCst(uint64_t* addr, uint64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   return __sync_fetch_and_sub(addr, val);
-#  else
+#else
   return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::fetchSubSeqCst(int64_t* addr, int64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline uint64_t AtomicOperations::fetchSubSeqCst(uint64_t* addr, uint64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   return __sync_fetch_and_and(addr, val);
-#  else
+#else
   return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::fetchAndSeqCst(int64_t* addr, int64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline uint64_t AtomicOperations::fetchAndSeqCst(uint64_t* addr, uint64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   return __sync_fetch_and_or(addr, val);
-#  else
+#else
   return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::fetchOrSeqCst(int64_t* addr, int64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline uint64_t AtomicOperations::fetchOrSeqCst(uint64_t* addr, uint64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   return __sync_fetch_and_xor(addr, val);
-#  else
+#else
   return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-#  endif
+#endif
 }
 
-#  ifndef HAS_64BIT_ATOMICS
+#ifndef HAS_64BIT_ATOMICS
 namespace js {
 namespace jit {
 
 template <>
 inline int64_t AtomicOperations::fetchXorSeqCst(int64_t* addr, int64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 template <>
 inline uint64_t AtomicOperations::fetchXorSeqCst(uint64_t* addr, uint64_t val) {
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
-#  endif
+#endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
   // This is actually roughly right even on 32-bit platforms since in that
   // case, double, int64, and uint64 loads need not be access-atomic.
   //
   // We could use __atomic_load, but it would be needlessly expensive on
--- a/js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
@@ -66,17 +66,17 @@ inline void js::jit::AtomicOperations::f
   _ReadWriteBarrier();
 #if defined(_M_IX86) || defined(_M_X64)
   _mm_mfence();
 #elif defined(_M_ARM64)
   // MemoryBarrier is defined in winnt.h, which we don't want to include here.
   // This expression is the expansion of MemoryBarrier.
   __dmb(_ARM64_BARRIER_SY);
 #else
-#error "Unknown hardware for MSVC"
+#  error "Unknown hardware for MSVC"
 #endif
 }
 
 template <typename T>
 inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
   _ReadWriteBarrier();
   T v = *addr;
   _ReadWriteBarrier();
--- a/js/src/jit/shared/AtomicOperations-feeling-lucky.h
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky.h
@@ -3,17 +3,16 @@
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_shared_AtomicOperations_feeling_lucky_h
 #define jit_shared_AtomicOperations_feeling_lucky_h
 
 #if defined(__clang__) || defined(__GNUC__)
-# include "jit/shared/AtomicOperations-feeling-lucky-gcc.h"
+#  include "jit/shared/AtomicOperations-feeling-lucky-gcc.h"
 #elif defined(_MSC_VER)
-# include "jit/shared/AtomicOperations-feeling-lucky-msvc.h"
+#  include "jit/shared/AtomicOperations-feeling-lucky-msvc.h"
 #else
-# error "No AtomicOperations support for this platform+compiler combination"
+#  error "No AtomicOperations support for this platform+compiler combination"
 #endif
 
-#endif // jit_shared_AtomicOperations_feeling_lucky_h
-
+#endif  // jit_shared_AtomicOperations_feeling_lucky_h
--- a/js/src/jit/shared/AtomicOperations-shared-jit.cpp
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
@@ -92,43 +92,40 @@ static constexpr Register64 AtomicReturn
 // Assigned registers except temp are disjoint from the argument registers,
 // since accounting for both 32-bit and 64-bit arguments and constraints on the
 // result register is much too messy.  The temp is in an argument register since
 // it won't be used until we've moved all arguments to other registers.
 //
 // Save LR because it's the second scratch register.  The first scratch register
 // is r12 (IP).  The atomics implementation in the MacroAssembler uses both.
 
-static const LiveRegisterSet AtomicNonVolatileRegs =
-  LiveRegisterSet(GeneralRegisterSet((uint32_t(1) << Registers::r4) |
-                                     (uint32_t(1) << Registers::r5) |
-                                     (uint32_t(1) << Registers::r6) |
-                                     (uint32_t(1) << Registers::r7) |
-                                     (uint32_t(1) << Registers::r8) |
-                                     (uint32_t(1) << Registers::lr)),
-                  FloatRegisterSet(0));
+static const LiveRegisterSet AtomicNonVolatileRegs = LiveRegisterSet(
+    GeneralRegisterSet(
+        (uint32_t(1) << Registers::r4) | (uint32_t(1) << Registers::r5) |
+        (uint32_t(1) << Registers::r6) | (uint32_t(1) << Registers::r7) |
+        (uint32_t(1) << Registers::r8) | (uint32_t(1) << Registers::lr)),
+    FloatRegisterSet(0));
 
 static constexpr Register AtomicPtrReg = r8;
 static constexpr Register AtomicPtr2Reg = r6;
 static constexpr Register AtomicTemp = r3;
 static constexpr Register AtomicValReg = r6;
 static constexpr Register64 AtomicValReg64(r7, r6);
 static constexpr Register AtomicVal2Reg = r4;
 static constexpr Register64 AtomicVal2Reg64(r5, r4);
 
 static constexpr Register64 AtomicReturnReg64 = ReturnReg64;
 
 #elif defined(JS_CODEGEN_X86)
 
 // There are no argument registers.
 
-static const LiveRegisterSet AtomicNonVolatileRegs =
-  LiveRegisterSet(GeneralRegisterSet((1 << X86Encoding::rbx) |
-                                     (1 << X86Encoding::rsi)),
-                  FloatRegisterSet(0));
+static const LiveRegisterSet AtomicNonVolatileRegs = LiveRegisterSet(
+    GeneralRegisterSet((1 << X86Encoding::rbx) | (1 << X86Encoding::rsi)),
+    FloatRegisterSet(0));
 
 static constexpr Register AtomicPtrReg = esi;
 static constexpr Register AtomicPtr2Reg = ebx;
 static constexpr Register AtomicValReg = ebx;
 static constexpr Register AtomicVal2Reg = ecx;
 static constexpr Register AtomicTemp = edx;
 
 // 64-bit registers for cmpxchg8b.  ValReg/Val2Reg/Temp are not used in this
@@ -164,28 +161,29 @@ static constexpr Scalar::Type SIZEWORD =
 //
 // A "word" is an item that we can copy using only register intermediate storage
 // on all platforms; words can be individually copied without worrying about
 // overlap.
 //
 // Blocks and words can be aligned or unaligned; specific (generated) copying
 // functions handle this in platform-specific ways.
 
-static constexpr size_t WORDSIZE = sizeof(uintptr_t); // Also see SIZEWORD above
-static constexpr size_t BLOCKSIZE = 8 * WORDSIZE;     // Must be a power of 2
+static constexpr size_t WORDSIZE =
+    sizeof(uintptr_t);                             // Also see SIZEWORD above
+static constexpr size_t BLOCKSIZE = 8 * WORDSIZE;  // Must be a power of 2
 
-static_assert(BLOCKSIZE % WORDSIZE == 0, "A block is an integral number of words");
+static_assert(BLOCKSIZE % WORDSIZE == 0,
+              "A block is an integral number of words");
 
 static constexpr size_t WORDMASK = WORDSIZE - 1;
 static constexpr size_t BLOCKMASK = BLOCKSIZE - 1;
 
-struct ArgIterator
-{
-    ABIArgGenerator abi;
-    unsigned argBase = 0;
+struct ArgIterator {
+  ABIArgGenerator abi;
+  unsigned argBase = 0;
 };
 
 static void GenGprArg(MacroAssembler& masm, MIRType t, ArgIterator* iter,
                       Register reg) {
   MOZ_ASSERT(t == MIRType::Pointer || t == MIRType::Int32);
   ABIArg arg = iter->abi.next(t);
   switch (arg.kind()) {
     case ABIArg::GPR: {
@@ -195,19 +193,17 @@ static void GenGprArg(MacroAssembler& ma
       break;
     }
     case ABIArg::Stack: {
       Address src(masm.getStackPointer(),
                   iter->argBase + arg.offsetFromArgBase());
       masm.loadPtr(src, reg);
       break;
     }
-    default: {
-      MOZ_CRASH("Not possible");
-    }
+    default: { MOZ_CRASH("Not possible"); }
   }
 }
 
 static void GenGpr64Arg(MacroAssembler& masm, ArgIterator* iter,
                         Register64 reg) {
   ABIArg arg = iter->abi.next(MIRType::Int64);
   switch (arg.kind()) {
     case ABIArg::GPR: {
@@ -231,19 +227,17 @@ static void GenGpr64Arg(MacroAssembler& 
     case ABIArg::GPR_PAIR: {
       if (arg.gpr64() != reg) {
         masm.move32(arg.oddGpr(), reg.high);
         masm.move32(arg.evenGpr(), reg.low);
       }
       break;
     }
 #endif
-    default: {
-      MOZ_CRASH("Not possible");
-    }
+    default: { MOZ_CRASH("Not possible"); }
   }
 }
 
 static uint32_t GenPrologue(MacroAssembler& masm, ArgIterator* iter) {
   masm.assumeUnreachable("Shouldn't get here");
   masm.flushBuffer();
   masm.haltingAlign(CodeAlignment);
   masm.setFramePushed(0);
@@ -358,50 +352,50 @@ static uint32_t GenStore(MacroAssembler&
   }
   masm.memoryBarrier(sync.barrierAfter);
 
   GenEpilogue(masm);
   return start;
 }
 
 enum class CopyDir {
-  DOWN,                       // Move data down, ie, iterate toward higher addresses
-  UP                          // The other way
+  DOWN,  // Move data down, ie, iterate toward higher addresses
+  UP     // The other way
 };
 
 static uint32_t GenCopy(MacroAssembler& masm, Scalar::Type size,
                         uint32_t unroll, CopyDir direction) {
   ArgIterator iter;
   uint32_t start = GenPrologue(masm, &iter);
 
   Register dest = AtomicPtrReg;
   Register src = AtomicPtr2Reg;
 
   GenGprArg(masm, MIRType::Pointer, &iter, dest);
   GenGprArg(masm, MIRType::Pointer, &iter, src);
 
-  uint32_t offset = direction == CopyDir::DOWN ? 0 : unroll-1;
+  uint32_t offset = direction == CopyDir::DOWN ? 0 : unroll - 1;
   for (uint32_t i = 0; i < unroll; i++) {
     switch (size) {
       case SIZE8:
         masm.load8ZeroExtend(Address(src, offset), AtomicTemp);
         masm.store8(AtomicTemp, Address(dest, offset));
         break;
       case SIZE16:
-        masm.load16ZeroExtend(Address(src, offset*2), AtomicTemp);
-        masm.store16(AtomicTemp, Address(dest, offset*2));
+        masm.load16ZeroExtend(Address(src, offset * 2), AtomicTemp);
+        masm.store16(AtomicTemp, Address(dest, offset * 2));
         break;
       case SIZE32:
-        masm.load32(Address(src, offset*4), AtomicTemp);
-        masm.store32(AtomicTemp, Address(dest, offset*4));
+        masm.load32(Address(src, offset * 4), AtomicTemp);
+        masm.store32(AtomicTemp, Address(dest, offset * 4));
         break;
       case SIZE64:
 #if defined(JS_64BIT)
-        masm.load64(Address(src, offset*8), AtomicTemp64);
-        masm.store64(AtomicTemp64, Address(dest, offset*8));
+        masm.load64(Address(src, offset * 8), AtomicTemp64);
+        masm.store64(AtomicTemp64, Address(dest, offset * 8));
         break;
 #else
         MOZ_CRASH("64-bit atomic load/store not available on this platform");
 #endif
       default:
         MOZ_CRASH("Unknown size");
     }
     offset += direction == CopyDir::DOWN ? 1 : -1;
@@ -474,46 +468,44 @@ static uint32_t GenExchange(MacroAssembl
     default:
       MOZ_CRASH("Unknown size");
   }
 
   GenEpilogue(masm);
   return start;
 }
 
-static uint32_t
-GenFetchOp(MacroAssembler& masm, Scalar::Type size, AtomicOp op,
-           Synchronization sync) {
+static uint32_t GenFetchOp(MacroAssembler& masm, Scalar::Type size, AtomicOp op,
+                           Synchronization sync) {
   ArgIterator iter;
   uint32_t start = GenPrologue(masm, &iter);
   GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);
 
   Address addr(AtomicPtrReg, 0);
   switch (size) {
     case SIZE8:
     case SIZE16:
     case SIZE32: {
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
       Register tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp
-        ? Register::Invalid()
-        : AtomicTemp;
+                         ? Register::Invalid()
+                         : AtomicTemp;
 #else
       Register tmp = AtomicTemp;
 #endif
       GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
-      masm.atomicFetchOp(size, sync, op, AtomicValReg, addr, tmp,
-                         ReturnReg);
+      masm.atomicFetchOp(size, sync, op, AtomicValReg, addr, tmp, ReturnReg);
       break;
     }
     case SIZE64: {
 #if defined(JS_64BIT)
 #  if defined(JS_CODEGEN_X64)
       Register64 tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp
-        ? Register64::Invalid()
-        : AtomicTemp64;
+                           ? Register64::Invalid()
+                           : AtomicTemp64;
 #  else
       Register64 tmp = AtomicTemp64;
 #  endif
       GenGpr64Arg(masm, &iter, AtomicValReg64);
       masm.atomicFetchOp64(sync, op, AtomicValReg64, addr, tmp,
                            AtomicReturnReg64);
       break;
 #else
@@ -568,30 +560,39 @@ uint64_t (*AtomicStore64Unsynchronized)(
 // See the definitions of BLOCKSIZE and WORDSIZE earlier.  The "unaligned"
 // functions perform individual byte copies (and must always be "down" or "up").
 // The others ignore alignment issues, and thus either depend on unaligned
 // accesses being OK or not being invoked on unaligned addresses.
 //
 // src and dest point to the lower addresses of the respective data areas
 // irrespective of "up" or "down".
 
-static void (*AtomicCopyUnalignedBlockDownUnsynchronized)(uint8_t* dest, const uint8_t* src);
-static void (*AtomicCopyUnalignedBlockUpUnsynchronized)(uint8_t* dest, const uint8_t* src);
-static void (*AtomicCopyUnalignedWordDownUnsynchronized)(uint8_t* dest, const uint8_t* src);
-static void (*AtomicCopyUnalignedWordUpUnsynchronized)(uint8_t* dest, const uint8_t* src);
+static void (*AtomicCopyUnalignedBlockDownUnsynchronized)(uint8_t* dest,
+                                                          const uint8_t* src);
+static void (*AtomicCopyUnalignedBlockUpUnsynchronized)(uint8_t* dest,
+                                                        const uint8_t* src);
+static void (*AtomicCopyUnalignedWordDownUnsynchronized)(uint8_t* dest,
+                                                         const uint8_t* src);
+static void (*AtomicCopyUnalignedWordUpUnsynchronized)(uint8_t* dest,
+                                                       const uint8_t* src);
 
-static void (*AtomicCopyBlockDownUnsynchronized)(uint8_t* dest, const uint8_t* src);
-static void (*AtomicCopyBlockUpUnsynchronized)(uint8_t* dest, const uint8_t* src);
+static void (*AtomicCopyBlockDownUnsynchronized)(uint8_t* dest,
+                                                 const uint8_t* src);
+static void (*AtomicCopyBlockUpUnsynchronized)(uint8_t* dest,
+                                               const uint8_t* src);
 static void (*AtomicCopyWordUnsynchronized)(uint8_t* dest, const uint8_t* src);
 static void (*AtomicCopyByteUnsynchronized)(uint8_t* dest, const uint8_t* src);
 
 uint8_t (*AtomicCmpXchg8SeqCst)(uint8_t* addr, uint8_t oldval, uint8_t newval);
-uint16_t (*AtomicCmpXchg16SeqCst)(uint16_t* addr, uint16_t oldval, uint16_t newval);
-uint32_t (*AtomicCmpXchg32SeqCst)(uint32_t* addr, uint32_t oldval, uint32_t newval);
-uint64_t (*AtomicCmpXchg64SeqCst)(uint64_t* addr, uint64_t oldval, uint64_t newval);
+uint16_t (*AtomicCmpXchg16SeqCst)(uint16_t* addr, uint16_t oldval,
+                                  uint16_t newval);
+uint32_t (*AtomicCmpXchg32SeqCst)(uint32_t* addr, uint32_t oldval,
+                                  uint32_t newval);
+uint64_t (*AtomicCmpXchg64SeqCst)(uint64_t* addr, uint64_t oldval,
+                                  uint64_t newval);
 
 uint8_t (*AtomicExchange8SeqCst)(uint8_t* addr, uint8_t val);
 uint16_t (*AtomicExchange16SeqCst)(uint16_t* addr, uint16_t val);
 uint32_t (*AtomicExchange32SeqCst)(uint32_t* addr, uint32_t val);
 #ifdef JS_64BIT
 uint64_t (*AtomicExchange64SeqCst)(uint64_t* addr, uint64_t val);
 #endif
 
@@ -621,18 +622,17 @@ uint16_t (*AtomicXor16SeqCst)(uint16_t* 
 uint32_t (*AtomicXor32SeqCst)(uint32_t* addr, uint32_t val);
 #ifdef JS_64BIT
 uint64_t (*AtomicXor64SeqCst)(uint64_t* addr, uint64_t val);
 #endif
 
 static bool UnalignedAccessesAreOK() {
 #ifdef DEBUG
   const char* flag = getenv("JS_NO_UNALIGNED_MEMCPY");
-  if (flag && *flag == '1')
-    return false;
+  if (flag && *flag == '1') return false;
 #endif
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
   return true;
 #elif defined(JS_CODEGEN_ARM)
   return !HasAlignmentFault();
 #elif defined(JS_CODEGEN_ARM64)
   // This is not necessarily true but it's the best guess right now.
   return true;
@@ -646,30 +646,29 @@ void AtomicMemcpyDownUnsynchronized(uint
   const uint8_t* lim = src + nbytes;
 
   // Set up bulk copying.  The cases are ordered the way they are on the
   // assumption that if we can achieve aligned copies even with a little
   // preprocessing then that is better than unaligned copying on a platform
   // that supports it.
 
   if (nbytes >= WORDSIZE) {
-    void (*copyBlock)(uint8_t* dest, const uint8_t* src);
-    void (*copyWord)(uint8_t* dest, const uint8_t* src);
+    void (*copyBlock)(uint8_t * dest, const uint8_t* src);
+    void (*copyWord)(uint8_t * dest, const uint8_t* src);
 
     if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
-      const uint8_t* cutoff = (const uint8_t*)JS_ROUNDUP(uintptr_t(src),
-                                                         WORDSIZE);
-      MOZ_ASSERT(cutoff <= lim); // because nbytes >= WORDSIZE
+      const uint8_t* cutoff =
+          (const uint8_t*)JS_ROUNDUP(uintptr_t(src), WORDSIZE);
+      MOZ_ASSERT(cutoff <= lim);  // because nbytes >= WORDSIZE
       while (src < cutoff) {
         AtomicCopyByteUnsynchronized(dest++, src++);
       }
       copyBlock = AtomicCopyBlockDownUnsynchronized;
       copyWord = AtomicCopyWordUnsynchronized;
-    }
-    else if (UnalignedAccessesAreOK()) {
+    } else if (UnalignedAccessesAreOK()) {
       copyBlock = AtomicCopyBlockDownUnsynchronized;
       copyWord = AtomicCopyWordUnsynchronized;
     } else {
       copyBlock = AtomicCopyUnalignedBlockDownUnsynchronized;
       copyWord = AtomicCopyUnalignedWordDownUnsynchronized;
     }
 
     // Bulk copy, first larger blocks and then individual words.
@@ -699,29 +698,28 @@ void AtomicMemcpyDownUnsynchronized(uint
 void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src,
                                   size_t nbytes) {
   const uint8_t* lim = src;
 
   src += nbytes;
   dest += nbytes;
 
   if (nbytes >= WORDSIZE) {
-    void (*copyBlock)(uint8_t* dest, const uint8_t* src);
-    void (*copyWord)(uint8_t* dest, const uint8_t* src);
+    void (*copyBlock)(uint8_t * dest, const uint8_t* src);
+    void (*copyWord)(uint8_t * dest, const uint8_t* src);
 
     if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
       const uint8_t* cutoff = (const uint8_t*)(uintptr_t(src) & ~WORDMASK);
-      MOZ_ASSERT(cutoff >= lim); // Because nbytes >= WORDSIZE
+      MOZ_ASSERT(cutoff >= lim);  // Because nbytes >= WORDSIZE
       while (src > cutoff) {
         AtomicCopyByteUnsynchronized(--dest, --src);
       }
       copyBlock = AtomicCopyBlockUpUnsynchronized;
       copyWord = AtomicCopyWordUnsynchronized;
-    }
-    else if (UnalignedAccessesAreOK()) {
+    } else if (UnalignedAccessesAreOK()) {
       copyBlock = AtomicCopyBlockUpUnsynchronized;
       copyWord = AtomicCopyWordUnsynchronized;
     } else {
       copyBlock = AtomicCopyUnalignedBlockUpUnsynchronized;
       copyWord = AtomicCopyUnalignedWordUpUnsynchronized;
     }
 
     const uint8_t* blocklim = src - ((src - lim) & ~BLOCKMASK);
@@ -792,28 +790,28 @@ bool InitializeJittedAtomics() {
   uint32_t store8Unsynchronized = GenStore(masm, SIZE8, None);
   uint32_t store16Unsynchronized = GenStore(masm, SIZE16, None);
   uint32_t store32Unsynchronized = GenStore(masm, SIZE32, None);
 #ifdef JS_64BIT
   uint32_t store64Unsynchronized = GenStore(masm, SIZE64, None);
 #endif
 
   uint32_t copyUnalignedBlockDownUnsynchronized =
-    GenCopy(masm, SIZE8, BLOCKSIZE, CopyDir::DOWN);
+      GenCopy(masm, SIZE8, BLOCKSIZE, CopyDir::DOWN);
   uint32_t copyUnalignedBlockUpUnsynchronized =
-    GenCopy(masm, SIZE8, BLOCKSIZE, CopyDir::UP);
+      GenCopy(masm, SIZE8, BLOCKSIZE, CopyDir::UP);
   uint32_t copyUnalignedWordDownUnsynchronized =
-    GenCopy(masm, SIZE8, WORDSIZE, CopyDir::DOWN);
+      GenCopy(masm, SIZE8, WORDSIZE, CopyDir::DOWN);
   uint32_t copyUnalignedWordUpUnsynchronized =
-    GenCopy(masm, SIZE8, WORDSIZE, CopyDir::UP);
+      GenCopy(masm, SIZE8, WORDSIZE, CopyDir::UP);
 
   uint32_t copyBlockDownUnsynchronized =
-    GenCopy(masm, SIZEWORD, BLOCKSIZE/WORDSIZE, CopyDir::DOWN);
+      GenCopy(masm, SIZEWORD, BLOCKSIZE / WORDSIZE, CopyDir::DOWN);
   uint32_t copyBlockUpUnsynchronized =
-    GenCopy(masm, SIZEWORD, BLOCKSIZE/WORDSIZE, CopyDir::UP);
+      GenCopy(masm, SIZEWORD, BLOCKSIZE / WORDSIZE, CopyDir::UP);
   uint32_t copyWordUnsynchronized = GenCopy(masm, SIZEWORD, 1, CopyDir::DOWN);
   uint32_t copyByteUnsynchronized = GenCopy(masm, SIZE8, 1, CopyDir::DOWN);
 
   uint32_t cmpxchg8SeqCst = GenCmpxchg(masm, SIZE8, Full);
   uint32_t cmpxchg16SeqCst = GenCmpxchg(masm, SIZE16, Full);
   uint32_t cmpxchg32SeqCst = GenCmpxchg(masm, SIZE32, Full);
   uint32_t cmpxchg64SeqCst = GenCmpxchg(masm, SIZE64, Full);
 
@@ -855,20 +853,19 @@ bool InitializeJittedAtomics() {
   masm.finish();
   if (masm.oom()) {
     return false;
   }
 
   // Allocate executable memory.
   uint32_t codeLength = masm.bytesNeeded();
   size_t roundedCodeLength = JS_ROUNDUP(codeLength, ExecutableCodePageSize);
-  uint8_t* code =
-    (uint8_t*)AllocateExecutableMemory(roundedCodeLength,
-                                       ProtectionSetting::Writable,
-                                       MemCheckKind::MakeUndefined);
+  uint8_t* code = (uint8_t*)AllocateExecutableMemory(
+      roundedCodeLength, ProtectionSetting::Writable,
+      MemCheckKind::MakeUndefined);
   if (!code) {
     return false;
   }
 
   // Zero the padding.
   memset(code + codeLength, 0, roundedCodeLength - codeLength);
 
   // Copy the code into place but do not flush, as the flush path requires a
@@ -881,152 +878,148 @@ bool InitializeJittedAtomics() {
   // Reprotect the whole region to avoid having separate RW and RX mappings.
   if (!ExecutableAllocator::makeExecutable(code, roundedCodeLength)) {
     DeallocateExecutableMemory(code, roundedCodeLength);
     return false;
   }
 
   // Create the function pointers.
 
-  AtomicFenceSeqCst = (void(*)())(code + fenceSeqCst);
+  AtomicFenceSeqCst = (void (*)())(code + fenceSeqCst);
 
 #ifndef JS_64BIT
-  AtomicCompilerFence = (void(*)())(code + nop);
+  AtomicCompilerFence = (void (*)())(code + nop);
 #endif
 
   AtomicLoad8SeqCst = (uint8_t(*)(const uint8_t* addr))(code + load8SeqCst);
   AtomicLoad16SeqCst = (uint16_t(*)(const uint16_t* addr))(code + load16SeqCst);
   AtomicLoad32SeqCst = (uint32_t(*)(const uint32_t* addr))(code + load32SeqCst);
 #ifdef JS_64BIT
   AtomicLoad64SeqCst = (uint64_t(*)(const uint64_t* addr))(code + load64SeqCst);
 #endif
 
   AtomicLoad8Unsynchronized =
-    (uint8_t(*)(const uint8_t* addr))(code + load8Unsynchronized);
+      (uint8_t(*)(const uint8_t* addr))(code + load8Unsynchronized);
   AtomicLoad16Unsynchronized =
-    (uint16_t(*)(const uint16_t* addr))(code + load16Unsynchronized);
+      (uint16_t(*)(const uint16_t* addr))(code + load16Unsynchronized);
   AtomicLoad32Unsynchronized =
-    (uint32_t(*)(const uint32_t* addr))(code + load32Unsynchronized);
+      (uint32_t(*)(const uint32_t* addr))(code + load32Unsynchronized);
 #ifdef JS_64BIT
   AtomicLoad64Unsynchronized =
-    (uint64_t(*)(const uint64_t* addr))(code + load64Unsynchronized);
+      (uint64_t(*)(const uint64_t* addr))(code + load64Unsynchronized);
 #endif
 
   AtomicStore8SeqCst =
-    (uint8_t(*)(uint8_t* addr, uint8_t val))(code + store8SeqCst);
+      (uint8_t(*)(uint8_t * addr, uint8_t val))(code + store8SeqCst);
   AtomicStore16SeqCst =
-    (uint16_t(*)(uint16_t* addr, uint16_t val))(code + store16SeqCst);
+      (uint16_t(*)(uint16_t * addr, uint16_t val))(code + store16SeqCst);
   AtomicStore32SeqCst =
-    (uint32_t(*)(uint32_t* addr, uint32_t val))(code + store32SeqCst);
+      (uint32_t(*)(uint32_t * addr, uint32_t val))(code + store32SeqCst);
 #ifdef JS_64BIT
   AtomicStore64SeqCst =
-    (uint64_t(*)(uint64_t* addr, uint64_t val))(code + store64SeqCst);
+      (uint64_t(*)(uint64_t * addr, uint64_t val))(code + store64SeqCst);
 #endif
 
   AtomicStore8Unsynchronized =
-    (uint8_t(*)(uint8_t* addr, uint8_t val))(code + store8Unsynchronized);
-  AtomicStore16Unsynchronized =
-    (uint16_t(*)(uint16_t* addr, uint16_t val))(code + store16Unsynchronized);
-  AtomicStore32Unsynchronized =
-    (uint32_t(*)(uint32_t* addr, uint32_t val))(code + store32Unsynchronized);
+      (uint8_t(*)(uint8_t * addr, uint8_t val))(code + store8Unsynchronized);
+  AtomicStore16Unsynchronized = (uint16_t(*)(uint16_t * addr, uint16_t val))(
+      code + store16Unsynchronized);
+  AtomicStore32Unsynchronized = (uint32_t(*)(uint32_t * addr, uint32_t val))(
+      code + store32Unsynchronized);
 #ifdef JS_64BIT
-  AtomicStore64Unsynchronized =
-    (uint64_t(*)(uint64_t* addr, uint64_t val))(code + store64Unsynchronized);
+  AtomicStore64Unsynchronized = (uint64_t(*)(uint64_t * addr, uint64_t val))(
+      code + store64Unsynchronized);
 #endif
 
   AtomicCopyUnalignedBlockDownUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(
-      code + copyUnalignedBlockDownUnsynchronized);
+      (void (*)(uint8_t * dest, const uint8_t* src))(
+          code + copyUnalignedBlockDownUnsynchronized);
   AtomicCopyUnalignedBlockUpUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(
-      code + copyUnalignedBlockUpUnsynchronized);
+      (void (*)(uint8_t * dest, const uint8_t* src))(
+          code + copyUnalignedBlockUpUnsynchronized);
   AtomicCopyUnalignedWordDownUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(
-      code + copyUnalignedWordDownUnsynchronized);
+      (void (*)(uint8_t * dest, const uint8_t* src))(
+          code + copyUnalignedWordDownUnsynchronized);
   AtomicCopyUnalignedWordUpUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(
-      code + copyUnalignedWordUpUnsynchronized);
+      (void (*)(uint8_t * dest, const uint8_t* src))(
+          code + copyUnalignedWordUpUnsynchronized);
 
-  AtomicCopyBlockDownUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(
-      code + copyBlockDownUnsynchronized);
-  AtomicCopyBlockUpUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(
-      code + copyBlockUpUnsynchronized);
-  AtomicCopyWordUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(code + copyWordUnsynchronized);
-  AtomicCopyByteUnsynchronized =
-    (void(*)(uint8_t* dest, const uint8_t* src))(code + copyByteUnsynchronized);
+  AtomicCopyBlockDownUnsynchronized = (void (*)(
+      uint8_t * dest, const uint8_t* src))(code + copyBlockDownUnsynchronized);
+  AtomicCopyBlockUpUnsynchronized = (void (*)(
+      uint8_t * dest, const uint8_t* src))(code + copyBlockUpUnsynchronized);
+  AtomicCopyWordUnsynchronized = (void (*)(uint8_t * dest, const uint8_t* src))(
+      code + copyWordUnsynchronized);
+  AtomicCopyByteUnsynchronized = (void (*)(uint8_t * dest, const uint8_t* src))(
+      code + copyByteUnsynchronized);
 
-  AtomicCmpXchg8SeqCst =
-    (uint8_t(*)(uint8_t* addr, uint8_t oldval, uint8_t newval))(
-      code + cmpxchg8SeqCst);
+  AtomicCmpXchg8SeqCst = (uint8_t(*)(uint8_t * addr, uint8_t oldval,
+                                     uint8_t newval))(code + cmpxchg8SeqCst);
   AtomicCmpXchg16SeqCst =
-    (uint16_t(*)(uint16_t* addr, uint16_t oldval, uint16_t newval))(
-      code + cmpxchg16SeqCst);
+      (uint16_t(*)(uint16_t * addr, uint16_t oldval, uint16_t newval))(
+          code + cmpxchg16SeqCst);
   AtomicCmpXchg32SeqCst =
-    (uint32_t(*)(uint32_t* addr, uint32_t oldval, uint32_t newval))(
-      code + cmpxchg32SeqCst);
+      (uint32_t(*)(uint32_t * addr, uint32_t oldval, uint32_t newval))(
+          code + cmpxchg32SeqCst);
   AtomicCmpXchg64SeqCst =
-    (uint64_t(*)(uint64_t* addr, uint64_t oldval, uint64_t newval))(
-      code + cmpxchg64SeqCst);
+      (uint64_t(*)(uint64_t * addr, uint64_t oldval, uint64_t newval))(
+          code + cmpxchg64SeqCst);
 
-  AtomicExchange8SeqCst = (uint8_t(*)(uint8_t* addr, uint8_t val))(
-    code + exchange8SeqCst);
-  AtomicExchange16SeqCst = (uint16_t(*)(uint16_t* addr, uint16_t val))(
-    code + exchange16SeqCst);
-  AtomicExchange32SeqCst = (uint32_t(*)(uint32_t* addr, uint32_t val))(
-    code + exchange32SeqCst);
+  AtomicExchange8SeqCst =
+      (uint8_t(*)(uint8_t * addr, uint8_t val))(code + exchange8SeqCst);
+  AtomicExchange16SeqCst =
+      (uint16_t(*)(uint16_t * addr, uint16_t val))(code + exchange16SeqCst);
+  AtomicExchange32SeqCst =
+      (uint32_t(*)(uint32_t * addr, uint32_t val))(code + exchange32SeqCst);
 #ifdef JS_64BIT
-  AtomicExchange64SeqCst = (uint64_t(*)(uint64_t* addr, uint64_t val))(
-    code + exchange64SeqCst);
+  AtomicExchange64SeqCst =
+      (uint64_t(*)(uint64_t * addr, uint64_t val))(code + exchange64SeqCst);
 #endif
 
   AtomicAdd8SeqCst =
-    (uint8_t(*)(uint8_t* addr, uint8_t val))(code + add8SeqCst);
+      (uint8_t(*)(uint8_t * addr, uint8_t val))(code + add8SeqCst);
   AtomicAdd16SeqCst =
-    (uint16_t(*)(uint16_t* addr, uint16_t val))(code + add16SeqCst);
+      (uint16_t(*)(uint16_t * addr, uint16_t val))(code + add16SeqCst);
   AtomicAdd32SeqCst =
-    (uint32_t(*)(uint32_t* addr, uint32_t val))(code + add32SeqCst);
+      (uint32_t(*)(uint32_t * addr, uint32_t val))(code + add32SeqCst);
 #ifdef JS_64BIT
   AtomicAdd64SeqCst =
-    (uint64_t(*)(uint64_t* addr, uint64_t val))(code + add64SeqCst);
+      (uint64_t(*)(uint64_t * addr, uint64_t val))(code + add64SeqCst);
 #endif
 
   AtomicAnd8SeqCst =
-    (uint8_t(*)(uint8_t* addr, uint8_t val))(code + and8SeqCst);
+      (uint8_t(*)(uint8_t * addr, uint8_t val))(code + and8SeqCst);
   AtomicAnd16SeqCst =
-    (uint16_t(*)(uint16_t* addr, uint16_t val))(code + and16SeqCst);
+      (uint16_t(*)(uint16_t * addr, uint16_t val))(code + and16SeqCst);
   AtomicAnd32SeqCst =
-    (uint32_t(*)(uint32_t* addr, uint32_t val))(code + and32SeqCst);
+      (uint32_t(*)(uint32_t * addr, uint32_t val))(code + and32SeqCst);
 #ifdef JS_64BIT
   AtomicAnd64SeqCst =
-    (uint64_t(*)(uint64_t* addr, uint64_t val))(code + and64SeqCst);
+      (uint64_t(*)(uint64_t * addr, uint64_t val))(code + and64SeqCst);
 #endif
 
-  AtomicOr8SeqCst =
-    (uint8_t(*)(uint8_t* addr, uint8_t val))(code + or8SeqCst);
+  AtomicOr8SeqCst = (uint8_t(*)(uint8_t * addr, uint8_t val))(code + or8SeqCst);
   AtomicOr16SeqCst =
-    (uint16_t(*)(uint16_t* addr, uint16_t val))(code + or16SeqCst);
+      (uint16_t(*)(uint16_t * addr, uint16_t val))(code + or16SeqCst);
   AtomicOr32SeqCst =
-    (uint32_t(*)(uint32_t* addr, uint32_t val))(code + or32SeqCst);
+      (uint32_t(*)(uint32_t * addr, uint32_t val))(code + or32SeqCst);
 #ifdef JS_64BIT
   AtomicOr64SeqCst =
-    (uint64_t(*)(uint64_t* addr, uint64_t val))(code + or64SeqCst);
+      (uint64_t(*)(uint64_t * addr, uint64_t val))(code + or64SeqCst);
 #endif
 
   AtomicXor8SeqCst =
-    (uint8_t(*)(uint8_t* addr, uint8_t val))(code + xor8SeqCst);
+      (uint8_t(*)(uint8_t * addr, uint8_t val))(code + xor8SeqCst);
   AtomicXor16SeqCst =
-    (uint16_t(*)(uint16_t* addr, uint16_t val))(code + xor16SeqCst);
+      (uint16_t(*)(uint16_t * addr, uint16_t val))(code + xor16SeqCst);
   AtomicXor32SeqCst =
-    (uint32_t(*)(uint32_t* addr, uint32_t val))(code + xor32SeqCst);
+      (uint32_t(*)(uint32_t * addr, uint32_t val))(code + xor32SeqCst);
 #ifdef JS_64BIT
   AtomicXor64SeqCst =
-    (uint64_t(*)(uint64_t* addr, uint64_t val))(code + xor64SeqCst);
+      (uint64_t(*)(uint64_t * addr, uint64_t val))(code + xor64SeqCst);
 #endif
 
   codeSegment = code;
   codeSegmentSize = roundedCodeLength;
 
   return true;
 }
 
@@ -1034,10 +1027,10 @@ void ShutDownJittedAtomics() {
   // Must have been initialized.
   MOZ_ASSERT(codeSegment);
 
   DeallocateExecutableMemory(codeSegment, codeSegmentSize);
   codeSegment = nullptr;
   codeSegmentSize = 0;
 }
 
-} // jit
-} // js
+}  // namespace jit
+}  // namespace js
--- a/js/src/jit/shared/AtomicOperations-shared-jit.h
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.h
@@ -119,58 +119,57 @@ extern uint16_t (*AtomicXor16SeqCst)(uin
 extern uint32_t (*AtomicXor32SeqCst)(uint32_t* addr, uint32_t val);
 #ifdef JS_64BIT
 extern uint64_t (*AtomicXor64SeqCst)(uint64_t* addr, uint64_t val);
 #endif
 
 // `cmpxchg` takes a cell address, an expected value and a replacement value.
 // If the value in the cell equals the expected value then the replacement value
 // is stored in the cell.  It always returns the value previously in the cell.
-extern uint8_t (*AtomicCmpXchg8SeqCst)(uint8_t* addr, uint8_t oldval, uint8_t newval);
-extern uint16_t (*AtomicCmpXchg16SeqCst)(uint16_t* addr, uint16_t oldval, uint16_t newval);
-extern uint32_t (*AtomicCmpXchg32SeqCst)(uint32_t* addr, uint32_t oldval, uint32_t newval);
-extern uint64_t (*AtomicCmpXchg64SeqCst)(uint64_t* addr, uint64_t oldval, uint64_t newval);
+extern uint8_t (*AtomicCmpXchg8SeqCst)(uint8_t* addr, uint8_t oldval,
+                                       uint8_t newval);
+extern uint16_t (*AtomicCmpXchg16SeqCst)(uint16_t* addr, uint16_t oldval,
+                                         uint16_t newval);
+extern uint32_t (*AtomicCmpXchg32SeqCst)(uint32_t* addr, uint32_t oldval,
+                                         uint32_t newval);
+extern uint64_t (*AtomicCmpXchg64SeqCst)(uint64_t* addr, uint64_t oldval,
+                                         uint64_t newval);
 
 // `...MemcpyDown` moves bytes toward lower addresses in memory: dest <= src.
 // `...MemcpyUp` moves bytes toward higher addresses in memory: dest >= src.
-extern void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src, size_t nbytes);
-extern void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src, size_t nbytes);
+extern void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
+                                           size_t nbytes);
+extern void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src,
+                                         size_t nbytes);
 
-} }
+}  // namespace jit
+}  // namespace js
 
-inline bool js::jit::AtomicOperations::hasAtomic8() {
-  return true;
-}
+inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
 
-inline bool js::jit::AtomicOperations::isLockfree8() {
-  return true;
-}
+inline bool js::jit::AtomicOperations::isLockfree8() { return true; }
+
+inline void js::jit::AtomicOperations::fenceSeqCst() { AtomicFenceSeqCst(); }
 
-inline void
-js::jit::AtomicOperations::fenceSeqCst() {
-  AtomicFenceSeqCst();
-}
-
-#define JIT_LOADOP(T, U, loadop)                            \
-  template<> inline T                                       \
-  AtomicOperations::loadSeqCst(T* addr) {                   \
-    JS::AutoSuppressGCAnalysis nogc;                        \
-    return (T)loadop((U*)addr);                             \
+#define JIT_LOADOP(T, U, loadop)                   \
+  template <>                                      \
+  inline T AtomicOperations::loadSeqCst(T* addr) { \
+    JS::AutoSuppressGCAnalysis nogc;               \
+    return (T)loadop((U*)addr);                    \
   }
 
 #ifndef JS_64BIT
-#  define JIT_LOADOP_CAS(T)                                     \
-  template<>                                                    \
-  inline T                                                      \
-  AtomicOperations::loadSeqCst(T* addr) {                       \
-    JS::AutoSuppressGCAnalysis nogc;                            \
-    AtomicCompilerFence();                                      \
-    return (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, 0, 0);     \
-  }
-#endif // !JS_64BIT
+#  define JIT_LOADOP_CAS(T)                                   \
+    template <>                                               \
+    inline T AtomicOperations::loadSeqCst(T* addr) {          \
+      JS::AutoSuppressGCAnalysis nogc;                        \
+      AtomicCompilerFence();                                  \
+      return (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, 0, 0); \
+    }
+#endif  // !JS_64BIT
 
 namespace js {
 namespace jit {
 
 JIT_LOADOP(int8_t, uint8_t, AtomicLoad8SeqCst)
 JIT_LOADOP(uint8_t, uint8_t, AtomicLoad8SeqCst)
 JIT_LOADOP(int16_t, uint16_t, AtomicLoad16SeqCst)
 JIT_LOADOP(uint16_t, uint16_t, AtomicLoad16SeqCst)
@@ -180,48 +179,47 @@ JIT_LOADOP(uint32_t, uint32_t, AtomicLoa
 #ifdef JIT_LOADOP_CAS
 JIT_LOADOP_CAS(int64_t)
 JIT_LOADOP_CAS(uint64_t)
 #else
 JIT_LOADOP(int64_t, uint64_t, AtomicLoad64SeqCst)
 JIT_LOADOP(uint64_t, uint64_t, AtomicLoad64SeqCst)
 #endif
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_LOADOP
 #undef JIT_LOADOP_CAS
 
-#define JIT_STOREOP(T, U, storeop)                      \
-  template<> inline void                                \
-  AtomicOperations::storeSeqCst(T* addr, T val) {       \
-    JS::AutoSuppressGCAnalysis nogc;                    \
-    storeop((U*)addr, val);                             \
+#define JIT_STOREOP(T, U, storeop)                            \
+  template <>                                                 \
+  inline void AtomicOperations::storeSeqCst(T* addr, T val) { \
+    JS::AutoSuppressGCAnalysis nogc;                          \
+    storeop((U*)addr, val);                                   \
   }
 
 #ifndef JS_64BIT
-#  define JIT_STOREOP_CAS(T)                                          \
-  template<>                                                          \
-  inline void                                                         \
-  AtomicOperations::storeSeqCst(T* addr, T val) {                     \
-    JS::AutoSuppressGCAnalysis nogc;                                  \
-    AtomicCompilerFence();                                            \
-    T oldval = *addr; /* good initial approximation */                \
-    for (;;) {                                                        \
-      T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr,           \
-                                           (uint64_t)oldval,          \
-                                           (uint64_t)val);            \
-      if (nextval == oldval) {                                        \
-        break;                                                        \
-      }                                                               \
-      oldval = nextval;                                               \
-    }                                                                 \
-    AtomicCompilerFence();                                            \
-  }
-#endif // !JS_64BIT
+#  define JIT_STOREOP_CAS(T)                                                   \
+    template <>                                                                \
+    inline void AtomicOperations::storeSeqCst(T* addr, T val) {                \
+      JS::AutoSuppressGCAnalysis nogc;                                         \
+      AtomicCompilerFence();                                                   \
+      T oldval = *addr; /* good initial approximation */                       \
+      for (;;) {                                                               \
+        T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr,                  \
+                                             (uint64_t)oldval, (uint64_t)val); \
+        if (nextval == oldval) {                                               \
+          break;                                                               \
+        }                                                                      \
+        oldval = nextval;                                                      \
+      }                                                                        \
+      AtomicCompilerFence();                                                   \
+    }
+#endif  // !JS_64BIT
 
 namespace js {
 namespace jit {
 
 JIT_STOREOP(int8_t, uint8_t, AtomicStore8SeqCst)
 JIT_STOREOP(uint8_t, uint8_t, AtomicStore8SeqCst)
 JIT_STOREOP(int16_t, uint16_t, AtomicStore16SeqCst)
 JIT_STOREOP(uint16_t, uint16_t, AtomicStore16SeqCst)
@@ -231,48 +229,48 @@ JIT_STOREOP(uint32_t, uint32_t, AtomicSt
 #ifdef JIT_STOREOP_CAS
 JIT_STOREOP_CAS(int64_t)
 JIT_STOREOP_CAS(uint64_t)
 #else
 JIT_STOREOP(int64_t, uint64_t, AtomicStore64SeqCst)
 JIT_STOREOP(uint64_t, uint64_t, AtomicStore64SeqCst)
 #endif
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_STOREOP
 #undef JIT_STOREOP_CAS
 
-#define JIT_EXCHANGEOP(T, U, xchgop)                            \
-  template<> inline T                                           \
-  AtomicOperations::exchangeSeqCst(T* addr, T val) {            \
-    JS::AutoSuppressGCAnalysis nogc;                            \
-    return (T)xchgop((U*)addr, (U)val);                         \
+#define JIT_EXCHANGEOP(T, U, xchgop)                          \
+  template <>                                                 \
+  inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+    JS::AutoSuppressGCAnalysis nogc;                          \
+    return (T)xchgop((U*)addr, (U)val);                       \
   }
 
 #ifndef JS_64BIT
-#  define JIT_EXCHANGEOP_CAS(T)                                       \
-  template<> inline T                                                 \
-  AtomicOperations::exchangeSeqCst(T* addr, T val) {                  \
-    JS::AutoSuppressGCAnalysis nogc;                                  \
-    AtomicCompilerFence();                                            \
-    T oldval = *addr;                                                 \
-    for (;;) {                                                        \
-      T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr,           \
-                                           (uint64_t)oldval,          \
-                                           (uint64_t)val);            \
-      if (nextval == oldval) {                                        \
-        break;                                                        \
-      }                                                               \
-      oldval = nextval;                                               \
-    }                                                                 \
-    AtomicCompilerFence();                                            \
-    return oldval;                                                    \
-  }
-#endif // !JS_64BIT
+#  define JIT_EXCHANGEOP_CAS(T)                                                \
+    template <>                                                                \
+    inline T AtomicOperations::exchangeSeqCst(T* addr, T val) {                \
+      JS::AutoSuppressGCAnalysis nogc;                                         \
+      AtomicCompilerFence();                                                   \
+      T oldval = *addr;                                                        \
+      for (;;) {                                                               \
+        T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr,                  \
+                                             (uint64_t)oldval, (uint64_t)val); \
+        if (nextval == oldval) {                                               \
+          break;                                                               \
+        }                                                                      \
+        oldval = nextval;                                                      \
+      }                                                                        \
+      AtomicCompilerFence();                                                   \
+      return oldval;                                                           \
+    }
+#endif  // !JS_64BIT
 
 namespace js {
 namespace jit {
 
 JIT_EXCHANGEOP(int8_t, uint8_t, AtomicExchange8SeqCst)
 JIT_EXCHANGEOP(uint8_t, uint8_t, AtomicExchange8SeqCst)
 JIT_EXCHANGEOP(int16_t, uint16_t, AtomicExchange16SeqCst)
 JIT_EXCHANGEOP(uint16_t, uint16_t, AtomicExchange16SeqCst)
@@ -282,324 +280,342 @@ JIT_EXCHANGEOP(uint32_t, uint32_t, Atomi
 #ifdef JIT_EXCHANGEOP_CAS
 JIT_EXCHANGEOP_CAS(int64_t)
 JIT_EXCHANGEOP_CAS(uint64_t)
 #else
 JIT_EXCHANGEOP(int64_t, uint64_t, AtomicExchange64SeqCst)
 JIT_EXCHANGEOP(uint64_t, uint64_t, AtomicExchange64SeqCst)
 #endif
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_EXCHANGEOP
 #undef JIT_EXCHANGEOP_CAS
 
-#define JIT_CAS(T, U, cmpxchg)                                          \
-  template<> inline T                                                   \
-  AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) { \
-    JS::AutoSuppressGCAnalysis nogc;                                    \
-    return (T)cmpxchg((U*)addr, (U)oldval, (U)newval);                  \
+#define JIT_CAS(T, U, cmpxchg)                                        \
+  template <>                                                         \
+  inline T AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, \
+                                                   T newval) {        \
+    JS::AutoSuppressGCAnalysis nogc;                                  \
+    return (T)cmpxchg((U*)addr, (U)oldval, (U)newval);                \
   }
 
 namespace js {
 namespace jit {
 
 JIT_CAS(int8_t, uint8_t, AtomicCmpXchg8SeqCst)
 JIT_CAS(uint8_t, uint8_t, AtomicCmpXchg8SeqCst)
 JIT_CAS(int16_t, uint16_t, AtomicCmpXchg16SeqCst)
 JIT_CAS(uint16_t, uint16_t, AtomicCmpXchg16SeqCst)
 JIT_CAS(int32_t, uint32_t, AtomicCmpXchg32SeqCst)
 JIT_CAS(uint32_t, uint32_t, AtomicCmpXchg32SeqCst)
 JIT_CAS(int64_t, uint64_t, AtomicCmpXchg64SeqCst)
 JIT_CAS(uint64_t, uint64_t, AtomicCmpXchg64SeqCst)
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_CAS
 
-#define JIT_FETCHADDOP(T, U, xadd)                                   \
-  template<> inline T                                                \
-  AtomicOperations::fetchAddSeqCst(T* addr, T val) {                 \
-    JS::AutoSuppressGCAnalysis nogc;                                 \
-    return (T)xadd((U*)addr, (U)val);                                \
-  }                                                                  \
+#define JIT_FETCHADDOP(T, U, xadd)                            \
+  template <>                                                 \
+  inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+    JS::AutoSuppressGCAnalysis nogc;                          \
+    return (T)xadd((U*)addr, (U)val);                         \
+  }
 
-#define JIT_FETCHSUBOP(T)                                            \
-  template<> inline T                                                \
-  AtomicOperations::fetchSubSeqCst(T* addr, T val) {                 \
-    JS::AutoSuppressGCAnalysis nogc;                                 \
-    return fetchAddSeqCst(addr, (T)(0-val));                         \
+#define JIT_FETCHSUBOP(T)                                     \
+  template <>                                                 \
+  inline T AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
+    JS::AutoSuppressGCAnalysis nogc;                          \
+    return fetchAddSeqCst(addr, (T)(0 - val));                \
   }
 
 #ifndef JS_64BIT
-#  define JIT_FETCHADDOP_CAS(T)                                         \
-  template<> inline T                                                   \
-  AtomicOperations::fetchAddSeqCst(T* addr, T val) {                    \
-    JS::AutoSuppressGCAnalysis nogc;                                    \
-    AtomicCompilerFence();                                              \
-    T oldval = *addr; /* Good initial approximation */                  \
-    for (;;) {                                                          \
-      T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr,             \
-                                           (uint64_t)oldval,            \
-                                           (uint64_t)(oldval + val));   \
-      if (nextval == oldval) {                                          \
-        break;                                                          \
-      }                                                                 \
-      oldval = nextval;                                                 \
-    }                                                                   \
-    AtomicCompilerFence();                                              \
-    return oldval;                                                      \
-  }
-#endif // !JS_64BIT
+#  define JIT_FETCHADDOP_CAS(T)                                           \
+    template <>                                                           \
+    inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) {           \
+      JS::AutoSuppressGCAnalysis nogc;                                    \
+      AtomicCompilerFence();                                              \
+      T oldval = *addr; /* Good initial approximation */                  \
+      for (;;) {                                                          \
+        T nextval = (T)AtomicCmpXchg64SeqCst(                             \
+            (uint64_t*)addr, (uint64_t)oldval, (uint64_t)(oldval + val)); \
+        if (nextval == oldval) {                                          \
+          break;                                                          \
+        }                                                                 \
+        oldval = nextval;                                                 \
+      }                                                                   \
+      AtomicCompilerFence();                                              \
+      return oldval;                                                      \
+    }
+#endif  // !JS_64BIT
 
 namespace js {
 namespace jit {
 
 JIT_FETCHADDOP(int8_t, uint8_t, AtomicAdd8SeqCst)
 JIT_FETCHADDOP(uint8_t, uint8_t, AtomicAdd8SeqCst)
 JIT_FETCHADDOP(int16_t, uint16_t, AtomicAdd16SeqCst)
 JIT_FETCHADDOP(uint16_t, uint16_t, AtomicAdd16SeqCst)
 JIT_FETCHADDOP(int32_t, uint32_t, AtomicAdd32SeqCst)
 JIT_FETCHADDOP(uint32_t, uint32_t, AtomicAdd32SeqCst)
 
 #ifdef JIT_FETCHADDOP_CAS
 JIT_FETCHADDOP_CAS(int64_t)
 JIT_FETCHADDOP_CAS(uint64_t)
 #else
-JIT_FETCHADDOP(int64_t,  uint64_t, AtomicAdd64SeqCst)
+JIT_FETCHADDOP(int64_t, uint64_t, AtomicAdd64SeqCst)
 JIT_FETCHADDOP(uint64_t, uint64_t, AtomicAdd64SeqCst)
 #endif
 
 JIT_FETCHSUBOP(int8_t)
 JIT_FETCHSUBOP(uint8_t)
 JIT_FETCHSUBOP(int16_t)
 JIT_FETCHSUBOP(uint16_t)
 JIT_FETCHSUBOP(int32_t)
 JIT_FETCHSUBOP(uint32_t)
 JIT_FETCHSUBOP(int64_t)
 JIT_FETCHSUBOP(uint64_t)
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_FETCHADDOP
 #undef JIT_FETCHADDOP_CAS
 #undef JIT_FETCHSUBOP
 
-#define JIT_FETCHBITOPX(T, U, name, op)                                 \
-  template<> inline T                                                   \
-  AtomicOperations::name(T* addr, T val) {                              \
-    JS::AutoSuppressGCAnalysis nogc;                                    \
-    return (T)op((U *)addr, (U)val);                                    \
+#define JIT_FETCHBITOPX(T, U, name, op)             \
+  template <>                                       \
+  inline T AtomicOperations::name(T* addr, T val) { \
+    JS::AutoSuppressGCAnalysis nogc;                \
+    return (T)op((U*)addr, (U)val);                 \
   }
 
-#define JIT_FETCHBITOP(T, U, andop, orop, xorop)                        \
-  JIT_FETCHBITOPX(T, U, fetchAndSeqCst, andop)                          \
-  JIT_FETCHBITOPX(T, U, fetchOrSeqCst, orop)                            \
+#define JIT_FETCHBITOP(T, U, andop, orop, xorop) \
+  JIT_FETCHBITOPX(T, U, fetchAndSeqCst, andop)   \
+  JIT_FETCHBITOPX(T, U, fetchOrSeqCst, orop)     \
   JIT_FETCHBITOPX(T, U, fetchXorSeqCst, xorop)
 
 #ifndef JS_64BIT
 
 #  define AND_OP &
-#  define OR_OP  |
+#  define OR_OP |
 #  define XOR_OP ^
 
-#  define JIT_FETCHBITOPX_CAS(T, name, OP)                              \
-  template<> inline T                                                   \
-  AtomicOperations::name(T* addr, T val) {                              \
-    JS::AutoSuppressGCAnalysis nogc;                                    \
-    AtomicCompilerFence();                                              \
-    T oldval = *addr;                                                   \
-    for (;;) {                                                          \
-      T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr,             \
-                                           (uint64_t)oldval,            \
-                                           (uint64_t)(oldval OP val));  \
-      if (nextval == oldval) {                                          \
-        break;                                                          \
-      }                                                                 \
-      oldval = nextval;                                                 \
-    }                                                                   \
-    AtomicCompilerFence();                                              \
-    return oldval;                                                      \
-  }
+#  define JIT_FETCHBITOPX_CAS(T, name, OP)                                 \
+    template <>                                                            \
+    inline T AtomicOperations::name(T* addr, T val) {                      \
+      JS::AutoSuppressGCAnalysis nogc;                                     \
+      AtomicCompilerFence();                                               \
+      T oldval = *addr;                                                    \
+      for (;;) {                                                           \
+        T nextval = (T)AtomicCmpXchg64SeqCst(                              \
+            (uint64_t*)addr, (uint64_t)oldval, (uint64_t)(oldval OP val)); \
+        if (nextval == oldval) {                                           \
+          break;                                                           \
+        }                                                                  \
+        oldval = nextval;                                                  \
+      }                                                                    \
+      AtomicCompilerFence();                                               \
+      return oldval;                                                       \
+    }
 
-#  define JIT_FETCHBITOP_CAS(T)                                      \
-  JIT_FETCHBITOPX_CAS(T, fetchAndSeqCst, AND_OP)                     \
-  JIT_FETCHBITOPX_CAS(T, fetchOrSeqCst, OR_OP)                       \
-  JIT_FETCHBITOPX_CAS(T, fetchXorSeqCst, XOR_OP)
+#  define JIT_FETCHBITOP_CAS(T)                    \
+    JIT_FETCHBITOPX_CAS(T, fetchAndSeqCst, AND_OP) \
+    JIT_FETCHBITOPX_CAS(T, fetchOrSeqCst, OR_OP)   \
+    JIT_FETCHBITOPX_CAS(T, fetchXorSeqCst, XOR_OP)
 
 #endif  // !JS_64BIT
 
 namespace js {
 namespace jit {
 
-JIT_FETCHBITOP(int8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst, AtomicXor8SeqCst)
-JIT_FETCHBITOP(uint8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst, AtomicXor8SeqCst)
-JIT_FETCHBITOP(int16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst, AtomicXor16SeqCst)
-JIT_FETCHBITOP(uint16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst, AtomicXor16SeqCst)
-JIT_FETCHBITOP(int32_t, uint32_t,  AtomicAnd32SeqCst, AtomicOr32SeqCst, AtomicXor32SeqCst)
-JIT_FETCHBITOP(uint32_t, uint32_t, AtomicAnd32SeqCst, AtomicOr32SeqCst, AtomicXor32SeqCst)
+JIT_FETCHBITOP(int8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst,
+               AtomicXor8SeqCst)
+JIT_FETCHBITOP(uint8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst,
+               AtomicXor8SeqCst)
+JIT_FETCHBITOP(int16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst,
+               AtomicXor16SeqCst)
+JIT_FETCHBITOP(uint16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst,
+               AtomicXor16SeqCst)
+JIT_FETCHBITOP(int32_t, uint32_t, AtomicAnd32SeqCst, AtomicOr32SeqCst,
+               AtomicXor32SeqCst)
+JIT_FETCHBITOP(uint32_t, uint32_t, AtomicAnd32SeqCst, AtomicOr32SeqCst,
+               AtomicXor32SeqCst)
 
 #ifdef JIT_FETCHBITOP_CAS
 JIT_FETCHBITOP_CAS(int64_t)
 JIT_FETCHBITOP_CAS(uint64_t)
 #else
-JIT_FETCHBITOP(int64_t,  uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst, AtomicXor64SeqCst)
-JIT_FETCHBITOP(uint64_t, uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst, AtomicXor64SeqCst)
+JIT_FETCHBITOP(int64_t, uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst,
+               AtomicXor64SeqCst)
+JIT_FETCHBITOP(uint64_t, uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst,
+               AtomicXor64SeqCst)
 #endif
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_FETCHBITOPX_CAS
 #undef JIT_FETCHBITOPX
 #undef JIT_FETCHBITOP_CAS
 #undef JIT_FETCHBITOP
 
-#define JIT_LOADSAFE(T, U, loadop)                              \
-  template<>                                                    \
-  inline T                                                      \
-  js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {        \
-    JS::AutoSuppressGCAnalysis nogc;                            \
-    union { U u; T t; };                                        \
-    u = loadop((U*)addr);                                       \
-    return t;                                                   \
+#define JIT_LOADSAFE(T, U, loadop)                                \
+  template <>                                                     \
+  inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
+    JS::AutoSuppressGCAnalysis nogc;                              \
+    union {                                                       \
+      U u;                                                        \
+      T t;                                                        \
+    };                                                            \
+    u = loadop((U*)addr);                                         \
+    return t;                                                     \
   }
 
 #ifndef JS_64BIT
-#  define JIT_LOADSAFE_TEARING(T)                               \
-  template<>                                                    \
-  inline T                                                      \
-  js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {        \
-    JS::AutoSuppressGCAnalysis nogc;                            \
-    MOZ_ASSERT(sizeof(T) == 8);                                 \
-    union { uint32_t u[2]; T t; };                              \
-    uint32_t* ptr = (uint32_t*)addr;                            \
-    u[0] = AtomicLoad32Unsynchronized(ptr);                     \
-    u[1] = AtomicLoad32Unsynchronized(ptr + 1);                 \
-    return t;                                                   \
-  }
-#endif // !JS_64BIT
+#  define JIT_LOADSAFE_TEARING(T)                                   \
+    template <>                                                     \
+    inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
+      JS::AutoSuppressGCAnalysis nogc;                              \
+      MOZ_ASSERT(sizeof(T) == 8);                                   \
+      union {                                                       \
+        uint32_t u[2];                                              \
+        T t;                                                        \
+      };                                                            \
+      uint32_t* ptr = (uint32_t*)addr;                              \
+      u[0] = AtomicLoad32Unsynchronized(ptr);                       \
+      u[1] = AtomicLoad32Unsynchronized(ptr + 1);                   \
+      return t;                                                     \
+    }
+#endif  // !JS_64BIT
 
 namespace js {
 namespace jit {
 
-JIT_LOADSAFE(int8_t,   uint8_t, AtomicLoad8Unsynchronized)
-JIT_LOADSAFE(uint8_t,  uint8_t, AtomicLoad8Unsynchronized)
-JIT_LOADSAFE(int16_t,  uint16_t, AtomicLoad16Unsynchronized)
+JIT_LOADSAFE(int8_t, uint8_t, AtomicLoad8Unsynchronized)
+JIT_LOADSAFE(uint8_t, uint8_t, AtomicLoad8Unsynchronized)
+JIT_LOADSAFE(int16_t, uint16_t, AtomicLoad16Unsynchronized)
 JIT_LOADSAFE(uint16_t, uint16_t, AtomicLoad16Unsynchronized)
-JIT_LOADSAFE(int32_t,  uint32_t, AtomicLoad32Unsynchronized)
+JIT_LOADSAFE(int32_t, uint32_t, AtomicLoad32Unsynchronized)
 JIT_LOADSAFE(uint32_t, uint32_t, AtomicLoad32Unsynchronized)
 #ifdef JIT_LOADSAFE_TEARING
 JIT_LOADSAFE_TEARING(int64_t)
 JIT_LOADSAFE_TEARING(uint64_t)
 JIT_LOADSAFE_TEARING(double)
 #else
-JIT_LOADSAFE(int64_t,  uint64_t, AtomicLoad64Unsynchronized)
+JIT_LOADSAFE(int64_t, uint64_t, AtomicLoad64Unsynchronized)
 JIT_LOADSAFE(uint64_t, uint64_t, AtomicLoad64Unsynchronized)
-JIT_LOADSAFE(double,   uint64_t, AtomicLoad64Unsynchronized)
+JIT_LOADSAFE(double, uint64_t, AtomicLoad64Unsynchronized)
 #endif
-JIT_LOADSAFE(float,    uint32_t, AtomicLoad32Unsynchronized)
+JIT_LOADSAFE(float, uint32_t, AtomicLoad32Unsynchronized)
 
 // Clang requires a specialization for uint8_clamped.
-template<>
+template <>
 inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
-  uint8_clamped* addr) {
+    uint8_clamped* addr) {
   return uint8_clamped(loadSafeWhenRacy((uint8_t*)addr));
 }
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_LOADSAFE
 #undef JIT_LOADSAFE_TEARING
 
-#define JIT_STORESAFE(T, U, storeop)                               \
-  template<>                                                       \
-  inline void                                                      \
-  js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {   \
-    JS::AutoSuppressGCAnalysis nogc;                               \
-    union { U u; T t; };                                           \
-    t = val;                                                       \
-    storeop((U*)addr, u);                                          \
+#define JIT_STORESAFE(T, U, storeop)                                         \
+  template <>                                                                \
+  inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
+    JS::AutoSuppressGCAnalysis nogc;                                         \
+    union {                                                                  \
+      U u;                                                                   \
+      T t;                                                                   \
+    };                                                                       \
+    t = val;                                                                 \
+    storeop((U*)addr, u);                                                    \
   }
 
 #ifndef JS_64BIT
-#  define JIT_STORESAFE_TEARING(T)                                    \
-  template<>                                                          \
-  inline void                                                         \
-  js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {      \
-    JS::AutoSuppressGCAnalysis nogc;                                  \
-    union { uint32_t u[2]; T t; };                                    \
-    t = val;                                                          \
-    uint32_t* ptr = (uint32_t*)addr;                                  \
-    AtomicStore32Unsynchronized(ptr, u[0]);                           \
-    AtomicStore32Unsynchronized(ptr + 1, u[1]);                       \
-  }
-#endif // !JS_64BIT
+#  define JIT_STORESAFE_TEARING(T)                                             \
+    template <>                                                                \
+    inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
+      JS::AutoSuppressGCAnalysis nogc;                                         \
+      union {                                                                  \
+        uint32_t u[2];                                                         \
+        T t;                                                                   \
+      };                                                                       \
+      t = val;                                                                 \
+      uint32_t* ptr = (uint32_t*)addr;                                         \
+      AtomicStore32Unsynchronized(ptr, u[0]);                                  \
+      AtomicStore32Unsynchronized(ptr + 1, u[1]);                              \
+    }
+#endif  // !JS_64BIT
 
 namespace js {
 namespace jit {
 
-JIT_STORESAFE(int8_t,   uint8_t, AtomicStore8Unsynchronized)
-JIT_STORESAFE(uint8_t,  uint8_t, AtomicStore8Unsynchronized)
-JIT_STORESAFE(int16_t,  uint16_t, AtomicStore16Unsynchronized)
+JIT_STORESAFE(int8_t, uint8_t, AtomicStore8Unsynchronized)
+JIT_STORESAFE(uint8_t, uint8_t, AtomicStore8Unsynchronized)
+JIT_STORESAFE(int16_t, uint16_t, AtomicStore16Unsynchronized)
 JIT_STORESAFE(uint16_t, uint16_t, AtomicStore16Unsynchronized)
-JIT_STORESAFE(int32_t,  uint32_t, AtomicStore32Unsynchronized)
+JIT_STORESAFE(int32_t, uint32_t, AtomicStore32Unsynchronized)
 JIT_STORESAFE(uint32_t, uint32_t, AtomicStore32Unsynchronized)
 #ifdef JIT_STORESAFE_TEARING
 JIT_STORESAFE_TEARING(int64_t)
 JIT_STORESAFE_TEARING(uint64_t)
 JIT_STORESAFE_TEARING(double)
 #else
-JIT_STORESAFE(int64_t,  uint64_t, AtomicStore64Unsynchronized)
+JIT_STORESAFE(int64_t, uint64_t, AtomicStore64Unsynchronized)
 JIT_STORESAFE(uint64_t, uint64_t, AtomicStore64Unsynchronized)
-JIT_STORESAFE(double,   uint64_t, AtomicStore64Unsynchronized)
+JIT_STORESAFE(double, uint64_t, AtomicStore64Unsynchronized)
 #endif
-JIT_STORESAFE(float,    uint32_t, AtomicStore32Unsynchronized)
+JIT_STORESAFE(float, uint32_t, AtomicStore32Unsynchronized)
 
 // Clang requires a specialization for uint8_clamped.
-template<>
+template <>
 inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
                                                          uint8_clamped val) {
-    storeSafeWhenRacy((uint8_t*)addr, (uint8_t)val);
+  storeSafeWhenRacy((uint8_t*)addr, (uint8_t)val);
 }
 
-}}
+}  // namespace jit
+}  // namespace js
 
 #undef JIT_STORESAFE
 #undef JIT_STORESAFE_TEARING
 
 void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src,
                                                    size_t nbytes) {
-    JS::AutoSuppressGCAnalysis nogc;
-    MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest+nbytes));
-    MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src+nbytes));
-    AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+  JS::AutoSuppressGCAnalysis nogc;
+  MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+  MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+  AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
 }
 
 inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
                                                            const void* src,
                                                            size_t nbytes) {
-    JS::AutoSuppressGCAnalysis nogc;
-    if ((char*)dest <= (char*)src) {
-        AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src,
-                                       nbytes);
-    } else {
-        AtomicMemcpyUpUnsynchronized((uint8_t*)dest, (const uint8_t*)src,
-                                     nbytes);
-    }
+  JS::AutoSuppressGCAnalysis nogc;
+  if ((char*)dest <= (char*)src) {
+    AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+  } else {
+    AtomicMemcpyUpUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+  }
 }
 
 namespace js {
 namespace jit {
 
 extern bool InitializeJittedAtomics();
 extern void ShutDownJittedAtomics();
 
-}}
+}  // namespace jit
+}  // namespace js
 
 inline bool js::jit::AtomicOperations::Initialize() {
   return InitializeJittedAtomics();
 }
 
-inline void js::jit::AtomicOperations::ShutDown() {
-  ShutDownJittedAtomics();
-}
+inline void js::jit::AtomicOperations::ShutDown() { ShutDownJittedAtomics(); }
 
-#endif // jit_shared_AtomicOperations_shared_jit_h
+#endif  // jit_shared_AtomicOperations_shared_jit_h
--- a/layout/build/nsLayoutStatics.cpp
+++ b/layout/build/nsLayoutStatics.cpp
@@ -107,17 +107,17 @@
 #include "mozilla/ClearSiteData.h"
 #include "mozilla/Fuzzyfox.h"
 #include "mozilla/ServoBindings.h"
 #include "mozilla/StaticPresData.h"
 #include "mozilla/dom/WebIDLGlobalNameHash.h"
 #include "mozilla/dom/ipc/IPCBlobInputStreamStorage.h"
 #include "mozilla/dom/U2FTokenManager.h"
 #ifdef OS_WIN
-#include "mozilla/dom/WinWebAuthnManager.h"
+#  include "mozilla/dom/WinWebAuthnManager.h"
 #endif
 #include "mozilla/dom/PointerEventHandler.h"
 #include "mozilla/dom/RemoteWorkerService.h"
 #include "mozilla/dom/BlobURLProtocolHandler.h"
 #include "mozilla/dom/ReportingHeader.h"
 #include "mozilla/net/UrlClassifierFeatureFactory.h"
 #include "nsThreadManager.h"
 #include "mozilla/css/ImageLoader.h"
--- a/security/manager/ssl/TransportSecurityInfo.cpp
+++ b/security/manager/ssl/TransportSecurityInfo.cpp
@@ -279,18 +279,17 @@ nsresult TransportSecurityInfo::ReadSSLS
   nsCOMPtr<nsISupports> cert;
   rv = aStream->ReadObject(true, getter_AddRefs(cert));
   MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv), "Deserialization should not fail");
   NS_ENSURE_SUCCESS(rv, rv);
 
   if (cert) {
     mServerCert = do_QueryInterface(cert);
     if (!mServerCert) {
-      MOZ_DIAGNOSTIC_ASSERT(false,
-                            "Deserialization should not fail");
+      MOZ_DIAGNOSTIC_ASSERT(false, "Deserialization should not fail");
       return NS_NOINTERFACE;
     }
   }
 
   rv = aStream->Read16(&mCipherSuite);
   MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv), "Deserialization should not fail");
   NS_ENSURE_SUCCESS(rv, rv);
 
@@ -440,18 +439,17 @@ TransportSecurityInfo::Read(nsIObjectInp
     nsCOMPtr<nsISupports> cert;
     rv = NS_ReadOptionalObject(aStream, true, getter_AddRefs(cert));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv), "Deserialization should not fail");
     NS_ENSURE_SUCCESS(rv, rv);
 
     if (cert != nullptr) {
       mServerCert = do_QueryInterface(cert);
       if (!mServerCert) {
-        MOZ_DIAGNOSTIC_ASSERT(false,
-                              "Deserialization should not fail");
+        MOZ_DIAGNOSTIC_ASSERT(false, "Deserialization should not fail");
         return NS_NOINTERFACE;
       }
     }
 
     rv = aStream->Read16(&mCipherSuite);
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv), "Deserialization should not fail");
     NS_ENSURE_SUCCESS(rv, rv);
 
--- a/widget/cocoa/nsNativeThemeCocoa.mm
+++ b/widget/cocoa/nsNativeThemeCocoa.mm
@@ -111,17 +111,16 @@ void CUIDraw(CUIRendererRef r, CGRect re
 @end
 
 @interface CheckboxCell : NSButtonCell
 @end
 
 @implementation CheckboxCell
 @end
 
-
 static void DrawFocusRingForCellIfNeeded(NSCell* aCell, NSRect aWithFrame, NSView* aInView) {
   if ([aCell showsFirstResponder]) {
     CGContextRef cgContext = (CGContextRef)[[NSGraphicsContext currentContext] graphicsPort];
     CGContextSaveGState(cgContext);
 
     // It's important to set the focus ring style before we enter the
     // transparency layer so that the transparency layer only contains
     // the normal button mask without the focus ring, and the conversion
--- a/widget/cocoa/nsTouchBar.mm
+++ b/widget/cocoa/nsTouchBar.mm
@@ -7,38 +7,35 @@
 #include "mozilla/MacStringHelpers.h"
 #include "mozilla/Telemetry.h"
 #include "nsArrayUtils.h"
 #include "nsDirectoryServiceDefs.h"
 #include "nsIArray.h"
 
 @implementation nsTouchBar
 
-static NSTouchBarItemIdentifier CustomButtonIdentifier =
-    @"com.mozilla.firefox.touchbar.button";
+static NSTouchBarItemIdentifier CustomButtonIdentifier = @"com.mozilla.firefox.touchbar.button";
 static NSTouchBarItemIdentifier CustomMainButtonIdentifier =
     @"com.mozilla.firefox.touchbar.mainbutton";
-static NSTouchBarItemIdentifier ScrubberIdentifier =
-    @"com.mozilla.firefox.touchbar.scrubber";
+static NSTouchBarItemIdentifier ScrubberIdentifier = @"com.mozilla.firefox.touchbar.scrubber";
 
 // Non-JS scrubber implemention for the Share Scrubber,
 // since it is defined by an Apple API.
 static NSTouchBarItemIdentifier ShareScrubberIdentifier =
-  [ScrubberIdentifier stringByAppendingPathExtension:@"share"];
+    [ScrubberIdentifier stringByAppendingPathExtension:@"share"];
 
 // Used to tie action strings to buttons.
 static char sIdentifierAssociationKey;
 
 // The system default width for Touch Bar inputs is 128px. This is double.
 #define MAIN_BUTTON_WIDTH 256
 
 #pragma mark - NSTouchBarDelegate
 
-- (instancetype)init
-{
+- (instancetype)init {
   if ((self = [super init])) {
     mTouchBarHelper = do_GetService(NS_TOUCHBARHELPER_CID);
     if (!mTouchBarHelper) {
       return nil;
     }
 
     self.delegate = self;
     self.mappedLayoutItems = [NSMutableDictionary dictionary];
@@ -46,104 +43,92 @@ static char sIdentifierAssociationKey;
 
     nsresult rv = mTouchBarHelper->GetLayout(getter_AddRefs(layoutItems));
     if (NS_FAILED(rv) || !layoutItems) {
       return nil;
     }
 
     uint32_t itemCount = 0;
     layoutItems->GetLength(&itemCount);
-    // This is copied to self.defaultItemIdentifiers. Required since 
+    // This is copied to self.defaultItemIdentifiers. Required since
     // [self.mappedLayoutItems allKeys] does not preserve order.
-    NSMutableArray* orderedLayoutIdentifiers = 
-      [NSMutableArray arrayWithCapacity:itemCount];
+    NSMutableArray* orderedLayoutIdentifiers = [NSMutableArray arrayWithCapacity:itemCount];
     for (uint32_t i = 0; i < itemCount; ++i) {
       nsCOMPtr<nsITouchBarInput> input = do_QueryElementAt(layoutItems, i);
       if (!input) {
         continue;
       }
 
-      TouchBarInput* convertedInput =
-        [[TouchBarInput alloc] initWithXPCOM:input];
+      TouchBarInput* convertedInput = [[TouchBarInput alloc] initWithXPCOM:input];
 
       // Add new input to dictionary for lookup of properties in delegate.
       self.mappedLayoutItems[[convertedInput nativeIdentifier]] = convertedInput;
       orderedLayoutIdentifiers[i] = [convertedInput nativeIdentifier];
     }
 
     self.defaultItemIdentifiers = [orderedLayoutIdentifiers copy];
   }
 
   return self;
 }
 
-- (void)dealloc
-{
+- (void)dealloc {
   for (NSTouchBarItemIdentifier identifier in self.mappedLayoutItems) {
     NSTouchBarItem* item = [self itemForIdentifier:identifier];
     [item release];
   }
 
   [self.defaultItemIdentifiers release];
 
   [self.mappedLayoutItems removeAllObjects];
   [self.mappedLayoutItems release];
   [super dealloc];
 }
 
 - (NSTouchBarItem*)touchBar:(NSTouchBar*)aTouchBar
-      makeItemForIdentifier:(NSTouchBarItemIdentifier)aIdentifier
-{
+      makeItemForIdentifier:(NSTouchBarItemIdentifier)aIdentifier {
   if ([aIdentifier hasPrefix:ScrubberIdentifier]) {
     if (![aIdentifier isEqualToString:ShareScrubberIdentifier]) {
       // We're only supporting the Share scrubber for now.
       return nil;
     }
     return [self makeShareScrubberForIdentifier:aIdentifier];
   }
 
   // The cases of a button or main button require the same setup.
-  NSButton* button = [NSButton buttonWithTitle:@""
-                                        target:self
-                                        action:@selector(touchBarAction:)];
-  NSCustomTouchBarItem* item = [[NSCustomTouchBarItem alloc]
-                                  initWithIdentifier:aIdentifier];
+  NSButton* button = [NSButton buttonWithTitle:@"" target:self action:@selector(touchBarAction:)];
+  NSCustomTouchBarItem* item = [[NSCustomTouchBarItem alloc] initWithIdentifier:aIdentifier];
   item.view = button;
 
   TouchBarInput* input = self.mappedLayoutItems[aIdentifier];
   if ([aIdentifier hasPrefix:CustomButtonIdentifier]) {
     return [self updateButton:item input:input];
   } else if ([aIdentifier hasPrefix:CustomMainButtonIdentifier]) {
     return [self updateMainButton:item input:input];
   }
 
   return nil;
 }
 
-- (void)updateItem:(TouchBarInput*)aInput
-{
+- (void)updateItem:(TouchBarInput*)aInput {
   NSTouchBarItem* item = [self itemForIdentifier:[aInput nativeIdentifier]];
   if (!item) {
     return;
   }
   if ([[aInput nativeIdentifier] hasPrefix:CustomButtonIdentifier]) {
-    [self updateButton:(NSCustomTouchBarItem*)item
-                 input:aInput];
+    [self updateButton:(NSCustomTouchBarItem*)item input:aInput];
   } else if ([[aInput nativeIdentifier] hasPrefix:CustomMainButtonIdentifier]) {
-    [self updateMainButton:(NSCustomTouchBarItem*)item
-                     input:aInput];
+    [self updateMainButton:(NSCustomTouchBarItem*)item input:aInput];
   }
 
   [self.mappedLayoutItems[[aInput nativeIdentifier]] release];
   self.mappedLayoutItems[[aInput nativeIdentifier]] = aInput;
 }
 
-- (NSTouchBarItem*)updateButton:(NSCustomTouchBarItem*)aButton
-                          input:(TouchBarInput*)aInput
-{
+- (NSTouchBarItem*)updateButton:(NSCustomTouchBarItem*)aButton input:(TouchBarInput*)aInput {
   NSButton* button = (NSButton*)aButton.view;
   if (!button) {
     return nil;
   }
 
   button.title = [aInput title];
   if ([aInput image]) {
     button.image = [aInput image];
@@ -151,108 +136,96 @@ static char sIdentifierAssociationKey;
   }
 
   [button setEnabled:![aInput isDisabled]];
 
   if ([aInput color]) {
     button.bezelColor = [aInput color];
   }
 
-  objc_setAssociatedObject(button,
-    &sIdentifierAssociationKey,
-    [aInput nativeIdentifier], OBJC_ASSOCIATION_RETAIN);
+  objc_setAssociatedObject(button, &sIdentifierAssociationKey, [aInput nativeIdentifier],
+                           OBJC_ASSOCIATION_RETAIN);
 
   aButton.customizationLabel = [aInput title];
 
   return aButton;
 }
 
 - (NSTouchBarItem*)updateMainButton:(NSCustomTouchBarItem*)aMainButton
-                              input:(TouchBarInput*)aInput
-{
-  aMainButton = (NSCustomTouchBarItem*)[self updateButton:aMainButton
-                                                    input:aInput];
+                              input:(TouchBarInput*)aInput {
+  aMainButton = (NSCustomTouchBarItem*)[self updateButton:aMainButton input:aInput];
   NSButton* button = (NSButton*)aMainButton.view;
   button.imageHugsTitle = YES;
   // If empty, string is still being localized. Display a blank input instead.
   if ([button.title isEqualToString:@""]) {
     [button setImagePosition:NSNoImage];
   } else {
     [button setImagePosition:NSImageLeft];
   }
 
-  [button.widthAnchor
-    constraintGreaterThanOrEqualToConstant:MAIN_BUTTON_WIDTH].active = YES;
-  [button setContentHuggingPriority:1.0
-                      forOrientation:NSLayoutConstraintOrientationHorizontal];
+  [button.widthAnchor constraintGreaterThanOrEqualToConstant:MAIN_BUTTON_WIDTH].active = YES;
+  [button setContentHuggingPriority:1.0 forOrientation:NSLayoutConstraintOrientationHorizontal];
   return aMainButton;
 }
 
-- (NSTouchBarItem*)makeShareScrubberForIdentifier:
-                        (NSTouchBarItemIdentifier)aIdentifier 
-{
+- (NSTouchBarItem*)makeShareScrubberForIdentifier:(NSTouchBarItemIdentifier)aIdentifier {
   TouchBarInput* input = self.mappedLayoutItems[aIdentifier];
   // System-default share menu
   NSSharingServicePickerTouchBarItem* servicesItem =
-      [[NSSharingServicePickerTouchBarItem alloc]
-        initWithIdentifier:aIdentifier];
+      [[NSSharingServicePickerTouchBarItem alloc] initWithIdentifier:aIdentifier];
   servicesItem.buttonImage = [input image];
   servicesItem.delegate = self;
   return servicesItem;
 }
 
-- (void)touchBarAction:(id)aSender
-{
+- (void)touchBarAction:(id)aSender {
   NSTouchBarItemIdentifier identifier =
-    objc_getAssociatedObject(aSender, &sIdentifierAssociationKey);
-  if (!identifier || [identifier isEqualToString: @""]) {
+      objc_getAssociatedObject(aSender, &sIdentifierAssociationKey);
+  if (!identifier || [identifier isEqualToString:@""]) {
     return;
   }
 
   TouchBarInput* input = self.mappedLayoutItems[identifier];
   if (!input) {
     return;
   }
 
   nsCOMPtr<nsITouchBarInputCallback> callback = [input callback];
   callback->OnCommand();
 }
 
 #pragma mark - TouchBar Utilities
 
-+ (NSImage*)getTouchBarIconNamed:(NSString*)aImageName
-{
++ (NSImage*)getTouchBarIconNamed:(NSString*)aImageName {
   nsCOMPtr<nsIFile> resDir;
   nsAutoCString resPath;
   NSString* pathToImage;
 
   nsresult rv = NS_GetSpecialDirectory(NS_GRE_DIR, getter_AddRefs(resDir));
   resDir->AppendNative(NS_LITERAL_CSTRING("res"));
   resDir->AppendNative(NS_LITERAL_CSTRING("touchbar"));
 
   rv = resDir->GetNativePath(resPath);
-  
+
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return nil;
   }
 
   pathToImage = [NSString stringWithUTF8String:(const char*)resPath.get()];
   pathToImage = [pathToImage stringByAppendingPathComponent:aImageName];
-  NSImage* image = [[[NSImage alloc]
-                      initWithContentsOfFile:pathToImage] autorelease];
+  NSImage* image = [[[NSImage alloc] initWithContentsOfFile:pathToImage] autorelease];
   // A nil image will fail gracefully to a labelled button
 
   return image;
 }
 
 #pragma mark - NSSharingServicePickerTouchBarItemDelegate
 
 - (NSArray*)itemsForSharingServicePickerTouchBarItem:
-                (NSSharingServicePickerTouchBarItem*)aPickerTouchBarItem
-{
+    (NSSharingServicePickerTouchBarItem*)aPickerTouchBarItem {
   NSURL* urlToShare = nil;
   NSString* titleToShare = @"";
   nsAutoString url;
   nsAutoString title;
   if (mTouchBarHelper) {
     nsresult rv = mTouchBarHelper->GetActiveUrl(url);
     if (!NS_FAILED(rv)) {
       urlToShare = [NSURL URLWithString:nsCocoaUtils::ToNSString(url)];
@@ -267,148 +240,150 @@ static char sIdentifierAssociationKey;
     rv = mTouchBarHelper->GetActiveTitle(title);
     if (!NS_FAILED(rv)) {
       titleToShare = nsCocoaUtils::ToNSString(title);
     }
   }
 
   // If the user has gotten this far, they have clicked the share button so it
   // is logged.
-  Telemetry::AccumulateCategorical(
-    Telemetry::LABELS_TOUCHBAR_BUTTON_PRESSES::Share);
+  Telemetry::AccumulateCategorical(Telemetry::LABELS_TOUCHBAR_BUTTON_PRESSES::Share);
 
-  return @[urlToShare, titleToShare];
+  return @[ urlToShare, titleToShare ];
 }
 
-- (NSArray<NSSharingService*>*)sharingServicePicker:
-                           (NSSharingServicePicker*)aSharingServicePicker
-                            sharingServicesForItems:
-                                          (NSArray*)aItems
-                            proposedSharingServices:
-                       (NSArray<NSSharingService*>*)aProposedServices
-{
+- (NSArray<NSSharingService*>*)sharingServicePicker:(NSSharingServicePicker*)aSharingServicePicker
+                            sharingServicesForItems:(NSArray*)aItems
+                            proposedSharingServices:(NSArray<NSSharingService*>*)aProposedServices {
   // redundant services
   NSArray* excludedServices = @[
     @"com.apple.share.System.add-to-safari-reading-list",
   ];
 
   NSArray* sharingServices = [aProposedServices
-    filteredArrayUsingPredicate:
-      [NSPredicate predicateWithFormat:@"NOT (name IN %@)", excludedServices]];
+      filteredArrayUsingPredicate:[NSPredicate
+                                      predicateWithFormat:@"NOT (name IN %@)", excludedServices]];
 
   return sharingServices;
 }
 
 @end
 
 @implementation TouchBarInput
-- (NSString*)key { return mKey; }
-- (NSString*)title { return mTitle; }
-- (NSImage*)image { return mImage; }
-- (NSString*)type { return mType; }
-- (NSColor*)color { return mColor; }
-- (BOOL)isDisabled { return mDisabled; }
-- (NSTouchBarItemIdentifier)nativeIdentifier { return mNativeIdentifier; }
-- (nsCOMPtr<nsITouchBarInputCallback>)callback { return mCallback; }
-- (void)setKey:(NSString*)aKey
-{
+- (NSString*)key {
+  return mKey;
+}
+- (NSString*)title {
+  return mTitle;
+}
+- (NSImage*)image {
+  return mImage;
+}
+- (NSString*)type {
+  return mType;
+}
+- (NSColor*)color {
+  return mColor;
+}
+- (BOOL)isDisabled {
+  return mDisabled;
+}
+- (NSTouchBarItemIdentifier)nativeIdentifier {
+  return mNativeIdentifier;
+}
+- (nsCOMPtr<nsITouchBarInputCallback>)callback {
+  return mCallback;
+}
+- (void)setKey:(NSString*)aKey {
   [aKey retain];
   [mKey release];
   mKey = aKey;
 }
 
-- (void)setTitle:(NSString*)aTitle
-{
+- (void)setTitle:(NSString*)aTitle {
   [aTitle retain];
   [mTitle release];
   mTitle = aTitle;
 }
 
-- (void)setImage:(NSImage*)aImage
-{
+- (void)setImage:(NSImage*)aImage {
   [aImage retain];
   [mImage release];
   mImage = aImage;
 }
 
-- (void)setType:(NSString*)aType
-{
+- (void)setType:(NSString*)aType {
   [aType retain];
   [mType release];
   mType = aType;
 }
 
-- (void)setColor:(NSColor*)aColor
-{
+- (void)setColor:(NSColor*)aColor {
   [aColor retain];
   [mColor release];
   mColor = aColor;
 }
 
-- (void)setDisabled:(BOOL)aDisabled { mDisabled = aDisabled; }
+- (void)setDisabled:(BOOL)aDisabled {
+  mDisabled = aDisabled;
+}
 
-- (void)setNativeIdentifier:(NSTouchBarItemIdentifier)aNativeIdentifier
-{
+- (void)setNativeIdentifier:(NSTouchBarItemIdentifier)aNativeIdentifier {
   [aNativeIdentifier retain];
   [mNativeIdentifier release];
   mNativeIdentifier = aNativeIdentifier;
 }
 
-- (void)setCallback:(nsCOMPtr<nsITouchBarInputCallback>)aCallback
-{
+- (void)setCallback:(nsCOMPtr<nsITouchBarInputCallback>)aCallback {
   mCallback = aCallback;
 }
 
 - (id)initWithKey:(NSString*)aKey
             title:(NSString*)aTitle
             image:(NSString*)aImage
-            type:(NSString*)aType
-        callback:(nsCOMPtr<nsITouchBarInputCallback>)aCallback
-           color:(uint32_t)aColor
-        disabled:(BOOL)aDisabled
-{
+             type:(NSString*)aType
+         callback:(nsCOMPtr<nsITouchBarInputCallback>)aCallback
+            color:(uint32_t)aColor
+         disabled:(BOOL)aDisabled {
   if (self = [super init]) {
     [self setKey:aKey];
     [self setTitle:aTitle];
     [self setImage:[nsTouchBar getTouchBarIconNamed:aImage]];
     [self setType:aType];
     [self setCallback:aCallback];
     if (aColor) {
-      [self setColor:[NSColor colorWithDisplayP3Red:((aColor>>16)&0xFF)/255.0
-                                            green:((aColor>>8)&0xFF)/255.0
-                                             blue:((aColor)&0xFF)/255.0
-                                            alpha:1.0]];
+      [self setColor:[NSColor colorWithDisplayP3Red:((aColor >> 16) & 0xFF) / 255.0
+                                              green:((aColor >> 8) & 0xFF) / 255.0
+                                               blue:((aColor)&0xFF) / 255.0
+                                              alpha:1.0]];
     }
     [self setDisabled:aDisabled];
 
     NSTouchBarItemIdentifier TypeIdentifier = @"";
     if ([aType isEqualToString:@"scrubber"]) {
       TypeIdentifier = ScrubberIdentifier;
     } else if ([aType isEqualToString:@"mainButton"]) {
       TypeIdentifier = CustomMainButtonIdentifier;
     } else {
       TypeIdentifier = CustomButtonIdentifier;
     }
 
     if (!aKey) {
       [self setNativeIdentifier:TypeIdentifier];
     } else if ([aKey isEqualToString:@"share"]) {
-      [self setNativeIdentifier:
-        [TypeIdentifier stringByAppendingPathExtension:aKey]];
+      [self setNativeIdentifier:[TypeIdentifier stringByAppendingPathExtension:aKey]];
     } else {
-      [self setNativeIdentifier:
-        [TypeIdentifier stringByAppendingPathExtension:aKey]];
+      [self setNativeIdentifier:[TypeIdentifier stringByAppendingPathExtension:aKey]];
     }
   }
 
   return self;
 }
 
-- (TouchBarInput*)initWithXPCOM:(nsCOMPtr<nsITouchBarInput>)aInput
-{
+- (TouchBarInput*)initWithXPCOM:(nsCOMPtr<nsITouchBarInput>)aInput {
   nsAutoString keyStr;
   nsresult rv = aInput->GetKey(keyStr);
   if (NS_FAILED(rv)) {
     return nil;
   }
 
   nsAutoString titleStr;
   rv = aInput->GetTitle(titleStr);
@@ -450,18 +425,17 @@ static char sIdentifierAssociationKey;
                      title:nsCocoaUtils::ToNSString(titleStr)
                      image:nsCocoaUtils::ToNSString(imageStr)
                       type:nsCocoaUtils::ToNSString(typeStr)
                   callback:callback
                      color:colorInt
                   disabled:(BOOL)disabled];
 }
 
-- (void)dealloc
-{
+- (void)dealloc {
   [mKey release];
   [mTitle release];
   [mImage release];
   [mType release];
   [mColor release];
   [mNativeIdentifier release];
   [super dealloc];
 }
--- a/widget/cocoa/nsTouchBarUpdater.mm
+++ b/widget/cocoa/nsTouchBarUpdater.mm
@@ -7,39 +7,35 @@
 #include "nsTouchBar.h"
 #include "nsITouchBarInput.h"
 #include "nsTouchBarUpdater.h"
 
 #include "nsCocoaWindow.h"
 #include "nsIBaseWindow.h"
 #include "nsIWidget.h"
 
-#if !defined(MAC_OS_X_VERSION_10_12_2) || \
-    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12_2
-@interface BaseWindow(NSTouchBarProvider)
-@property (strong) NSTouchBar* touchBar;
+#if !defined(MAC_OS_X_VERSION_10_12_2) || MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12_2
+@interface BaseWindow (NSTouchBarProvider)
+@property(strong) NSTouchBar* touchBar;
 @end
 #endif
 
 NS_IMPL_ISUPPORTS(nsTouchBarUpdater, nsITouchBarUpdater);
 
 NS_IMETHODIMP
-nsTouchBarUpdater::UpdateTouchBarInput(nsIBaseWindow* aWindow,
-                                       nsITouchBarInput* aInput)
-{
+nsTouchBarUpdater::UpdateTouchBarInput(nsIBaseWindow* aWindow, nsITouchBarInput* aInput) {
   nsCOMPtr<nsIWidget> widget = nullptr;
   aWindow->GetMainWidget(getter_AddRefs(widget));
   if (!widget) {
     return NS_ERROR_FAILURE;
   }
   BaseWindow* cocoaWin = (BaseWindow*)widget->GetNativeData(NS_NATIVE_WINDOW);
   if (!cocoaWin) {
     return NS_ERROR_FAILURE;
   }
 
   if ([cocoaWin respondsToSelector:@selector(touchBar)]) {
-    TouchBarInput* convertedInput =
-      [[TouchBarInput alloc] initWithXPCOM:aInput];
+    TouchBarInput* convertedInput = [[TouchBarInput alloc] initWithXPCOM:aInput];
     [(nsTouchBar*)cocoaWin.touchBar updateItem:convertedInput];
   }
-  
+
   return NS_OK;
 }
--- a/xpcom/threads/nsThreadManager.cpp
+++ b/xpcom/threads/nsThreadManager.cpp
@@ -212,18 +212,18 @@ nsresult nsThreadManager::Init() {
       env_var_flag
           ? (env_var_flag[0] ? open(env_var_flag, flags, mode) : STDERR_FILENO)
           : 0;
 #endif
 
   nsCOMPtr<nsIIdlePeriod> idlePeriod = new MainThreadIdlePeriod();
 
   mMainThread =
-    CreateMainThread<ThreadEventQueue<PrioritizedEventQueue<EventQueue>>,
-                     EventQueue>(idlePeriod);
+      CreateMainThread<ThreadEventQueue<PrioritizedEventQueue<EventQueue>>,
+                       EventQueue>(idlePeriod);
 
   nsresult rv = mMainThread->InitCurrentThread();
   if (NS_FAILED(rv)) {
     mMainThread = nullptr;
     return rv;
   }
 
   // We need to keep a pointer to the current thread, so we can satisfy