Bug 995730 - Fix style violations in xpcom/base/. r=froydnj,continuation
author: Birunthan Mohanathas <birunthan@mohanathas.com>
Tue, 13 May 2014 20:41:38 +0300
changeset 202204 74e5dc1deb8e0b4045fc7a06af72fd842f03b5fc
parent 202203 9a4275f1337aa835aed5c71fbe6a8c9a99c1e4a9
child 202205 dd3f37cb6e4e22a8d28d2ea6f11036155a5c9ae6
push id: 3741
push user: asasaki@mozilla.com
push date: Mon, 21 Jul 2014 20:25:18 +0000
treeherder: mozilla-beta@4d6f46f5af68 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: froydnj, continuation
bugs: 995730
milestone: 32.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 995730 - Fix style violations in xpcom/base/. r=froydnj,continuation
xpcom/base/AvailableMemoryTracker.cpp
xpcom/base/ClearOnShutdown.cpp
xpcom/base/ClearOnShutdown.h
xpcom/base/CycleCollectedJSRuntime.cpp
xpcom/base/CycleCollectedJSRuntime.h
xpcom/base/Debug.cpp
xpcom/base/StackWalk.h
xpcom/base/StaticMutex.h
xpcom/base/StaticPtr.h
xpcom/base/SystemMemoryReporter.cpp
xpcom/base/SystemMemoryReporter.h
xpcom/base/VisualEventTracer.cpp
xpcom/base/VisualEventTracer.h
xpcom/base/nsAutoPtr.h
xpcom/base/nsAutoRef.h
xpcom/base/nsConsoleMessage.cpp
xpcom/base/nsConsoleMessage.h
xpcom/base/nsConsoleService.cpp
xpcom/base/nsConsoleService.h
xpcom/base/nsCrashOnException.cpp
xpcom/base/nsCrashOnException.h
xpcom/base/nsCycleCollector.cpp
xpcom/base/nsCycleCollector.h
xpcom/base/nsDebugImpl.cpp
xpcom/base/nsDebugImpl.h
xpcom/base/nsDumpUtils.cpp
xpcom/base/nsDumpUtils.h
xpcom/base/nsError.h
xpcom/base/nsErrorService.cpp
xpcom/base/nsGZFileWriter.cpp
xpcom/base/nsISupportsBase.h
xpcom/base/nsInterfaceRequestorAgg.cpp
xpcom/base/nsInterfaceRequestorAgg.h
xpcom/base/nsMacUtilsImpl.cpp
xpcom/base/nsMacUtilsImpl.h
xpcom/base/nsMemoryImpl.cpp
xpcom/base/nsMemoryImpl.h
xpcom/base/nsMemoryInfoDumper.cpp
xpcom/base/nsMemoryInfoDumper.h
xpcom/base/nsMemoryReporterManager.cpp
xpcom/base/nsMemoryReporterManager.h
xpcom/base/nsMessageLoop.cpp
xpcom/base/nsMessageLoop.h
xpcom/base/nsObjCExceptions.h
xpcom/base/nsSetDllDirectory.h
xpcom/base/nsStackWalk.cpp
xpcom/base/nsStackWalk.h
xpcom/base/nsStatusReporterManager.cpp
xpcom/base/nsSystemInfo.cpp
xpcom/base/nsSystemInfo.h
xpcom/base/nsTraceRefcnt.cpp
xpcom/base/nsTraceRefcnt.h
xpcom/base/nsUUIDGenerator.cpp
xpcom/base/nsUUIDGenerator.h
xpcom/base/nsVersionComparatorImpl.cpp
xpcom/base/nsWindowsHelpers.h
xpcom/base/nscore.h
--- a/xpcom/base/AvailableMemoryTracker.cpp
+++ b/xpcom/base/AvailableMemoryTracker.cpp
@@ -79,34 +79,35 @@ namespace {
 
 #define LOG(msg)
 #define LOG2(m1, m2)
 #define LOG3(m1, m2, m3)
 #define LOG4(m1, m2, m3, m4)
 
 #endif
 
-void safe_write(const char *a)
+void
+safe_write(const char* aStr)
 {
   // Well, puts isn't exactly "safe", but at least it doesn't call malloc...
-  fputs(a, stdout);
+  fputs(aStr, stdout);
 }
 
-void safe_write(uint64_t x)
+void
+safe_write(uint64_t aNum)
 {
   // 2^64 is 20 decimal digits.
   const unsigned int max_len = 21;
   char buf[max_len];
   buf[max_len - 1] = '\0';
 
   uint32_t i;
-  for (i = max_len - 2; i < max_len && x > 0; i--)
-  {
-    buf[i] = "0123456789"[x % 10];
-    x /= 10;
+  for (i = max_len - 2; i < max_len && aNum > 0; i--) {
+    buf[i] = "0123456789"[aNum % 10];
+    aNum /= 10;
   }
 
   safe_write(&buf[i + 1]);
 }
 
 #ifdef DEBUG
 #define DEBUG_WARN_IF_FALSE(cond, msg)          \
   do {                                          \
@@ -143,34 +144,35 @@ bool sHooksActive = false;
 
 // Alas, we'd like to use mozilla::TimeStamp, but we can't, because it acquires
 // a lock!
 volatile bool sHasScheduledOneLowMemoryNotification = false;
 volatile PRIntervalTime sLastLowMemoryNotificationTime;
 
 // These are function pointers to the functions we wrap in Init().
 
-void* (WINAPI *sVirtualAllocOrig)
+void* (WINAPI* sVirtualAllocOrig)
   (LPVOID aAddress, SIZE_T aSize, DWORD aAllocationType, DWORD aProtect);
 
-void* (WINAPI *sMapViewOfFileOrig)
+void* (WINAPI* sMapViewOfFileOrig)
   (HANDLE aFileMappingObject, DWORD aDesiredAccess,
    DWORD aFileOffsetHigh, DWORD aFileOffsetLow,
    SIZE_T aNumBytesToMap);
 
-HBITMAP (WINAPI *sCreateDIBSectionOrig)
-  (HDC aDC, const BITMAPINFO *aBitmapInfo,
-   UINT aUsage, VOID **aBits,
+HBITMAP (WINAPI* sCreateDIBSectionOrig)
+  (HDC aDC, const BITMAPINFO* aBitmapInfo,
+   UINT aUsage, VOID** aBits,
    HANDLE aSection, DWORD aOffset);
 
 /**
  * Fire a memory pressure event if it's been long enough since the last one we
  * fired.
  */
-bool MaybeScheduleMemoryPressureEvent()
+bool
+MaybeScheduleMemoryPressureEvent()
 {
   // If this interval rolls over, we may fire an extra memory pressure
   // event, but that's not a big deal.
   PRIntervalTime interval = PR_IntervalNow() - sLastLowMemoryNotificationTime;
   if (sHasScheduledOneLowMemoryNotification &&
       PR_IntervalToMilliseconds(interval) < sLowMemoryNotificationIntervalMS) {
 
     LOG("Not scheduling low physical memory notification, "
@@ -187,46 +189,44 @@ bool MaybeScheduleMemoryPressureEvent()
   sHasScheduledOneLowMemoryNotification = true;
   sLastLowMemoryNotificationTime = PR_IntervalNow();
 
   LOG("Scheduling memory pressure notification.");
   NS_DispatchEventualMemoryPressure(MemPressure_New);
   return true;
 }
 
-void CheckMemAvailable()
+void
+CheckMemAvailable()
 {
   if (!sHooksActive) {
     return;
   }
 
   MEMORYSTATUSEX stat;
   stat.dwLength = sizeof(stat);
   bool success = GlobalMemoryStatusEx(&stat);
 
   DEBUG_WARN_IF_FALSE(success, "GlobalMemoryStatusEx failed.");
 
-  if (success)
-  {
+  if (success) {
     // sLowVirtualMemoryThreshold is in MB, but ullAvailVirtual is in bytes.
     if (stat.ullAvailVirtual < sLowVirtualMemoryThreshold * 1024 * 1024) {
       // If we're running low on virtual memory, unconditionally schedule the
       // notification.  We'll probably crash if we run out of virtual memory,
       // so don't worry about firing this notification too often.
       LOG("Detected low virtual memory.");
       ++sNumLowVirtualMemEvents;
       NS_DispatchEventualMemoryPressure(MemPressure_New);
-    }
-    else if (stat.ullAvailPageFile < sLowCommitSpaceThreshold * 1024 * 1024) {
+    } else if (stat.ullAvailPageFile < sLowCommitSpaceThreshold * 1024 * 1024) {
       LOG("Detected low available page file space.");
       if (MaybeScheduleMemoryPressureEvent()) {
         ++sNumLowCommitSpaceEvents;
       }
-    }
-    else if (stat.ullAvailPhys < sLowPhysicalMemoryThreshold * 1024 * 1024) {
+    } else if (stat.ullAvailPhys < sLowPhysicalMemoryThreshold * 1024 * 1024) {
       LOG("Detected low physical memory.");
       if (MaybeScheduleMemoryPressureEvent()) {
         ++sNumLowPhysicalMemEvents;
       }
     }
   }
 }
 
@@ -273,19 +273,19 @@ MapViewOfFileHook(HANDLE aFileMappingObj
                                      aNumBytesToMap);
   LOG("MapViewOfFileHook");
   CheckMemAvailable();
   return result;
 }
 
 HBITMAP WINAPI
 CreateDIBSectionHook(HDC aDC,
-                     const BITMAPINFO *aBitmapInfo,
+                     const BITMAPINFO* aBitmapInfo,
                      UINT aUsage,
-                     VOID **aBits,
+                     VOID** aBits,
                      HANDLE aSection,
                      DWORD aOffset)
 {
   // There are a lot of calls to CreateDIBSection, so we make some effort not
   // to CheckMemAvailable() for calls to CreateDIBSection which allocate only
   // a small amount of memory.
 
   // If aSection is non-null, CreateDIBSection won't allocate any new memory.
@@ -299,18 +299,19 @@ CreateDIBSectionHook(HDC aDC,
       bitCount = 32;
     }
 
     // |size| contains the expected allocation size in *bits*.  Height may be
     // negative (indicating the direction the DIB is drawn in), so we take the
     // absolute value.
     int64_t size = bitCount * aBitmapInfo->bmiHeader.biWidth *
                               aBitmapInfo->bmiHeader.biHeight;
-    if (size < 0)
+    if (size < 0) {
       size *= -1;
+    }
 
     // If we're allocating more than 1MB, check how much memory is left after
     // the allocation.
     if (size > 1024 * 1024 * 8) {
       LOG3("CreateDIBSectionHook: Large allocation (size=", size, ")");
       doCheck = true;
     }
   }
@@ -453,70 +454,73 @@ nsMemoryPressureWatcher::Init()
                                false);
 }
 
 /**
  * Reacts to all types of memory-pressure events, launches a runnable to
  * free dirty pages held by jemalloc.
  */
 NS_IMETHODIMP
-nsMemoryPressureWatcher::Observe(nsISupports *subject, const char *topic,
-                                 const char16_t *data)
+nsMemoryPressureWatcher::Observe(nsISupports* aSubject, const char* aTopic,
+                                 const char16_t* aData)
 {
-  MOZ_ASSERT(!strcmp(topic, "memory-pressure"), "Unknown topic");
+  MOZ_ASSERT(!strcmp(aTopic, "memory-pressure"), "Unknown topic");
 
   if (sFreeDirtyPages) {
     nsRefPtr<nsIRunnable> runnable = new nsJemallocFreeDirtyPagesRunnable();
 
     NS_DispatchToMainThread(runnable);
   }
 
   return NS_OK;
 }
 
 } // anonymous namespace
 
 namespace mozilla {
 namespace AvailableMemoryTracker {
 
-void Activate()
+void
+Activate()
 {
 #if defined(_M_IX86) && defined(XP_WIN)
   MOZ_ASSERT(sInitialized);
   MOZ_ASSERT(!sHooksActive);
 
   // On 64-bit systems, hardcode sLowVirtualMemoryThreshold to 0 -- we assume
   // we're not going to run out of virtual memory!
   if (sizeof(void*) > 4) {
     sLowVirtualMemoryThreshold = 0;
-  }
-  else {
+  } else {
     Preferences::AddUintVarCache(&sLowVirtualMemoryThreshold,
                                  "memory.low_virtual_mem_threshold_mb", 128);
   }
 
   Preferences::AddUintVarCache(&sLowPhysicalMemoryThreshold,
                                "memory.low_physical_memory_threshold_mb", 0);
   Preferences::AddUintVarCache(&sLowCommitSpaceThreshold,
                                "memory.low_commit_space_threshold_mb", 128);
   Preferences::AddUintVarCache(&sLowMemoryNotificationIntervalMS,
                                "memory.low_memory_notification_interval_ms", 10000);
 
   RegisterStrongMemoryReporter(new LowEventsReporter());
-  RegisterLowMemoryEventsVirtualDistinguishedAmount(LowMemoryEventsVirtualDistinguishedAmount);
-  RegisterLowMemoryEventsPhysicalDistinguishedAmount(LowMemoryEventsPhysicalDistinguishedAmount);
+  RegisterLowMemoryEventsVirtualDistinguishedAmount(
+    LowMemoryEventsVirtualDistinguishedAmount);
+  RegisterLowMemoryEventsPhysicalDistinguishedAmount(
+    LowMemoryEventsPhysicalDistinguishedAmount);
   sHooksActive = true;
 #endif
 
   // This object is held alive by the observer service.
   nsRefPtr<nsMemoryPressureWatcher> watcher = new nsMemoryPressureWatcher();
   watcher->Init();
 }
 
-void Init()
+void
+Init()
 {
   // Do nothing on x86-64, because nsWindowsDllInterceptor is not thread-safe
   // on 64-bit.  (On 32-bit, it's probably thread-safe.)  Even if we run Init()
   // before any other of our threads are running, another process may have
   // started a remote thread which could call VirtualAlloc!
   //
   // Moreover, the benefit of this code is less clear when we're a 64-bit
   // process, because we aren't going to run out of virtual memory, and the
@@ -526,25 +530,25 @@ void Init()
   // Don't register the hooks if we're a build instrumented for PGO: If we're
   // an instrumented build, the compiler adds function calls all over the place
   // which may call VirtualAlloc; this makes it hard to prevent
   // VirtualAllocHook from reentering itself.
   if (!PR_GetEnv("MOZ_PGO_INSTRUMENTED")) {
     sKernel32Intercept.Init("Kernel32.dll");
     sKernel32Intercept.AddHook("VirtualAlloc",
                                reinterpret_cast<intptr_t>(VirtualAllocHook),
-                               (void**) &sVirtualAllocOrig);
+                               reinterpret_cast<void**>(&sVirtualAllocOrig));
     sKernel32Intercept.AddHook("MapViewOfFile",
                                reinterpret_cast<intptr_t>(MapViewOfFileHook),
-                               (void**) &sMapViewOfFileOrig);
+                               reinterpret_cast<void**>(&sMapViewOfFileOrig));
 
     sGdi32Intercept.Init("Gdi32.dll");
     sGdi32Intercept.AddHook("CreateDIBSection",
                             reinterpret_cast<intptr_t>(CreateDIBSectionHook),
-                            (void**) &sCreateDIBSectionOrig);
+                            reinterpret_cast<void**>(&sCreateDIBSectionOrig));
   }
 
   sInitialized = true;
 #endif
 }
 
 } // namespace AvailableMemoryTracker
 } // namespace mozilla
--- a/xpcom/base/ClearOnShutdown.cpp
+++ b/xpcom/base/ClearOnShutdown.cpp
@@ -6,12 +6,12 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/ClearOnShutdown.h"
 
 namespace mozilla {
 namespace ClearOnShutdown_Internal {
 
 bool sHasShutDown = false;
-StaticAutoPtr<LinkedList<ShutdownObserver> > sShutdownObservers;
+StaticAutoPtr<LinkedList<ShutdownObserver>> sShutdownObservers;
 
 } // namespace ClearOnShutdown_Internal
 } // namespace mozilla
--- a/xpcom/base/ClearOnShutdown.h
+++ b/xpcom/base/ClearOnShutdown.h
@@ -37,68 +37,72 @@
 
 namespace mozilla {
 namespace ClearOnShutdown_Internal {
 
 class ShutdownObserver : public LinkedListElement<ShutdownObserver>
 {
 public:
   virtual void Shutdown() = 0;
-  virtual ~ShutdownObserver() {}
+  virtual ~ShutdownObserver()
+  {
+  }
 };
 
 template<class SmartPtr>
 class PointerClearer : public ShutdownObserver
 {
 public:
-  PointerClearer(SmartPtr *aPtr)
+  PointerClearer(SmartPtr* aPtr)
     : mPtr(aPtr)
-  {}
+  {
+  }
 
   virtual void Shutdown()
   {
     if (mPtr) {
       *mPtr = nullptr;
     }
   }
 
 private:
-  SmartPtr *mPtr;
+  SmartPtr* mPtr;
 };
 
 extern bool sHasShutDown;
-extern StaticAutoPtr<LinkedList<ShutdownObserver> > sShutdownObservers;
+extern StaticAutoPtr<LinkedList<ShutdownObserver>> sShutdownObservers;
 
 } // namespace ClearOnShutdown_Internal
 
 template<class SmartPtr>
-inline void ClearOnShutdown(SmartPtr *aPtr)
+inline void
+ClearOnShutdown(SmartPtr* aPtr)
 {
   using namespace ClearOnShutdown_Internal;
 
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(!sHasShutDown);
 
   if (!sShutdownObservers) {
     sShutdownObservers = new LinkedList<ShutdownObserver>();
   }
   sShutdownObservers->insertBack(new PointerClearer<SmartPtr>(aPtr));
 }
 
 // Called when XPCOM is shutting down, after all shutdown notifications have
 // been sent and after all threads' event loops have been purged.
-inline void KillClearOnShutdown()
+inline void
+KillClearOnShutdown()
 {
   using namespace ClearOnShutdown_Internal;
 
   MOZ_ASSERT(NS_IsMainThread());
 
   if (sShutdownObservers) {
-    ShutdownObserver *observer;
-    while ((observer = sShutdownObservers->popFirst())) {
+    while (ShutdownObserver* observer = sShutdownObservers->popFirst()) {
       observer->Shutdown();
       delete observer;
     }
   }
 
   sShutdownObservers = nullptr;
   sHasShutDown = true;
 }
--- a/xpcom/base/CycleCollectedJSRuntime.cpp
+++ b/xpcom/base/CycleCollectedJSRuntime.cpp
@@ -73,17 +73,17 @@
 using namespace mozilla;
 using namespace mozilla::dom;
 
 namespace mozilla {
 
 struct DeferredFinalizeFunctionHolder
 {
   DeferredFinalizeFunction run;
-  void *data;
+  void* data;
 };
 
 class IncrementalFinalizeRunnable : public nsRunnable
 {
   typedef nsAutoTArray<DeferredFinalizeFunctionHolder, 16> DeferredFinalizeArray;
   typedef CycleCollectedJSRuntime::DeferredFinalizerTable DeferredFinalizerTable;
 
   CycleCollectedJSRuntime* mRuntime;
@@ -95,191 +95,198 @@ class IncrementalFinalizeRunnable : publ
 
   static PLDHashOperator
   DeferredFinalizerEnumerator(DeferredFinalizeFunction& aFunction,
                               void*& aData,
                               void* aClosure);
 
 public:
   IncrementalFinalizeRunnable(CycleCollectedJSRuntime* aRt,
-                              nsTArray<nsISupports*>& mSupports,
+                              nsTArray<nsISupports*>& aMSupports,
                               DeferredFinalizerTable& aFinalizerTable);
   virtual ~IncrementalFinalizeRunnable();
 
   void ReleaseNow(bool aLimited);
 
   NS_DECL_NSIRUNNABLE
 };
 
 } // namespace mozilla
 
 inline bool
-AddToCCKind(JSGCTraceKind kind)
+AddToCCKind(JSGCTraceKind aKind)
 {
-  return kind == JSTRACE_OBJECT || kind == JSTRACE_SCRIPT;
+  return aKind == JSTRACE_OBJECT || aKind == JSTRACE_SCRIPT;
 }
 
 static void
-TraceWeakMappingChild(JSTracer* trc, void** thingp, JSGCTraceKind kind);
+TraceWeakMappingChild(JSTracer* aTrc, void** aThingp, JSGCTraceKind aKind);
 
 struct NoteWeakMapChildrenTracer : public JSTracer
 {
-  NoteWeakMapChildrenTracer(JSRuntime *rt, nsCycleCollectionNoteRootCallback& cb)
-    : JSTracer(rt, TraceWeakMappingChild), mCb(cb)
+  NoteWeakMapChildrenTracer(JSRuntime* aRt,
+                            nsCycleCollectionNoteRootCallback& aCb)
+    : JSTracer(aRt, TraceWeakMappingChild), mCb(aCb)
   {
   }
   nsCycleCollectionNoteRootCallback& mCb;
   bool mTracedAny;
   JSObject* mMap;
   void* mKey;
   void* mKeyDelegate;
 };
 
 static void
-TraceWeakMappingChild(JSTracer* trc, void** thingp, JSGCTraceKind kind)
+TraceWeakMappingChild(JSTracer* aTrc, void** aThingp, JSGCTraceKind aKind)
 {
-  MOZ_ASSERT(trc->callback == TraceWeakMappingChild);
-  void* thing = *thingp;
+  MOZ_ASSERT(aTrc->callback == TraceWeakMappingChild);
+  void* thing = *aThingp;
   NoteWeakMapChildrenTracer* tracer =
-    static_cast<NoteWeakMapChildrenTracer*>(trc);
+    static_cast<NoteWeakMapChildrenTracer*>(aTrc);
 
-  if (kind == JSTRACE_STRING) {
+  if (aKind == JSTRACE_STRING) {
     return;
   }
 
   if (!xpc_IsGrayGCThing(thing) && !tracer->mCb.WantAllTraces()) {
     return;
   }
 
-  if (AddToCCKind(kind)) {
+  if (AddToCCKind(aKind)) {
     tracer->mCb.NoteWeakMapping(tracer->mMap, tracer->mKey, tracer->mKeyDelegate, thing);
     tracer->mTracedAny = true;
   } else {
-    JS_TraceChildren(trc, thing, kind);
+    JS_TraceChildren(aTrc, thing, aKind);
   }
 }
 
 struct NoteWeakMapsTracer : public js::WeakMapTracer
 {
-  NoteWeakMapsTracer(JSRuntime* rt, js::WeakMapTraceCallback cb,
-                     nsCycleCollectionNoteRootCallback& cccb)
-    : js::WeakMapTracer(rt, cb), mCb(cccb), mChildTracer(rt, cccb)
+  NoteWeakMapsTracer(JSRuntime* aRt, js::WeakMapTraceCallback aCb,
+                     nsCycleCollectionNoteRootCallback& aCccb)
+    : js::WeakMapTracer(aRt, aCb), mCb(aCccb), mChildTracer(aRt, aCccb)
   {
   }
   nsCycleCollectionNoteRootCallback& mCb;
   NoteWeakMapChildrenTracer mChildTracer;
 };
 
 static void
-TraceWeakMapping(js::WeakMapTracer* trc, JSObject* m,
-                 void* k, JSGCTraceKind kkind,
-                 void* v, JSGCTraceKind vkind)
+TraceWeakMapping(js::WeakMapTracer* aTrc, JSObject* aMap,
+                 void* aKey, JSGCTraceKind aKeyKind,
+                 void* aValue, JSGCTraceKind aValueKind)
 {
-  MOZ_ASSERT(trc->callback == TraceWeakMapping);
-  NoteWeakMapsTracer* tracer = static_cast<NoteWeakMapsTracer* >(trc);
+  MOZ_ASSERT(aTrc->callback == TraceWeakMapping);
+  NoteWeakMapsTracer* tracer = static_cast<NoteWeakMapsTracer*>(aTrc);
 
   // If nothing that could be held alive by this entry is marked gray, return.
-  if ((!k || !xpc_IsGrayGCThing(k)) && MOZ_LIKELY(!tracer->mCb.WantAllTraces())) {
-    if (!v || !xpc_IsGrayGCThing(v) || vkind == JSTRACE_STRING) {
+  if ((!aKey || !xpc_IsGrayGCThing(aKey)) &&
+      MOZ_LIKELY(!tracer->mCb.WantAllTraces())) {
+    if (!aValue || !xpc_IsGrayGCThing(aValue) ||
+        aValueKind == JSTRACE_STRING) {
       return;
     }
   }
 
   // The cycle collector can only properly reason about weak maps if it can
   // reason about the liveness of their keys, which in turn requires that
   // the key can be represented in the cycle collector graph.  All existing
   // uses of weak maps use either objects or scripts as keys, which are okay.
-  MOZ_ASSERT(AddToCCKind(kkind));
+  MOZ_ASSERT(AddToCCKind(aKeyKind));
 
   // As an emergency fallback for non-debug builds, if the key is not
   // representable in the cycle collector graph, we treat it as marked.  This
   // can cause leaks, but is preferable to ignoring the binding, which could
   // cause the cycle collector to free live objects.
-  if (!AddToCCKind(kkind)) {
-    k = nullptr;
+  if (!AddToCCKind(aKeyKind)) {
+    aKey = nullptr;
   }
 
   JSObject* kdelegate = nullptr;
-  if (k && kkind == JSTRACE_OBJECT) {
-    kdelegate = js::GetWeakmapKeyDelegate((JSObject*)k);
+  if (aKey && aKeyKind == JSTRACE_OBJECT) {
+    kdelegate = js::GetWeakmapKeyDelegate((JSObject*)aKey);
   }
 
-  if (AddToCCKind(vkind)) {
-    tracer->mCb.NoteWeakMapping(m, k, kdelegate, v);
+  if (AddToCCKind(aValueKind)) {
+    tracer->mCb.NoteWeakMapping(aMap, aKey, kdelegate, aValue);
   } else {
     tracer->mChildTracer.mTracedAny = false;
-    tracer->mChildTracer.mMap = m;
-    tracer->mChildTracer.mKey = k;
+    tracer->mChildTracer.mMap = aMap;
+    tracer->mChildTracer.mKey = aKey;
     tracer->mChildTracer.mKeyDelegate = kdelegate;
 
-    if (v && vkind != JSTRACE_STRING) {
-      JS_TraceChildren(&tracer->mChildTracer, v, vkind);
+    if (aValue && aValueKind != JSTRACE_STRING) {
+      JS_TraceChildren(&tracer->mChildTracer, aValue, aValueKind);
     }
 
     // The delegate could hold alive the key, so report something to the CC
     // if we haven't already.
-    if (!tracer->mChildTracer.mTracedAny && k && xpc_IsGrayGCThing(k) && kdelegate) {
-      tracer->mCb.NoteWeakMapping(m, k, kdelegate, nullptr);
+    if (!tracer->mChildTracer.mTracedAny &&
+        aKey && xpc_IsGrayGCThing(aKey) && kdelegate) {
+      tracer->mCb.NoteWeakMapping(aMap, aKey, kdelegate, nullptr);
     }
   }
 }
 
 // This is based on the logic in TraceWeakMapping.
 struct FixWeakMappingGrayBitsTracer : public js::WeakMapTracer
 {
-  FixWeakMappingGrayBitsTracer(JSRuntime* rt)
-    : js::WeakMapTracer(rt, FixWeakMappingGrayBits)
-  {}
+  FixWeakMappingGrayBitsTracer(JSRuntime* aRt)
+    : js::WeakMapTracer(aRt, FixWeakMappingGrayBits)
+  {
+  }
 
   void
   FixAll()
   {
     do {
       mAnyMarked = false;
       js::TraceWeakMaps(this);
     } while (mAnyMarked);
   }
 
 private:
 
   static void
-  FixWeakMappingGrayBits(js::WeakMapTracer* trc, JSObject* m,
-                         void* k, JSGCTraceKind kkind,
-                         void* v, JSGCTraceKind vkind)
+  FixWeakMappingGrayBits(js::WeakMapTracer* aTrc, JSObject* aMap,
+                         void* aKey, JSGCTraceKind aKeyKind,
+                         void* aValue, JSGCTraceKind aValueKind)
   {
-    MOZ_ASSERT(!JS::IsIncrementalGCInProgress(trc->runtime),
+    MOZ_ASSERT(!JS::IsIncrementalGCInProgress(aTrc->runtime),
                "Don't call FixWeakMappingGrayBits during a GC.");
 
-    FixWeakMappingGrayBitsTracer* tracer = static_cast<FixWeakMappingGrayBitsTracer*>(trc);
+    FixWeakMappingGrayBitsTracer* tracer =
+      static_cast<FixWeakMappingGrayBitsTracer*>(aTrc);
 
     // If nothing that could be held alive by this entry is marked gray, return.
-    bool delegateMightNeedMarking = k && xpc_IsGrayGCThing(k);
-    bool valueMightNeedMarking = v && xpc_IsGrayGCThing(v) && vkind != JSTRACE_STRING;
+    bool delegateMightNeedMarking = aKey && xpc_IsGrayGCThing(aKey);
+    bool valueMightNeedMarking = aValue && xpc_IsGrayGCThing(aValue) &&
+                                 aValueKind != JSTRACE_STRING;
     if (!delegateMightNeedMarking && !valueMightNeedMarking) {
       return;
     }
 
-    if (!AddToCCKind(kkind)) {
-      k = nullptr;
+    if (!AddToCCKind(aKeyKind)) {
+      aKey = nullptr;
     }
 
-    if (delegateMightNeedMarking && kkind == JSTRACE_OBJECT) {
-      JSObject* kdelegate = js::GetWeakmapKeyDelegate((JSObject*)k);
+    if (delegateMightNeedMarking && aKeyKind == JSTRACE_OBJECT) {
+      JSObject* kdelegate = js::GetWeakmapKeyDelegate((JSObject*)aKey);
       if (kdelegate && !xpc_IsGrayGCThing(kdelegate)) {
-        if (JS::UnmarkGrayGCThingRecursively(k, JSTRACE_OBJECT)) {
+        if (JS::UnmarkGrayGCThingRecursively(aKey, JSTRACE_OBJECT)) {
           tracer->mAnyMarked = true;
         }
       }
     }
 
-    if (v && xpc_IsGrayGCThing(v) &&
-        (!k || !xpc_IsGrayGCThing(k)) &&
-        (!m || !xpc_IsGrayGCThing(m)) &&
-        vkind != JSTRACE_SHAPE) {
-      if (JS::UnmarkGrayGCThingRecursively(v, vkind)) {
+    if (aValue && xpc_IsGrayGCThing(aValue) &&
+        (!aKey || !xpc_IsGrayGCThing(aKey)) &&
+        (!aMap || !xpc_IsGrayGCThing(aMap)) &&
+        aValueKind != JSTRACE_SHAPE) {
+      if (JS::UnmarkGrayGCThingRecursively(aValue, aValueKind)) {
         tracer->mAnyMarked = true;
       }
     }
   }
 
   bool mAnyMarked;
 };
 
@@ -299,80 +306,82 @@ CheckParticipatesInCycleCollection(void*
 {
   Closure* closure = static_cast<Closure*>(aClosure);
 
   if (closure->mCycleCollectionEnabled) {
     return;
   }
 
   if (AddToCCKind(js::GCThingTraceKind(aThing)) &&
-      xpc_IsGrayGCThing(aThing))
-  {
+      xpc_IsGrayGCThing(aThing)) {
     closure->mCycleCollectionEnabled = true;
   }
 }
 
 static PLDHashOperator
-NoteJSHolder(void *holder, nsScriptObjectTracer *&tracer, void *arg)
+NoteJSHolder(void* aHolder, nsScriptObjectTracer*& aTracer, void* aArg)
 {
-  Closure *closure = static_cast<Closure*>(arg);
+  Closure* closure = static_cast<Closure*>(aArg);
 
   bool noteRoot;
   if (MOZ_UNLIKELY(closure->mCb->WantAllTraces())) {
     noteRoot = true;
   } else {
     closure->mCycleCollectionEnabled = false;
-    tracer->Trace(holder, TraceCallbackFunc(CheckParticipatesInCycleCollection), closure);
+    aTracer->Trace(aHolder,
+                   TraceCallbackFunc(CheckParticipatesInCycleCollection),
+                   closure);
     noteRoot = closure->mCycleCollectionEnabled;
   }
 
   if (noteRoot) {
-    closure->mCb->NoteNativeRoot(holder, tracer);
+    closure->mCb->NoteNativeRoot(aHolder, aTracer);
   }
 
   return PL_DHASH_NEXT;
 }
 
 NS_IMETHODIMP
-JSGCThingParticipant::Traverse(void* p, nsCycleCollectionTraversalCallback& cb)
+JSGCThingParticipant::Traverse(void* aPtr,
+                               nsCycleCollectionTraversalCallback& aCb)
 {
   CycleCollectedJSRuntime* runtime = reinterpret_cast<CycleCollectedJSRuntime*>
     (reinterpret_cast<char*>(this) -
      offsetof(CycleCollectedJSRuntime, mGCThingCycleCollectorGlobal));
 
   runtime->TraverseGCThing(CycleCollectedJSRuntime::TRAVERSE_FULL,
-                           p, js::GCThingTraceKind(p), cb);
+                           aPtr, js::GCThingTraceKind(aPtr), aCb);
   return NS_OK;
 }
 
 // NB: This is only used to initialize the participant in
 // CycleCollectedJSRuntime. It should never be used directly.
 static JSGCThingParticipant sGCThingCycleCollectorGlobal;
 
 NS_IMETHODIMP
-JSZoneParticipant::Traverse(void* p, nsCycleCollectionTraversalCallback& cb)
+JSZoneParticipant::Traverse(void* aPtr, nsCycleCollectionTraversalCallback& aCb)
 {
   CycleCollectedJSRuntime* runtime = reinterpret_cast<CycleCollectedJSRuntime*>
     (reinterpret_cast<char*>(this) -
      offsetof(CycleCollectedJSRuntime, mJSZoneCycleCollectorGlobal));
 
-  MOZ_ASSERT(!cb.WantAllTraces());
-  JS::Zone* zone = static_cast<JS::Zone*>(p);
+  MOZ_ASSERT(!aCb.WantAllTraces());
+  JS::Zone* zone = static_cast<JS::Zone*>(aPtr);
 
-  runtime->TraverseZone(zone, cb);
+  runtime->TraverseZone(zone, aCb);
   return NS_OK;
 }
 
 static void
 NoteJSChildTracerShim(JSTracer* aTrc, void** aThingp, JSGCTraceKind aTraceKind);
 
 struct TraversalTracer : public JSTracer
 {
-  TraversalTracer(JSRuntime *rt, nsCycleCollectionTraversalCallback& aCb)
-    : JSTracer(rt, NoteJSChildTracerShim, DoNotTraceWeakMaps), mCb(aCb)
+  TraversalTracer(JSRuntime* aRt, nsCycleCollectionTraversalCallback& aCb)
+    : JSTracer(aRt, NoteJSChildTracerShim, DoNotTraceWeakMaps), mCb(aCb)
   {
   }
   nsCycleCollectionTraversalCallback& mCb;
 };
 
 static void
 NoteJSChild(JSTracer* aTrc, void* aThing, JSGCTraceKind aTraceKind)
 {
@@ -396,17 +405,17 @@ NoteJSChild(JSTracer* aTrc, void* aThing
       // based on DumpNotify in jsapi.cpp
       if (tracer->debugPrinter()) {
         char buffer[200];
         tracer->debugPrinter()(aTrc, buffer, sizeof(buffer));
         tracer->mCb.NoteNextEdgeName(buffer);
       } else if (tracer->debugPrintIndex() != (size_t)-1) {
         char buffer[200];
         JS_snprintf(buffer, sizeof(buffer), "%s[%lu]",
-                    static_cast<const char *>(tracer->debugPrintArg()),
+                    static_cast<const char*>(tracer->debugPrintArg()),
                     tracer->debugPrintIndex());
         tracer->mCb.NoteNextEdgeName(buffer);
       } else {
         tracer->mCb.NoteNextEdgeName(static_cast<const char*>(tracer->debugPrintArg()));
       }
     }
     tracer->mCb.NoteJSChild(aThing);
   } else if (aTraceKind == JSTRACE_SHAPE) {
@@ -452,20 +461,20 @@ NoteJSChildGrayWrapperShim(void* aData, 
 
 // NB: This is only used to initialize the participant in
 // CycleCollectedJSRuntime. It should never be used directly.
 static const JSZoneParticipant sJSZoneCycleCollectorGlobal;
 
 CycleCollectedJSRuntime::CycleCollectedJSRuntime(JSRuntime* aParentRuntime,
                                                  uint32_t aMaxbytes,
                                                  JSUseHelperThreads aUseHelperThreads)
-  : mGCThingCycleCollectorGlobal(sGCThingCycleCollectorGlobal),
-    mJSZoneCycleCollectorGlobal(sJSZoneCycleCollectorGlobal),
-    mJSRuntime(nullptr),
-    mJSHolders(512)
+  : mGCThingCycleCollectorGlobal(sGCThingCycleCollectorGlobal)
+  , mJSZoneCycleCollectorGlobal(sJSZoneCycleCollectorGlobal)
+  , mJSRuntime(nullptr)
+  , mJSHolders(512)
 {
   mozilla::dom::InitScriptSettings();
 
   mJSRuntime = JS_NewRuntime(aMaxbytes, aUseHelperThreads, aParentRuntime);
   if (!mJSRuntime) {
     MOZ_CRASH();
   }
 
@@ -505,19 +514,19 @@ CycleCollectedJSRuntime::SizeOfExcluding
   // nullptr for the second arg;  we're not measuring anything hanging off the
   // entries in mJSHolders.
   n += mJSHolders.SizeOfExcludingThis(nullptr, aMallocSizeOf);
 
   return n;
 }
 
 static PLDHashOperator
-UnmarkJSHolder(void* holder, nsScriptObjectTracer*& tracer, void* arg)
+UnmarkJSHolder(void* aHolder, nsScriptObjectTracer*& aTracer, void* aArg)
 {
-  tracer->CanSkip(holder, true);
+  aTracer->CanSkip(aHolder, true);
   return PL_DHASH_NEXT;
 }
 
 void
 CycleCollectedJSRuntime::UnmarkSkippableJSHolders()
 {
   mJSHolders.Enumerate(UnmarkJSHolder, nullptr);
 }
@@ -642,17 +651,18 @@ CycleCollectedJSRuntime::TraverseGCThing
   }
 
   if (aTraceKind == JSTRACE_OBJECT) {
     JSObject* obj = static_cast<JSObject*>(aThing);
     NoteGCThingXPCOMChildren(js::GetObjectClass(obj), obj, aCb);
   }
 }
 
-struct TraverseObjectShimClosure {
+struct TraverseObjectShimClosure
+{
   nsCycleCollectionTraversalCallback& cb;
   CycleCollectedJSRuntime* self;
 };
 
 void
 CycleCollectedJSRuntime::TraverseZone(JS::Zone* aZone,
                                       nsCycleCollectionTraversalCallback& aCb)
 {
@@ -747,36 +757,50 @@ CycleCollectedJSRuntime::ContextCallback
 
   MOZ_ASSERT(JS_GetRuntime(aContext) == self->Runtime());
 
   return self->CustomContextCallback(aContext, aOperation);
 }
 
 struct JsGcTracer : public TraceCallbacks
 {
-  virtual void Trace(JS::Heap<JS::Value> *p, const char *name, void *closure) const MOZ_OVERRIDE {
-    JS_CallHeapValueTracer(static_cast<JSTracer*>(closure), p, name);
+  virtual void Trace(JS::Heap<JS::Value>* aPtr, const char* aName,
+                     void* aClosure) const MOZ_OVERRIDE
+  {
+    JS_CallHeapValueTracer(static_cast<JSTracer*>(aClosure), aPtr, aName);
   }
-  virtual void Trace(JS::Heap<jsid> *p, const char *name, void *closure) const MOZ_OVERRIDE {
-    JS_CallHeapIdTracer(static_cast<JSTracer*>(closure), p, name);
+  virtual void Trace(JS::Heap<jsid>* aPtr, const char* aName,
+                     void* aClosure) const MOZ_OVERRIDE
+  {
+    JS_CallHeapIdTracer(static_cast<JSTracer*>(aClosure), aPtr, aName);
   }
-  virtual void Trace(JS::Heap<JSObject *> *p, const char *name, void *closure) const MOZ_OVERRIDE {
-    JS_CallHeapObjectTracer(static_cast<JSTracer*>(closure), p, name);
+  virtual void Trace(JS::Heap<JSObject*>* aPtr, const char* aName,
+                     void* aClosure) const MOZ_OVERRIDE
+  {
+    JS_CallHeapObjectTracer(static_cast<JSTracer*>(aClosure), aPtr, aName);
   }
-  virtual void Trace(JS::TenuredHeap<JSObject *> *p, const char *name, void *closure) const MOZ_OVERRIDE {
-    JS_CallTenuredObjectTracer(static_cast<JSTracer*>(closure), p, name);
+  virtual void Trace(JS::TenuredHeap<JSObject*>* aPtr, const char* aName,
+                     void* aClosure) const MOZ_OVERRIDE
+  {
+    JS_CallTenuredObjectTracer(static_cast<JSTracer*>(aClosure), aPtr, aName);
   }
-  virtual void Trace(JS::Heap<JSString *> *p, const char *name, void *closure) const MOZ_OVERRIDE {
-    JS_CallHeapStringTracer(static_cast<JSTracer*>(closure), p, name);
+  virtual void Trace(JS::Heap<JSString*>* aPtr, const char* aName,
+                     void* aClosure) const MOZ_OVERRIDE
+  {
+    JS_CallHeapStringTracer(static_cast<JSTracer*>(aClosure), aPtr, aName);
   }
-  virtual void Trace(JS::Heap<JSScript *> *p, const char *name, void *closure) const MOZ_OVERRIDE {
-    JS_CallHeapScriptTracer(static_cast<JSTracer*>(closure), p, name);
+  virtual void Trace(JS::Heap<JSScript*>* aPtr, const char* aName,
+                     void* aClosure) const MOZ_OVERRIDE
+  {
+    JS_CallHeapScriptTracer(static_cast<JSTracer*>(aClosure), aPtr, aName);
   }
-  virtual void Trace(JS::Heap<JSFunction *> *p, const char *name, void *closure) const MOZ_OVERRIDE {
-    JS_CallHeapFunctionTracer(static_cast<JSTracer*>(closure), p, name);
+  virtual void Trace(JS::Heap<JSFunction*>* aPtr, const char* aName,
+                     void* aClosure) const MOZ_OVERRIDE
+  {
+    JS_CallHeapFunctionTracer(static_cast<JSTracer*>(aClosure), aPtr, aName);
   }
 };
 
 static PLDHashOperator
 TraceJSHolder(void* aHolder, nsScriptObjectTracer*& aTracer, void* aArg)
 {
   aTracer->Trace(aHolder, JsGcTracer(), aArg);
 
@@ -892,17 +916,17 @@ CycleCollectedJSRuntime::GCThingParticip
 
 nsCycleCollectionParticipant*
 CycleCollectedJSRuntime::ZoneParticipant()
 {
   return &mJSZoneCycleCollectorGlobal;
 }
 
 nsresult
-CycleCollectedJSRuntime::TraverseRoots(nsCycleCollectionNoteRootCallback &aCb)
+CycleCollectedJSRuntime::TraverseRoots(nsCycleCollectionNoteRootCallback& aCb)
 {
   TraverseNativeRoots(aCb);
 
   NoteWeakMapsTracer trc(mJSRuntime, TraceWeakMapping, aCb);
   js::TraceWeakMaps(&trc);
 
   return NS_OK;
 }
@@ -980,19 +1004,19 @@ CycleCollectedJSRuntime::DeferredFinaliz
 
 void
 CycleCollectedJSRuntime::DeferredFinalize(nsISupports* aSupports)
 {
   mDeferredSupports.AppendElement(aSupports);
 }
 
 void
-CycleCollectedJSRuntime::DumpJSHeap(FILE* file)
+CycleCollectedJSRuntime::DumpJSHeap(FILE* aFile)
 {
-  js::DumpHeapComplete(Runtime(), file, js::CollectNurseryBeforeDump);
+  js::DumpHeapComplete(Runtime(), aFile, js::CollectNurseryBeforeDump);
 }
 
 
 bool
 ReleaseSliceNow(uint32_t aSlice, void* aData)
 {
   MOZ_ASSERT(aSlice > 0, "nonsensical/useless call with slice == 0");
   nsTArray<nsISupports*>* items = static_cast<nsTArray<nsISupports*>*>(aData);
@@ -1023,18 +1047,18 @@ IncrementalFinalizeRunnable::DeferredFin
   function->data = aData;
 
   return PL_DHASH_REMOVE;
 }
 
 IncrementalFinalizeRunnable::IncrementalFinalizeRunnable(CycleCollectedJSRuntime* aRt,
                                                          nsTArray<nsISupports*>& aSupports,
                                                          DeferredFinalizerTable& aFinalizers)
-  : mRuntime(aRt),
-    mFinalizeFunctionToRun(0)
+  : mRuntime(aRt)
+  , mFinalizeFunctionToRun(0)
 {
   this->mSupports.SwapElements(aSupports);
   DeferredFinalizeFunctionHolder* function = mDeferredFinalizeFunctions.AppendElement();
   function->run = ReleaseSliceNow;
   function->data = &this->mSupports;
 
   // Enumerate the hashtable into our array.
   aFinalizers.Enumerate(DeferredFinalizerEnumerator, &mDeferredFinalizeFunctions);
@@ -1053,17 +1077,17 @@ IncrementalFinalizeRunnable::ReleaseNow(
              "We should have at least ReleaseSliceNow to run");
   MOZ_ASSERT(mFinalizeFunctionToRun < mDeferredFinalizeFunctions.Length(),
              "No more finalizers to run?");
 
   TimeDuration sliceTime = TimeDuration::FromMilliseconds(SliceMillis);
   TimeStamp started = TimeStamp::Now();
   bool timeout = false;
   do {
-    const DeferredFinalizeFunctionHolder &function =
+    const DeferredFinalizeFunctionHolder& function =
       mDeferredFinalizeFunctions[mFinalizeFunctionToRun];
     if (aLimited) {
       bool done = false;
       while (!timeout && !done) {
         /*
          * We don't want to read the clock too often, so we try to
          * release slices of 100 items.
          */
@@ -1134,18 +1158,17 @@ CycleCollectedJSRuntime::FinalizeDeferre
 
 void
 CycleCollectedJSRuntime::OnGC(JSGCStatus aStatus)
 {
   switch (aStatus) {
     case JSGC_BEGIN:
       nsCycleCollector_prepareForGarbageCollection();
       break;
-    case JSGC_END:
-    {
+    case JSGC_END: {
       /*
        * If the previous GC created a runnable to finalize objects
        * incrementally, and if it hasn't finished yet, finish it now. We
        * don't want these to build up. We also don't want to allow any
        * existing incremental finalize runnables to run after a
        * non-incremental GC, since they are often used to detect leaks.
        */
       if (mFinalizeRunnable) {
--- a/xpcom/base/CycleCollectedJSRuntime.h
+++ b/xpcom/base/CycleCollectedJSRuntime.h
@@ -23,57 +23,59 @@ namespace js {
 class Class;
 }
 
 namespace mozilla {
 
 class JSGCThingParticipant: public nsCycleCollectionParticipant
 {
 public:
-  NS_IMETHOD_(void) Root(void *n)
+  NS_IMETHOD_(void) Root(void* aPtr)
   {
   }
 
-  NS_IMETHOD_(void) Unlink(void *n)
+  NS_IMETHOD_(void) Unlink(void* aPtr)
   {
   }
 
-  NS_IMETHOD_(void) Unroot(void *n)
+  NS_IMETHOD_(void) Unroot(void* aPtr)
   {
   }
 
-  NS_IMETHOD_(void) DeleteCycleCollectable(void *n)
+  NS_IMETHOD_(void) DeleteCycleCollectable(void* aPtr)
   {
   }
 
-  NS_IMETHOD Traverse(void *n, nsCycleCollectionTraversalCallback &cb);
+  NS_IMETHOD Traverse(void* aPtr, nsCycleCollectionTraversalCallback& aCb);
 };
 
 class JSZoneParticipant : public nsCycleCollectionParticipant
 {
 public:
-  MOZ_CONSTEXPR JSZoneParticipant(): nsCycleCollectionParticipant() {}
-
-  NS_IMETHOD_(void) Root(void *p)
+  MOZ_CONSTEXPR JSZoneParticipant(): nsCycleCollectionParticipant()
   {
   }
 
-  NS_IMETHOD_(void) Unlink(void *p)
+  NS_IMETHOD_(void) Root(void* aPtr)
   {
   }
 
-  NS_IMETHOD_(void) Unroot(void *p)
+  NS_IMETHOD_(void) Unlink(void* aPtr)
   {
   }
 
-  NS_IMETHOD_(void) DeleteCycleCollectable(void *n)
+  NS_IMETHOD_(void) Unroot(void* aPtr)
   {
   }
 
-  NS_IMETHOD Traverse(void *p, nsCycleCollectionTraversalCallback &cb);
+  NS_IMETHOD_(void) DeleteCycleCollectable(void* aPtr)
+  {
+  }
+
+  NS_IMETHOD Traverse(void* aPtr, nsCycleCollectionTraversalCallback& aCb);
 };
 
 class IncrementalFinalizeRunnable;
 
 // Contains various stats about the cycle collection.
 struct CycleCollectorResults
 {
   CycleCollectorResults()
@@ -114,20 +116,26 @@ protected:
   CycleCollectedJSRuntime(JSRuntime* aParentRuntime,
                           uint32_t aMaxbytes,
                           JSUseHelperThreads aUseHelperThreads);
   virtual ~CycleCollectedJSRuntime();
 
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
   void UnmarkSkippableJSHolders();
 
-  virtual void TraverseAdditionalNativeRoots(nsCycleCollectionNoteRootCallback& aCb) {}
-  virtual void TraceAdditionalNativeGrayRoots(JSTracer* aTracer) {}
+  virtual void TraverseAdditionalNativeRoots(nsCycleCollectionNoteRootCallback& aCb)
+  {
+  }
+  virtual void TraceAdditionalNativeGrayRoots(JSTracer* aTracer)
+  {
+  }
 
-  virtual void CustomGCCallback(JSGCStatus aStatus) {}
+  virtual void CustomGCCallback(JSGCStatus aStatus)
+  {
+  }
   virtual bool CustomContextCallback(JSContext* aCx, unsigned aOperation)
   {
     return true; // Don't block context creation.
   }
 
 private:
 
   void
@@ -201,32 +209,32 @@ public:
 #endif
 
   already_AddRefed<nsIException> GetPendingException() const;
   void SetPendingException(nsIException* aException);
 
   nsCycleCollectionParticipant* GCThingParticipant();
   nsCycleCollectionParticipant* ZoneParticipant();
 
-  nsresult TraverseRoots(nsCycleCollectionNoteRootCallback &aCb);
+  nsresult TraverseRoots(nsCycleCollectionNoteRootCallback& aCb);
   bool UsefulToMergeZones() const;
   void FixWeakMappingGrayBits() const;
   bool AreGCGrayBitsValid() const;
-  void GarbageCollect(uint32_t reason) const;
+  void GarbageCollect(uint32_t aReason) const;
 
   void DeferredFinalize(DeferredFinalizeAppendFunction aAppendFunc,
                         DeferredFinalizeFunction aFunc,
                         void* aThing);
   void DeferredFinalize(nsISupports* aSupports);
 
   void DumpJSHeap(FILE* aFile);
 
   virtual void PrepareForForgetSkippable() = 0;
   virtual void BeginCycleCollectionCallback() = 0;
-  virtual void EndCycleCollectionCallback(CycleCollectorResults &aResults) = 0;
+  virtual void EndCycleCollectionCallback(CycleCollectorResults& aResults) = 0;
   virtual void DispatchDeferredDeletion(bool aContinuation) = 0;
 
   JSRuntime* Runtime() const
   {
     MOZ_ASSERT(mJSRuntime);
     return mJSRuntime;
   }
 
--- a/xpcom/base/Debug.cpp
+++ b/xpcom/base/Debug.cpp
@@ -6,16 +6,17 @@
 #include "mozilla/Debug.h"
 
 #ifdef XP_WIN
 #include <windows.h>
 #endif
 
 #ifdef XP_WIN
 
-void mozilla::PrintToDebugger(const char* aStr)
+void
+mozilla::PrintToDebugger(const char* aStr)
 {
   if (::IsDebuggerPresent()) {
     ::OutputDebugStringA(aStr);
   }
 }
 
 #endif
--- a/xpcom/base/StackWalk.h
+++ b/xpcom/base/StackWalk.h
@@ -10,14 +10,14 @@
 
 // XXX: it would be nice to eventually remove this header dependency on nsStackWalk.h
 #include "nsStackWalk.h"
 
 namespace mozilla {
 
 nsresult
 FramePointerStackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
-                      uint32_t aMaxFrames, void *aClosure, void **bp,
-                      void *stackEnd);
+                      uint32_t aMaxFrames, void* aClosure, void** aBp,
+                      void* aStackEnd);
 
 }
 
 #endif /* !defined(StackWalk_h_) */
--- a/xpcom/base/StaticMutex.h
+++ b/xpcom/base/StaticMutex.h
@@ -68,21 +68,21 @@ private:
   Atomic<OffTheBooksMutex*> mMutex;
 
 
   // Disallow copy constructor, but only in debug mode.  We only define
   // a default constructor in debug mode (see above); if we declared
   // this constructor always, the compiler wouldn't generate a trivial
   // default constructor for us in non-debug mode.
 #ifdef DEBUG
-  StaticMutex(StaticMutex &other);
+  StaticMutex(StaticMutex& other);
 #endif
 
   // Disallow these operators.
-  StaticMutex& operator=(StaticMutex* rhs);
+  StaticMutex& operator=(StaticMutex* aRhs);
   static void* operator new(size_t) CPP_THROW_NEW;
   static void operator delete(void*);
 };
 
 typedef BaseAutoLock<StaticMutex> StaticMutexAutoLock;
 typedef BaseAutoUnlock<StaticMutex> StaticMutexAutoUnlock;
 
 } // namespace mozilla
--- a/xpcom/base/StaticPtr.h
+++ b/xpcom/base/StaticPtr.h
@@ -43,19 +43,19 @@ public:
   // so that the compiler can see that the constructor is trivial.
 #ifdef DEBUG
   StaticAutoPtr()
   {
     MOZ_ASSERT(!mRawPtr);
   }
 #endif
 
-  StaticAutoPtr<T>& operator=(T* rhs)
+  StaticAutoPtr<T>& operator=(T* aRhs)
   {
-    Assign(rhs);
+    Assign(aRhs);
     return *this;
   }
 
   T* get() const
   {
     return mRawPtr;
   }
 
@@ -76,24 +76,24 @@ public:
   }
 
 private:
   // Disallow copy constructor, but only in debug mode.  We only define
   // a default constructor in debug mode (see above); if we declared
   // this constructor always, the compiler wouldn't generate a trivial
   // default constructor for us in non-debug mode.
 #ifdef DEBUG
-  StaticAutoPtr(StaticAutoPtr<T> &other);
+  StaticAutoPtr(StaticAutoPtr<T>& other);
 #endif
 
-  void Assign(T* newPtr)
+  void Assign(T* aNewPtr)
   {
-    MOZ_ASSERT(!newPtr || mRawPtr != newPtr);
+    MOZ_ASSERT(!aNewPtr || mRawPtr != aNewPtr);
     T* oldPtr = mRawPtr;
-    mRawPtr = newPtr;
+    mRawPtr = aNewPtr;
     delete oldPtr;
   }
 
   T* mRawPtr;
 };
 
 template<class T>
 class StaticRefPtr
@@ -104,25 +104,25 @@ public:
   // so that the compiler can see that the constructor is trivial.
 #ifdef DEBUG
   StaticRefPtr()
   {
     MOZ_ASSERT(!mRawPtr);
   }
 #endif
 
-  StaticRefPtr<T>& operator=(T* rhs)
+  StaticRefPtr<T>& operator=(T* aRhs)
   {
-    AssignWithAddref(rhs);
+    AssignWithAddref(aRhs);
     return *this;
   }
 
-  StaticRefPtr<T>& operator=(const StaticRefPtr<T>& rhs)
+  StaticRefPtr<T>& operator=(const StaticRefPtr<T>& aRhs)
   {
-    return (this = rhs.mRawPtr);
+    return (this = aRhs.mRawPtr);
   }
 
   T* get() const
   {
     return mRawPtr;
   }
 
   operator T*() const
@@ -137,28 +137,28 @@ public:
   }
 
   T& operator*() const
   {
     return *get();
   }
 
 private:
-  void AssignWithAddref(T* newPtr)
+  void AssignWithAddref(T* aNewPtr)
   {
-    if (newPtr) {
-      newPtr->AddRef();
+    if (aNewPtr) {
+      aNewPtr->AddRef();
     }
-    AssignAssumingAddRef(newPtr);
+    AssignAssumingAddRef(aNewPtr);
   }
 
-  void AssignAssumingAddRef(T* newPtr)
+  void AssignAssumingAddRef(T* aNewPtr)
   {
     T* oldPtr = mRawPtr;
-    mRawPtr = newPtr;
+    mRawPtr = aNewPtr;
     if (oldPtr) {
       oldPtr->Release();
     }
   }
 
   T* mRawPtr;
 };
 
@@ -194,52 +194,52 @@ class Zero;
   {                                                            \
     return !(lhs == rhs);                                      \
   }
 
 // StaticAutoPtr (in)equality operators
 
 template<class T, class U>
 inline bool
-operator==(const StaticAutoPtr<T>& lhs, const StaticAutoPtr<U>& rhs)
+operator==(const StaticAutoPtr<T>& aLhs, const StaticAutoPtr<U>& aRhs)
 {
-  return lhs.get() == rhs.get();
+  return aLhs.get() == aRhs.get();
 }
 
 template<class T, class U>
 inline bool
-operator!=(const StaticAutoPtr<T>& lhs, const StaticAutoPtr<U>& rhs)
+operator!=(const StaticAutoPtr<T>& aLhs, const StaticAutoPtr<U>& aRhs)
 {
-  return !(lhs == rhs);
+  return !(aLhs == aRhs);
 }
 
 REFLEXIVE_EQUALITY_OPERATORS(const StaticAutoPtr<T>&, const U*,
                              lhs.get() == rhs, class T, class U)
 
 REFLEXIVE_EQUALITY_OPERATORS(const StaticAutoPtr<T>&, U*,
                              lhs.get() == rhs, class T, class U)
 
 // Let us compare StaticAutoPtr to 0.
 REFLEXIVE_EQUALITY_OPERATORS(const StaticAutoPtr<T>&, StaticPtr_internal::Zero*,
                              lhs.get() == nullptr, class T)
 
 // StaticRefPtr (in)equality operators
 
 template<class T, class U>
 inline bool
-operator==(const StaticRefPtr<T>& lhs, const StaticRefPtr<U>& rhs)
+operator==(const StaticRefPtr<T>& aLhs, const StaticRefPtr<U>& aRhs)
 {
-  return lhs.get() == rhs.get();
+  return aLhs.get() == aRhs.get();
 }
 
 template<class T, class U>
 inline bool
-operator!=(const StaticRefPtr<T>& lhs, const StaticRefPtr<U>& rhs)
+operator!=(const StaticRefPtr<T>& aLhs, const StaticRefPtr<U>& aRhs)
 {
-  return !(lhs == rhs);
+  return !(aLhs == aRhs);
 }
 
 REFLEXIVE_EQUALITY_OPERATORS(const StaticRefPtr<T>&, const U*,
                              lhs.get() == rhs, class T, class U)
 
 REFLEXIVE_EQUALITY_OPERATORS(const StaticRefPtr<T>&, U*,
                              lhs.get() == rhs, class T, class U)
 
--- a/xpcom/base/SystemMemoryReporter.cpp
+++ b/xpcom/base/SystemMemoryReporter.cpp
@@ -79,24 +79,24 @@ GetBasename(const nsCString& aPath, nsAC
     out.Assign(Substring(out, 0, out.RFind("(deleted)")));
   }
   out.StripChars(" ");
 
   aOut.Assign(out);
 }
 
 static bool
-IsNumeric(const char* s)
+IsNumeric(const char* aStr)
 {
-  MOZ_ASSERT(*s);   // shouldn't see empty strings
-  while (*s) {
-    if (!isdigit(*s)) {
+  MOZ_ASSERT(*aStr);  // shouldn't see empty strings
+  while (*aStr) {
+    if (!isdigit(*aStr)) {
       return false;
     }
-    s++;
+    ++aStr;
   }
   return true;
 }
 
 static bool
 IsAnonymous(const nsACString& aName)
 {
   // Recent kernels (e.g. 3.5) have multiple [stack:nnnn] entries, where |nnnn|
@@ -185,17 +185,20 @@ private:
     ProcessSizeKindLimit = 9  // must be last
   };
 
   static const char* kindPathSuffixes[ProcessSizeKindLimit];
 
   // These are the cross-cutting measurements across all processes.
   struct ProcessSizes
   {
-    ProcessSizes() { memset(this, 0, sizeof(*this)); }
+    ProcessSizes()
+    {
+      memset(this, 0, sizeof(*this));
+    }
 
     size_t mSizes[ProcessSizeKindLimit];
   };
 
   nsresult ReadMemInfo(int64_t* aMemTotal, int64_t* aMemFree)
   {
     FILE* f = fopen("/proc/meminfo", "r");
     if (!f) {
@@ -266,18 +269,19 @@ private:
         if (!f) {
           // Processes can terminate between the readdir() call above and now,
           // so just skip if we can't open the file.
           continue;
         }
         while (true) {
           nsresult rv = ParseMapping(f, processName, aHandleReport, aData,
                                      &processSizes, aTotalPss);
-          if (NS_FAILED(rv))
+          if (NS_FAILED(rv)) {
             break;
+          }
         }
         fclose(f);
 
         // Report the open file descriptors for this process.
         nsPrintfCString procFdPath("/proc/%s/fd", pidStr);
         nsresult rv = CollectOpenFileReports(
                   aHandleReport, aData, procFdPath, processName);
         if (NS_FAILED(rv)) {
@@ -356,18 +360,19 @@ private:
     nsAutoCString name, description;
     ProcessSizeKind kind;
     GetReporterNameAndDescription(path, perms, name, description, &kind);
 
     while (true) {
       size_t pss = 0;
       nsresult rv = ParseMapBody(aFile, aProcessName, name, description,
                                  aHandleReport, aData, &pss);
-      if (NS_FAILED(rv))
+      if (NS_FAILED(rv)) {
         break;
+      }
 
       // Increment the appropriate aProcessSizes values, and the total.
       aProcessSizes->mSizes[kind] += pss;
       *aTotalPss += pss;
     }
 
     return NS_OK;
   }
@@ -822,15 +827,16 @@ const char* SystemReporter::kindPathSuff
   "shared-libraries/read-write",
   "shared-libraries/read-only",
   "shared-libraries/other",
   "other-files",
   "main-thread-stack",
   "vdso"
 };
 
-void Init()
+void
+Init()
 {
   RegisterStrongMemoryReporter(new SystemReporter());
 }
 
 } // namespace SystemMemoryReporter
 } // namespace mozilla
--- a/xpcom/base/SystemMemoryReporter.h
+++ b/xpcom/base/SystemMemoryReporter.h
@@ -9,17 +9,21 @@
 
 namespace mozilla {
 namespace SystemMemoryReporter {
 
 // This only works on Linux, but to make callers' lives easier, we stub out
 // empty functions on other platforms.
 
 #if defined(XP_LINUX)
-void Init();
+void
+Init();
 #else
-void Init() {}
+void
+Init()
+{
+}
 #endif
 
 } // namespace SystemMemoryReporter
 } // namespace mozilla
 
 #endif
--- a/xpcom/base/VisualEventTracer.cpp
+++ b/xpcom/base/VisualEventTracer.cpp
@@ -7,57 +7,59 @@
 #include "mozilla/TimeStamp.h"
 #include "nscore.h"
 #include "prthread.h"
 #include "prprf.h"
 #include "prenv.h"
 #include "plstr.h"
 #include "nsThreadUtils.h"
 
-namespace mozilla { namespace eventtracer {
+namespace mozilla {
+namespace eventtracer {
 
 #ifdef MOZ_VISUAL_EVENT_TRACER
 
 namespace {
 
 const uint32_t kBatchSize = 256;
-const char kTypeChars[eventtracer::eLast] = {' ','N','S','W','E','D'};
+const char kTypeChars[eventtracer::eLast] = {' ', 'N', 'S', 'W', 'E', 'D'};
 
 // Flushing thread and records queue monitor
-mozilla::Monitor * gMonitor = nullptr;
+mozilla::Monitor* gMonitor = nullptr;
 
 // gInitialized and gCapture can be accessed from multiple threads
 // simultaneously without any locking.  However, since they are only ever
 // *set* from the main thread, the chance of races manifesting is small
 // and unlikely to be a problem in practice.
 bool gInitialized;
 
 // Flag to allow capturing
 bool gCapture;
 
 // Time stamp of the epoch we have started to capture
-mozilla::TimeStamp * gProfilerStart;
+mozilla::TimeStamp* gProfilerStart;
 
 // Duration of the log to keep up to
-mozilla::TimeDuration * gMaxBacklogTime;
+mozilla::TimeDuration* gMaxBacklogTime;
 
 
 // Record of a single event
-class Record {
+class Record
+{
 public:
   Record()
     : mType(::mozilla::eventtracer::eNone)
     , mItem(nullptr)
     , mText(nullptr)
     , mText2(nullptr)
   {
     MOZ_COUNT_CTOR(Record);
   }
 
-  Record& operator=(const Record & aOther)
+  Record& operator=(const Record& aOther)
   {
     mType = aOther.mType;
     mTime = aOther.mTime;
     mItem = aOther.mItem;
     mText = PL_strdup(aOther.mText);
     mText2 = aOther.mText2 ? PL_strdup(aOther.mText2) : nullptr;
     return *this;
   }
@@ -66,42 +68,45 @@ public:
   {
     PL_strfree(mText2);
     PL_strfree(mText);
     MOZ_COUNT_DTOR(Record);
   }
 
   uint32_t mType;
   TimeStamp mTime;
-  void * mItem;
-  char * mText;
-  char * mText2;
+  void* mItem;
+  char* mText;
+  char* mText2;
 };
 
-char * DupCurrentThreadName()
+char* DupCurrentThreadName()
 {
-  if (NS_IsMainThread())
+  if (NS_IsMainThread()) {
     return PL_strdup("Main Thread");
+  }
 
-  PRThread * currentThread = PR_GetCurrentThread();
-  const char * name = PR_GetThreadName(currentThread);
-  if (name)
+  PRThread* currentThread = PR_GetCurrentThread();
+  const char* name = PR_GetThreadName(currentThread);
+  if (name) {
     return PL_strdup(name);
+  }
 
   char buffer[128];
   PR_snprintf(buffer, 127, "Nameless %p", currentThread);
 
   return PL_strdup(buffer);
 }
 
 // An array of events, each thread keeps its own private instance
-class RecordBatch {
+class RecordBatch
+{
 public:
   RecordBatch(size_t aLength = kBatchSize,
-              char * aThreadName = DupCurrentThreadName())
+              char* aThreadName = DupCurrentThreadName())
     : mRecordsHead(new Record[aLength])
     , mRecordsTail(mRecordsHead + aLength)
     , mNextRecord(mRecordsHead)
     , mNextBatch(nullptr)
     , mThreadNameCopy(aThreadName)
     , mClosed(false)
   {
     MOZ_COUNT_CTOR(RecordBatch);
@@ -109,130 +114,142 @@ public:
 
   ~RecordBatch()
   {
     delete [] mRecordsHead;
     PL_strfree(mThreadNameCopy);
     MOZ_COUNT_DTOR(RecordBatch);
   }
 
-  void Close() { mClosed = true; }
+  void Close()
+  {
+    mClosed = true;
+  }
 
-  size_t Length() const { return mNextRecord - mRecordsHead; }
+  size_t Length() const
+  {
+    return mNextRecord - mRecordsHead;
+  }
   bool CanBeDeleted(const TimeStamp& aUntil) const;
 
-  static RecordBatch * Register();
-  static void Close(void * data); // Registered on freeing thread data
-  static RecordBatch * Clone(RecordBatch * aLog, const TimeStamp& aSince);
-  static void Delete(RecordBatch * aLog);
+  static RecordBatch* Register();
+  static void Close(void* data);  // Registered on freeing thread data
+  static RecordBatch* Clone(RecordBatch* aLog, const TimeStamp& aSince);
+  static void Delete(RecordBatch* aLog);
 
-  static RecordBatch * CloneLog();
+  static RecordBatch* CloneLog();
   static void GCLog(const TimeStamp& aUntil);
   static void DeleteLog();
 
-  Record * mRecordsHead;
-  Record * mRecordsTail;
-  Record * mNextRecord;
+  Record* mRecordsHead;
+  Record* mRecordsTail;
+  Record* mNextRecord;
 
-  RecordBatch * mNextBatch;
-  char * mThreadNameCopy;
+  RecordBatch* mNextBatch;
+  char* mThreadNameCopy;
   bool mClosed;
 };
 
 // Protected by gMonitor, accessed concurently
 // Linked list of batches threads want to flush on disk
-RecordBatch * gLogHead = nullptr;
-RecordBatch * gLogTail = nullptr;
+RecordBatch* gLogHead = nullptr;
+RecordBatch* gLogTail = nullptr;
 
 // Registers the batch in the linked list
 // static
-RecordBatch *
+RecordBatch*
 RecordBatch::Register()
 {
   MonitorAutoLock mon(*gMonitor);
 
-  if (!gInitialized)
+  if (!gInitialized) {
     return nullptr;
-
-  if (gLogHead)
-    RecordBatch::GCLog(TimeStamp::Now() - *gMaxBacklogTime);
+  }
 
-  RecordBatch * batch = new RecordBatch();
-  if (!gLogHead)
+  if (gLogHead) {
+    RecordBatch::GCLog(TimeStamp::Now() - *gMaxBacklogTime);
+  }
+
+  RecordBatch* batch = new RecordBatch();
+  if (!gLogHead) {
     gLogHead = batch;
-  else // gLogTail is non-null
+  } else { // gLogTail is non-null
     gLogTail->mNextBatch = batch;
+  }
   gLogTail = batch;
 
   mon.Notify();
   return batch;
 }
 
 void
-RecordBatch::Close(void * data)
+RecordBatch::Close(void* data)
 {
-  RecordBatch * batch = static_cast<RecordBatch*>(data);
+  RecordBatch* batch = static_cast<RecordBatch*>(data);
   batch->Close();
 }
 
 // static
-RecordBatch *
-RecordBatch::Clone(RecordBatch * aOther, const TimeStamp& aSince)
+RecordBatch*
+RecordBatch::Clone(RecordBatch* aOther, const TimeStamp& aSince)
 {
-  if (!aOther)
+  if (!aOther) {
     return nullptr;
+  }
 
   size_t length = aOther->Length();
   size_t min = 0;
   size_t max = length;
-  Record * record = nullptr;
+  Record* record = nullptr;
 
   // Binary search for record with time >= aSince
   size_t i;
   while (min < max) {
     i = (max + min) / 2;
 
     record = aOther->mRecordsHead + i;
-    if (record->mTime >= aSince)
+    if (record->mTime >= aSince) {
       max = i;
-    else
-      min = i+1;
+    } else {
+      min = i + 1;
+    }
   }
   i = (max + min) / 2;
 
   // How many Record's to copy?
   size_t toCopy = length - i;
-  if (!toCopy)
+  if (!toCopy) {
     return RecordBatch::Clone(aOther->mNextBatch, aSince);
+  }
 
   // Clone
-  RecordBatch * clone = new RecordBatch(toCopy, PL_strdup(aOther->mThreadNameCopy));
+  RecordBatch* clone = new RecordBatch(toCopy, PL_strdup(aOther->mThreadNameCopy));
   for (; i < length; ++i) {
     record = aOther->mRecordsHead + i;
     *clone->mNextRecord = *record;
     ++clone->mNextRecord;
   }
   clone->mNextBatch = RecordBatch::Clone(aOther->mNextBatch, aSince);
 
   return clone;
 }
 
 // static
 void
-RecordBatch::Delete(RecordBatch * aLog)
+RecordBatch::Delete(RecordBatch* aLog)
 {
   while (aLog) {
-    RecordBatch * batch = aLog;
+    RecordBatch* batch = aLog;
     aLog = aLog->mNextBatch;
     delete batch;
   }
 }
 
 // static
-RecordBatch *
+RecordBatch*
 RecordBatch::CloneLog()
 {
   TimeStamp startEpoch = *gProfilerStart;
   TimeStamp backlogEpoch = TimeStamp::Now() - *gMaxBacklogTime;
 
   TimeStamp since = (startEpoch > backlogEpoch) ? startEpoch : backlogEpoch;
 
   MonitorAutoLock mon(*gMonitor);
@@ -242,47 +259,46 @@ RecordBatch::CloneLog()
 
 // static
 void
 RecordBatch::GCLog(const TimeStamp& aUntil)
 {
   // Garbage collect all unreferenced and old batches
   gMonitor->AssertCurrentThreadOwns();
 
-  RecordBatch *volatile * referer = &gLogHead;
+  RecordBatch* volatile* referer = &gLogHead;
   gLogTail = nullptr;
 
-  RecordBatch * batch = *referer;
+  RecordBatch* batch = *referer;
   while (batch) {
     if (batch->CanBeDeleted(aUntil)) {
       // The batch is completed and thus unreferenced by the thread
       // and the most recent record has time older then the time
       // we want to save records for, hence delete it.
       *referer = batch->mNextBatch;
       delete batch;
       batch = *referer;
-    }
-    else {
+    } else {
       // We walk the whole list, so this will end up filled with
       // the very last valid element of it.
       gLogTail = batch;
       // The current batch is active, examine the next in the list.
       batch = batch->mNextBatch;
       // When the next batch is found expired, we must extract it
       // from the list, shift the referer.
       referer = &((*referer)->mNextBatch);
     }
   }
 }
 
 // static
 void
 RecordBatch::DeleteLog()
 {
-  RecordBatch * batch;
+  RecordBatch* batch;
   {
     MonitorAutoLock mon(*gMonitor);
     batch = gLogHead;
     gLogHead = nullptr;
     gLogTail = nullptr;
   }
 
   RecordBatch::Delete(batch);
@@ -297,142 +313,154 @@ RecordBatch::CanBeDeleted(const TimeStam
     // when the thread ends its job.  We must not delete this
     // batch from memory while it's held by a thread.
 
     if (!Length()) {
       // There are no records, just get rid of this empty batch.
       return true;
     }
 
-    if ((mNextRecord-1)->mTime <= aUntil) {
+    if ((mNextRecord - 1)->mTime <= aUntil) {
       // Is the last record older then the time we demand records
       // for?  If not, this batch has expired.
       return true;
     }
   }
 
   // Not all conditions to close the batch met, keep it.
   return false;
 }
 
 // Helper class for filtering events by MOZ_PROFILING_EVENTS
 class EventFilter
 {
 public:
-  static EventFilter * Build(const char * filterVar);
-  bool EventPasses(const char * eventName);
+  static EventFilter* Build(const char* filterVar);
+  bool EventPasses(const char* eventName);
 
   ~EventFilter()
   {
     delete mNext;
     PL_strfree(mFilter);
     MOZ_COUNT_DTOR(EventFilter);
   }
 
 private:
-  EventFilter(const char * eventName, EventFilter * next)
+  EventFilter(const char* eventName, EventFilter* next)
     : mFilter(PL_strdup(eventName))
     , mNext(next)
   {
     MOZ_COUNT_CTOR(EventFilter);
   }
 
-  char * mFilter;
-  EventFilter * mNext;
+  char* mFilter;
+  EventFilter* mNext;
 };
 
 // static
-EventFilter *
-EventFilter::Build(const char * filterVar)
+EventFilter*
+EventFilter::Build(const char* filterVar)
 {
-  if (!filterVar || !*filterVar)
+  if (!filterVar || !*filterVar) {
     return nullptr;
+  }
 
   // Reads a comma serpatated list of events.
 
   // Copied from nspr logging code (read of NSPR_LOG_MODULES)
   char eventName[64];
   int pos = 0, count, delta = 0;
 
   // Read up to a comma or EOF -> get name of an event first in the list
   count = sscanf(filterVar, "%63[^,]%n", eventName, &delta);
-  if (count == 0)
+  if (count == 0) {
     return nullptr;
+  }
 
   pos = delta;
 
   // Skip a comma, if present, accept spaces around it
   count = sscanf(filterVar + pos, " , %n", &delta);
-  if (count != EOF)
+  if (count != EOF) {
     pos += delta;
+  }
 
   // eventName contains name of the first event in the list
   // second argument recursively parses the rest of the list string and
   // fills mNext of the just created EventFilter object chaining the objects
   return new EventFilter(eventName, Build(filterVar + pos));
 }
 
 bool
-EventFilter::EventPasses(const char * eventName)
+EventFilter::EventPasses(const char* eventName)
 {
-  if (!strcmp(eventName, mFilter))
+  if (!strcmp(eventName, mFilter)) {
     return true;
+  }
 
-  if (mNext)
+  if (mNext) {
     return mNext->EventPasses(eventName);
+  }
 
   return false;
 }
 
 // State and control variables, initialized in Init() method, after it
 // immutable and read concurently.
-EventFilter * gEventFilter = nullptr;
+EventFilter* gEventFilter = nullptr;
 unsigned gThreadPrivateIndex;
 
 // static
-bool CheckEventFilters(uint32_t aType, void * aItem, const char * aText)
+bool CheckEventFilters(uint32_t aType, void* aItem, const char* aText)
 {
-  if (!gEventFilter)
+  if (!gEventFilter) {
     return true;
+  }
 
-  if (aType == eName)
+  if (aType == eName) {
     return true;
+  }
 
   return gEventFilter->EventPasses(aText);
 }
 
 } // anon namespace
 
 #endif //MOZ_VISUAL_EVENT_TRACER
 
 // static
-void Init()
+void
+Init()
 {
 #ifdef MOZ_VISUAL_EVENT_TRACER
-  const char * logEvents = PR_GetEnv("MOZ_PROFILING_EVENTS");
-  if (logEvents && *logEvents)
+  const char* logEvents = PR_GetEnv("MOZ_PROFILING_EVENTS");
+  if (logEvents && *logEvents) {
     gEventFilter = EventFilter::Build(logEvents);
+  }
 
   PRStatus status = PR_NewThreadPrivateIndex(&gThreadPrivateIndex, &RecordBatch::Close);
-  if (status != PR_SUCCESS)
+  if (status != PR_SUCCESS) {
     return;
+  }
 
   gMonitor = new mozilla::Monitor("Profiler");
-  if (!gMonitor)
+  if (!gMonitor) {
     return;
+  }
 
   gProfilerStart = new mozilla::TimeStamp();
   gMaxBacklogTime = new mozilla::TimeDuration();
 
   gInitialized = true;
 #endif
 }
 
 // static
-void Shutdown()
+void
+Shutdown()
 {
 #ifdef MOZ_VISUAL_EVENT_TRACER
   gCapture = false;
   gInitialized = false;
 
   RecordBatch::DeleteLog();
 
   if (gMonitor) {
@@ -453,39 +481,44 @@ void Shutdown()
   if (gMaxBacklogTime) {
     delete gMaxBacklogTime;
     gMaxBacklogTime = nullptr;
   }
 #endif
 }
 
 // static
-void Mark(uint32_t aType, void * aItem, const char * aText, const char * aText2)
+void
+Mark(uint32_t aType, void* aItem, const char* aText, const char* aText2)
 {
 #ifdef MOZ_VISUAL_EVENT_TRACER
-  if (!gInitialized || !gCapture)
+  if (!gInitialized || !gCapture) {
     return;
+  }
 
-  if (aType == eNone)
+  if (aType == eNone) {
     return;
+  }
 
-  if (!CheckEventFilters(aType, aItem, aText)) // Events use just aText
+  if (!CheckEventFilters(aType, aItem, aText)) { // Events use just aText
     return;
+  }
 
-  RecordBatch * threadLogPrivate = static_cast<RecordBatch *>(
+  RecordBatch* threadLogPrivate = static_cast<RecordBatch*>(
     PR_GetThreadPrivate(gThreadPrivateIndex));
   if (!threadLogPrivate) {
     threadLogPrivate = RecordBatch::Register();
-    if (!threadLogPrivate)
+    if (!threadLogPrivate) {
       return;
+    }
 
     PR_SetThreadPrivate(gThreadPrivateIndex, threadLogPrivate);
   }
 
-  Record * record = threadLogPrivate->mNextRecord;
+  Record* record = threadLogPrivate->mNextRecord;
   record->mType = aType;
   record->mTime = mozilla::TimeStamp::Now();
   record->mItem = aItem;
   record->mText = PL_strdup(aText);
   record->mText2 = aText2 ? PL_strdup(aText2) : nullptr;
 
   ++threadLogPrivate->mNextRecord;
   if (threadLogPrivate->mNextRecord == threadLogPrivate->mRecordsTail) {
@@ -504,54 +537,55 @@ void Mark(uint32_t aType, void * aItem, 
 class VisualEventTracerLog : public nsIVisualEventTracerLog
 {
   NS_DECL_ISUPPORTS
   NS_DECL_NSIVISUALEVENTTRACERLOG
 
   VisualEventTracerLog(RecordBatch* aBatch)
     : mBatch(aBatch)
     , mProfilerStart(*gProfilerStart)
-  {}
+  {
+  }
 
   virtual ~VisualEventTracerLog();
 
 protected:
-  RecordBatch * mBatch;
+  RecordBatch* mBatch;
   TimeStamp mProfilerStart;
 };
 
 NS_IMPL_ISUPPORTS(VisualEventTracerLog, nsIVisualEventTracerLog)
 
 VisualEventTracerLog::~VisualEventTracerLog()
 {
   RecordBatch::Delete(mBatch);
 }
 
 NS_IMETHODIMP
-VisualEventTracerLog::GetJSONString(nsACString & _retval)
+VisualEventTracerLog::GetJSONString(nsACString& _retval)
 {
   nsCString buffer;
 
   buffer.Assign(NS_LITERAL_CSTRING("{\n\"version\": 1,\n\"records\":[\n"));
 
-  RecordBatch * batch = mBatch;
+  RecordBatch* batch = mBatch;
   while (batch) {
     if (batch != mBatch) {
       // This is not the first batch we are writting, add comma
       buffer.Append(NS_LITERAL_CSTRING(",\n"));
     }
 
     buffer.Append(NS_LITERAL_CSTRING("{\"thread\":\""));
     buffer.Append(batch->mThreadNameCopy);
     buffer.Append(NS_LITERAL_CSTRING("\",\"log\":[\n"));
 
     static const int kBufferSize = 2048;
     char buf[kBufferSize];
 
-    for (Record * record = batch->mRecordsHead;
+    for (Record* record = batch->mRecordsHead;
          record < batch->mNextRecord;
          ++record) {
 
       // mType carries both type and flags, separate type
       // as lower 16 bits and flags as higher 16 bits.
       // The json format expects this separated.
       uint32_t type = record->mType & 0xffffUL;
       uint32_t flags = record->mType >> 16;
@@ -565,17 +599,17 @@ VisualEventTracerLog::GetJSONString(nsAC
         record->mText2 ? record->mText2 : "",
         (record == batch->mNextRecord - 1) ? "" : ",");
 
       buffer.Append(buf);
     }
 
     buffer.Append(NS_LITERAL_CSTRING("]}\n"));
 
-    RecordBatch * next = batch->mNextBatch;
+    RecordBatch* next = batch->mNextBatch;
     batch = next;
   }
 
   buffer.Append(NS_LITERAL_CSTRING("]}\n"));
   _retval.Assign(buffer);
 
   return NS_OK;
 }
@@ -607,18 +641,19 @@ VisualEventTracerLog::WriteToProfilingFi
   return NS_OK;
 }
 
 NS_IMPL_ISUPPORTS(VisualEventTracer, nsIVisualEventTracer)
 
 NS_IMETHODIMP
 VisualEventTracer::Start(const uint32_t aMaxBacklogSeconds)
 {
-  if (!gInitialized)
+  if (!gInitialized) {
     return NS_ERROR_UNEXPECTED;
+  }
 
   if (gCapture) {
     NS_WARNING("VisualEventTracer has already been started");
     return NS_ERROR_ALREADY_INITIALIZED;
   }
 
   *gMaxBacklogTime = TimeDuration::FromMilliseconds(aMaxBacklogSeconds * 1000);
 
@@ -632,18 +667,19 @@ VisualEventTracer::Start(const uint32_t 
   MOZ_EVENT_TRACER_MARK(this, "trace::start");
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 VisualEventTracer::Stop()
 {
-  if (!gInitialized)
+  if (!gInitialized) {
     return NS_ERROR_UNEXPECTED;
+  }
 
   if (!gCapture) {
     NS_WARNING("VisualEventTracer is not runing");
     return NS_ERROR_NOT_INITIALIZED;
   }
 
   MOZ_EVENT_TRACER_MARK(this, "trace::stop");
 
@@ -657,22 +693,23 @@ VisualEventTracer::Stop()
       rv = tracelog->WriteToProfilingFile();
     }
   }
 
   return rv;
 }
 
 NS_IMETHODIMP
-VisualEventTracer::Snapshot(nsIVisualEventTracerLog ** _result)
+VisualEventTracer::Snapshot(nsIVisualEventTracerLog** _result)
 {
-  if (!gInitialized)
+  if (!gInitialized) {
     return NS_ERROR_UNEXPECTED;
+  }
 
-  RecordBatch * batch = RecordBatch::CloneLog();
+  RecordBatch* batch = RecordBatch::CloneLog();
 
   nsRefPtr<VisualEventTracerLog> log = new VisualEventTracerLog(batch);
   log.forget(_result);
 
   return NS_OK;
 }
 
 #endif
--- a/xpcom/base/VisualEventTracer.h
+++ b/xpcom/base/VisualEventTracer.h
@@ -154,38 +154,38 @@ enum MarkType {
 //    happening on.  Can be actually anything, but valid poitners should
 //    be used.
 // @param aText
 //    Text of the name (for eName) or event's name for others.  The string
 //    is duplicated.
 // @param aText2
 //    Optional second part of the instnace name, or event name.
 //    Event filtering does apply only to the first part (aText).
-void Mark(uint32_t aType, void * aItem,
-          const char * aText, const char * aText2 = 0);
+void Mark(uint32_t aType, void* aItem,
+          const char* aText, const char* aText2 = 0);
 
 
 // Helper guard object.  Use to mark an event in the constructor and a different
 // event in the destructor.
 //
 // Example:
 // int class::func()
 // {
 //    AutoEventTracer tracer(this, eventtracer::eExec, eventtracer::eDone, "func");
 //
 //    do_something_taking_a_long_time();
 // }
 class MOZ_STACK_CLASS AutoEventTracer
 {
 public:
-  AutoEventTracer(void * aInstance,
+  AutoEventTracer(void* aInstance,
                   uint32_t aTypeOn, // MarkType marked in constructor
                   uint32_t aTypeOff, // MarkType marked in destructor
-                  const char * aName,
-                  const char * aName2 = 0
+                  const char* aName,
+                  const char* aName2 = 0
                   MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
     : mInstance(aInstance)
     , mName(aName)
     , mName2(aName2)
     , mTypeOn(aTypeOn)
     , mTypeOff(aTypeOff)
   {
     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
@@ -194,19 +194,19 @@ public:
   }
 
   ~AutoEventTracer()
   {
     ::mozilla::eventtracer::Mark(mTypeOff, mInstance, mName, mName2);
   }
 
 private:
-  void * mInstance;
-  const char * mName;
-  const char * mName2;
+  void* mInstance;
+  const char* mName;
+  const char* mName2;
   uint32_t mTypeOn;
   uint32_t mTypeOff;
 
   MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 };
 
 #ifdef MOZ_VISUAL_EVENT_TRACER
 
--- a/xpcom/base/nsAutoPtr.h
+++ b/xpcom/base/nsAutoPtr.h
@@ -23,37 +23,37 @@ private:
   void**
   begin_assignment()
   {
     assign(0);
     return reinterpret_cast<void**>(&mRawPtr);
   }
 
   void
-  assign( T* newPtr )
+  assign(T* aNewPtr)
   {
     T* oldPtr = mRawPtr;
 
-    if (newPtr != nullptr && newPtr == oldPtr) {
+    if (aNewPtr && aNewPtr == oldPtr) {
       NS_RUNTIMEABORT("Logic flaw in the caller");
     }
 
-    mRawPtr = newPtr;
+    mRawPtr = aNewPtr;
     delete oldPtr;
   }
 
   // |class Ptr| helps us prevent implicit "copy construction"
   // through |operator T*() const| from a |const nsAutoPtr<T>|
   // because two implicit conversions in a row aren't allowed.
   // It still allows assignment from T* through implicit conversion
   // from |T*| to |nsAutoPtr<T>::Ptr|
   class Ptr
   {
   public:
-    Ptr( T* aPtr )
+    Ptr(T* aPtr)
       : mPtr(aPtr)
     {
     }
 
     operator T*() const
     {
       return mPtr;
     }
@@ -76,50 +76,50 @@ public:
   // Constructors
 
   nsAutoPtr()
     : mRawPtr(0)
     // default constructor
   {
   }
 
-  nsAutoPtr( Ptr aRawPtr )
+  nsAutoPtr(Ptr aRawPtr)
     : mRawPtr(aRawPtr)
     // construct from a raw pointer (of the right type)
   {
   }
 
   // This constructor shouldn't exist; we should just use the &&
   // constructor.
-  nsAutoPtr( nsAutoPtr<T>& aSmartPtr )
-    : mRawPtr( aSmartPtr.forget() )
+  nsAutoPtr(nsAutoPtr<T>& aSmartPtr)
+    : mRawPtr(aSmartPtr.forget())
     // Construct by transferring ownership from another smart pointer.
   {
   }
 
-  nsAutoPtr( nsAutoPtr<T>&& aSmartPtr )
-    : mRawPtr( aSmartPtr.forget() )
+  nsAutoPtr(nsAutoPtr<T>&& aSmartPtr)
+    : mRawPtr(aSmartPtr.forget())
     // Construct by transferring ownership from another smart pointer.
   {
   }
 
   // Assignment operators
 
   nsAutoPtr<T>&
-  operator=( T* rhs )
+  operator=(T* aRhs)
   // assign from a raw pointer (of the right type)
   {
-    assign(rhs);
+    assign(aRhs);
     return *this;
   }
 
-  nsAutoPtr<T>& operator=( nsAutoPtr<T>& rhs )
+  nsAutoPtr<T>& operator=(nsAutoPtr<T>& aRhs)
   // assign by transferring ownership from another smart pointer.
   {
-    assign(rhs.forget());
+    assign(aRhs.forget());
     return *this;
   }
 
   // Other pointer operators
 
   T*
   get() const
   /*
@@ -151,29 +151,31 @@ public:
     T* temp = mRawPtr;
     mRawPtr = 0;
     return temp;
   }
 
   T*
   operator->() const
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsAutoPtr with operator->().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsAutoPtr with operator->().");
     return get();
   }
 
   // This operator is needed for gcc <= 4.0.* and for Sun Studio; it
   // causes internal compiler errors for some MSVC versions.  (It's not
   // clear to me whether it should be needed.)
 #ifndef _MSC_VER
   template <class U, class V>
   U&
   operator->*(U V::* aMember)
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsAutoPtr with operator->*().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsAutoPtr with operator->*().");
     return get()->*aMember;
   }
 #endif
 
   nsAutoPtr<T>*
   get_address()
   // This is not intended to be used by clients.  See |address_of|
   // below.
@@ -188,17 +190,18 @@ public:
   {
     return this;
   }
 
 public:
   T&
   operator*() const
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsAutoPtr with operator*().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsAutoPtr with operator*().");
     return *get();
   }
 
   T**
   StartAssignment()
   {
 #ifndef NSCAP_FEATURE_INLINE_STARTASSIGNMENT
     return reinterpret_cast<T**>(begin_assignment());
@@ -207,25 +210,25 @@ public:
     return reinterpret_cast<T**>(&mRawPtr);
 #endif
   }
 };
 
 template <class T>
 inline
 nsAutoPtr<T>*
-address_of( nsAutoPtr<T>& aPtr )
+address_of(nsAutoPtr<T>& aPtr)
 {
   return aPtr.get_address();
 }
 
 template <class T>
 inline
 const nsAutoPtr<T>*
-address_of( const nsAutoPtr<T>& aPtr )
+address_of(const nsAutoPtr<T>& aPtr)
 {
   return aPtr.get_address();
 }
 
 template <class T>
 class nsAutoPtrGetterTransfers
 /*
   ...
@@ -242,17 +245,17 @@ class nsAutoPtrGetterTransfers
   a |void**|, a |T**|, or an |nsISupports**| as needed, that the
   outer call (|GetTransferedPointer| in this case) can fill in.
 
   This type should be a nested class inside |nsAutoPtr<T>|.
 */
 {
 public:
   explicit
-  nsAutoPtrGetterTransfers( nsAutoPtr<T>& aSmartPtr )
+  nsAutoPtrGetterTransfers(nsAutoPtr<T>& aSmartPtr)
     : mTargetSmartPtr(aSmartPtr)
   {
     // nothing else to do
   }
 
   operator void**()
   {
     return reinterpret_cast<void**>(mTargetSmartPtr.StartAssignment());
@@ -271,181 +274,181 @@ public:
 
 private:
   nsAutoPtr<T>& mTargetSmartPtr;
 };
 
 template <class T>
 inline
 nsAutoPtrGetterTransfers<T>
-getter_Transfers( nsAutoPtr<T>& aSmartPtr )
+getter_Transfers(nsAutoPtr<T>& aSmartPtr)
 /*
   Used around a |nsAutoPtr| when
   ...makes the class |nsAutoPtrGetterTransfers<T>| invisible.
 */
 {
   return nsAutoPtrGetterTransfers<T>(aSmartPtr);
 }
 
 
 
 // Comparing two |nsAutoPtr|s
 
 template <class T, class U>
 inline
 bool
-operator==( const nsAutoPtr<T>& lhs, const nsAutoPtr<U>& rhs )
+operator==(const nsAutoPtr<T>& aLhs, const nsAutoPtr<U>& aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == static_cast<const U*>(rhs.get());
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs.get());
 }
 
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsAutoPtr<T>& lhs, const nsAutoPtr<U>& rhs )
+operator!=(const nsAutoPtr<T>& aLhs, const nsAutoPtr<U>& aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != static_cast<const U*>(rhs.get());
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs.get());
 }
 
 
 // Comparing an |nsAutoPtr| to a raw pointer
 
 template <class T, class U>
 inline
 bool
-operator==( const nsAutoPtr<T>& lhs, const U* rhs )
+operator==(const nsAutoPtr<T>& aLhs, const U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == static_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator==( const U* lhs, const nsAutoPtr<T>& rhs )
+operator==(const U* aLhs, const nsAutoPtr<T>& aRhs)
 {
-  return static_cast<const U*>(lhs) == static_cast<const T*>(rhs.get());
+  return static_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsAutoPtr<T>& lhs, const U* rhs )
+operator!=(const nsAutoPtr<T>& aLhs, const U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != static_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const U* lhs, const nsAutoPtr<T>& rhs )
+operator!=(const U* aLhs, const nsAutoPtr<T>& aRhs)
 {
-  return static_cast<const U*>(lhs) != static_cast<const T*>(rhs.get());
+  return static_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
 }
 
 // To avoid ambiguities caused by the presence of builtin |operator==|s
 // creating a situation where one of the |operator==| defined above
 // has a better conversion for one argument and the builtin has a
 // better conversion for the other argument, define additional
 // |operator==| without the |const| on the raw pointer.
 // See bug 65664 for details.
 
 #ifndef NSCAP_DONT_PROVIDE_NONCONST_OPEQ
 template <class T, class U>
 inline
 bool
-operator==( const nsAutoPtr<T>& lhs, U* rhs )
+operator==(const nsAutoPtr<T>& aLhs, U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == const_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) == const_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator==( U* lhs, const nsAutoPtr<T>& rhs )
+operator==(U* aLhs, const nsAutoPtr<T>& aRhs)
 {
-  return const_cast<const U*>(lhs) == static_cast<const T*>(rhs.get());
+  return const_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsAutoPtr<T>& lhs, U* rhs )
+operator!=(const nsAutoPtr<T>& aLhs, U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != const_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) != const_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( U* lhs, const nsAutoPtr<T>& rhs )
+operator!=(U* aLhs, const nsAutoPtr<T>& aRhs)
 {
-  return const_cast<const U*>(lhs) != static_cast<const T*>(rhs.get());
+  return const_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
 }
 #endif
 
 
 
 // Comparing an |nsAutoPtr| to |0|
 
 template <class T>
 inline
 bool
-operator==( const nsAutoPtr<T>& lhs, NSCAP_Zero* rhs )
+operator==(const nsAutoPtr<T>& aLhs, NSCAP_Zero* aRhs)
 // specifically to allow |smartPtr == 0|
 {
-  return static_cast<const void*>(lhs.get()) == reinterpret_cast<const void*>(rhs);
+  return static_cast<const void*>(aLhs.get()) == reinterpret_cast<const void*>(aRhs);
 }
 
 template <class T>
 inline
 bool
-operator==( NSCAP_Zero* lhs, const nsAutoPtr<T>& rhs )
+operator==(NSCAP_Zero* aLhs, const nsAutoPtr<T>& aRhs)
 // specifically to allow |0 == smartPtr|
 {
-  return reinterpret_cast<const void*>(lhs) == static_cast<const void*>(rhs.get());
+  return reinterpret_cast<const void*>(aLhs) == static_cast<const void*>(aRhs.get());
 }
 
 template <class T>
 inline
 bool
-operator!=( const nsAutoPtr<T>& lhs, NSCAP_Zero* rhs )
+operator!=(const nsAutoPtr<T>& aLhs, NSCAP_Zero* aRhs)
 // specifically to allow |smartPtr != 0|
 {
-  return static_cast<const void*>(lhs.get()) != reinterpret_cast<const void*>(rhs);
+  return static_cast<const void*>(aLhs.get()) != reinterpret_cast<const void*>(aRhs);
 }
 
 template <class T>
 inline
 bool
-operator!=( NSCAP_Zero* lhs, const nsAutoPtr<T>& rhs )
+operator!=(NSCAP_Zero* aLhs, const nsAutoPtr<T>& aRhs)
 // specifically to allow |0 != smartPtr|
 {
-  return reinterpret_cast<const void*>(lhs) != static_cast<const void*>(rhs.get());
+  return reinterpret_cast<const void*>(aLhs) != static_cast<const void*>(aRhs.get());
 }
 
 
 #ifdef HAVE_CPP_TROUBLE_COMPARING_TO_ZERO
 
 // We need to explicitly define comparison operators for `int'
 // because the compiler is lame.
 
 template <class T>
 inline
 bool
-operator==( const nsAutoPtr<T>& lhs, int rhs )
+operator==(const nsAutoPtr<T>& lhs, int rhs)
 // specifically to allow |smartPtr == 0|
 {
   return static_cast<const void*>(lhs.get()) == reinterpret_cast<const void*>(rhs);
 }
 
 template <class T>
 inline
 bool
-operator==( int lhs, const nsAutoPtr<T>& rhs )
+operator==(int lhs, const nsAutoPtr<T>& rhs)
 // specifically to allow |0 == smartPtr|
 {
   return reinterpret_cast<const void*>(lhs) == static_cast<const void*>(rhs.get());
 }
 
 #endif // !defined(HAVE_CPP_TROUBLE_COMPARING_TO_ZERO)
 
 /*****************************************************************************/
@@ -459,20 +462,20 @@ private:
   void**
   begin_assignment()
   {
     assign(0);
     return reinterpret_cast<void**>(&mRawPtr);
   }
 
   void
-  assign( T* newPtr )
+  assign(T* aNewPtr)
   {
     T* oldPtr = mRawPtr;
-    mRawPtr = newPtr;
+    mRawPtr = aNewPtr;
     delete [] oldPtr;
   }
 
 private:
   T* mRawPtr;
 
 public:
   typedef T element_type;
@@ -485,43 +488,43 @@ public:
   // Constructors
 
   nsAutoArrayPtr()
     : mRawPtr(0)
     // default constructor
   {
   }
 
-  nsAutoArrayPtr( T* aRawPtr )
+  nsAutoArrayPtr(T* aRawPtr)
     : mRawPtr(aRawPtr)
     // construct from a raw pointer (of the right type)
   {
   }
 
-  nsAutoArrayPtr( nsAutoArrayPtr<T>& aSmartPtr )
-    : mRawPtr( aSmartPtr.forget() )
+  nsAutoArrayPtr(nsAutoArrayPtr<T>& aSmartPtr)
+    : mRawPtr(aSmartPtr.forget())
     // Construct by transferring ownership from another smart pointer.
   {
   }
 
 
   // Assignment operators
 
   nsAutoArrayPtr<T>&
-  operator=( T* rhs )
+  operator=(T* aRhs)
   // assign from a raw pointer (of the right type)
   {
-    assign(rhs);
+    assign(aRhs);
     return *this;
   }
 
-  nsAutoArrayPtr<T>& operator=( nsAutoArrayPtr<T>& rhs )
+  nsAutoArrayPtr<T>& operator=(nsAutoArrayPtr<T>& aRhs)
   // assign by transferring ownership from another smart pointer.
   {
-    assign(rhs.forget());
+    assign(aRhs.forget());
     return *this;
   }
 
   // Other pointer operators
 
   T*
   get() const
   /*
@@ -553,17 +556,18 @@ public:
     T* temp = mRawPtr;
     mRawPtr = 0;
     return temp;
   }
 
   T*
   operator->() const
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsAutoArrayPtr with operator->().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsAutoArrayPtr with operator->().");
     return get();
   }
 
   nsAutoArrayPtr<T>*
   get_address()
   // This is not intended to be used by clients.  See |address_of|
   // below.
   {
@@ -577,17 +581,18 @@ public:
   {
     return this;
   }
 
 public:
   T&
   operator*() const
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsAutoArrayPtr with operator*().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsAutoArrayPtr with operator*().");
     return *get();
   }
 
   T**
   StartAssignment()
   {
 #ifndef NSCAP_FEATURE_INLINE_STARTASSIGNMENT
     return reinterpret_cast<T**>(begin_assignment());
@@ -608,25 +613,25 @@ public:
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 };
 
 template <class T>
 inline
 nsAutoArrayPtr<T>*
-address_of( nsAutoArrayPtr<T>& aPtr )
+address_of(nsAutoArrayPtr<T>& aPtr)
 {
   return aPtr.get_address();
 }
 
 template <class T>
 inline
 const nsAutoArrayPtr<T>*
-address_of( const nsAutoArrayPtr<T>& aPtr )
+address_of(const nsAutoArrayPtr<T>& aPtr)
 {
   return aPtr.get_address();
 }
 
 template <class T>
 class nsAutoArrayPtrGetterTransfers
 /*
   ...
@@ -643,17 +648,17 @@ class nsAutoArrayPtrGetterTransfers
   a |void**|, a |T**|, or an |nsISupports**| as needed, that the
   outer call (|GetTransferedPointer| in this case) can fill in.
 
   This type should be a nested class inside |nsAutoArrayPtr<T>|.
 */
 {
 public:
   explicit
-  nsAutoArrayPtrGetterTransfers( nsAutoArrayPtr<T>& aSmartPtr )
+  nsAutoArrayPtrGetterTransfers(nsAutoArrayPtr<T>& aSmartPtr)
     : mTargetSmartPtr(aSmartPtr)
   {
     // nothing else to do
   }
 
   operator void**()
   {
     return reinterpret_cast<void**>(mTargetSmartPtr.StartAssignment());
@@ -672,181 +677,181 @@ public:
 
 private:
   nsAutoArrayPtr<T>& mTargetSmartPtr;
 };
 
 template <class T>
 inline
 nsAutoArrayPtrGetterTransfers<T>
-getter_Transfers( nsAutoArrayPtr<T>& aSmartPtr )
+getter_Transfers(nsAutoArrayPtr<T>& aSmartPtr)
 /*
   Used around a |nsAutoArrayPtr| when
   ...makes the class |nsAutoArrayPtrGetterTransfers<T>| invisible.
 */
 {
   return nsAutoArrayPtrGetterTransfers<T>(aSmartPtr);
 }
 
 
 
 // Comparing two |nsAutoArrayPtr|s
 
 template <class T, class U>
 inline
 bool
-operator==( const nsAutoArrayPtr<T>& lhs, const nsAutoArrayPtr<U>& rhs )
+operator==(const nsAutoArrayPtr<T>& aLhs, const nsAutoArrayPtr<U>& aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == static_cast<const U*>(rhs.get());
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs.get());
 }
 
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsAutoArrayPtr<T>& lhs, const nsAutoArrayPtr<U>& rhs )
+operator!=(const nsAutoArrayPtr<T>& aLhs, const nsAutoArrayPtr<U>& aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != static_cast<const U*>(rhs.get());
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs.get());
 }
 
 
 // Comparing an |nsAutoArrayPtr| to a raw pointer
 
 template <class T, class U>
 inline
 bool
-operator==( const nsAutoArrayPtr<T>& lhs, const U* rhs )
+operator==(const nsAutoArrayPtr<T>& aLhs, const U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == static_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator==( const U* lhs, const nsAutoArrayPtr<T>& rhs )
+operator==(const U* aLhs, const nsAutoArrayPtr<T>& aRhs)
 {
-  return static_cast<const U*>(lhs) == static_cast<const T*>(rhs.get());
+  return static_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsAutoArrayPtr<T>& lhs, const U* rhs )
+operator!=(const nsAutoArrayPtr<T>& aLhs, const U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != static_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const U* lhs, const nsAutoArrayPtr<T>& rhs )
+operator!=(const U* aLhs, const nsAutoArrayPtr<T>& aRhs)
 {
-  return static_cast<const U*>(lhs) != static_cast<const T*>(rhs.get());
+  return static_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
 }
 
 // To avoid ambiguities caused by the presence of builtin |operator==|s
 // creating a situation where one of the |operator==| defined above
 // has a better conversion for one argument and the builtin has a
 // better conversion for the other argument, define additional
 // |operator==| without the |const| on the raw pointer.
 // See bug 65664 for details.
 
 #ifndef NSCAP_DONT_PROVIDE_NONCONST_OPEQ
 template <class T, class U>
 inline
 bool
-operator==( const nsAutoArrayPtr<T>& lhs, U* rhs )
+operator==(const nsAutoArrayPtr<T>& aLhs, U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == const_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) == const_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator==( U* lhs, const nsAutoArrayPtr<T>& rhs )
+operator==(U* aLhs, const nsAutoArrayPtr<T>& aRhs)
 {
-  return const_cast<const U*>(lhs) == static_cast<const T*>(rhs.get());
+  return const_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsAutoArrayPtr<T>& lhs, U* rhs )
+operator!=(const nsAutoArrayPtr<T>& aLhs, U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != const_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) != const_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( U* lhs, const nsAutoArrayPtr<T>& rhs )
+operator!=(U* aLhs, const nsAutoArrayPtr<T>& aRhs)
 {
-  return const_cast<const U*>(lhs) != static_cast<const T*>(rhs.get());
+  return const_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
 }
 #endif
 
 
 
 // Comparing an |nsAutoArrayPtr| to |0|
 
 template <class T>
 inline
 bool
-operator==( const nsAutoArrayPtr<T>& lhs, NSCAP_Zero* rhs )
+operator==(const nsAutoArrayPtr<T>& aLhs, NSCAP_Zero* aRhs)
 // specifically to allow |smartPtr == 0|
 {
-  return static_cast<const void*>(lhs.get()) == reinterpret_cast<const void*>(rhs);
+  return static_cast<const void*>(aLhs.get()) == reinterpret_cast<const void*>(aRhs);
 }
 
 template <class T>
 inline
 bool
-operator==( NSCAP_Zero* lhs, const nsAutoArrayPtr<T>& rhs )
+operator==(NSCAP_Zero* aLhs, const nsAutoArrayPtr<T>& aRhs)
 // specifically to allow |0 == smartPtr|
 {
-  return reinterpret_cast<const void*>(lhs) == static_cast<const void*>(rhs.get());
+  return reinterpret_cast<const void*>(aLhs) == static_cast<const void*>(aRhs.get());
 }
 
 template <class T>
 inline
 bool
-operator!=( const nsAutoArrayPtr<T>& lhs, NSCAP_Zero* rhs )
+operator!=(const nsAutoArrayPtr<T>& aLhs, NSCAP_Zero* aRhs)
 // specifically to allow |smartPtr != 0|
 {
-  return static_cast<const void*>(lhs.get()) != reinterpret_cast<const void*>(rhs);
+  return static_cast<const void*>(aLhs.get()) != reinterpret_cast<const void*>(aRhs);
 }
 
 template <class T>
 inline
 bool
-operator!=( NSCAP_Zero* lhs, const nsAutoArrayPtr<T>& rhs )
+operator!=(NSCAP_Zero* aLhs, const nsAutoArrayPtr<T>& aRhs)
 // specifically to allow |0 != smartPtr|
 {
-  return reinterpret_cast<const void*>(lhs) != static_cast<const void*>(rhs.get());
+  return reinterpret_cast<const void*>(aLhs) != static_cast<const void*>(aRhs.get());
 }
 
 
 #ifdef HAVE_CPP_TROUBLE_COMPARING_TO_ZERO
 
 // We need to explicitly define comparison operators for `int'
 // because the compiler is lame.
 
 template <class T>
 inline
 bool
-operator==( const nsAutoArrayPtr<T>& lhs, int rhs )
+operator==(const nsAutoArrayPtr<T>& lhs, int rhs)
 // specifically to allow |smartPtr == 0|
 {
   return static_cast<const void*>(lhs.get()) == reinterpret_cast<const void*>(rhs);
 }
 
 template <class T>
 inline
 bool
-operator==( int lhs, const nsAutoArrayPtr<T>& rhs )
+operator==(int lhs, const nsAutoArrayPtr<T>& rhs)
 // specifically to allow |0 == smartPtr|
 {
   return reinterpret_cast<const void*>(lhs) == static_cast<const void*>(rhs.get());
 }
 
 #endif // !defined(HAVE_CPP_TROUBLE_COMPARING_TO_ZERO)
 
 
@@ -855,198 +860,205 @@ operator==( int lhs, const nsAutoArrayPt
 // template <class T> class nsRefPtrGetterAddRefs;
 
 template <class T>
 class nsRefPtr
 {
 private:
 
   void
-  assign_with_AddRef( T* rawPtr )
+  assign_with_AddRef(T* aRawPtr)
   {
-    if ( rawPtr )
-      rawPtr->AddRef();
-    assign_assuming_AddRef(rawPtr);
+    if (aRawPtr) {
+      aRawPtr->AddRef();
+    }
+    assign_assuming_AddRef(aRawPtr);
   }
 
   void**
   begin_assignment()
   {
     assign_assuming_AddRef(0);
     return reinterpret_cast<void**>(&mRawPtr);
   }
 
   void
-  assign_assuming_AddRef( T* newPtr )
+  assign_assuming_AddRef(T* aNewPtr)
   {
     T* oldPtr = mRawPtr;
-    mRawPtr = newPtr;
-    if ( oldPtr )
+    mRawPtr = aNewPtr;
+    if (oldPtr) {
       oldPtr->Release();
+    }
   }
 
 private:
   T* mRawPtr;
 
 public:
   typedef T element_type;
 
   ~nsRefPtr()
   {
-    if ( mRawPtr )
+    if (mRawPtr) {
       mRawPtr->Release();
+    }
   }
 
   // Constructors
 
   nsRefPtr()
     : mRawPtr(0)
     // default constructor
   {
   }
 
   nsRefPtr(const nsRefPtr<T>& aSmartPtr)
     : mRawPtr(aSmartPtr.mRawPtr)
     // copy-constructor
   {
-    if ( mRawPtr )
+    if (mRawPtr) {
       mRawPtr->AddRef();
+    }
   }
 
   nsRefPtr(nsRefPtr<T>&& aRefPtr)
     : mRawPtr(aRefPtr.mRawPtr)
   {
     aRefPtr.mRawPtr = nullptr;
   }
 
   // construct from a raw pointer (of the right type)
 
   nsRefPtr(T* aRawPtr)
     : mRawPtr(aRawPtr)
   {
-    if ( mRawPtr )
+    if (mRawPtr) {
       mRawPtr->AddRef();
+    }
   }
 
   template <typename I>
-  nsRefPtr( already_AddRefed<I>& aSmartPtr )
+  nsRefPtr(already_AddRefed<I>& aSmartPtr)
     : mRawPtr(aSmartPtr.take())
     // construct from |already_AddRefed|
   {
   }
 
   template <typename I>
-  nsRefPtr( already_AddRefed<I>&& aSmartPtr )
+  nsRefPtr(already_AddRefed<I>&& aSmartPtr)
     : mRawPtr(aSmartPtr.take())
     // construct from |otherRefPtr.forget()|
   {
   }
 
-  nsRefPtr( const nsCOMPtr_helper& helper )
+  nsRefPtr(const nsCOMPtr_helper& aHelper)
   {
     void* newRawPtr;
-    if (NS_FAILED(helper(NS_GET_TEMPLATE_IID(T), &newRawPtr)))
+    if (NS_FAILED(aHelper(NS_GET_TEMPLATE_IID(T), &newRawPtr))) {
       newRawPtr = 0;
+    }
     mRawPtr = static_cast<T*>(newRawPtr);
   }
 
   // Assignment operators
 
   nsRefPtr<T>&
-  operator=(const nsRefPtr<T>& rhs)
+  operator=(const nsRefPtr<T>& aRhs)
   // copy assignment operator
   {
-    assign_with_AddRef(rhs.mRawPtr);
+    assign_with_AddRef(aRhs.mRawPtr);
     return *this;
   }
 
   nsRefPtr<T>&
-  operator=( T* rhs )
+  operator=(T* aRhs)
   // assign from a raw pointer (of the right type)
   {
-    assign_with_AddRef(rhs);
+    assign_with_AddRef(aRhs);
     return *this;
   }
 
   template <typename I>
   nsRefPtr<T>&
-  operator=( already_AddRefed<I>& rhs )
+  operator=(already_AddRefed<I>& aRhs)
   // assign from |already_AddRefed|
   {
-    assign_assuming_AddRef(rhs.take());
+    assign_assuming_AddRef(aRhs.take());
     return *this;
   }
 
   template <typename I>
   nsRefPtr<T>&
-  operator=( already_AddRefed<I>&& rhs )
+  operator=(already_AddRefed<I>&& aRhs)
   // assign from |otherRefPtr.forget()|
   {
-    assign_assuming_AddRef(rhs.take());
+    assign_assuming_AddRef(aRhs.take());
     return *this;
   }
 
   nsRefPtr<T>&
-  operator=( const nsCOMPtr_helper& helper )
+  operator=(const nsCOMPtr_helper& aHelper)
   {
     void* newRawPtr;
-    if (NS_FAILED(helper(NS_GET_TEMPLATE_IID(T), &newRawPtr)))
+    if (NS_FAILED(aHelper(NS_GET_TEMPLATE_IID(T), &newRawPtr))) {
       newRawPtr = 0;
+    }
     assign_assuming_AddRef(static_cast<T*>(newRawPtr));
     return *this;
   }
 
   nsRefPtr<T>&
-  operator=(nsRefPtr<T>&& aRefPtr)
+  operator=(nsRefPtr<T>&& aRefPtr)
   {
     assign_assuming_AddRef(aRefPtr.mRawPtr);
     aRefPtr.mRawPtr = nullptr;
     return *this;
   }
 
   // Other pointer operators
 
   void
-  swap( nsRefPtr<T>& rhs )
-  // ...exchange ownership with |rhs|; can save a pair of refcount operations
+  swap(nsRefPtr<T>& aRhs)
+  // ...exchange ownership with |aRhs|; can save a pair of refcount operations
   {
-    T* temp = rhs.mRawPtr;
-    rhs.mRawPtr = mRawPtr;
+    T* temp = aRhs.mRawPtr;
+    aRhs.mRawPtr = mRawPtr;
     mRawPtr = temp;
   }
 
   void
-  swap( T*& rhs )
-  // ...exchange ownership with |rhs|; can save a pair of refcount operations
+  swap(T*& aRhs)
+  // ...exchange ownership with |aRhs|; can save a pair of refcount operations
   {
-    T* temp = rhs;
-    rhs = mRawPtr;
+    T* temp = aRhs;
+    aRhs = mRawPtr;
     mRawPtr = temp;
   }
 
   already_AddRefed<T>
   forget()
   // return the value of mRawPtr and null out mRawPtr. Useful for
   // already_AddRefed return values.
   {
     T* temp = 0;
     swap(temp);
     return already_AddRefed<T>(temp);
   }
 
   template <typename I>
   void
-  forget( I** rhs)
-  // Set the target of rhs to the value of mRawPtr and null out mRawPtr.
+  forget(I** aRhs)
+  // Set the target of aRhs to the value of mRawPtr and null out mRawPtr.
   // Useful to avoid unnecessary AddRef/Release pairs with "out"
-  // parameters where rhs bay be a T** or an I** where I is a base class
+  // parameters where aRhs may be a T** or an I** where I is a base class
   // of T.
   {
-    NS_ASSERTION(rhs, "Null pointer passed to forget!");
-    *rhs = mRawPtr;
+    NS_ASSERTION(aRhs, "Null pointer passed to forget!");
+    *aRhs = mRawPtr;
     mRawPtr = 0;
   }
 
   T*
   get() const
   /*
     Prefer the implicit conversion provided automatically by |operator T*() const|.
     Use |get()| to resolve ambiguity or to get a castable pointer.
@@ -1066,29 +1078,31 @@ public:
   */
   {
     return get();
   }
 
   T*
   operator->() const
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsRefPtr with operator->().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsRefPtr with operator->().");
     return get();
   }
 
   // This operator is needed for gcc <= 4.0.* and for Sun Studio; it
   // causes internal compiler errors for some MSVC versions.  (It's not
   // clear to me whether it should be needed.)
 #ifndef _MSC_VER
   template <class U, class V>
   U&
   operator->*(U V::* aMember)
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsRefPtr with operator->*().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsRefPtr with operator->*().");
     return get()->*aMember;
   }
 #endif
 
   nsRefPtr<T>*
   get_address()
   // This is not intended to be used by clients.  See |address_of|
   // below.
@@ -1103,17 +1117,18 @@ public:
   {
     return this;
   }
 
 public:
   T&
   operator*() const
   {
-    NS_PRECONDITION(mRawPtr != 0, "You can't dereference a NULL nsRefPtr with operator*().");
+    NS_PRECONDITION(mRawPtr != 0,
+                    "You can't dereference a NULL nsRefPtr with operator*().");
     return *get();
   }
 
   T**
   StartAssignment()
   {
 #ifndef NSCAP_FEATURE_INLINE_STARTASSIGNMENT
     return reinterpret_cast<T**>(begin_assignment());
@@ -1139,25 +1154,25 @@ ImplCycleCollectionTraverse(nsCycleColle
                             uint32_t aFlags = 0)
 {
   CycleCollectionNoteChild(aCallback, aField.get(), aName, aFlags);
 }
 
 template <class T>
 inline
 nsRefPtr<T>*
-address_of( nsRefPtr<T>& aPtr )
+address_of(nsRefPtr<T>& aPtr)
 {
   return aPtr.get_address();
 }
 
 template <class T>
 inline
 const nsRefPtr<T>*
-address_of( const nsRefPtr<T>& aPtr )
+address_of(const nsRefPtr<T>& aPtr)
 {
   return aPtr.get_address();
 }
 
 template <class T>
 class nsRefPtrGetterAddRefs
 /*
   ...
@@ -1174,17 +1189,17 @@ class nsRefPtrGetterAddRefs
   a |void**|, a |T**|, or an |nsISupports**| as needed, that the
   outer call (|GetAddRefedPointer| in this case) can fill in.
 
   This type should be a nested class inside |nsRefPtr<T>|.
 */
 {
 public:
   explicit
-  nsRefPtrGetterAddRefs( nsRefPtr<T>& aSmartPtr )
+  nsRefPtrGetterAddRefs(nsRefPtr<T>& aSmartPtr)
     : mTargetSmartPtr(aSmartPtr)
   {
     // nothing else to do
   }
 
   operator void**()
   {
     return reinterpret_cast<void**>(mTargetSmartPtr.StartAssignment());
@@ -1203,226 +1218,235 @@ public:
 
 private:
   nsRefPtr<T>& mTargetSmartPtr;
 };
 
 template <class T>
 inline
 nsRefPtrGetterAddRefs<T>
-getter_AddRefs( nsRefPtr<T>& aSmartPtr )
+getter_AddRefs(nsRefPtr<T>& aSmartPtr)
 /*
   Used around a |nsRefPtr| when
   ...makes the class |nsRefPtrGetterAddRefs<T>| invisible.
 */
 {
   return nsRefPtrGetterAddRefs<T>(aSmartPtr);
 }
 
 
 
 // Comparing two |nsRefPtr|s
 
 template <class T, class U>
 inline
 bool
-operator==( const nsRefPtr<T>& lhs, const nsRefPtr<U>& rhs )
+operator==(const nsRefPtr<T>& aLhs, const nsRefPtr<U>& aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == static_cast<const U*>(rhs.get());
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs.get());
 }
 
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsRefPtr<T>& lhs, const nsRefPtr<U>& rhs )
+operator!=(const nsRefPtr<T>& aLhs, const nsRefPtr<U>& aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != static_cast<const U*>(rhs.get());
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs.get());
 }
 
 
 // Comparing an |nsRefPtr| to a raw pointer
 
 template <class T, class U>
 inline
 bool
-operator==( const nsRefPtr<T>& lhs, const U* rhs )
+operator==(const nsRefPtr<T>& aLhs, const U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == static_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) == static_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator==( const U* lhs, const nsRefPtr<T>& rhs )
+operator==(const U* aLhs, const nsRefPtr<T>& aRhs)
 {
-  return static_cast<const U*>(lhs) == static_cast<const T*>(rhs.get());
+  return static_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsRefPtr<T>& lhs, const U* rhs )
+operator!=(const nsRefPtr<T>& aLhs, const U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != static_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) != static_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const U* lhs, const nsRefPtr<T>& rhs )
+operator!=(const U* aLhs, const nsRefPtr<T>& aRhs)
 {
-  return static_cast<const U*>(lhs) != static_cast<const T*>(rhs.get());
+  return static_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
 }
 
 // To avoid ambiguities caused by the presence of builtin |operator==|s
 // creating a situation where one of the |operator==| defined above
 // has a better conversion for one argument and the builtin has a
 // better conversion for the other argument, define additional
 // |operator==| without the |const| on the raw pointer.
 // See bug 65664 for details.
 
 #ifndef NSCAP_DONT_PROVIDE_NONCONST_OPEQ
 template <class T, class U>
 inline
 bool
-operator==( const nsRefPtr<T>& lhs, U* rhs )
+operator==(const nsRefPtr<T>& aLhs, U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) == const_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) == const_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator==( U* lhs, const nsRefPtr<T>& rhs )
+operator==(U* aLhs, const nsRefPtr<T>& aRhs)
 {
-  return const_cast<const U*>(lhs) == static_cast<const T*>(rhs.get());
+  return const_cast<const U*>(aLhs) == static_cast<const T*>(aRhs.get());
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( const nsRefPtr<T>& lhs, U* rhs )
+operator!=(const nsRefPtr<T>& aLhs, U* aRhs)
 {
-  return static_cast<const T*>(lhs.get()) != const_cast<const U*>(rhs);
+  return static_cast<const T*>(aLhs.get()) != const_cast<const U*>(aRhs);
 }
 
 template <class T, class U>
 inline
 bool
-operator!=( U* lhs, const nsRefPtr<T>& rhs )
+operator!=(U* aLhs, const nsRefPtr<T>& aRhs)
 {
-  return const_cast<const U*>(lhs) != static_cast<const T*>(rhs.get());
+  return const_cast<const U*>(aLhs) != static_cast<const T*>(aRhs.get());
 }
 #endif
 
 
 
 // Comparing an |nsRefPtr| to |0|
 
 template <class T>
 inline
 bool
-operator==( const nsRefPtr<T>& lhs, NSCAP_Zero* rhs )
+operator==(const nsRefPtr<T>& aLhs, NSCAP_Zero* aRhs)
 // specifically to allow |smartPtr == 0|
 {
-  return static_cast<const void*>(lhs.get()) == reinterpret_cast<const void*>(rhs);
+  return static_cast<const void*>(aLhs.get()) == reinterpret_cast<const void*>(aRhs);
 }
 
 template <class T>
 inline
 bool
-operator==( NSCAP_Zero* lhs, const nsRefPtr<T>& rhs )
+operator==(NSCAP_Zero* aLhs, const nsRefPtr<T>& aRhs)
 // specifically to allow |0 == smartPtr|
 {
-  return reinterpret_cast<const void*>(lhs) == static_cast<const void*>(rhs.get());
+  return reinterpret_cast<const void*>(aLhs) == static_cast<const void*>(aRhs.get());
 }
 
 template <class T>
 inline
 bool
-operator!=( const nsRefPtr<T>& lhs, NSCAP_Zero* rhs )
+operator!=(const nsRefPtr<T>& aLhs, NSCAP_Zero* aRhs)
 // specifically to allow |smartPtr != 0|
 {
-  return static_cast<const void*>(lhs.get()) != reinterpret_cast<const void*>(rhs);
+  return static_cast<const void*>(aLhs.get()) != reinterpret_cast<const void*>(aRhs);
 }
 
 template <class T>
 inline
 bool
-operator!=( NSCAP_Zero* lhs, const nsRefPtr<T>& rhs )
+operator!=(NSCAP_Zero* aLhs, const nsRefPtr<T>& aRhs)
 // specifically to allow |0 != smartPtr|
 {
-  return reinterpret_cast<const void*>(lhs) != static_cast<const void*>(rhs.get());
+  return reinterpret_cast<const void*>(aLhs) != static_cast<const void*>(aRhs.get());
 }
 
 
 #ifdef HAVE_CPP_TROUBLE_COMPARING_TO_ZERO
 
 // We need to explicitly define comparison operators for `int'
 // because the compiler is lame.
 
 template <class T>
 inline
 bool
-operator==( const nsRefPtr<T>& lhs, int rhs )
+operator==(const nsRefPtr<T>& lhs, int rhs)
 // specifically to allow |smartPtr == 0|
 {
   return static_cast<const void*>(lhs.get()) == reinterpret_cast<const void*>(rhs);
 }
 
 template <class T>
 inline
 bool
-operator==( int lhs, const nsRefPtr<T>& rhs )
+operator==(int lhs, const nsRefPtr<T>& rhs)
 // specifically to allow |0 == smartPtr|
 {
   return reinterpret_cast<const void*>(lhs) == static_cast<const void*>(rhs.get());
 }
 
 #endif // !defined(HAVE_CPP_TROUBLE_COMPARING_TO_ZERO)
 
 template <class SourceType, class DestinationType>
 inline
 nsresult
-CallQueryInterface( nsRefPtr<SourceType>& aSourcePtr, DestinationType** aDestPtr )
+CallQueryInterface(nsRefPtr<SourceType>& aSourcePtr, DestinationType** aDestPtr)
 {
   return CallQueryInterface(aSourcePtr.get(), aDestPtr);
 }
 
 /*****************************************************************************/
 
 template<class T>
 class nsQueryObject : public nsCOMPtr_helper
 {
 public:
   nsQueryObject(T* aRawPtr)
-    : mRawPtr(aRawPtr) {}
+    : mRawPtr(aRawPtr)
+  {
+  }
 
-  virtual nsresult NS_FASTCALL operator()( const nsIID& aIID, void** aResult ) const {
+  virtual nsresult NS_FASTCALL operator()(const nsIID& aIID,
+                                          void** aResult) const
+  {
     nsresult status = mRawPtr ? mRawPtr->QueryInterface(aIID, aResult)
                               : NS_ERROR_NULL_POINTER;
     return status;
   }
 private:
   T* mRawPtr;
 };
 
 template<class T>
 class nsQueryObjectWithError : public nsCOMPtr_helper
 {
 public:
   nsQueryObjectWithError(T* aRawPtr, nsresult* aErrorPtr)
-    : mRawPtr(aRawPtr), mErrorPtr(aErrorPtr) {}
+    : mRawPtr(aRawPtr), mErrorPtr(aErrorPtr)
+  {
+  }
 
-  virtual nsresult NS_FASTCALL operator()( const nsIID& aIID, void** aResult ) const {
+  virtual nsresult NS_FASTCALL operator()(const nsIID& aIID,
+                                          void** aResult) const
+  {
     nsresult status = mRawPtr ? mRawPtr->QueryInterface(aIID, aResult)
                               : NS_ERROR_NULL_POINTER;
-    if (mErrorPtr)
+    if (mErrorPtr) {
       *mErrorPtr = status;
+    }
     return status;
   }
 private:
   T* mRawPtr;
   nsresult* mErrorPtr;
 };
 
 template<class T>
--- a/xpcom/base/nsAutoRef.h
+++ b/xpcom/base/nsAutoRef.h
@@ -246,18 +246,19 @@ public:
   // or a raw ref copies and increments the ref count.
   nsCountedRef(const ThisClass& aRefToCopy)
   {
     SimpleRef::operator=(aRefToCopy);
     SafeAddRef();
   }
   ThisClass& operator=(const ThisClass& aRefToCopy)
   {
-    if (this == &aRefToCopy)
+    if (this == &aRefToCopy) {
       return *this;
+    }
 
     this->SafeRelease();
     SimpleRef::operator=(aRefToCopy);
     SafeAddRef();
     return *this;
   }
 
   // Implicit conversion from another smart ref argument (to a raw ref) is
@@ -286,18 +287,19 @@ public:
     BaseClass::operator=(aReturning);
     return *this;
   }
 
 protected:
   // Increase the reference count if there is a resource.
   void SafeAddRef()
   {
-    if (this->HaveResource())
+    if (this->HaveResource()) {
       this->AddRef(this->get());
+    }
   }
 };
 
 /**
  * template <class T> class nsReturnRef
  *
  * A type for function return values that hold a reference to a resource that
  * must be released.  See also |nsAutoRef<T>::out()|.
@@ -462,17 +464,20 @@ template <class T> class nsAutoRefTraits
 
 template <class T>
 class nsPointerRefTraits
 {
 public:
   // The handle is a pointer to T.
   typedef T* RawRef;
   // A nullptr does not have a resource.
-  static RawRef Void() { return nullptr; }
+  static RawRef Void()
+  {
+    return nullptr;
+  }
 };
 
 /**
  * template <class T> class nsSimpleRef
  *
  * Constructs a non-smart reference, and provides methods to test whether
  * there is an associated resource and (if so) get its raw handle.
  *
@@ -653,14 +658,15 @@ protected:
     SafeRelease();
     LocalSimpleRef ref(aRefToRelease);
     SimpleRef::operator=(ref);
   }
 
   // Release a resource if there is one.
   void SafeRelease()
   {
-    if (this->HaveResource())
+    if (this->HaveResource()) {
       this->Release(this->get());
+    }
   }
 };
 
 #endif // !defined(nsAutoRef_h_)
--- a/xpcom/base/nsConsoleMessage.cpp
+++ b/xpcom/base/nsConsoleMessage.cpp
@@ -9,37 +9,37 @@
  */
 
 #include "nsConsoleMessage.h"
 #include "jsapi.h"
 
 NS_IMPL_ISUPPORTS(nsConsoleMessage, nsIConsoleMessage)
 
 nsConsoleMessage::nsConsoleMessage()
-  : mTimeStamp(0),
-    mMessage()
+  : mTimeStamp(0)
+  , mMessage()
 {
 }
 
-nsConsoleMessage::nsConsoleMessage(const char16_t *message)
+nsConsoleMessage::nsConsoleMessage(const char16_t* aMessage)
 {
   mTimeStamp = JS_Now() / 1000;
-  mMessage.Assign(message);
+  mMessage.Assign(aMessage);
 }
 
 NS_IMETHODIMP
-nsConsoleMessage::GetMessageMoz(char16_t **result)
+nsConsoleMessage::GetMessageMoz(char16_t** aResult)
 {
-  *result = ToNewUnicode(mMessage);
+  *aResult = ToNewUnicode(mMessage);
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsConsoleMessage::GetTimeStamp(int64_t *aTimeStamp)
+nsConsoleMessage::GetTimeStamp(int64_t* aTimeStamp)
 {
   *aTimeStamp = mTimeStamp;
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsConsoleMessage::ToString(nsACString& /*UTF8*/ aResult)
 {
--- a/xpcom/base/nsConsoleMessage.h
+++ b/xpcom/base/nsConsoleMessage.h
@@ -7,24 +7,27 @@
 #ifndef __nsconsolemessage_h__
 #define __nsconsolemessage_h__
 
 #include "mozilla/Attributes.h"
 
 #include "nsIConsoleMessage.h"
 #include "nsString.h"
 
-class nsConsoleMessage MOZ_FINAL : public nsIConsoleMessage {
+class nsConsoleMessage MOZ_FINAL : public nsIConsoleMessage
+{
 public:
   nsConsoleMessage();
-  nsConsoleMessage(const char16_t *message);
+  nsConsoleMessage(const char16_t* aMessage);
 
   NS_DECL_THREADSAFE_ISUPPORTS
   NS_DECL_NSICONSOLEMESSAGE
 
 private:
-  ~nsConsoleMessage() {}
+  ~nsConsoleMessage()
+  {
+  }
 
   int64_t mTimeStamp;
   nsString mMessage;
 };
 
 #endif /* __nsconsolemessage_h__ */
--- a/xpcom/base/nsConsoleService.cpp
+++ b/xpcom/base/nsConsoleService.cpp
@@ -57,24 +57,27 @@ nsConsoleService::nsConsoleService()
 nsConsoleService::~nsConsoleService()
 {
   uint32_t i = 0;
   while (i < mBufferSize && mMessages[i] != nullptr) {
     NS_RELEASE(mMessages[i]);
     i++;
   }
 
-  if (mMessages)
+  if (mMessages) {
     nsMemory::Free(mMessages);
+  }
 }
 
 class AddConsolePrefWatchers : public nsRunnable
 {
 public:
-  AddConsolePrefWatchers(nsConsoleService* aConsole) : mConsole(aConsole) {}
+  AddConsolePrefWatchers(nsConsoleService* aConsole) : mConsole(aConsole)
+  {
+  }
 
   NS_IMETHOD Run()
   {
     Preferences::AddBoolVarCache(&sLoggingEnabled, "consoleservice.enabled", true);
     Preferences::AddBoolVarCache(&sLoggingBuffered, "consoleservice.buffered", true);
     if (!sLoggingBuffered) {
       mConsole->Reset();
     }
@@ -83,219 +86,224 @@ public:
 
 private:
   nsRefPtr<nsConsoleService> mConsole;
 };
 
 nsresult
 nsConsoleService::Init()
 {
-  mMessages = (nsIConsoleMessage **)
-    nsMemory::Alloc(mBufferSize * sizeof(nsIConsoleMessage *));
-  if (!mMessages)
+  mMessages = (nsIConsoleMessage**)
+    nsMemory::Alloc(mBufferSize * sizeof(nsIConsoleMessage*));
+  if (!mMessages) {
     return NS_ERROR_OUT_OF_MEMORY;
+  }
 
   // Array elements should be 0 initially for circular buffer algorithm.
-  memset(mMessages, 0, mBufferSize * sizeof(nsIConsoleMessage *));
+  memset(mMessages, 0, mBufferSize * sizeof(nsIConsoleMessage*));
 
   NS_DispatchToMainThread(new AddConsolePrefWatchers(this));
 
   return NS_OK;
 }
 
 namespace {
 
 class LogMessageRunnable : public nsRunnable
 {
 public:
-  LogMessageRunnable(nsIConsoleMessage* message, nsConsoleService* service)
-    : mMessage(message)
-    , mService(service)
+  LogMessageRunnable(nsIConsoleMessage* aMessage, nsConsoleService* aService)
+    : mMessage(aMessage)
+    , mService(aService)
   { }
 
   NS_DECL_NSIRUNNABLE
 
 private:
   nsCOMPtr<nsIConsoleMessage> mMessage;
   nsRefPtr<nsConsoleService> mService;
 };
 
 typedef nsCOMArray<nsIConsoleListener> ListenerArrayType;
 
 PLDHashOperator
 CollectCurrentListeners(nsISupports* aKey, nsIConsoleListener* aValue,
-                        void* closure)
+                        void* aClosure)
 {
-  ListenerArrayType* listeners = static_cast<ListenerArrayType*>(closure);
+  ListenerArrayType* listeners = static_cast<ListenerArrayType*>(aClosure);
   listeners->AppendObject(aValue);
   return PL_DHASH_NEXT;
 }
 
 NS_IMETHODIMP
 LogMessageRunnable::Run()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   // Snapshot of listeners so that we don't reenter this hash during
   // enumeration.
   nsCOMArray<nsIConsoleListener> listeners;
   mService->EnumerateListeners(CollectCurrentListeners, &listeners);
 
   mService->SetIsDelivering();
 
-  for (int32_t i = 0; i < listeners.Count(); ++i)
+  for (int32_t i = 0; i < listeners.Count(); ++i) {
     listeners[i]->Observe(mMessage);
+  }
 
   mService->SetDoneDelivering();
 
   return NS_OK;
 }
 
 } // anonymous namespace
 
 // nsIConsoleService methods
 NS_IMETHODIMP
-nsConsoleService::LogMessage(nsIConsoleMessage *message)
+nsConsoleService::LogMessage(nsIConsoleMessage* aMessage)
 {
-  return LogMessageWithMode(message, OutputToLog);
+  return LogMessageWithMode(aMessage, OutputToLog);
 }
 
 nsresult
-nsConsoleService::LogMessageWithMode(nsIConsoleMessage *message, nsConsoleService::OutputMode outputMode)
+nsConsoleService::LogMessageWithMode(nsIConsoleMessage* aMessage,
+                                     nsConsoleService::OutputMode aOutputMode)
 {
-  if (message == nullptr)
+  if (!aMessage) {
     return NS_ERROR_INVALID_ARG;
+  }
 
   if (!sLoggingEnabled) {
     return NS_OK;
   }
 
   if (NS_IsMainThread() && mDeliveringMessage) {
     nsCString msg;
-    message->ToString(msg);
+    aMessage->ToString(msg);
     NS_WARNING(nsPrintfCString("Reentrancy error: some client attempted "
       "to display a message to the console while in a console listener. "
       "The following message was discarded: \"%s\"", msg.get()).get());
     return NS_ERROR_FAILURE;
   }
 
   nsRefPtr<LogMessageRunnable> r;
-  nsIConsoleMessage *retiredMessage;
+  nsIConsoleMessage* retiredMessage;
 
   if (sLoggingBuffered) {
-    NS_ADDREF(message); // early, in case it's same as replaced below.
+    NS_ADDREF(aMessage); // early, in case it's same as replaced below.
   }
 
   /*
    * Lock while updating buffer, and while taking snapshot of
    * listeners array.
    */
   {
     MutexAutoLock lock(mLock);
 
 #if defined(ANDROID)
-    if (outputMode == OutputToLog)
-    {
+    if (aOutputMode == OutputToLog) {
       nsCString msg;
-      message->ToString(msg);
+      aMessage->ToString(msg);
       __android_log_print(ANDROID_LOG_ERROR, "GeckoConsole",
                           "%s", msg.get());
     }
 #endif
 #ifdef XP_WIN
     if (IsDebuggerPresent()) {
       nsString msg;
-      message->GetMessageMoz(getter_Copies(msg));
+      aMessage->GetMessageMoz(getter_Copies(msg));
       msg.AppendLiteral("\n");
       OutputDebugStringW(msg.get());
     }
 #endif
 
     /*
      * If there's already a message in the slot we're about to replace,
      * we've wrapped around, and we need to release the old message.  We
      * save a pointer to it, so we can release below outside the lock.
      */
     retiredMessage = mMessages[mCurrent];
 
     if (sLoggingBuffered) {
-      mMessages[mCurrent++] = message;
+      mMessages[mCurrent++] = aMessage;
       if (mCurrent == mBufferSize) {
         mCurrent = 0; // wrap around.
         mFull = true;
       }
     }
 
     if (mListeners.Count() > 0) {
-      r = new LogMessageRunnable(message, this);
+      r = new LogMessageRunnable(aMessage, this);
     }
   }
 
-  if (retiredMessage != nullptr)
+  if (retiredMessage) {
     NS_RELEASE(retiredMessage);
+  }
 
-  if (r)
+  if (r) {
     NS_DispatchToMainThread(r);
+  }
 
   return NS_OK;
 }
 
 void
 nsConsoleService::EnumerateListeners(ListenerHash::EnumReadFunction aFunction,
                                      void* aClosure)
 {
   MutexAutoLock lock(mLock);
   mListeners.EnumerateRead(aFunction, aClosure);
 }
 
 NS_IMETHODIMP
-nsConsoleService::LogStringMessage(const char16_t *message)
+nsConsoleService::LogStringMessage(const char16_t* aMessage)
 {
   if (!sLoggingEnabled) {
     return NS_OK;
   }
 
-  nsRefPtr<nsConsoleMessage> msg(new nsConsoleMessage(message));
+  nsRefPtr<nsConsoleMessage> msg(new nsConsoleMessage(aMessage));
   return this->LogMessage(msg);
 }
 
 NS_IMETHODIMP
-nsConsoleService::GetMessageArray(uint32_t *count, nsIConsoleMessage ***messages)
+nsConsoleService::GetMessageArray(uint32_t* aCount, nsIConsoleMessage*** aMessages)
 {
-  nsIConsoleMessage **messageArray;
+  nsIConsoleMessage** messageArray;
 
   /*
    * Lock the whole method, as we don't want anyone mucking with mCurrent or
    * mFull while we're copying out the buffer.
    */
   MutexAutoLock lock(mLock);
 
   if (mCurrent == 0 && !mFull) {
     /*
      * Make a 1-length output array so that nobody gets confused,
      * and return a count of 0.  This should result in a 0-length
      * array object when called from script.
      */
-    messageArray = (nsIConsoleMessage **)
-      nsMemory::Alloc(sizeof (nsIConsoleMessage *));
+    messageArray = (nsIConsoleMessage**)
+      nsMemory::Alloc(sizeof(nsIConsoleMessage*));
     *messageArray = nullptr;
-    *messages = messageArray;
-    *count = 0;
+    *aMessages = messageArray;
+    *aCount = 0;
 
     return NS_OK;
   }
 
   uint32_t resultSize = mFull ? mBufferSize : mCurrent;
   messageArray =
-    (nsIConsoleMessage **)nsMemory::Alloc((sizeof (nsIConsoleMessage *))
-                                          * resultSize);
+    (nsIConsoleMessage**)nsMemory::Alloc((sizeof(nsIConsoleMessage*))
+                                         * resultSize);
 
-  if (messageArray == nullptr) {
-    *messages = nullptr;
-    *count = 0;
+  if (!messageArray) {
+    *aMessages = nullptr;
+    *aCount = 0;
     return NS_ERROR_FAILURE;
   }
 
   uint32_t i;
   if (mFull) {
     for (i = 0; i < mBufferSize; i++) {
       // if full, fill the buffer starting from mCurrent (which'll be
       // oldest) wrapping around the buffer to the most recent.
@@ -303,50 +311,50 @@ nsConsoleService::GetMessageArray(uint32
       NS_ADDREF(messageArray[i]);
     }
   } else {
     for (i = 0; i < mCurrent; i++) {
       messageArray[i] = mMessages[i];
       NS_ADDREF(messageArray[i]);
     }
   }
-  *count = resultSize;
-  *messages = messageArray;
+  *aCount = resultSize;
+  *aMessages = messageArray;
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsConsoleService::RegisterListener(nsIConsoleListener *listener)
+nsConsoleService::RegisterListener(nsIConsoleListener* aListener)
 {
   if (!NS_IsMainThread()) {
     NS_ERROR("nsConsoleService::RegisterListener is main thread only.");
     return NS_ERROR_NOT_SAME_THREAD;
   }
 
-  nsCOMPtr<nsISupports> canonical = do_QueryInterface(listener);
+  nsCOMPtr<nsISupports> canonical = do_QueryInterface(aListener);
 
   MutexAutoLock lock(mLock);
   if (mListeners.GetWeak(canonical)) {
     // Reregistering a listener isn't good
     return NS_ERROR_FAILURE;
   }
-  mListeners.Put(canonical, listener);
+  mListeners.Put(canonical, aListener);
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsConsoleService::UnregisterListener(nsIConsoleListener *listener)
+nsConsoleService::UnregisterListener(nsIConsoleListener* aListener)
 {
   if (!NS_IsMainThread()) {
     NS_ERROR("nsConsoleService::UnregisterListener is main thread only.");
     return NS_ERROR_NOT_SAME_THREAD;
   }
 
-  nsCOMPtr<nsISupports> canonical = do_QueryInterface(listener);
+  nsCOMPtr<nsISupports> canonical = do_QueryInterface(aListener);
 
   MutexAutoLock lock(mLock);
 
   if (!mListeners.GetWeak(canonical)) {
     // Unregistering a listener that was never registered?
     return NS_ERROR_FAILURE;
   }
   mListeners.Remove(canonical);
@@ -362,13 +370,14 @@ nsConsoleService::Reset()
   MutexAutoLock lock(mLock);
 
   mCurrent = 0;
   mFull = false;
 
   /*
    * Free all messages stored so far (cf. destructor)
    */
-  for (uint32_t i = 0; i < mBufferSize && mMessages[i] != nullptr; i++)
+  for (uint32_t i = 0; i < mBufferSize && mMessages[i]; i++) {
     NS_RELEASE(mMessages[i]);
+  }
 
   return NS_OK;
 }
--- a/xpcom/base/nsConsoleService.h
+++ b/xpcom/base/nsConsoleService.h
@@ -23,46 +23,49 @@ class nsConsoleService MOZ_FINAL : publi
 {
 public:
   nsConsoleService();
   nsresult Init();
 
   NS_DECL_THREADSAFE_ISUPPORTS
   NS_DECL_NSICONSOLESERVICE
 
-  void SetIsDelivering() {
+  void SetIsDelivering()
+  {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_ASSERT(!mDeliveringMessage);
     mDeliveringMessage = true;
   }
 
-  void SetDoneDelivering() {
+  void SetDoneDelivering()
+  {
     MOZ_ASSERT(NS_IsMainThread());
     MOZ_ASSERT(mDeliveringMessage);
     mDeliveringMessage = false;
   }
 
   // This is a variant of LogMessage which allows the caller to determine
   // if the message should be output to an OS-specific log. This is used on
   // B2G to control whether the message is logged to the android log or not.
 
   enum OutputMode {
     SuppressLog,
     OutputToLog
   };
-  virtual nsresult LogMessageWithMode(nsIConsoleMessage *message, OutputMode outputMode);
+  virtual nsresult LogMessageWithMode(nsIConsoleMessage* aMessage,
+                                      OutputMode aOutputMode);
 
   typedef nsInterfaceHashtable<nsISupportsHashKey, nsIConsoleListener> ListenerHash;
   void EnumerateListeners(ListenerHash::EnumReadFunction aFunction, void* aClosure);
 
 private:
   ~nsConsoleService();
 
   // Circular buffer of saved messages
-  nsIConsoleMessage **mMessages;
+  nsIConsoleMessage** mMessages;
 
   // How big?
   uint32_t mBufferSize;
 
   // Index of slot in mMessages that'll be filled by *next* log message
   uint32_t mCurrent;
 
   // Is the buffer full? (Has mCurrent wrapped around at least once?)
--- a/xpcom/base/nsCrashOnException.cpp
+++ b/xpcom/base/nsCrashOnException.cpp
@@ -9,33 +9,35 @@
 #include "nsServiceManagerUtils.h"
 
 #ifdef MOZ_CRASHREPORTER
 #include "nsICrashReporter.h"
 #endif
 
 namespace mozilla {
 
-static int ReportException(EXCEPTION_POINTERS *aExceptionInfo)
+static int
+ReportException(EXCEPTION_POINTERS* aExceptionInfo)
 {
 #ifdef MOZ_CRASHREPORTER
   nsCOMPtr<nsICrashReporter> cr =
     do_GetService("@mozilla.org/toolkit/crash-reporter;1");
-  if (cr)
+  if (cr) {
     cr->WriteMinidumpForException(aExceptionInfo);
+  }
 #endif
   return EXCEPTION_EXECUTE_HANDLER;
 }
 
 XPCOM_API(LRESULT)
-CallWindowProcCrashProtected(WNDPROC wndProc, HWND hWnd, UINT msg,
-                             WPARAM wParam, LPARAM lParam)
+CallWindowProcCrashProtected(WNDPROC aWndProc, HWND aHWnd, UINT aMsg,
+                             WPARAM aWParam, LPARAM aLParam)
 {
   MOZ_SEH_TRY {
-    return wndProc(hWnd, msg, wParam, lParam);
+    return aWndProc(aHWnd, aMsg, aWParam, aLParam);
   }
   MOZ_SEH_EXCEPT(ReportException(GetExceptionInformation())) {
     ::TerminateProcess(::GetCurrentProcess(), 253);
   }
   return 0; // not reached
 }
 
 }
--- a/xpcom/base/nsCrashOnException.h
+++ b/xpcom/base/nsCrashOnException.h
@@ -9,14 +9,15 @@
 
 #include <nscore.h>
 #include <windows.h>
 
 namespace mozilla {
 
 // Call a given window procedure, and catch any Win32 exceptions raised from it,
 // and report them as crashes.
-XPCOM_API(LRESULT) CallWindowProcCrashProtected(WNDPROC wndProc, HWND hWnd, UINT msg,
-                                                WPARAM wParam, LPARAM lParam);
+XPCOM_API(LRESULT) CallWindowProcCrashProtected(WNDPROC aWndProc, HWND aHWnd,
+                                                UINT aMsg, WPARAM aWParam,
+                                                LPARAM aLParam);
 
 }
 
 #endif
--- a/xpcom/base/nsCycleCollector.cpp
+++ b/xpcom/base/nsCycleCollector.cpp
@@ -235,18 +235,18 @@ struct nsCycleCollectorParams
 {
   bool mLogAll;
   bool mLogShutdown;
   bool mAllTracesAll;
   bool mAllTracesShutdown;
   bool mLogThisThread;
 
   nsCycleCollectorParams() :
-    mLogAll      (PR_GetEnv("MOZ_CC_LOG_ALL") != nullptr),
-    mLogShutdown (PR_GetEnv("MOZ_CC_LOG_SHUTDOWN") != nullptr),
+    mLogAll(PR_GetEnv("MOZ_CC_LOG_ALL") != nullptr),
+    mLogShutdown(PR_GetEnv("MOZ_CC_LOG_SHUTDOWN") != nullptr),
     mAllTracesAll(false),
     mAllTracesShutdown(false)
   {
     const char* logThreadEnv = PR_GetEnv("MOZ_CC_LOG_THREAD");
     bool threadLogging = true;
     if (logThreadEnv && !!strcmp(logThreadEnv, "all")) {
       if (NS_IsMainThread()) {
         threadLogging = !strcmp(logThreadEnv, "main");
@@ -295,17 +295,19 @@ struct nsCycleCollectorParams
     return mAllTracesAll || (aIsShutdown && mAllTracesShutdown);
   }
 };
 
 #ifdef COLLECT_TIME_DEBUG
 class TimeLog
 {
 public:
-  TimeLog() : mLastCheckpoint(TimeStamp::Now()) {}
+  TimeLog() : mLastCheckpoint(TimeStamp::Now())
+  {
+  }
 
   void
   Checkpoint(const char* aEvent)
   {
     TimeStamp now = TimeStamp::Now();
     double dur = (now - mLastCheckpoint).ToMilliseconds();
     if (dur >= 0.5) {
       printf("cc: %s took %.1fms\n", aEvent, dur);
@@ -315,18 +317,22 @@ public:
 
 private:
   TimeStamp mLastCheckpoint;
 };
 #else
 class TimeLog
 {
 public:
-  TimeLog() {}
-  void Checkpoint(const char* aEvent) {}
+  TimeLog()
+  {
+  }
+  void Checkpoint(const char* aEvent)
+  {
+  }
 };
 #endif
 
 
 ////////////////////////////////////////////////////////////////////////
 // Base types
 ////////////////////////////////////////////////////////////////////////
 
@@ -351,19 +357,19 @@ public:
   {
     MOZ_ASSERT(!mSentinelAndBlocks[0].block &&
                !mSentinelAndBlocks[1].block,
                "Didn't call Clear()?");
   }
 
   void Clear()
   {
-    Block *b = Blocks();
+    Block* b = Blocks();
     while (b) {
-      Block *next = b->Next();
+      Block* next = b->Next();
       delete b;
       b = next;
     }
 
     mSentinelAndBlocks[0].block = nullptr;
     mSentinelAndBlocks[1].block = nullptr;
   }
 
@@ -375,113 +381,147 @@ public:
   }
 #endif
 
 private:
   struct Block;
   union PtrInfoOrBlock {
     // Use a union to avoid reinterpret_cast and the ensuing
     // potential aliasing bugs.
-    PtrInfo *ptrInfo;
-    Block *block;
+    PtrInfo* ptrInfo;
+    Block* block;
   };
-  struct Block {
+  struct Block
+  {
     enum { BlockSize = 16 * 1024 };
 
     PtrInfoOrBlock mPointers[BlockSize];
-    Block() {
+    Block()
+    {
       mPointers[BlockSize - 2].block = nullptr; // sentinel
       mPointers[BlockSize - 1].block = nullptr; // next block pointer
     }
-    Block*& Next()          { return mPointers[BlockSize - 1].block; }
-    PtrInfoOrBlock* Start() { return &mPointers[0]; }
-    PtrInfoOrBlock* End()   { return &mPointers[BlockSize - 2]; }
+    Block*& Next()
+    {
+      return mPointers[BlockSize - 1].block;
+    }
+    PtrInfoOrBlock* Start()
+    {
+      return &mPointers[0];
+    }
+    PtrInfoOrBlock* End()
+    {
+      return &mPointers[BlockSize - 2];
+    }
   };
 
   // Store the null sentinel so that we can have valid iterators
   // before adding any edges and without adding any blocks.
   PtrInfoOrBlock mSentinelAndBlocks[2];
 
-  Block*& Blocks()       { return mSentinelAndBlocks[1].block; }
-  Block*  Blocks() const { return mSentinelAndBlocks[1].block; }
+  Block*& Blocks()
+  {
+    return mSentinelAndBlocks[1].block;
+  }
+  Block* Blocks() const
+  {
+    return mSentinelAndBlocks[1].block;
+  }
 
 public:
   class Iterator
   {
   public:
-    Iterator() : mPointer(nullptr) {}
-    Iterator(PtrInfoOrBlock *aPointer) : mPointer(aPointer) {}
-    Iterator(const Iterator& aOther) : mPointer(aOther.mPointer) {}
+    Iterator() : mPointer(nullptr)
+  {
+  }
+    Iterator(PtrInfoOrBlock* aPointer) : mPointer(aPointer)
+  {
+  }
+    Iterator(const Iterator& aOther) : mPointer(aOther.mPointer)
+  {
+  }
 
     Iterator& operator++()
     {
-      if (mPointer->ptrInfo == nullptr) {
+      if (!mPointer->ptrInfo) {
         // Null pointer is a sentinel for link to the next block.
         mPointer = (mPointer + 1)->block->mPointers;
       }
       ++mPointer;
       return *this;
     }
 
     PtrInfo* operator*() const
     {
-      if (mPointer->ptrInfo == nullptr) {
+      if (!mPointer->ptrInfo) {
         // Null pointer is a sentinel for link to the next block.
         return (mPointer + 1)->block->mPointers->ptrInfo;
       }
       return mPointer->ptrInfo;
     }
     bool operator==(const Iterator& aOther) const
-    { return mPointer == aOther.mPointer; }
+    {
+      return mPointer == aOther.mPointer;
+    }
     bool operator!=(const Iterator& aOther) const
-    { return mPointer != aOther.mPointer; }
+    {
+      return mPointer != aOther.mPointer;
+    }
 
 #ifdef DEBUG_CC_GRAPH
     bool Initialized() const
     {
       return mPointer != nullptr;
     }
 #endif
 
   private:
-    PtrInfoOrBlock *mPointer;
+    PtrInfoOrBlock* mPointer;
   };
 
   class Builder;
   friend class Builder;
-  class Builder {
+  class Builder
+  {
   public:
-    Builder(EdgePool &aPool)
-      : mCurrent(&aPool.mSentinelAndBlocks[0]),
-        mBlockEnd(&aPool.mSentinelAndBlocks[0]),
-        mNextBlockPtr(&aPool.Blocks())
+    Builder(EdgePool& aPool)
+      : mCurrent(&aPool.mSentinelAndBlocks[0])
+      , mBlockEnd(&aPool.mSentinelAndBlocks[0])
+      , mNextBlockPtr(&aPool.Blocks())
     {
     }
 
-    Iterator Mark() { return Iterator(mCurrent); }
-
-    void Add(PtrInfo* aEdge) {
+    Iterator Mark()
+    {
+      return Iterator(mCurrent);
+    }
+
+    void Add(PtrInfo* aEdge)
+    {
       if (mCurrent == mBlockEnd) {
-        Block *b = new Block();
+        Block* b = new Block();
         *mNextBlockPtr = b;
         mCurrent = b->Start();
         mBlockEnd = b->End();
         mNextBlockPtr = &b->Next();
       }
       (mCurrent++)->ptrInfo = aEdge;
     }
   private:
     // mBlockEnd points to space for null sentinel
-    PtrInfoOrBlock *mCurrent, *mBlockEnd;
-    Block **mNextBlockPtr;
+    PtrInfoOrBlock* mCurrent;
+    PtrInfoOrBlock* mBlockEnd;
+    Block** mNextBlockPtr;
   };
 
-  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+  {
     size_t n = 0;
-    Block *b = Blocks();
+    Block* b = Blocks();
     while (b) {
       n += aMallocSizeOf(b);
       b = b->Next();
     }
     return n;
   }
 };
 
@@ -503,43 +543,44 @@ public:
 enum NodeColor { black, white, grey };
 
 // This structure should be kept as small as possible; we may expect
 // hundreds of thousands of them to be allocated and touched
 // repeatedly during each cycle collection.
 
 struct PtrInfo
 {
-  void *mPointer;
-  nsCycleCollectionParticipant *mParticipant;
+  void* mPointer;
+  nsCycleCollectionParticipant* mParticipant;
   uint32_t mColor : 2;
   uint32_t mInternalRefs : 30;
   uint32_t mRefCount;
 private:
   EdgePool::Iterator mFirstChild;
 
 public:
 
-  PtrInfo(void *aPointer, nsCycleCollectionParticipant *aParticipant)
+  PtrInfo(void* aPointer, nsCycleCollectionParticipant* aParticipant)
     : mPointer(aPointer),
       mParticipant(aParticipant),
       mColor(grey),
       mInternalRefs(0),
       mRefCount(UINT32_MAX - 1),
       mFirstChild()
   {
     // We initialize mRefCount to a large non-zero value so
     // that it doesn't look like a JS object to the cycle collector
     // in the case where the object dies before being traversed.
 
     MOZ_ASSERT(aParticipant);
   }
 
   // Allow NodePool::Block's constructor to compile.
-  PtrInfo() {
+  PtrInfo()
+  {
     NS_NOTREACHED("should never be called");
   }
 
   EdgePool::Iterator FirstChild()
   {
     CC_GRAPH_ASSERT(mFirstChild.Initialized());
     return mFirstChild;
   }
@@ -569,44 +610,51 @@ public:
  * A structure designed to be used like a linked list of PtrInfo, except
  * that allocates the PtrInfo 32K-at-a-time.
  */
 class NodePool
 {
 private:
   enum { BlockSize = 8 * 1024 }; // could be int template parameter
 
-  struct Block {
+  struct Block
+  {
     // We create and destroy Block using NS_Alloc/NS_Free rather
     // than new and delete to avoid calling its constructor and
     // destructor.
-    Block()  { NS_NOTREACHED("should never be called"); }
-    ~Block() { NS_NOTREACHED("should never be called"); }
+    Block()
+    {
+      NS_NOTREACHED("should never be called");
+    }
+    ~Block()
+    {
+      NS_NOTREACHED("should never be called");
+    }
 
     Block* mNext;
     PtrInfo mEntries[BlockSize + 1]; // +1 to store last child of last node
   };
 
 public:
   NodePool()
-    : mBlocks(nullptr),
-      mLast(nullptr)
+    : mBlocks(nullptr)
+    , mLast(nullptr)
   {
   }
 
   ~NodePool()
   {
     MOZ_ASSERT(!mBlocks, "Didn't call Clear()?");
   }
 
   void Clear()
   {
-    Block *b = mBlocks;
+    Block* b = mBlocks;
     while (b) {
-      Block *n = b->mNext;
+      Block* n = b->mNext;
       NS_Free(b);
       b = n;
     }
 
     mBlocks = nullptr;
     mLast = nullptr;
   }
 
@@ -614,54 +662,55 @@ public:
   bool IsEmpty()
   {
     return !mBlocks && !mLast;
   }
 #endif
 
   class Builder;
   friend class Builder;
-  class Builder {
+  class Builder
+  {
   public:
     Builder(NodePool& aPool)
-      : mNextBlock(&aPool.mBlocks),
-        mNext(aPool.mLast),
-        mBlockEnd(nullptr)
+      : mNextBlock(&aPool.mBlocks)
+      , mNext(aPool.mLast)
+      , mBlockEnd(nullptr)
     {
-      MOZ_ASSERT(aPool.mBlocks == nullptr && aPool.mLast == nullptr,
-                 "pool not empty");
+      MOZ_ASSERT(!aPool.mBlocks && !aPool.mLast, "pool not empty");
     }
-    PtrInfo *Add(void *aPointer, nsCycleCollectionParticipant *aParticipant)
+    PtrInfo* Add(void* aPointer, nsCycleCollectionParticipant* aParticipant)
     {
       if (mNext == mBlockEnd) {
-        Block *block = static_cast<Block*>(NS_Alloc(sizeof(Block)));
+        Block* block = static_cast<Block*>(NS_Alloc(sizeof(Block)));
         *mNextBlock = block;
         mNext = block->mEntries;
         mBlockEnd = block->mEntries + BlockSize;
         block->mNext = nullptr;
         mNextBlock = &block->mNext;
       }
       return new (mNext++) PtrInfo(aPointer, aParticipant);
     }
   private:
-    Block **mNextBlock;
-    PtrInfo *&mNext;
-    PtrInfo *mBlockEnd;
+    Block** mNextBlock;
+    PtrInfo*& mNext;
+    PtrInfo* mBlockEnd;
   };
 
   class Enumerator;
   friend class Enumerator;
-  class Enumerator {
+  class Enumerator
+  {
   public:
     Enumerator(NodePool& aPool)
-      : mFirstBlock(aPool.mBlocks),
-        mCurBlock(nullptr),
-        mNext(nullptr),
-        mBlockEnd(nullptr),
-        mLast(aPool.mLast)
+      : mFirstBlock(aPool.mBlocks)
+      , mCurBlock(nullptr)
+      , mNext(nullptr)
+      , mBlockEnd(nullptr)
+      , mLast(aPool.mLast)
     {
     }
 
     bool IsDone() const
     {
       return mNext == mLast;
     }
 
@@ -669,66 +718,69 @@ public:
     {
       return mNext == mBlockEnd;
     }
 
     PtrInfo* GetNext()
     {
       MOZ_ASSERT(!IsDone(), "calling GetNext when done");
       if (mNext == mBlockEnd) {
-        Block *nextBlock = mCurBlock ? mCurBlock->mNext : mFirstBlock;
+        Block* nextBlock = mCurBlock ? mCurBlock->mNext : mFirstBlock;
         mNext = nextBlock->mEntries;
         mBlockEnd = mNext + BlockSize;
         mCurBlock = nextBlock;
       }
       return mNext++;
     }
   private:
     // mFirstBlock is a reference to allow an Enumerator to be constructed
     // for an empty graph.
-    Block *&mFirstBlock;
-    Block *mCurBlock;
+    Block*& mFirstBlock;
+    Block* mCurBlock;
     // mNext is the next value we want to return, unless mNext == mBlockEnd
     // NB: mLast is a reference to allow enumerating while building!
-    PtrInfo *mNext, *mBlockEnd, *&mLast;
+    PtrInfo* mNext;
+    PtrInfo* mBlockEnd;
+    PtrInfo*& mLast;
   };
 
-  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
+  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+  {
     // We don't measure the things pointed to by mEntries[] because those
     // pointers are non-owning.
     size_t n = 0;
-    Block *b = mBlocks;
+    Block* b = mBlocks;
     while (b) {
       n += aMallocSizeOf(b);
       b = b->mNext;
     }
     return n;
   }
 
 private:
-  Block *mBlocks;
-  PtrInfo *mLast;
+  Block* mBlocks;
+  PtrInfo* mLast;
 };
 
 
 // Declarations for mPtrToNodeMap.
 
 struct PtrToNodeEntry : public PLDHashEntryHdr
 {
   // The key is mNode->mPointer
-  PtrInfo *mNode;
+  PtrInfo* mNode;
 };
 
 static bool
-PtrToNodeMatchEntry(PLDHashTable *table,
-                    const PLDHashEntryHdr *entry,
-                    const void *key)
+PtrToNodeMatchEntry(PLDHashTable* aTable,
+                    const PLDHashEntryHdr* aEntry,
+                    const void* aKey)
 {
-  const PtrToNodeEntry *n = static_cast<const PtrToNodeEntry*>(entry);
-  return n->mNode->mPointer == key;
+  const PtrToNodeEntry* n = static_cast<const PtrToNodeEntry*>(aEntry);
+  return n->mNode->mPointer == aKey;
 }
 
 static PLDHashTableOps PtrNodeOps = {
   PL_DHashAllocTable,
   PL_DHashFreeTable,
   PL_DHashVoidPtrKeyStub,
   PtrToNodeMatchEntry,
   PL_DHashMoveEntryStub,
@@ -736,20 +788,20 @@ static PLDHashTableOps PtrNodeOps = {
   PL_DHashFinalizeStub,
   nullptr
 };
 
 
 struct WeakMapping
 {
   // map and key will be null if the corresponding objects are GC marked
-  PtrInfo *mMap;
-  PtrInfo *mKey;
-  PtrInfo *mKeyDelegate;
-  PtrInfo *mVal;
+  PtrInfo* mMap;
+  PtrInfo* mKey;
+  PtrInfo* mKeyDelegate;
+  PtrInfo* mVal;
 };
 
 class GCGraphBuilder;
 
 struct GCGraph
 {
   NodePool mNodes;
   EdgePool mEdges;
@@ -793,334 +845,347 @@ public:
   bool IsEmpty()
   {
     return mNodes.IsEmpty() && mEdges.IsEmpty() &&
            mWeakMaps.IsEmpty() && mRootCount == 0 &&
            !mPtrToNodeMap.ops;
   }
 #endif
 
-  PtrInfo* FindNode(void *aPtr);
-  PtrToNodeEntry* AddNodeToMap(void *aPtr);
-  void RemoveNodeFromMap(void *aPtr);
+  PtrInfo* FindNode(void* aPtr);
+  PtrToNodeEntry* AddNodeToMap(void* aPtr);
+  void RemoveNodeFromMap(void* aPtr);
 
   uint32_t MapCount() const
   {
     return mPtrToNodeMap.entryCount;
   }
 
   void SizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
-                           size_t *aNodesSize, size_t *aEdgesSize,
-                           size_t *aWeakMapsSize) const {
+                           size_t* aNodesSize, size_t* aEdgesSize,
+                           size_t* aWeakMapsSize) const
+  {
     *aNodesSize = mNodes.SizeOfExcludingThis(aMallocSizeOf);
     *aEdgesSize = mEdges.SizeOfExcludingThis(aMallocSizeOf);
 
     // We don't measure what the WeakMappings point to, because the
     // pointers are non-owning.
     *aWeakMapsSize = mWeakMaps.SizeOfExcludingThis(aMallocSizeOf);
   }
 };
 
 PtrInfo*
-GCGraph::FindNode(void *aPtr)
+GCGraph::FindNode(void* aPtr)
 {
-  PtrToNodeEntry *e = static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr, PL_DHASH_LOOKUP));
+  PtrToNodeEntry* e =
+    static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr,
+                                                      PL_DHASH_LOOKUP));
   if (!PL_DHASH_ENTRY_IS_BUSY(e)) {
     return nullptr;
   }
   return e->mNode;
 }
 
 PtrToNodeEntry*
-GCGraph::AddNodeToMap(void *aPtr)
+GCGraph::AddNodeToMap(void* aPtr)
 {
-  PtrToNodeEntry *e = static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr, PL_DHASH_ADD));
+  PtrToNodeEntry* e =
+    static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, aPtr,
+                                                      PL_DHASH_ADD));
   if (!e) {
     // Caller should track OOMs
     return nullptr;
   }
   return e;
 }
 
 void
-GCGraph::RemoveNodeFromMap(void *aPtr)
+GCGraph::RemoveNodeFromMap(void* aPtr)
 {
   PL_DHashTableOperate(&mPtrToNodeMap, aPtr, PL_DHASH_REMOVE);
 }
 
 
-static nsISupports *
-CanonicalizeXPCOMParticipant(nsISupports *in)
+static nsISupports*
+CanonicalizeXPCOMParticipant(nsISupports* aIn)
 {
   nsISupports* out;
-  in->QueryInterface(NS_GET_IID(nsCycleCollectionISupports),
-                     reinterpret_cast<void**>(&out));
+  aIn->QueryInterface(NS_GET_IID(nsCycleCollectionISupports),
+                      reinterpret_cast<void**>(&out));
   return out;
 }
 
 static inline void
-ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp);
+ToParticipant(nsISupports* aPtr, nsXPCOMCycleCollectionParticipant** aCp);
 
 static void
-CanonicalizeParticipant(void **parti, nsCycleCollectionParticipant **cp)
+CanonicalizeParticipant(void** aParti, nsCycleCollectionParticipant** aCp)
 {
   // If the participant is null, this is an nsISupports participant,
   // so we must QI to get the real participant.
 
-  if (!*cp) {
-    nsISupports *nsparti = static_cast<nsISupports*>(*parti);
+  if (!*aCp) {
+    nsISupports* nsparti = static_cast<nsISupports*>(*aParti);
     nsparti = CanonicalizeXPCOMParticipant(nsparti);
     NS_ASSERTION(nsparti,
                  "Don't add objects that don't participate in collection!");
-    nsXPCOMCycleCollectionParticipant *xcp;
+    nsXPCOMCycleCollectionParticipant* xcp;
     ToParticipant(nsparti, &xcp);
-    *parti = nsparti;
-    *cp = xcp;
+    *aParti = nsparti;
+    *aCp = xcp;
   }
 }
 
-struct nsPurpleBufferEntry {
+struct nsPurpleBufferEntry
+{
   union {
-    void *mObject;                        // when low bit unset
-    nsPurpleBufferEntry *mNextInFreeList; // when low bit set
+    void* mObject;                        // when low bit unset
+    nsPurpleBufferEntry* mNextInFreeList; // when low bit set
   };
 
-  nsCycleCollectingAutoRefCnt *mRefCnt;
-
-  nsCycleCollectionParticipant *mParticipant; // nullptr for nsISupports
+  nsCycleCollectingAutoRefCnt* mRefCnt;
+
+  nsCycleCollectionParticipant* mParticipant; // nullptr for nsISupports
 };
 
 class nsCycleCollector;
 
 struct nsPurpleBuffer
 {
 private:
-  struct Block {
-    Block *mNext;
+  struct Block
+  {
+    Block* mNext;
     // Try to match the size of a jemalloc bucket, to minimize slop bytes.
     // - On 32-bit platforms sizeof(nsPurpleBufferEntry) is 12, so mEntries
     //   is 16,380 bytes, which leaves 4 bytes for mNext.
     // - On 64-bit platforms sizeof(nsPurpleBufferEntry) is 24, so mEntries
     //   is 32,544 bytes, which leaves 8 bytes for mNext.
     nsPurpleBufferEntry mEntries[1365];
 
-    Block() : mNext(nullptr) {
+    Block() : mNext(nullptr)
+    {
       // Ensure Block is the right size (see above).
       static_assert(
         sizeof(Block) == 16384 ||       // 32-bit
         sizeof(Block) == 32768,         // 64-bit
         "ill-sized nsPurpleBuffer::Block"
       );
     }
 
     template <class PurpleVisitor>
-    void VisitEntries(nsPurpleBuffer &aBuffer, PurpleVisitor &aVisitor)
+    void VisitEntries(nsPurpleBuffer& aBuffer, PurpleVisitor& aVisitor)
     {
-      nsPurpleBufferEntry *eEnd = ArrayEnd(mEntries);
-      for (nsPurpleBufferEntry *e = mEntries; e != eEnd; ++e) {
+      nsPurpleBufferEntry* eEnd = ArrayEnd(mEntries);
+      for (nsPurpleBufferEntry* e = mEntries; e != eEnd; ++e) {
         if (!(uintptr_t(e->mObject) & uintptr_t(1))) {
           aVisitor.Visit(aBuffer, e);
         }
       }
     }
   };
   // This class wraps a linked list of the elements in the purple
   // buffer.
 
   uint32_t mCount;
   Block mFirstBlock;
-  nsPurpleBufferEntry *mFreeList;
+  nsPurpleBufferEntry* mFreeList;
 
 public:
   nsPurpleBuffer()
   {
     InitBlocks();
   }
 
   ~nsPurpleBuffer()
   {
     FreeBlocks();
   }
 
   template <class PurpleVisitor>
-  void VisitEntries(PurpleVisitor &aVisitor)
+  void VisitEntries(PurpleVisitor& aVisitor)
   {
-    for (Block *b = &mFirstBlock; b; b = b->mNext) {
+    for (Block* b = &mFirstBlock; b; b = b->mNext) {
       b->VisitEntries(*this, aVisitor);
     }
   }
 
   void InitBlocks()
   {
     mCount = 0;
     mFreeList = nullptr;
     StartBlock(&mFirstBlock);
   }
 
-  void StartBlock(Block *aBlock)
+  void StartBlock(Block* aBlock)
   {
     NS_ABORT_IF_FALSE(!mFreeList, "should not have free list");
 
     // Put all the entries in the block on the free list.
-    nsPurpleBufferEntry *entries = aBlock->mEntries;
+    nsPurpleBufferEntry* entries = aBlock->mEntries;
     mFreeList = entries;
     for (uint32_t i = 1; i < ArrayLength(aBlock->mEntries); ++i) {
       entries[i - 1].mNextInFreeList =
         (nsPurpleBufferEntry*)(uintptr_t(entries + i) | 1);
     }
     entries[ArrayLength(aBlock->mEntries) - 1].mNextInFreeList =
       (nsPurpleBufferEntry*)1;
   }
 
   void FreeBlocks()
   {
-    if (mCount > 0)
+    if (mCount > 0) {
       UnmarkRemainingPurple(&mFirstBlock);
-    Block *b = mFirstBlock.mNext;
+    }
+    Block* b = mFirstBlock.mNext;
     while (b) {
-      if (mCount > 0)
+      if (mCount > 0) {
         UnmarkRemainingPurple(b);
-      Block *next = b->mNext;
+      }
+      Block* next = b->mNext;
       delete b;
       b = next;
     }
     mFirstBlock.mNext = nullptr;
   }
 
   struct UnmarkRemainingPurpleVisitor
   {
     void
-    Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
+    Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry)
     {
       if (aEntry->mRefCnt) {
         aEntry->mRefCnt->RemoveFromPurpleBuffer();
         aEntry->mRefCnt = nullptr;
       }
       aEntry->mObject = nullptr;
       --aBuffer.mCount;
     }
   };
 
-  void UnmarkRemainingPurple(Block *b)
+  void UnmarkRemainingPurple(Block* aBlock)
   {
     UnmarkRemainingPurpleVisitor visitor;
-    b->VisitEntries(*this, visitor);
+    aBlock->VisitEntries(*this, visitor);
   }
 
-  void SelectPointers(GCGraphBuilder &builder);
+  void SelectPointers(GCGraphBuilder& aBuilder);
 
   // RemoveSkippable removes entries from the purple buffer synchronously
   // (1) if aAsyncSnowWhiteFreeing is false and nsPurpleBufferEntry::mRefCnt is 0 or
   // (2) if the object's nsXPCOMCycleCollectionParticipant::CanSkip() returns true or
   // (3) if nsPurpleBufferEntry::mRefCnt->IsPurple() is false.
   // (4) If removeChildlessNodes is true, then any nodes in the purple buffer
   //     that will have no children in the cycle collector graph will also be
   //     removed. CanSkip() may be run on these children.
   void RemoveSkippable(nsCycleCollector* aCollector,
-                       bool removeChildlessNodes,
+                       bool aRemoveChildlessNodes,
                        bool aAsyncSnowWhiteFreeing,
                        CC_ForgetSkippableCallback aCb);
 
   MOZ_ALWAYS_INLINE nsPurpleBufferEntry* NewEntry()
   {
     if (MOZ_UNLIKELY(!mFreeList)) {
-      Block *b = new Block;
+      Block* b = new Block;
       StartBlock(b);
 
       // Add the new block as the second block in the list.
       b->mNext = mFirstBlock.mNext;
       mFirstBlock.mNext = b;
     }
 
-    nsPurpleBufferEntry *e = mFreeList;
+    nsPurpleBufferEntry* e = mFreeList;
     mFreeList = (nsPurpleBufferEntry*)
       (uintptr_t(mFreeList->mNextInFreeList) & ~uintptr_t(1));
     return e;
   }
 
-  MOZ_ALWAYS_INLINE void Put(void *p, nsCycleCollectionParticipant *cp,
-                             nsCycleCollectingAutoRefCnt *aRefCnt)
+  MOZ_ALWAYS_INLINE void Put(void* aObject, nsCycleCollectionParticipant* aCp,
+                             nsCycleCollectingAutoRefCnt* aRefCnt)
   {
-    nsPurpleBufferEntry *e = NewEntry();
+    nsPurpleBufferEntry* e = NewEntry();
 
     ++mCount;
 
-    e->mObject = p;
+    e->mObject = aObject;
     e->mRefCnt = aRefCnt;
-    e->mParticipant = cp;
+    e->mParticipant = aCp;
   }
 
-  void Remove(nsPurpleBufferEntry *e)
+  void Remove(nsPurpleBufferEntry* aEntry)
   {
     MOZ_ASSERT(mCount != 0, "must have entries");
 
-    if (e->mRefCnt) {
-      e->mRefCnt->RemoveFromPurpleBuffer();
-      e->mRefCnt = nullptr;
+    if (aEntry->mRefCnt) {
+      aEntry->mRefCnt->RemoveFromPurpleBuffer();
+      aEntry->mRefCnt = nullptr;
     }
-    e->mNextInFreeList =
+    aEntry->mNextInFreeList =
       (nsPurpleBufferEntry*)(uintptr_t(mFreeList) | uintptr_t(1));
-    mFreeList = e;
+    mFreeList = aEntry;
 
     --mCount;
   }
 
   uint32_t Count() const
   {
     return mCount;
   }
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   {
     size_t n = 0;
 
     // Don't measure mFirstBlock because it's within |this|.
-    const Block *block = mFirstBlock.mNext;
+    const Block* block = mFirstBlock.mNext;
     while (block) {
       n += aMallocSizeOf(block);
       block = block->mNext;
     }
 
     // mFreeList is deliberately not measured because it points into
     // the purple buffer, which is within mFirstBlock and thus within |this|.
     //
     // We also don't measure the things pointed to by mEntries[] because
     // those pointers are non-owning.
 
     return n;
   }
 };
 
 static bool
-AddPurpleRoot(GCGraphBuilder &aBuilder, void *aRoot, nsCycleCollectionParticipant *aParti);
+AddPurpleRoot(GCGraphBuilder& aBuilder, void* aRoot,
+              nsCycleCollectionParticipant* aParti);
 
 struct SelectPointersVisitor
 {
-  SelectPointersVisitor(GCGraphBuilder &aBuilder)
+  SelectPointersVisitor(GCGraphBuilder& aBuilder)
     : mBuilder(aBuilder)
-  {}
+
+  {
+  }
 
   void
-  Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
+  Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry)
   {
     MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer");
     MOZ_ASSERT(aEntry->mRefCnt->get() != 0,
                "SelectPointersVisitor: snow-white object in the purple buffer");
     if (!aEntry->mRefCnt->IsPurple() ||
         AddPurpleRoot(mBuilder, aEntry->mObject, aEntry->mParticipant)) {
       aBuffer.Remove(aEntry);
     }
   }
 
 private:
-  GCGraphBuilder &mBuilder;
+  GCGraphBuilder& mBuilder;
 };
 
 void
-nsPurpleBuffer::SelectPointers(GCGraphBuilder &aBuilder)
+nsPurpleBuffer::SelectPointers(GCGraphBuilder& aBuilder)
 {
   SelectPointersVisitor visitor(aBuilder);
   VisitEntries(visitor);
 
   NS_ASSERTION(mCount == 0, "AddPurpleRoot failed");
   if (mCount == 0) {
     FreeBlocks();
     InitBlocks();
@@ -1159,17 +1224,17 @@ class nsCycleCollector : public nsIMemor
 
   bool mActivelyCollecting;
   bool mFreeingSnowWhite;
   // mScanInProgress should be false when we're collecting white objects.
   bool mScanInProgress;
   CycleCollectorResults mResults;
   TimeStamp mCollectionStart;
 
-  CycleCollectedJSRuntime *mJSRuntime;
+  CycleCollectedJSRuntime* mJSRuntime;
 
   ccPhase mIncrementalPhase;
   GCGraph mGraph;
   nsAutoPtr<GCGraphBuilder> mBuilder;
   nsAutoPtr<NodePool::Enumerator> mCurrNode;
   nsCOMPtr<nsICycleCollectorListener> mListener;
 
   nsIThread* mThread;
@@ -1187,65 +1252,65 @@ class nsCycleCollector : public nsIMemor
   uint32_t mMergedInARow;
 
   JSPurpleBuffer* mJSPurpleBuffer;
 
 public:
   nsCycleCollector();
   virtual ~nsCycleCollector();
 
-  void RegisterJSRuntime(CycleCollectedJSRuntime *aJSRuntime);
+  void RegisterJSRuntime(CycleCollectedJSRuntime* aJSRuntime);
   void ForgetJSRuntime();
 
   void SetBeforeUnlinkCallback(CC_BeforeUnlinkCallback aBeforeUnlinkCB)
   {
     CheckThreadSafety();
     mBeforeUnlinkCB = aBeforeUnlinkCB;
   }
 
   void SetForgetSkippableCallback(CC_ForgetSkippableCallback aForgetSkippableCB)
   {
     CheckThreadSafety();
     mForgetSkippableCB = aForgetSkippableCB;
   }
 
-  void Suspect(void *n, nsCycleCollectionParticipant *cp,
-               nsCycleCollectingAutoRefCnt *aRefCnt);
+  void Suspect(void* aPtr, nsCycleCollectionParticipant* aCp,
+               nsCycleCollectingAutoRefCnt* aRefCnt);
   uint32_t SuspectedCount();
   void ForgetSkippable(bool aRemoveChildlessNodes, bool aAsyncSnowWhiteFreeing);
   bool FreeSnowWhite(bool aUntilNoSWInPurpleBuffer);
 
   // This method assumes its argument is already canonicalized.
-  void RemoveObjectFromGraph(void *aPtr);
+  void RemoveObjectFromGraph(void* aPtr);
 
   void PrepareForGarbageCollection();
   void FinishAnyCurrentCollection();
 
   bool Collect(ccType aCCType,
-               SliceBudget &aBudget,
-               nsICycleCollectorListener *aManualListener);
+               SliceBudget& aBudget,
+               nsICycleCollectorListener* aManualListener);
   void Shutdown();
 
   void SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf,
-                           size_t *aObjectSize,
-                           size_t *aGraphNodesSize,
-                           size_t *aGraphEdgesSize,
-                           size_t *aWeakMapsSize,
-                           size_t *aPurpleBufferSize) const;
+                           size_t* aObjectSize,
+                           size_t* aGraphNodesSize,
+                           size_t* aGraphEdgesSize,
+                           size_t* aWeakMapsSize,
+                           size_t* aPurpleBufferSize) const;
 
   JSPurpleBuffer* GetJSPurpleBuffer();
 private:
   void CheckThreadSafety();
   void ShutdownCollect();
 
   void FixGrayBits(bool aForceGC);
   bool ShouldMergeZones(ccType aCCType);
 
-  void BeginCollection(ccType aCCType, nsICycleCollectorListener *aManualListener);
-  void MarkRoots(SliceBudget &aBudget);
+  void BeginCollection(ccType aCCType, nsICycleCollectorListener* aManualListener);
+  void MarkRoots(SliceBudget& aBudget);
   void ScanRoots(bool aFullySynchGraphBuild);
   void ScanIncrementalRoots();
   void ScanWhiteNodes(bool aFullySynchGraphBuild);
   void ScanBlackNodes();
   void ScanWeakMaps();
 
   // returns whether anything was collected
   bool CollectWhite();
@@ -1263,85 +1328,89 @@ NS_IMPL_ISUPPORTS(nsCycleCollector, nsIM
  * void VisitNode(PtrInfo *pi);
  */
 template <class Visitor>
 class GraphWalker
 {
 private:
   Visitor mVisitor;
 
-  void DoWalk(nsDeque &aQueue);
-
-  void CheckedPush(nsDeque &aQueue, PtrInfo *pi)
+  void DoWalk(nsDeque& aQueue);
+
+  void CheckedPush(nsDeque& aQueue, PtrInfo* aPi)
   {
-    if (!pi) {
+    if (!aPi) {
       MOZ_CRASH();
     }
-    if (!aQueue.Push(pi, fallible_t())) {
+    if (!aQueue.Push(aPi, fallible_t())) {
       mVisitor.Failed();
     }
   }
 
 public:
-  void Walk(PtrInfo *s0);
-  void WalkFromRoots(GCGraph &aGraph);
+  void Walk(PtrInfo* aPi);
+  void WalkFromRoots(GCGraph& aGraph);
   // copy-constructing the visitor should be cheap, and less
   // indirection than using a reference
-  GraphWalker(const Visitor aVisitor) : mVisitor(aVisitor) {}
+  GraphWalker(const Visitor aVisitor) : mVisitor(aVisitor)
+  {
+  }
 };
 
 
 ////////////////////////////////////////////////////////////////////////
 // The static collector struct
 ////////////////////////////////////////////////////////////////////////
 
-struct CollectorData {
+struct CollectorData
+{
   nsRefPtr<nsCycleCollector> mCollector;
   CycleCollectedJSRuntime* mRuntime;
 };
 
 static mozilla::ThreadLocal<CollectorData*> sCollectorData;
 
 ////////////////////////////////////////////////////////////////////////
 // Utility functions
 ////////////////////////////////////////////////////////////////////////
 
 MOZ_NEVER_INLINE static void
-Fault(const char *msg, const void *ptr=nullptr)
+Fault(const char* aMsg, const void* aPtr = nullptr)
 {
-  if (ptr)
-    printf("Fault in cycle collector: %s (ptr: %p)\n", msg, ptr);
-  else
-    printf("Fault in cycle collector: %s\n", msg);
+  if (aPtr) {
+    printf("Fault in cycle collector: %s (ptr: %p)\n", aMsg, aPtr);
+  } else {
+    printf("Fault in cycle collector: %s\n", aMsg);
+  }
 
   NS_RUNTIMEABORT("cycle collector fault");
 }
 
 static void
-Fault(const char *msg, PtrInfo *pi)
+Fault(const char* aMsg, PtrInfo* aPi)
 {
-  Fault(msg, pi->mPointer);
+  Fault(aMsg, aPi->mPointer);
 }
 
 static inline void
-ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp)
+ToParticipant(nsISupports* aPtr, nsXPCOMCycleCollectionParticipant** aCp)
 {
   // We use QI to move from an nsISupports to an
   // nsXPCOMCycleCollectionParticipant, which is a per-class singleton helper
   // object that implements traversal and unlinking logic for the nsISupports
   // in question.
-  CallQueryInterface(s, cp);
+  CallQueryInterface(aPtr, aCp);
 }
 
 template <class Visitor>
 MOZ_NEVER_INLINE void
-GraphWalker<Visitor>::Walk(PtrInfo *s0)
+GraphWalker<Visitor>::Walk(PtrInfo* aPi)
 {
   nsDeque queue;
-  CheckedPush(queue, s0);
+  CheckedPush(queue, aPi);
   DoWalk(queue);
 }
 
 template <class Visitor>
 MOZ_NEVER_INLINE void
 GraphWalker<Visitor>::WalkFromRoots(GCGraph& aGraph)
 {
   nsDeque queue;
@@ -1349,41 +1418,42 @@ GraphWalker<Visitor>::WalkFromRoots(GCGr
   for (uint32_t i = 0; i < aGraph.mRootCount; ++i) {
     CheckedPush(queue, etor.GetNext());
   }
   DoWalk(queue);
 }
 
 template <class Visitor>
 MOZ_NEVER_INLINE void
-GraphWalker<Visitor>::DoWalk(nsDeque &aQueue)
+GraphWalker<Visitor>::DoWalk(nsDeque& aQueue)
 {
   // Use a aQueue to match the breadth-first traversal used when we
   // built the graph, for hopefully-better locality.
   while (aQueue.GetSize() > 0) {
-    PtrInfo *pi = static_cast<PtrInfo*>(aQueue.PopFront());
+    PtrInfo* pi = static_cast<PtrInfo*>(aQueue.PopFront());
 
     if (pi->mParticipant && mVisitor.ShouldVisitNode(pi)) {
       mVisitor.VisitNode(pi);
       for (EdgePool::Iterator child = pi->FirstChild(),
            child_end = pi->LastChild();
            child != child_end; ++child) {
         CheckedPush(aQueue, *child);
       }
     }
   }
 }
 
 struct CCGraphDescriber : public LinkedListElement<CCGraphDescriber>
 {
   CCGraphDescriber()
-    : mAddress("0x"), mCnt(0), mType(eUnknown) {}
-
-  enum Type
+    : mAddress("0x"), mCnt(0), mType(eUnknown)
   {
+  }
+
+  enum Type {
     eRefCountedObject,
     eGCedObject,
     eGCMarkedObject,
     eEdge,
     eRoot,
     eGarbage,
     eUnknown
   };
@@ -1462,23 +1532,23 @@ public:
   }
 
   NS_IMETHOD SetFilenameIdentifier(const nsAString& aIdentifier)
   {
     mFilenameIdentifier = aIdentifier;
     return NS_OK;
   }
 
-  NS_IMETHOD GetGcLogPath(nsAString &aPath)
+  NS_IMETHOD GetGcLogPath(nsAString& aPath)
   {
     aPath = mGCLogPath;
     return NS_OK;
   }
 
-  NS_IMETHOD GetCcLogPath(nsAString &aPath)
+  NS_IMETHOD GetCcLogPath(nsAString& aPath)
   {
     aPath = mCCLogPath;
     return NS_OK;
   }
 
   NS_IMETHOD Begin()
   {
     mCurrentAddress.AssignLiteral("0x");
@@ -1488,41 +1558,46 @@ public:
     }
 
     // Initially create the log in a file starting with
     // "incomplete-gc-edges".  We'll move the file and strip off the
     // "incomplete-" once the dump completes.  (We do this because we don't
     // want scripts which poll the filesystem looking for gc/cc dumps to
     // grab a file before we're finished writing to it.)
     nsCOMPtr<nsIFile> gcLogFile = CreateTempFile("incomplete-gc-edges");
-    if (NS_WARN_IF(!gcLogFile))
+    if (NS_WARN_IF(!gcLogFile)) {
       return NS_ERROR_UNEXPECTED;
+    }
 
     // Dump the JS heap.
     FILE* gcLogANSIFile = nullptr;
     gcLogFile->OpenANSIFileDesc("w", &gcLogANSIFile);
-    if (NS_WARN_IF(!gcLogANSIFile))
+    if (NS_WARN_IF(!gcLogANSIFile)) {
       return NS_ERROR_UNEXPECTED;
+    }
     MozillaRegisterDebugFILE(gcLogANSIFile);
-    CollectorData *data = sCollectorData.get();
-    if (data && data->mRuntime)
+    CollectorData* data = sCollectorData.get();
+    if (data && data->mRuntime) {
       data->mRuntime->DumpJSHeap(gcLogANSIFile);
+    }
     MozillaUnRegisterDebugFILE(gcLogANSIFile);
     fclose(gcLogANSIFile);
 
     // Strip off "incomplete-".
     nsCOMPtr<nsIFile> gcLogFileFinalDestination =
       CreateTempFile("gc-edges");
-    if (NS_WARN_IF(!gcLogFileFinalDestination))
+    if (NS_WARN_IF(!gcLogFileFinalDestination)) {
       return NS_ERROR_UNEXPECTED;
+    }
 
     nsAutoString gcLogFileFinalDestinationName;
     gcLogFileFinalDestination->GetLeafName(gcLogFileFinalDestinationName);
-    if (NS_WARN_IF(gcLogFileFinalDestinationName.IsEmpty()))
+    if (NS_WARN_IF(gcLogFileFinalDestinationName.IsEmpty())) {
       return NS_ERROR_UNEXPECTED;
+    }
 
     gcLogFile->MoveTo(/* directory */ nullptr, gcLogFileFinalDestinationName);
 
     // Log to the error console.
     nsCOMPtr<nsIConsoleService> cs =
       do_GetService(NS_CONSOLESERVICE_CONTRACTID);
     if (cs) {
       nsAutoString gcLogPath;
@@ -1533,49 +1608,51 @@ public:
       cs->LogStringMessage(msg.get());
 
       mGCLogPath = gcLogPath;
     }
 
     // Open a file for dumping the CC graph.  We again prefix with
     // "incomplete-".
     mOutFile = CreateTempFile("incomplete-cc-edges");
-    if (NS_WARN_IF(!mOutFile))
+    if (NS_WARN_IF(!mOutFile)) {
       return NS_ERROR_UNEXPECTED;
+    }
     MOZ_ASSERT(!mStream);
     mOutFile->OpenANSIFileDesc("w", &mStream);
-    if (NS_WARN_IF(!mStream))
+    if (NS_WARN_IF(!mStream)) {
       return NS_ERROR_UNEXPECTED;
+    }
     MozillaRegisterDebugFILE(mStream);
 
     fprintf(mStream, "# WantAllTraces=%s\n", mWantAllTraces ? "true" : "false");
 
     return NS_OK;
   }
-  NS_IMETHOD NoteRefCountedObject(uint64_t aAddress, uint32_t refCount,
-                                  const char *aObjectDescription)
+  NS_IMETHOD NoteRefCountedObject(uint64_t aAddress, uint32_t aRefCount,
+                                  const char* aObjectDescription)
   {
     if (!mDisableLog) {
-      fprintf(mStream, "%p [rc=%u] %s\n", (void*)aAddress, refCount,
+      fprintf(mStream, "%p [rc=%u] %s\n", (void*)aAddress, aRefCount,
               aObjectDescription);
     }
     if (mWantAfterProcessing) {
       CCGraphDescriber* d =  new CCGraphDescriber();
       mDescribers.insertBack(d);
       mCurrentAddress.AssignLiteral("0x");
       mCurrentAddress.AppendInt(aAddress, 16);
       d->mType = CCGraphDescriber::eRefCountedObject;
       d->mAddress = mCurrentAddress;
-      d->mCnt = refCount;
+      d->mCnt = aRefCount;
       d->mName.Append(aObjectDescription);
     }
     return NS_OK;
   }
   NS_IMETHOD NoteGCedObject(uint64_t aAddress, bool aMarked,
-                            const char *aObjectDescription,
+                            const char* aObjectDescription,
                             uint64_t aCompartmentAddress)
   {
     if (!mDisableLog) {
       fprintf(mStream, "%p [gc%s] %s\n", (void*)aAddress,
               aMarked ? ".marked" : "", aObjectDescription);
     }
     if (mWantAfterProcessing) {
       CCGraphDescriber* d =  new CCGraphDescriber();
@@ -1590,17 +1667,17 @@ public:
         d->mCompartmentOrToAddress.AssignLiteral("0x");
         d->mCompartmentOrToAddress.AppendInt(aCompartmentAddress, 16);
       } else {
         d->mCompartmentOrToAddress.SetIsVoid(true);
       }
     }
     return NS_OK;
   }
-  NS_IMETHOD NoteEdge(uint64_t aToAddress, const char *aEdgeName)
+  NS_IMETHOD NoteEdge(uint64_t aToAddress, const char* aEdgeName)
   {
     if (!mDisableLog) {
       fprintf(mStream, "> %p %s\n", (void*)aToAddress, aEdgeName);
     }
     if (mWantAfterProcessing) {
       CCGraphDescriber* d =  new CCGraphDescriber();
       mDescribers.insertBack(d);
       d->mType = CCGraphDescriber::eEdge;
@@ -1671,23 +1748,25 @@ public:
 
       MozillaUnRegisterDebugFILE(mStream);
       fclose(mStream);
       mStream = nullptr;
 
       // Strip off "incomplete-" from the log file's name.
       nsCOMPtr<nsIFile> logFileFinalDestination =
         CreateTempFile("cc-edges");
-      if (NS_WARN_IF(!logFileFinalDestination))
+      if (NS_WARN_IF(!logFileFinalDestination)) {
         return NS_ERROR_UNEXPECTED;
+      }
 
       nsAutoString logFileFinalDestinationName;
       logFileFinalDestination->GetLeafName(logFileFinalDestinationName);
-      if (NS_WARN_IF(logFileFinalDestinationName.IsEmpty()))
+      if (NS_WARN_IF(logFileFinalDestinationName.IsEmpty())) {
         return NS_ERROR_UNEXPECTED;
+      }
 
       mOutFile->MoveTo(/* directory = */ nullptr,
                        logFileFinalDestinationName);
       mOutFile = nullptr;
 
       // Log to the error console.
       nsCOMPtr<nsIConsoleService> cs =
         do_GetService(NS_CONSOLESERVICE_CONTRACTID);
@@ -1702,18 +1781,19 @@ public:
         mCCLogPath = ccLogPath;
       }
     }
     return NS_OK;
   }
   NS_IMETHOD ProcessNext(nsICycleCollectorHandler* aHandler,
                          bool* aCanContinue)
   {
-    if (NS_WARN_IF(!aHandler) || NS_WARN_IF(!mWantAfterProcessing))
+    if (NS_WARN_IF(!aHandler) || NS_WARN_IF(!mWantAfterProcessing)) {
       return NS_ERROR_UNEXPECTED;
+    }
     CCGraphDescriber* d = mDescribers.popFirst();
     if (d) {
       switch (d->mType) {
         case CCGraphDescriber::eRefCountedObject:
           aHandler->NoteRefCountedObject(d->mAddress,
                                          d->mCnt,
                                          d->mName);
           break;
@@ -1788,164 +1868,172 @@ private:
     }
 
     return dont_AddRef(logFile);
   }
 
   void ClearDescribers()
   {
     CCGraphDescriber* d;
-    while((d = mDescribers.popFirst())) {
+    while ((d = mDescribers.popFirst())) {
       delete d;
     }
   }
 
-  FILE *mStream;
+  FILE* mStream;
   nsCOMPtr<nsIFile> mOutFile;
   bool mWantAllTraces;
   bool mDisableLog;
   bool mWantAfterProcessing;
   nsString mFilenameIdentifier;
   nsString mGCLogPath;
   nsString mCCLogPath;
   nsCString mCurrentAddress;
   mozilla::LinkedList<CCGraphDescriber> mDescribers;
 };
 
 NS_IMPL_ISUPPORTS(nsCycleCollectorLogger, nsICycleCollectorListener)
 
 nsresult
 nsCycleCollectorLoggerConstructor(nsISupports* aOuter,
                                   const nsIID& aIID,
-                                  void* *aInstancePtr)
+                                  void** aInstancePtr)
 {
-  if (NS_WARN_IF(aOuter))
+  if (NS_WARN_IF(aOuter)) {
     return NS_ERROR_NO_AGGREGATION;
-
-  nsISupports *logger = new nsCycleCollectorLogger();
+  }
+
+  nsISupports* logger = new nsCycleCollectorLogger();
 
   return logger->QueryInterface(aIID, aInstancePtr);
 }
 
 ////////////////////////////////////////////////////////////////////////
 // Bacon & Rajan's |MarkRoots| routine.
 ////////////////////////////////////////////////////////////////////////
 
 class GCGraphBuilder : public nsCycleCollectionTraversalCallback,
-                       public nsCycleCollectionNoteRootCallback
+  public nsCycleCollectionNoteRootCallback
 {
 private:
-  GCGraph &mGraph;
-  CycleCollectorResults &mResults;
+  GCGraph& mGraph;
+  CycleCollectorResults& mResults;
   NodePool::Builder mNodeBuilder;
   EdgePool::Builder mEdgeBuilder;
-  PtrInfo *mCurrPi;
-  nsCycleCollectionParticipant *mJSParticipant;
-  nsCycleCollectionParticipant *mJSZoneParticipant;
+  PtrInfo* mCurrPi;
+  nsCycleCollectionParticipant* mJSParticipant;
+  nsCycleCollectionParticipant* mJSZoneParticipant;
   nsCString mNextEdgeName;
-  nsICycleCollectorListener *mListener;
+  nsICycleCollectorListener* mListener;
   bool mMergeZones;
   bool mRanOutOfMemory;
 
 public:
-  GCGraphBuilder(GCGraph &aGraph,
-                 CycleCollectorResults &aResults,
-                 CycleCollectedJSRuntime *aJSRuntime,
-                 nsICycleCollectorListener *aListener,
+  GCGraphBuilder(GCGraph& aGraph,
+                 CycleCollectorResults& aResults,
+                 CycleCollectedJSRuntime* aJSRuntime,
+                 nsICycleCollectorListener* aListener,
                  bool aMergeZones);
   virtual ~GCGraphBuilder();
 
   bool WantAllTraces() const
   {
     return nsCycleCollectionNoteRootCallback::WantAllTraces();
   }
 
-  PtrInfo* AddNode(void *aPtr, nsCycleCollectionParticipant *aParticipant);
-  PtrInfo* AddWeakMapNode(void* node);
+  PtrInfo* AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant);
+  PtrInfo* AddWeakMapNode(void* aNode);
   void Traverse(PtrInfo* aPtrInfo);
   void SetLastChild();
 
-  bool RanOutOfMemory() const { return mRanOutOfMemory; }
+  bool RanOutOfMemory() const
+  {
+    return mRanOutOfMemory;
+  }
 
 private:
-  void DescribeNode(uint32_t refCount, const char *objName)
+  void DescribeNode(uint32_t aRefCount, const char* aObjName)
   {
-    mCurrPi->mRefCount = refCount;
+    mCurrPi->mRefCount = aRefCount;
   }
 
 public:
   // nsCycleCollectionNoteRootCallback methods.
-  NS_IMETHOD_(void) NoteXPCOMRoot(nsISupports *root);
-  NS_IMETHOD_(void) NoteJSRoot(void *root);
-  NS_IMETHOD_(void) NoteNativeRoot(void *root, nsCycleCollectionParticipant *participant);
-  NS_IMETHOD_(void) NoteWeakMapping(void *map, void *key, void *kdelegate, void *val);
+  NS_IMETHOD_(void) NoteXPCOMRoot(nsISupports* aRoot);
+  NS_IMETHOD_(void) NoteJSRoot(void* aRoot);
+  NS_IMETHOD_(void) NoteNativeRoot(void* aRoot,
+                                   nsCycleCollectionParticipant* aParticipant);
+  NS_IMETHOD_(void) NoteWeakMapping(void* aMap, void* aKey, void* aKdelegate,
+                                    void* aVal);
 
   // nsCycleCollectionTraversalCallback methods.
-  NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refCount,
-                                           const char *objName);
-  NS_IMETHOD_(void) DescribeGCedNode(bool isMarked, const char *objName,
+  NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt aRefCount,
+                                           const char* aObjName);
+  NS_IMETHOD_(void) DescribeGCedNode(bool aIsMarked, const char* aObjName,
                                      uint64_t aCompartmentAddress);
 
-  NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child);
-  NS_IMETHOD_(void) NoteJSChild(void *child);
-  NS_IMETHOD_(void) NoteNativeChild(void *child,
-                                    nsCycleCollectionParticipant *participant);
-  NS_IMETHOD_(void) NoteNextEdgeName(const char* name);
+  NS_IMETHOD_(void) NoteXPCOMChild(nsISupports* aChild);
+  NS_IMETHOD_(void) NoteJSChild(void* aChild);
+  NS_IMETHOD_(void) NoteNativeChild(void* aChild,
+                                    nsCycleCollectionParticipant* aParticipant);
+  NS_IMETHOD_(void) NoteNextEdgeName(const char* aName);
 
 private:
-  NS_IMETHOD_(void) NoteRoot(void *root,
-                             nsCycleCollectionParticipant *participant)
+  NS_IMETHOD_(void) NoteRoot(void* aRoot,
+                             nsCycleCollectionParticipant* aParticipant)
   {
-    MOZ_ASSERT(root);
-    MOZ_ASSERT(participant);
-
-    if (!participant->CanSkipInCC(root) || MOZ_UNLIKELY(WantAllTraces())) {
-      AddNode(root, participant);
+    MOZ_ASSERT(aRoot);
+    MOZ_ASSERT(aParticipant);
+
+    if (!aParticipant->CanSkipInCC(aRoot) || MOZ_UNLIKELY(WantAllTraces())) {
+      AddNode(aRoot, aParticipant);
     }
   }
 
-  NS_IMETHOD_(void) NoteChild(void *child, nsCycleCollectionParticipant *cp,
-                              nsCString edgeName)
+  NS_IMETHOD_(void) NoteChild(void* aChild, nsCycleCollectionParticipant* aCp,
+                              nsCString aEdgeName)
   {
-    PtrInfo *childPi = AddNode(child, cp);
-    if (!childPi)
+    PtrInfo* childPi = AddNode(aChild, aCp);
+    if (!childPi) {
       return;
+    }
     mEdgeBuilder.Add(childPi);
     if (mListener) {
-      mListener->NoteEdge((uint64_t)child, edgeName.get());
+      mListener->NoteEdge((uint64_t)aChild, aEdgeName.get());
     }
     ++childPi->mInternalRefs;
   }
 
-  JS::Zone *MergeZone(void *gcthing) {
+  JS::Zone* MergeZone(void* aGcthing)
+  {
     if (!mMergeZones) {
       return nullptr;
     }
-    JS::Zone *zone = JS::GetGCThingZone(gcthing);
+    JS::Zone* zone = JS::GetGCThingZone(aGcthing);
     if (js::IsSystemZone(zone)) {
       return nullptr;
     }
     return zone;
   }
 };
 
-GCGraphBuilder::GCGraphBuilder(GCGraph &aGraph,
-                               CycleCollectorResults &aResults,
-                               CycleCollectedJSRuntime *aJSRuntime,
-                               nsICycleCollectorListener *aListener,
+GCGraphBuilder::GCGraphBuilder(GCGraph& aGraph,
+                               CycleCollectorResults& aResults,
+                               CycleCollectedJSRuntime* aJSRuntime,
+                               nsICycleCollectorListener* aListener,
                                bool aMergeZones)
-  : mGraph(aGraph),
-    mResults(aResults),
-    mNodeBuilder(aGraph.mNodes),
-    mEdgeBuilder(aGraph.mEdges),
-    mJSParticipant(nullptr),
-    mJSZoneParticipant(nullptr),
-    mListener(aListener),
-    mMergeZones(aMergeZones),
-    mRanOutOfMemory(false)
+  : mGraph(aGraph)
+  , mResults(aResults)
+  , mNodeBuilder(aGraph.mNodes)
+  , mEdgeBuilder(aGraph.mEdges)
+  , mJSParticipant(nullptr)
+  , mJSZoneParticipant(nullptr)
+  , mListener(aListener)
+  , mMergeZones(aMergeZones)
+  , mRanOutOfMemory(false)
 {
   if (aJSRuntime) {
     mJSParticipant = aJSRuntime->GCThingParticipant();
     mJSZoneParticipant = aJSRuntime->ZoneParticipant();
   }
 
   uint32_t flags = 0;
   if (!flags && mListener) {
@@ -1966,25 +2054,25 @@ GCGraphBuilder::GCGraphBuilder(GCGraph &
              nsCycleCollectionTraversalCallback::WantAllTraces());
 }
 
 GCGraphBuilder::~GCGraphBuilder()
 {
 }
 
 PtrInfo*
-GCGraphBuilder::AddNode(void *aPtr, nsCycleCollectionParticipant *aParticipant)
+GCGraphBuilder::AddNode(void* aPtr, nsCycleCollectionParticipant* aParticipant)
 {
-  PtrToNodeEntry *e = mGraph.AddNodeToMap(aPtr);
+  PtrToNodeEntry* e = mGraph.AddNodeToMap(aPtr);
   if (!e) {
     mRanOutOfMemory = true;
     return nullptr;
   }
 
-  PtrInfo *result;
+  PtrInfo* result;
   if (!e->mNode) {
     // New entry.
     result = mNodeBuilder.Add(aPtr, aParticipant);
     e->mNode = result;
     NS_ASSERTION(result, "mNodeBuilder.Add returned null");
   } else {
     result = e->mNode;
     MOZ_ASSERT(result->mParticipant == aParticipant,
@@ -2012,252 +2100,271 @@ GCGraphBuilder::Traverse(PtrInfo* aPtrIn
 
 void
 GCGraphBuilder::SetLastChild()
 {
   mCurrPi->SetLastChild(mEdgeBuilder.Mark());
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteXPCOMRoot(nsISupports *root)
+GCGraphBuilder::NoteXPCOMRoot(nsISupports* aRoot)
 {
-  root = CanonicalizeXPCOMParticipant(root);
-  NS_ASSERTION(root,
+  aRoot = CanonicalizeXPCOMParticipant(aRoot);
+  NS_ASSERTION(aRoot,
                "Don't add objects that don't participate in collection!");
 
-  nsXPCOMCycleCollectionParticipant *cp;
-  ToParticipant(root, &cp);
-
-  NoteRoot(root, cp);
+  nsXPCOMCycleCollectionParticipant* cp;
+  ToParticipant(aRoot, &cp);
+
+  NoteRoot(aRoot, cp);
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteJSRoot(void *root)
+GCGraphBuilder::NoteJSRoot(void* aRoot)
 {
-  if (JS::Zone *zone = MergeZone(root)) {
+  if (JS::Zone* zone = MergeZone(aRoot)) {
     NoteRoot(zone, mJSZoneParticipant);
   } else {
-    NoteRoot(root, mJSParticipant);
+    NoteRoot(aRoot, mJSParticipant);
   }
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteNativeRoot(void *root, nsCycleCollectionParticipant *participant)
+GCGraphBuilder::NoteNativeRoot(void* aRoot,
+                               nsCycleCollectionParticipant* aParticipant)
 {
-  NoteRoot(root, participant);
+  NoteRoot(aRoot, aParticipant);
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::DescribeRefCountedNode(nsrefcnt refCount, const char *objName)
+GCGraphBuilder::DescribeRefCountedNode(nsrefcnt aRefCount, const char* aObjName)
 {
-  if (refCount == 0)
+  if (aRefCount == 0) {
     Fault("zero refcount", mCurrPi);
-  if (refCount == UINT32_MAX)
+  }
+  if (aRefCount == UINT32_MAX) {
     Fault("overflowing refcount", mCurrPi);
+  }
   mResults.mVisitedRefCounted++;
 
   if (mListener) {
-    mListener->NoteRefCountedObject((uint64_t)mCurrPi->mPointer, refCount,
-                                    objName);
+    mListener->NoteRefCountedObject((uint64_t)mCurrPi->mPointer, aRefCount,
+                                    aObjName);
   }
 
-  DescribeNode(refCount, objName);
+  DescribeNode(aRefCount, aObjName);
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::DescribeGCedNode(bool isMarked, const char *objName,
+GCGraphBuilder::DescribeGCedNode(bool aIsMarked, const char* aObjName,
                                  uint64_t aCompartmentAddress)
 {
-  uint32_t refCount = isMarked ? UINT32_MAX : 0;
+  uint32_t refCount = aIsMarked ? UINT32_MAX : 0;
   mResults.mVisitedGCed++;
 
   if (mListener) {
-    mListener->NoteGCedObject((uint64_t)mCurrPi->mPointer, isMarked,
-                              objName, aCompartmentAddress);
+    mListener->NoteGCedObject((uint64_t)mCurrPi->mPointer, aIsMarked,
+                              aObjName, aCompartmentAddress);
   }
 
-  DescribeNode(refCount, objName);
+  DescribeNode(refCount, aObjName);
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteXPCOMChild(nsISupports *child)
+GCGraphBuilder::NoteXPCOMChild(nsISupports* aChild)
 {
   nsCString edgeName;
   if (WantDebugInfo()) {
     edgeName.Assign(mNextEdgeName);
     mNextEdgeName.Truncate();
   }
-  if (!child || !(child = CanonicalizeXPCOMParticipant(child)))
+  if (!aChild || !(aChild = CanonicalizeXPCOMParticipant(aChild))) {
     return;
-
-  nsXPCOMCycleCollectionParticipant *cp;
-  ToParticipant(child, &cp);
-  if (cp && (!cp->CanSkipThis(child) || WantAllTraces())) {
-    NoteChild(child, cp, edgeName);
+  }
+
+  nsXPCOMCycleCollectionParticipant* cp;
+  ToParticipant(aChild, &cp);
+  if (cp && (!cp->CanSkipThis(aChild) || WantAllTraces())) {
+    NoteChild(aChild, cp, edgeName);
   }
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteNativeChild(void *child,
-                                nsCycleCollectionParticipant *participant)
+GCGraphBuilder::NoteNativeChild(void* aChild,
+                                nsCycleCollectionParticipant* aParticipant)
 {
   nsCString edgeName;
   if (WantDebugInfo()) {
     edgeName.Assign(mNextEdgeName);
     mNextEdgeName.Truncate();
   }
-  if (!child)
+  if (!aChild) {
     return;
-
-  MOZ_ASSERT(participant, "Need a nsCycleCollectionParticipant!");
-  NoteChild(child, participant, edgeName);
+  }
+
+  MOZ_ASSERT(aParticipant, "Need a nsCycleCollectionParticipant!");
+  NoteChild(aChild, aParticipant, edgeName);
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteJSChild(void *child)
+GCGraphBuilder::NoteJSChild(void* aChild)
 {
-  if (!child) {
+  if (!aChild) {
     return;
   }
 
   nsCString edgeName;
   if (MOZ_UNLIKELY(WantDebugInfo())) {
     edgeName.Assign(mNextEdgeName);
     mNextEdgeName.Truncate();
   }
 
-  if (xpc_GCThingIsGrayCCThing(child) || MOZ_UNLIKELY(WantAllTraces())) {
-    if (JS::Zone *zone = MergeZone(child)) {
+  if (xpc_GCThingIsGrayCCThing(aChild) || MOZ_UNLIKELY(WantAllTraces())) {
+    if (JS::Zone* zone = MergeZone(aChild)) {
       NoteChild(zone, mJSZoneParticipant, edgeName);
     } else {
-      NoteChild(child, mJSParticipant, edgeName);
+      NoteChild(aChild, mJSParticipant, edgeName);
     }
   }
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteNextEdgeName(const char* name)
+GCGraphBuilder::NoteNextEdgeName(const char* aName)
 {
   if (WantDebugInfo()) {
-    mNextEdgeName = name;
+    mNextEdgeName = aName;
   }
 }
 
 PtrInfo*
-GCGraphBuilder::AddWeakMapNode(void *node)
+GCGraphBuilder::AddWeakMapNode(void* aNode)
 {
-  MOZ_ASSERT(node, "Weak map node should be non-null.");
-
-  if (!xpc_GCThingIsGrayCCThing(node) && !WantAllTraces())
+  MOZ_ASSERT(aNode, "Weak map node should be non-null.");
+
+  if (!xpc_GCThingIsGrayCCThing(aNode) && !WantAllTraces()) {
     return nullptr;
-
-  if (JS::Zone *zone = MergeZone(node)) {
+  }
+
+  if (JS::Zone* zone = MergeZone(aNode)) {
     return AddNode(zone, mJSZoneParticipant);
-  } else {
-    return AddNode(node, mJSParticipant);
   }
+  return AddNode(aNode, mJSParticipant);
 }
 
 NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteWeakMapping(void *map, void *key, void *kdelegate, void *val)
+GCGraphBuilder::NoteWeakMapping(void* aMap, void* aKey, void* aKdelegate, void* aVal)
 {
   // Don't try to optimize away the entry here, as we've already attempted to
   // do that in TraceWeakMapping in nsXPConnect.
-  WeakMapping *mapping = mGraph.mWeakMaps.AppendElement();
-  mapping->mMap = map ? AddWeakMapNode(map) : nullptr;
-  mapping->mKey = key ? AddWeakMapNode(key) : nullptr;
-  mapping->mKeyDelegate = kdelegate ? AddWeakMapNode(kdelegate) : mapping->mKey;
-  mapping->mVal = val ? AddWeakMapNode(val) : nullptr;
+  WeakMapping* mapping = mGraph.mWeakMaps.AppendElement();
+  mapping->mMap = aMap ? AddWeakMapNode(aMap) : nullptr;
+  mapping->mKey = aKey ? AddWeakMapNode(aKey) : nullptr;
+  mapping->mKeyDelegate = aKdelegate ? AddWeakMapNode(aKdelegate) : mapping->mKey;
+  mapping->mVal = aVal ? AddWeakMapNode(aVal) : nullptr;
 
   if (mListener) {
-    mListener->NoteWeakMapEntry((uint64_t)map, (uint64_t)key,
-                                (uint64_t)kdelegate, (uint64_t)val);
+    mListener->NoteWeakMapEntry((uint64_t)aMap, (uint64_t)aKey,
+                                (uint64_t)aKdelegate, (uint64_t)aVal);
   }
 }
 
 static bool
-AddPurpleRoot(GCGraphBuilder &aBuilder, void *aRoot, nsCycleCollectionParticipant *aParti)
+AddPurpleRoot(GCGraphBuilder& aBuilder, void* aRoot,
+              nsCycleCollectionParticipant* aParti)
 {
   CanonicalizeParticipant(&aRoot, &aParti);
 
   if (aBuilder.WantAllTraces() || !aParti->CanSkipInCC(aRoot)) {
-    PtrInfo *pinfo = aBuilder.AddNode(aRoot, aParti);
+    PtrInfo* pinfo = aBuilder.AddNode(aRoot, aParti);
     if (!pinfo) {
       return false;
     }
   }
 
   return true;
 }
 
 // MayHaveChild() will be false after a Traverse if the object does
 // not have any children the CC will visit.
 class ChildFinder : public nsCycleCollectionTraversalCallback
 {
 public:
-  ChildFinder() : mMayHaveChild(false) {}
+  ChildFinder() : mMayHaveChild(false)
+  {
+  }
 
   // The logic of the Note*Child functions must mirror that of their
   // respective functions in GCGraphBuilder.
-  NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child);
-  NS_IMETHOD_(void) NoteNativeChild(void *child,
-                                    nsCycleCollectionParticipant *helper);
-  NS_IMETHOD_(void) NoteJSChild(void *child);
-
-  NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refcount,
-                                           const char *objname) {}
-  NS_IMETHOD_(void) DescribeGCedNode(bool ismarked,
-                                     const char *objname,
-                                     uint64_t aCompartmentAddress) {}
-  NS_IMETHOD_(void) NoteNextEdgeName(const char* name) {}
-  bool MayHaveChild() {
+  NS_IMETHOD_(void) NoteXPCOMChild(nsISupports* aChild);
+  NS_IMETHOD_(void) NoteNativeChild(void* aChild,
+                                    nsCycleCollectionParticipant* aHelper);
+  NS_IMETHOD_(void) NoteJSChild(void* aChild);
+
+  NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt aRefcount,
+                                           const char* aObjname)
+  {
+  }
+  NS_IMETHOD_(void) DescribeGCedNode(bool aIsMarked,
+                                     const char* aObjname,
+                                     uint64_t aCompartmentAddress)
+  {
+  }
+  NS_IMETHOD_(void) NoteNextEdgeName(const char* aName)
+  {
+  }
+  bool MayHaveChild()
+  {
     return mMayHaveChild;
   }
 private:
   bool mMayHaveChild;
 };
 
 NS_IMETHODIMP_(void)
-ChildFinder::NoteXPCOMChild(nsISupports *child)
+ChildFinder::NoteXPCOMChild(nsISupports* aChild)
 {
-  if (!child || !(child = CanonicalizeXPCOMParticipant(child)))
+  if (!aChild || !(aChild = CanonicalizeXPCOMParticipant(aChild))) {
     return;
-  nsXPCOMCycleCollectionParticipant *cp;
-  ToParticipant(child, &cp);
-  if (cp && !cp->CanSkip(child, true))
+  }
+  nsXPCOMCycleCollectionParticipant* cp;
+  ToParticipant(aChild, &cp);
+  if (cp && !cp->CanSkip(aChild, true)) {
     mMayHaveChild = true;
+  }
 }
 
 NS_IMETHODIMP_(void)
-ChildFinder::NoteNativeChild(void *child,
-                             nsCycleCollectionParticipant *helper)
+ChildFinder::NoteNativeChild(void* aChild,
+                             nsCycleCollectionParticipant* aHelper)
 {
-  if (child)
+  if (aChild) {
     mMayHaveChild = true;
+  }
 }
 
 NS_IMETHODIMP_(void)
-ChildFinder::NoteJSChild(void *child)
+ChildFinder::NoteJSChild(void* aChild)
 {
-  if (child && xpc_GCThingIsGrayCCThing(child)) {
+  if (aChild && xpc_GCThingIsGrayCCThing(aChild)) {
     mMayHaveChild = true;
   }
 }
 
 static bool
-MayHaveChild(void *o, nsCycleCollectionParticipant* cp)
+MayHaveChild(void* aObj, nsCycleCollectionParticipant* aCp)
 {
   ChildFinder cf;
-  cp->Traverse(o, cf);
+  aCp->Traverse(aObj, cf);
   return cf.MayHaveChild();
 }
 
 template<class T>
-class SegmentedArrayElement : public LinkedListElement<SegmentedArrayElement<T>>
-                            , public AutoFallibleTArray<T, 60>
+class SegmentedArrayElement
+  : public LinkedListElement<SegmentedArrayElement<T>>
+  , public AutoFallibleTArray<T, 60>
 {
 };
 
 template<class T>
 class SegmentedArray
 {
 public:
   ~SegmentedArray()
@@ -2375,17 +2482,17 @@ struct SnowWhiteObject
   void* mPointer;
   nsCycleCollectionParticipant* mParticipant;
   nsCycleCollectingAutoRefCnt* mRefCnt;
 };
 
 class SnowWhiteKiller : public TraceCallbacks
 {
 public:
-  SnowWhiteKiller(nsCycleCollector *aCollector, uint32_t aMaxCount)
+  SnowWhiteKiller(nsCycleCollector* aCollector, uint32_t aMaxCount)
     : mCollector(aCollector)
   {
     MOZ_ASSERT(mCollector, "Calling SnowWhiteKiller after nsCC went away");
     while (true) {
       if (mObjects.SetCapacity(aMaxCount)) {
         break;
       }
       if (aMaxCount == 1) {
@@ -2408,18 +2515,18 @@ public:
     }
   }
 
   void
   Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry)
   {
     MOZ_ASSERT(aEntry->mObject, "Null object in purple buffer");
     if (!aEntry->mRefCnt->get()) {
-      void *o = aEntry->mObject;
-      nsCycleCollectionParticipant *cp = aEntry->mParticipant;
+      void* o = aEntry->mObject;
+      nsCycleCollectionParticipant* cp = aEntry->mParticipant;
       CanonicalizeParticipant(&o, &cp);
       SnowWhiteObject swo = { o, cp, aEntry->mRefCnt };
       if (mObjects.AppendElement(swo)) {
         aBuffer.Remove(aEntry);
       }
     }
   }
 
@@ -2471,62 +2578,63 @@ public:
   }
 
   virtual void Trace(JS::Heap<JSFunction*>* aFunction, const char* aName,
                      void* aClosure) const
   {
   }
 
 private:
-  nsCycleCollector *mCollector;
+  nsCycleCollector* mCollector;
   FallibleTArray<SnowWhiteObject> mObjects;
 };
 
 class RemoveSkippableVisitor : public SnowWhiteKiller
 {
 public:
   RemoveSkippableVisitor(nsCycleCollector* aCollector,
                          uint32_t aMaxCount, bool aRemoveChildlessNodes,
                          bool aAsyncSnowWhiteFreeing,
                          CC_ForgetSkippableCallback aCb)
-    : SnowWhiteKiller(aCollector, aAsyncSnowWhiteFreeing ? 0 : aMaxCount),
-      mRemoveChildlessNodes(aRemoveChildlessNodes),
-      mAsyncSnowWhiteFreeing(aAsyncSnowWhiteFreeing),
-      mDispatchedDeferredDeletion(false),
-      mCallback(aCb)
-  {}
+    : SnowWhiteKiller(aCollector, aAsyncSnowWhiteFreeing ? 0 : aMaxCount)
+    , mRemoveChildlessNodes(aRemoveChildlessNodes)
+    , mAsyncSnowWhiteFreeing(aAsyncSnowWhiteFreeing)
+    , mDispatchedDeferredDeletion(false)
+    , mCallback(aCb)
+  {
+  }
 
   ~RemoveSkippableVisitor()
   {
     // Note, we must call the callback before SnowWhiteKiller calls
     // DeleteCycleCollectable!
     if (mCallback) {
       mCallback();
     }
     if (HasSnowWhiteObjects()) {
       // Effectively a continuation.
       nsCycleCollector_dispatchDeferredDeletion(true);
     }
   }
 
   void
-  Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
+  Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry)
   {
     MOZ_ASSERT(aEntry->mObject, "null mObject in purple buffer");
     if (!aEntry->mRefCnt->get()) {
       if (!mAsyncSnowWhiteFreeing) {
         SnowWhiteKiller::Visit(aBuffer, aEntry);
       } else if (!mDispatchedDeferredDeletion) {
         mDispatchedDeferredDeletion = true;
         nsCycleCollector_dispatchDeferredDeletion(false);
       }
       return;
     }
-    void *o = aEntry->mObject;
-    nsCycleCollectionParticipant *cp = aEntry->mParticipant;
+    void* o = aEntry->mObject;
+    nsCycleCollectionParticipant* cp = aEntry->mParticipant;
     CanonicalizeParticipant(&o, &cp);
     if (aEntry->mRefCnt->IsPurple() && !cp->CanSkip(o, false) &&
         (!mRemoveChildlessNodes || MayHaveChild(o, cp))) {
       return;
     }
     aBuffer.Remove(aEntry);
   }
 
@@ -2581,36 +2689,37 @@ nsCycleCollector::ForgetSkippable(bool a
 
   // If we remove things from the purple buffer during graph building, we may
   // lose track of an object that was mutated during graph building.
   MOZ_ASSERT(mIncrementalPhase == IdlePhase);
 
   if (mJSRuntime) {
     mJSRuntime->PrepareForForgetSkippable();
   }
-  MOZ_ASSERT(!mScanInProgress, "Don't forget skippable or free snow-white while scan is in progress.");
+  MOZ_ASSERT(!mScanInProgress,
+             "Don't forget skippable or free snow-white while scan is in progress.");
   mPurpleBuf.RemoveSkippable(this, aRemoveChildlessNodes,
                              aAsyncSnowWhiteFreeing, mForgetSkippableCB);
 }
 
 MOZ_NEVER_INLINE void
-nsCycleCollector::MarkRoots(SliceBudget &aBudget)
+nsCycleCollector::MarkRoots(SliceBudget& aBudget)
 {
   const intptr_t kNumNodesBetweenTimeChecks = 1000;
   const intptr_t kStep = SliceBudget::CounterReset / kNumNodesBetweenTimeChecks;
 
   TimeLog timeLog;
   AutoRestore<bool> ar(mScanInProgress);
   MOZ_ASSERT(!mScanInProgress);
   mScanInProgress = true;
   MOZ_ASSERT(mIncrementalPhase == GraphBuildingPhase);
   MOZ_ASSERT(mCurrNode);
 
   while (!aBudget.isOverBudget() && !mCurrNode->IsDone()) {
-    PtrInfo *pi = mCurrNode->GetNext();
+    PtrInfo* pi = mCurrNode->GetNext();
     if (!pi) {
       MOZ_CRASH();
     }
 
     // We need to call the builder's Traverse() method on deleted nodes, to
     // set their firstChild() that may be read by a prior non-deleted
     // neighbor.
     mBuilder->Traverse(pi);
@@ -2643,61 +2752,63 @@ nsCycleCollector::MarkRoots(SliceBudget 
 
 ////////////////////////////////////////////////////////////////////////
 // Bacon & Rajan's |ScanRoots| routine.
 ////////////////////////////////////////////////////////////////////////
 
 
 struct ScanBlackVisitor
 {
-  ScanBlackVisitor(uint32_t &aWhiteNodeCount, bool &aFailed)
+  ScanBlackVisitor(uint32_t& aWhiteNodeCount, bool& aFailed)
     : mWhiteNodeCount(aWhiteNodeCount), mFailed(aFailed)
   {
   }
 
-  bool ShouldVisitNode(PtrInfo const *pi)
+  bool ShouldVisitNode(PtrInfo const* aPi)
   {
-    return pi->mColor != black;
+    return aPi->mColor != black;
   }
 
-  MOZ_NEVER_INLINE void VisitNode(PtrInfo *pi)
+  MOZ_NEVER_INLINE void VisitNode(PtrInfo* aPi)
   {
-    if (pi->mColor == white)
+    if (aPi->mColor == white) {
       --mWhiteNodeCount;
-    pi->mColor = black;
+    }
+    aPi->mColor = black;
   }
 
   void Failed()
   {
     mFailed = true;
   }
 
 private:
-  uint32_t &mWhiteNodeCount;
-  bool &mFailed;
+  uint32_t& mWhiteNodeCount;
+  bool& mFailed;
 };
 
 static void
 FloodBlackNode(uint32_t& aWhiteNodeCount, bool& aFailed, PtrInfo* aPi)
 {
     GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(aWhiteNodeCount, aFailed)).Walk(aPi);
-    MOZ_ASSERT(aPi->mColor == black || !aPi->mParticipant, "FloodBlackNode should make aPi black");
+    MOZ_ASSERT(aPi->mColor == black || !aPi->mParticipant,
+               "FloodBlackNode should make aPi black");
 }
 
 // Iterate over the WeakMaps.  If we mark anything while iterating
 // over the WeakMaps, we must iterate over all of the WeakMaps again.
 void
 nsCycleCollector::ScanWeakMaps()
 {
   bool anyChanged;
   bool failed = false;
   do {
     anyChanged = false;
     for (uint32_t i = 0; i < mGraph.mWeakMaps.Length(); i++) {
-      WeakMapping *wm = &mGraph.mWeakMaps[i];
+      WeakMapping* wm = &mGraph.mWeakMaps[i];
 
       // If any of these are null, the original object was marked black.
       uint32_t mColor = wm->mMap ? wm->mMap->mColor : black;
       uint32_t kColor = wm->mKey ? wm->mKey->mColor : black;
       uint32_t kdColor = wm->mKeyDelegate ? wm->mKeyDelegate->mColor : black;
       uint32_t vColor = wm->mVal ? wm->mVal->mColor : black;
 
       MOZ_ASSERT(mColor != grey, "Uncolored weak map");
@@ -2722,53 +2833,55 @@ nsCycleCollector::ScanWeakMaps()
     CC_TELEMETRY(_OOM, true);
   }
 }
 
 // Flood black from any objects in the purple buffer that are in the CC graph.
 class PurpleScanBlackVisitor
 {
 public:
-  PurpleScanBlackVisitor(GCGraph &aGraph, nsICycleCollectorListener *aListener,
-                         uint32_t &aCount, bool &aFailed)
+  PurpleScanBlackVisitor(GCGraph& aGraph, nsICycleCollectorListener* aListener,
+                         uint32_t& aCount, bool& aFailed)
     : mGraph(aGraph), mListener(aListener), mCount(aCount), mFailed(aFailed)
   {
   }
 
   void
-  Visit(nsPurpleBuffer &aBuffer, nsPurpleBufferEntry *aEntry)
+  Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry)
   {
-    MOZ_ASSERT(aEntry->mObject, "Entries with null mObject shouldn't be in the purple buffer.");
-    MOZ_ASSERT(aEntry->mRefCnt->get() != 0, "Snow-white objects shouldn't be in the purple buffer.");
-
-    void *obj = aEntry->mObject;
+    MOZ_ASSERT(aEntry->mObject,
+               "Entries with null mObject shouldn't be in the purple buffer.");
+    MOZ_ASSERT(aEntry->mRefCnt->get() != 0,
+               "Snow-white objects shouldn't be in the purple buffer.");
+
+    void* obj = aEntry->mObject;
     if (!aEntry->mParticipant) {
       obj = CanonicalizeXPCOMParticipant(static_cast<nsISupports*>(obj));
       MOZ_ASSERT(obj, "Don't add objects that don't participate in collection!");
     }
 
-    PtrInfo *pi = mGraph.FindNode(obj);
+    PtrInfo* pi = mGraph.FindNode(obj);
     if (!pi) {
       return;
     }
     MOZ_ASSERT(pi->mParticipant, "No dead objects should be in the purple buffer.");
     if (MOZ_UNLIKELY(mListener)) {
       mListener->NoteIncrementalRoot((uint64_t)pi->mPointer);
     }
     if (pi->mColor == black) {
       return;
     }
     FloodBlackNode(mCount, mFailed, pi);
   }
 
 private:
-  GCGraph &mGraph;
-  nsICycleCollectorListener *mListener;
-  uint32_t &mCount;
-  bool &mFailed;
+  GCGraph& mGraph;
+  nsICycleCollectorListener* mListener;
+  uint32_t& mCount;
+  bool& mFailed;
 };
 
 // Objects that have been stored somewhere since the start of incremental graph building must
 // be treated as live for this cycle collection, because we may not have accurate information
 // about who holds references to them.
 void
 nsCycleCollector::ScanIncrementalRoots()
 {
@@ -2789,22 +2902,22 @@ nsCycleCollector::ScanIncrementalRoots()
   timeLog.Checkpoint("ScanIncrementalRoots::fix purple");
 
   // Garbage collected objects:
   // If a GCed object was added to the graph with a refcount of zero, and is
   // now marked black by the GC, it was probably gray before and was exposed
   // to active JS, so it may have been stored somewhere, so it needs to be
   // treated as live.
   if (mJSRuntime) {
-    nsCycleCollectionParticipant *jsParticipant = mJSRuntime->GCThingParticipant();
-    nsCycleCollectionParticipant *zoneParticipant = mJSRuntime->ZoneParticipant();
+    nsCycleCollectionParticipant* jsParticipant = mJSRuntime->GCThingParticipant();
+    nsCycleCollectionParticipant* zoneParticipant = mJSRuntime->ZoneParticipant();
     NodePool::Enumerator etor(mGraph.mNodes);
 
     while (!etor.IsDone()) {
-      PtrInfo *pi = etor.GetNext();
+      PtrInfo* pi = etor.GetNext();
 
       // If the refcount is non-zero, pi can't have been a gray JS object.
       if (pi->mRefCount != 0) {
         continue;
       }
 
       // As an optimization, if an object has already been determined to be live,
       // don't consider it further.  We can't do this if there is a listener,
@@ -2815,17 +2928,17 @@ nsCycleCollector::ScanIncrementalRoots()
 
       // If the object is still marked gray by the GC, nothing could have gotten
       // hold of it, so it isn't an incremental root.
       if (pi->mParticipant == jsParticipant) {
         if (xpc_GCThingIsGrayCCThing(pi->mPointer)) {
           continue;
         }
       } else if (pi->mParticipant == zoneParticipant) {
-        JS::Zone *zone = static_cast<JS::Zone*>(pi->mPointer);
+        JS::Zone* zone = static_cast<JS::Zone*>(pi->mPointer);
         if (js::ZoneGlobalsAreAllGray(zone)) {
           continue;
         }
       } else {
         MOZ_ASSERT(false, "Non-JS thing with 0 refcount? Treating as live.");
       }
 
       // At this point, pi must be an incremental root.
@@ -2935,17 +3048,17 @@ nsCycleCollector::ScanRoots(bool aFullyS
   ScanWeakMaps();
   timeLog.Checkpoint("ScanRoots::ScanWeakMaps");
 
   if (mListener) {
     mListener->BeginResults();
 
     NodePool::Enumerator etor(mGraph.mNodes);
     while (!etor.IsDone()) {
-      PtrInfo *pi = etor.GetNext();
+      PtrInfo* pi = etor.GetNext();
       if (!pi->mParticipant) {
         continue;
       }
       switch (pi->mColor) {
         case black:
           if (pi->mRefCount > 0 && pi->mRefCount < UINT32_MAX &&
               pi->mInternalRefs != pi->mRefCount) {
             mListener->DescribeRoot((uint64_t)pi->mPointer,
@@ -2994,19 +3107,18 @@ nsCycleCollector::CollectWhite()
   nsAutoTArray<PtrInfo*, 4000> whiteNodes;
 
   MOZ_ASSERT(mIncrementalPhase == ScanAndCollectWhitePhase);
 
   whiteNodes.SetCapacity(mWhiteNodeCount);
   uint32_t numWhiteGCed = 0;
 
   NodePool::Enumerator etor(mGraph.mNodes);
-  while (!etor.IsDone())
-  {
-    PtrInfo *pinfo = etor.GetNext();
+  while (!etor.IsDone()) {
+    PtrInfo* pinfo = etor.GetNext();
     if (pinfo->mColor == white && pinfo->mParticipant) {
       whiteNodes.AppendElement(pinfo);
       pinfo->mParticipant->Root(pinfo->mPointer);
       if (pinfo->mRefCount == 0) {
         // only JS objects have a refcount of 0
         ++numWhiteGCed;
       }
     }
@@ -3021,29 +3133,29 @@ nsCycleCollector::CollectWhite()
   timeLog.Checkpoint("CollectWhite::Root");
 
   if (mBeforeUnlinkCB) {
     mBeforeUnlinkCB();
     timeLog.Checkpoint("CollectWhite::BeforeUnlinkCB");
   }
 
   for (uint32_t i = 0; i < count; ++i) {
-    PtrInfo *pinfo = whiteNodes.ElementAt(i);
+    PtrInfo* pinfo = whiteNodes.ElementAt(i);
     MOZ_ASSERT(pinfo->mParticipant, "Unlink shouldn't see objects removed from graph.");
     pinfo->mParticipant->Unlink(pinfo->mPointer);
 #ifdef DEBUG
     if (mJSRuntime) {
       mJSRuntime->AssertNoObjectsToTrace(pinfo->mPointer);
     }
 #endif
   }
   timeLog.Checkpoint("CollectWhite::Unlink");
 
   for (uint32_t i = 0; i < count; ++i) {
-    PtrInfo *pinfo = whiteNodes.ElementAt(i);
+    PtrInfo* pinfo = whiteNodes.ElementAt(i);
     MOZ_ASSERT(pinfo->mParticipant, "Unroot shouldn't see objects removed from graph.");
     pinfo->mParticipant->Unroot(pinfo->mPointer);
   }
   timeLog.Checkpoint("CollectWhite::Unroot");
 
   nsCycleCollector_dispatchDeferredDeletion(false);
   timeLog.Checkpoint("CollectWhite::dispatchDeferredDeletion");
 
@@ -3132,59 +3244,61 @@ nsCycleCollector::nsCycleCollector() :
 }
 
 nsCycleCollector::~nsCycleCollector()
 {
   UnregisterWeakMemoryReporter(this);
 }
 
 void
-nsCycleCollector::RegisterJSRuntime(CycleCollectedJSRuntime *aJSRuntime)
+nsCycleCollector::RegisterJSRuntime(CycleCollectedJSRuntime* aJSRuntime)
 {
-  if (mJSRuntime)
+  if (mJSRuntime) {
     Fault("multiple registrations of cycle collector JS runtime", aJSRuntime);
+  }
 
   mJSRuntime = aJSRuntime;
 
   // We can't register as a reporter in nsCycleCollector() because that runs
   // before the memory reporter manager is initialized.  So we do it here
   // instead.
   static bool registered = false;
   if (!registered) {
     RegisterWeakMemoryReporter(this);
     registered = true;
   }
 }
 
 void
 nsCycleCollector::ForgetJSRuntime()
 {
-  if (!mJSRuntime)
+  if (!mJSRuntime) {
     Fault("forgetting non-registered cycle collector JS runtime");
+  }
 
   mJSRuntime = nullptr;
 }
 
 #ifdef DEBUG
 static bool
-HasParticipant(void *aPtr, nsCycleCollectionParticipant *aParti)
+HasParticipant(void* aPtr, nsCycleCollectionParticipant* aParti)
 {
   if (aParti) {
     return true;
   }
 
-  nsXPCOMCycleCollectionParticipant *xcp;
+  nsXPCOMCycleCollectionParticipant* xcp;
   ToParticipant(static_cast<nsISupports*>(aPtr), &xcp);
   return xcp != nullptr;
 }
 #endif
 
 MOZ_ALWAYS_INLINE void
-nsCycleCollector::Suspect(void *aPtr, nsCycleCollectionParticipant *aParti,
-                          nsCycleCollectingAutoRefCnt *aRefCnt)
+nsCycleCollector::Suspect(void* aPtr, nsCycleCollectionParticipant* aParti,
+                          nsCycleCollectingAutoRefCnt* aRefCnt)
 {
   CheckThreadSafety();
 
   // Re-entering ::Suspect during collection used to be a fault, but
   // we are canonicalizing nsISupports pointers using QI, so we will
   // see some spurious refcount traffic here.
 
   if (MOZ_UNLIKELY(mScanInProgress)) {
@@ -3216,46 +3330,51 @@ nsCycleCollector::CheckThreadSafety()
 // this CC. It returns true on startup (before the mark bits have been set),
 // and also when UnmarkGray has run out of stack.  We also force GCs on shut
 // down to collect cycles involving both DOM and JS.
 void
 nsCycleCollector::FixGrayBits(bool aForceGC)
 {
   CheckThreadSafety();
 
-  if (!mJSRuntime)
+  if (!mJSRuntime) {
     return;
+  }
 
   if (!aForceGC) {
     mJSRuntime->FixWeakMappingGrayBits();
 
     bool needGC = !mJSRuntime->AreGCGrayBitsValid();
     // Only do a telemetry ping for non-shutdown CCs.
     CC_TELEMETRY(_NEED_GC, needGC);
-    if (!needGC)
+    if (!needGC) {
       return;
+    }
     mResults.mForcedGC = true;
   }
 
   TimeLog timeLog;
-  mJSRuntime->GarbageCollect(aForceGC ? JS::gcreason::SHUTDOWN_CC : JS::gcreason::CC_FORCED);
+  mJSRuntime->GarbageCollect(aForceGC ? JS::gcreason::SHUTDOWN_CC
+                                      : JS::gcreason::CC_FORCED);
   timeLog.Checkpoint("GC()");
 }
 
 void
 nsCycleCollector::CleanupAfterCollection()
 {
   TimeLog timeLog;
   MOZ_ASSERT(mIncrementalPhase == CleanupPhase);
   mGraph.Clear();
   timeLog.Checkpoint("CleanupAfterCollection::mGraph.Clear()");
 
-  uint32_t interval = (uint32_t) ((TimeStamp::Now() - mCollectionStart).ToMilliseconds());
+  uint32_t interval =
+    (uint32_t)((TimeStamp::Now() - mCollectionStart).ToMilliseconds());
 #ifdef COLLECT_TIME_DEBUG
-  printf("cc: total cycle collector time was %ums in %u slices\n", interval, mResults.mNumSlices);
+  printf("cc: total cycle collector time was %ums in %u slices\n", interval,
+         mResults.mNumSlices);
   printf("cc: visited %u ref counted and %u GCed objects, freed %d ref counted and %d GCed objects",
          mResults.mVisitedRefCounted, mResults.mVisitedGCed,
          mResults.mFreedRefCounted, mResults.mFreedGCed);
   uint32_t numVisited = mResults.mVisitedRefCounted + mResults.mVisitedGCed;
   if (numVisited > 1000) {
     uint32_t numFreed = mResults.mFreedRefCounted + mResults.mFreedGCed;
     printf(" (%d%%)", 100 * numFreed / numVisited);
   }
@@ -3284,28 +3403,28 @@ nsCycleCollector::ShutdownCollect()
     if (!Collect(ShutdownCC, unlimitedBudget, nullptr)) {
       break;
     }
   }
   NS_WARN_IF_FALSE(i < NORMAL_SHUTDOWN_COLLECTIONS, "Extra shutdown CC");
 }
 
 static void
-PrintPhase(const char *aPhase)
+PrintPhase(const char* aPhase)
 {
 #ifdef DEBUG_PHASES
   printf("cc: begin %s on %s\n", aPhase,
          NS_IsMainThread() ? "mainthread" : "worker");
 #endif
 }
 
 bool
 nsCycleCollector::Collect(ccType aCCType,
-                          SliceBudget &aBudget,
-                          nsICycleCollectorListener *aManualListener)
+                          SliceBudget& aBudget,
+                          nsICycleCollectorListener* aManualListener)
 {
   CheckThreadSafety();
 
   // This can legitimately happen in a few cases. See bug 383651.
   if (mActivelyCollecting || mFreeingSnowWhite) {
     return false;
   }
   mActivelyCollecting = true;
@@ -3436,17 +3555,17 @@ nsCycleCollector::ShouldMergeZones(ccTyp
   } else {
     mMergedInARow = 0;
     return false;
   }
 }
 
 void
 nsCycleCollector::BeginCollection(ccType aCCType,
-                                  nsICycleCollectorListener *aManualListener)
+                                  nsICycleCollectorListener* aManualListener)
 {
   TimeLog timeLog;
   MOZ_ASSERT(mIncrementalPhase == IdlePhase);
 
   mCollectionStart = TimeStamp::Now();
 
   if (mJSRuntime) {
     mJSRuntime->BeginCycleCollectionCallback();
@@ -3528,37 +3647,37 @@ nsCycleCollector::Shutdown()
   if (PR_GetEnv("MOZ_CC_RUN_DURING_SHUTDOWN"))
 #endif
   {
     ShutdownCollect();
   }
 }
 
 void
-nsCycleCollector::RemoveObjectFromGraph(void *aObj)
+nsCycleCollector::RemoveObjectFromGraph(void* aObj)
 {
   if (mIncrementalPhase == IdlePhase) {
     return;
   }
 
-  if (PtrInfo *pinfo = mGraph.FindNode(aObj)) {
+  if (PtrInfo* pinfo = mGraph.FindNode(aObj)) {
     mGraph.RemoveNodeFromMap(aObj);
 
     pinfo->mPointer = nullptr;
     pinfo->mParticipant = nullptr;
   }
 }
 
 void
 nsCycleCollector::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf,
-                                      size_t *aObjectSize,
-                                      size_t *aGraphNodesSize,
-                                      size_t *aGraphEdgesSize,
-                                      size_t *aWeakMapsSize,
-                                      size_t *aPurpleBufferSize) const
+                                      size_t* aObjectSize,
+                                      size_t* aGraphNodesSize,
+                                      size_t* aGraphEdgesSize,
+                                      size_t* aWeakMapsSize,
+                                      size_t* aPurpleBufferSize) const
 {
   *aObjectSize = aMallocSizeOf(this);
 
   mGraph.SizeOfExcludingThis(aMallocSizeOf, aGraphNodesSize, aGraphEdgesSize,
                              aWeakMapsSize);
 
   *aPurpleBufferSize = mPurpleBuf.SizeOfExcludingThis(aMallocSizeOf);
 
@@ -3580,34 +3699,34 @@ nsCycleCollector::GetJSPurpleBuffer()
 }
 
 ////////////////////////////////////////////////////////////////////////
 // Module public API (exported in nsCycleCollector.h)
 // Just functions that redirect into the singleton, once it's built.
 ////////////////////////////////////////////////////////////////////////
 
 void
-nsCycleCollector_registerJSRuntime(CycleCollectedJSRuntime *rt)
+nsCycleCollector_registerJSRuntime(CycleCollectedJSRuntime* aRt)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
   // But we shouldn't already have a runtime.
   MOZ_ASSERT(!data->mRuntime);
 
-  data->mRuntime = rt;
-  data->mCollector->RegisterJSRuntime(rt);
+  data->mRuntime = aRt;
+  data->mCollector->RegisterJSRuntime(aRt);
 }
 
 void
 nsCycleCollector_forgetJSRuntime()
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   // And we shouldn't have already forgotten our runtime.
   MOZ_ASSERT(data->mRuntime);
 
   // But it may have shutdown already.
   if (data->mCollector) {
@@ -3686,103 +3805,103 @@ DropJSObjectsImpl(nsISupports* aHolder)
 #endif
   DropJSObjectsImpl(static_cast<void*>(aHolder));
 }
 
 #ifdef DEBUG
 bool
 IsJSHolder(void* aHolder)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now, and not completely
   // shut down.
   MOZ_ASSERT(data);
   // And we should have a runtime.
   MOZ_ASSERT(data->mRuntime);
 
   return data->mRuntime->IsJSHolder(aHolder);
 }
 #endif
 
 void
 DeferredFinalize(nsISupports* aSupports)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now, and not completely
   // shut down.
   MOZ_ASSERT(data);
   // And we should have a runtime.
   MOZ_ASSERT(data->mRuntime);
 
   data->mRuntime->DeferredFinalize(aSupports);
 }
 
 void
 DeferredFinalize(DeferredFinalizeAppendFunction aAppendFunc,
                  DeferredFinalizeFunction aFunc,
                  void* aThing)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now, and not completely
   // shut down.
   MOZ_ASSERT(data);
   // And we should have a runtime.
   MOZ_ASSERT(data->mRuntime);
 
   data->mRuntime->DeferredFinalize(aAppendFunc, aFunc, aThing);
 }
 
 } // namespace cyclecollector
 } // namespace mozilla
 
 
 MOZ_NEVER_INLINE static void
-SuspectAfterShutdown(void* n, nsCycleCollectionParticipant* cp,
+SuspectAfterShutdown(void* aPtr, nsCycleCollectionParticipant* aCp,
                      nsCycleCollectingAutoRefCnt* aRefCnt,
                      bool* aShouldDelete)
 {
   if (aRefCnt->get() == 0) {
     if (!aShouldDelete) {
       // The CC is shut down, so we can't be in the middle of an ICC.
-      CanonicalizeParticipant(&n, &cp);
+      CanonicalizeParticipant(&aPtr, &aCp);
       aRefCnt->stabilizeForDeletion();
-      cp->DeleteCycleCollectable(n);
+      aCp->DeleteCycleCollectable(aPtr);
     } else {
       *aShouldDelete = true;
     }
   } else {
     // Make sure we'll get called again.
     aRefCnt->RemoveFromPurpleBuffer();
   }
 }
 
 void
-NS_CycleCollectorSuspect3(void *n, nsCycleCollectionParticipant *cp,
-                          nsCycleCollectingAutoRefCnt *aRefCnt,
+NS_CycleCollectorSuspect3(void* aPtr, nsCycleCollectionParticipant* aCp,
+                          nsCycleCollectingAutoRefCnt* aRefCnt,
                           bool* aShouldDelete)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
 
   if (MOZ_LIKELY(data->mCollector)) {
-    data->mCollector->Suspect(n, cp, aRefCnt);
+    data->mCollector->Suspect(aPtr, aCp, aRefCnt);
     return;
   }
-  SuspectAfterShutdown(n, cp, aRefCnt, aShouldDelete);
+  SuspectAfterShutdown(aPtr, aCp, aRefCnt, aShouldDelete);
 }
 
 uint32_t
 nsCycleCollector_suspectedCount()
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
 
   if (!data->mCollector) {
     return 0;
   }
 
@@ -3812,159 +3931,159 @@ nsCycleCollector_startup()
   data->mRuntime = nullptr;
 
   sCollectorData.set(data);
 }
 
 void
 nsCycleCollector_setBeforeUnlinkCallback(CC_BeforeUnlinkCallback aCB)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
 
   data->mCollector->SetBeforeUnlinkCallback(aCB);
 }
 
 void
 nsCycleCollector_setForgetSkippableCallback(CC_ForgetSkippableCallback aCB)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
 
   data->mCollector->SetForgetSkippableCallback(aCB);
 }
 
 void
 nsCycleCollector_forgetSkippable(bool aRemoveChildlessNodes,
                                  bool aAsyncSnowWhiteFreeing)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
 
   PROFILER_LABEL("CC", "nsCycleCollector_forgetSkippable");
   TimeLog timeLog;
   data->mCollector->ForgetSkippable(aRemoveChildlessNodes,
                                     aAsyncSnowWhiteFreeing);
   timeLog.Checkpoint("ForgetSkippable()");
 }
 
 void
 nsCycleCollector_dispatchDeferredDeletion(bool aContinuation)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   if (!data || !data->mRuntime) {
     return;
   }
 
   data->mRuntime->DispatchDeferredDeletion(aContinuation);
 }
 
 bool
 nsCycleCollector_doDeferredDeletion()
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
   MOZ_ASSERT(data->mRuntime);
 
   return data->mCollector->FreeSnowWhite(false);
 }
 
 void
-nsCycleCollector_collect(nsICycleCollectorListener *aManualListener)
+nsCycleCollector_collect(nsICycleCollectorListener* aManualListener)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
 
   PROFILER_LABEL("CC", "nsCycleCollector_collect");
   SliceBudget unlimitedBudget;
   data->mCollector->Collect(ManualCC, unlimitedBudget, aManualListener);
 }
 
 void
 nsCycleCollector_collectSlice(int64_t aSliceTime)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
 
   PROFILER_LABEL("CC", "nsCycleCollector_collectSlice");
   SliceBudget budget;
   if (aSliceTime >= 0) {
     budget = SliceBudget::TimeBudget(aSliceTime);
   }
   data->mCollector->Collect(SliceCC, budget, nullptr);
 }
 
 void
 nsCycleCollector_collectSliceWork(int64_t aSliceWork)
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   // We should have started the cycle collector by now.
   MOZ_ASSERT(data);
   MOZ_ASSERT(data->mCollector);
 
   PROFILER_LABEL("CC", "nsCycleCollector_collectSliceWork");
   SliceBudget budget;
   if (aSliceWork >= 0) {
     budget = SliceBudget::WorkBudget(aSliceWork);
   }
   data->mCollector->Collect(SliceCC, budget, nullptr);
 }
 
 void
 nsCycleCollector_prepareForGarbageCollection()
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   MOZ_ASSERT(data);
 
   if (!data->mCollector) {
     return;
   }
 
   data->mCollector->PrepareForGarbageCollection();
 }
 
 void
 nsCycleCollector_finishAnyCurrentCollection()
 {
-    CollectorData *data = sCollectorData.get();
-
-    MOZ_ASSERT(data);
-
-    if (!data->mCollector) {
-        return;
-    }
-
-    data->mCollector->FinishAnyCurrentCollection();
+  CollectorData* data = sCollectorData.get();
+
+  MOZ_ASSERT(data);
+
+  if (!data->mCollector) {
+    return;
+  }
+
+  data->mCollector->FinishAnyCurrentCollection();
 }
 
 void
 nsCycleCollector_shutdown()
 {
-  CollectorData *data = sCollectorData.get();
+  CollectorData* data = sCollectorData.get();
 
   if (data) {
     MOZ_ASSERT(data->mCollector);
     PROFILER_LABEL("CC", "nsCycleCollector_shutdown");
     data->mCollector->Shutdown();
     data->mCollector = nullptr;
     if (!data->mRuntime) {
       delete data;
--- a/xpcom/base/nsCycleCollector.h
+++ b/xpcom/base/nsCycleCollector.h
@@ -38,41 +38,41 @@ void nsCycleCollector_forgetSkippable(bo
 void nsCycleCollector_prepareForGarbageCollection();
 
 // If an incremental cycle collection is in progress, finish it.
 void nsCycleCollector_finishAnyCurrentCollection();
 
 void nsCycleCollector_dispatchDeferredDeletion(bool aContinuation = false);
 bool nsCycleCollector_doDeferredDeletion();
 
-void nsCycleCollector_collect(nsICycleCollectorListener *aManualListener);
+void nsCycleCollector_collect(nsICycleCollectorListener* aManualListener);
 
 // If aSliceTime is negative, the CC will run to completion. Otherwise,
 // aSliceTime will be used as the time budget for the slice, in ms.
 void nsCycleCollector_collectSlice(int64_t aSliceTime);
 
 // If aSliceTime is negative, the CC will run to completion. Otherwise,
 // aSliceTime will be used as the work budget for the slice.
 void nsCycleCollector_collectSliceWork(int64_t aSliceWork);
 
 uint32_t nsCycleCollector_suspectedCount();
 void nsCycleCollector_shutdown();
 
 // Helpers for interacting with JS
-void nsCycleCollector_registerJSRuntime(mozilla::CycleCollectedJSRuntime *aRt);
+void nsCycleCollector_registerJSRuntime(mozilla::CycleCollectedJSRuntime* aRt);
 void nsCycleCollector_forgetJSRuntime();
 
 #define NS_CYCLE_COLLECTOR_LOGGER_CID \
 { 0x58be81b4, 0x39d2, 0x437c, \
 { 0x94, 0xea, 0xae, 0xde, 0x2c, 0x62, 0x08, 0xd3 } }
 
 extern nsresult
-nsCycleCollectorLoggerConstructor(nsISupports* outer,
+nsCycleCollectorLoggerConstructor(nsISupports* aOuter,
                                   const nsIID& aIID,
-                                  void* *aInstancePtr);
+                                  void** aInstancePtr);
 
 namespace mozilla {
 namespace cyclecollector {
 
 #ifdef DEBUG
 bool IsJSHolder(void* aHolder);
 #endif
 
--- a/xpcom/base/nsDebugImpl.cpp
+++ b/xpcom/base/nsDebugImpl.cpp
@@ -49,35 +49,35 @@
 #include <stdbool.h>
 #include <unistd.h>
 #include <sys/sysctl.h>
 #endif
 
 #include "mozilla/mozalloc_abort.h"
 
 static void
-Abort(const char *aMsg);
+Abort(const char* aMsg);
 
 static void
 RealBreak();
 
 static void
-Break(const char *aMsg);
+Break(const char* aMsg);
 
 #if defined(_WIN32)
 #include <windows.h>
 #include <signal.h>
 #include <malloc.h> // for _alloca
 #elif defined(XP_UNIX)
 #include <stdlib.h>
 #endif
 
 using namespace mozilla;
 
-static const char *sMultiprocessDescription = nullptr;
+static const char* sMultiprocessDescription = nullptr;
 
 static Atomic<int32_t> gAssertionCount;
 
 NS_IMPL_QUERY_INTERFACE(nsDebugImpl, nsIDebug, nsIDebug2)
 
 NS_IMETHODIMP_(MozExternalRefCountType)
 nsDebugImpl::AddRef()
 {
@@ -86,39 +86,39 @@ nsDebugImpl::AddRef()
 
 NS_IMETHODIMP_(MozExternalRefCountType)
 nsDebugImpl::Release()
 {
   return 1;
 }
 
 NS_IMETHODIMP
-nsDebugImpl::Assertion(const char *aStr, const char *aExpr,
-                       const char *aFile, int32_t aLine)
+nsDebugImpl::Assertion(const char* aStr, const char* aExpr,
+                       const char* aFile, int32_t aLine)
 {
   NS_DebugBreak(NS_DEBUG_ASSERTION, aStr, aExpr, aFile, aLine);
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsDebugImpl::Warning(const char *aStr, const char *aFile, int32_t aLine)
+nsDebugImpl::Warning(const char* aStr, const char* aFile, int32_t aLine)
 {
   NS_DebugBreak(NS_DEBUG_WARNING, aStr, nullptr, aFile, aLine);
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsDebugImpl::Break(const char *aFile, int32_t aLine)
+nsDebugImpl::Break(const char* aFile, int32_t aLine)
 {
   NS_DebugBreak(NS_DEBUG_BREAK, nullptr, nullptr, aFile, aLine);
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsDebugImpl::Abort(const char *aFile, int32_t aLine)
+nsDebugImpl::Abort(const char* aFile, int32_t aLine)
 {
   NS_DebugBreak(NS_DEBUG_ABORT, nullptr, nullptr, aFile, aLine);
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsDebugImpl::GetIsDebugBuild(bool* aResult)
 {
@@ -167,129 +167,140 @@ nsDebugImpl::GetIsDebuggerAttached(bool*
     *aResult = true;
   }
 #endif
 
   return NS_OK;
 }
 
 /* static */ void
-nsDebugImpl::SetMultiprocessMode(const char *aDesc)
+nsDebugImpl::SetMultiprocessMode(const char* aDesc)
 {
   sMultiprocessDescription = aDesc;
 }
 
 /**
  * Implementation of the nsDebug methods. Note that this code is
  * always compiled in, in case some other module that uses it is
  * compiled with debugging even if this library is not.
  */
 static PRLogModuleInfo* gDebugLog;
 
-static void InitLog(void)
+static void
+InitLog()
 {
   if (0 == gDebugLog) {
     gDebugLog = PR_NewLogModule("nsDebug");
   }
 }
 
 enum nsAssertBehavior {
   NS_ASSERT_UNINITIALIZED,
   NS_ASSERT_WARN,
   NS_ASSERT_SUSPEND,
   NS_ASSERT_STACK,
   NS_ASSERT_TRAP,
   NS_ASSERT_ABORT,
   NS_ASSERT_STACK_AND_ABORT
 };
 
-static nsAssertBehavior GetAssertBehavior()
+static nsAssertBehavior
+GetAssertBehavior()
 {
   static nsAssertBehavior gAssertBehavior = NS_ASSERT_UNINITIALIZED;
-  if (gAssertBehavior != NS_ASSERT_UNINITIALIZED)
+  if (gAssertBehavior != NS_ASSERT_UNINITIALIZED) {
     return gAssertBehavior;
+  }
 
 #if defined(XP_WIN) && defined(MOZ_METRO)
-  if (IsRunningInWindowsMetro())
+  if (IsRunningInWindowsMetro()) {
     gAssertBehavior = NS_ASSERT_WARN;
-  else
+  } else {
     gAssertBehavior = NS_ASSERT_TRAP;
+  }
 #elif defined(XP_WIN)
   gAssertBehavior = NS_ASSERT_TRAP;
 #else
   gAssertBehavior = NS_ASSERT_WARN;
 #endif
 
-  const char *assertString = PR_GetEnv("XPCOM_DEBUG_BREAK");
-  if (!assertString || !*assertString)
+  const char* assertString = PR_GetEnv("XPCOM_DEBUG_BREAK");
+  if (!assertString || !*assertString) {
     return gAssertBehavior;
-
-  if (!strcmp(assertString, "warn"))
+  }
+  if (!strcmp(assertString, "warn")) {
     return gAssertBehavior = NS_ASSERT_WARN;
-
-  if (!strcmp(assertString, "suspend"))
+  }
+  if (!strcmp(assertString, "suspend")) {
     return gAssertBehavior = NS_ASSERT_SUSPEND;
-
-  if (!strcmp(assertString, "stack"))
+  }
+  if (!strcmp(assertString, "stack")) {
     return gAssertBehavior = NS_ASSERT_STACK;
-
-  if (!strcmp(assertString, "abort"))
+  }
+  if (!strcmp(assertString, "abort")) {
     return gAssertBehavior = NS_ASSERT_ABORT;
-
-  if (!strcmp(assertString, "trap") || !strcmp(assertString, "break"))
+  }
+  if (!strcmp(assertString, "trap") || !strcmp(assertString, "break")) {
     return gAssertBehavior = NS_ASSERT_TRAP;
-
-  if (!strcmp(assertString, "stack-and-abort"))
+  }
+  if (!strcmp(assertString, "stack-and-abort")) {
     return gAssertBehavior = NS_ASSERT_STACK_AND_ABORT;
+  }
 
   fprintf(stderr, "Unrecognized value of XPCOM_DEBUG_BREAK\n");
   return gAssertBehavior;
 }
 
 struct FixedBuffer
 {
-  FixedBuffer() : curlen(0) { buffer[0] = '\0'; }
+  FixedBuffer() : curlen(0)
+  {
+    buffer[0] = '\0';
+  }
 
   char buffer[1000];
   uint32_t curlen;
 };
 
 static int
-StuffFixedBuffer(void *closure, const char *buf, uint32_t len)
+StuffFixedBuffer(void* aClosure, const char* aBuf, uint32_t aLen)
 {
-  if (!len)
+  if (!aLen) {
     return 0;
+  }
 
-  FixedBuffer *fb = (FixedBuffer*) closure;
+  FixedBuffer* fb = (FixedBuffer*)aClosure;
 
   // strip the trailing null, we add it again later
-  if (buf[len - 1] == '\0')
-    --len;
+  if (aBuf[aLen - 1] == '\0') {
+    --aLen;
+  }
 
-  if (fb->curlen + len >= sizeof(fb->buffer))
-    len = sizeof(fb->buffer) - fb->curlen - 1;
+  if (fb->curlen + aLen >= sizeof(fb->buffer)) {
+    aLen = sizeof(fb->buffer) - fb->curlen - 1;
+  }
 
-  if (len) {
-    memcpy(fb->buffer + fb->curlen, buf, len);
-    fb->curlen += len;
+  if (aLen) {
+    memcpy(fb->buffer + fb->curlen, aBuf, aLen);
+    fb->curlen += aLen;
     fb->buffer[fb->curlen] = '\0';
   }
 
-  return len;
+  return aLen;
 }
 
 EXPORT_XPCOM_API(void)
-NS_DebugBreak(uint32_t aSeverity, const char *aStr, const char *aExpr,
-              const char *aFile, int32_t aLine)
+NS_DebugBreak(uint32_t aSeverity, const char* aStr, const char* aExpr,
+              const char* aFile, int32_t aLine)
 {
   InitLog();
 
   FixedBuffer buf;
   PRLogModuleLevel ll = PR_LOG_WARNING;
-  const char *sevString = "WARNING";
+  const char* sevString = "WARNING";
 
   switch (aSeverity) {
     case NS_DEBUG_ASSERTION:
       sevString = "###!!! ASSERTION";
       ll = PR_LOG_ERROR;
       break;
 
     case NS_DEBUG_BREAK:
@@ -312,38 +323,40 @@ NS_DebugBreak(uint32_t aSeverity, const 
   PrintToBuffer("[");
   if (sMultiprocessDescription) {
     PrintToBuffer("%s ", sMultiprocessDescription);
   }
   PrintToBuffer("%d] ", base::GetCurrentProcId());
 
   PrintToBuffer("%s: ", sevString);
 
-  if (aStr)
+  if (aStr) {
     PrintToBuffer("%s: ", aStr);
-
-  if (aExpr)
+  }
+  if (aExpr) {
     PrintToBuffer("'%s', ", aExpr);
-
-  if (aFile)
+  }
+  if (aFile) {
     PrintToBuffer("file %s, ", aFile);
-
-  if (aLine != -1)
+  }
+  if (aLine != -1) {
     PrintToBuffer("line %d", aLine);
+  }
 
 #  undef PrintToBuffer
 
   // Write out the message to the debug log
   PR_LOG(gDebugLog, ll, ("%s", buf.buffer));
   PR_LogFlush();
 
   // errors on platforms without a debugdlg ring a bell on stderr
 #if !defined(XP_WIN)
-  if (ll != PR_LOG_WARNING)
+  if (ll != PR_LOG_WARNING) {
     fprintf(stderr, "\07");
+  }
 #endif
 
 #ifdef ANDROID
   __android_log_print(ANDROID_LOG_INFO, "Gecko", "%s", buf.buffer);
 #endif
 
   // Write the message to stderr unless it's a warning and MOZ_IGNORE_WARNINGS
   // is set.
@@ -412,17 +425,17 @@ NS_DebugBreak(uint32_t aSeverity, const 
     case NS_ASSERT_TRAP:
     case NS_ASSERT_UNINITIALIZED: // Default to "trap" behavior
       Break(buf.buffer);
       return;
   }
 }
 
 static void
-Abort(const char *aMsg)
+Abort(const char* aMsg)
 {
   mozalloc_abort(aMsg);
 }
 
 static void
 RealBreak()
 {
 #if defined(_WIN32)
@@ -450,22 +463,22 @@ RealBreak()
 #endif
 #else
 #warning do not know how to break on this platform
 #endif
 }
 
 // Abort() calls this function, don't call it!
 static void
-Break(const char *aMsg)
+Break(const char* aMsg)
 {
 #if defined(_WIN32)
   static int ignoreDebugger;
   if (!ignoreDebugger) {
-    const char *shouldIgnoreDebugger = getenv("XPCOM_DEBUG_DLG");
+    const char* shouldIgnoreDebugger = getenv("XPCOM_DEBUG_DLG");
     ignoreDebugger = 1 + (shouldIgnoreDebugger && !strcmp(shouldIgnoreDebugger, "1"));
   }
   if ((ignoreDebugger == 2) || !::IsDebuggerPresent()) {
     DWORD code = IDRETRY;
 
     /* Create the debug dialog out of process to avoid the crashes caused by
      * Windows events leaking into our event loop from an in process dialog.
      * We do this by launching windbgdlg.exe (built in xpcom/windbgdlg).
@@ -478,32 +491,32 @@ Break(const char *aMsg)
 
     memset(&pi, 0, sizeof(pi));
 
     memset(&si, 0, sizeof(si));
     si.cb          = sizeof(si);
     si.wShowWindow = SW_SHOW;
 
     // 2nd arg of CreateProcess is in/out
-    wchar_t *msgCopy = (wchar_t*) _alloca((strlen(aMsg) + 1)*sizeof(wchar_t));
+    wchar_t* msgCopy = (wchar_t*)_alloca((strlen(aMsg) + 1) * sizeof(wchar_t));
     wcscpy(msgCopy, NS_ConvertUTF8toUTF16(aMsg).get());
 
-    if(GetModuleFileNameW(GetModuleHandleW(L"xpcom.dll"), executable, MAX_PATH) &&
-        nullptr != (pName = wcsrchr(executable, '\\')) &&
-        nullptr != wcscpy(pName + 1, L"windbgdlg.exe") &&
+    if (GetModuleFileNameW(GetModuleHandleW(L"xpcom.dll"), executable, MAX_PATH) &&
+        (pName = wcsrchr(executable, '\\')) != nullptr &&
+        wcscpy(pName + 1, L"windbgdlg.exe") &&
         CreateProcessW(executable, msgCopy, nullptr, nullptr,
                        false, DETACHED_PROCESS | NORMAL_PRIORITY_CLASS,
                        nullptr, nullptr, &si, &pi)) {
       WaitForSingleObject(pi.hProcess, INFINITE);
       GetExitCodeProcess(pi.hProcess, &code);
       CloseHandle(pi.hProcess);
       CloseHandle(pi.hThread);
     }
 
-    switch(code) {
+    switch (code) {
       case IDABORT:
         //This should exit us
         raise(SIGABRT);
         //If we are ignored exit this way..
         _exit(3);
 
       case IDIGNORE:
         return;
@@ -526,20 +539,21 @@ Break(const char *aMsg)
 #else
 #warning do not know how to break on this platform
 #endif
 }
 
 static const nsDebugImpl kImpl;
 
 nsresult
-nsDebugImpl::Create(nsISupports* outer, const nsIID& aIID, void* *aInstancePtr)
+nsDebugImpl::Create(nsISupports* aOuter, const nsIID& aIID, void** aInstancePtr)
 {
-  if (NS_WARN_IF(outer))
+  if (NS_WARN_IF(aOuter)) {
     return NS_ERROR_NO_AGGREGATION;
+  }
 
   return const_cast<nsDebugImpl*>(&kImpl)->
     QueryInterface(aIID, aInstancePtr);
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 
 nsresult
@@ -561,16 +575,16 @@ NS_ErrorAccordingToNSPR()
     case PR_NAME_TOO_LONG_ERROR:         return NS_ERROR_FILE_NAME_TOO_LONG;
     case PR_DIRECTORY_NOT_EMPTY_ERROR:   return NS_ERROR_FILE_DIR_NOT_EMPTY;
     case PR_NO_ACCESS_RIGHTS_ERROR:      return NS_ERROR_FILE_ACCESS_DENIED;
     default:                             return NS_ERROR_FAILURE;
   }
 }
 
 void
-NS_ABORT_OOM(size_t size)
+NS_ABORT_OOM(size_t aSize)
 {
 #ifdef MOZ_CRASHREPORTER
-  CrashReporter::AnnotateOOMAllocationSize(size);
+  CrashReporter::AnnotateOOMAllocationSize(aSize);
 #endif
   MOZ_CRASH();
 }
 
--- a/xpcom/base/nsDebugImpl.h
+++ b/xpcom/base/nsDebugImpl.h
@@ -8,30 +8,33 @@
 #define nsDebugImpl_h
 
 #include "nsIDebug.h"
 #include "nsIDebug2.h"
 
 class nsDebugImpl : public nsIDebug2
 {
 public:
-  nsDebugImpl() {}
+  nsDebugImpl()
+  {
+  }
   NS_DECL_ISUPPORTS
   NS_DECL_NSIDEBUG
   NS_DECL_NSIDEBUG2
 
-  static nsresult Create(nsISupports* outer, const nsIID& aIID, void* *aInstancePtr);
+  static nsresult Create(nsISupports* aOuter, const nsIID& aIID,
+                         void** aInstancePtr);
 
   /*
    * Inform nsDebugImpl that we're in multiprocess mode.
    *
    * If aDesc is not nullptr, the string it points to must be
    * statically-allocated (i.e., it must be a string literal).
    */
-  static void SetMultiprocessMode(const char *aDesc);
+  static void SetMultiprocessMode(const char* aDesc);
 };
 
 
 #define NS_DEBUG_CONTRACTID "@mozilla.org/xpcom/debug;1"
 #define NS_DEBUG_CID                                 \
 { /* a80b1fb3-aaf6-4852-b678-c27eb7a518af */         \
   0xa80b1fb3,                                        \
     0xaaf6,                                          \
--- a/xpcom/base/nsDumpUtils.cpp
+++ b/xpcom/base/nsDumpUtils.cpp
@@ -60,32 +60,34 @@ DumpSignalHandler(int aSignum)
   if (sDumpPipeWriteFd != -1) {
     uint8_t signum = static_cast<int>(aSignum);
     write(sDumpPipeWriteFd, &signum, sizeof(signum));
   }
 }
 
 NS_IMPL_ISUPPORTS(FdWatcher, nsIObserver);
 
-void FdWatcher::Init()
+void
+FdWatcher::Init()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   nsCOMPtr<nsIObserverService> os = services::GetObserverService();
   os->AddObserver(this, "xpcom-shutdown", /* ownsWeak = */ false);
 
   XRE_GetIOMessageLoop()->PostTask(
     FROM_HERE,
     NewRunnableMethod(this, &FdWatcher::StartWatching));
 }
 
 // Implementations may call this function multiple times if they ensure that
 // it's safe to call OpenFd() multiple times and they call StopWatching()
 // first.
-void FdWatcher::StartWatching()
+void
+FdWatcher::StartWatching()
 {
   MOZ_ASSERT(XRE_GetIOMessageLoop() == MessageLoopForIO::current());
   MOZ_ASSERT(mFd == -1);
 
   mFd = OpenFd();
   if (mFd == -1) {
     LOG("FdWatcher: OpenFd failed.");
     return;
@@ -94,17 +96,18 @@ void FdWatcher::StartWatching()
   MessageLoopForIO::current()->WatchFileDescriptor(
     mFd, /* persistent = */ true,
     MessageLoopForIO::WATCH_READ,
     &mReadWatcher, this);
 }
 
 // Since implementations can call StartWatching() multiple times, they can of
 // course call StopWatching() multiple times.
-void FdWatcher::StopWatching()
+void
+FdWatcher::StopWatching()
 {
   MOZ_ASSERT(XRE_GetIOMessageLoop() == MessageLoopForIO::current());
 
   mReadWatcher.StopWatchingFileDescriptor();
   if (mFd != -1) {
     close(mFd);
     mFd = -1;
   }
@@ -124,18 +127,17 @@ SignalPipeWatcher::GetSingleton()
 }
 
 void
 SignalPipeWatcher::RegisterCallback(uint8_t aSignal,
                                     PipeCallback aCallback)
 {
   MutexAutoLock lock(mSignalInfoLock);
 
-  for (SignalInfoArray::index_type i = 0; i < mSignalInfo.Length(); i++)
-  {
+  for (SignalInfoArray::index_type i = 0; i < mSignalInfo.Length(); ++i) {
     if (mSignalInfo[i].mSignal == aSignal) {
       LOG("Register Signal(%d) callback failed! (DUPLICATE)", aSignal);
       return;
     }
   }
   SignalInfo signalInfo = { aSignal, aCallback };
   mSignalInfo.AppendElement(signalInfo);
   RegisterSignalHandler(signalInfo.mSignal);
@@ -166,17 +168,18 @@ SignalPipeWatcher::RegisterSignalHandler
 
 SignalPipeWatcher::~SignalPipeWatcher()
 {
   if (sDumpPipeWriteFd != -1) {
     StopWatching();
   }
 }
 
-int SignalPipeWatcher::OpenFd()
+int
+SignalPipeWatcher::OpenFd()
 {
   MOZ_ASSERT(XRE_GetIOMessageLoop() == MessageLoopForIO::current());
 
   // Create a pipe.  When we receive a signal in our signal handler, we'll
   // write the signum to the write-end of this pipe.
   int pipeFds[2];
   if (pipe(pipeFds)) {
     LOG("SignalPipeWatcher failed to create pipe.");
@@ -189,49 +192,51 @@ int SignalPipeWatcher::OpenFd()
 
   int readFd = pipeFds[0];
   sDumpPipeWriteFd = pipeFds[1];
 
   RegisterSignalHandler();
   return readFd;
 }
 
-void SignalPipeWatcher::StopWatching()
+void
+SignalPipeWatcher::StopWatching()
 {
   MOZ_ASSERT(XRE_GetIOMessageLoop() == MessageLoopForIO::current());
 
   // Close sDumpPipeWriteFd /after/ setting the fd to -1.
   // Otherwise we have the (admittedly far-fetched) race where we
   //
   //  1) close sDumpPipeWriteFd
   //  2) open a new fd with the same number as sDumpPipeWriteFd
   //     had.
   //  3) receive a signal, then write to the fd.
   int pipeWriteFd = sDumpPipeWriteFd.exchange(-1);
   close(pipeWriteFd);
 
   FdWatcher::StopWatching();
 }
 
-void SignalPipeWatcher::OnFileCanReadWithoutBlocking(int aFd)
+void
+SignalPipeWatcher::OnFileCanReadWithoutBlocking(int aFd)
 {
   MOZ_ASSERT(XRE_GetIOMessageLoop() == MessageLoopForIO::current());
 
   uint8_t signum;
   ssize_t numReceived = read(aFd, &signum, sizeof(signum));
   if (numReceived != sizeof(signum)) {
     LOG("Error reading from buffer in "
         "SignalPipeWatcher::OnFileCanReadWithoutBlocking.");
     return;
   }
 
   {
     MutexAutoLock lock(mSignalInfoLock);
     for (SignalInfoArray::index_type i = 0; i < mSignalInfo.Length(); i++) {
-      if(signum == mSignalInfo[i].mSignal) {
+      if (signum == mSignalInfo[i].mSignal) {
         mSignalInfo[i].mCallback(signum);
         return;
       }
     }
   }
   LOG("SignalPipeWatcher got unexpected signum.");
 }
 
@@ -274,59 +279,62 @@ FifoWatcher::MaybeCreate()
   return true;
 }
 
 void
 FifoWatcher::RegisterCallback(const nsCString& aCommand, FifoCallback aCallback)
 {
   MutexAutoLock lock(mFifoInfoLock);
 
-  for (FifoInfoArray::index_type i = 0; i < mFifoInfo.Length(); i++)
-  {
+  for (FifoInfoArray::index_type i = 0; i < mFifoInfo.Length(); ++i) {
     if (mFifoInfo[i].mCommand.Equals(aCommand)) {
       LOG("Register command(%s) callback failed! (DUPLICATE)", aCommand.get());
       return;
     }
   }
   FifoInfo aFifoInfo = { aCommand, aCallback };
   mFifoInfo.AppendElement(aFifoInfo);
 }
 
 FifoWatcher::~FifoWatcher()
 {
 }
 
-int FifoWatcher::OpenFd()
+int
+FifoWatcher::OpenFd()
 {
   // If the memory_info_dumper.directory pref is specified, put the fifo
   // there.  Otherwise, put it into the system's tmp directory.
 
   nsCOMPtr<nsIFile> file;
 
   nsresult rv;
   if (mDirPath.Length() > 0) {
     rv = XRE_GetFileFromPath(mDirPath.get(), getter_AddRefs(file));
     if (NS_FAILED(rv)) {
       LOG("FifoWatcher failed to open file \"%s\"", mDirPath.get());
       return -1;
     }
   } else {
     rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(file));
-    if (NS_WARN_IF(NS_FAILED(rv)))
+    if (NS_WARN_IF(NS_FAILED(rv))) {
       return -1;
+    }
   }
 
   rv = file->AppendNative(NS_LITERAL_CSTRING("debug_info_trigger"));
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return -1;
+  }
 
   nsAutoCString path;
   rv = file->GetNativePath(path);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return -1;
+  }
 
   // unlink might fail because the file doesn't exist, or for other reasons.
   // But we don't care it fails; any problems will be detected later, when we
   // try to mkfifo or open the file.
   if (unlink(path.get())) {
     LOG("FifoWatcher::OpenFifo unlink failed; errno=%d.  "
         "Continuing despite error.", errno);
   }
@@ -360,26 +368,27 @@ int FifoWatcher::OpenFd()
   if (fcntl(fd, F_SETFL, 0)) {
     close(fd);
     return -1;
   }
 
   return fd;
 }
 
-void FifoWatcher::OnFileCanReadWithoutBlocking(int aFd)
+void
+FifoWatcher::OnFileCanReadWithoutBlocking(int aFd)
 {
   MOZ_ASSERT(XRE_GetIOMessageLoop() == MessageLoopForIO::current());
 
   char buf[1024];
   int nread;
   do {
     // sizeof(buf) - 1 to leave space for the null-terminator.
     nread = read(aFd, buf, sizeof(buf));
-  } while(nread == -1 && errno == EINTR);
+  } while (nread == -1 && errno == EINTR);
 
   if (nread == -1) {
     // We want to avoid getting into a situation where
     // OnFileCanReadWithoutBlocking is called in an infinite loop, so when
     // something goes wrong, stop watching the fifo altogether.
     LOG("FifoWatcher hit an error (%d) and is quitting.", errno);
     StopWatching();
     return;
@@ -404,17 +413,17 @@ void FifoWatcher::OnFileCanReadWithoutBl
   // it'll actually write "foo\n" to the fifo.
   inputStr.Trim("\b\t\r\n");
 
   {
     MutexAutoLock lock(mFifoInfoLock);
 
     for (FifoInfoArray::index_type i = 0; i < mFifoInfo.Length(); i++) {
       const nsCString commandStr = mFifoInfo[i].mCommand;
-      if(inputStr == commandStr.get()) {
+      if (inputStr == commandStr.get()) {
         mFifoInfo[i].mCallback(inputStr);
         return;
       }
     }
   }
   LOG("Got unexpected value from fifo; ignoring it.");
 }
 
@@ -426,68 +435,78 @@ void FifoWatcher::OnFileCanReadWithoutBl
 /* static */ nsresult
 nsDumpUtils::OpenTempFile(const nsACString& aFilename, nsIFile** aFile,
                           const nsACString& aFoldername)
 {
 #ifdef ANDROID
   // For Android, first try the downloads directory which is world-readable
   // rather than the temp directory which is not.
   if (!*aFile) {
-    char *env = PR_GetEnv("DOWNLOADS_DIRECTORY");
+    char* env = PR_GetEnv("DOWNLOADS_DIRECTORY");
     if (env) {
       NS_NewNativeLocalFile(nsCString(env), /* followLinks = */ true, aFile);
     }
   }
 #endif
   nsresult rv;
   if (!*aFile) {
     rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, aFile);
-    if (NS_WARN_IF(NS_FAILED(rv)))
+    if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
+    }
   }
 
 #ifdef ANDROID
   // /data/local/tmp is a true tmp directory; anyone can create a file there,
   // but only the user which created the file can remove it.  We want non-root
   // users to be able to remove these files, so we write them into a
   // subdirectory of the temp directory and chmod 777 that directory.
   if (aFoldername != EmptyCString()) {
     rv = (*aFile)->AppendNative(aFoldername);
-    if (NS_WARN_IF(NS_FAILED(rv)))
+    if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
+    }
 
     // It's OK if this fails; that probably just means that the directory already
     // exists.
     (*aFile)->Create(nsIFile::DIRECTORY_TYPE, 0777);
 
     nsAutoCString dirPath;
     rv = (*aFile)->GetNativePath(dirPath);
-    if (NS_WARN_IF(NS_FAILED(rv)))
+    if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
+    }
 
-    while (chmod(dirPath.get(), 0777) == -1 && errno == EINTR) {}
+    while (chmod(dirPath.get(), 0777) == -1 && errno == EINTR) {
+      // Retry if chmod() was interrupted by a signal.
+    }
   }
 #endif
 
   nsCOMPtr<nsIFile> file(*aFile);
 
   rv = file->AppendNative(aFilename);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   rv = file->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0666);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
 #ifdef ANDROID
   // Make this file world-read/writable; the permissions passed to the
   // CreateUnique call above are not sufficient on Android, which runs with a
   // umask.
   nsAutoCString path;
   rv = file->GetNativePath(path);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
-  while (chmod(path.get(), 0666) == -1 && errno == EINTR) {}
+  while (chmod(path.get(), 0666) == -1 && errno == EINTR) {
+    // Retry if chmod() was interrupted by a signal.
+  }
 #endif
 
   return NS_OK;
 }
--- a/xpcom/base/nsDumpUtils.h
+++ b/xpcom/base/nsDumpUtils.h
@@ -29,18 +29,19 @@
 using namespace mozilla;
 
 #if defined(XP_LINUX) || defined(__FreeBSD__) || defined(XP_MACOSX) // {
 
 /**
  * Abstract base class for something which watches an fd and takes action when
  * we can read from it without blocking.
  */
-class FdWatcher : public MessageLoopForIO::Watcher
-                , public nsIObserver
+class FdWatcher
+  : public MessageLoopForIO::Watcher
+  , public nsIObserver
 {
 protected:
   MessageLoopForIO::FileDescriptorWatcher mReadWatcher;
   int mFd;
 
 public:
   FdWatcher()
     : mFd(-1)
@@ -94,17 +95,18 @@ public:
       FROM_HERE,
       NewRunnableMethod(this, &FdWatcher::StopWatching));
 
     return NS_OK;
   }
 };
 
 typedef void (* FifoCallback)(const nsCString& inputStr);
-struct FifoInfo {
+struct FifoInfo
+{
   nsCString mCommand;
   FifoCallback mCallback;
 };
 typedef nsTArray<FifoInfo> FifoInfoArray;
 
 class FifoWatcher : public FdWatcher
 {
 public:
@@ -128,24 +130,26 @@ public:
 private:
   nsAutoCString mDirPath;
 
   static StaticRefPtr<FifoWatcher> sSingleton;
 
   FifoWatcher(nsCString aPath)
     : mDirPath(aPath)
     , mFifoInfoLock("FifoWatcher.mFifoInfoLock")
-  {}
+  {
+  }
 
   mozilla::Mutex mFifoInfoLock; // protects mFifoInfo
   FifoInfoArray mFifoInfo;
 };
 
 typedef void (* PipeCallback)(const uint8_t recvSig);
-struct SignalInfo {
+struct SignalInfo
+{
   uint8_t mSignal;
   PipeCallback mCallback;
 };
 typedef nsTArray<SignalInfo> SignalInfoArray;
 
 class SignalPipeWatcher : public FdWatcher
 {
 public:
--- a/xpcom/base/nsError.h
+++ b/xpcom/base/nsError.h
@@ -173,18 +173,20 @@
 #undef FAILURE
 
 /**
  * @name Standard Error Handling Macros
  * @return 0 or 1 (false/true with bool type for C++)
  */
 
 #ifdef __cplusplus
-inline uint32_t NS_FAILED_impl(nsresult _nsresult) {
-  return static_cast<uint32_t>(_nsresult) & 0x80000000;
+inline uint32_t
+NS_FAILED_impl(nsresult aErr)
+{
+  return static_cast<uint32_t>(aErr) & 0x80000000;
 }
 #define NS_FAILED(_nsresult)    ((bool)MOZ_UNLIKELY(NS_FAILED_impl(_nsresult)))
 #define NS_SUCCEEDED(_nsresult) ((bool)MOZ_LIKELY(!NS_FAILED_impl(_nsresult)))
 
 /* Check that our enum type is actually uint32_t as expected */
 static_assert(((nsresult)0) < ((nsresult)-1),
               "nsresult must be an unsigned type");
 static_assert(sizeof(nsresult) == sizeof(uint32_t),
@@ -222,24 +224,30 @@ extern nsresult
 NS_ErrorAccordingToNSPR();
 
 
 /**
  * @name Standard Macros for retrieving error bits
  */
 
 #ifdef __cplusplus
-inline uint16_t NS_ERROR_GET_CODE(nsresult err) {
-  return uint32_t(err) & 0xffff;
+inline uint16_t
+NS_ERROR_GET_CODE(nsresult aErr)
+{
+  return uint32_t(aErr) & 0xffff;
 }
-inline uint16_t NS_ERROR_GET_MODULE(nsresult err) {
-  return ((uint32_t(err) >> 16) - NS_ERROR_MODULE_BASE_OFFSET) & 0x1fff;
+inline uint16_t
+NS_ERROR_GET_MODULE(nsresult aErr)
+{
+  return ((uint32_t(aErr) >> 16) - NS_ERROR_MODULE_BASE_OFFSET) & 0x1fff;
 }
-inline bool NS_ERROR_GET_SEVERITY(nsresult err) {
-  return uint32_t(err) >> 31;
+inline bool
+NS_ERROR_GET_SEVERITY(nsresult aErr)
+{
+  return uint32_t(aErr) >> 31;
 }
 #else
 #define NS_ERROR_GET_CODE(err)     ((err) & 0xffff)
 #define NS_ERROR_GET_MODULE(err)   ((((err) >> 16) - NS_ERROR_MODULE_BASE_OFFSET) & 0x1fff)
 #define NS_ERROR_GET_SEVERITY(err) (((err) >> 31) & 0x1)
 #endif
 
 
--- a/xpcom/base/nsErrorService.cpp
+++ b/xpcom/base/nsErrorService.cpp
@@ -8,18 +8,19 @@
 #include "nsCRTGlue.h"
 #include "nsAutoPtr.h"
 
 NS_IMPL_ISUPPORTS(nsErrorService, nsIErrorService)
 
 nsresult
 nsErrorService::Create(nsISupports* aOuter, const nsIID& aIID, void** aInstancePtr)
 {
-  if (NS_WARN_IF(aOuter))
+  if (NS_WARN_IF(aOuter)) {
     return NS_ERROR_NO_AGGREGATION;
+  }
   nsRefPtr<nsErrorService> serv = new nsErrorService();
   return serv->QueryInterface(aIID, aInstancePtr);
 }
 
 NS_IMETHODIMP
 nsErrorService::RegisterErrorStringBundle(int16_t aErrorModule, const char* aStringBundleURL)
 {
   mErrorStringBundleURLMap.Put(aErrorModule, new nsCString(aStringBundleURL));
--- a/xpcom/base/nsGZFileWriter.cpp
+++ b/xpcom/base/nsGZFileWriter.cpp
@@ -16,83 +16,90 @@
 #include <unistd.h>
 #endif
 
 NS_IMPL_ISUPPORTS(nsGZFileWriter, nsIGZFileWriter)
 
 nsGZFileWriter::nsGZFileWriter()
   : mInitialized(false)
   , mFinished(false)
-{}
+{
+}
 
 nsGZFileWriter::~nsGZFileWriter()
 {
   if (mInitialized && !mFinished) {
     Finish();
   }
 }
 
 NS_IMETHODIMP
 nsGZFileWriter::Init(nsIFile* aFile)
 {
   if (NS_WARN_IF(mInitialized) ||
-      NS_WARN_IF(mFinished))
+      NS_WARN_IF(mFinished)) {
     return NS_ERROR_FAILURE;
+  }
 
   // Get a FILE out of our nsIFile.  Convert that into a file descriptor which
   // gzip can own.  Then close our FILE, leaving only gzip's fd open.
 
   FILE* file;
   nsresult rv = aFile->OpenANSIFileDesc("wb", &file);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   mGZFile = gzdopen(dup(fileno(file)), "wb");
   fclose(file);
 
   // gzdopen returns nullptr on error.
-  if (NS_WARN_IF(!mGZFile))
+  if (NS_WARN_IF(!mGZFile)) {
     return NS_ERROR_FAILURE;
+  }
 
   mInitialized = true;
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsGZFileWriter::Write(const nsACString& aStr)
 {
   if (NS_WARN_IF(!mInitialized) ||
-      NS_WARN_IF(mFinished))
+      NS_WARN_IF(mFinished)) {
     return NS_ERROR_FAILURE;
+  }
 
   // gzwrite uses a return value of 0 to indicate failure.  Otherwise, it
   // returns the number of uncompressed bytes written.  To ensure we can
   // distinguish between success and failure, don't call gzwrite when we have 0
   // bytes to write.
   if (aStr.IsEmpty()) {
     return NS_OK;
   }
 
   // gzwrite never does a short write -- that is, the return value should
   // always be either 0 or aStr.Length(), and we shouldn't have to call it
   // multiple times in order to get it to read the whole buffer.
   int rv = gzwrite(mGZFile, aStr.BeginReading(), aStr.Length());
-  if (NS_WARN_IF(rv != static_cast<int>(aStr.Length())))
+  if (NS_WARN_IF(rv != static_cast<int>(aStr.Length()))) {
     return NS_ERROR_FAILURE;
+  }
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsGZFileWriter::Finish()
 {
   if (NS_WARN_IF(!mInitialized) ||
-      NS_WARN_IF(mFinished))
+      NS_WARN_IF(mFinished)) {
     return NS_ERROR_FAILURE;
+  }
 
   mFinished = true;
   gzclose(mGZFile);
 
   // Ignore errors from gzclose; it's not like there's anything we can do about
   // it, at this point!
   return NS_OK;
 }
--- a/xpcom/base/nsISupportsBase.h
+++ b/xpcom/base/nsISupportsBase.h
@@ -28,17 +28,18 @@
     {0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46} }
 
 /**
  * Basic component object model interface. Objects which implement
  * this interface support runtime interface discovery (QueryInterface)
  * and a reference counted memory model (AddRef/Release). This is
  * modelled after the win32 IUnknown API.
  */
-class NS_NO_VTABLE nsISupports {
+class NS_NO_VTABLE nsISupports
+{
 public:
 
   NS_DECLARE_STATIC_IID_ACCESSOR(NS_ISUPPORTS_IID)
 
   /**
    * @name Methods
    */
 
--- a/xpcom/base/nsInterfaceRequestorAgg.cpp
+++ b/xpcom/base/nsInterfaceRequestorAgg.cpp
@@ -11,19 +11,19 @@
 
 class nsInterfaceRequestorAgg MOZ_FINAL : public nsIInterfaceRequestor
 {
 public:
   // XXX This needs to support threadsafe refcounting until we fix bug 243591.
   NS_DECL_THREADSAFE_ISUPPORTS
   NS_DECL_NSIINTERFACEREQUESTOR
 
-  nsInterfaceRequestorAgg(nsIInterfaceRequestor *aFirst,
-                          nsIInterfaceRequestor *aSecond,
-                          nsIEventTarget *aConsumerTarget = nullptr)
+  nsInterfaceRequestorAgg(nsIInterfaceRequestor* aFirst,
+                          nsIInterfaceRequestor* aSecond,
+                          nsIEventTarget* aConsumerTarget = nullptr)
     : mFirst(aFirst)
     , mSecond(aSecond)
     , mConsumerTarget(aConsumerTarget)
   {
     if (!mConsumerTarget) {
       mConsumerTarget = NS_GetCurrentThread();
     }
   }
@@ -32,23 +32,25 @@ public:
 private:
   nsCOMPtr<nsIInterfaceRequestor> mFirst, mSecond;
   nsCOMPtr<nsIEventTarget> mConsumerTarget;
 };
 
 NS_IMPL_ISUPPORTS(nsInterfaceRequestorAgg, nsIInterfaceRequestor)
 
 NS_IMETHODIMP
-nsInterfaceRequestorAgg::GetInterface(const nsIID &aIID, void **aResult)
+nsInterfaceRequestorAgg::GetInterface(const nsIID& aIID, void** aResult)
 {
   nsresult rv = NS_ERROR_NO_INTERFACE;
-  if (mFirst)
+  if (mFirst) {
     rv = mFirst->GetInterface(aIID, aResult);
-  if (mSecond && NS_FAILED(rv))
+  }
+  if (mSecond && NS_FAILED(rv)) {
     rv = mSecond->GetInterface(aIID, aResult);
+  }
   return rv;
 }
 
 nsInterfaceRequestorAgg::~nsInterfaceRequestorAgg()
 {
   nsIInterfaceRequestor* iir = nullptr;
   mFirst.swap(iir);
   if (iir) {
@@ -57,31 +59,33 @@ nsInterfaceRequestorAgg::~nsInterfaceReq
   iir = nullptr;
   mSecond.swap(iir);
   if (iir) {
     NS_ProxyRelease(mConsumerTarget, iir);
   }
 }
 
 nsresult
-NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor *aFirst,
-                                    nsIInterfaceRequestor *aSecond,
-                                    nsIInterfaceRequestor **aResult)
+NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor* aFirst,
+                                    nsIInterfaceRequestor* aSecond,
+                                    nsIInterfaceRequestor** aResult)
 {
   *aResult = new nsInterfaceRequestorAgg(aFirst, aSecond);
-  if (!*aResult)
+  if (!*aResult) {
     return NS_ERROR_OUT_OF_MEMORY;
+  }
   NS_ADDREF(*aResult);
   return NS_OK;
 }
 
 nsresult
-NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor *aFirst,
-                                    nsIInterfaceRequestor *aSecond,
+NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor* aFirst,
+                                    nsIInterfaceRequestor* aSecond,
                                     nsIEventTarget* aTarget,
-                                    nsIInterfaceRequestor **aResult)
+                                    nsIInterfaceRequestor** aResult)
 {
   *aResult = new nsInterfaceRequestorAgg(aFirst, aSecond, aTarget);
-  if (!*aResult)
+  if (!*aResult) {
     return NS_ERROR_OUT_OF_MEMORY;
+  }
   NS_ADDREF(*aResult);
   return NS_OK;
 }
--- a/xpcom/base/nsInterfaceRequestorAgg.h
+++ b/xpcom/base/nsInterfaceRequestorAgg.h
@@ -14,23 +14,23 @@ class nsIInterfaceRequestor;
  * This function returns an instance of nsIInterfaceRequestor that aggregates
  * two nsIInterfaceRequestor instances.  Its GetInterface method queries
  * aFirst for the requested interface and will query aSecond only if aFirst
  * failed to supply the requested interface.  Both aFirst and aSecond may
  * be null, and will be released on the main thread when the aggregator is
  * destroyed.
  */
 extern nsresult
-NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor  *aFirst,
-                                    nsIInterfaceRequestor  *aSecond,
-                                    nsIInterfaceRequestor **aResult);
+NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor* aFirst,
+                                    nsIInterfaceRequestor* aSecond,
+                                    nsIInterfaceRequestor** aResult);
 
 /**
  * Like the previous method, but aFirst and aSecond will be released on the
  * provided target thread.
  */
 extern nsresult
-NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor  *aFirst,
-                                    nsIInterfaceRequestor  *aSecond,
-                                    nsIEventTarget         *aTarget,
-                                    nsIInterfaceRequestor **aResult);
+NS_NewInterfaceRequestorAggregation(nsIInterfaceRequestor* aFirst,
+                                    nsIInterfaceRequestor* aSecond,
+                                    nsIEventTarget* aTarget,
+                                    nsIInterfaceRequestor** aResult);
 
 #endif // !defined( nsInterfaceRequestorAgg_h__ )
--- a/xpcom/base/nsMacUtilsImpl.cpp
+++ b/xpcom/base/nsMacUtilsImpl.cpp
@@ -5,24 +5,25 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsMacUtilsImpl.h"
 
 #include <CoreFoundation/CoreFoundation.h>
 
 NS_IMPL_ISUPPORTS(nsMacUtilsImpl, nsIMacUtils)
 
-nsresult nsMacUtilsImpl::GetArchString(nsAString& archString)
+nsresult
+nsMacUtilsImpl::GetArchString(nsAString& aArchString)
 {
   if (!mBinaryArchs.IsEmpty()) {
-    archString.Assign(mBinaryArchs);
+    aArchString.Assign(mBinaryArchs);
     return NS_OK;
   }
 
-  archString.Truncate();
+  aArchString.Truncate();
 
   bool foundPPC = false,
        foundX86 = false,
        foundPPC64 = false,
        foundX86_64 = false;
 
   CFBundleRef mainBundle = ::CFBundleGetMainBundle();
   if (!mainBundle) {
@@ -39,24 +40,25 @@ nsresult nsMacUtilsImpl::GetArchString(n
     CFNumberRef arch = static_cast<CFNumberRef>(::CFArrayGetValueAtIndex(archList, i));
 
     int archInt = 0;
     if (!::CFNumberGetValue(arch, kCFNumberIntType, &archInt)) {
       ::CFRelease(archList);
       return NS_ERROR_FAILURE;
     }
 
-    if (archInt == kCFBundleExecutableArchitecturePPC)
+    if (archInt == kCFBundleExecutableArchitecturePPC) {
       foundPPC = true;
-    else if (archInt == kCFBundleExecutableArchitectureI386)
+    } else if (archInt == kCFBundleExecutableArchitectureI386) {
       foundX86 = true;
-    else if (archInt == kCFBundleExecutableArchitecturePPC64)
+    } else if (archInt == kCFBundleExecutableArchitecturePPC64) {
       foundPPC64 = true;
-    else if (archInt == kCFBundleExecutableArchitectureX86_64)
+    } else if (archInt == kCFBundleExecutableArchitectureX86_64) {
       foundX86_64 = true;
+    }
   }
 
   ::CFRelease(archList);
 
   // The order in the string must always be the same so
   // don't do this in the loop.
   if (foundPPC) {
     mBinaryArchs.Append(NS_LITERAL_STRING("ppc"));
@@ -78,47 +80,52 @@ nsresult nsMacUtilsImpl::GetArchString(n
 
   if (foundX86_64) {
     if (!mBinaryArchs.IsEmpty()) {
       mBinaryArchs.Append(NS_LITERAL_STRING("-"));
     }
     mBinaryArchs.Append(NS_LITERAL_STRING("x86_64"));
   }
 
-  archString.Assign(mBinaryArchs);
+  aArchString.Assign(mBinaryArchs);
 
-  return (archString.IsEmpty() ? NS_ERROR_FAILURE : NS_OK);
+  return (aArchString.IsEmpty() ? NS_ERROR_FAILURE : NS_OK);
 }
 
-NS_IMETHODIMP nsMacUtilsImpl::GetIsUniversalBinary(bool *aIsUniversalBinary)
+NS_IMETHODIMP
+nsMacUtilsImpl::GetIsUniversalBinary(bool* aIsUniversalBinary)
 {
-  if (NS_WARN_IF(!aIsUniversalBinary))
+  if (NS_WARN_IF(!aIsUniversalBinary)) {
     return NS_ERROR_INVALID_ARG;
+  }
   *aIsUniversalBinary = false;
 
   nsAutoString archString;
   nsresult rv = GetArchString(archString);
-  if (NS_FAILED(rv))
+  if (NS_FAILED(rv)) {
     return rv;
+  }
 
   // The delimiter char in the arch string is '-', so if that character
   // is in the string we know we have multiple architectures.
   *aIsUniversalBinary = (archString.Find("-") > -1);
 
   return NS_OK;
 }
 
-NS_IMETHODIMP nsMacUtilsImpl::GetArchitecturesInBinary(nsAString& archString)
+NS_IMETHODIMP
+nsMacUtilsImpl::GetArchitecturesInBinary(nsAString& aArchString)
 {
-  return GetArchString(archString);
+  return GetArchString(aArchString);
 }
 
 /* readonly attribute boolean isTranslated; */
 // True when running under binary translation (Rosetta).
-NS_IMETHODIMP nsMacUtilsImpl::GetIsTranslated(bool *aIsTranslated)
+NS_IMETHODIMP
+nsMacUtilsImpl::GetIsTranslated(bool* aIsTranslated)
 {
 #ifdef __ppc__
   static bool    sInitialized = false;
 
   // Initialize sIsNative to 1.  If the sysctl fails because it doesn't
   // exist, then translation is not possible, so the process must not be
   // running translated.
   static int32_t sIsNative = 1;
--- a/xpcom/base/nsMacUtilsImpl.h
+++ b/xpcom/base/nsMacUtilsImpl.h
@@ -1,9 +1,9 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef nsMacUtilsImpl_h___
 #define nsMacUtilsImpl_h___
 
 #include "nsIMacUtils.h"
@@ -11,22 +11,26 @@
 #include "mozilla/Attributes.h"
 
 class nsMacUtilsImpl MOZ_FINAL : public nsIMacUtils
 {
 public:
   NS_DECL_ISUPPORTS
   NS_DECL_NSIMACUTILS
 
-  nsMacUtilsImpl() {}
+  nsMacUtilsImpl()
+  {
+  }
 
 private:
-  ~nsMacUtilsImpl() {}
+  ~nsMacUtilsImpl()
+  {
+  }
 
-  nsresult GetArchString(nsAString& archString);
+  nsresult GetArchString(nsAString& aArchString);
 
   // A string containing a "-" delimited list of architectures
   // in our binary.
   nsString mBinaryArchs;
 };
 
 // Global singleton service
 // 697BD3FD-43E5-41CE-AD5E-C339175C0818
--- a/xpcom/base/nsMemoryImpl.cpp
+++ b/xpcom/base/nsMemoryImpl.cpp
@@ -24,83 +24,84 @@
 #define LOW_MEMORY_THRESHOLD_KB (384 * 1024)
 #endif
 
 static nsMemoryImpl sGlobalMemory;
 
 NS_IMPL_QUERY_INTERFACE(nsMemoryImpl, nsIMemory)
 
 NS_IMETHODIMP_(void*)
-nsMemoryImpl::Alloc(size_t size)
+nsMemoryImpl::Alloc(size_t aSize)
 {
-  return NS_Alloc(size);
+  return NS_Alloc(aSize);
 }
 
 NS_IMETHODIMP_(void*)
-nsMemoryImpl::Realloc(void* ptr, size_t size)
+nsMemoryImpl::Realloc(void* aPtr, size_t aSize)
 {
-  return NS_Realloc(ptr, size);
+  return NS_Realloc(aPtr, aSize);
 }
 
 NS_IMETHODIMP_(void)
-nsMemoryImpl::Free(void* ptr)
+nsMemoryImpl::Free(void* aPtr)
 {
-  NS_Free(ptr);
+  NS_Free(aPtr);
 }
 
 NS_IMETHODIMP
 nsMemoryImpl::HeapMinimize(bool aImmediate)
 {
   return FlushMemory(MOZ_UTF16("heap-minimize"), aImmediate);
 }
 
 NS_IMETHODIMP
-nsMemoryImpl::IsLowMemory(bool *result)
+nsMemoryImpl::IsLowMemory(bool* aResult)
 {
   NS_ERROR("IsLowMemory is deprecated.  See bug 592308.");
-  *result = false;
+  *aResult = false;
   return NS_OK;
 }
 
 NS_IMETHODIMP
-nsMemoryImpl::IsLowMemoryPlatform(bool *result)
+nsMemoryImpl::IsLowMemoryPlatform(bool* aResult)
 {
 #ifdef ANDROID
   static int sLowMemory = -1; // initialize to unknown, lazily evaluate to 0 or 1
   if (sLowMemory == -1) {
     sLowMemory = 0; // assume "not low memory" in case file operations fail
-    *result = false;
+    *aResult = false;
 
     // check if MemTotal from /proc/meminfo is less than LOW_MEMORY_THRESHOLD_KB
     FILE* fd = fopen("/proc/meminfo", "r");
     if (!fd) {
       return NS_OK;
     }
     uint64_t mem = 0;
     int rv = fscanf(fd, "MemTotal: %llu kB", &mem);
     if (fclose(fd)) {
       return NS_OK;
     }
     if (rv != 1) {
       return NS_OK;
     }
     sLowMemory = (mem < LOW_MEMORY_THRESHOLD_KB) ? 1 : 0;
   }
-  *result = (sLowMemory == 1);
+  *aResult = (sLowMemory == 1);
 #else
-  *result = false;
+  *aResult = false;
 #endif
   return NS_OK;
 }
 
 /*static*/ nsresult
-nsMemoryImpl::Create(nsISupports* outer, const nsIID& aIID, void **aResult)
+nsMemoryImpl::Create(nsISupports* aOuter, const nsIID& aIID, void** aResult)
 {
-  if (NS_WARN_IF(outer))
+  if (NS_WARN_IF(aOuter)) {
     return NS_ERROR_NO_AGGREGATION;
+  }
   return sGlobalMemory.QueryInterface(aIID, aResult);
 }
 
 nsresult
 nsMemoryImpl::FlushMemory(const char16_t* aReason, bool aImmediate)
 {
   nsresult rv = NS_OK;
 
@@ -110,27 +111,27 @@ nsMemoryImpl::FlushMemory(const char16_t
     // that...are we?
     if (!NS_IsMainThread()) {
       NS_ERROR("can't synchronously flush memory: not on UI thread");
       return NS_ERROR_FAILURE;
     }
   }
 
   bool lastVal = sIsFlushing.exchange(true);
-  if (lastVal)
+  if (lastVal) {
     return NS_OK;
+  }
 
   PRIntervalTime now = PR_IntervalNow();
 
   // Run the flushers immediately if we can; otherwise, proxy to the
   // UI thread an run 'em asynchronously.
   if (aImmediate) {
     rv = RunFlushers(aReason);
-  }
-  else {
+  } else {
     // Don't broadcast more than once every 1000ms to avoid being noisy
     if (PR_IntervalToMicroseconds(now - sLastFlushTime) > 1000) {
       sFlushEvent.mReason = aReason;
       rv = NS_DispatchToMainThread(&sFlushEvent, NS_DISPATCH_NORMAL);
     }
   }
 
   sLastFlushTime = now;
@@ -146,41 +147,49 @@ nsMemoryImpl::RunFlushers(const char16_t
     // Instead of:
     //  os->NotifyObservers(this, "memory-pressure", aReason);
     // we are going to do this manually to see who/what is
     // deallocating.
 
     nsCOMPtr<nsISimpleEnumerator> e;
     os->EnumerateObservers("memory-pressure", getter_AddRefs(e));
 
-    if ( e ) {
+    if (e) {
       nsCOMPtr<nsIObserver> observer;
       bool loop = true;
 
-      while (NS_SUCCEEDED(e->HasMoreElements(&loop)) && loop)
-      {
+      while (NS_SUCCEEDED(e->HasMoreElements(&loop)) && loop) {
         nsCOMPtr<nsISupports> supports;
         e->GetNext(getter_AddRefs(supports));
 
-        if (!supports)
+        if (!supports) {
           continue;
+        }
 
         observer = do_QueryInterface(supports);
         observer->Observe(observer, "memory-pressure", aReason);
       }
     }
   }
 
   sIsFlushing = false;
   return NS_OK;
 }
 
 // XXX need NS_IMPL_STATIC_ADDREF/RELEASE
-NS_IMETHODIMP_(MozExternalRefCountType) nsMemoryImpl::FlushEvent::AddRef() { return 2; }
-NS_IMETHODIMP_(MozExternalRefCountType) nsMemoryImpl::FlushEvent::Release() { return 1; }
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsMemoryImpl::FlushEvent::AddRef()
+{
+  return 2;
+}
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsMemoryImpl::FlushEvent::Release()
+{
+  return 1;
+}
 NS_IMPL_QUERY_INTERFACE(nsMemoryImpl::FlushEvent, nsIRunnable)
 
 NS_IMETHODIMP
 nsMemoryImpl::FlushEvent::Run()
 {
   sGlobalMemory.RunFlushers(mReason);
   return NS_OK;
 }
@@ -190,30 +199,30 @@ nsMemoryImpl::sIsFlushing;
 
 PRIntervalTime
 nsMemoryImpl::sLastFlushTime = 0;
 
 nsMemoryImpl::FlushEvent
 nsMemoryImpl::sFlushEvent;
 
 XPCOM_API(void*)
-NS_Alloc(size_t size)
+NS_Alloc(size_t aSize)
 {
-  return moz_xmalloc(size);
+  return moz_xmalloc(aSize);
 }
 
 XPCOM_API(void*)
-NS_Realloc(void* ptr, size_t size)
+NS_Realloc(void* aPtr, size_t aSize)
 {
-  return moz_xrealloc(ptr, size);
+  return moz_xrealloc(aPtr, aSize);
 }
 
 XPCOM_API(void)
-NS_Free(void* ptr)
+NS_Free(void* aPtr)
 {
-  moz_free(ptr);
+  moz_free(aPtr);
 }
 
 nsresult
-NS_GetMemoryManager(nsIMemory* *result)
+NS_GetMemoryManager(nsIMemory** aResult)
 {
-  return sGlobalMemory.QueryInterface(NS_GET_IID(nsIMemory), (void**) result);
+  return sGlobalMemory.QueryInterface(NS_GET_IID(nsIMemory), (void**)aResult);
 }
--- a/xpcom/base/nsMemoryImpl.h
+++ b/xpcom/base/nsMemoryImpl.h
@@ -16,29 +16,36 @@
 // a constructor/destructor or any instance members. Please don't add
 // instance member variables, only static member variables.
 
 class nsMemoryImpl : public nsIMemory
 {
 public:
   // We don't use the generic macros because we are a special static object
   NS_IMETHOD QueryInterface(REFNSIID aIID, void** aResult);
-  NS_IMETHOD_(MozExternalRefCountType) AddRef(void) { return 1; }
-  NS_IMETHOD_(MozExternalRefCountType) Release(void) { return 1; }
+  NS_IMETHOD_(MozExternalRefCountType) AddRef(void)
+  {
+    return 1;
+  }
+  NS_IMETHOD_(MozExternalRefCountType) Release(void)
+  {
+    return 1;
+  }
 
   NS_DECL_NSIMEMORY
 
-  static nsresult Create(nsISupports* outer,
-                         const nsIID& aIID, void **aResult);
+  static nsresult Create(nsISupports* aOuter,
+                         const nsIID& aIID, void** aResult);
 
   NS_HIDDEN_(nsresult) FlushMemory(const char16_t* aReason, bool aImmediate);
   NS_HIDDEN_(nsresult) RunFlushers(const char16_t* aReason);
 
 protected:
-  struct FlushEvent : public nsIRunnable {
+  struct FlushEvent : public nsIRunnable
+  {
     NS_DECL_ISUPPORTS_INHERITED
     NS_DECL_NSIRUNNABLE
     const char16_t* mReason;
   };
 
   static mozilla::Atomic<bool> sIsFlushing;
   static FlushEvent sFlushEvent;
   static PRIntervalTime sLastFlushTime;
--- a/xpcom/base/nsMemoryInfoDumper.cpp
+++ b/xpcom/base/nsMemoryInfoDumper.cpp
@@ -53,21 +53,23 @@ namespace {
 
 class DumpMemoryInfoToTempDirRunnable : public nsRunnable
 {
 public:
   DumpMemoryInfoToTempDirRunnable(const nsAString& aIdentifier,
                                   bool aMinimizeMemoryUsage)
     : mIdentifier(aIdentifier)
     , mMinimizeMemoryUsage(aMinimizeMemoryUsage)
-  {}
+  {
+  }
 
   NS_IMETHOD Run()
   {
-    nsCOMPtr<nsIMemoryInfoDumper> dumper = do_GetService("@mozilla.org/memory-info-dumper;1");
+    nsCOMPtr<nsIMemoryInfoDumper> dumper =
+      do_GetService("@mozilla.org/memory-info-dumper;1");
     dumper->DumpMemoryInfoToTempDir(mIdentifier, mMinimizeMemoryUsage);
     return NS_OK;
   }
 
 private:
   const nsString mIdentifier;
   const bool mMinimizeMemoryUsage;
 };
@@ -76,17 +78,18 @@ class GCAndCCLogDumpRunnable : public ns
 {
 public:
   GCAndCCLogDumpRunnable(const nsAString& aIdentifier,
                          bool aDumpAllTraces,
                          bool aDumpChildProcesses)
     : mIdentifier(aIdentifier)
     , mDumpAllTraces(aDumpAllTraces)
     , mDumpChildProcesses(aDumpChildProcesses)
-  {}
+  {
+  }
 
   NS_IMETHOD Run()
   {
     nsCOMPtr<nsIMemoryInfoDumper> dumper =
       do_GetService("@mozilla.org/memory-info-dumper;1");
 
     nsString ccLogPath, gcLogPath;
     dumper->DumpGCAndCCLogsToFile(mIdentifier, mDumpAllTraces,
@@ -159,38 +162,41 @@ void doGCCCDump(const uint8_t recvSig)
 }
 
 } // anonymous namespace
 #endif // MOZ_SUPPORTS_RT_SIGNALS }
 
 #if defined(MOZ_SUPPORTS_FIFO) // {
 namespace {
 
-void doMemoryReport(const nsCString& inputStr)
+void
+doMemoryReport(const nsCString& aInputStr)
 {
-  bool doMMUMemoryReport = inputStr == NS_LITERAL_CSTRING("minimize memory report");
-  LOG("FifoWatcher(command:%s) dispatching memory report runnable.", inputStr.get());
+  bool doMMUMemoryReport = aInputStr == NS_LITERAL_CSTRING("minimize memory report");
+  LOG("FifoWatcher(command:%s) dispatching memory report runnable.", aInputStr.get());
   nsRefPtr<DumpMemoryInfoToTempDirRunnable> runnable =
     new DumpMemoryInfoToTempDirRunnable(/* identifier = */ EmptyString(),
                                         doMMUMemoryReport);
   NS_DispatchToMainThread(runnable);
 }
 
-void doGCCCDump(const nsCString& inputStr)
+void
+doGCCCDump(const nsCString& aInputStr)
 {
-  bool doAllTracesGCCCDump = inputStr == NS_LITERAL_CSTRING("gc log");
-  LOG("FifoWatcher(command:%s) dispatching GC/CC log runnable.", inputStr.get());
+  bool doAllTracesGCCCDump = aInputStr == NS_LITERAL_CSTRING("gc log");
+  LOG("FifoWatcher(command:%s) dispatching GC/CC log runnable.", aInputStr.get());
   nsRefPtr<GCAndCCLogDumpRunnable> runnable =
     new GCAndCCLogDumpRunnable(/* identifier = */ EmptyString(),
                                doAllTracesGCCCDump,
                                /* dumpChildProcesses = */ true);
   NS_DispatchToMainThread(runnable);
 }
 
-bool SetupFifo()
+bool
+SetupFifo()
 {
   static bool fifoCallbacksRegistered = false;
 
   if (!FifoWatcher::MaybeCreate()) {
     return false;
   }
 
   MOZ_ASSERT(!fifoCallbacksRegistered,
@@ -207,17 +213,18 @@ bool SetupFifo()
                        doGCCCDump);
   fw->RegisterCallback(NS_LITERAL_CSTRING("abbreviated gc log"),
                        doGCCCDump);
 
   fifoCallbacksRegistered = true;
   return true;
 }
 
-void OnFifoEnabledChange(const char* /*unused*/, void* /*unused*/)
+void
+OnFifoEnabledChange(const char* /*unused*/, void* /*unused*/)
 {
   LOG("%s changed", FifoWatcher::kPrefName);
   if (SetupFifo()) {
     Preferences::UnregisterCallback(OnFifoEnabledChange,
                                     FifoWatcher::kPrefName,
                                     nullptr);
   }
 }
@@ -328,22 +335,23 @@ namespace mozilla {
 class DumpReportCallback MOZ_FINAL : public nsIHandleReportCallback
 {
 public:
   NS_DECL_ISUPPORTS
 
   DumpReportCallback(nsGZFileWriter* aWriter)
     : mIsFirst(true)
     , mWriter(aWriter)
-  {}
+  {
+  }
 
-  NS_IMETHOD Callback(const nsACString &aProcess, const nsACString &aPath,
+  NS_IMETHOD Callback(const nsACString& aProcess, const nsACString& aPath,
                       int32_t aKind, int32_t aUnits, int64_t aAmount,
-                      const nsACString &aDescription,
-                      nsISupports *aData)
+                      const nsACString& aDescription,
+                      nsISupports* aData)
   {
     if (mIsFirst) {
       DUMP(mWriter, "[");
       mIsFirst = false;
     } else {
       DUMP(mWriter, ",");
     }
 
@@ -352,17 +360,17 @@ public:
       // If the process is empty, the report originated with the process doing
       // the dumping.  In that case, generate the process identifier, which is of
       // the form "$PROCESS_NAME (pid $PID)", or just "(pid $PID)" if we don't
       // have a process name.  If we're the main process, we let $PROCESS_NAME be
       // "Main Process".
       if (XRE_GetProcessType() == GeckoProcessType_Default) {
         // We're the main process.
         process.AssignLiteral("Main Process");
-      } else if (ContentChild *cc = ContentChild::GetSingleton()) {
+      } else if (ContentChild* cc = ContentChild::GetSingleton()) {
         // Try to get the process name from ContentChild.
         cc->GetProcessName(process);
       }
       ContentChild::AppendProcessId(process);
 
     } else {
       // Otherwise, the report originated with another process and already has a
       // process name.  Just use that.
@@ -403,41 +411,42 @@ private:
   nsRefPtr<nsGZFileWriter> mWriter;
 };
 
 NS_IMPL_ISUPPORTS(DumpReportCallback, nsIHandleReportCallback)
 
 } // namespace mozilla
 
 static void
-MakeFilename(const char *aPrefix, const nsAString &aIdentifier,
-             const char *aSuffix, nsACString &aResult)
+MakeFilename(const char* aPrefix, const nsAString& aIdentifier,
+             const char* aSuffix, nsACString& aResult)
 {
   aResult = nsPrintfCString("%s-%s-%d.%s",
                             aPrefix,
                             NS_ConvertUTF16toUTF8(aIdentifier).get(),
                             getpid(), aSuffix);
 }
 
 #ifdef MOZ_DMD
 struct DMDWriteState
 {
   static const size_t kBufSize = 4096;
   char mBuf[kBufSize];
   nsRefPtr<nsGZFileWriter> mGZWriter;
 
-  DMDWriteState(nsGZFileWriter *aGZWriter)
+  DMDWriteState(nsGZFileWriter* aGZWriter)
     : mGZWriter(aGZWriter)
-  {}
+  {
+  }
 };
 
 static void
 DMDWrite(void* aState, const char* aFmt, va_list ap)
 {
-  DMDWriteState *state = (DMDWriteState*)aState;
+  DMDWriteState* state = (DMDWriteState*)aState;
   vsnprintf(state->mBuf, state->kBufSize, aFmt, ap);
   unused << state->mGZWriter->Write(state->mBuf);
 }
 #endif
 
 static nsresult
 DumpHeader(nsIGZFileWriter* aWriter)
 {
@@ -447,18 +456,19 @@ DumpHeader(nsIGZFileWriter* aWriter)
   // over 200 KiB of memory.
   //
   DUMP(aWriter, "{\n  \"version\": 1,\n");
 
   DUMP(aWriter, "  \"hasMozMallocUsableSize\": ");
 
   nsCOMPtr<nsIMemoryReporterManager> mgr =
     do_GetService("@mozilla.org/memory-reporter-manager;1");
-  if (NS_WARN_IF(!mgr))
+  if (NS_WARN_IF(!mgr)) {
     return NS_ERROR_UNEXPECTED;
+  }
 
   DUMP(aWriter, mgr->GetHasMozMallocUsableSize() ? "true" : "false");
   DUMP(aWriter, ",\n");
   DUMP(aWriter, "  \"reports\": ");
 
   return NS_OK;
 }
 
@@ -470,27 +480,28 @@ DumpFooter(nsIGZFileWriter* aWriter)
   return NS_OK;
 }
 
 class TempDirMemoryFinishCallback MOZ_FINAL : public nsIFinishReportingCallback
 {
 public:
   NS_DECL_ISUPPORTS
 
-  TempDirMemoryFinishCallback(nsGZFileWriter *aWriter,
-                              nsIFile *aTmpFile,
-                              const nsCString &aFilename,
-                              const nsString &aIdentifier)
+  TempDirMemoryFinishCallback(nsGZFileWriter* aWriter,
+                              nsIFile* aTmpFile,
+                              const nsCString& aFilename,
+                              const nsString& aIdentifier)
     : mrWriter(aWriter)
     , mrTmpFile(aTmpFile)
     , mrFilename(aFilename)
     , mIdentifier(aIdentifier)
-  {}
+  {
+  }
 
-  NS_IMETHOD Callback(nsISupports *aData);
+  NS_IMETHOD Callback(nsISupports* aData);
 
 private:
   nsRefPtr<nsGZFileWriter> mrWriter;
   nsCOMPtr<nsIFile> mrTmpFile;
   nsCString mrFilename;
   nsString mIdentifier;
 };
 
@@ -534,45 +545,48 @@ nsMemoryInfoDumper::DumpMemoryInfoToTemp
   nsresult rv;
   // In Android case, this function will open a file named aFilename under
   // specific folder (/data/local/tmp/memory-reports). Otherwise, it will
   // open a file named aFilename under "NS_OS_TEMP_DIR".
   rv = nsDumpUtils::OpenTempFile(NS_LITERAL_CSTRING("incomplete-") +
                                  mrFilename,
                                  getter_AddRefs(mrTmpFile),
                                  NS_LITERAL_CSTRING("memory-reports"));
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   nsRefPtr<nsGZFileWriter> mrWriter = new nsGZFileWriter();
   rv = mrWriter->Init(mrTmpFile);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   // Dump the memory reports to the file.
   rv = DumpHeader(mrWriter);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   // Process reporters.
   nsCOMPtr<nsIMemoryReporterManager> mgr =
     do_GetService("@mozilla.org/memory-reporter-manager;1");
   nsRefPtr<DumpReportCallback> dumpReport = new DumpReportCallback(mrWriter);
   nsRefPtr<nsIFinishReportingCallback> finishReport =
     new TempDirMemoryFinishCallback(mrWriter, mrTmpFile, mrFilename, identifier);
   rv = mgr->GetReportsExtended(dumpReport, nullptr,
                                finishReport, nullptr,
                                aMinimizeMemoryUsage,
                                identifier);
   return rv;
 }
 
 #ifdef MOZ_DMD
 nsresult
-nsMemoryInfoDumper::DumpDMD(const nsAString &aIdentifier)
+nsMemoryInfoDumper::DumpDMD(const nsAString& aIdentifier)
 {
   if (!dmd::IsEnabled()) {
     return NS_OK;
   }
 
   nsresult rv;
 
   // Create a filename like dmd-<identifier>-<pid>.txt.gz, which will be used
@@ -584,115 +598,125 @@ nsMemoryInfoDumper::DumpDMD(const nsAStr
   // and dump DMD output to it.  This must occur after the memory reporters
   // have been run (above), but before the memory-reports file has been
   // renamed (so scripts can detect the DMD file, if present).
 
   nsCOMPtr<nsIFile> dmdFile;
   rv = nsDumpUtils::OpenTempFile(dmdFilename,
                                  getter_AddRefs(dmdFile),
                                  NS_LITERAL_CSTRING("memory-reports"));
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   nsRefPtr<nsGZFileWriter> dmdWriter = new nsGZFileWriter();
   rv = dmdWriter->Init(dmdFile);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   // Dump DMD output to the file.
 
   DMDWriteState state(dmdWriter);
   dmd::Writer w(DMDWrite, &state);
   dmd::Dump(w);
 
   rv = dmdWriter->Finish();
   NS_WARN_IF(NS_FAILED(rv));
   return rv;
 }
 #endif  // MOZ_DMD
 
 NS_IMETHODIMP
-TempDirMemoryFinishCallback::Callback(nsISupports *aData)
+TempDirMemoryFinishCallback::Callback(nsISupports* aData)
 {
-  nsresult rv;
-
-  rv = DumpFooter(mrWriter);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  nsresult rv = DumpFooter(mrWriter);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   // The call to Finish() deallocates the memory allocated by mrWriter's first
   // DUMP() call (within DumpProcessMemoryReportsToGZFileWriter()).  Because
   // that memory was live while the memory reporters ran and thus measured by
   // them -- by "heap-allocated" if nothing else -- we want DMD to see it as
   // well.  So we deliberately don't call Finish() until after DMD finishes.
   rv = mrWriter->Finish();
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   // Rename the memory reports file, now that we're done writing all the files.
   // Its final name is "memory-report<-identifier>-<pid>.json.gz".
 
   nsCOMPtr<nsIFile> mrFinalFile;
   rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(mrFinalFile));
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
 #ifdef ANDROID
   rv = mrFinalFile->AppendNative(NS_LITERAL_CSTRING("memory-reports"));
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 #endif
 
   rv = mrFinalFile->AppendNative(mrFilename);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   rv = mrFinalFile->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   nsAutoString mrActualFinalFilename;
   rv = mrFinalFile->GetLeafName(mrActualFinalFilename);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   rv = mrTmpFile->MoveTo(/* directory */ nullptr, mrActualFinalFilename);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   // Write a message to the console.
 
   nsCOMPtr<nsIConsoleService> cs =
     do_GetService(NS_CONSOLESERVICE_CONTRACTID, &rv);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   nsString path;
   mrTmpFile->GetPath(path);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
-  nsString msg = NS_LITERAL_STRING(
-    "nsIMemoryInfoDumper dumped reports to ");
+  nsString msg = NS_LITERAL_STRING("nsIMemoryInfoDumper dumped reports to ");
   msg.Append(path);
   return cs->LogStringMessage(msg.get());
 }
 
 // This dumps the JSON footer and closes the file, and then calls the given
 // nsIFinishDumpingCallback.
 class FinishReportingCallback MOZ_FINAL : public nsIFinishReportingCallback
 {
 public:
   NS_DECL_ISUPPORTS
 
   FinishReportingCallback(nsIFinishDumpingCallback* aFinishDumping,
                           nsISupports* aFinishDumpingData)
     : mFinishDumping(aFinishDumping)
     , mFinishDumpingData(aFinishDumpingData)
-  {}
+  {
+  }
 
   NS_IMETHOD Callback(nsISupports* aData)
   {
     nsCOMPtr<nsIGZFileWriter> writer = do_QueryInterface(aData);
     NS_ENSURE_TRUE(writer, NS_ERROR_FAILURE);
 
     nsresult rv = DumpFooter(writer);
     NS_ENSURE_SUCCESS(rv, rv);
@@ -721,44 +745,50 @@ nsMemoryInfoDumper::DumpMemoryReportsToN
   nsISupports* aFinishDumpingData)
 {
   MOZ_ASSERT(!aFilename.IsEmpty());
 
   // Create the file.
 
   nsCOMPtr<nsIFile> mrFile;
   nsresult rv = NS_NewLocalFile(aFilename, false, getter_AddRefs(mrFile));
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   mrFile->InitWithPath(aFilename);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   bool exists;
   rv = mrFile->Exists(&exists);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   if (!exists) {
     rv = mrFile->Create(nsIFile::NORMAL_FILE_TYPE, 0644);
-    if (NS_WARN_IF(NS_FAILED(rv)))
+    if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
+    }
   }
 
   // Write the memory reports to the file.
 
   nsRefPtr<nsGZFileWriter> mrWriter = new nsGZFileWriter();
   rv = mrWriter->Init(mrFile);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   rv = DumpHeader(mrWriter);
-  if (NS_WARN_IF(NS_FAILED(rv)))
+  if (NS_WARN_IF(NS_FAILED(rv))) {
     return rv;
+  }
 
   // Process reports and finish up.
   nsRefPtr<DumpReportCallback> dumpReport = new DumpReportCallback(mrWriter);
   nsRefPtr<FinishReportingCallback> finishReporting =
     new FinishReportingCallback(aFinishDumping, aFinishDumpingData);
   nsCOMPtr<nsIMemoryReporterManager> mgr =
     do_GetService("@mozilla.org/memory-reporter-manager;1");
   return mgr->GetReports(dumpReport, nullptr, finishReporting, mrWriter);
--- a/xpcom/base/nsMemoryInfoDumper.h
+++ b/xpcom/base/nsMemoryInfoDumper.h
@@ -26,17 +26,17 @@ public:
 
   nsMemoryInfoDumper();
   virtual ~nsMemoryInfoDumper();
 
 public:
   static void Initialize();
 
 #ifdef MOZ_DMD
-  static nsresult DumpDMD(const nsAString &aIdentifier);
+  static nsresult DumpDMD(const nsAString& aIdentifier);
 #endif
 };
 
 #define NS_MEMORY_INFO_DUMPER_CID \
 { 0x00bd71fb, 0x7f09, 0x4ec3, \
 { 0x96, 0xaf, 0xa0, 0xb5, 0x22, 0xb7, 0x79, 0x69 } }
 
 #endif
--- a/xpcom/base/nsMemoryReporterManager.cpp
+++ b/xpcom/base/nsMemoryReporterManager.cpp
@@ -210,18 +210,19 @@ ResidentFastDistinguishedAmount(int64_t*
 #ifdef __FreeBSD__
 #include <libutil.h>
 #include <algorithm>
 
 static nsresult
 GetKinfoVmentrySelf(int64_t* aPrss, uint64_t* aMaxreg)
 {
   int cnt;
-  struct kinfo_vmentry *vmmap, *kve;
-  if ((vmmap = kinfo_getvmmap(getpid(), &cnt)) == nullptr) {
+  struct kinfo_vmentry* vmmap;
+  struct kinfo_vmentry* kve;
+  if (!(vmmap = kinfo_getvmmap(getpid(), &cnt))) {
     return NS_ERROR_FAILURE;
   }
   if (aPrss) {
     *aPrss = 0;
   }
   if (aMaxreg) {
     *aMaxreg = 0;
   }
@@ -675,17 +676,17 @@ NS_IMPL_ISUPPORTS(PageFaultsHardReporter
  **/
 
 #ifdef HAVE_JEMALLOC_STATS
 
 // This has UNITS_PERCENTAGE, so it is multiplied by 100.
 static int64_t
 HeapOverheadRatio(jemalloc_stats_t* aStats)
 {
-  return (int64_t) 10000 *
+  return (int64_t)10000 *
     (aStats->waste + aStats->bookkeeping + aStats->page_cache) /
     ((double)aStats->allocated);
 }
 
 class JemallocHeapReporter MOZ_FINAL : public nsIMemoryReporter
 {
 public:
   NS_DECL_ISUPPORTS
@@ -1037,17 +1038,17 @@ nsMemoryReporterManager::GetReportsExten
     rv = StartGettingReports();
   }
   return rv;
 }
 
 nsresult
 nsMemoryReporterManager::StartGettingReports()
 {
-  GetReportsState *s = mGetReportsState;
+  GetReportsState* s = mGetReportsState;
 
   // Get reports for this process.
   GetReportsForThisProcessExtended(s->mHandleReport, s->mHandleReportData,
                                    s->mDMDDumpIdent);
   s->mParentDone = true;
 
   // If there are no remaining child processes, we can finish up immediately.
   return (s->mNumChildProcessesCompleted >= s->mNumChildProcesses)
@@ -1055,25 +1056,25 @@ nsMemoryReporterManager::StartGettingRep
     : NS_OK;
 }
 
 typedef nsCOMArray<nsIMemoryReporter> MemoryReporterArray;
 
 static PLDHashOperator
 StrongEnumerator(nsRefPtrHashKey<nsIMemoryReporter>* aElem, void* aData)
 {
-  MemoryReporterArray *allReporters = static_cast<MemoryReporterArray*>(aData);
+  MemoryReporterArray* allReporters = static_cast<MemoryReporterArray*>(aData);
   allReporters->AppendElement(aElem->GetKey());
   return PL_DHASH_NEXT;
 }
 
 static PLDHashOperator
 WeakEnumerator(nsPtrHashKey<nsIMemoryReporter>* aElem, void* aData)
 {
-  MemoryReporterArray *allReporters = static_cast<MemoryReporterArray*>(aData);
+  MemoryReporterArray* allReporters = static_cast<MemoryReporterArray*>(aData);
   allReporters->AppendElement(aElem->GetKey());
   return PL_DHASH_NEXT;
 }
 
 NS_IMETHODIMP
 nsMemoryReporterManager::GetReportsForThisProcess(
   nsIHandleReportCallback* aHandleReport,
   nsISupports* aHandleReportData)
@@ -1258,18 +1259,17 @@ nsMemoryReporterManager::RegisterReporte
   // This method is thread-safe.
   mozilla::MutexAutoLock autoLock(mMutex);
 
   if (mIsRegistrationBlocked && !aForce) {
     return NS_ERROR_FAILURE;
   }
 
   if (mStrongReporters->Contains(aReporter) ||
-      mWeakReporters->Contains(aReporter))
-  {
+      mWeakReporters->Contains(aReporter)) {
     return NS_ERROR_FAILURE;
   }
 
   // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
   // a kung fu death grip before calling PutEntry.  Otherwise, if PutEntry
   // addref'ed and released |aReporter| before finally addref'ing it for
   // good, it would free aReporter!  The kung fu death grip could itself be
   // problematic if PutEntry didn't addref |aReporter| (because then when the
@@ -1397,17 +1397,19 @@ nsMemoryReporterManager::UnblockRegistra
 }
 
 // This is just a wrapper for int64_t that implements nsISupports, so it can be
 // passed to nsIMemoryReporter::CollectReports.
 class Int64Wrapper MOZ_FINAL : public nsISupports
 {
 public:
   NS_DECL_ISUPPORTS
-  Int64Wrapper() : mValue(0) { }
+  Int64Wrapper() : mValue(0)
+  {
+  }
   int64_t mValue;
 };
 
 NS_IMPL_ISUPPORTS0(Int64Wrapper)
 
 class ExplicitCallback MOZ_FINAL : public nsIHandleReportCallback
 {
 public:
@@ -1421,18 +1423,17 @@ public:
     // Using the "heap-allocated" reporter here instead of
     // nsMemoryReporterManager.heapAllocated goes against the usual
     // pattern.  But it's for a good reason:  in tests, we can easily
     // create artificial (i.e. deterministic) reporters -- which allows us
     // to precisely test nsMemoryReporterManager.explicit -- but we can't
     // do that for distinguished amounts.
     if (aPath.Equals("heap-allocated") ||
         (aKind == nsIMemoryReporter::KIND_NONHEAP &&
-         PromiseFlatCString(aPath).Find("explicit") == 0))
-    {
+         PromiseFlatCString(aPath).Find("explicit") == 0)) {
       Int64Wrapper* wrappedInt64 = static_cast<Int64Wrapper*>(aWrappedExplicit);
       wrappedInt64->mValue += aAmount;
     }
     return NS_OK;
   }
 };
 
 NS_IMPL_ISUPPORTS(ExplicitCallback, nsIHandleReportCallback)
@@ -1505,17 +1506,18 @@ nsMemoryReporterManager::GetResidentFast
   return ResidentFastDistinguishedAmount(aAmount);
 #else
   *aAmount = 0;
   return NS_ERROR_NOT_AVAILABLE;
 #endif
 }
 
 /*static*/
-int64_t nsMemoryReporterManager::ResidentFast()
+int64_t
+nsMemoryReporterManager::ResidentFast()
 {
 #ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
   int64_t amount;
   ResidentFastDistinguishedAmount(&amount);
   return amount;
 #else
   return 0;
 #endif
@@ -1653,17 +1655,18 @@ namespace {
  * runnable's constructor.
  */
 class MinimizeMemoryUsageRunnable : public nsRunnable
 {
 public:
   MinimizeMemoryUsageRunnable(nsIRunnable* aCallback)
     : mCallback(aCallback)
     , mRemainingIters(sNumIters)
-  {}
+  {
+  }
 
   NS_IMETHOD Run()
   {
     nsCOMPtr<nsIObserverService> os = services::GetObserverService();
     if (!os) {
       return NS_ERROR_FAILURE;
     }
 
--- a/xpcom/base/nsMemoryReporterManager.h
+++ b/xpcom/base/nsMemoryReporterManager.h
@@ -34,18 +34,18 @@ public:
   // Gets the memory reporter manager service.
   static nsMemoryReporterManager* GetOrCreate()
   {
     nsCOMPtr<nsIMemoryReporterManager> imgr =
       do_GetService("@mozilla.org/memory-reporter-manager;1");
     return static_cast<nsMemoryReporterManager*>(imgr.get());
   }
 
-  typedef nsTHashtable<nsRefPtrHashKey<nsIMemoryReporter> > StrongReportersTable;
-  typedef nsTHashtable<nsPtrHashKey<nsIMemoryReporter> > WeakReportersTable;
+  typedef nsTHashtable<nsRefPtrHashKey<nsIMemoryReporter>> StrongReportersTable;
+  typedef nsTHashtable<nsPtrHashKey<nsIMemoryReporter>> WeakReportersTable;
 
   void IncrementNumChildProcesses();
   void DecrementNumChildProcesses();
 
   // Inter-process memory reporting proceeds as follows.
   //
   // - GetReports() (declared within NS_DECL_NSIMEMORYREPORTERMANAGER)
   //   synchronously gets memory reports for the current process, tells all
@@ -112,17 +112,17 @@ public:
   //   data.  So the reported data will reflect how things were when the
   //   request began.
   //
   // The inconsistencies between these three cases are unfortunate but
   // difficult to avoid.  It's enough of an edge case to not be worth doing
   // more.
   //
   void HandleChildReports(
-    const uint32_t& generation,
+    const uint32_t& aGeneration,
     const InfallibleTArray<mozilla::dom::MemoryReport>& aChildReports);
   nsresult FinishReporting();
 
   // Functions that (a) implement distinguished amounts, and (b) are outside of
   // this module.
   struct AmountFns
   {
     mozilla::InfallibleAmountFn mJSMainRuntimeGCHeap;
@@ -134,17 +134,20 @@ public:
 
     mozilla::InfallibleAmountFn mStorageSQLite;
 
     mozilla::InfallibleAmountFn mLowMemoryEventsVirtual;
     mozilla::InfallibleAmountFn mLowMemoryEventsPhysical;
 
     mozilla::InfallibleAmountFn mGhostWindows;
 
-    AmountFns() { mozilla::PodZero(this); }
+    AmountFns()
+    {
+      mozilla::PodZero(this);
+    }
   };
   AmountFns mAmountFns;
 
   // Convenience function to get RSS easily from other code.  This is useful
   // when debugging transient memory spikes with printf instrumentation.
   static int64_t ResidentFast();
 
   // Functions that measure per-tab memory consumption.
@@ -197,17 +200,17 @@ private:
     nsString                             mDMDDumpIdent;
 
     GetReportsState(uint32_t aGeneration, nsITimer* aTimer,
                     uint32_t aNumChildProcesses,
                     nsIHandleReportCallback* aHandleReport,
                     nsISupports* aHandleReportData,
                     nsIFinishReportingCallback* aFinishReporting,
                     nsISupports* aFinishReportingData,
-                    const nsAString &aDMDDumpIdent)
+                    const nsAString& aDMDDumpIdent)
       : mGeneration(aGeneration)
       , mTimer(aTimer)
       , mNumChildProcesses(aNumChildProcesses)
       , mNumChildProcessesCompleted(0)
       , mParentDone(false)
       , mHandleReport(aHandleReport)
       , mHandleReportData(aHandleReportData)
       , mFinishReporting(aFinishReporting)
--- a/xpcom/base/nsMessageLoop.cpp
+++ b/xpcom/base/nsMessageLoop.cpp
@@ -28,17 +28,19 @@ namespace {
  */
 class MessageLoopIdleTask
   : public Task
   , public SupportsWeakPtr<MessageLoopIdleTask>
 {
 public:
   MOZ_DECLARE_REFCOUNTED_TYPENAME(MessageLoopIdleTask)
   MessageLoopIdleTask(nsIRunnable* aTask, uint32_t aEnsureRunsAfterMS);
-  virtual ~MessageLoopIdleTask() {}
+  virtual ~MessageLoopIdleTask()
+  {
+  }
   virtual void Run();
 
 private:
   nsresult Init(uint32_t aEnsureRunsAfterMS);
 
   nsCOMPtr<nsIRunnable> mTask;
   nsCOMPtr<nsITimer> mTimer;
 };
@@ -83,18 +85,19 @@ MessageLoopIdleTask::MessageLoopIdleTask
     mTimer = nullptr;
   }
 }
 
 nsresult
 MessageLoopIdleTask::Init(uint32_t aEnsureRunsAfterMS)
 {
   mTimer = do_CreateInstance("@mozilla.org/timer;1");
-  if (NS_WARN_IF(!mTimer))
+  if (NS_WARN_IF(!mTimer)) {
     return NS_ERROR_UNEXPECTED;
+  }
 
   nsRefPtr<MessageLoopTimerCallback> callback =
     new MessageLoopTimerCallback(this);
 
   return mTimer->InitWithCallback(callback, aEnsureRunsAfterMS,
                                   nsITimer::TYPE_ONE_SHOT);
 }
 
@@ -113,17 +116,18 @@ MessageLoopIdleTask::Run()
   if (mTask) {
     mTask->Run();
     mTask = nullptr;
   }
 }
 
 MessageLoopTimerCallback::MessageLoopTimerCallback(MessageLoopIdleTask* aTask)
   : mTask(aTask->asWeakPtr())
-{}
+{
+}
 
 NS_IMETHODIMP
 MessageLoopTimerCallback::Notify(nsITimer* aTimer)
 {
   // We don't expect to hit the case when the timer fires but mTask has been
   // deleted, because mTask should cancel the timer before the mTask is
   // deleted.  But you never know...
   NS_WARN_IF_FALSE(mTask, "This timer shouldn't have fired.");
@@ -150,13 +154,14 @@ nsMessageLoop::PostIdleTask(nsIRunnable*
   return NS_OK;
 }
 
 nsresult
 nsMessageLoopConstructor(nsISupports* aOuter,
                          const nsIID& aIID,
                          void** aInstancePtr)
 {
-  if (NS_WARN_IF(aOuter))
+  if (NS_WARN_IF(aOuter)) {
     return NS_ERROR_NO_AGGREGATION;
+  }
   nsISupports* messageLoop = new nsMessageLoop();
   return messageLoop->QueryInterface(aIID, aInstancePtr);
 }
--- a/xpcom/base/nsMessageLoop.h
+++ b/xpcom/base/nsMessageLoop.h
@@ -9,19 +9,21 @@
  * nsMessageLoop implements nsIMessageLoop, which wraps Chromium's MessageLoop
  * class and adds a bit of sugar.
  */
 class nsMessageLoop : public nsIMessageLoop
 {
   NS_DECL_ISUPPORTS
   NS_DECL_NSIMESSAGELOOP
 
-  virtual ~nsMessageLoop() {}
+  virtual ~nsMessageLoop()
+  {
+  }
 };
 
 #define NS_MESSAGE_LOOP_CID \
 {0x67b3ac0c, 0xd806, 0x4d48, \
 {0x93, 0x9e, 0x6a, 0x81, 0x9e, 0x6c, 0x24, 0x8f}}
 
 extern nsresult
-nsMessageLoopConstructor(nsISupports* outer,
+nsMessageLoopConstructor(nsISupports* aOuter,
                          const nsIID& aIID,
-                         void* *aInstancePtr);
+                         void** aInstancePtr);
--- a/xpcom/base/nsObjCExceptions.h
+++ b/xpcom/base/nsObjCExceptions.h
@@ -36,117 +36,122 @@
  * from an exception handler. At some point we will fix this by replacing
  * all macros in the tree with appropriately-named macros.
  */
 
 // See Mozilla bug 163260.
 // This file can only be included in an Objective-C context.
 
 __attribute__((unused))
-static void nsObjCExceptionLog(NSException* aException)
+static void
+nsObjCExceptionLog(NSException* aException)
 {
   NSLog(@"Mozilla has caught an Obj-C exception [%@: %@]",
         [aException name], [aException reason]);
 
 #if defined(MOZ_CRASHREPORTER) && defined(__cplusplus)
   // Attach exception info to the crash report.
   nsCOMPtr<nsICrashReporter> crashReporter =
     do_GetService("@mozilla.org/toolkit/crash-reporter;1");
-  if (crashReporter)
+  if (crashReporter) {
     crashReporter->AppendObjCExceptionInfoToAppNotes(static_cast<void*>(aException));
+  }
 #endif
 
 #ifdef DEBUG
   @try {
     // Try to get stack information out of the exception. 10.5 returns the stack
     // info with the callStackReturnAddresses selector.
-    NSArray *stackTrace = nil;
+    NSArray* stackTrace = nil;
     if ([aException respondsToSelector:@selector(callStackReturnAddresses)]) {
       NSArray* addresses = (NSArray*)
         [aException performSelector:@selector(callStackReturnAddresses)];
-      if ([addresses count])
+      if ([addresses count]) {
         stackTrace = addresses;
+      }
     }
 
     // 10.4 doesn't respond to callStackReturnAddresses so we'll try to pull the
     // stack info out of the userInfo. It might not be there, sadly :(
-    if (!stackTrace)
+    if (!stackTrace) {
       stackTrace = [[aException userInfo] objectForKey:NSStackTraceKey];
+    }
 
     if (stackTrace) {
       // The command line should look like this:
       //   /usr/bin/atos -p <pid> -printHeader <stack frame addresses>
-      NSMutableArray *args =
+      NSMutableArray* args =
         [NSMutableArray arrayWithCapacity:[stackTrace count] + 3];
 
       [args addObject:@"-p"];
       int pid = [[NSProcessInfo processInfo] processIdentifier];
       [args addObject:[NSString stringWithFormat:@"%d", pid]];
 
       [args addObject:@"-printHeader"];
 
       unsigned int stackCount = [stackTrace count];
       unsigned int stackIndex = 0;
       for (; stackIndex < stackCount; stackIndex++) {
         unsigned long address =
           [[stackTrace objectAtIndex:stackIndex] unsignedLongValue];
         [args addObject:[NSString stringWithFormat:@"0x%lx", address]];
       }
 
-      NSPipe *outPipe = [NSPipe pipe];
+      NSPipe* outPipe = [NSPipe pipe];
 
-      NSTask *task = [[NSTask alloc] init];
+      NSTask* task = [[NSTask alloc] init];
       [task setLaunchPath:@"/usr/bin/atos"];
       [task setArguments:args];
       [task setStandardOutput:outPipe];
       [task setStandardError:outPipe];
 
       NSLog(@"Generating stack trace for Obj-C exception...");
 
       // This will throw an exception if the atos tool cannot be found, and in
       // that case we'll just hit our @catch block below.
       [task launch];
 
       [task waitUntilExit];
       [task release];
 
-      NSData *outData =
+      NSData* outData =
         [[outPipe fileHandleForReading] readDataToEndOfFile];
-      NSString *outString =
+      NSString* outString =
         [[NSString alloc] initWithData:outData encoding:NSUTF8StringEncoding];
 
       NSLog(@"Stack trace:\n%@", outString);
 
       [outString release];
-    }
-    else {
+    } else {
       NSLog(@"<No stack information available for Obj-C exception>");
     }
   }
-  @catch (NSException *exn) {
+  @catch (NSException* exn) {
     NSLog(@"Failed to generate stack trace for Obj-C exception [%@: %@]",
           [exn name], [exn reason]);
   }
 #endif
 }
 
 __attribute__((unused))
-static void nsObjCExceptionAbort()
+static void
+nsObjCExceptionAbort()
 {
   // We need to raise a mach-o signal here, the Mozilla crash reporter on
   // Mac OS X does not respond to POSIX signals. Raising mach-o signals directly
   // is tricky so we do it by just derefing a null pointer.
   int* foo = nullptr;
   *foo = 1;
 }
 
 __attribute__((unused))
-static void nsObjCExceptionLogAbort(NSException *e)
+static void
+nsObjCExceptionLogAbort(NSException* aException)
 {
-  nsObjCExceptionLog(e);
+  nsObjCExceptionLog(aException);
   nsObjCExceptionAbort();
 }
 
 #define NS_OBJC_TRY(_e, _fail)                     \
 @try { _e; }                                       \
 @catch(NSException *_exn) {                        \
   nsObjCExceptionLog(_exn);                        \
   _fail;                                           \
--- a/xpcom/base/nsSetDllDirectory.h
+++ b/xpcom/base/nsSetDllDirectory.h
@@ -11,17 +11,18 @@
 #endif
 
 #include <windows.h>
 #include <nscore.h>
 #include <stdlib.h>
 
 namespace mozilla {
 
-static void SanitizeEnvironmentVariables()
+static void
+SanitizeEnvironmentVariables()
 {
   DWORD bufferSize = GetEnvironmentVariableW(L"PATH", nullptr, 0);
   if (bufferSize) {
     wchar_t* originalPath = new wchar_t[bufferSize];
     if (bufferSize - 1 == GetEnvironmentVariableW(L"PATH", originalPath, bufferSize)) {
       bufferSize = ExpandEnvironmentStringsW(originalPath, nullptr, 0);
       if (bufferSize) {
         wchar_t* newPath = new wchar_t[bufferSize];
--- a/xpcom/base/nsStackWalk.cpp
+++ b/xpcom/base/nsStackWalk.cpp
@@ -12,17 +12,18 @@
 #include "nsStackWalkPrivate.h"
 
 #include "nsStackWalk.h"
 
 using namespace mozilla;
 
 // The presence of this address in the stack must stop the stack walk. If
 // there is no such address, the structure will be {nullptr, true}.
-struct CriticalAddress {
+struct CriticalAddress
+{
   void* mAddr;
   bool mInit;
 };
 static CriticalAddress gCriticalAddress;
 
 // for _Unwind_Backtrace from libcxxrt or libunwind
 // cxxabi.h from libcxxrt implicitly includes unwind.h first
 #if defined(HAVE__UNWIND_BACKTRACE) && !defined(_GNU_SOURCE)
@@ -46,34 +47,36 @@ static CriticalAddress gCriticalAddress;
   (defined(__sun) && \
    (defined(__sparc) || defined(sparc) || defined(__i386) || defined(i386)))
 
 #if NSSTACKWALK_SUPPORTS_MACOSX
 #include <pthread.h>
 #include <CoreServices/CoreServices.h>
 
 typedef void
-malloc_logger_t(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
-                uintptr_t result, uint32_t num_hot_frames_to_skip);
-extern malloc_logger_t *malloc_logger;
+malloc_logger_t(uint32_t aType,
+                uintptr_t aArg1, uintptr_t aArg2, uintptr_t aArg3,
+                uintptr_t aResult, uint32_t aNumHotFramesToSkip);
+extern malloc_logger_t* malloc_logger;
 
 static void
-stack_callback(void *pc, void *sp, void *closure)
+stack_callback(void* aPc, void* aSp, void* aClosure)
 {
-  const char *name = reinterpret_cast<char *>(closure);
+  const char* name = static_cast<char*>(aClosure);
   Dl_info info;
 
   // On Leopard dladdr returns the wrong value for "new_sem_from_pool". The
   // stack shows up as having two pthread_cond_wait$UNIX2003 frames. The
   // correct one is the first that we find on our way up, so the
   // following check for gCriticalAddress.mAddr is critical.
-  if (gCriticalAddress.mAddr || dladdr(pc, &info) == 0  ||
-      info.dli_sname == nullptr || strcmp(info.dli_sname, name) != 0)
+  if (gCriticalAddress.mAddr || dladdr(aPc, &info) == 0  ||
+      !info.dli_sname || strcmp(info.dli_sname, name) != 0) {
     return;
-  gCriticalAddress.mAddr = pc;
+  }
+  gCriticalAddress.mAddr = aPc;
 }
 
 #ifdef DEBUG
 #define MAC_OS_X_VERSION_10_7_HEX 0x00001070
 
 static int32_t OSXVersion()
 {
   static int32_t gOSXVersion = 0x0;
@@ -86,63 +89,66 @@ static int32_t OSXVersion()
 
 static bool OnLionOrLater()
 {
   return (OSXVersion() >= MAC_OS_X_VERSION_10_7_HEX);
 }
 #endif
 
 static void
-my_malloc_logger(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3,
-                 uintptr_t result, uint32_t num_hot_frames_to_skip)
+my_malloc_logger(uint32_t aType,
+                 uintptr_t aArg1, uintptr_t aArg2, uintptr_t aArg3,
+                 uintptr_t aResult, uint32_t aNumHotFramesToSkip)
 {
   static bool once = false;
-  if (once)
+  if (once) {
     return;
+  }
   once = true;
 
   // On Leopard dladdr returns the wrong value for "new_sem_from_pool". The
   // stack shows up as having two pthread_cond_wait$UNIX2003 frames.
-  const char *name = "new_sem_from_pool";
+  const char* name = "new_sem_from_pool";
   NS_StackWalk(stack_callback, /* skipFrames */ 0, /* maxFrames */ 0,
                const_cast<char*>(name), 0, nullptr);
 }
 
 // This is called from NS_LogInit() and from the stack walking functions, but
 // only the first call has any effect.  We need to call this function from both
 // places because it must run before any mutexes are created, and also before
 // any objects whose refcounts we're logging are created.  Running this
 // function during NS_LogInit() ensures that we meet the first criterion, and
 // running this function during the stack walking functions ensures we meet the
 // second criterion.
 void
 StackWalkInitCriticalAddress()
 {
-  if(gCriticalAddress.mInit)
+  if (gCriticalAddress.mInit) {
     return;
+  }
   gCriticalAddress.mInit = true;
   // We must not do work when 'new_sem_from_pool' calls realloc, since
   // it holds a non-reentrant spin-lock and we will quickly deadlock.
   // new_sem_from_pool is not directly accessible using dlsym, so
   // we force a situation where new_sem_from_pool is on the stack and
   // use dladdr to check the addresses.
 
   // malloc_logger can be set by external tools like 'Instruments' or 'leaks'
-  malloc_logger_t *old_malloc_logger = malloc_logger;
+  malloc_logger_t* old_malloc_logger = malloc_logger;
   malloc_logger = my_malloc_logger;
 
   pthread_cond_t cond;
   int r = pthread_cond_init(&cond, 0);
   MOZ_ASSERT(r == 0);
   pthread_mutex_t mutex;
-  r = pthread_mutex_init(&mutex,0);
+  r = pthread_mutex_init(&mutex, 0);
   MOZ_ASSERT(r == 0);
   r = pthread_mutex_lock(&mutex);
   MOZ_ASSERT(r == 0);
-  struct timespec abstime = {0, 1};
+  struct timespec abstime = { 0, 1 };
   r = pthread_cond_timedwait_relative_np(&cond, &mutex, &abstime);
 
   // restore the previous malloc logger
   malloc_logger = old_malloc_logger;
 
   // On Lion, malloc is no longer called from pthread_cond_*wait*. This prevents
   // us from finding the address, but that is fine, since with no call to malloc
   // there is no critical address.
@@ -151,22 +157,24 @@ StackWalkInitCriticalAddress()
   r = pthread_mutex_unlock(&mutex);
   MOZ_ASSERT(r == 0);
   r = pthread_mutex_destroy(&mutex);
   MOZ_ASSERT(r == 0);
   r = pthread_cond_destroy(&cond);
   MOZ_ASSERT(r == 0);
 }
 
-static bool IsCriticalAddress(void* aPC)
+static bool
+IsCriticalAddress(void* aPC)
 {
   return gCriticalAddress.mAddr == aPC;
 }
 #else
-static bool IsCriticalAddress(void* aPC)
+static bool
+IsCriticalAddress(void* aPC)
 {
   return false;
 }
 // We still initialize gCriticalAddress.mInit so that this code behaves
 // the same on all platforms. Otherwise a failure to init would be visible
 // only on OS X.
 void
 StackWalkInitCriticalAddress()
@@ -203,98 +211,101 @@ StackWalkInitCriticalAddress()
 extern "C" {
 
 extern HANDLE hStackWalkMutex;
 
 bool EnsureSymInitialized();
 
 bool EnsureWalkThreadReady();
 
-struct WalkStackData {
+struct WalkStackData
+{
   uint32_t skipFrames;
   HANDLE thread;
   bool walkCallingThread;
   HANDLE process;
   HANDLE eventStart;
   HANDLE eventEnd;
-  void **pcs;
+  void** pcs;
   uint32_t pc_size;
   uint32_t pc_count;
   uint32_t pc_max;
-  void **sps;
+  void** sps;
   uint32_t sp_size;
   uint32_t sp_count;
-  void *platformData;
+  void* platformData;
 };
 
-void PrintError(char *prefix, WalkStackData* data);
-unsigned int WINAPI WalkStackThread(void* data);
-void WalkStackMain64(struct WalkStackData* data);
+void PrintError(char* aPrefix, WalkStackData* aData);
+unsigned int WINAPI WalkStackThread(void* aData);
+void WalkStackMain64(struct WalkStackData* aData);
 
 
 DWORD gStackWalkThread;
 CRITICAL_SECTION gDbgHelpCS;
 
 }
 
 // Routine to print an error message to standard error.
-void PrintError(const char *prefix)
+void
+PrintError(const char* aPrefix)
 {
   LPVOID lpMsgBuf;
   DWORD lastErr = GetLastError();
   FormatMessageA(
     FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
     nullptr,
     lastErr,
     MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
     (LPSTR) &lpMsgBuf,
     0,
     nullptr
   );
   fprintf(stderr, "### ERROR: %s: %s",
-          prefix, lpMsgBuf ? lpMsgBuf : "(null)\n");
+          aPrefix, lpMsgBuf ? lpMsgBuf : "(null)\n");
   fflush(stderr);
   LocalFree(lpMsgBuf);
 }
 
 bool
 EnsureWalkThreadReady()
 {
   static bool walkThreadReady = false;
   static HANDLE stackWalkThread = nullptr;
   static HANDLE readyEvent = nullptr;
 
-  if (walkThreadReady)
+  if (walkThreadReady) {
     return walkThreadReady;
+  }
 
-  if (stackWalkThread == nullptr) {
+  if (!stackWalkThread) {
     readyEvent = ::CreateEvent(nullptr, FALSE /* auto-reset*/,
                                FALSE /* initially non-signaled */,
                                nullptr);
-    if (readyEvent == nullptr) {
+    if (!readyEvent) {
       PrintError("CreateEvent");
       return false;
     }
 
     unsigned int threadID;
     stackWalkThread = (HANDLE)
       _beginthreadex(nullptr, 0, WalkStackThread, (void*)readyEvent,
                      0, &threadID);
-    if (stackWalkThread == nullptr) {
+    if (!stackWalkThread) {
       PrintError("CreateThread");
       ::CloseHandle(readyEvent);
       readyEvent = nullptr;
       return false;
     }
     gStackWalkThread = threadID;
     ::CloseHandle(stackWalkThread);
   }
 
-  MOZ_ASSERT((stackWalkThread != nullptr && readyEvent != nullptr) ||
-             (stackWalkThread == nullptr && readyEvent == nullptr));
+  MOZ_ASSERT((stackWalkThread && readyEvent) ||
+             (!stackWalkThread && !readyEvent));
 
   // The thread was created. Try to wait an arbitrary amount of time (1 second
   // should be enough) for its event loop to start before posting events to it.
   DWORD waitRet = ::WaitForSingleObject(readyEvent, 1000);
   if (waitRet == WAIT_TIMEOUT) {
     // We get a timeout if we're called during static initialization because
     // the thread will only start executing after we return so it couldn't
     // have signalled the event. If that is the case, give up for now and
@@ -307,43 +318,43 @@ EnsureWalkThreadReady()
 
 
   ::InitializeCriticalSection(&gDbgHelpCS);
 
   return walkThreadReady = true;
 }
 
 void
-WalkStackMain64(struct WalkStackData* data)
+WalkStackMain64(struct WalkStackData* aData)
 {
   // Get the context information for the thread. That way we will
   // know where our sp, fp, pc, etc. are and can fill in the
   // STACKFRAME64 with the initial values.
   CONTEXT context;
-  HANDLE myProcess = data->process;
-  HANDLE myThread = data->thread;
+  HANDLE myProcess = aData->process;
+  HANDLE myThread = aData->thread;
   DWORD64 addr;
   DWORD64 spaddr;
   STACKFRAME64 frame64;
   // skip our own stack walking frames
-  int skip = (data->walkCallingThread ? 3 : 0) + data->skipFrames;
+  int skip = (aData->walkCallingThread ? 3 : 0) + aData->skipFrames;
   BOOL ok;
 
   // Get a context for the specified thread.
-  if (!data->platformData) {
+  if (!aData->platformData) {
     memset(&context, 0, sizeof(CONTEXT));
     context.ContextFlags = CONTEXT_FULL;
     if (!GetThreadContext(myThread, &context)) {
-      if (data->walkCallingThread) {
+      if (aData->walkCallingThread) {
         PrintError("GetThreadContext");
       }
       return;
     }
   } else {
-    context = *static_cast<CONTEXT*>(data->platformData);
+    context = *static_cast<CONTEXT*>(aData->platformData);
   }
 
   // Setup initial stack frame to walk from
   memset(&frame64, 0, sizeof(frame64));
 #ifdef _M_IX86
   frame64.AddrPC.Offset    = context.Eip;
   frame64.AddrStack.Offset = context.Esp;
   frame64.AddrFrame.Offset = context.Ebp;
@@ -390,42 +401,46 @@ WalkStackMain64(struct WalkStackData* da
     LeaveCriticalSection(&gDbgHelpCS);
 
     if (ok) {
       addr = frame64.AddrPC.Offset;
       spaddr = frame64.AddrStack.Offset;
     } else {
       addr = 0;
       spaddr = 0;
-      if (data->walkCallingThread) {
+      if (aData->walkCallingThread) {
         PrintError("WalkStack64");
       }
     }
 
     if (!ok || (addr == 0)) {
       break;
     }
 
     if (skip-- > 0) {
       continue;
     }
 
-    if (data->pc_count < data->pc_size)
-      data->pcs[data->pc_count] = (void*)addr;
-    ++data->pc_count;
+    if (aData->pc_count < aData->pc_size) {
+      aData->pcs[aData->pc_count] = (void*)addr;
+    }
+    ++aData->pc_count;
 
-    if (data->sp_count < data->sp_size)
-      data->sps[data->sp_count] = (void*)spaddr;
-    ++data->sp_count;
+    if (aData->sp_count < aData->sp_size) {
+      aData->sps[aData->sp_count] = (void*)spaddr;
+    }
+    ++aData->sp_count;
 
-    if (data->pc_max != 0 && data->pc_count == data->pc_max)
+    if (aData->pc_max != 0 && aData->pc_count == aData->pc_max) {
       break;
+    }
 
-    if (frame64.AddrReturn.Offset == 0)
+    if (frame64.AddrReturn.Offset == 0) {
       break;
+    }
   }
   return;
 }
 
 
 unsigned int WINAPI
 WalkStackThread(void* aData)
 {
@@ -435,39 +450,40 @@ WalkStackThread(void* aData)
   // Call PeekMessage to force creation of a message queue so that
   // other threads can safely post events to us.
   ::PeekMessage(&msg, nullptr, WM_USER, WM_USER, PM_NOREMOVE);
 
   // and tell the thread that created us that we're ready.
   HANDLE readyEvent = (HANDLE)aData;
   ::SetEvent(readyEvent);
 
-  while ((msgRet = ::GetMessage(&msg, (HWND)-1, 0, 0)) != 0) {
+  while ((msgRet = ::GetMessage(&msg, (HWND) - 1, 0, 0)) != 0) {
     if (msgRet == -1) {
       PrintError("GetMessage");
     } else {
       DWORD ret;
 
-      struct WalkStackData *data = (WalkStackData *)msg.lParam;
-      if (!data)
+      struct WalkStackData* data = (WalkStackData*)msg.lParam;
+      if (!data) {
         continue;
+      }
 
       // Don't suspend the calling thread until it's waiting for
       // us; otherwise the number of frames on the stack could vary.
       ret = ::WaitForSingleObject(data->eventStart, INFINITE);
-      if (ret != WAIT_OBJECT_0)
+      if (ret != WAIT_OBJECT_0) {
         PrintError("WaitForSingleObject");
+      }
 
       // Suspend the calling thread, dump his stack, and then resume him.
       // He's currently waiting for us to finish so now should be a good time.
-      ret = ::SuspendThread( data->thread );
+      ret = ::SuspendThread(data->thread);
       if (ret == -1) {
         PrintError("ThreadSuspend");
-      }
-      else {
+      } else {
         WalkStackMain64(data);
 
         ret = ::ResumeThread(data->thread);
         if (ret == -1) {
           PrintError("ThreadResume");
         }
       }
 
@@ -483,32 +499,33 @@ WalkStackThread(void* aData)
  * chain in aBuffer. For this to work properly, the DLLs must be rebased
  * so that the address in the file agrees with the address in memory.
  * Otherwise StackWalk will return FALSE when it hits a frame in a DLL
  * whose in memory address doesn't match its in-file address.
  */
 
 EXPORT_XPCOM_API(nsresult)
 NS_StackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
-             uint32_t aMaxFrames, void *aClosure, uintptr_t aThread,
-             void *aPlatformData)
+             uint32_t aMaxFrames, void* aClosure, uintptr_t aThread,
+             void* aPlatformData)
 {
   StackWalkInitCriticalAddress();
   static HANDLE myProcess = nullptr;
   HANDLE myThread;
   DWORD walkerReturn;
   struct WalkStackData data;
 
-  if (!EnsureWalkThreadReady())
+  if (!EnsureWalkThreadReady()) {
     return NS_ERROR_FAILURE;
+  }
 
   HANDLE targetThread = ::GetCurrentThread();
   data.walkCallingThread = true;
   if (aThread) {
-    HANDLE threadToWalk = reinterpret_cast<HANDLE> (aThread);
+    HANDLE threadToWalk = reinterpret_cast<HANDLE>(aThread);
     // walkCallingThread indicates whether we are walking the caller's stack
     data.walkCallingThread = (threadToWalk == targetThread);
     targetThread = threadToWalk;
   }
 
   // We need to avoid calling fprintf and friends if we're walking the stack of
   // another thread, in order to avoid deadlocks.
   const bool shouldBeThreadSafe = !!aThread;
@@ -535,81 +552,85 @@ NS_StackWalk(NS_WalkStackCallback aCallb
       PrintError("DuplicateHandle (thread)");
     }
     return NS_ERROR_FAILURE;
   }
 
   data.skipFrames = aSkipFrames;
   data.thread = myThread;
   data.process = myProcess;
-  void *local_pcs[1024];
+  void* local_pcs[1024];
   data.pcs = local_pcs;
   data.pc_count = 0;
   data.pc_size = ArrayLength(local_pcs);
   data.pc_max = aMaxFrames;
-  void *local_sps[1024];
+  void* local_sps[1024];
   data.sps = local_sps;
   data.sp_count = 0;
   data.sp_size = ArrayLength(local_sps);
   data.platformData = aPlatformData;
 
   if (aThread) {
     // If we're walking the stack of another thread, we don't need to
     // use a separate walker thread.
     WalkStackMain64(&data);
 
     if (data.pc_count > data.pc_size) {
-      data.pcs = (void**) _alloca(data.pc_count * sizeof(void*));
+      data.pcs = (void**)_alloca(data.pc_count * sizeof(void*));
       data.pc_size = data.pc_count;
       data.pc_count = 0;
-      data.sps = (void**) _alloca(data.sp_count * sizeof(void*));
+      data.sps = (void**)_alloca(data.sp_count * sizeof(void*));
       data.sp_size = data.sp_count;
       data.sp_count = 0;
       WalkStackMain64(&data);
     }
   } else {
     data.eventStart = ::CreateEvent(nullptr, FALSE /* auto-reset*/,
                                     FALSE /* initially non-signaled */, nullptr);
     data.eventEnd = ::CreateEvent(nullptr, FALSE /* auto-reset*/,
                                   FALSE /* initially non-signaled */, nullptr);
 
     ::PostThreadMessage(gStackWalkThread, WM_USER, 0, (LPARAM)&data);
 
     walkerReturn = ::SignalObjectAndWait(data.eventStart,
                                          data.eventEnd, INFINITE, FALSE);
-    if (walkerReturn != WAIT_OBJECT_0 && !shouldBeThreadSafe)
+    if (walkerReturn != WAIT_OBJECT_0 && !shouldBeThreadSafe) {
       PrintError("SignalObjectAndWait (1)");
+    }
     if (data.pc_count > data.pc_size) {
-      data.pcs = (void**) _alloca(data.pc_count * sizeof(void*));
+      data.pcs = (void**)_alloca(data.pc_count * sizeof(void*));
       data.pc_size = data.pc_count;
       data.pc_count = 0;
-      data.sps = (void**) _alloca(data.sp_count * sizeof(void*));
+      data.sps = (void**)_alloca(data.sp_count * sizeof(void*));
       data.sp_size = data.sp_count;
       data.sp_count = 0;
       ::PostThreadMessage(gStackWalkThread, WM_USER, 0, (LPARAM)&data);
       walkerReturn = ::SignalObjectAndWait(data.eventStart,
                                            data.eventEnd, INFINITE, FALSE);
-      if (walkerReturn != WAIT_OBJECT_0 && !shouldBeThreadSafe)
+      if (walkerReturn != WAIT_OBJECT_0 && !shouldBeThreadSafe) {
         PrintError("SignalObjectAndWait (2)");
+      }
     }
 
     ::CloseHandle(data.eventStart);
     ::CloseHandle(data.eventEnd);
   }
 
   ::CloseHandle(myThread);
 
-  for (uint32_t i = 0; i < data.pc_count; ++i)
+  for (uint32_t i = 0; i < data.pc_count; ++i) {
     (*aCallback)(data.pcs[i], data.sps[i], aClosure);
+  }
 
   return data.pc_count == 0 ? NS_ERROR_FAILURE : NS_OK;
 }
 
 
-static BOOL CALLBACK callbackEspecial64(
+static BOOL CALLBACK
+callbackEspecial64(
   PCSTR aModuleName,
   DWORD64 aModuleBase,
   ULONG aModuleSize,
   PVOID aUserContext)
 {
   BOOL retval = TRUE;
   DWORD64 addr = *(DWORD64*)aUserContext;
 
@@ -625,18 +646,19 @@ static BOOL CALLBACK callbackEspecial64(
    */
   if (addressIncreases
       ? (addr >= aModuleBase && addr <= (aModuleBase + aModuleSize))
       : (addr <= aModuleBase && addr >= (aModuleBase - aModuleSize))
      ) {
     retval = !!SymLoadModule64(GetCurrentProcess(), nullptr,
                                (PSTR)aModuleName, nullptr,
                                aModuleBase, aModuleSize);
-    if (!retval)
+    if (!retval) {
       PrintError("SymLoadModule64");
+    }
   }
 
   return retval;
 }
 
 /*
  * SymGetModuleInfoEspecial
  *
@@ -657,62 +679,63 @@ static BOOL CALLBACK callbackEspecial64(
 // when these changes were made, ifdef based on a constant that was
 // added between these versions.
 #ifdef SSRVOPT_SETCONTEXT
 #define NS_IMAGEHLP_MODULE64_SIZE (((offsetof(IMAGEHLP_MODULE64, LoadedPdbName) + sizeof(DWORD64) - 1) / sizeof(DWORD64)) * sizeof(DWORD64))
 #else
 #define NS_IMAGEHLP_MODULE64_SIZE sizeof(IMAGEHLP_MODULE64)
 #endif
 
-BOOL SymGetModuleInfoEspecial64(HANDLE aProcess, DWORD64 aAddr, PIMAGEHLP_MODULE64 aModuleInfo, PIMAGEHLP_LINE64 aLineInfo)
+BOOL SymGetModuleInfoEspecial64(HANDLE aProcess, DWORD64 aAddr,
+                                PIMAGEHLP_MODULE64 aModuleInfo,
+                                PIMAGEHLP_LINE64 aLineInfo)
 {
   BOOL retval = FALSE;
 
   /*
    * Init the vars if we have em.
    */
   aModuleInfo->SizeOfStruct = NS_IMAGEHLP_MODULE64_SIZE;
-  if (nullptr != aLineInfo) {
+  if (aLineInfo) {
     aLineInfo->SizeOfStruct = sizeof(IMAGEHLP_LINE64);
   }
 
   /*
    * Give it a go.
    * It may already be loaded.
    */
   retval = SymGetModuleInfo64(aProcess, aAddr, aModuleInfo);
-
-  if (FALSE == retval) {
-    BOOL enumRes = FALSE;
-
+  if (retval == FALSE) {
     /*
      * Not loaded, here's the magic.
      * Go through all the modules.
      */
     // Need to cast to PENUMLOADED_MODULES_CALLBACK64 because the
     // constness of the first parameter of
     // PENUMLOADED_MODULES_CALLBACK64 varies over SDK versions (from
     // non-const to const over time).  See bug 391848 and bug
     // 415426.
-    enumRes = EnumerateLoadedModules64(aProcess, (PENUMLOADED_MODULES_CALLBACK64)callbackEspecial64, (PVOID)&aAddr);
-    if (FALSE != enumRes)
-    {
+    BOOL enumRes = EnumerateLoadedModules64(
+      aProcess,
+      (PENUMLOADED_MODULES_CALLBACK64)callbackEspecial64,
+      (PVOID)&aAddr);
+    if (enumRes != FALSE) {
       /*
        * One final go.
        * If it fails, then well, we have other problems.
        */
       retval = SymGetModuleInfo64(aProcess, aAddr, aModuleInfo);
     }
   }
 
   /*
    * If we got module info, we may attempt line info as well.
    * We will not report failure if this does not work.
    */
-  if (FALSE != retval && nullptr != aLineInfo) {
+  if (retval != FALSE && aLineInfo) {
     DWORD displacement = 0;
     BOOL lineRes = FALSE;
     lineRes = SymGetLineFromAddr64(aProcess, aAddr, &displacement, aLineInfo);
     if (!lineRes) {
       // Clear out aLineInfo to indicate that it's not valid
       memset(aLineInfo, 0, sizeof(*aLineInfo));
     }
   }
@@ -721,46 +744,50 @@ BOOL SymGetModuleInfoEspecial64(HANDLE a
 }
 
 bool
 EnsureSymInitialized()
 {
   static bool gInitialized = false;
   bool retStat;
 
-  if (gInitialized)
+  if (gInitialized) {
     return gInitialized;
+  }
 
-  if (!EnsureWalkThreadReady())
+  if (!EnsureWalkThreadReady()) {
     return false;
+  }
 
   SymSetOptions(SYMOPT_LOAD_LINES | SYMOPT_UNDNAME);
   retStat = SymInitialize(GetCurrentProcess(), nullptr, TRUE);
-  if (!retStat)
+  if (!retStat) {
     PrintError("SymInitialize");
+  }
 
   gInitialized = retStat;
   /* XXX At some point we need to arrange to call SymCleanup */
 
   return retStat;
 }
 
 
 EXPORT_XPCOM_API(nsresult)
-NS_DescribeCodeAddress(void *aPC, nsCodeAddressDetails *aDetails)
+NS_DescribeCodeAddress(void* aPC, nsCodeAddressDetails* aDetails)
 {
   aDetails->library[0] = '\0';
   aDetails->loffset = 0;
   aDetails->filename[0] = '\0';
   aDetails->lineno = 0;
   aDetails->function[0] = '\0';
   aDetails->foffset = 0;
 
-  if (!EnsureSymInitialized())
+  if (!EnsureSymInitialized()) {
     return NS_ERROR_FAILURE;
+  }
 
   HANDLE myProcess = ::GetCurrentProcess();
   BOOL ok;
 
   // debug routines are not threadsafe, so grab the lock.
   EnterCriticalSection(&gDbgHelpCS);
 
   //
@@ -772,27 +799,27 @@ NS_DescribeCodeAddress(void *aPC, nsCode
   IMAGEHLP_MODULE64 modInfo;
   IMAGEHLP_LINE64 lineInfo;
   BOOL modInfoRes;
   modInfoRes = SymGetModuleInfoEspecial64(myProcess, addr, &modInfo, &lineInfo);
 
   if (modInfoRes) {
     PL_strncpyz(aDetails->library, modInfo.ModuleName,
                 sizeof(aDetails->library));
-    aDetails->loffset = (char*) aPC - (char*) modInfo.BaseOfImage;
+    aDetails->loffset = (char*)aPC - (char*)modInfo.BaseOfImage;
 
     if (lineInfo.FileName) {
       PL_strncpyz(aDetails->filename, lineInfo.FileName,
                   sizeof(aDetails->filename));
       aDetails->lineno = lineInfo.LineNumber;
     }
   }
 
   ULONG64 buffer[(sizeof(SYMBOL_INFO) +
-    MAX_SYM_NAME*sizeof(TCHAR) + sizeof(ULONG64) - 1) / sizeof(ULONG64)];
+    MAX_SYM_NAME * sizeof(TCHAR) + sizeof(ULONG64) - 1) / sizeof(ULONG64)];
   PSYMBOL_INFO pSymbol = (PSYMBOL_INFO)buffer;
   pSymbol->SizeOfStruct = sizeof(SYMBOL_INFO);
   pSymbol->MaxNameLen = MAX_SYM_NAME;
 
   DWORD64 displacement;
   ok = SymFromAddr(myProcess, addr, &displacement, pSymbol);
 
   if (ok) {
@@ -801,18 +828,18 @@ NS_DescribeCodeAddress(void *aPC, nsCode
     aDetails->foffset = static_cast<ptrdiff_t>(displacement);
   }
 
   LeaveCriticalSection(&gDbgHelpCS); // release our lock
   return NS_OK;
 }
 
 EXPORT_XPCOM_API(nsresult)
-NS_FormatCodeAddressDetails(void *aPC, const nsCodeAddressDetails *aDetails,
-                            char *aBuffer, uint32_t aBufferSize)
+NS_FormatCodeAddressDetails(void* aPC, const nsCodeAddressDetails* aDetails,
+                            char* aBuffer, uint32_t aBufferSize)
 {
   if (aDetails->function[0]) {
     _snprintf(aBuffer, aBufferSize, "%s+0x%08lX [%s +0x%016lX]",
               aDetails->function, aDetails->foffset,
               aDetails->library, aDetails->loffset);
   } else if (aDetails->library[0]) {
     _snprintf(aBuffer, aBufferSize, "UNKNOWN [%s +0x%016lX]",
               aDetails->library, aDetails->loffset);
@@ -823,18 +850,19 @@ NS_FormatCodeAddressDetails(void *aPC, c
   aBuffer[aBufferSize - 1] = '\0';
 
   uint32_t len = strlen(aBuffer);
   if (aDetails->filename[0]) {
     _snprintf(aBuffer + len, aBufferSize - len, " (%s, line %d)\n",
               aDetails->filename, aDetails->lineno);
   } else {
     aBuffer[len] = '\n';
-    if (++len != aBufferSize)
+    if (++len != aBufferSize) {
       aBuffer[len] = '\0';
+    }
   }
   aBuffer[aBufferSize - 2] = '\n';
   aBuffer[aBufferSize - 1] = '\0';
   return NS_OK;
 }
 
 // WIN32 x86 stack walking code
 // i386 or PPC Linux stackwalking code or Solaris
@@ -855,29 +883,28 @@ NS_FormatCodeAddressDetails(void *aPC, c
 #endif
 
 // This thing is exported by libstdc++
 // Yes, this is a gcc only hack
 #if defined(MOZ_DEMANGLE_SYMBOLS)
 #include <cxxabi.h>
 #endif // MOZ_DEMANGLE_SYMBOLS
 
/**
 * Demangles a C++ symbol name.
 *
 * @param aSymbol The (possibly mangled) symbol name.
 * @param aBuffer Receives the demangled name, or the empty string when
 *                demangling fails or is not compiled in.
 * @param aBufLen Size of aBuffer in bytes; if < 1, aBuffer is untouched.
 */
void DemangleSymbol(const char* aSymbol,
                    char* aBuffer,
                    int aBufLen)
{
  // Guard against a non-positive buffer length before writing anything.
  if (aBufLen < 1) {
    return;
  }
  aBuffer[0] = '\0';

#if defined(MOZ_DEMANGLE_SYMBOLS)
  /* See demangle.h in the gcc source for the voodoo */
  // nullptr (not 0) for consistency with the rest of this file.
  char* demangled = abi::__cxa_demangle(aSymbol, nullptr, nullptr, nullptr);

  if (demangled) {
    PL_strncpyz(aBuffer, demangled, aBufLen);
    free(demangled);
  }
#endif // MOZ_DEMANGLE_SYMBOLS
}
 
 
 #if NSSTACKWALK_SUPPORTS_SOLARIS
 
@@ -998,17 +1025,17 @@ load_address(void * pc, void * arg)
   }
   return stop;
 }
 
 
 static struct bucket *
 newbucket(void * pc)
 {
-  struct bucket * ptr = (struct bucket *) malloc(sizeof (*ptr));
+  struct bucket * ptr = (struct bucket *)malloc(sizeof(*ptr));
   static int index; /* protected by lock in caller */
 
   ptr->index = index++;
   ptr->next = nullptr;
   ptr->pc = pc;
   return (ptr);
 }
 
@@ -1054,39 +1081,40 @@ cswalkstack(struct frame *fp, int (*oper
/* Walks the current call stack, invoking |operate_func| on each frame.
 * Thin wrapper that seeds cswalkstack with this frame's frame pointer. */
static void
cs_operate(int (*operate_func)(void *, void *, void *), void * usrarg)
{
  cswalkstack(csgetframeptr(), operate_func, usrarg);
}
 
/**
 * Solaris implementation of NS_StackWalk: walks the calling thread's stack
 * via cs_operate, reporting each frame through load_address/aCallback.
 *
 * @param aCallback      Invoked for every reported frame (via my_user_args).
 * @param aSkipFrames    Frames to skip -- XXX not honored here (see below).
 * @param aMaxFrames     Maximum number of frames to report.
 * @param aClosure       Opaque pointer forwarded to aCallback.
 * @param aThread        Must be 0: walking other threads is unsupported.
 * @param aPlatformData  Must be null: unused on this platform.
 * @return NS_ERROR_FAILURE if no frames were reported, otherwise NS_OK.
 */
EXPORT_XPCOM_API(nsresult)
NS_StackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
             uint32_t aMaxFrames, void* aClosure, uintptr_t aThread,
             void* aPlatformData)
{
  MOZ_ASSERT(!aThread);
  MOZ_ASSERT(!aPlatformData);
  struct my_user_args args;

  StackWalkInitCriticalAddress();

  // One-time setup of the walker's global state.
  if (!initialized) {
    myinit();
  }

  args.callback = aCallback;
  args.skipFrames = aSkipFrames; /* XXX Not handled! */
  args.maxFrames = aMaxFrames;
  args.numFrames = 0;
  args.closure = aClosure;
  cs_operate(load_address, &args);
  return args.numFrames == 0 ? NS_ERROR_FAILURE : NS_OK;
}
 
 EXPORT_XPCOM_API(nsresult)
-NS_DescribeCodeAddress(void *aPC, nsCodeAddressDetails *aDetails)
+NS_DescribeCodeAddress(void* aPC, nsCodeAddressDetails* aDetails)
 {
   aDetails->library[0] = '\0';
   aDetails->loffset = 0;
   aDetails->filename[0] = '\0';
   aDetails->lineno = 0;
   aDetails->function[0] = '\0';
   aDetails->foffset = 0;
 
@@ -1099,31 +1127,32 @@ NS_DescribeCodeAddress(void *aPC, nsCode
                   sizeof(aDetails->library));
       aDetails->loffset = (char*)aPC - (char*)info.dli_fbase;
     }
     if (info.dli_sname) {
       aDetails->foffset = (char*)aPC - (char*)info.dli_saddr;
 #ifdef __GNUC__
       DemangleSymbol(info.dli_sname, dembuff, sizeof(dembuff));
 #else
-      if (!demf || demf(info.dli_sname, dembuff, sizeof (dembuff)))
+      if (!demf || demf(info.dli_sname, dembuff, sizeof(dembuff))) {
         dembuff[0] = 0;
+      }
 #endif /*__GNUC__*/
       PL_strncpyz(aDetails->function,
                   (dembuff[0] != '\0') ? dembuff : info.dli_sname,
                   sizeof(aDetails->function));
     }
   }
 
   return NS_OK;
 }
 
/**
 * Formats one described frame as "<pc> <library>:<function>+0x<offset>\n",
 * substituting "??" for a missing library or function name. Always NS_OK.
 */
EXPORT_XPCOM_API(nsresult)
NS_FormatCodeAddressDetails(void* aPC, const nsCodeAddressDetails* aDetails,
                            char* aBuffer, uint32_t aBufferSize)
{
  snprintf(aBuffer, aBufferSize, "%p %s:%s+0x%lx\n",
           aPC,
           aDetails->library[0] ? aDetails->library : "??",
           aDetails->function[0] ? aDetails->function : "??",
           aDetails->foffset);
  return NS_OK;
}
@@ -1132,123 +1161,125 @@ NS_FormatCodeAddressDetails(void *aPC, c
 
 #if __GLIBC__ > 2 || __GLIBC_MINOR > 1
 #define HAVE___LIBC_STACK_END 1
 #else
 #define HAVE___LIBC_STACK_END 0
 #endif
 
 #if HAVE___LIBC_STACK_END
-extern void *__libc_stack_end; // from ld-linux.so
+extern void* __libc_stack_end; // from ld-linux.so
 #endif
namespace mozilla {

/**
 * Walks the stack by chasing saved frame pointers, starting at |bp|.
 *
 * @param aCallback   Invoked with (pc, approximate-sp, aClosure) per frame.
 * @param aSkipFrames Frames to skip before reporting begins.
 * @param aMaxFrames  Stop after this many frames are reported (0 = no cap).
 * @param aClosure    Opaque pointer forwarded to aCallback.
 * @param bp          Frame pointer to start walking from.
 * @param aStackEnd   Upper bound of the stack, used to sanity-check frames.
 * @return NS_ERROR_UNEXPECTED if a critical PC aborted the walk,
 *         NS_ERROR_FAILURE if no frames were reported, else NS_OK.
 */
nsresult
FramePointerStackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
                      uint32_t aMaxFrames, void* aClosure, void** bp,
                      void* aStackEnd)
{
  // Stack walking code courtesy Kipp's "leaky".

  int32_t skip = aSkipFrames;
  uint32_t numFrames = 0;
  while (1) {
    void** next = (void**)*bp;
    // bp may not be a frame pointer on i386 if code was compiled with
    // -fomit-frame-pointer, so do some sanity checks.
    // (bp should be a frame pointer on ppc(64) but checking anyway may help
    // a little if the stack has been corrupted.)
    // We don't need to check against the beginning of the stack because
    // we can assume that bp > sp
    if (next <= bp ||
        next > aStackEnd ||
        (long(next) & 3)) {
      break;
    }
#if (defined(__ppc__) && defined(XP_MACOSX)) || defined(__powerpc64__)
    // ppc mac or powerpc64 linux
    void* pc = *(bp + 2);
    bp += 3;
#else // i386 or powerpc32 linux
    void* pc = *(bp + 1);
    bp += 2;
#endif
    if (IsCriticalAddress(pc)) {
      printf("Aborting stack trace, PC is critical\n");
      return NS_ERROR_UNEXPECTED;
    }
    if (--skip < 0) {
      // Assume that the SP points to the BP of the function
      // it called. We can't know the exact location of the SP
      // but this should be sufficient for our use of the SP
      // to order elements on the stack.
      (*aCallback)(pc, bp, aClosure);
      numFrames++;
      if (aMaxFrames != 0 && numFrames == aMaxFrames) {
        break;
      }
    }
    bp = next;
  }
  return numFrames == 0 ? NS_ERROR_FAILURE : NS_OK;
}

}
 
 #define X86_OR_PPC (defined(__i386) || defined(PPC) || defined(__ppc__))
 #if X86_OR_PPC && (NSSTACKWALK_SUPPORTS_MACOSX || NSSTACKWALK_SUPPORTS_LINUX) // i386 or PPC Linux or Mac stackwalking code
 
/**
 * Frame-pointer-based NS_StackWalk for i386/PPC on Linux or Mac: captures
 * the current frame pointer and delegates to FramePointerStackWalk.
 *
 * @param aThread        Must be 0: walking other threads is unsupported.
 * @param aPlatformData  Must be null: unused on this platform.
 */
EXPORT_XPCOM_API(nsresult)
NS_StackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
             uint32_t aMaxFrames, void* aClosure, uintptr_t aThread,
             void* aPlatformData)
{
  MOZ_ASSERT(!aThread);
  MOZ_ASSERT(!aPlatformData);
  StackWalkInitCriticalAddress();

  // Get the frame pointer
  void** bp;
#if defined(__i386)
  __asm__("movl %%ebp, %0" : "=g"(bp));
#else
  // It would be nice if this worked uniformly, but at least on i386 and
  // x86_64, it stopped working with gcc 4.1, because it points to the
  // end of the saved registers instead of the start.
  bp = (void**)__builtin_frame_address(0);
#endif

  // Upper stack bound: the loader-provided stack end on glibc, otherwise
  // effectively unbounded (all-ones sentinel).
  void* stackEnd;
#if HAVE___LIBC_STACK_END
  stackEnd = __libc_stack_end;
#else
  stackEnd = reinterpret_cast<void*>(-1);
#endif
  return FramePointerStackWalk(aCallback, aSkipFrames, aMaxFrames,
                               aClosure, bp, stackEnd);

}
 
 #elif defined(HAVE__UNWIND_BACKTRACE)
 
 // libgcc_s.so symbols _Unwind_Backtrace@@GCC_3.3 and _Unwind_GetIP@@GCC_3.0
 #include <unwind.h>
 
// State threaded through _Unwind_Backtrace into unwind_callback.
struct unwind_info
{
  NS_WalkStackCallback callback; // invoked for each reported frame
  int skip;                      // frames still to skip before reporting
  int maxFrames;                 // requested cap (0 means no cap)
  int numFrames;                 // frames reported so far
  bool isCriticalAbort;          // set when a critical PC aborted the walk
  void* closure;                 // opaque pointer forwarded to callback
};
 
 static _Unwind_Reason_Code
-unwind_callback (struct _Unwind_Context *context, void *closure)
+unwind_callback(struct _Unwind_Context* context, void* closure)
 {
-  unwind_info *info = static_cast<unwind_info *>(closure);
-  void *pc = reinterpret_cast<void *>(_Unwind_GetIP(context));
+  unwind_info* info = static_cast<unwind_info*>(closure);
+  void* pc = reinterpret_cast<void*>(_Unwind_GetIP(context));
   // TODO Use something like '_Unwind_GetGR()' to get the stack pointer.
   if (IsCriticalAddress(pc)) {
     printf("Aborting stack trace, PC is critical\n");
     info->isCriticalAbort = true;
     // We just want to stop the walk, so any error code will do.  Using
     // _URC_NORMAL_STOP would probably be the most accurate, but it is not
     // defined on Android for ARM.
     return _URC_FOREIGN_EXCEPTION_CAUGHT;
@@ -1261,18 +1292,18 @@ unwind_callback (struct _Unwind_Context 
       return _URC_FOREIGN_EXCEPTION_CAUGHT;
     }
   }
   return _URC_NO_REASON;
 }
 
 EXPORT_XPCOM_API(nsresult)
 NS_StackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
-             uint32_t aMaxFrames, void *aClosure, uintptr_t aThread,
-             void *aPlatformData)
+             uint32_t aMaxFrames, void* aClosure, uintptr_t aThread,
+             void* aPlatformData)
 {
   MOZ_ASSERT(!aThread);
   MOZ_ASSERT(!aPlatformData);
   StackWalkInitCriticalAddress();
   unwind_info info;
   info.callback = aCallback;
   info.skip = aSkipFrames + 1;
   info.maxFrames = aMaxFrames;
@@ -1285,25 +1316,26 @@ NS_StackWalk(NS_WalkStackCallback aCallb
   // We ignore the return value from _Unwind_Backtrace and instead determine
   // the outcome from |info|.  There are two main reasons for this:
   // - On ARM/Android bionic's _Unwind_Backtrace usually (always?) returns
   //   _URC_FAILURE.  See
   //   https://bugzilla.mozilla.org/show_bug.cgi?id=717853#c110.
   // - If aMaxFrames != 0, we want to stop early, and the only way to do that
   //   is to make unwind_callback return something other than _URC_NO_REASON,
   //   which causes _Unwind_Backtrace to return a non-success code.
-  if (info.isCriticalAbort)
+  if (info.isCriticalAbort) {
     return NS_ERROR_UNEXPECTED;
+  }
   return info.numFrames == 0 ? NS_ERROR_FAILURE : NS_OK;
 }
 
 #endif
 
 EXPORT_XPCOM_API(nsresult)
-NS_DescribeCodeAddress(void *aPC, nsCodeAddressDetails *aDetails)
+NS_DescribeCodeAddress(void* aPC, nsCodeAddressDetails* aDetails)
 {
   aDetails->library[0] = '\0';
   aDetails->loffset = 0;
   aDetails->filename[0] = '\0';
   aDetails->lineno = 0;
   aDetails->function[0] = '\0';
   aDetails->foffset = 0;
 
@@ -1311,35 +1343,35 @@ NS_DescribeCodeAddress(void *aPC, nsCode
   int ok = dladdr(aPC, &info);
   if (!ok) {
     return NS_OK;
   }
 
   PL_strncpyz(aDetails->library, info.dli_fname, sizeof(aDetails->library));
   aDetails->loffset = (char*)aPC - (char*)info.dli_fbase;
 
-  const char * symbol = info.dli_sname;
+  const char* symbol = info.dli_sname;
   if (!symbol || symbol[0] == '\0') {
     return NS_OK;
   }
 
   DemangleSymbol(symbol, aDetails->function, sizeof(aDetails->function));
 
   if (aDetails->function[0] == '\0') {
     // Just use the mangled symbol if demangling failed.
     PL_strncpyz(aDetails->function, symbol, sizeof(aDetails->function));
   }
 
   aDetails->foffset = (char*)aPC - (char*)info.dli_saddr;
   return NS_OK;
 }
 
 EXPORT_XPCOM_API(nsresult)
-NS_FormatCodeAddressDetails(void *aPC, const nsCodeAddressDetails *aDetails,
-                            char *aBuffer, uint32_t aBufferSize)
+NS_FormatCodeAddressDetails(void* aPC, const nsCodeAddressDetails* aDetails,
+                            char* aBuffer, uint32_t aBufferSize)
 {
   if (!aDetails->library[0]) {
     snprintf(aBuffer, aBufferSize, "UNKNOWN %p\n", aPC);
   } else if (!aDetails->function[0]) {
     snprintf(aBuffer, aBufferSize, "UNKNOWN [%s +0x%08" PRIXPTR "]\n",
              aDetails->library, aDetails->loffset);
   } else {
     snprintf(aBuffer, aBufferSize, "%s+0x%08" PRIXPTR
@@ -1351,46 +1383,46 @@ NS_FormatCodeAddressDetails(void *aPC, c
 }
 
 #endif
 
 #else // unsupported platform.
 
/**
 * Unsupported-platform stub: stack walking is not available here.
 * Asserts the same preconditions as the real implementations.
 * @return NS_ERROR_NOT_IMPLEMENTED always.
 */
EXPORT_XPCOM_API(nsresult)
NS_StackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
             uint32_t aMaxFrames, void* aClosure, uintptr_t aThread,
             void* aPlatformData)
{
  MOZ_ASSERT(!aThread);
  MOZ_ASSERT(!aPlatformData);
  return NS_ERROR_NOT_IMPLEMENTED;
}
 
 namespace mozilla {
 nsresult
 FramePointerStackWalk(NS_WalkStackCallback aCallback, uint32_t aSkipFrames,
-                      void *aClosure, void **bp)
+                      void* aClosure, void** aBp)
 {
   return NS_ERROR_NOT_IMPLEMENTED;
 }
 }
 
/**
 * Unsupported-platform stub: clears the name and offset fields of aDetails
 * so callers observe empty results, then reports the lack of support.
 * @return NS_ERROR_NOT_IMPLEMENTED always.
 */
EXPORT_XPCOM_API(nsresult)
NS_DescribeCodeAddress(void* aPC, nsCodeAddressDetails* aDetails)
{
  aDetails->library[0] = '\0';
  aDetails->loffset = 0;
  aDetails->filename[0] = '\0';
  aDetails->lineno = 0;
  aDetails->function[0] = '\0';
  aDetails->foffset = 0;
  return NS_ERROR_NOT_IMPLEMENTED;
}
 
 EXPORT_XPCOM_API(nsresult)