Bug 1026319 - Convert the second quarter of MFBT to Gecko style. r=froydnj.
author       Nicholas Nethercote <nnethercote@mozilla.com>
date         Thu, 12 Jun 2014 23:34:08 -0700
changeset    210314 cf068fd95d3cef2e75205ae37c937bfaee01506f
parent       210313 f57cf85fd1289a03e5794704f7e7483db4999b3a
child        210315 1ffd2122f12040a74b777ec51654ddd20aca9179
push id      1
push user    sledru@mozilla.com
push date    Thu, 04 Dec 2014 17:57:20 +0000
reviewers    froydnj
bugs         1026319
milestone    33.0a1
js/src/jit/CodeGenerator.cpp
js/src/jit/RangeAnalysis.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jsmath.cpp
js/src/vm/NumericConversions.h
mfbt/Assertions.h
mfbt/Attributes.h
mfbt/BloomFilter.h
mfbt/Endian.h
mfbt/EnumSet.h
mfbt/EnumeratedArray.h
mfbt/FloatingPoint.cpp
mfbt/FloatingPoint.h
mfbt/GuardObjects.h
mfbt/HashFunctions.cpp
mfbt/HashFunctions.h
mfbt/IntegerTypeTraits.h
mfbt/LinkedList.h
mfbt/MSIntTypes.h
mfbt/MathAlgorithms.h
mfbt/Maybe.h
mfbt/MaybeOneOf.h
mfbt/MemoryChecking.h
mfbt/Move.h
mfbt/NullPtr.h
mfbt/NumericLimits.h
mfbt/PodOperations.h
mfbt/Poison.cpp
mfbt/tests/TestFloatingPoint.cpp
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -8809,17 +8809,17 @@ CodeGenerator::emitAssertRangeD(const Ra
         masm.assumeUnreachable("Double input should be lower or equal than Upperbound.");
         masm.bind(&success);
     }
 
     // This code does not yet check r->canHaveFractionalPart(). This would require new
     // assembler interfaces to make rounding instructions available.
 
     if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
-        r->exponent() < FloatingPoint<double>::ExponentBias)
+        r->exponent() < FloatingPoint<double>::kExponentBias)
     {
         // Check the bounds implied by the maximum exponent.
         Label exponentLoOk;
         masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
         masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
         masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &exponentLoOk);
         masm.assumeUnreachable("Check for exponent failed.");
         masm.bind(&exponentLoOk);
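
For reference, a minimal sketch (not part of this patch) of what the renamed
double constants evaluate to, using only values defined in mfbt/FloatingPoint.h
further down in this changeset:

  #include "mozilla/FloatingPoint.h"

  // IEEE-754 double: 11 exponent bits biased by 1023, 52 significand bits.
  static_assert(mozilla::FloatingPoint<double>::kExponentBias == 1023,
                "double exponent bias");
  static_assert(mozilla::FloatingPoint<double>::kExponentShift == 52,
                "shift that moves the exponent field down to bit 0");
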
--- a/js/src/jit/RangeAnalysis.h
+++ b/js/src/jit/RangeAnalysis.h
@@ -115,20 +115,20 @@ class Range : public TempObject {
 
     // UInt32 are unsigned. UINT32_MAX is pow(2,32)-1, so it's the greatest
     // value that has an exponent of 31.
     static const uint16_t MaxUInt32Exponent = 31;
 
     // Maximal exponent under which we have no precision loss on double
     // operations. Double has 52 bits of mantissa, so 2^52+1 cannot be
     // represented without loss.
-    static const uint16_t MaxTruncatableExponent = mozilla::FloatingPoint<double>::ExponentShift;
+    static const uint16_t MaxTruncatableExponent = mozilla::FloatingPoint<double>::kExponentShift;
 
     // Maximum exponent for finite values.
-    static const uint16_t MaxFiniteExponent = mozilla::FloatingPoint<double>::ExponentBias;
+    static const uint16_t MaxFiniteExponent = mozilla::FloatingPoint<double>::kExponentBias;
 
     // A special exponent value representing all non-NaN values. This
     // includes finite values and the infinities.
     static const uint16_t IncludesInfinity = MaxFiniteExponent + 1;
 
     // A special exponent value representing all possible double-precision
     // values. This includes finite values, the infinities, and NaNs.
     static const uint16_t IncludesInfinityAndNaN = UINT16_MAX;
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -533,29 +533,29 @@ CodeGeneratorX86Shared::visitMinMaxD(LMi
 }
 
 bool
 CodeGeneratorX86Shared::visitAbsD(LAbsD *ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     JS_ASSERT(input == ToFloatRegister(ins->output()));
     // Load a value which is all ones except for the sign bit.
-    masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::SignificandBits),
+    masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::kSignificandBits),
                             ScratchFloatReg);
     masm.andpd(ScratchFloatReg, input);
     return true;
 }
 
 bool
 CodeGeneratorX86Shared::visitAbsF(LAbsF *ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     JS_ASSERT(input == ToFloatRegister(ins->output()));
     // Same trick as visitAbsD above.
-    masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::SignificandBits),
+    masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits),
                              ScratchFloatReg);
     masm.andps(ScratchFloatReg, input);
     return true;
 }
 
 bool
 CodeGeneratorX86Shared::visitSqrtD(LSqrtD *ins)
 {
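
A C-level restatement (illustrative only, not part of this patch) of why that
constant works as an abs() mask: SpecificNaN<double>(0, kSignificandBits) has
every exponent and significand bit set and the sign bit clear, i.e. its bit
pattern is ~kSignBit, so the andpd above clears exactly the sign bit.

  #include <stdint.h>
  #include "mozilla/Casting.h"
  #include "mozilla/FloatingPoint.h"

  double AbsViaMask(double aValue)
  {
    typedef mozilla::FloatingPoint<double> Traits;
    uint64_t bits = mozilla::BitwiseCast<uint64_t>(aValue);
    // Clearing the sign bit is what visitAbsD does with andpd.
    return mozilla::BitwiseCast<double>(bits & ~Traits::kSignBit);
  }
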
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -700,18 +700,18 @@ CodeGeneratorX86::visitOutOfLineTruncate
     Label fail;
 
     if (Assembler::HasSSE3()) {
         // Push double.
         masm.subl(Imm32(sizeof(double)), esp);
         masm.storeDouble(input, Operand(esp, 0));
 
         static const uint32_t EXPONENT_MASK = 0x7ff00000;
-        static const uint32_t EXPONENT_SHIFT = FloatingPoint<double>::ExponentShift - 32;
-        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::ExponentBias + 63)
+        static const uint32_t EXPONENT_SHIFT = FloatingPoint<double>::kExponentShift - 32;
+        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::kExponentBias + 63)
                                                  << EXPONENT_SHIFT;
 
         // Check exponent to avoid fp exceptions.
         Label failPopDouble;
         masm.load32(Address(esp, 4), output);
         masm.and32(Imm32(EXPONENT_MASK), output);
         masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(TOO_BIG_EXPONENT), &failPopDouble);
 
@@ -788,20 +788,20 @@ CodeGeneratorX86::visitOutOfLineTruncate
 
     Label fail;
 
     if (Assembler::HasSSE3()) {
         // Push float32, but subtracts 64 bits so that the value popped by fisttp fits
         masm.subl(Imm32(sizeof(uint64_t)), esp);
         masm.storeFloat32(input, Operand(esp, 0));
 
-        static const uint32_t EXPONENT_MASK = FloatingPoint<float>::ExponentBits;
-        static const uint32_t EXPONENT_SHIFT = FloatingPoint<float>::ExponentShift;
+        static const uint32_t EXPONENT_MASK = FloatingPoint<float>::kExponentBits;
+        static const uint32_t EXPONENT_SHIFT = FloatingPoint<float>::kExponentShift;
         // Integers are still 64 bits long, so we can still test for an exponent > 63.
-        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::ExponentBias + 63)
+        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::kExponentBias + 63)
                                                  << EXPONENT_SHIFT;
 
         // Check exponent to avoid fp exceptions.
         Label failPopFloat;
         masm.movl(Operand(esp, 0), output);
         masm.and32(Imm32(EXPONENT_MASK), output);
         masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(TOO_BIG_EXPONENT), &failPopFloat);
 
--- a/js/src/jsmath.cpp
+++ b/js/src/jsmath.cpp
@@ -779,31 +779,31 @@ js_math_random(JSContext *cx, unsigned a
 double
 js::math_round_impl(double x)
 {
     int32_t ignored;
     if (NumberIsInt32(x, &ignored))
         return x;
 
     /* Some numbers are so big that adding 0.5 would give the wrong number. */
-    if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<double>::ExponentShift))
+    if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<double>::kExponentShift))
         return x;
 
     return js_copysign(floor(x + 0.5), x);
 }
 
 float
 js::math_roundf_impl(float x)
 {
     int32_t ignored;
     if (NumberIsInt32(x, &ignored))
         return x;
 
     /* Some numbers are so big that adding 0.5 would give the wrong number. */
-    if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<float>::ExponentShift))
+    if (ExponentComponent(x) >= int_fast16_t(FloatingPoint<float>::kExponentShift))
         return x;
 
     return js_copysign(floorf(x + 0.5f), x);
 }
 
 bool /* ES5 15.8.2.15. */
 js::math_round(JSContext *cx, unsigned argc, Value *vp)
 {
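
A worked example (not part of this patch) of the case the exponent test guards
against: for x = 2^52 + 1, x + 0.5 is not representable as a double, so the
naive floor(x + 0.5) would return the wrong integer.  Any double whose
ExponentComponent is >= 52 is already an integer, so returning it unchanged is
correct.

  // Illustrative values:
  //   x              = 4503599627370497.0   (2^52 + 1, exponent 52)
  //   x + 0.5        = 4503599627370498.0   (497.5 rounds to nearest even)
  //   floor(x + 0.5) = 4503599627370498.0   (off by one)
  //   math_round_impl(x) == x               (early return via the exponent test)
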
--- a/js/src/vm/NumericConversions.h
+++ b/js/src/vm/NumericConversions.h
@@ -34,23 +34,23 @@ namespace detail {
 template<typename ResultType>
 inline ResultType
 ToUintWidth(double d)
 {
     static_assert(mozilla::IsUnsigned<ResultType>::value,
                   "ResultType must be an unsigned type");
 
     uint64_t bits = mozilla::BitwiseCast<uint64_t>(d);
-    unsigned DoubleExponentShift = mozilla::FloatingPoint<double>::ExponentShift;
+    unsigned DoubleExponentShift = mozilla::FloatingPoint<double>::kExponentShift;
 
     // Extract the exponent component.  (Be careful here!  It's not technically
     // the exponent in NaN, infinities, and subnormals.)
     int_fast16_t exp =
-        int_fast16_t((bits & mozilla::FloatingPoint<double>::ExponentBits) >> DoubleExponentShift) -
-        int_fast16_t(mozilla::FloatingPoint<double>::ExponentBias);
+        int_fast16_t((bits & mozilla::FloatingPoint<double>::kExponentBits) >> DoubleExponentShift) -
+        int_fast16_t(mozilla::FloatingPoint<double>::kExponentBias);
 
     // If the exponent's less than zero, abs(d) < 1, so the result is 0.  (This
     // also handles subnormals.)
     if (exp < 0)
         return 0;
 
     uint_fast16_t exponent = mozilla::SafeCast<uint_fast16_t>(exp);
 
@@ -99,17 +99,17 @@ ToUintWidth(double d)
     //   again, |exponent < ResultWidth|.
     if (exponent < ResultWidth) {
         ResultType implicitOne = ResultType(1) << exponent;
         result &= implicitOne - 1; // remove bogus bits
         result += implicitOne; // add the implicit bit
     }
 
     // Compute the congruent value in the signed range.
-    return (bits & mozilla::FloatingPoint<double>::SignBit) ? ~result + 1 : result;
+    return (bits & mozilla::FloatingPoint<double>::kSignBit) ? ~result + 1 : result;
 }
 
 template<typename ResultType>
 inline ResultType
 ToIntWidth(double d)
 {
     static_assert(mozilla::IsSigned<ResultType>::value,
                   "ResultType must be a signed type");
--- a/mfbt/Assertions.h
+++ b/mfbt/Assertions.h
@@ -404,18 +404,19 @@ void ValidateAssertConditionType()
  *   MOZ_ASSERT_IF(isPrime(num), num == 2 || isOdd(num));
  *
  * As with MOZ_ASSERT, MOZ_ASSERT_IF has effect only in debug builds.  It is
  * designed to catch bugs during debugging, not "in the field".
  */
 #ifdef DEBUG
 #  define MOZ_ASSERT_IF(cond, expr) \
      do { \
-       if (cond) \
+       if (cond) { \
          MOZ_ASSERT(expr); \
+       } \
      } while (0)
 #else
 #  define MOZ_ASSERT_IF(cond, expr)  do { } while (0)
 #endif
 
 /*
  * MOZ_ASSUME_UNREACHABLE_MARKER() expands to an expression which states that
  * it is undefined behavior for execution to reach this point.  No guarantees
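
A small usage sketch for the MOZ_ASSERT_IF macro above (illustrative only;
Container, mBuffer, and mLength are made-up names, not part of this patch):

  void Container::SetLength(size_t aLength)
  {
    // Only demand a backing buffer when the new length is non-zero.
    MOZ_ASSERT_IF(aLength > 0, mBuffer != nullptr);
    mLength = aLength;
  }
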
--- a/mfbt/Attributes.h
+++ b/mfbt/Attributes.h
@@ -268,18 +268,18 @@
  * class.  An attempt to use the method will always produce an error *at compile
  * time* (instead of sometimes as late as link time) when this macro can be
  * implemented.  For example, you can use MOZ_DELETE to produce classes with no
  * implicit copy constructor or assignment operator:
  *
  *   struct NonCopyable
  *   {
  *   private:
- *     NonCopyable(const NonCopyable& other) MOZ_DELETE;
- *     void operator=(const NonCopyable& other) MOZ_DELETE;
+ *     NonCopyable(const NonCopyable& aOther) MOZ_DELETE;
+ *     void operator=(const NonCopyable& aOther) MOZ_DELETE;
  *   };
  *
  * If MOZ_DELETE can't be implemented for the current compiler, use of the
  * annotated method will still cause an error, but the error might occur at link
  * time in some cases rather than at compile time.
  *
  * MOZ_DELETE relies on C++11 functionality not universally implemented.  As a
  * backstop, method declarations using MOZ_DELETE should be private.
--- a/mfbt/BloomFilter.h
+++ b/mfbt/BloomFilter.h
@@ -192,18 +192,18 @@ BloomFilter<KeySize, T>::clear()
 template<unsigned KeySize, class T>
 inline void
 BloomFilter<KeySize, T>::add(uint32_t aHash)
 {
   uint8_t& slot1 = firstSlot(aHash);
   if (MOZ_LIKELY(!full(slot1))) {
     ++slot1;
   }
-  uint8_t& slot2 = secondSlot(aHash); {
-  if (MOZ_LIKELY(!full(slot2)))
+  uint8_t& slot2 = secondSlot(aHash);
+  if (MOZ_LIKELY(!full(slot2))) {
     ++slot2;
   }
 }
 
 template<unsigned KeySize, class T>
 MOZ_ALWAYS_INLINE void
 BloomFilter<KeySize, T>::add(const T* aValue)
 {
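
A rough usage sketch (not part of this patch; Atom is a made-up element type,
and it assumes the hash()-providing interface and mightContain() query that
BloomFilter.h documents outside this hunk):

  // Counting Bloom filter with 12-bit keys over values exposing
  // uint32_t hash() const.
  static mozilla::BloomFilter<12, Atom> sFilter;

  void Track(const Atom* aAtom)
  {
    sFilter.add(aAtom);
  }

  bool MightBeTracked(const Atom* aAtom)
  {
    // May report false positives, never false negatives.
    return sFilter.mightContain(aAtom);
  }
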
--- a/mfbt/Endian.h
+++ b/mfbt/Endian.h
@@ -37,31 +37,32 @@
  * For clarity in networking code, "Network" may be used as a synonym
  * for "Big" in any of the above methods or class names.
  *
  * As an example, reading a file format header whose fields are stored
  * in big-endian format might look like:
  *
  * class ExampleHeader
  * {
- *   private:
- *     uint32_t magic;
- *     uint32_t length;
- *     uint32_t totalRecords;
- *     uint64_t checksum;
+ * private:
+ *   uint32_t mMagic;
+ *   uint32_t mLength;
+ *   uint32_t mTotalRecords;
+ *   uint64_t mChecksum;
  *
- *   public:
- *     ExampleHeader(const void* data) {
- *       const uint8_t* ptr = static_cast<const uint8_t*>(data);
- *       magic = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
- *       length = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
- *       totalRecords = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
- *       checksum = BigEndian::readUint64(ptr);
- *     }
- *     ...
+ * public:
+ *   ExampleHeader(const void* data)
+ *   {
+ *     const uint8_t* ptr = static_cast<const uint8_t*>(data);
+ *     mMagic = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ *     mLength = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ *     mTotalRecords = BigEndian::readUint32(ptr); ptr += sizeof(uint32_t);
+ *     mChecksum = BigEndian::readUint64(ptr);
+ *   }
+ *   ...
  * };
  */
 
 #ifndef mozilla_Endian_h
 #define mozilla_Endian_h
 
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
@@ -164,476 +165,530 @@ namespace detail {
  * supported by all the compilers we use.
  */
 template<typename T, size_t Size = sizeof(T)>
 struct Swapper;
 
 template<typename T>
 struct Swapper<T, 2>
 {
-  static T swap(T value)
+  static T swap(T aValue)
   {
 #if defined(MOZ_HAVE_BUILTIN_BYTESWAP16)
-    return MOZ_HAVE_BUILTIN_BYTESWAP16(value);
+    return MOZ_HAVE_BUILTIN_BYTESWAP16(aValue);
 #else
-    return T(((value & 0x00ff) << 8) | ((value & 0xff00) >> 8));
+    return T(((aValue & 0x00ff) << 8) | ((aValue & 0xff00) >> 8));
 #endif
   }
 };
 
 template<typename T>
 struct Swapper<T, 4>
 {
-  static T swap(T value)
+  static T swap(T aValue)
   {
 #if defined(__clang__) || defined(__GNUC__)
-    return T(__builtin_bswap32(value));
+    return T(__builtin_bswap32(aValue));
 #elif defined(_MSC_VER)
-    return T(_byteswap_ulong(value));
+    return T(_byteswap_ulong(aValue));
 #else
-    return T(((value & 0x000000ffU) << 24) |
-             ((value & 0x0000ff00U) << 8) |
-             ((value & 0x00ff0000U) >> 8) |
-             ((value & 0xff000000U) >> 24));
+    return T(((aValue & 0x000000ffU) << 24) |
+             ((aValue & 0x0000ff00U) << 8) |
+             ((aValue & 0x00ff0000U) >> 8) |
+             ((aValue & 0xff000000U) >> 24));
 #endif
   }
 };
 
 template<typename T>
 struct Swapper<T, 8>
 {
-  static inline T swap(T value)
+  static inline T swap(T aValue)
   {
 #if defined(__clang__) || defined(__GNUC__)
-    return T(__builtin_bswap64(value));
+    return T(__builtin_bswap64(aValue));
 #elif defined(_MSC_VER)
-    return T(_byteswap_uint64(value));
+    return T(_byteswap_uint64(aValue));
 #else
-    return T(((value & 0x00000000000000ffULL) << 56) |
-             ((value & 0x000000000000ff00ULL) << 40) |
-             ((value & 0x0000000000ff0000ULL) << 24) |
-             ((value & 0x00000000ff000000ULL) << 8) |
-             ((value & 0x000000ff00000000ULL) >> 8) |
-             ((value & 0x0000ff0000000000ULL) >> 24) |
-             ((value & 0x00ff000000000000ULL) >> 40) |
-             ((value & 0xff00000000000000ULL) >> 56));
+    return T(((aValue & 0x00000000000000ffULL) << 56) |
+             ((aValue & 0x000000000000ff00ULL) << 40) |
+             ((aValue & 0x0000000000ff0000ULL) << 24) |
+             ((aValue & 0x00000000ff000000ULL) << 8) |
+             ((aValue & 0x000000ff00000000ULL) >> 8) |
+             ((aValue & 0x0000ff0000000000ULL) >> 24) |
+             ((aValue & 0x00ff000000000000ULL) >> 40) |
+             ((aValue & 0xff00000000000000ULL) >> 56));
 #endif
   }
 };
 
 enum Endianness { Little, Big };
 
 #if MOZ_BIG_ENDIAN
 #  define MOZ_NATIVE_ENDIANNESS detail::Big
 #else
 #  define MOZ_NATIVE_ENDIANNESS detail::Little
 #endif
 
 class EndianUtils
 {
-    /**
-     * Assert that the memory regions [dest, dest+count) and [src, src+count]
-     * do not overlap.  count is given in bytes.
-     */
-    static void assertNoOverlap(const void* dest, const void* src, size_t count)
-    {
-      DebugOnly<const uint8_t*> byteDestPtr = static_cast<const uint8_t*>(dest);
-      DebugOnly<const uint8_t*> byteSrcPtr = static_cast<const uint8_t*>(src);
-      MOZ_ASSERT((byteDestPtr <= byteSrcPtr &&
-                  byteDestPtr + count <= byteSrcPtr) ||
-                 (byteSrcPtr <= byteDestPtr &&
-                  byteSrcPtr + count <= byteDestPtr));
-    }
+  /**
+   * Assert that the memory regions [aDest, aDest+aCount) and
+   * [aSrc, aSrc+aCount) do not overlap.  aCount is given in bytes.
+   */
+  static void assertNoOverlap(const void* aDest, const void* aSrc,
+                              size_t aCount)
+  {
+    DebugOnly<const uint8_t*> byteDestPtr = static_cast<const uint8_t*>(aDest);
+    DebugOnly<const uint8_t*> byteSrcPtr = static_cast<const uint8_t*>(aSrc);
+    MOZ_ASSERT((byteDestPtr <= byteSrcPtr &&
+                byteDestPtr + aCount <= byteSrcPtr) ||
+               (byteSrcPtr <= byteDestPtr &&
+                byteSrcPtr + aCount <= byteDestPtr));
+  }
 
-    template<typename T>
-    static void assertAligned(T* ptr)
-    {
-      MOZ_ASSERT((uintptr_t(ptr) % sizeof(T)) == 0, "Unaligned pointer!");
-    }
+  template<typename T>
+  static void assertAligned(T* aPtr)
+  {
+    MOZ_ASSERT((uintptr_t(aPtr) % sizeof(T)) == 0, "Unaligned pointer!");
+  }
 
-  protected:
-    /**
-     * Return |value| converted from SourceEndian encoding to DestEndian
-     * encoding.
-     */
-    template<Endianness SourceEndian, Endianness DestEndian, typename T>
-    static inline T maybeSwap(T value)
-    {
-      if (SourceEndian == DestEndian)
-        return value;
-
-      return Swapper<T>::swap(value);
+protected:
+  /**
+   * Return |aValue| converted from SourceEndian encoding to DestEndian
+   * encoding.
+   */
+  template<Endianness SourceEndian, Endianness DestEndian, typename T>
+  static inline T maybeSwap(T aValue)
+  {
+    if (SourceEndian == DestEndian) {
+      return aValue;
     }
+    return Swapper<T>::swap(aValue);
+  }
 
-    /**
-     * Convert |count| elements at |ptr| from SourceEndian encoding to
-     * DestEndian encoding.
-     */
-    template<Endianness SourceEndian, Endianness DestEndian, typename T>
-    static inline void maybeSwapInPlace(T* ptr, size_t count)
-    {
-      assertAligned(ptr);
+  /**
+   * Convert |aCount| elements at |aPtr| from SourceEndian encoding to
+   * DestEndian encoding.
+   */
+  template<Endianness SourceEndian, Endianness DestEndian, typename T>
+  static inline void maybeSwapInPlace(T* aPtr, size_t aCount)
+  {
+    assertAligned(aPtr);
 
-      if (SourceEndian == DestEndian)
-        return;
+    if (SourceEndian == DestEndian) {
+      return;
+    }
+    for (size_t i = 0; i < aCount; i++) {
+      aPtr[i] = Swapper<T>::swap(aPtr[i]);
+    }
+  }
 
-      for (size_t i = 0; i < count; i++)
-        ptr[i] = Swapper<T>::swap(ptr[i]);
+  /**
+   * Write |aCount| elements to the unaligned address |aDest| in DestEndian
+   * format, using elements found at |aSrc| in SourceEndian format.
+   */
+  template<Endianness SourceEndian, Endianness DestEndian, typename T>
+  static void copyAndSwapTo(void* aDest, const T* aSrc, size_t aCount)
+  {
+    assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
+    assertAligned(aSrc);
+
+    if (SourceEndian == DestEndian) {
+      memcpy(aDest, aSrc, aCount * sizeof(T));
+      return;
     }
 
-    /**
-     * Write |count| elements to the unaligned address |dest| in DestEndian
-     * format, using elements found at |src| in SourceEndian format.
-     */
-    template<Endianness SourceEndian, Endianness DestEndian, typename T>
-    static void copyAndSwapTo(void* dest, const T* src, size_t count)
-    {
-      assertNoOverlap(dest, src, count * sizeof(T));
-      assertAligned(src);
+    uint8_t* byteDestPtr = static_cast<uint8_t*>(aDest);
+    for (size_t i = 0; i < aCount; ++i) {
+      union
+      {
+        T mVal;
+        uint8_t mBuffer[sizeof(T)];
+      } u;
+      u.mVal = maybeSwap<SourceEndian, DestEndian>(aSrc[i]);
+      memcpy(byteDestPtr, u.mBuffer, sizeof(T));
+      byteDestPtr += sizeof(T);
+    }
+  }
 
-      if (SourceEndian == DestEndian) {
-        memcpy(dest, src, count * sizeof(T));
-        return;
-      }
+  /**
+   * Write |aCount| elements to |aDest| in DestEndian format, using elements
+   * found at the unaligned address |aSrc| in SourceEndian format.
+   */
+  template<Endianness SourceEndian, Endianness DestEndian, typename T>
+  static void copyAndSwapFrom(T* aDest, const void* aSrc, size_t aCount)
+  {
+    assertNoOverlap(aDest, aSrc, aCount * sizeof(T));
+    assertAligned(aDest);
 
-      uint8_t* byteDestPtr = static_cast<uint8_t*>(dest);
-      for (size_t i = 0; i < count; ++i) {
-        union {
-          T val;
-          uint8_t buffer[sizeof(T)];
-        } u;
-        u.val = maybeSwap<SourceEndian, DestEndian>(src[i]);
-        memcpy(byteDestPtr, u.buffer, sizeof(T));
-        byteDestPtr += sizeof(T);
-      }
+    if (SourceEndian == DestEndian) {
+      memcpy(aDest, aSrc, aCount * sizeof(T));
+      return;
     }
 
-    /**
-     * Write |count| elements to |dest| in DestEndian format, using elements
-     * found at the unaligned address |src| in SourceEndian format.
-     */
-    template<Endianness SourceEndian, Endianness DestEndian, typename T>
-    static void copyAndSwapFrom(T* dest, const void* src, size_t count)
-    {
-      assertNoOverlap(dest, src, count * sizeof(T));
-      assertAligned(dest);
-
-      if (SourceEndian == DestEndian) {
-        memcpy(dest, src, count * sizeof(T));
-        return;
-      }
-
-      const uint8_t* byteSrcPtr = static_cast<const uint8_t*>(src);
-      for (size_t i = 0; i < count; ++i) {
-        union {
-          T val;
-          uint8_t buffer[sizeof(T)];
-        } u;
-        memcpy(u.buffer, byteSrcPtr, sizeof(T));
-        dest[i] = maybeSwap<SourceEndian, DestEndian>(u.val);
-        byteSrcPtr += sizeof(T);
-      }
+    const uint8_t* byteSrcPtr = static_cast<const uint8_t*>(aSrc);
+    for (size_t i = 0; i < aCount; ++i) {
+      union
+      {
+        T mVal;
+        uint8_t mBuffer[sizeof(T)];
+      } u;
+      memcpy(u.mBuffer, byteSrcPtr, sizeof(T));
+      aDest[i] = maybeSwap<SourceEndian, DestEndian>(u.mVal);
+      byteSrcPtr += sizeof(T);
     }
+  }
 };
 
 template<Endianness ThisEndian>
 class Endian : private EndianUtils
 {
-  protected:
-    /** Read a uint16_t in ThisEndian endianness from |p| and return it. */
-    static MOZ_WARN_UNUSED_RESULT uint16_t readUint16(const void* p) {
-      return read<uint16_t>(p);
-    }
+protected:
+  /** Read a uint16_t in ThisEndian endianness from |aPtr| and return it. */
+  static MOZ_WARN_UNUSED_RESULT uint16_t readUint16(const void* aPtr)
+  {
+    return read<uint16_t>(aPtr);
+  }
+
+  /** Read a uint32_t in ThisEndian endianness from |aPtr| and return it. */
+  static MOZ_WARN_UNUSED_RESULT uint32_t readUint32(const void* aPtr)
+  {
+    return read<uint32_t>(aPtr);
+  }
 
-    /** Read a uint32_t in ThisEndian endianness from |p| and return it. */
-    static MOZ_WARN_UNUSED_RESULT uint32_t readUint32(const void* p) {
-      return read<uint32_t>(p);
-    }
+  /** Read a uint64_t in ThisEndian endianness from |aPtr| and return it. */
+  static MOZ_WARN_UNUSED_RESULT uint64_t readUint64(const void* aPtr)
+  {
+    return read<uint64_t>(aPtr);
+  }
 
-    /** Read a uint64_t in ThisEndian endianness from |p| and return it. */
-    static MOZ_WARN_UNUSED_RESULT uint64_t readUint64(const void* p) {
-      return read<uint64_t>(p);
-    }
+  /** Read an int16_t in ThisEndian endianness from |aPtr| and return it. */
+  static MOZ_WARN_UNUSED_RESULT int16_t readInt16(const void* aPtr)
+  {
+    return read<int16_t>(aPtr);
+  }
 
-    /** Read an int16_t in ThisEndian endianness from |p| and return it. */
-    static MOZ_WARN_UNUSED_RESULT int16_t readInt16(const void* p) {
-      return read<int16_t>(p);
-    }
+  /** Read an int32_t in ThisEndian endianness from |aPtr| and return it. */
+  static MOZ_WARN_UNUSED_RESULT int32_t readInt32(const void* aPtr)
+  {
+    return read<uint32_t>(aPtr);
+  }
 
-    /** Read an int32_t in ThisEndian endianness from |p| and return it. */
-    static MOZ_WARN_UNUSED_RESULT int32_t readInt32(const void* p) {
-      return read<uint32_t>(p);
-    }
+  /** Read an int64_t in ThisEndian endianness from |aPtr| and return it. */
+  static MOZ_WARN_UNUSED_RESULT int64_t readInt64(const void* aPtr)
+  {
+    return read<int64_t>(aPtr);
+  }
 
-    /** Read an int64_t in ThisEndian endianness from |p| and return it. */
-    static MOZ_WARN_UNUSED_RESULT int64_t readInt64(const void* p) {
-      return read<int64_t>(p);
-    }
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeUint16(void* aPtr, uint16_t aValue)
+  {
+    write(aPtr, aValue);
+  }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeUint32(void* aPtr, uint32_t aValue)
+  {
+    write(aPtr, aValue);
+  }
 
-    /** Write |val| to |p| using ThisEndian endianness. */
-    static void writeUint16(void* p, uint16_t val) {
-      write(p, val);
-    }
-    /** Write |val| to |p| using ThisEndian endianness. */
-    static void writeUint32(void* p, uint32_t val) {
-      write(p, val);
-    }
-    /** Write |val| to |p| using ThisEndian endianness. */
-    static void writeUint64(void* p, uint64_t val) {
-      write(p, val);
-    }
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeUint64(void* aPtr, uint64_t aValue)
+  {
+    write(aPtr, aValue);
+  }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeInt16(void* aPtr, int16_t aValue)
+  {
+    write(aPtr, aValue);
+  }
+
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeInt32(void* aPtr, int32_t aValue)
+  {
+    write(aPtr, aValue);
+  }
 
-    /** Write |val| to |p| using ThisEndian endianness. */
-    static void writeInt16(void* p, int16_t val) {
-      write(p, val);
-    }
-    /** Write |val| to |p| using ThisEndian endianness. */
-    static void writeInt32(void* p, int32_t val) {
-      write(p, val);
-    }
-    /** Write |val| to |p| using ThisEndian endianness. */
-    static void writeInt64(void* p, int64_t val) {
-      write(p, val);
-    }
+  /** Write |aValue| to |aPtr| using ThisEndian endianness. */
+  static void writeInt64(void* aPtr, int64_t aValue)
+  {
+    write(aPtr, aValue);
+  }
+
+  /*
+   * Converts a value of type T to little-endian format.
+   *
+   * This function is intended for cases where you have data in your
+   * native-endian format and you need it to appear in little-endian
+   * format for transmission.
+   */
+  template<typename T>
+  MOZ_WARN_UNUSED_RESULT static T swapToLittleEndian(T aValue)
+  {
+    return maybeSwap<ThisEndian, Little>(aValue);
+  }
+
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them to little-endian format if ThisEndian is Big.
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template<typename T>
+  static void copyAndSwapToLittleEndian(void* aDest, const T* aSrc,
+                                        size_t aCount)
+  {
+    copyAndSwapTo<ThisEndian, Little>(aDest, aSrc, aCount);
+  }
 
-    /*
-     * Converts a value of type T to little-endian format.
-     *
-     * This function is intended for cases where you have data in your
-     * native-endian format and you need it to appear in little-endian
-     * format for transmission.
-     */
-    template<typename T>
-    MOZ_WARN_UNUSED_RESULT static T swapToLittleEndian(T value) {
-      return maybeSwap<ThisEndian, Little>(value);
-    }
-    /*
-     * Copies count values of type T starting at src to dest, converting
-     * them to little-endian format if ThisEndian is Big.
-     * As with memcpy, dest and src must not overlap.
-     */
-    template<typename T>
-    static void copyAndSwapToLittleEndian(void* dest, const T* src,
-                                          size_t count) {
-      copyAndSwapTo<ThisEndian, Little>(dest, src, count);
-    }
-    /*
-     * Likewise, but converts values in place.
-     */
-    template<typename T>
-    static void swapToLittleEndianInPlace(T* p, size_t count) {
-      maybeSwapInPlace<ThisEndian, Little>(p, count);
-    }
+  /*
+   * Likewise, but converts values in place.
+   */
+  template<typename T>
+  static void swapToLittleEndianInPlace(T* aPtr, size_t aCount)
+  {
+    maybeSwapInPlace<ThisEndian, Little>(aPtr, aCount);
+  }
+
+  /*
+   * Converts a value of type T to big-endian format.
+   */
+  template<typename T>
+  MOZ_WARN_UNUSED_RESULT static T swapToBigEndian(T aValue)
+  {
+    return maybeSwap<ThisEndian, Big>(aValue);
+  }
 
-    /*
-     * Converts a value of type T to big-endian format.
-     */
-    template<typename T>
-    MOZ_WARN_UNUSED_RESULT static T swapToBigEndian(T value) {
-      return maybeSwap<ThisEndian, Big>(value);
-    }
-    /*
-     * Copies count values of type T starting at src to dest, converting
-     * them to big-endian format if ThisEndian is Little.
-     * As with memcpy, dest and src must not overlap.
-     */
-    template<typename T>
-    static void copyAndSwapToBigEndian(void* dest, const T* src, size_t count) {
-      copyAndSwapTo<ThisEndian, Big>(dest, src, count);
-    }
-    /*
-     * Likewise, but converts values in place.
-     */
-    template<typename T>
-    static void swapToBigEndianInPlace(T* p, size_t count) {
-      maybeSwapInPlace<ThisEndian, Big>(p, count);
-    }
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them to big-endian format if ThisEndian is Little.
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template<typename T>
+  static void copyAndSwapToBigEndian(void* aDest, const T* aSrc,
+                                     size_t aCount)
+  {
+    copyAndSwapTo<ThisEndian, Big>(aDest, aSrc, aCount);
+  }
+
+  /*
+   * Likewise, but converts values in place.
+   */
+  template<typename T>
+  static void swapToBigEndianInPlace(T* aPtr, size_t aCount)
+  {
+    maybeSwapInPlace<ThisEndian, Big>(aPtr, aCount);
+  }
 
-    /*
-     * Synonyms for the big-endian functions, for better readability
-     * in network code.
-     */
-    template<typename T>
-    MOZ_WARN_UNUSED_RESULT static T swapToNetworkOrder(T value) {
-      return swapToBigEndian(value);
-    }
-    template<typename T>
-    static void
-    copyAndSwapToNetworkOrder(void* dest, const T* src, size_t count) {
-      copyAndSwapToBigEndian(dest, src, count);
-    }
-    template<typename T>
-    static void
-    swapToNetworkOrderInPlace(T* p, size_t count) {
-      swapToBigEndianInPlace(p, count);
-    }
+  /*
+   * Synonyms for the big-endian functions, for better readability
+   * in network code.
+   */
+
+  template<typename T>
+  MOZ_WARN_UNUSED_RESULT static T swapToNetworkOrder(T aValue)
+  {
+    return swapToBigEndian(aValue);
+  }
+
+  template<typename T>
+  static void
+  copyAndSwapToNetworkOrder(void* aDest, const T* aSrc, size_t aCount)
+  {
+    copyAndSwapToBigEndian(aDest, aSrc, aCount);
+  }
+
+  template<typename T>
+  static void
+  swapToNetworkOrderInPlace(T* aPtr, size_t aCount)
+  {
+    swapToBigEndianInPlace(aPtr, aCount);
+  }
 
-    /*
-     * Converts a value of type T from little-endian format.
-     */
-    template<typename T>
-    MOZ_WARN_UNUSED_RESULT static T swapFromLittleEndian(T value) {
-      return maybeSwap<Little, ThisEndian>(value);
-    }
-    /*
-     * Copies count values of type T starting at src to dest, converting
-     * them to little-endian format if ThisEndian is Big.
-     * As with memcpy, dest and src must not overlap.
-     */
-    template<typename T>
-    static void copyAndSwapFromLittleEndian(T* dest, const void* src,
-                                            size_t count) {
-      copyAndSwapFrom<Little, ThisEndian>(dest, src, count);
-    }
-    /*
-     * Likewise, but converts values in place.
-     */
-    template<typename T>
-    static void swapFromLittleEndianInPlace(T* p, size_t count) {
-      maybeSwapInPlace<Little, ThisEndian>(p, count);
-    }
+  /*
+   * Converts a value of type T from little-endian format.
+   */
+  template<typename T>
+  MOZ_WARN_UNUSED_RESULT static T swapFromLittleEndian(T aValue)
+  {
+    return maybeSwap<Little, ThisEndian>(aValue);
+  }
+
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them to little-endian format if ThisEndian is Big.
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template<typename T>
+  static void copyAndSwapFromLittleEndian(T* aDest, const void* aSrc,
+                                          size_t aCount)
+  {
+    copyAndSwapFrom<Little, ThisEndian>(aDest, aSrc, aCount);
+  }
+
+  /*
+   * Likewise, but converts values in place.
+   */
+  template<typename T>
+  static void swapFromLittleEndianInPlace(T* aPtr, size_t aCount)
+  {
+    maybeSwapInPlace<Little, ThisEndian>(aPtr, aCount);
+  }
+
+  /*
+   * Converts a value of type T from big-endian format.
+   */
+  template<typename T>
+  MOZ_WARN_UNUSED_RESULT static T swapFromBigEndian(T aValue)
+  {
+    return maybeSwap<Big, ThisEndian>(aValue);
+  }
 
-    /*
-     * Converts a value of type T from big-endian format.
-     */
-    template<typename T>
-    MOZ_WARN_UNUSED_RESULT static T swapFromBigEndian(T value) {
-      return maybeSwap<Big, ThisEndian>(value);
-    }
-    /*
-     * Copies count values of type T starting at src to dest, converting
-     * them to big-endian format if ThisEndian is Little.
-     * As with memcpy, dest and src must not overlap.
-     */
-    template<typename T>
-    static void copyAndSwapFromBigEndian(T* dest, const void* src,
-                                         size_t count) {
-      copyAndSwapFrom<Big, ThisEndian>(dest, src, count);
-    }
-    /*
-     * Likewise, but converts values in place.
-     */
-    template<typename T>
-    static void swapFromBigEndianInPlace(T* p, size_t count) {
-      maybeSwapInPlace<Big, ThisEndian>(p, count);
-    }
+  /*
+   * Copies |aCount| values of type T starting at |aSrc| to |aDest|, converting
+   * them to big-endian format if ThisEndian is Little.
+   * As with memcpy, |aDest| and |aSrc| must not overlap.
+   */
+  template<typename T>
+  static void copyAndSwapFromBigEndian(T* aDest, const void* aSrc,
+                                       size_t aCount)
+  {
+    copyAndSwapFrom<Big, ThisEndian>(aDest, aSrc, aCount);
+  }
+
+  /*
+   * Likewise, but converts values in place.
+   */
+  template<typename T>
+  static void swapFromBigEndianInPlace(T* aPtr, size_t aCount)
+  {
+    maybeSwapInPlace<Big, ThisEndian>(aPtr, aCount);
+  }
+
+  /*
+   * Synonyms for the big-endian functions, for better readability
+   * in network code.
+   */
+  template<typename T>
+  MOZ_WARN_UNUSED_RESULT static T swapFromNetworkOrder(T aValue)
+  {
+    return swapFromBigEndian(aValue);
+  }
+
+  template<typename T>
+  static void copyAndSwapFromNetworkOrder(T* aDest, const void* aSrc,
+                                          size_t aCount)
+  {
+    copyAndSwapFromBigEndian(aDest, aSrc, aCount);
+  }
 
-    /*
-     * Synonyms for the big-endian functions, for better readability
-     * in network code.
-     */
-    template<typename T>
-    MOZ_WARN_UNUSED_RESULT static T swapFromNetworkOrder(T value) {
-      return swapFromBigEndian(value);
-    }
-    template<typename T>
-    static void copyAndSwapFromNetworkOrder(T* dest, const void* src,
-                                            size_t count) {
-      copyAndSwapFromBigEndian(dest, src, count);
-    }
-    template<typename T>
-    static void swapFromNetworkOrderInPlace(T* p, size_t count) {
-      swapFromBigEndianInPlace(p, count);
-    }
+  template<typename T>
+  static void swapFromNetworkOrderInPlace(T* aPtr, size_t aCount)
+  {
+    swapFromBigEndianInPlace(aPtr, aCount);
+  }
 
-  private:
-    /**
-     * Read a value of type T, encoded in endianness ThisEndian from |p|.
-     * Return that value encoded in native endianness.
-     */
-    template<typename T>
-    static T read(const void* p) {
-      union {
-        T val;
-        uint8_t buffer[sizeof(T)];
-      } u;
-      memcpy(u.buffer, p, sizeof(T));
-      return maybeSwap<ThisEndian, MOZ_NATIVE_ENDIANNESS>(u.val);
-    }
+private:
+  /**
+   * Read a value of type T, encoded in endianness ThisEndian from |aPtr|.
+   * Return that value encoded in native endianness.
+   */
+  template<typename T>
+  static T read(const void* aPtr)
+  {
+    union
+    {
+      T mVal;
+      uint8_t mBuffer[sizeof(T)];
+    } u;
+    memcpy(u.mBuffer, aPtr, sizeof(T));
+    return maybeSwap<ThisEndian, MOZ_NATIVE_ENDIANNESS>(u.mVal);
+  }
 
-    /**
-     * Write a value of type T, in native endianness, to |p|, in ThisEndian
-     * endianness.
-     */
-    template<typename T>
-    static void write(void* p, T value) {
-      T tmp = maybeSwap<MOZ_NATIVE_ENDIANNESS, ThisEndian>(value);
-      memcpy(p, &tmp, sizeof(T));
-    }
+  /**
+   * Write a value of type T, in native endianness, to |aPtr|, in ThisEndian
+   * endianness.
+   */
+  template<typename T>
+  static void write(void* aPtr, T aValue)
+  {
+    T tmp = maybeSwap<MOZ_NATIVE_ENDIANNESS, ThisEndian>(aValue);
+    memcpy(aPtr, &tmp, sizeof(T));
+  }
 
-    Endian() MOZ_DELETE;
-    Endian(const Endian& other) MOZ_DELETE;
-    void operator=(const Endian& other) MOZ_DELETE;
+  Endian() MOZ_DELETE;
+  Endian(const Endian& aOther) MOZ_DELETE;
+  void operator=(const Endian& aOther) MOZ_DELETE;
 };
 
 template<Endianness ThisEndian>
 class EndianReadWrite : public Endian<ThisEndian>
 {
-  private:
-    typedef Endian<ThisEndian> super;
+private:
+  typedef Endian<ThisEndian> super;
 
-  public:
-    using super::readUint16;
-    using super::readUint32;
-    using super::readUint64;
-    using super::readInt16;
-    using super::readInt32;
-    using super::readInt64;
-    using super::writeUint16;
-    using super::writeUint32;
-    using super::writeUint64;
-    using super::writeInt16;
-    using super::writeInt32;
-    using super::writeInt64;
+public:
+  using super::readUint16;
+  using super::readUint32;
+  using super::readUint64;
+  using super::readInt16;
+  using super::readInt32;
+  using super::readInt64;
+  using super::writeUint16;
+  using super::writeUint32;
+  using super::writeUint64;
+  using super::writeInt16;
+  using super::writeInt32;
+  using super::writeInt64;
 };
 
 } /* namespace detail */
 
 class LittleEndian MOZ_FINAL : public detail::EndianReadWrite<detail::Little>
 {};
 
 class BigEndian MOZ_FINAL : public detail::EndianReadWrite<detail::Big>
 {};
 
 typedef BigEndian NetworkEndian;
 
 class NativeEndian MOZ_FINAL : public detail::Endian<MOZ_NATIVE_ENDIANNESS>
 {
-  private:
-    typedef detail::Endian<MOZ_NATIVE_ENDIANNESS> super;
+private:
+  typedef detail::Endian<MOZ_NATIVE_ENDIANNESS> super;
 
-  public:
-    /*
-     * These functions are intended for cases where you have data in your
-     * native-endian format and you need the data to appear in the appropriate
-     * endianness for transmission, serialization, etc.
-     */
-    using super::swapToLittleEndian;
-    using super::copyAndSwapToLittleEndian;
-    using super::swapToLittleEndianInPlace;
-    using super::swapToBigEndian;
-    using super::copyAndSwapToBigEndian;
-    using super::swapToBigEndianInPlace;
-    using super::swapToNetworkOrder;
-    using super::copyAndSwapToNetworkOrder;
-    using super::swapToNetworkOrderInPlace;
+public:
+  /*
+   * These functions are intended for cases where you have data in your
+   * native-endian format and you need the data to appear in the appropriate
+   * endianness for transmission, serialization, etc.
+   */
+  using super::swapToLittleEndian;
+  using super::copyAndSwapToLittleEndian;
+  using super::swapToLittleEndianInPlace;
+  using super::swapToBigEndian;
+  using super::copyAndSwapToBigEndian;
+  using super::swapToBigEndianInPlace;
+  using super::swapToNetworkOrder;
+  using super::copyAndSwapToNetworkOrder;
+  using super::swapToNetworkOrderInPlace;
 
-    /*
-     * These functions are intended for cases where you have data in the
-     * given endianness (e.g. reading from disk or a file-format) and you
-     * need the data to appear in native-endian format for processing.
-     */
-    using super::swapFromLittleEndian;
-    using super::copyAndSwapFromLittleEndian;
-    using super::swapFromLittleEndianInPlace;
-    using super::swapFromBigEndian;
-    using super::copyAndSwapFromBigEndian;
-    using super::swapFromBigEndianInPlace;
-    using super::swapFromNetworkOrder;
-    using super::copyAndSwapFromNetworkOrder;
-    using super::swapFromNetworkOrderInPlace;
+  /*
+   * These functions are intended for cases where you have data in the
+   * given endianness (e.g. reading from disk or a file-format) and you
+   * need the data to appear in native-endian format for processing.
+   */
+  using super::swapFromLittleEndian;
+  using super::copyAndSwapFromLittleEndian;
+  using super::swapFromLittleEndianInPlace;
+  using super::swapFromBigEndian;
+  using super::copyAndSwapFromBigEndian;
+  using super::swapFromBigEndianInPlace;
+  using super::swapFromNetworkOrder;
+  using super::copyAndSwapFromNetworkOrder;
+  using super::swapFromNetworkOrderInPlace;
 };
 
 #undef MOZ_NATIVE_ENDIANNESS
 
 } /* namespace mozilla */
 
 #endif /* mozilla_Endian_h */
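
A short round-trip sketch using the public wrappers defined above (illustrative
only, not part of this patch):

  #include <stdint.h>
  #include "mozilla/Endian.h"

  void Demo()
  {
    uint8_t buffer[sizeof(uint32_t)];

    // Serialize in big-endian ("network") order regardless of host endianness.
    mozilla::BigEndian::writeUint32(buffer, 0x12345678);
    // buffer now holds 0x12 0x34 0x56 0x78.

    // Deserialize back to a native-endian value.
    uint32_t value = mozilla::BigEndian::readUint32(buffer);
    MOZ_ASSERT(value == 0x12345678);
  }
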
--- a/mfbt/EnumSet.h
+++ b/mfbt/EnumSet.h
@@ -19,172 +19,188 @@ namespace mozilla {
 /**
  * EnumSet<T> is a set of values defined by an enumeration. It is implemented
  * using a 32 bit mask for each value so it will only work for enums with an int
  * representation less than 32. It works both for enum and enum class types.
  */
 template<typename T>
 class EnumSet
 {
-  public:
-    EnumSet()
-      : mBitField(0)
-    { }
+public:
+  EnumSet()
+    : mBitField(0)
+  { }
 
-    MOZ_IMPLICIT EnumSet(T aEnum)
-      : mBitField(bitFor(aEnum))
-    { }
+  MOZ_IMPLICIT EnumSet(T aEnum)
+    : mBitField(bitFor(aEnum))
+  { }
 
-    EnumSet(T aEnum1, T aEnum2)
-      : mBitField(bitFor(aEnum1) |
-                  bitFor(aEnum2))
-    { }
+  EnumSet(T aEnum1, T aEnum2)
+    : mBitField(bitFor(aEnum1) |
+                bitFor(aEnum2))
+  { }
 
-    EnumSet(T aEnum1, T aEnum2, T aEnum3)
-      : mBitField(bitFor(aEnum1) |
-                  bitFor(aEnum2) |
-                  bitFor(aEnum3))
-    { }
+  EnumSet(T aEnum1, T aEnum2, T aEnum3)
+    : mBitField(bitFor(aEnum1) |
+                bitFor(aEnum2) |
+                bitFor(aEnum3))
+  { }
 
-    EnumSet(T aEnum1, T aEnum2, T aEnum3, T aEnum4)
-     : mBitField(bitFor(aEnum1) |
-                 bitFor(aEnum2) |
-                 bitFor(aEnum3) |
-                 bitFor(aEnum4))
-    { }
+  EnumSet(T aEnum1, T aEnum2, T aEnum3, T aEnum4)
+   : mBitField(bitFor(aEnum1) |
+               bitFor(aEnum2) |
+               bitFor(aEnum3) |
+               bitFor(aEnum4))
+  { }
 
-    EnumSet(const EnumSet& aEnumSet)
-     : mBitField(aEnumSet.mBitField)
-    { }
+  EnumSet(const EnumSet& aEnumSet)
+   : mBitField(aEnumSet.mBitField)
+  { }
 
-    /**
-     * Add an element
-     */
-    void operator+=(T aEnum) {
-      mBitField |= bitFor(aEnum);
-    }
+  /**
+   * Add an element
+   */
+  void operator+=(T aEnum)
+  {
+    mBitField |= bitFor(aEnum);
+  }
 
-    /**
-     * Add an element
-     */
-    EnumSet<T> operator+(T aEnum) const {
-      EnumSet<T> result(*this);
-      result += aEnum;
-      return result;
-    }
+  /**
+   * Add an element
+   */
+  EnumSet<T> operator+(T aEnum) const
+  {
+    EnumSet<T> result(*this);
+    result += aEnum;
+    return result;
+  }
 
-    /**
-     * Union
-     */
-    void operator+=(const EnumSet<T> aEnumSet) {
-      mBitField |= aEnumSet.mBitField;
-    }
+  /**
+   * Union
+   */
+  void operator+=(const EnumSet<T> aEnumSet)
+  {
+    mBitField |= aEnumSet.mBitField;
+  }
 
-    /**
-     * Union
-     */
-    EnumSet<T> operator+(const EnumSet<T> aEnumSet) const {
-      EnumSet<T> result(*this);
-      result += aEnumSet;
-      return result;
-    }
+  /**
+   * Union
+   */
+  EnumSet<T> operator+(const EnumSet<T> aEnumSet) const
+  {
+    EnumSet<T> result(*this);
+    result += aEnumSet;
+    return result;
+  }
 
-    /**
-     * Remove an element
-     */
-    void operator-=(T aEnum) {
-      mBitField &= ~(bitFor(aEnum));
-    }
+  /**
+   * Remove an element
+   */
+  void operator-=(T aEnum)
+  {
+    mBitField &= ~(bitFor(aEnum));
+  }
 
-    /**
-     * Remove an element
-     */
-    EnumSet<T> operator-(T aEnum) const {
-      EnumSet<T> result(*this);
-      result -= aEnum;
-      return result;
-    }
+  /**
+   * Remove an element
+   */
+  EnumSet<T> operator-(T aEnum) const
+  {
+    EnumSet<T> result(*this);
+    result -= aEnum;
+    return result;
+  }
 
-    /**
-     * Remove a set of elements
-     */
-    void operator-=(const EnumSet<T> aEnumSet) {
-      mBitField &= ~(aEnumSet.mBitField);
-    }
+  /**
+   * Remove a set of elements
+   */
+  void operator-=(const EnumSet<T> aEnumSet)
+  {
+    mBitField &= ~(aEnumSet.mBitField);
+  }
 
-    /**
-     * Remove a set of elements
-     */
-    EnumSet<T> operator-(const EnumSet<T> aEnumSet) const {
-      EnumSet<T> result(*this);
-      result -= aEnumSet;
-      return result;
-    }
+  /**
+   * Remove a set of elements
+   */
+  EnumSet<T> operator-(const EnumSet<T> aEnumSet) const
+  {
+    EnumSet<T> result(*this);
+    result -= aEnumSet;
+    return result;
+  }
 
-    /**
-     * Intersection
-     */
-    void operator&=(const EnumSet<T> aEnumSet) {
-      mBitField &= aEnumSet.mBitField;
-    }
+  /**
+   * Intersection
+   */
+  void operator&=(const EnumSet<T> aEnumSet)
+  {
+    mBitField &= aEnumSet.mBitField;
+  }
 
-    /**
-     * Intersection
-     */
-    EnumSet<T> operator&(const EnumSet<T> aEnumSet) const {
-      EnumSet<T> result(*this);
-      result &= aEnumSet;
-      return result;
-    }
+  /**
+   * Intersection
+   */
+  EnumSet<T> operator&(const EnumSet<T> aEnumSet) const
+  {
+    EnumSet<T> result(*this);
+    result &= aEnumSet;
+    return result;
+  }
 
-    /**
-     * Equality
-     */
-
-    bool operator==(const EnumSet<T> aEnumSet) const {
-      return mBitField == aEnumSet.mBitField;
-    }
+  /**
+   * Equality
+   */
+  bool operator==(const EnumSet<T> aEnumSet) const
+  {
+    return mBitField == aEnumSet.mBitField;
+  }
 
-    /**
-     * Test is an element is contained in the set
-     */
-    bool contains(T aEnum) const {
-      return mBitField & bitFor(aEnum);
-    }
+  /**
+   * Test if an element is contained in the set.
+   */
+  bool contains(T aEnum) const
+  {
+    return mBitField & bitFor(aEnum);
+  }
 
-    /**
-     * Return the number of elements in the set
-     */
-
-    uint8_t size() {
-      uint8_t count = 0;
-      for (uint32_t bitField = mBitField; bitField; bitField >>= 1) {
-        if (bitField & 1)
-          count++;
+  /**
+   * Return the number of elements in the set.
+   */
+  uint8_t size()
+  {
+    uint8_t count = 0;
+    for (uint32_t bitField = mBitField; bitField; bitField >>= 1) {
+      if (bitField & 1) {
+        count++;
       }
-      return count;
     }
+    return count;
+  }
 
-    bool isEmpty() const {
-      return mBitField == 0;
-    }
+  bool isEmpty() const
+  {
+    return mBitField == 0;
+  }
 
-    uint32_t serialize() const {
-      return mBitField;
-    }
+  uint32_t serialize() const
+  {
+    return mBitField;
+  }
 
-    void deserialize(uint32_t aValue) {
-      mBitField = aValue;
-    }
+  void deserialize(uint32_t aValue)
+  {
+    mBitField = aValue;
+  }
 
-  private:
-    static uint32_t bitFor(T aEnum) {
-      uint32_t bitNumber = (uint32_t)aEnum;
-      MOZ_ASSERT(bitNumber < 32);
-      return 1U << bitNumber;
-    }
+private:
+  static uint32_t bitFor(T aEnum)
+  {
+    uint32_t bitNumber = (uint32_t)aEnum;
+    MOZ_ASSERT(bitNumber < 32);
+    return 1U << bitNumber;
+  }
 
-    uint32_t mBitField;
+  uint32_t mBitField;
 };
 
 } // namespace mozilla
 
 #endif /* mozilla_EnumSet_h_*/
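
A brief usage sketch (illustrative only, not part of this patch; Fruit is a
made-up enumeration):

  #include "mozilla/Assertions.h"
  #include "mozilla/EnumSet.h"

  enum class Fruit { Apple, Banana, Cherry };

  void Demo()
  {
    mozilla::EnumSet<Fruit> basket(Fruit::Apple, Fruit::Banana);
    basket += Fruit::Cherry;                      // add an element
    basket -= Fruit::Apple;                       // remove an element
    MOZ_ASSERT(basket.contains(Fruit::Banana));
    MOZ_ASSERT(basket.size() == 2);
  }
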
--- a/mfbt/EnumeratedArray.h
+++ b/mfbt/EnumeratedArray.h
@@ -39,37 +39,38 @@ namespace mozilla {
  *   headCount[AnimalSpecies::Sheep] = 30;
  *
  */
 template<typename IndexType,
          MOZ_TEMPLATE_ENUM_CLASS_ENUM_TYPE(IndexType) SizeAsEnumValue,
          typename ValueType>
 class EnumeratedArray
 {
-  public:
-    static const size_t Size = size_t(SizeAsEnumValue);
+public:
+  static const size_t kSize = size_t(SizeAsEnumValue);
 
-  private:
-    Array<ValueType, Size> mArray;
+private:
+  Array<ValueType, kSize> mArray;
 
-  public:
-    EnumeratedArray() {}
+public:
+  EnumeratedArray() {}
 
-    explicit EnumeratedArray(const EnumeratedArray& aOther)
-    {
-      for (size_t i = 0; i < Size; i++)
-        mArray[i] = aOther.mArray[i];
+  explicit EnumeratedArray(const EnumeratedArray& aOther)
+  {
+    for (size_t i = 0; i < kSize; i++) {
+      mArray[i] = aOther.mArray[i];
     }
+  }
 
-    ValueType& operator[](IndexType aIndex)
-    {
-      return mArray[size_t(aIndex)];
-    }
+  ValueType& operator[](IndexType aIndex)
+  {
+    return mArray[size_t(aIndex)];
+  }
 
-    const ValueType& operator[](IndexType aIndex) const
-    {
-      return mArray[size_t(aIndex)];
-    }
+  const ValueType& operator[](IndexType aIndex) const
+  {
+    return mArray[size_t(aIndex)];
+  }
 };
 
 } // namespace mozilla
 
 #endif // mozilla_EnumeratedArray_h
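
A complete version of the usage sketched in the header comment above
(illustrative; the enumerator names other than Sheep are assumptions, not part
of this patch):

  #include "mozilla/EnumeratedArray.h"

  enum class AnimalSpecies { Cow, Sheep, Count };

  void Demo()
  {
    // One int per enumerator; AnimalSpecies::Count supplies the size (kSize).
    mozilla::EnumeratedArray<AnimalSpecies, AnimalSpecies::Count, int> headCount;
    headCount[AnimalSpecies::Cow] = 17;
    headCount[AnimalSpecies::Sheep] = 30;
  }
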
--- a/mfbt/FloatingPoint.cpp
+++ b/mfbt/FloatingPoint.cpp
@@ -5,16 +5,16 @@
 
 /* Implementations of FloatingPoint functions */
 
 #include "mozilla/FloatingPoint.h"
 
 namespace mozilla {
 
 bool
-IsFloat32Representable(double x)
+IsFloat32Representable(double aFloat32)
 {
-    float asFloat = static_cast<float>(x);
-    double floatAsDouble = static_cast<double>(asFloat);
-    return floatAsDouble == x;
+  float asFloat = static_cast<float>(aFloat32);
+  double floatAsDouble = static_cast<double>(asFloat);
+  return floatAsDouble == aFloat32;
 }
 
 } /* namespace mozilla */
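
Illustrative expectations for IsFloat32Representable (not part of this patch):

  #include "mozilla/Assertions.h"
  #include "mozilla/FloatingPoint.h"

  void Demo()
  {
    // 0.5 is exactly representable in both float and double.
    MOZ_ASSERT(mozilla::IsFloat32Representable(0.5));

    // The double nearest to 0.1 is not a float value, so the float round trip
    // changes it.
    MOZ_ASSERT(!mozilla::IsFloat32Representable(0.1));
  }
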
--- a/mfbt/FloatingPoint.h
+++ b/mfbt/FloatingPoint.h
@@ -31,272 +31,274 @@ namespace mozilla {
  *
  * For the aforementioned reasons, be very wary of making changes to any of
  * these algorithms.  If you must make changes, keep a careful eye out for
  * compiler bustage, particularly PGO-specific bustage.
  */
 
 struct FloatTypeTraits
 {
-    typedef uint32_t Bits;
+  typedef uint32_t Bits;
 
-    static const unsigned ExponentBias = 127;
-    static const unsigned ExponentShift = 23;
+  static const unsigned kExponentBias = 127;
+  static const unsigned kExponentShift = 23;
 
-    static const Bits SignBit         = 0x80000000UL;
-    static const Bits ExponentBits    = 0x7F800000UL;
-    static const Bits SignificandBits = 0x007FFFFFUL;
+  static const Bits kSignBit         = 0x80000000UL;
+  static const Bits kExponentBits    = 0x7F800000UL;
+  static const Bits kSignificandBits = 0x007FFFFFUL;
 };
 
 struct DoubleTypeTraits
 {
-    typedef uint64_t Bits;
+  typedef uint64_t Bits;
 
-    static const unsigned ExponentBias = 1023;
-    static const unsigned ExponentShift = 52;
+  static const unsigned kExponentBias = 1023;
+  static const unsigned kExponentShift = 52;
 
-    static const Bits SignBit         = 0x8000000000000000ULL;
-    static const Bits ExponentBits    = 0x7ff0000000000000ULL;
-    static const Bits SignificandBits = 0x000fffffffffffffULL;
+  static const Bits kSignBit         = 0x8000000000000000ULL;
+  static const Bits kExponentBits    = 0x7ff0000000000000ULL;
+  static const Bits kSignificandBits = 0x000fffffffffffffULL;
 };
 
 template<typename T> struct SelectTrait;
 template<> struct SelectTrait<float> : public FloatTypeTraits {};
 template<> struct SelectTrait<double> : public DoubleTypeTraits {};
 
 /*
  *  This struct contains details regarding the encoding of floating-point
  *  numbers that can be useful for direct bit manipulation. As of now, the
  *  template parameter has to be float or double.
  *
  *  The nested typedef |Bits| is the unsigned integral type with the same size
  *  as T: uint32_t for float and uint64_t for double (static assertions
  *  double-check these assumptions).
  *
- *  ExponentBias is the offset that is subtracted from the exponent when
+ *  kExponentBias is the offset that is subtracted from the exponent when
  *  computing the value, i.e. one plus the opposite of the minimum possible
  *  exponent.
- *  ExponentShift is the shift that one needs to apply to retrieve the exponent
- *  component of the value.
+ *  kExponentShift is the shift that one needs to apply to retrieve the
+ *  exponent component of the value.
  *
- *  SignBit contains a bits mask. Bit-and-ing with this mask will result in
+ *  kSignBit contains a bits mask. Bit-and-ing with this mask will result in
  *  obtaining the sign bit.
- *  ExponentBits contains the mask needed for obtaining the exponent bits and
- *  SignificandBits contains the mask needed for obtaining the significand bits.
+ *  kExponentBits contains the mask needed for obtaining the exponent bits and
+ *  kSignificandBits contains the mask needed for obtaining the significand
+ *  bits.
  *
- *  Full details of how floating point number formats are encoded are beyond the
- *  scope of this comment. For more information, see
+ *  Full details of how floating point number formats are encoded are beyond
+ *  the scope of this comment. For more information, see
  *  http://en.wikipedia.org/wiki/IEEE_floating_point
  *  http://en.wikipedia.org/wiki/Floating_point#IEEE_754:_floating_point_in_modern_computers
  */
 template<typename T>
 struct FloatingPoint : public SelectTrait<T>
 {
-    typedef SelectTrait<T> Base;
-    typedef typename Base::Bits Bits;
+  typedef SelectTrait<T> Base;
+  typedef typename Base::Bits Bits;
 
-    static_assert((Base::SignBit & Base::ExponentBits) == 0,
-                  "sign bit shouldn't overlap exponent bits");
-    static_assert((Base::SignBit & Base::SignificandBits) == 0,
-                  "sign bit shouldn't overlap significand bits");
-    static_assert((Base::ExponentBits & Base::SignificandBits) == 0,
-                  "exponent bits shouldn't overlap significand bits");
+  static_assert((Base::kSignBit & Base::kExponentBits) == 0,
+                "sign bit shouldn't overlap exponent bits");
+  static_assert((Base::kSignBit & Base::kSignificandBits) == 0,
+                "sign bit shouldn't overlap significand bits");
+  static_assert((Base::kExponentBits & Base::kSignificandBits) == 0,
+                "exponent bits shouldn't overlap significand bits");
 
-    static_assert((Base::SignBit | Base::ExponentBits | Base::SignificandBits) ==
-                  ~Bits(0),
-                  "all bits accounted for");
+  static_assert((Base::kSignBit | Base::kExponentBits | Base::kSignificandBits) ==
+                ~Bits(0),
+                "all bits accounted for");
 
-    /*
-     * These implementations assume float/double are 32/64-bit single/double format
-     * number types compatible with the IEEE-754 standard.  C++ don't require this
-     * to be the case.  But we required this in implementations of these algorithms
-     * that preceded this header, so we shouldn't break anything if we keep doing so.
-     */
-    static_assert(sizeof(T) == sizeof(Bits), "Bits must be same size as T");
+  /*
+   * These implementations assume float/double are 32/64-bit single/double
+   * format number types compatible with the IEEE-754 standard.  C++ doesn't
+   * require this to be the case.  But we required this in implementations of
+   * these algorithms that preceded this header, so we shouldn't break anything
+   * if we keep doing so.
+   */
+  static_assert(sizeof(T) == sizeof(Bits), "Bits must be same size as T");
 };
 
 /** Determines whether a double is NaN. */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-IsNaN(T t)
+IsNaN(T aValue)
 {
   /*
-   * A float/double is NaN if all exponent bits are 1 and the significand contains at
-   * least one non-zero bit.
+   * A float/double is NaN if all exponent bits are 1 and the significand
+   * contains at least one non-zero bit.
    */
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
-  Bits bits = BitwiseCast<Bits>(t);
-  return (bits & Traits::ExponentBits) == Traits::ExponentBits &&
-         (bits & Traits::SignificandBits) != 0;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return (bits & Traits::kExponentBits) == Traits::kExponentBits &&
+         (bits & Traits::kSignificandBits) != 0;
 }
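A minimal standalone sketch of the bit test described above, assuming a 64-bit
IEEE-754 double and using std::memcpy in place of mozilla::BitwiseCast (the
mask constants are local equivalents of kExponentBits/kSignificandBits):

  #include <stdint.h>
  #include <string.h>

  static bool IsNaNBits(double aValue)
  {
    uint64_t bits;
    memcpy(&bits, &aValue, sizeof(bits));
    const uint64_t kExponentMask    = 0x7FF0000000000000ULL;  // all exponent bits
    const uint64_t kSignificandMask = 0x000FFFFFFFFFFFFFULL;  // all significand bits
    // NaN: every exponent bit set and at least one significand bit set.
    return (bits & kExponentMask) == kExponentMask &&
           (bits & kSignificandMask) != 0;
  }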
 
 /** Determines whether a float/double is +Infinity or -Infinity. */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-IsInfinite(T t)
+IsInfinite(T aValue)
 {
   /* Infinities have all exponent bits set to 1 and an all-0 significand. */
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
-  Bits bits = BitwiseCast<Bits>(t);
-  return (bits & ~Traits::SignBit) == Traits::ExponentBits;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return (bits & ~Traits::kSignBit) == Traits::kExponentBits;
 }
 
 /** Determines whether a float/double is not NaN or infinite. */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-IsFinite(T t)
+IsFinite(T aValue)
 {
   /*
-   * NaN and Infinities are the only non-finite floats/doubles, and both have all
-   * exponent bits set to 1.
+   * NaN and Infinities are the only non-finite floats/doubles, and both have
+   * all exponent bits set to 1.
    */
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
-  Bits bits = BitwiseCast<Bits>(t);
-  return (bits & Traits::ExponentBits) != Traits::ExponentBits;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return (bits & Traits::kExponentBits) != Traits::kExponentBits;
 }
 
 /**
- * Determines whether a float/double is negative.  It is an error to call this method
- * on a float/double which is NaN.
+ * Determines whether a float/double is negative.  It is an error to call this
+ * method on a float/double which is NaN.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-IsNegative(T t)
+IsNegative(T aValue)
 {
-  MOZ_ASSERT(!IsNaN(t), "NaN does not have a sign");
+  MOZ_ASSERT(!IsNaN(aValue), "NaN does not have a sign");
 
   /* The sign bit is set if the double is negative. */
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
-  Bits bits = BitwiseCast<Bits>(t);
-  return (bits & Traits::SignBit) != 0;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return (bits & Traits::kSignBit) != 0;
 }
 
 /** Determines whether a float/double represents -0. */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-IsNegativeZero(T t)
+IsNegativeZero(T aValue)
 {
   /* Only the sign bit is set if the value is -0. */
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
-  Bits bits = BitwiseCast<Bits>(t);
-  return bits == Traits::SignBit;
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return bits == Traits::kSignBit;
 }
 
 /**
  * Returns the exponent portion of the float/double.
  *
  * Zero is not special-cased, so ExponentComponent(0.0) is
- * -int_fast16_t(Traits::ExponentBias).
+ * -int_fast16_t(Traits::kExponentBias).
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE int_fast16_t
-ExponentComponent(T t)
+ExponentComponent(T aValue)
 {
   /*
-   * The exponent component of a float/double is an unsigned number, biased from its
-   * actual value.  Subtract the bias to retrieve the actual exponent.
+   * The exponent component of a float/double is an unsigned number, biased
+   * from its actual value.  Subtract the bias to retrieve the actual exponent.
    */
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
-  Bits bits = BitwiseCast<Bits>(t);
-  return int_fast16_t((bits & Traits::ExponentBits) >> Traits::ExponentShift) -
-         int_fast16_t(Traits::ExponentBias);
+  Bits bits = BitwiseCast<Bits>(aValue);
+  return int_fast16_t((bits & Traits::kExponentBits) >> Traits::kExponentShift) -
+         int_fast16_t(Traits::kExponentBias);
 }
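A usage sketch, assuming IEEE-754 doubles (for which kExponentBias is 1023);
the MOZ_ASSERTs merely state the expected results:

  MOZ_ASSERT(mozilla::ExponentComponent(1.0) == 0);
  MOZ_ASSERT(mozilla::ExponentComponent(8.0) == 3);      // 8.0 == 2^3
  MOZ_ASSERT(mozilla::ExponentComponent(0.5) == -1);     // 0.5 == 2^-1
  MOZ_ASSERT(mozilla::ExponentComponent(0.0) == -1023);  // zero isn't special-cased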
 
 /** Returns +Infinity. */
 template<typename T>
 static MOZ_ALWAYS_INLINE T
 PositiveInfinity()
 {
   /*
    * Positive infinity has all exponent bits set, sign bit set to 0, and no
    * significand.
    */
   typedef FloatingPoint<T> Traits;
-  return BitwiseCast<T>(Traits::ExponentBits);
+  return BitwiseCast<T>(Traits::kExponentBits);
 }
 
 /** Returns -Infinity. */
 template<typename T>
 static MOZ_ALWAYS_INLINE T
 NegativeInfinity()
 {
   /*
    * Negative infinity has all exponent bits set, sign bit set to 1, and no
    * significand.
    */
   typedef FloatingPoint<T> Traits;
-  return BitwiseCast<T>(Traits::SignBit | Traits::ExponentBits);
+  return BitwiseCast<T>(Traits::kSignBit | Traits::kExponentBits);
 }
 
 
 /** Constructs a NaN value with the specified sign bit and significand bits. */
 template<typename T>
 static MOZ_ALWAYS_INLINE T
 SpecificNaN(int signbit, typename FloatingPoint<T>::Bits significand)
 {
   typedef FloatingPoint<T> Traits;
   MOZ_ASSERT(signbit == 0 || signbit == 1);
-  MOZ_ASSERT((significand & ~Traits::SignificandBits) == 0);
-  MOZ_ASSERT(significand & Traits::SignificandBits);
+  MOZ_ASSERT((significand & ~Traits::kSignificandBits) == 0);
+  MOZ_ASSERT(significand & Traits::kSignificandBits);
 
-  T t = BitwiseCast<T>((signbit ? Traits::SignBit : 0) |
-                       Traits::ExponentBits |
+  T t = BitwiseCast<T>((signbit ? Traits::kSignBit : 0) |
+                       Traits::kExponentBits |
                        significand);
   MOZ_ASSERT(IsNaN(t));
   return t;
 }
 
 /** Computes the smallest non-zero positive float/double value. */
 template<typename T>
 static MOZ_ALWAYS_INLINE T
 MinNumberValue()
 {
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
   return BitwiseCast<T>(Bits(1));
 }
 
 /**
- * If t is equal to some int32_t value, set *i to that value and return true;
- * otherwise return false.
+ * If aValue is equal to some int32_t value, set *aInt32 to that value and
+ * return true; otherwise return false.
  *
  * Note that negative zero is "equal" to zero here. To test whether a value can
  * be losslessly converted to int32_t and back, use NumberIsInt32 instead.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-NumberEqualsInt32(T t, int32_t* i)
+NumberEqualsInt32(T aValue, int32_t* aInt32)
 {
   /*
    * XXX Casting a floating-point value that doesn't truncate to int32_t, to
    *     int32_t, induces undefined behavior.  We should definitely fix this
    *     (bug 744965), but as apparently it "works" in practice, it's not a
    *     pressing concern now.
    */
-  return t == (*i = int32_t(t));
+  return aValue == (*aInt32 = int32_t(aValue));
 }
 
 /**
  * If aValue can be converted to int32_t and back to an identical value,
- * set *i to that value and return true; otherwise return false.
+ * set *aInt32 to that value and return true; otherwise return false.
  *
  * The difference between this and NumberEqualsInt32 is that this method returns
  * false for negative zero.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-NumberIsInt32(T t, int32_t* i)
+NumberIsInt32(T aValue, int32_t* aInt32)
 {
-  return !IsNegativeZero(t) && NumberEqualsInt32(t, i);
+  return !IsNegativeZero(aValue) && NumberEqualsInt32(aValue, aInt32);
 }
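A usage sketch contrasting the two predicates, in particular on negative zero:

  int32_t i;
  MOZ_ASSERT(mozilla::NumberEqualsInt32(-0.0, &i) && i == 0);  // -0 counts as 0 here
  MOZ_ASSERT(!mozilla::NumberIsInt32(-0.0, &i));               // but doesn't round-trip
  MOZ_ASSERT(mozilla::NumberIsInt32(42.0, &i) && i == 42);
  MOZ_ASSERT(!mozilla::NumberEqualsInt32(1.5, &i));            // non-integral value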
 
 /**
  * Computes a NaN value.  Do not use this method if you depend upon a particular
  * NaN value being returned.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE T
@@ -304,106 +306,108 @@ UnspecifiedNaN()
 {
   /*
    * If we can use any quiet NaN, we might as well use the all-ones NaN,
    * since it's cheap to materialize on common platforms (such as x64, where
    * this value can be represented in a 32-bit signed immediate field, allowing
    * it to be stored to memory in a single instruction).
    */
   typedef FloatingPoint<T> Traits;
-  return SpecificNaN<T>(1, Traits::SignificandBits);
+  return SpecificNaN<T>(1, Traits::kSignificandBits);
 }
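A sketch of the bit pattern this produces, assuming a 64-bit IEEE-754 double:
the sign bit, all exponent bits, and all significand bits are set, so the
result is the all-ones word (the same bits as int64_t(-1)), which is what
makes it cheap to materialize:

  double nan = mozilla::UnspecifiedNaN<double>();
  MOZ_ASSERT(mozilla::IsNaN(nan));
  MOZ_ASSERT(mozilla::BitwiseCast<uint64_t>(nan) == 0xFFFFFFFFFFFFFFFFULL);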
 
 /**
  * Compare two doubles for equality, *without* equating -0 to +0, and equating
  * any NaN value to any other NaN value.  (The normal equality operators equate
  * -0 with +0, and they equate NaN to no other value.)
  */
 template<typename T>
 static inline bool
-NumbersAreIdentical(T t1, T t2)
+NumbersAreIdentical(T aValue1, T aValue2)
 {
   typedef FloatingPoint<T> Traits;
   typedef typename Traits::Bits Bits;
-  if (IsNaN(t1))
-    return IsNaN(t2);
-  return BitwiseCast<Bits>(t1) == BitwiseCast<Bits>(t2);
+  if (IsNaN(aValue1)) {
+    return IsNaN(aValue2);
+  }
+  return BitwiseCast<Bits>(aValue1) == BitwiseCast<Bits>(aValue2);
 }
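A usage sketch of how this differs from operator== on the two special cases:

  MOZ_ASSERT(!mozilla::NumbersAreIdentical(-0.0, 0.0));  // == would say true
  double nan1 = mozilla::UnspecifiedNaN<double>();
  double nan2 = mozilla::SpecificNaN<double>(0, 1);
  MOZ_ASSERT(mozilla::NumbersAreIdentical(nan1, nan2));  // == would say false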
 
 namespace detail {
 
 template<typename T>
 struct FuzzyEqualsEpsilon;
 
 template<>
 struct FuzzyEqualsEpsilon<float>
 {
-  // A number near 1e-5 that is exactly representable in
-  // floating point
+  // A number near 1e-5 that is exactly representable in a float.
   static float value() { return 1.0f / (1 << 17); }
 };
 
 template<>
 struct FuzzyEqualsEpsilon<double>
 {
-  // A number near 1e-12 that is exactly representable in
-  // a double
+  // A number near 1e-12 that is exactly representable in a double.
   static double value() { return 1.0 / (1LL << 40); }
 };
 
 } // namespace detail
 
 /**
  * Compare two floating point values for equality, modulo rounding error. That
  * is, the two values are considered equal if they are both not NaN and if they
- * are less than or equal to epsilon apart. The default value of epsilon is near
- * 1e-5.
+ * are less than or equal to aEpsilon apart. The default value of aEpsilon is
+ * near 1e-5.
  *
  * For most scenarios you will want to use FuzzyEqualsMultiplicative instead,
  * as it is more reasonable over the entire range of floating point numbers.
- * This additive version should only be used if you know the range of the numbers
- * you are dealing with is bounded and stays around the same order of magnitude.
+ * This additive version should only be used if you know the range of the
+ * numbers you are dealing with is bounded and stays around the same order of
+ * magnitude.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-FuzzyEqualsAdditive(T val1, T val2, T epsilon = detail::FuzzyEqualsEpsilon<T>::value())
+FuzzyEqualsAdditive(T aValue1, T aValue2,
+                    T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value())
 {
   static_assert(IsFloatingPoint<T>::value, "floating point type required");
-  return Abs(val1 - val2) <= epsilon;
+  return Abs(aValue1 - aValue2) <= aEpsilon;
 }
 
 /**
  * Compare two floating point values for equality, allowing for rounding error
  * relative to the magnitude of the values. That is, the two values are
  * considered equal if they are both not NaN and they are less than or equal to
- * some epsilon apart, where the epsilon is scaled by the smaller of the two
+ * some aEpsilon apart, where aEpsilon is scaled by the smaller of the two
  * argument values.
  *
  * In most cases you will want to use this rather than FuzzyEqualsAdditive, as
  * this function effectively masks out differences in the bottom few bits of
- * the floating point numbers being compared, regardless of what order of magnitude
- * those numbers are at.
+ * the floating point numbers being compared, regardless of what order of
+ * magnitude those numbers are at.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
-FuzzyEqualsMultiplicative(T val1, T val2, T epsilon = detail::FuzzyEqualsEpsilon<T>::value())
+FuzzyEqualsMultiplicative(T aValue1, T aValue2,
+                          T aEpsilon = detail::FuzzyEqualsEpsilon<T>::value())
 {
   static_assert(IsFloatingPoint<T>::value, "floating point type required");
   // can't use std::min because of bug 965340
-  T smaller = Abs(val1) < Abs(val2) ? Abs(val1) : Abs(val2);
-  return Abs(val1 - val2) <= epsilon * smaller;
+  T smaller = Abs(aValue1) < Abs(aValue2) ? Abs(aValue1) : Abs(aValue2);
+  return Abs(aValue1 - aValue2) <= aEpsilon * smaller;
 }
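A sketch of the difference between the two comparisons, using the default
epsilons (near 1e-5 for float):

  float big = 1.0e6f;
  // An absolute difference of 1.0 dwarfs the additive epsilon...
  MOZ_ASSERT(!mozilla::FuzzyEqualsAdditive(big, big + 1.0f));
  // ...but is tiny relative to the magnitude of the operands.
  MOZ_ASSERT(mozilla::FuzzyEqualsMultiplicative(big, big + 1.0f));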
 
 /**
  * Returns true if the given value can be losslessly represented as an IEEE-754
  * single format number, false otherwise.  All NaN values are considered
  * representable (notwithstanding that the exact bit pattern of a double format
  * NaN value can't be exactly represented in single format).
  *
  * This function isn't inlined to avoid buggy optimizations by MSVC.
  */
 MOZ_WARN_UNUSED_RESULT
 extern MFBT_API bool
-IsFloat32Representable(double x);
+IsFloat32Representable(double aFloat32);
 
 } /* namespace mozilla */
 
 #endif /* mozilla_FloatingPoint_h */
--- a/mfbt/GuardObjects.h
+++ b/mfbt/GuardObjects.h
@@ -65,56 +65,57 @@ namespace detail {
  *     constructor. It uses the parameter declared by
  *     MOZ_GUARD_OBJECT_NOTIFIER_PARAM.
  *
  * For more details, and examples of using these macros, see
  * https://developer.mozilla.org/en/Using_RAII_classes_in_Mozilla
  */
 class GuardObjectNotifier
 {
-  private:
-    bool* statementDone;
+private:
+  bool* mStatementDone;
 
-  public:
-    GuardObjectNotifier() : statementDone(nullptr) { }
+public:
+  GuardObjectNotifier() : mStatementDone(nullptr) { }
 
-    ~GuardObjectNotifier() {
-      *statementDone = true;
-    }
+  ~GuardObjectNotifier() { *mStatementDone = true; }
 
-    void setStatementDone(bool* statementIsDone) {
-      statementDone = statementIsDone;
-    }
+  void setStatementDone(bool* aStatementIsDone)
+  {
+    mStatementDone = aStatementIsDone;
+  }
 };
 
 class GuardObjectNotificationReceiver
 {
-  private:
-    bool statementDone;
+private:
+  bool mStatementDone;
 
-  public:
-    GuardObjectNotificationReceiver() : statementDone(false) { }
+public:
+  GuardObjectNotificationReceiver() : mStatementDone(false) { }
 
-    ~GuardObjectNotificationReceiver() {
-      /*
-       * Assert that the guard object was not used as a temporary.  (Note that
-       * this assert might also fire if init is not called because the guard
-       * object's implementation is not using the above macros correctly.)
-       */
-      MOZ_ASSERT(statementDone);
-    }
+  ~GuardObjectNotificationReceiver() {
+    /*
+     * Assert that the guard object was not used as a temporary.  (Note that
+     * this assert might also fire if init is not called because the guard
+     * object's implementation is not using the above macros correctly.)
+     */
+    MOZ_ASSERT(mStatementDone);
+  }
 
-    void init(const GuardObjectNotifier& constNotifier) {
-      /*
-       * constNotifier is passed as a const reference so that we can pass a
-       * temporary, but we really intend it as non-const.
-       */
-      GuardObjectNotifier& notifier = const_cast<GuardObjectNotifier&>(constNotifier);
-      notifier.setStatementDone(&statementDone);
-    }
+  void init(const GuardObjectNotifier& aConstNotifier)
+  {
+    /*
+     * aConstNotifier is passed as a const reference so that we can pass a
+     * temporary, but we really intend it as non-const.
+     */
+    GuardObjectNotifier& notifier =
+      const_cast<GuardObjectNotifier&>(aConstNotifier);
+    notifier.setStatementDone(&mStatementDone);
+  }
 };
 
 } /* namespace detail */
 } /* namespace mozilla */
 
 #endif /* DEBUG */
 
 #ifdef DEBUG
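A hypothetical sketch (the class and member names are invented here, and the
MOZ_GUARD_OBJECT_* convenience macros are omitted) of how a guard class wires
the notifier and receiver together so that DEBUG builds catch guards
constructed as temporaries:

  class AutoExample
  {
  public:
    explicit AutoExample(const mozilla::detail::GuardObjectNotifier& aNotifier =
                           mozilla::detail::GuardObjectNotifier())
    {
      // Wire the notifier temporary to our receiver; when that temporary dies
      // at the end of the full expression it marks the statement as done.
      mGuard.init(aNotifier);
    }

  private:
    mozilla::detail::GuardObjectNotificationReceiver mGuard;
  };

  // AutoExample guard;  // OK: the named object outlives the statement.
  // AutoExample();      // Asserts in DEBUG builds: the guard itself is a
  //                     // temporary, so its receiver is destroyed before the
  //                     // notifier has marked the statement as done.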
--- a/mfbt/HashFunctions.cpp
+++ b/mfbt/HashFunctions.cpp
@@ -8,31 +8,31 @@
 #include "mozilla/HashFunctions.h"
 #include "mozilla/Types.h"
 
 #include <string.h>
 
 namespace mozilla {
 
 uint32_t
-HashBytes(const void* bytes, size_t length)
+HashBytes(const void* aBytes, size_t aLength)
 {
   uint32_t hash = 0;
-  const char* b = reinterpret_cast<const char*>(bytes);
+  const char* b = reinterpret_cast<const char*>(aBytes);
 
   /* Walk word by word. */
   size_t i = 0;
-  for (; i < length - (length % sizeof(size_t)); i += sizeof(size_t)) {
+  for (; i < aLength - (aLength % sizeof(size_t)); i += sizeof(size_t)) {
     /* Do an explicitly unaligned load of the data. */
     size_t data;
     memcpy(&data, b + i, sizeof(size_t));
 
     hash = AddToHash(hash, data, sizeof(data));
   }
 
   /* Get the remaining bytes. */
-  for (; i < length; i++)
+  for (; i < aLength; i++) {
     hash = AddToHash(hash, b[i]);
-
+  }
   return hash;
 }
 
 } /* namespace mozilla */
--- a/mfbt/HashFunctions.h
+++ b/mfbt/HashFunctions.h
@@ -22,26 +22,27 @@
  *  - AddToHash     Add one or more values to the given hash.  This supports the
  *                  same list of types as HashGeneric.
  *
  *
  * You can chain these functions together to hash complex objects.  For example:
  *
  *  class ComplexObject
  *  {
- *      char* str;
- *      uint32_t uint1, uint2;
- *      void (*callbackFn)();
+ *    char* mStr;
+ *    uint32_t mUint1, mUint2;
+ *    void (*mCallbackFn)();
  *
- *    public:
- *      uint32_t hash() {
- *        uint32_t hash = HashString(str);
- *        hash = AddToHash(hash, uint1, uint2);
- *        return AddToHash(hash, callbackFn);
- *      }
+ *  public:
+ *    uint32_t hash()
+ *    {
+ *      uint32_t hash = HashString(mStr);
+ *      hash = AddToHash(hash, mUint1, mUint2);
+ *      return AddToHash(hash, mCallbackFn);
+ *    }
  *  };
  *
  * If you want to hash an nsAString or nsACString, use the HashString functions
  * in nsHashKeys.h.
  */
 
 #ifndef mozilla_HashFunctions_h
 #define mozilla_HashFunctions_h
@@ -54,29 +55,29 @@
 #include <stdint.h>
 
 #ifdef __cplusplus
 namespace mozilla {
 
 /**
  * The golden ratio as a 32-bit fixed-point value.
  */
-static const uint32_t GoldenRatioU32 = 0x9E3779B9U;
+static const uint32_t kGoldenRatioU32 = 0x9E3779B9U;
 
 inline uint32_t
-RotateBitsLeft32(uint32_t value, uint8_t bits)
+RotateBitsLeft32(uint32_t aValue, uint8_t aBits)
 {
-  MOZ_ASSERT(bits < 32);
-  return (value << bits) | (value >> (32 - bits));
+  MOZ_ASSERT(aBits < 32);
+  return (aValue << aBits) | (aValue >> (32 - aBits));
 }
 
 namespace detail {
 
 inline uint32_t
-AddU32ToHash(uint32_t hash, uint32_t value)
+AddU32ToHash(uint32_t aHash, uint32_t aValue)
 {
   /*
    * This is the meat of all our hash routines.  This hash function is not
    * particularly sophisticated, but it seems to work well for our mostly
    * plain-text inputs.  Implementation notes follow.
    *
    * Our use of the golden ratio here is arbitrary; we could pick almost any
    * number which:
@@ -86,301 +87,281 @@ AddU32ToHash(uint32_t hash, uint32_t val
    *  * has a reasonably-even mix of 1's and 0's (consider the extreme case
    *    where we multiply by 0x3 or 0xeffffff -- this will not produce good
    *    mixing across all bits of the hash).
    *
    * The rotation length of 5 is also arbitrary, although an odd number is again
    * preferable so our hash explores the whole universe of possible rotations.
    *
    * Finally, we multiply by the golden ratio *after* xor'ing, not before.
-   * Otherwise, if |hash| is 0 (as it often is for the beginning of a message),
-   * the expression
+   * Otherwise, if |aHash| is 0 (as it often is for the beginning of a
+   * message), the expression
    *
-   *   (GoldenRatioU32 * RotateBitsLeft(hash, 5)) |xor| value
+   *   (kGoldenRatioU32 * RotateBitsLeft(aHash, 5)) |xor| aValue
    *
-   * evaluates to |value|.
+   * evaluates to |aValue|.
    *
    * (Number-theoretic aside: Because any odd number |m| is relatively prime to
    * our modulus (2^32), the list
    *
    *    [x * m (mod 2^32) for 0 <= x < 2^32]
    *
    * has no duplicate elements.  This means that multiplying by |m| does not
    * cause us to skip any possible hash values.
    *
    * It's also nice if |m| has large-ish order mod 2^32 -- that is, if the
    * smallest k such that m^k == 1 (mod 2^32) is large -- so we can safely
    * multiply our hash value by |m| a few times without negating the
    * multiplicative effect.  Our golden ratio constant has order 2^29, which is
    * more than enough for our purposes.)
    */
-  return GoldenRatioU32 * (RotateBitsLeft32(hash, 5) ^ value);
+  return kGoldenRatioU32 * (RotateBitsLeft32(aHash, 5) ^ aValue);
 }
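A standalone sketch of the mixing step and of the degenerate case the comment
warns about (the constant and rotation are copied from above; these are not
the MFBT functions themselves):

  #include <stdint.h>

  static const uint32_t kGolden = 0x9E3779B9U;

  static uint32_t RotL5(uint32_t aValue)
  {
    return (aValue << 5) | (aValue >> 27);
  }

  // Multiply *after* xor, as AddU32ToHash does.
  static uint32_t MixAfter(uint32_t aHash, uint32_t aValue)
  {
    return kGolden * (RotL5(aHash) ^ aValue);
  }

  // Multiply *before* xor: with aHash == 0 this degenerates to aValue itself,
  // i.e. MixBefore(0, v) == v, so the first word of a message would pass
  // through unmixed.  MixAfter(0, v) == kGolden * v does not.
  static uint32_t MixBefore(uint32_t aHash, uint32_t aValue)
  {
    return (kGolden * RotL5(aHash)) ^ aValue;
  }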
 
 /**
  * AddUintptrToHash takes sizeof(uintptr_t) as a template parameter.
  */
 template<size_t PtrSize>
 inline uint32_t
-AddUintptrToHash(uint32_t hash, uintptr_t value);
+AddUintptrToHash(uint32_t aHash, uintptr_t aValue);
 
 template<>
 inline uint32_t
-AddUintptrToHash<4>(uint32_t hash, uintptr_t value)
+AddUintptrToHash<4>(uint32_t aHash, uintptr_t aValue)
 {
-  return AddU32ToHash(hash, static_cast<uint32_t>(value));
+  return AddU32ToHash(aHash, static_cast<uint32_t>(aValue));
 }
 
 template<>
 inline uint32_t
-AddUintptrToHash<8>(uint32_t hash, uintptr_t value)
+AddUintptrToHash<8>(uint32_t aHash, uintptr_t aValue)
 {
   /*
    * The static cast to uint64_t below is necessary because this function
    * sometimes gets compiled on 32-bit platforms (yes, even though it's a
    * template and we never call this specialization in a 32-bit build).  If
-   * we do value >> 32 on a 32-bit machine, we're shifting a 32-bit uintptr_t
+   * we do aValue >> 32 on a 32-bit machine, we're shifting a 32-bit uintptr_t
    * right 32 bits, and the compiler throws an error.
    */
-  uint32_t v1 = static_cast<uint32_t>(value);
-  uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
-  return AddU32ToHash(AddU32ToHash(hash, v1), v2);
+  uint32_t v1 = static_cast<uint32_t>(aValue);
+  uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
+  return AddU32ToHash(AddU32ToHash(aHash, v1), v2);
 }
 
 } /* namespace detail */
 
 /**
  * AddToHash takes a hash and some values and returns a new hash based on the
  * inputs.
  *
  * Currently, we support hashing uint32_t's, values which we can implicitly
  * convert to uint32_t, data pointers, and function pointers.
  */
 template<typename A>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-AddToHash(uint32_t hash, A a)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+AddToHash(uint32_t aHash, A aA)
 {
   /*
    * Try to convert |A| to uint32_t implicitly.  If this works, great.  If not,
    * we'll error out.
    */
-  return detail::AddU32ToHash(hash, a);
+  return detail::AddU32ToHash(aHash, aA);
 }
 
 template<typename A>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-AddToHash(uint32_t hash, A* a)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+AddToHash(uint32_t aHash, A* aA)
 {
   /*
    * You might think this function should just take a void*.  But then we'd only
    * catch data pointers and couldn't handle function pointers.
    */
 
-  static_assert(sizeof(a) == sizeof(uintptr_t),
-                "Strange pointer!");
+  static_assert(sizeof(aA) == sizeof(uintptr_t), "Strange pointer!");
 
-  return detail::AddUintptrToHash<sizeof(uintptr_t)>(hash, uintptr_t(a));
+  return detail::AddUintptrToHash<sizeof(uintptr_t)>(aHash, uintptr_t(aA));
 }
 
 template<>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-AddToHash(uint32_t hash, uintptr_t a)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+AddToHash(uint32_t aHash, uintptr_t aA)
 {
-  return detail::AddUintptrToHash<sizeof(uintptr_t)>(hash, a);
+  return detail::AddUintptrToHash<sizeof(uintptr_t)>(aHash, aA);
 }
 
 template<typename A, typename B>
-MOZ_WARN_UNUSED_RESULT
-uint32_t
-AddToHash(uint32_t hash, A a, B b)
+MOZ_WARN_UNUSED_RESULT uint32_t
+AddToHash(uint32_t aHash, A aA, B aB)
 {
-  return AddToHash(AddToHash(hash, a), b);
+  return AddToHash(AddToHash(aHash, aA), aB);
 }
 
 template<typename A, typename B, typename C>
-MOZ_WARN_UNUSED_RESULT
-uint32_t
-AddToHash(uint32_t hash, A a, B b, C c)
+MOZ_WARN_UNUSED_RESULT uint32_t
+AddToHash(uint32_t aHash, A aA, B aB, C aC)
 {
-  return AddToHash(AddToHash(hash, a, b), c);
+  return AddToHash(AddToHash(aHash, aA, aB), aC);
 }
 
 template<typename A, typename B, typename C, typename D>
-MOZ_WARN_UNUSED_RESULT
-uint32_t
-AddToHash(uint32_t hash, A a, B b, C c, D d)
+MOZ_WARN_UNUSED_RESULT uint32_t
+AddToHash(uint32_t aHash, A aA, B aB, C aC, D aD)
 {
-  return AddToHash(AddToHash(hash, a, b, c), d);
+  return AddToHash(AddToHash(aHash, aA, aB, aC), aD);
 }
 
 template<typename A, typename B, typename C, typename D, typename E>
-MOZ_WARN_UNUSED_RESULT
-uint32_t
-AddToHash(uint32_t hash, A a, B b, C c, D d, E e)
+MOZ_WARN_UNUSED_RESULT uint32_t
+AddToHash(uint32_t aHash, A aA, B aB, C aC, D aD, E aE)
 {
-  return AddToHash(AddToHash(hash, a, b, c, d), e);
+  return AddToHash(AddToHash(aHash, aA, aB, aC, aD), aE);
 }
 
 /**
  * The HashGeneric class of functions let you hash one or more values.
  *
  * If you want to hash together two values x and y, calling HashGeneric(x, y) is
  * much better than calling AddToHash(x, y), because AddToHash(x, y) assumes
  * that x has already been hashed.
  */
 template<typename A>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashGeneric(A a)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashGeneric(A aA)
 {
-  return AddToHash(0, a);
+  return AddToHash(0, aA);
 }
 
 template<typename A, typename B>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashGeneric(A a, B b)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashGeneric(A aA, B aB)
 {
-  return AddToHash(0, a, b);
+  return AddToHash(0, aA, aB);
 }
 
 template<typename A, typename B, typename C>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashGeneric(A a, B b, C c)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashGeneric(A aA, B aB, C aC)
 {
-  return AddToHash(0, a, b, c);
+  return AddToHash(0, aA, aB, aC);
 }
 
 template<typename A, typename B, typename C, typename D>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashGeneric(A a, B b, C c, D d)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashGeneric(A aA, B aB, C aC, D aD)
 {
-  return AddToHash(0, a, b, c, d);
+  return AddToHash(0, aA, aB, aC, aD);
 }
 
 template<typename A, typename B, typename C, typename D, typename E>
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashGeneric(A a, B b, C c, D d, E e)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashGeneric(A aA, B aB, C aC, D aD, E aE)
 {
-  return AddToHash(0, a, b, c, d, e);
+  return AddToHash(0, aA, aB, aC, aD, aE);
 }
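A short usage sketch of the distinction (the member names are hypothetical):

  // Hash two values from scratch; the first argument is *not* an existing hash.
  uint32_t hash = mozilla::HashGeneric(mWidth, mHeight);
  // Fold a further value into a hash we already have.
  hash = mozilla::AddToHash(hash, mDepth);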
 
 namespace detail {
 
 template<typename T>
 uint32_t
-HashUntilZero(const T* str)
+HashUntilZero(const T* aStr)
 {
   uint32_t hash = 0;
-  for (T c; (c = *str); str++)
+  for (T c; (c = *aStr); aStr++) {
     hash = AddToHash(hash, c);
+  }
   return hash;
 }
 
 template<typename T>
 uint32_t
-HashKnownLength(const T* str, size_t length)
+HashKnownLength(const T* aStr, size_t aLength)
 {
   uint32_t hash = 0;
-  for (size_t i = 0; i < length; i++)
-    hash = AddToHash(hash, str[i]);
+  for (size_t i = 0; i < aLength; i++) {
+    hash = AddToHash(hash, aStr[i]);
+  }
   return hash;
 }
 
 } /* namespace detail */
 
 /**
  * The HashString overloads below do just what you'd expect.
  *
  * If you have the string's length, you might as well call the overload which
  * includes the length.  It may be marginally faster.
  */
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const char* str)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const char* aStr)
 {
-  return detail::HashUntilZero(str);
+  return detail::HashUntilZero(aStr);
 }
 
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const char* str, size_t length)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const char* aStr, size_t aLength)
 {
-  return detail::HashKnownLength(str, length);
-}
-
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const unsigned char* str, size_t length)
-{
-  return detail::HashKnownLength(str, length);
+  return detail::HashKnownLength(aStr, aLength);
 }
 
 MOZ_WARN_UNUSED_RESULT
 inline uint32_t
-HashString(const uint16_t* str)
+HashString(const unsigned char* aStr, size_t aLength)
 {
-  return detail::HashUntilZero(str);
+  return detail::HashKnownLength(aStr, aLength);
 }
 
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const uint16_t* str, size_t length)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const uint16_t* aStr)
 {
-  return detail::HashKnownLength(str, length);
+  return detail::HashUntilZero(aStr);
+}
+
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const uint16_t* aStr, size_t aLength)
+{
+  return detail::HashKnownLength(aStr, aLength);
 }
 
 #ifdef MOZ_CHAR16_IS_NOT_WCHAR
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const char16_t* str)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const char16_t* aStr)
 {
-  return detail::HashUntilZero(str);
+  return detail::HashUntilZero(aStr);
 }
 
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const char16_t* str, size_t length)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const char16_t* aStr, size_t aLength)
 {
-  return detail::HashKnownLength(str, length);
+  return detail::HashKnownLength(aStr, aLength);
 }
 #endif
 
 /*
  * On Windows, wchar_t (char16_t) is not the same as uint16_t, even though it's
  * the same width!
  */
 #ifdef WIN32
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const wchar_t* str)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const wchar_t* aStr)
 {
-  return detail::HashUntilZero(str);
+  return detail::HashUntilZero(aStr);
 }
 
-MOZ_WARN_UNUSED_RESULT
-inline uint32_t
-HashString(const wchar_t* str, size_t length)
+MOZ_WARN_UNUSED_RESULT inline uint32_t
+HashString(const wchar_t* aStr, size_t aLength)
 {
-  return detail::HashKnownLength(str, length);
+  return detail::HashKnownLength(aStr, aLength);
 }
 #endif
 
 /**
  * Hash some number of bytes.
  *
  * This hash walks word-by-word, rather than byte-by-byte, so you won't get the
  * same result out of HashBytes as you would out of HashString.
  */
-MOZ_WARN_UNUSED_RESULT
-extern MFBT_API uint32_t
-HashBytes(const void* bytes, size_t length);
+MOZ_WARN_UNUSED_RESULT extern MFBT_API uint32_t
+HashBytes(const void* aBytes, size_t aLength);
 
 } /* namespace mozilla */
 #endif /* __cplusplus */
 
 #endif /* mozilla_HashFunctions_h */
--- a/mfbt/IntegerTypeTraits.h
+++ b/mfbt/IntegerTypeTraits.h
@@ -21,45 +21,61 @@ namespace detail {
  * of given size (can be 1, 2, 4 or 8) and given signedness
  * (false means unsigned, true means signed).
  */
 template<size_t Size, bool Signedness>
 struct StdintTypeForSizeAndSignedness;
 
 template<>
 struct StdintTypeForSizeAndSignedness<1, true>
-{ typedef int8_t   Type; };
+{
+  typedef int8_t Type;
+};
 
 template<>
 struct StdintTypeForSizeAndSignedness<1, false>
-{ typedef uint8_t  Type; };
+{
+  typedef uint8_t Type;
+};
 
 template<>
 struct StdintTypeForSizeAndSignedness<2, true>
-{ typedef int16_t  Type; };
+{
+  typedef int16_t Type;
+};
 
 template<>
 struct StdintTypeForSizeAndSignedness<2, false>
-{ typedef uint16_t Type; };
+{
+  typedef uint16_t Type;
+};
 
 template<>
 struct StdintTypeForSizeAndSignedness<4, true>
-{ typedef int32_t  Type; };
+{
+  typedef int32_t Type;
+};
 
 template<>
 struct StdintTypeForSizeAndSignedness<4, false>
-{ typedef uint32_t Type; };
+{
+  typedef uint32_t Type;
+};
 
 template<>
 struct StdintTypeForSizeAndSignedness<8, true>
-{ typedef int64_t  Type; };
+{
+  typedef int64_t Type;
+};
 
 template<>
 struct StdintTypeForSizeAndSignedness<8, false>
-{ typedef uint64_t Type; };
+{
+  typedef uint64_t Type;
+};
 
 } // namespace detail
 
 template<size_t Size>
 struct UnsignedStdintTypeForSize
   : detail::StdintTypeForSizeAndSignedness<Size, false>
 {};
 
@@ -74,46 +90,46 @@ struct PositionOfSignBit
 /**
  * MinValue returns the minimum value of the given integer type as a
  * compile-time constant, which std::numeric_limits<IntegerType>::min()
  * cannot do in c++98.
  */
 template<typename IntegerType>
 struct MinValue
 {
-  private:
-    static_assert(IsIntegral<IntegerType>::value, "MinValue is only for integral types");
+private:
+  static_assert(IsIntegral<IntegerType>::value,
+                "MinValue is only for integral types");
 
-    typedef typename MakeUnsigned<IntegerType>::Type UnsignedIntegerType;
-    static const size_t PosOfSignBit = PositionOfSignBit<IntegerType>::value;
+  typedef typename MakeUnsigned<IntegerType>::Type UnsignedIntegerType;
+  static const size_t PosOfSignBit = PositionOfSignBit<IntegerType>::value;
 
-  public:
-    // Bitwise ops may return a larger type, that's why we cast explicitly.
-    // In C++, left bit shifts on signed values is undefined by the standard
-    // unless the shifted value is representable.
-    // Notice that signed-to-unsigned conversions are always well-defined in
-    // the standard as the value congruent to 2**n, as expected. By contrast,
-    // unsigned-to-signed is only well-defined if the value is representable.
-    static const IntegerType value =
-        IsSigned<IntegerType>::value
-        ? IntegerType(UnsignedIntegerType(1) << PosOfSignBit)
-        : IntegerType(0);
+public:
+  // Bitwise ops may return a larger type; that's why we cast explicitly.
+  // In C++, left bit shifts on signed values are undefined by the standard
+  // unless the shifted value is representable.
+  // Notice that signed-to-unsigned conversions are always well-defined in
+  // the standard as the value congruent modulo 2**n, as expected. By
+  // contrast, unsigned-to-signed is only well-defined if the value is
+  // representable.
+  static const IntegerType value =
+      IsSigned<IntegerType>::value
+      ? IntegerType(UnsignedIntegerType(1) << PosOfSignBit)
+      : IntegerType(0);
 };
 
 /**
  * MaxValue returns the maximum value of the given integer type as a
  * compile-time constant, which std::numeric_limits<IntegerType>::max()
  * cannot do in c++98.
  */
 template<typename IntegerType>
 struct MaxValue
 {
-    static_assert(IsIntegral<IntegerType>::value, "MaxValue is only for integral types");
+  static_assert(IsIntegral<IntegerType>::value,
+                "MaxValue is only for integral types");
 
-    // Tricksy, but covered by the CheckedInt unit test.
-    // Relies on the type of MinValue<IntegerType>::value
-    // being IntegerType.
-    static const IntegerType value = ~MinValue<IntegerType>::value;
+  // Tricksy, but covered by the CheckedInt unit test.
+  // Relies on the type of MinValue<IntegerType>::value
+  // being IntegerType.
+  static const IntegerType value = ~MinValue<IntegerType>::value;
 };
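A compile-time sketch of what these traits evaluate to, assuming the usual
two's-complement <stdint.h> types and that this header is included:

  static_assert(mozilla::MinValue<int8_t>::value == -128, "int8_t minimum");
  static_assert(mozilla::MaxValue<int8_t>::value == 127, "int8_t maximum");
  static_assert(mozilla::MinValue<uint32_t>::value == 0, "uint32_t minimum");
  static_assert(mozilla::MaxValue<uint32_t>::value == 0xFFFFFFFFu, "uint32_t maximum");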
 
 } // namespace mozilla
 
 #endif // mozilla_IntegerTypeTraits_h
--- a/mfbt/LinkedList.h
+++ b/mfbt/LinkedList.h
@@ -18,42 +18,46 @@
  * destruction, and a LinkedList will fatally assert in debug builds if it's
  * non-empty when it's destructed.
  *
  * For example, you might use LinkedList in a simple observer list class as
  * follows.
  *
  *   class Observer : public LinkedListElement<Observer>
  *   {
- *     public:
- *       void observe(char* topic) { ... }
+ *   public:
+ *     void observe(char* aTopic) { ... }
  *   };
  *
  *   class ObserverContainer
  *   {
- *     private:
- *       LinkedList<Observer> list;
+ *   private:
+ *     LinkedList<Observer> list;
  *
- *     public:
- *       void addObserver(Observer* observer) {
- *         // Will assert if |observer| is part of another list.
- *         list.insertBack(observer);
- *       }
+ *   public:
+ *     void addObserver(Observer* aObserver)
+ *     {
+ *       // Will assert if |aObserver| is part of another list.
+ *       list.insertBack(aObserver);
+ *     }
  *
- *       void removeObserver(Observer* observer) {
- *         // Will assert if |observer| is not part of some list.
- *         observer.remove();
- *         // Or, will assert if |observer| is not part of |list| specifically.
- *         // observer.removeFrom(list);
+ *     void removeObserver(Observer* aObserver)
+ *     {
+ *       // Will assert if |aObserver| is not part of some list.
+ *       aObserver->remove();
+ *       // Or, will assert if |aObserver| is not part of |list| specifically.
+ *       // aObserver->removeFrom(list);
+ *     }
+ *
+ *     void notifyObservers(char* aTopic)
+ *     {
+ *       for (Observer* o = list.getFirst(); o != nullptr; o = o->getNext()) {
+ *         o->observe(aTopic);
  *       }
- *
- *       void notifyObservers(char* topic) {
- *         for (Observer* o = list.getFirst(); o != nullptr; o = o->getNext())
- *           o->observe(topic);
- *       }
+ *     }
  *   };
  *
  */
 
 #ifndef mozilla_LinkedList_h
 #define mozilla_LinkedList_h
 
 #include "mozilla/Assertions.h"
@@ -67,420 +71,415 @@
 namespace mozilla {
 
 template<typename T>
 class LinkedList;
 
 template<typename T>
 class LinkedListElement
 {
-    /*
-     * It's convenient that we return nullptr when getNext() or getPrevious()
-     * hits the end of the list, but doing so costs an extra word of storage in
-     * each linked list node (to keep track of whether |this| is the sentinel
-     * node) and a branch on this value in getNext/getPrevious.
-     *
-     * We could get rid of the extra word of storage by shoving the "is
-     * sentinel" bit into one of the pointers, although this would, of course,
-     * have performance implications of its own.
-     *
-     * But the goal here isn't to win an award for the fastest or slimmest
-     * linked list; rather, we want a *convenient* linked list.  So we won't
-     * waste time guessing which micro-optimization strategy is best.
-     *
-     *
-     * Speaking of unnecessary work, it's worth addressing here why we wrote
-     * mozilla::LinkedList in the first place, instead of using stl::list.
-     *
-     * The key difference between mozilla::LinkedList and stl::list is that
-     * mozilla::LinkedList stores the prev/next pointers in the object itself,
-     * while stl::list stores the prev/next pointers in a list element which
-     * itself points to the object being stored.
-     *
-     * mozilla::LinkedList's approach makes it harder to store an object in more
-     * than one list.  But the upside is that you can call next() / prev() /
-     * remove() directly on the object.  With stl::list, you'd need to store a
-     * pointer to its iterator in the object in order to accomplish this.  Not
-     * only would this waste space, but you'd have to remember to update that
-     * pointer every time you added or removed the object from a list.
-     *
-     * In-place, constant-time removal is a killer feature of doubly-linked
-     * lists, and supporting this painlessly was a key design criterion.
-     */
+  /*
+   * It's convenient that we return nullptr when getNext() or getPrevious()
+   * hits the end of the list, but doing so costs an extra word of storage in
+   * each linked list node (to keep track of whether |this| is the sentinel
+   * node) and a branch on this value in getNext/getPrevious.
+   *
+   * We could get rid of the extra word of storage by shoving the "is
+   * sentinel" bit into one of the pointers, although this would, of course,
+   * have performance implications of its own.
+   *
+   * But the goal here isn't to win an award for the fastest or slimmest
+   * linked list; rather, we want a *convenient* linked list.  So we won't
+   * waste time guessing which micro-optimization strategy is best.
+   *
+   *
+   * Speaking of unnecessary work, it's worth addressing here why we wrote
+   * mozilla::LinkedList in the first place, instead of using std::list.
+   *
+   * The key difference between mozilla::LinkedList and std::list is that
+   * mozilla::LinkedList stores the mPrev/mNext pointers in the object itself,
+   * while std::list stores the mPrev/mNext pointers in a list element which
+   * itself points to the object being stored.
+   *
+   * mozilla::LinkedList's approach makes it harder to store an object in more
+   * than one list.  But the upside is that you can call next() / prev() /
+   * remove() directly on the object.  With std::list, you'd need to store a
+   * pointer to its iterator in the object in order to accomplish this.  Not
+   * only would this waste space, but you'd have to remember to update that
+   * pointer every time you added or removed the object from a list.
+   *
+   * In-place, constant-time removal is a killer feature of doubly-linked
+   * lists, and supporting this painlessly was a key design criterion.
+   */
 
-  private:
-    LinkedListElement* next;
-    LinkedListElement* prev;
-    const bool isSentinel;
-
-  public:
-    LinkedListElement()
-      : next(MOZ_THIS_IN_INITIALIZER_LIST()),
-        prev(MOZ_THIS_IN_INITIALIZER_LIST()),
-        isSentinel(false)
-    { }
+private:
+  LinkedListElement* mNext;
+  LinkedListElement* mPrev;
+  const bool mIsSentinel;
 
-    LinkedListElement(LinkedListElement<T>&& other)
-      : isSentinel(other.isSentinel)
-    {
-      if (!other.isInList()) {
-        next = this;
-        prev = this;
-        return;
-      }
-
-      MOZ_ASSERT(other.next->prev == &other);
-      MOZ_ASSERT(other.prev->next == &other);
+public:
+  LinkedListElement()
+    : mNext(MOZ_THIS_IN_INITIALIZER_LIST()),
+      mPrev(MOZ_THIS_IN_INITIALIZER_LIST()),
+      mIsSentinel(false)
+  { }
 
-      /*
-       * Initialize |this| with |other|'s prev/next pointers, and adjust those
-       * element to point to this one.
-       */
-      next = other.next;
-      prev = other.prev;
-
-      next->prev = this;
-      prev->next = this;
-
-      /*
-       * Adjust |other| so it doesn't think it's in a list.  This makes it
-       * safely destructable.
-       */
-      other.next = &other;
-      other.prev = &other;
+  LinkedListElement(LinkedListElement<T>&& other)
+    : mIsSentinel(other.mIsSentinel)
+  {
+    if (!other.isInList()) {
+      mNext = this;
+      mPrev = this;
+      return;
     }
 
-    ~LinkedListElement() {
-      if (!isSentinel && isInList())
-        remove();
-    }
+    MOZ_ASSERT(other.mNext->mPrev == &other);
+    MOZ_ASSERT(other.mPrev->mNext == &other);
 
     /*
-     * Get the next element in the list, or nullptr if this is the last element
-     * in the list.
+     * Initialize |this| with |other|'s mPrev/mNext pointers, and adjust those
+     * elements to point to this one.
      */
-    T* getNext() {
-      return next->asT();
-    }
-    const T* getNext() const {
-      return next->asT();
-    }
+    mNext = other.mNext;
+    mPrev = other.mPrev;
 
-    /*
-     * Get the previous element in the list, or nullptr if this is the first
-     * element in the list.
-     */
-    T* getPrevious() {
-      return prev->asT();
-    }
-    const T* getPrevious() const {
-      return prev->asT();
-    }
+    mNext->mPrev = this;
+    mPrev->mNext = this;
 
     /*
-     * Insert elem after this element in the list.  |this| must be part of a
-     * linked list when you call setNext(); otherwise, this method will assert.
+     * Adjust |other| so it doesn't think it's in a list.  This makes it
+     * safely destructible.
      */
-    void setNext(T* elem) {
-      MOZ_ASSERT(isInList());
-      setNextUnsafe(elem);
-    }
-
-    /*
-     * Insert elem before this element in the list.  |this| must be part of a
-     * linked list when you call setPrevious(); otherwise, this method will
-     * assert.
-     */
-    void setPrevious(T* elem) {
-      MOZ_ASSERT(isInList());
-      setPreviousUnsafe(elem);
-    }
+    other.mNext = &other;
+    other.mPrev = &other;
+  }
 
-    /*
-     * Remove this element from the list which contains it.  If this element is
-     * not currently part of a linked list, this method asserts.
-     */
-    void remove() {
-      MOZ_ASSERT(isInList());
-
-      prev->next = next;
-      next->prev = prev;
-      next = this;
-      prev = this;
-    }
-
-    /*
-     * Identical to remove(), but also asserts in debug builds that this element
-     * is in list.
-     */
-    void removeFrom(const LinkedList<T>& list) {
-      list.assertContains(asT());
+  ~LinkedListElement()
+  {
+    if (!mIsSentinel && isInList()) {
       remove();
     }
+  }
 
-    /*
-     * Return true if |this| part is of a linked list, and false otherwise.
-     */
-    bool isInList() const {
-      MOZ_ASSERT((next == this) == (prev == this));
-      return next != this;
-    }
+  /*
+   * Get the next element in the list, or nullptr if this is the last element
+   * in the list.
+   */
+  T* getNext()             { return mNext->asT(); }
+  const T* getNext() const { return mNext->asT(); }
 
-  private:
-    friend class LinkedList<T>;
+  /*
+   * Get the previous element in the list, or nullptr if this is the first
+   * element in the list.
+   */
+  T* getPrevious()             { return mPrev->asT(); }
+  const T* getPrevious() const { return mPrev->asT(); }
+
+  /*
+   * Insert aElem after this element in the list.  |this| must be part of a
+   * linked list when you call setNext(); otherwise, this method will assert.
+   */
+  void setNext(T* aElem)
+  {
+    MOZ_ASSERT(isInList());
+    setNextUnsafe(aElem);
+  }
 
-    enum NodeKind {
-      NODE_KIND_NORMAL,
-      NODE_KIND_SENTINEL
-    };
+  /*
+   * Insert aElem before this element in the list.  |this| must be part of a
+   * linked list when you call setPrevious(); otherwise, this method will
+   * assert.
+   */
+  void setPrevious(T* aElem)
+  {
+    MOZ_ASSERT(isInList());
+    setPreviousUnsafe(aElem);
+  }
 
-    explicit LinkedListElement(NodeKind nodeKind)
-      : next(MOZ_THIS_IN_INITIALIZER_LIST()),
-        prev(MOZ_THIS_IN_INITIALIZER_LIST()),
-        isSentinel(nodeKind == NODE_KIND_SENTINEL)
-    { }
+  /*
+   * Remove this element from the list which contains it.  If this element is
+   * not currently part of a linked list, this method asserts.
+   */
+  void remove()
+  {
+    MOZ_ASSERT(isInList());
 
-    /*
-     * Return |this| cast to T* if we're a normal node, or return nullptr if
-     * we're a sentinel node.
-     */
-    T* asT() {
-      if (isSentinel)
-        return nullptr;
+    mPrev->mNext = mNext;
+    mNext->mPrev = mPrev;
+    mNext = this;
+    mPrev = this;
+  }
+
+  /*
+   * Identical to remove(), but also asserts in debug builds that this element
+   * is in aList.
+   */
+  void removeFrom(const LinkedList<T>& aList)
+  {
+    aList.assertContains(asT());
+    remove();
+  }
 
-      return static_cast<T*>(this);
-    }
-    const T* asT() const {
-      if (isSentinel)
-        return nullptr;
+  /*
+   * Return true if |this| is part of a linked list, and false otherwise.
+   */
+  bool isInList() const
+  {
+    MOZ_ASSERT((mNext == this) == (mPrev == this));
+    return mNext != this;
+  }
 
-      return static_cast<const T*>(this);
-    }
+private:
+  friend class LinkedList<T>;
 
-    /*
-     * Insert elem after this element, but don't check that this element is in
-     * the list.  This is called by LinkedList::insertFront().
-     */
-    void setNextUnsafe(T* elem) {
-      LinkedListElement *listElem = static_cast<LinkedListElement*>(elem);
-      MOZ_ASSERT(!listElem->isInList());
+  enum NodeKind {
+    NODE_KIND_NORMAL,
+    NODE_KIND_SENTINEL
+  };
+
+  explicit LinkedListElement(NodeKind nodeKind)
+    : mNext(MOZ_THIS_IN_INITIALIZER_LIST()),
+      mPrev(MOZ_THIS_IN_INITIALIZER_LIST()),
+      mIsSentinel(nodeKind == NODE_KIND_SENTINEL)
+  { }
 
-      listElem->next = this->next;
-      listElem->prev = this;
-      this->next->prev = listElem;
-      this->next = listElem;
-    }
+  /*
+   * Return |this| cast to T* if we're a normal node, or return nullptr if
+   * we're a sentinel node.
+   */
+  T* asT()
+  {
+    return mIsSentinel ? nullptr : static_cast<T*>(this);
+  }
+  const T* asT() const
+  {
+    return mIsSentinel ? nullptr : static_cast<const T*>(this);
+  }
+
+  /*
+   * Insert aElem after this element, but don't check that this element is in
+   * the list.  This is called by LinkedList::insertFront().
+   */
+  void setNextUnsafe(T* aElem)
+  {
+    LinkedListElement *listElem = static_cast<LinkedListElement*>(aElem);
+    MOZ_ASSERT(!listElem->isInList());
 
-    /*
-     * Insert elem before this element, but don't check that this element is in
-     * the list.  This is called by LinkedList::insertBack().
-     */
-    void setPreviousUnsafe(T* elem) {
-      LinkedListElement<T>* listElem = static_cast<LinkedListElement<T>*>(elem);
-      MOZ_ASSERT(!listElem->isInList());
+    listElem->mNext = this->mNext;
+    listElem->mPrev = this;
+    this->mNext->mPrev = listElem;
+    this->mNext = listElem;
+  }
 
-      listElem->next = this;
-      listElem->prev = this->prev;
-      this->prev->next = listElem;
-      this->prev = listElem;
-    }
+  /*
+   * Insert aElem before this element, but don't check that this element is in
+   * the list.  This is called by LinkedList::insertBack().
+   */
+  void setPreviousUnsafe(T* aElem)
+  {
+    LinkedListElement<T>* listElem = static_cast<LinkedListElement<T>*>(aElem);
+    MOZ_ASSERT(!listElem->isInList());
 
-  private:
-    LinkedListElement& operator=(const LinkedListElement<T>& other) MOZ_DELETE;
-    LinkedListElement(const LinkedListElement<T>& other) MOZ_DELETE;
+    listElem->mNext = this;
+    listElem->mPrev = this->mPrev;
+    this->mPrev->mNext = listElem;
+    this->mPrev = listElem;
+  }
+
+private:
+  LinkedListElement& operator=(const LinkedListElement<T>& aOther) MOZ_DELETE;
+  LinkedListElement(const LinkedListElement<T>& aOther) MOZ_DELETE;
 };
 
 template<typename T>
 class LinkedList
 {
-  private:
-    LinkedListElement<T> sentinel;
+private:
+  LinkedListElement<T> sentinel;
+
+public:
+  LinkedList() : sentinel(LinkedListElement<T>::NODE_KIND_SENTINEL) { }
+
+  LinkedList(LinkedList<T>&& aOther)
+    : sentinel(mozilla::Move(aOther.sentinel))
+  { }
 
-  public:
-    LinkedList() : sentinel(LinkedListElement<T>::NODE_KIND_SENTINEL) { }
+  ~LinkedList() { MOZ_ASSERT(isEmpty()); }
+
+  /*
+   * Add aElem to the front of the list.
+   */
+  void insertFront(T* aElem)
+  {
+    /* Bypass setNext()'s this->isInList() assertion. */
+    sentinel.setNextUnsafe(aElem);
+  }
 
-    LinkedList(LinkedList<T>&& other)
-      : sentinel(mozilla::Move(other.sentinel))
-    { }
+  /*
+   * Add aElem to the back of the list.
+   */
+  void insertBack(T* aElem)
+  {
+    sentinel.setPreviousUnsafe(aElem);
+  }
+
+  /*
+   * Get the first element of the list, or nullptr if the list is empty.
+   */
+  T* getFirst()             { return sentinel.getNext(); }
+  const T* getFirst() const { return sentinel.getNext(); }
 
-    ~LinkedList() {
-      MOZ_ASSERT(isEmpty());
+  /*
+   * Get the last element of the list, or nullptr if the list is empty.
+   */
+  T* getLast()             { return sentinel.getPrevious(); }
+  const T* getLast() const { return sentinel.getPrevious(); }
+
+  /*
+   * Get and remove the first element of the list.  If the list is empty,
+   * return nullptr.
+   */
+  T* popFirst()
+  {
+    T* ret = sentinel.getNext();
+    if (ret) {
+      static_cast<LinkedListElement<T>*>(ret)->remove();
     }
+    return ret;
+  }
 
-    /*
-     * Add elem to the front of the list.
-     */
-    void insertFront(T* elem) {
-      /* Bypass setNext()'s this->isInList() assertion. */
-      sentinel.setNextUnsafe(elem);
+  /*
+   * Get and remove the last element of the list.  If the list is empty,
+   * return nullptr.
+   */
+  T* popLast()
+  {
+    T* ret = sentinel.getPrevious();
+    if (ret) {
+      static_cast<LinkedListElement<T>*>(ret)->remove();
     }
+    return ret;
+  }
+
+  /*
+   * Return true if the list is empty, or false otherwise.
+   */
+  bool isEmpty() const
+  {
+    return !sentinel.isInList();
+  }
+
+  /*
+   * Remove all the elements from the list.
+   *
+   * This runs in time linear to the list's length, because we have to mark
+   * each element as not in the list.
+   */
+  void clear()
+  {
+    while (popFirst()) {
+      continue;
+    }
+  }
 
-    /*
-     * Add elem to the back of the list.
-     */
-    void insertBack(T* elem) {
-      sentinel.setPreviousUnsafe(elem);
+  /*
+   * Measures the memory consumption of the list excluding |this|.  Note that
+   * it only measures the list elements themselves.  If the list elements
+   * contain pointers to other memory blocks, those blocks must be measured
+   * separately during a subsequent iteration over the list.
+   */
+  size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+  {
+    size_t n = 0;
+    for (const T* t = getFirst(); t; t = t->getNext()) {
+      n += aMallocSizeOf(t);
     }
+    return n;
+  }
 
-    /*
-     * Get the first element of the list, or nullptr if the list is empty.
-     */
-    T* getFirst() {
-      return sentinel.getNext();
-    }
-    const T* getFirst() const {
-      return sentinel.getNext();
-    }
+  /*
+   * Like sizeOfExcludingThis(), but measures |this| as well.
+   */
+  size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
+  {
+    return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
+  }
+
+  /*
+   * In a debug build, make sure that the list is sane (no cycles, consistent
+   * mNext/mPrev pointers, only one sentinel).  Has no effect in release builds.
+   */
+  void debugAssertIsSane() const
+  {
+#ifdef DEBUG
+    const LinkedListElement<T>* slow;
+    const LinkedListElement<T>* fast1;
+    const LinkedListElement<T>* fast2;
 
     /*
-     * Get the last element of the list, or nullptr if the list is empty.
+     * Check for cycles in the forward singly-linked list using the
+     * tortoise/hare algorithm.
      */
-    T* getLast() {
-      return sentinel.getPrevious();
-    }
-    const T* getLast() const {
-      return sentinel.getPrevious();
-    }
-
-    /*
-     * Get and remove the first element of the list.  If the list is empty,
-     * return nullptr.
-     */
-    T* popFirst() {
-      T* ret = sentinel.getNext();
-      if (ret)
-        static_cast<LinkedListElement<T>*>(ret)->remove();
-      return ret;
+    for (slow = sentinel.mNext,
+         fast1 = sentinel.mNext->mNext,
+         fast2 = sentinel.mNext->mNext->mNext;
+         slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
+         slow = slow->mNext, fast1 = fast2->mNext, fast2 = fast1->mNext) {
+      MOZ_ASSERT(slow != fast1);
+      MOZ_ASSERT(slow != fast2);
     }
 
-    /*
-     * Get and remove the last element of the list.  If the list is empty,
-     * return nullptr.
-     */
-    T* popLast() {
-      T* ret = sentinel.getPrevious();
-      if (ret)
-        static_cast<LinkedListElement<T>*>(ret)->remove();
-      return ret;
-    }
-
-    /*
-     * Return true if the list is empty, or false otherwise.
-     */
-    bool isEmpty() const {
-      return !sentinel.isInList();
-    }
-
-    /*
-     * Remove all the elements from the list.
-     *
-     * This runs in time linear to the list's length, because we have to mark
-     * each element as not in the list.
-     */
-    void clear() {
-      while (popFirst())
-        continue;
+    /* Check for cycles in the backward singly-linked list. */
+    for (slow = sentinel.mPrev,
+         fast1 = sentinel.mPrev->mPrev,
+         fast2 = sentinel.mPrev->mPrev->mPrev;
+         slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
+         slow = slow->mPrev, fast1 = fast2->mPrev, fast2 = fast1->mPrev) {
+      MOZ_ASSERT(slow != fast1);
+      MOZ_ASSERT(slow != fast2);
     }
 
     /*
-     * Measures the memory consumption of the list excluding |this|.  Note that
-     * it only measures the list elements themselves.  If the list elements
-     * contain pointers to other memory blocks, those blocks must be measured
-     * separately during a subsequent iteration over the list.
+     * Check that |sentinel| is the only node in the list with
+     * mIsSentinel == true.
      */
-    size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
-      size_t n = 0;
-      for (const T* t = getFirst(); t; t = t->getNext())
-        n += mallocSizeOf(t);
-      return n;
-    }
-
-    /*
-     * Like sizeOfExcludingThis(), but measures |this| as well.
-     */
-    size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const {
-      return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+    for (const LinkedListElement<T>* elem = sentinel.mNext;
+         elem != &sentinel;
+         elem = elem->mNext) {
+      MOZ_ASSERT(!elem->mIsSentinel);
     }
 
-    /*
-     * In a debug build, make sure that the list is sane (no cycles, consistent
-     * next/prev pointers, only one sentinel).  Has no effect in release builds.
-     */
-    void debugAssertIsSane() const {
-#ifdef DEBUG
-      const LinkedListElement<T>* slow;
-      const LinkedListElement<T>* fast1;
-      const LinkedListElement<T>* fast2;
+    /* Check that the mNext/mPrev pointers match up. */
+    const LinkedListElement<T>* prev = &sentinel;
+    const LinkedListElement<T>* cur = sentinel.mNext;
+    do {
+      MOZ_ASSERT(cur->mPrev == prev);
+      MOZ_ASSERT(prev->mNext == cur);
 
-      /*
-       * Check for cycles in the forward singly-linked list using the
-       * tortoise/hare algorithm.
-       */
-      for (slow = sentinel.next,
-           fast1 = sentinel.next->next,
-           fast2 = sentinel.next->next->next;
-           slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
-           slow = slow->next, fast1 = fast2->next, fast2 = fast1->next)
-      {
-        MOZ_ASSERT(slow != fast1);
-        MOZ_ASSERT(slow != fast2);
-      }
-
-      /* Check for cycles in the backward singly-linked list. */
-      for (slow = sentinel.prev,
-           fast1 = sentinel.prev->prev,
-           fast2 = sentinel.prev->prev->prev;
-           slow != &sentinel && fast1 != &sentinel && fast2 != &sentinel;
-           slow = slow->prev, fast1 = fast2->prev, fast2 = fast1->prev)
-      {
-        MOZ_ASSERT(slow != fast1);
-        MOZ_ASSERT(slow != fast2);
-      }
+      prev = cur;
+      cur = cur->mNext;
+    } while (cur != &sentinel);
+#endif /* ifdef DEBUG */
+  }
 
-      /*
-       * Check that |sentinel| is the only node in the list with
-       * isSentinel == true.
-       */
-      for (const LinkedListElement<T>* elem = sentinel.next;
-           elem != &sentinel;
-           elem = elem->next)
-      {
-        MOZ_ASSERT(!elem->isSentinel);
-      }
-
-      /* Check that the next/prev pointers match up. */
-      const LinkedListElement<T>* prev = &sentinel;
-      const LinkedListElement<T>* cur = sentinel.next;
-      do {
-          MOZ_ASSERT(cur->prev == prev);
-          MOZ_ASSERT(prev->next == cur);
+private:
+  friend class LinkedListElement<T>;
 
-          prev = cur;
-          cur = cur->next;
-      } while (cur != &sentinel);
-#endif /* ifdef DEBUG */
-    }
-
-  private:
-    friend class LinkedListElement<T>;
-
-    void assertContains(const T* t) const {
+  void assertContains(const T* aValue) const {
 #ifdef DEBUG
-      for (const T* elem = getFirst();
-           elem;
-           elem = elem->getNext())
-      {
-        if (elem == t)
-          return;
+    for (const T* elem = getFirst(); elem; elem = elem->getNext()) {
+      if (elem == aValue) {
+        return;
       }
-      MOZ_CRASH("element wasn't found in this list!");
+    }
+    MOZ_CRASH("element wasn't found in this list!");
 #endif
-    }
+  }
 
-    LinkedList& operator=(const LinkedList<T>& other) MOZ_DELETE;
-    LinkedList(const LinkedList<T>& other) MOZ_DELETE;
+  LinkedList& operator=(const LinkedList<T>& aOther) MOZ_DELETE;
+  LinkedList(const LinkedList<T>& aOther) MOZ_DELETE;
 };
 
 } /* namespace mozilla */
 
 #endif /* __cplusplus */
 
 #endif /* mozilla_LinkedList_h */
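
The cycle check in debugAssertIsSane() above is Floyd's tortoise/hare walk: one pointer advances one link per step, another advances two, and they can only meet if the chain loops back on itself. A minimal standalone sketch of that idea (the Node type and HasCycle() are illustrative names, not part of MFBT):

#include <assert.h>
#include <stddef.h>

/*
 * A self-contained model of the forward cycle check: |slow| advances one
 * link per iteration, |fast| advances two, and they can only meet if the
 * mNext chain loops back on itself.
 */
struct Node
{
  Node* mNext;
  Node() : mNext(nullptr) {}
};

static bool
HasCycle(Node* aStart)
{
  Node* slow = aStart;
  Node* fast = aStart ? aStart->mNext : nullptr;
  while (fast && fast->mNext) {
    if (slow == fast) {
      return true;              /* the hare caught the tortoise: a cycle */
    }
    slow = slow->mNext;
    fast = fast->mNext->mNext;
  }
  return false;                 /* the hare fell off the end: no cycle */
}

int
main()
{
  Node a, b, c;
  a.mNext = &b;
  b.mNext = &c;
  assert(!HasCycle(&a));
  c.mNext = &a;                 /* close the loop */
  assert(HasCycle(&a));
  return 0;
}
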
--- a/mfbt/MSIntTypes.h
+++ b/mfbt/MSIntTypes.h
@@ -1,37 +1,37 @@
 // ISO C9x  compliant inttypes.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 
-// 
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
 //  Copyright (c) 2006 Alexander Chemeris
-// 
+//
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are met:
-// 
+//
 //   1. Redistributions of source code must retain the above copyright notice,
 //      this list of conditions and the following disclaimer.
-// 
+//
 //   2. Redistributions in binary form must reproduce the above copyright
 //      notice, this list of conditions and the following disclaimer in the
 //      documentation and/or other materials provided with the distribution.
-// 
+//
 //   3. The name of the author may be used to endorse or promote products
 //      derived from this software without specific prior written permission.
-// 
+//
 // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// 
+//
 ///////////////////////////////////////////////////////////////////////////////
 
 #ifndef _MSC_VER // [
 #error "Use this header only with Microsoft Visual C++ compilers!"
 #endif // _MSC_VER ]
 
 #ifndef _MSC_INTTYPES_H_ // [
 #define _MSC_INTTYPES_H_
--- a/mfbt/MathAlgorithms.h
+++ b/mfbt/MathAlgorithms.h
@@ -16,41 +16,41 @@
 #include <limits.h>
 #include <stdint.h>
 
 namespace mozilla {
 
 // Greatest Common Divisor
 template<typename IntegerType>
 MOZ_ALWAYS_INLINE IntegerType
-EuclidGCD(IntegerType a, IntegerType b)
+EuclidGCD(IntegerType aA, IntegerType aB)
 {
   // Euclid's algorithm; O(N) in the worst case.  (There are better
   // ways, but we don't need them for the current use of this algo.)
-  MOZ_ASSERT(a > IntegerType(0));
-  MOZ_ASSERT(b > IntegerType(0));
+  MOZ_ASSERT(aA > IntegerType(0));
+  MOZ_ASSERT(aB > IntegerType(0));
 
-  while (a != b) {
-    if (a > b) {
-      a = a - b;
+  while (aA != aB) {
+    if (aA > aB) {
+      aA = aA - aB;
     } else {
-      b = b - a;
+      aB = aB - aA;
     }
   }
 
-  return a;
+  return aA;
 }
 
 // Least Common Multiple
 template<typename IntegerType>
 MOZ_ALWAYS_INLINE IntegerType
-EuclidLCM(IntegerType a, IntegerType b)
+EuclidLCM(IntegerType aA, IntegerType aB)
 {
   // Divide first to reduce overflow risk.
-  return (a / EuclidGCD(a, b)) * b;
+  return (aA / EuclidGCD(aA, aB)) * aB;
 }
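
As a quick sanity check of the loops above: EuclidGCD(12, 18) steps through (12, 18) -> (12, 6) -> (6, 6) and returns 6, and EuclidLCM divides before multiplying so the intermediate product stays small. A standalone sketch of the same arithmetic:

#include <assert.h>
#include <stdint.h>

/* The same subtraction-based Euclid loop, restated so the steps can be
 * followed by hand: Gcd(12, 18) -> (12, 6) -> (6, 6) -> 6. */
static uint32_t
Gcd(uint32_t aA, uint32_t aB)
{
  while (aA != aB) {
    if (aA > aB) {
      aA -= aB;
    } else {
      aB -= aA;
    }
  }
  return aA;
}

int
main()
{
  assert(Gcd(12, 18) == 6);
  /* LCM with the divide-first trick used by EuclidLCM: (12 / 6) * 18 == 36,
   * and dividing first keeps the intermediate product small. */
  assert((12 / Gcd(12, 18)) * 18 == 36);
  return 0;
}
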
 
 namespace detail {
 
 template<typename T>
 struct AllowDeprecatedAbsFixed : FalseType {};
 
 template<> struct AllowDeprecatedAbsFixed<int32_t> : TrueType {};
@@ -63,31 +63,31 @@ template<> struct AllowDeprecatedAbs<int
 template<> struct AllowDeprecatedAbs<long> : TrueType {};
 
 } // namespace detail
 
 // DO NOT USE DeprecatedAbs.  It exists only until its callers can be converted
 // to Abs below, and it will be removed when all callers have been changed.
 template<typename T>
 inline typename mozilla::EnableIf<detail::AllowDeprecatedAbs<T>::value, T>::Type
-DeprecatedAbs(const T t)
+DeprecatedAbs(const T aValue)
 {
   // The absolute value of the smallest possible value of a signed-integer type
   // won't fit in that type (on twos-complement systems -- and we're blithely
   // assuming we're on such systems, for the non-<stdint.h> types listed above),
   // so assert that the input isn't that value.
   //
   // This is the case if: the value is non-negative; or if adding one (giving a
   // value in the range [-maxvalue, 0]), then negating (giving a value in the
   // range [0, maxvalue]), doesn't produce maxvalue (because in twos-complement,
   // (minvalue + 1) == -maxvalue).
-  MOZ_ASSERT(t >= 0 ||
-             -(t + 1) != T((1ULL << (CHAR_BIT * sizeof(T) - 1)) - 1),
+  MOZ_ASSERT(aValue >= 0 ||
+             -(aValue + 1) != T((1ULL << (CHAR_BIT * sizeof(T) - 1)) - 1),
              "You can't negate the smallest possible negative integer!");
-  return t >= 0 ? t : -t;
+  return aValue >= 0 ? aValue : -aValue;
 }
 
 namespace detail {
 
 // For now mozilla::Abs only takes intN_T, the signed natural types, and
 // float/double/long double.  Feel free to add overloads for other standard,
 // signed types if you need them.
 
@@ -111,41 +111,41 @@ template<> struct AbsReturnType<long lon
 template<> struct AbsReturnType<float> { typedef float Type; };
 template<> struct AbsReturnType<double> { typedef double Type; };
 template<> struct AbsReturnType<long double> { typedef long double Type; };
 
 } // namespace detail
 
 template<typename T>
 inline typename detail::AbsReturnType<T>::Type
-Abs(const T t)
+Abs(const T aValue)
 {
   typedef typename detail::AbsReturnType<T>::Type ReturnType;
-  return t >= 0 ? ReturnType(t) : ~ReturnType(t) + 1;
+  return aValue >= 0 ? ReturnType(aValue) : ~ReturnType(aValue) + 1;
 }
 
 template<>
 inline float
-Abs<float>(const float f)
+Abs<float>(const float aFloat)
 {
-  return std::fabs(f);
+  return std::fabs(aFloat);
 }
 
 template<>
 inline double
-Abs<double>(const double d)
+Abs<double>(const double aDouble)
 {
-  return std::fabs(d);
+  return std::fabs(aDouble);
 }
 
 template<>
 inline long double
-Abs<long double>(const long double d)
+Abs<long double>(const long double aLongDouble)
 {
-  return std::fabs(d);
+  return std::fabs(aLongDouble);
 }
 
 } // namespace mozilla
 
 #if defined(_WIN32) && (_MSC_VER >= 1300) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
 #  define MOZ_BITSCAN_WINDOWS
 
 #  include <intrin.h>
@@ -159,315 +159,321 @@ Abs<long double>(const long double d)
 #endif
 
 namespace mozilla {
 
 namespace detail {
 
 #if defined(MOZ_BITSCAN_WINDOWS)
 
-  inline uint_fast8_t
-  CountLeadingZeroes32(uint32_t u)
-  {
-    unsigned long index;
-    _BitScanReverse(&index, static_cast<unsigned long>(u));
-    return uint_fast8_t(31 - index);
-  }
+inline uint_fast8_t
+CountLeadingZeroes32(uint32_t aValue)
+{
+  unsigned long index;
+  _BitScanReverse(&index, static_cast<unsigned long>(aValue));
+  return uint_fast8_t(31 - index);
+}
 
 
-  inline uint_fast8_t
-  CountTrailingZeroes32(uint32_t u)
-  {
-    unsigned long index;
-    _BitScanForward(&index, static_cast<unsigned long>(u));
-    return uint_fast8_t(index);
-  }
+inline uint_fast8_t
+CountTrailingZeroes32(uint32_t aValue)
+{
+  unsigned long index;
+  _BitScanForward(&index, static_cast<unsigned long>(aValue));
+  return uint_fast8_t(index);
+}
 
-  inline uint_fast8_t
-  CountPopulation32(uint32_t u)
-  {
-    uint32_t x = u - ((u >> 1) & 0x55555555);
-    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
-    return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
-  }
+inline uint_fast8_t
+CountPopulation32(uint32_t aValue)
+{
+  uint32_t x = aValue - ((aValue >> 1) & 0x55555555);
+  x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+  return (((x + (x >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
+}
 
-  inline uint_fast8_t
-  CountLeadingZeroes64(uint64_t u)
-  {
-#  if defined(MOZ_BITSCAN_WINDOWS64)
-    unsigned long index;
-    _BitScanReverse64(&index, static_cast<unsigned __int64>(u));
-    return uint_fast8_t(63 - index);
-#  else
-    uint32_t hi = uint32_t(u >> 32);
-    if (hi != 0)
-      return CountLeadingZeroes32(hi);
-    return 32u + CountLeadingZeroes32(uint32_t(u));
-#  endif
+inline uint_fast8_t
+CountLeadingZeroes64(uint64_t aValue)
+{
+#if defined(MOZ_BITSCAN_WINDOWS64)
+  unsigned long index;
+  _BitScanReverse64(&index, static_cast<unsigned __int64>(aValue));
+  return uint_fast8_t(63 - index);
+#else
+  uint32_t hi = uint32_t(aValue >> 32);
+  if (hi != 0) {
+    return CountLeadingZeroes32(hi);
   }
+  return 32u + CountLeadingZeroes32(uint32_t(aValue));
+#endif
+}
 
-  inline uint_fast8_t
-  CountTrailingZeroes64(uint64_t u)
-  {
-#  if defined(MOZ_BITSCAN_WINDOWS64)
-    unsigned long index;
-    _BitScanForward64(&index, static_cast<unsigned __int64>(u));
-    return uint_fast8_t(index);
-#  else
-    uint32_t lo = uint32_t(u);
-    if (lo != 0)
-      return CountTrailingZeroes32(lo);
-    return 32u + CountTrailingZeroes32(uint32_t(u >> 32));
-#  endif
+inline uint_fast8_t
+CountTrailingZeroes64(uint64_t aValue)
+{
+#if defined(MOZ_BITSCAN_WINDOWS64)
+  unsigned long index;
+  _BitScanForward64(&index, static_cast<unsigned __int64>(aValue));
+  return uint_fast8_t(index);
+#else
+  uint32_t lo = uint32_t(aValue);
+  if (lo != 0) {
+    return CountTrailingZeroes32(lo);
   }
+  return 32u + CountTrailingZeroes32(uint32_t(aValue >> 32));
+#endif
+}
 
 #  ifdef MOZ_HAVE_BITSCAN64
 #    undef MOZ_HAVE_BITSCAN64
 #  endif
 
 #elif defined(__clang__) || defined(__GNUC__)
 
 #  if defined(__clang__)
 #    if !__has_builtin(__builtin_ctz) || !__has_builtin(__builtin_clz)
 #      error "A clang providing __builtin_c[lt]z is required to build"
 #    endif
 #  else
      // gcc has had __builtin_clz and friends since 3.4: no need to check.
 #  endif
 
-  inline uint_fast8_t
-  CountLeadingZeroes32(uint32_t u)
-  {
-    return __builtin_clz(u);
-  }
+inline uint_fast8_t
+CountLeadingZeroes32(uint32_t aValue)
+{
+  return __builtin_clz(aValue);
+}
 
-  inline uint_fast8_t
-  CountTrailingZeroes32(uint32_t u)
-  {
-    return __builtin_ctz(u);
-  }
+inline uint_fast8_t
+CountTrailingZeroes32(uint32_t aValue)
+{
+  return __builtin_ctz(aValue);
+}
 
-  inline uint_fast8_t
-  CountPopulation32(uint32_t u)
-  {
-    return __builtin_popcount(u);
-  }
+inline uint_fast8_t
+CountPopulation32(uint32_t aValue)
+{
+  return __builtin_popcount(aValue);
+}
 
-  inline uint_fast8_t
-  CountLeadingZeroes64(uint64_t u)
-  {
-    return __builtin_clzll(u);
-  }
+inline uint_fast8_t
+CountLeadingZeroes64(uint64_t aValue)
+{
+  return __builtin_clzll(aValue);
+}
 
-  inline uint_fast8_t
-  CountTrailingZeroes64(uint64_t u)
-  {
-    return __builtin_ctzll(u);
-  }
+inline uint_fast8_t
+CountTrailingZeroes64(uint64_t aValue)
+{
+  return __builtin_ctzll(aValue);
+}
 
 #else
 #  error "Implement these!"
-  inline uint_fast8_t CountLeadingZeroes32(uint32_t u) MOZ_DELETE;
-  inline uint_fast8_t CountTrailingZeroes32(uint32_t u) MOZ_DELETE;
-  inline uint_fast8_t CountPopulation32(uint32_t u) MOZ_DELETE;
-  inline uint_fast8_t CountLeadingZeroes64(uint64_t u) MOZ_DELETE;
-  inline uint_fast8_t CountTrailingZeroes64(uint64_t u) MOZ_DELETE;
+inline uint_fast8_t CountLeadingZeroes32(uint32_t aValue) MOZ_DELETE;
+inline uint_fast8_t CountTrailingZeroes32(uint32_t aValue) MOZ_DELETE;
+inline uint_fast8_t CountPopulation32(uint32_t aValue) MOZ_DELETE;
+inline uint_fast8_t CountLeadingZeroes64(uint64_t aValue) MOZ_DELETE;
+inline uint_fast8_t CountTrailingZeroes64(uint64_t aValue) MOZ_DELETE;
 #endif
 
 } // namespace detail
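
The #error branch above deliberately leaves the fallback unwritten. Purely for illustration, a portable software version of CountLeadingZeroes32 could look like the sketch below; this is an assumption about what such a fallback might do, not MFBT code:

#include <assert.h>
#include <stdint.h>

/*
 * Shift the value left until its top bit is set, counting as we go.  Like
 * the real functions, this requires a non-zero input.
 */
static int
PortableCountLeadingZeroes32(uint32_t aValue)
{
  int zeroes = 0;
  while (!(aValue & 0x80000000u)) {
    zeroes++;
    aValue <<= 1;
  }
  return zeroes;
}

int
main()
{
  assert(PortableCountLeadingZeroes32(0xF0FF1000u) == 0);
  assert(PortableCountLeadingZeroes32(0x7F8F0001u) == 1);
  assert(PortableCountLeadingZeroes32(0x00000001u) == 31);
  return 0;
}
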
 
 /**
- * Compute the number of high-order zero bits in the NON-ZERO number |u|.  That
- * is, looking at the bitwise representation of the number, with the highest-
- * valued bits at the start, return the number of zeroes before the first one
- * is observed.
+ * Compute the number of high-order zero bits in the NON-ZERO number |aValue|.
+ * That is, looking at the bitwise representation of the number, with the
+ * highest-valued bits at the start, return the number of zeroes before the
+ * first one is observed.
  *
  * CountLeadingZeroes32(0xF0FF1000) is 0;
  * CountLeadingZeroes32(0x7F8F0001) is 1;
  * CountLeadingZeroes32(0x3FFF0100) is 2;
  * CountLeadingZeroes32(0x1FF50010) is 3; and so on.
  */
 inline uint_fast8_t
-CountLeadingZeroes32(uint32_t u)
+CountLeadingZeroes32(uint32_t aValue)
 {
-  MOZ_ASSERT(u != 0);
-  return detail::CountLeadingZeroes32(u);
+  MOZ_ASSERT(aValue != 0);
+  return detail::CountLeadingZeroes32(aValue);
 }
 
 /**
- * Compute the number of low-order zero bits in the NON-ZERO number |u|.  That
- * is, looking at the bitwise representation of the number, with the lowest-
- * valued bits at the start, return the number of zeroes before the first one
- * is observed.
+ * Compute the number of low-order zero bits in the NON-ZERO number |aValue|.
+ * That is, looking at the bitwise representation of the number, with the
+ * lowest-valued bits at the start, return the number of zeroes before the
+ * first one is observed.
  *
  * CountTrailingZeroes32(0x0100FFFF) is 0;
  * CountTrailingZeroes32(0x7000FFFE) is 1;
  * CountTrailingZeroes32(0x0080FFFC) is 2;
  * CountTrailingZeroes32(0x0080FFF8) is 3; and so on.
  */
 inline uint_fast8_t
-CountTrailingZeroes32(uint32_t u)
+CountTrailingZeroes32(uint32_t aValue)
 {
-  MOZ_ASSERT(u != 0);
-  return detail::CountTrailingZeroes32(u);
+  MOZ_ASSERT(aValue != 0);
+  return detail::CountTrailingZeroes32(aValue);
 }
 
 /**
- * Compute the number of one bits in the number |u|,
+ * Compute the number of one bits in the number |aValue|.
  */
 inline uint_fast8_t
-CountPopulation32(uint32_t u)
+CountPopulation32(uint32_t aValue)
 {
-  return detail::CountPopulation32(u);
+  return detail::CountPopulation32(aValue);
 }
 
 /** Analogous to CountLeadingZeroes32, but for 64-bit numbers. */
 inline uint_fast8_t
-CountLeadingZeroes64(uint64_t u)
+CountLeadingZeroes64(uint64_t aValue)
 {
-  MOZ_ASSERT(u != 0);
-  return detail::CountLeadingZeroes64(u);
+  MOZ_ASSERT(aValue != 0);
+  return detail::CountLeadingZeroes64(aValue);
 }
 
 /** Analogous to CountTrailingZeroes32, but for 64-bit numbers. */
 inline uint_fast8_t
-CountTrailingZeroes64(uint64_t u)
+CountTrailingZeroes64(uint64_t aValue)
 {
-  MOZ_ASSERT(u != 0);
-  return detail::CountTrailingZeroes64(u);
+  MOZ_ASSERT(aValue != 0);
+  return detail::CountTrailingZeroes64(aValue);
 }
 
 namespace detail {
 
 template<typename T, size_t Size = sizeof(T)>
 class CeilingLog2;
 
 template<typename T>
 class CeilingLog2<T, 4>
 {
-  public:
-    static uint_fast8_t compute(const T t) {
-      // Check for <= 1 to avoid the == 0 undefined case.
-      return t <= 1 ? 0u : 32u - CountLeadingZeroes32(t - 1);
-    }
+public:
+  static uint_fast8_t compute(const T aValue)
+  {
+    // Check for <= 1 to avoid the == 0 undefined case.
+    return aValue <= 1 ? 0u : 32u - CountLeadingZeroes32(aValue - 1);
+  }
 };
 
 template<typename T>
 class CeilingLog2<T, 8>
 {
-  public:
-    static uint_fast8_t compute(const T t) {
-      // Check for <= 1 to avoid the == 0 undefined case.
-      return t <= 1 ? 0 : 64 - CountLeadingZeroes64(t - 1);
-    }
+public:
+  static uint_fast8_t compute(const T aValue)
+  {
+    // Check for <= 1 to avoid the == 0 undefined case.
+    return aValue <= 1 ? 0 : 64 - CountLeadingZeroes64(aValue - 1);
+  }
 };
 
 } // namespace detail
 
 /**
- * Compute the log of the least power of 2 greater than or equal to |t|.
+ * Compute the log of the least power of 2 greater than or equal to |aValue|.
  *
  * CeilingLog2(0..1) is 0;
  * CeilingLog2(2) is 1;
  * CeilingLog2(3..4) is 2;
  * CeilingLog2(5..8) is 3;
  * CeilingLog2(9..16) is 4; and so on.
  */
 template<typename T>
 inline uint_fast8_t
-CeilingLog2(const T t)
+CeilingLog2(const T aValue)
 {
-  return detail::CeilingLog2<T>::compute(t);
+  return detail::CeilingLog2<T>::compute(aValue);
 }
 
 /** A CeilingLog2 variant that accepts only size_t. */
 inline uint_fast8_t
-CeilingLog2Size(size_t n)
+CeilingLog2Size(size_t aValue)
 {
-  return CeilingLog2(n);
+  return CeilingLog2(aValue);
 }
 
 namespace detail {
 
 template<typename T, size_t Size = sizeof(T)>
 class FloorLog2;
 
 template<typename T>
 class FloorLog2<T, 4>
 {
-  public:
-    static uint_fast8_t compute(const T t) {
-      return 31u - CountLeadingZeroes32(t | 1);
-    }
+public:
+  static uint_fast8_t compute(const T aValue)
+  {
+    return 31u - CountLeadingZeroes32(aValue | 1);
+  }
 };
 
 template<typename T>
 class FloorLog2<T, 8>
 {
-  public:
-    static uint_fast8_t compute(const T t) {
-      return 63u - CountLeadingZeroes64(t | 1);
-    }
+public:
+  static uint_fast8_t compute(const T aValue)
+  {
+    return 63u - CountLeadingZeroes64(aValue | 1);
+  }
 };
 
 } // namespace detail
 
 /**
- * Compute the log of the greatest power of 2 less than or equal to |t|.
+ * Compute the log of the greatest power of 2 less than or equal to |aValue|.
  *
  * FloorLog2(0..1) is 0;
  * FloorLog2(2..3) is 1;
  * FloorLog2(4..7) is 2;
  * FloorLog2(8..15) is 3; and so on.
  */
 template<typename T>
 inline uint_fast8_t
-FloorLog2(const T t)
+FloorLog2(const T aValue)
 {
-  return detail::FloorLog2<T>::compute(t);
+  return detail::FloorLog2<T>::compute(aValue);
 }
 
 /** A FloorLog2 variant that accepts only size_t. */
 inline uint_fast8_t
-FloorLog2Size(size_t n)
+FloorLog2Size(size_t aValue)
 {
-  return FloorLog2(n);
+  return FloorLog2(aValue);
 }
 
 /*
  * Compute the smallest power of 2 greater than or equal to |x|.  |x| must not
  * be so great that the computed value would overflow |size_t|.
  */
 inline size_t
-RoundUpPow2(size_t x)
+RoundUpPow2(size_t aValue)
 {
-  MOZ_ASSERT(x <= (size_t(1) << (sizeof(size_t) * CHAR_BIT - 1)),
+  MOZ_ASSERT(aValue <= (size_t(1) << (sizeof(size_t) * CHAR_BIT - 1)),
              "can't round up -- will overflow!");
-  return size_t(1) << CeilingLog2(x);
+  return size_t(1) << CeilingLog2(aValue);
 }
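
CeilingLog2, FloorLog2, and RoundUpPow2 fit together as the comments above describe: for a non-power-of-two the floor and ceiling differ by one, and RoundUpPow2 is just 1 << CeilingLog2. A small usage sketch, assuming mozilla/MathAlgorithms.h is on the include path:

#include "mozilla/MathAlgorithms.h"

#include <assert.h>

int
main()
{
  /* For a non-power-of-two, floor and ceiling straddle it: 2^2 <= 5 < 2^3. */
  assert(mozilla::FloorLog2(5u) == 2);
  assert(mozilla::CeilingLog2(5u) == 3);

  /* RoundUpPow2 is 1 << CeilingLog2, so powers of two map to themselves. */
  assert(mozilla::RoundUpPow2(5) == 8);
  assert(mozilla::RoundUpPow2(8) == 8);
  return 0;
}
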
 
 /**
  * Rotates the bits of the given value left by the amount of the shift width.
  */
 template<typename T>
 inline T
-RotateLeft(const T t, uint_fast8_t shift)
+RotateLeft(const T aValue, uint_fast8_t aShift)
 {
-  MOZ_ASSERT(shift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
+  MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
   static_assert(IsUnsigned<T>::value, "Rotates require unsigned values");
-  return (t << shift) | (t >> (sizeof(T) * CHAR_BIT - shift));
+  return (aValue << aShift) | (aValue >> (sizeof(T) * CHAR_BIT - aShift));
 }
 
 /**
  * Rotates the bits of the given value right by the amount of the shift width.
  */
 template<typename T>
 inline T
-RotateRight(const T t, uint_fast8_t shift)
+RotateRight(const T aValue, uint_fast8_t aShift)
 {
-  MOZ_ASSERT(shift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
+  MOZ_ASSERT(aShift < sizeof(T) * CHAR_BIT, "Shift value is too large!");
   static_assert(IsUnsigned<T>::value, "Rotates require unsigned values");
-  return (t >> shift) | (t << (sizeof(T) * CHAR_BIT - shift));
+  return (aValue >> aShift) | (aValue << (sizeof(T) * CHAR_BIT - aShift));
 }
 
 } /* namespace mozilla */
 
 #endif /* mozilla_MathAlgorithms_h */
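
The rotate helpers are short, but the wrap-around direction is easy to get backwards; a quick check, again assuming an MFBT include path:

#include "mozilla/MathAlgorithms.h"

#include <assert.h>
#include <stdint.h>

int
main()
{
  uint32_t x = 0x80000001u;

  /* Rotating left by one wraps the top bit around to the bottom. */
  assert(mozilla::RotateLeft(x, 1) == 0x00000003u);

  /* RotateRight with the same shift undoes RotateLeft. */
  assert(mozilla::RotateRight(mozilla::RotateLeft(x, 5), 5) == x);
  return 0;
}
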
--- a/mfbt/Maybe.h
+++ b/mfbt/Maybe.h
@@ -26,138 +26,157 @@ namespace mozilla {
  * is destroyed.
  *
  * N.B. GCC seems to miss some optimizations with Maybe and may generate extra
  * branches/loads/stores. Use with caution on hot paths.
  */
 template<class T>
 class Maybe
 {
-    AlignedStorage2<T> storage;
-    bool constructed;
+  AlignedStorage2<T> storage;
+  bool constructed;
 
-    T& asT() { return *storage.addr(); }
+  T& asT() { return *storage.addr(); }
 
-  public:
-    Maybe() { constructed = false; }
-    ~Maybe() { if (constructed) asT().~T(); }
+public:
+  Maybe() { constructed = false; }
+  ~Maybe() { if (constructed) { asT().~T(); } }
 
-    bool empty() const { return !constructed; }
+  bool empty() const { return !constructed; }
 
-    void construct() {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T();
-      constructed = true;
-    }
+  void construct()
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T();
+    constructed = true;
+  }
 
-    template<class T1>
-    void construct(const T1& t1) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1);
-      constructed = true;
-    }
+  template<class T1>
+  void construct(const T1& aT1)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1);
+    constructed = true;
+  }
+
+  template<class T1, class T2>
+  void construct(const T1& aT1, const T2& aT2)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2);
+    constructed = true;
+  }
 
-    template<class T1, class T2>
-    void construct(const T1& t1, const T2& t2) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3);
+    constructed = true;
+  }
 
-    template<class T1, class T2, class T3>
-    void construct(const T1& t1, const T2& t2, const T3& t3) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3, class T4>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3, aT4);
+    constructed = true;
+  }
 
-    template<class T1, class T2, class T3, class T4>
-    void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3, t4);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3, class T4, class T5>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
+                 const T5& aT5)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5);
+    constructed = true;
+  }
 
-    template<class T1, class T2, class T3, class T4, class T5>
-    void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3, t4, t5);
-      constructed = true;
-    }
-
-    template<class T1, class T2, class T3, class T4, class T5,
-             class T6>
-    void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
-                   const T6& t6) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3, t4, t5, t6);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3, class T4, class T5, class T6>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
+                 const T5& aT5, const T6& aT6)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6);
+    constructed = true;
+  }
 
-    template<class T1, class T2, class T3, class T4, class T5,
-             class T6, class T7>
-    void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
-                   const T6& t6, const T7& t7) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3, class T4, class T5, class T6,
+           class T7>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
+                 const T5& aT5, const T6& aT6, const T7& aT7)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7);
+    constructed = true;
+  }
 
-    template<class T1, class T2, class T3, class T4, class T5,
-             class T6, class T7, class T8>
-    void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
-                   const T6& t6, const T7& t7, const T8& t8) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7, t8);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3, class T4, class T5, class T6,
+           class T7, class T8>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
+                 const T5& aT5, const T6& aT6, const T7& aT7, const T8& aT8)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7, aT8);
+    constructed = true;
+  }
 
-    template<class T1, class T2, class T3, class T4, class T5,
-             class T6, class T7, class T8, class T9>
-    void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
-                   const T6& t6, const T7& t7, const T8& t8, const T9& t9) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7, t8, t9);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3, class T4, class T5, class T6,
+           class T7, class T8, class T9>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
+                 const T5& aT5, const T6& aT6, const T7& aT7, const T8& aT8,
+                 const T9& aT9)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7, aT8, aT9);
+    constructed = true;
+  }
 
-    template<class T1, class T2, class T3, class T4, class T5,
-             class T6, class T7, class T8, class T9, class T10>
-    void construct(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5,
-                   const T6& t6, const T7& t7, const T8& t8, const T9& t9, const T10& t10) {
-      MOZ_ASSERT(!constructed);
-      ::new (storage.addr()) T(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10);
-      constructed = true;
-    }
+  template<class T1, class T2, class T3, class T4, class T5, class T6,
+           class T7, class T8, class T9, class T10>
+  void construct(const T1& aT1, const T2& aT2, const T3& aT3, const T4& aT4,
+                 const T5& aT5, const T6& aT6, const T7& aT7, const T8& aT8,
+                 const T9& aT9, const T10& aT10)
+  {
+    MOZ_ASSERT(!constructed);
+    ::new (storage.addr()) T(aT1, aT2, aT3, aT4, aT5, aT6, aT7, aT8, aT9, aT10);
+    constructed = true;
+  }
 
-    T* addr() {
-      MOZ_ASSERT(constructed);
-      return &asT();
-    }
+  T* addr()
+  {
+    MOZ_ASSERT(constructed);
+    return &asT();
+  }
 
-    T& ref() {
-      MOZ_ASSERT(constructed);
-      return asT();
-    }
+  T& ref()
+  {
+    MOZ_ASSERT(constructed);
+    return asT();
+  }
 
-    const T& ref() const {
-      MOZ_ASSERT(constructed);
-      return const_cast<Maybe*>(this)->asT();
-    }
+  const T& ref() const
+  {
+    MOZ_ASSERT(constructed);
+    return const_cast<Maybe*>(this)->asT();
+  }
 
-    void destroy() {
-      ref().~T();
-      constructed = false;
+  void destroy()
+  {
+    ref().~T();
+    constructed = false;
+  }
+
+  void destroyIfConstructed()
+  {
+    if (!empty()) {
+      destroy();
     }
+  }
 
-    void destroyIfConstructed() {
-      if (!empty())
-        destroy();
-    }
-
-  private:
-    Maybe(const Maybe& other) MOZ_DELETE;
-    const Maybe& operator=(const Maybe& other) MOZ_DELETE;
+private:
+  Maybe(const Maybe& aOther) MOZ_DELETE;
+  const Maybe& operator=(const Maybe& aOther) MOZ_DELETE;
 };
 
 } // namespace mozilla
 
 #endif /* mozilla_Maybe_h */
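
Maybe<T> reserves aligned storage up front and constructs the T only when asked, which is why construct(), ref(), and destroy() all assert on the |constructed| flag. A usage sketch, assuming an MFBT include path; the std::string payload is just an example:

#include "mozilla/Maybe.h"

#include <assert.h>
#include <string>

int
main()
{
  /* Storage for the string is reserved here, but nothing is constructed. */
  mozilla::Maybe<std::string> maybeName;
  assert(maybeName.empty());

  maybeName.construct("Gecko");   /* forwards to std::string(const char*) */
  assert(!maybeName.empty());
  assert(maybeName.ref() == "Gecko");

  maybeName.destroy();            /* runs ~std::string(); storage remains */
  assert(maybeName.empty());
  return 0;
}
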
--- a/mfbt/MaybeOneOf.h
+++ b/mfbt/MaybeOneOf.h
@@ -41,80 +41,90 @@ class MaybeOneOf
   }
 
   template <class T>
   const T& as() const {
     MOZ_ASSERT(state == Type2State<T>::result);
     return *(T*)storage.addr();
   }
 
- public:
+public:
   MaybeOneOf() : state(None) {}
   ~MaybeOneOf() { destroyIfConstructed(); }
 
   bool empty() const { return state == None; }
 
   template <class T>
   bool constructed() const { return state == Type2State<T>::result; }
 
   template <class T>
-  void construct() {
+  void construct()
+  {
     MOZ_ASSERT(state == None);
     state = Type2State<T>::result;
     ::new (storage.addr()) T();
   }
 
   template <class T, class U>
-  void construct(U&& u) {
+  void construct(U&& aU)
+  {
     MOZ_ASSERT(state == None);
     state = Type2State<T>::result;
-    ::new (storage.addr()) T(Move(u));
+    ::new (storage.addr()) T(Move(aU));
   }
 
   template <class T, class U1>
-  void construct(const U1& u1) {
+  void construct(const U1& aU1)
+  {
     MOZ_ASSERT(state == None);
     state = Type2State<T>::result;
-    ::new (storage.addr()) T(u1);
+    ::new (storage.addr()) T(aU1);
   }
 
   template <class T, class U1, class U2>
-  void construct(const U1& u1, const U2& u2) {
+  void construct(const U1& aU1, const U2& aU2)
+  {
     MOZ_ASSERT(state == None);
     state = Type2State<T>::result;
-    ::new (storage.addr()) T(u1, u2);
+    ::new (storage.addr()) T(aU1, aU2);
   }
 
   template <class T>
-  T& ref() {
+  T& ref()
+  {
     return as<T>();
   }
 
   template <class T>
-  const T& ref() const {
+  const T& ref() const
+  {
     return as<T>();
   }
 
-  void destroy() {
+  void destroy()
+  {
     MOZ_ASSERT(state == SomeT1 || state == SomeT2);
-    if (state == SomeT1)
+    if (state == SomeT1) {
       as<T1>().~T1();
-    else if (state == SomeT2)
+    } else if (state == SomeT2) {
       as<T2>().~T2();
+    }
     state = None;
   }
 
-  void destroyIfConstructed() {
-    if (!empty())
+  void destroyIfConstructed()
+  {
+    if (!empty()) {
       destroy();
+    }
   }
 
-  private:
-    MaybeOneOf(const MaybeOneOf& other) MOZ_DELETE;
-    const MaybeOneOf& operator=(const MaybeOneOf& other) MOZ_DELETE;
+private:
+  MaybeOneOf(const MaybeOneOf& aOther) MOZ_DELETE;
+  const MaybeOneOf& operator=(const MaybeOneOf& aOther) MOZ_DELETE;
 };
 
 template <class T1, class T2>
 template <class Ignored>
 struct MaybeOneOf<T1, T2>::Type2State<T1, Ignored> {
   typedef MaybeOneOf<T1, T2> Enclosing;
   static const typename Enclosing::State result = Enclosing::SomeT1;
 };
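
MaybeOneOf<T1, T2> is the two-type variant of the same idea: one block of storage, a state tag, and construct<T>()/ref<T>() keyed on the type. A usage sketch, assuming an MFBT include path:

#include "mozilla/MaybeOneOf.h"

#include <assert.h>
#include <string>

int
main()
{
  /* One block of storage that holds either an int or a std::string. */
  mozilla::MaybeOneOf<int, std::string> value;
  assert(value.empty());

  value.construct<int>(42);
  assert(value.constructed<int>());
  assert(value.ref<int>() == 42);
  value.destroy();

  value.construct<std::string>(std::string("either/or"));
  assert(value.ref<std::string>() == "either/or");
  value.destroyIfConstructed();
  return 0;
}
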
--- a/mfbt/MemoryChecking.h
+++ b/mfbt/MemoryChecking.h
@@ -1,16 +1,16 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 /*
- * Provides a common interface to the ASan (AddressSanitizer) and Valgrind 
+ * Provides a common interface to the ASan (AddressSanitizer) and Valgrind
  * functions used to mark memory in certain ways. In detail, the following
  * three macros are provided:
  *
  *   MOZ_MAKE_MEM_NOACCESS  - Mark memory as unsafe to access (e.g. freed)
  *   MOZ_MAKE_MEM_UNDEFINED - Mark memory as accessible, with content undefined
  *   MOZ_MAKE_MEM_DEFINED - Mark memory as accessible, with content defined
  *
  * With Valgrind in use, these directly map to the three respective Valgrind
@@ -30,23 +30,23 @@
 #if defined(MOZ_ASAN) || defined(MOZ_VALGRIND)
 #define MOZ_HAVE_MEM_CHECKS 1
 #endif
 
 #if defined(MOZ_ASAN)
 #include <stddef.h>
 
 extern "C" {
-  /* These definitions are usually provided through the 
-   * sanitizer/asan_interface.h header installed by ASan.
-   */
-  void __asan_poison_memory_region(void const volatile *addr, size_t size)
-    __attribute__((visibility("default")));
-  void __asan_unpoison_memory_region(void const volatile *addr, size_t size)
-    __attribute__((visibility("default")));
+/* These definitions are usually provided through the
+ * sanitizer/asan_interface.h header installed by ASan.
+ */
+void __asan_poison_memory_region(void const volatile *addr, size_t size)
+  __attribute__((visibility("default")));
+void __asan_unpoison_memory_region(void const volatile *addr, size_t size)
+  __attribute__((visibility("default")));
 
 #define MOZ_MAKE_MEM_NOACCESS(addr, size) \
   __asan_poison_memory_region((addr), (size))
 
 #define MOZ_MAKE_MEM_UNDEFINED(addr, size) \
   __asan_unpoison_memory_region((addr), (size))
 
 #define MOZ_MAKE_MEM_DEFINED(addr, size) \
--- a/mfbt/Move.h
+++ b/mfbt/Move.h
@@ -205,46 +205,46 @@ namespace mozilla {
  */
 
 /**
  * Identical to std::Move(); this is necessary until our stlport supports
  * std::move().
  */
 template<typename T>
 inline typename RemoveReference<T>::Type&&
-Move(T&& a)
+Move(T&& aX)
 {
-  return static_cast<typename RemoveReference<T>::Type&&>(a);
+  return static_cast<typename RemoveReference<T>::Type&&>(aX);
 }
 
 /**
  * These two overloads are identical to std::forward(); they are necessary until
  * our stlport supports std::forward().
  */
 template<typename T>
 inline T&&
-Forward(typename RemoveReference<T>::Type& a)
+Forward(typename RemoveReference<T>::Type& aX)
 {
-  return static_cast<T&&>(a);
+  return static_cast<T&&>(aX);
 }
 
 template<typename T>
 inline T&&
-Forward(typename RemoveReference<T>::Type&& t)
+Forward(typename RemoveReference<T>::Type&& aX)
 {
   static_assert(!IsLvalueReference<T>::value,
                 "misuse of Forward detected!  try the other overload");
-  return static_cast<T&&>(t);
+  return static_cast<T&&>(aX);
 }
 
-/** Swap |t| and |u| using move-construction if possible. */
+/** Swap |aX| and |aY| using move-construction if possible. */
 template<typename T>
 inline void
-Swap(T& t, T& u)
+Swap(T& aX, T& aY)
 {
-  T tmp(Move(t));
-  t = Move(u);
-  u = Move(tmp);
+  T tmp(Move(aX));
+  aX = Move(aY);
+  aY = Move(tmp);
 }
 
 } // namespace mozilla
 
 #endif /* mozilla_Move_h */
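
Move() is only a cast; the actual move happens in whatever constructor or assignment receives the rvalue, and Swap() routes through one temporary exactly as spelled out above. A small sketch, assuming mozilla/Move.h is on the include path:

#include "mozilla/Move.h"

#include <assert.h>
#include <string>

int
main()
{
  std::string a("first");
  std::string b("second");

  /* Swap() moves through one temporary instead of copying. */
  mozilla::Swap(a, b);
  assert(a == "second");
  assert(b == "first");

  /* Move() only casts; the std::string move constructor does the work. */
  std::string c(mozilla::Move(a));
  assert(c == "second");
  return 0;
}
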
--- a/mfbt/NullPtr.h
+++ b/mfbt/NullPtr.h
@@ -87,26 +87,26 @@ struct IsNullPointer { static const bool
  * to be as an argument type for operator overloads (because C++ doesn't allow
  * operator= to have more than one argument, operator== to have more than two,
  * &c.).  But even in such cases, it really only works if there are no other
  * overloads of the operator that accept a pointer type.  If you want both T*
  * and nullptr_t overloads, you'll have to wait til we drop gcc 4.4/4.5 support.
  * (Currently b2g is the only impediment to this.)
  */
 #ifdef MOZ_HAVE_CXX11_NULLPTR
-  // decltype does the right thing for actual nullptr.
-  namespace mozilla {
-  typedef decltype(nullptr) NullptrT;
-  template<>
-  struct IsNullPointer<decltype(nullptr)> { static const bool value = true; };
-  }
+// decltype does the right thing for actual nullptr.
+namespace mozilla {
+typedef decltype(nullptr) NullptrT;
+template<>
+struct IsNullPointer<decltype(nullptr)> { static const bool value = true; };
+}
 #  undef MOZ_HAVE_CXX11_NULLPTR
 #elif MOZ_IS_GCC
 #  define nullptr __null
-  // void* sweeps up more than just nullptr, but compilers supporting true
-  // nullptr are the majority now, so they should detect mistakes.  If you're
-  // feeling paranoid, check/assert that your NullptrT equals nullptr.
-  namespace mozilla { typedef void* NullptrT; }
+// void* sweeps up more than just nullptr, but compilers supporting true
+// nullptr are the majority now, so they should detect mistakes.  If you're
+// feeling paranoid, check/assert that your NullptrT equals nullptr.
+namespace mozilla { typedef void* NullptrT; }
 #else
 #  error "No compiler support for nullptr or its emulation."
 #endif
 
 #endif /* mozilla_NullPtr_h */
--- a/mfbt/NumericLimits.h
+++ b/mfbt/NumericLimits.h
@@ -12,20 +12,20 @@
 #include "mozilla/Char16.h"
 
 #include <limits>
 #include <stdint.h>
 
 namespace mozilla {
 
 /**
- * The NumericLimits class provides a compatibility layer with std::numeric_limits
- * for char16_t, otherwise it is exactly the same as std::numeric_limits.
- * Code which does not need std::numeric_limits<char16_t> should avoid using
- * NumericLimits.
+ * The NumericLimits class provides a compatibility layer with
+ * std::numeric_limits for char16_t, otherwise it is exactly the same as
+ * std::numeric_limits.  Code which does not need std::numeric_limits<char16_t>
+ * should avoid using NumericLimits.
  */
 template<typename T>
 class NumericLimits : public std::numeric_limits<T>
 {
 };
 
 #ifdef MOZ_CHAR16_IS_NOT_WCHAR
 template<>
--- a/mfbt/PodOperations.h
+++ b/mfbt/PodOperations.h
@@ -19,164 +19,175 @@
 #include "mozilla/ArrayUtils.h"
 #include "mozilla/Attributes.h"
 
 #include <stdint.h>
 #include <string.h>
 
 namespace mozilla {
 
-/** Set the contents of |t| to 0. */
+/** Set the contents of |aT| to 0. */
 template<typename T>
 static MOZ_ALWAYS_INLINE void
-PodZero(T* t)
+PodZero(T* aT)
 {
-  memset(t, 0, sizeof(T));
+  memset(aT, 0, sizeof(T));
 }
 
-/** Set the contents of |nelem| elements starting at |t| to 0. */
+/** Set the contents of |aNElem| elements starting at |aT| to 0. */
 template<typename T>
 static MOZ_ALWAYS_INLINE void
-PodZero(T* t, size_t nelem)
+PodZero(T* aT, size_t aNElem)
 {
   /*
-   * This function is often called with 'nelem' small; we use an inline loop
+   * This function is often called with 'aNElem' small; we use an inline loop
    * instead of calling 'memset' with a non-constant length.  The compiler
    * should inline the memset call with constant size, though.
    */
-  for (T* end = t + nelem; t < end; t++)
-    memset(t, 0, sizeof(T));
+  for (T* end = aT + aNElem; aT < end; aT++) {
+    memset(aT, 0, sizeof(T));
+  }
 }
 
 /*
  * Arrays implicitly convert to pointers to their first element, which is
  * dangerous when combined with the above PodZero definitions.  Adding an
  * overload for arrays is ambiguous, so we need another identifier.  The
  * ambiguous overload is left to catch mistaken uses of PodZero; if you get a
  * compile error involving PodZero and array types, use PodArrayZero instead.
  */
 template<typename T, size_t N>
-static void PodZero(T (&t)[N]) MOZ_DELETE;
+static void PodZero(T (&aT)[N]) MOZ_DELETE;
 template<typename T, size_t N>
-static void PodZero(T (&t)[N], size_t nelem) MOZ_DELETE;
+static void PodZero(T (&aT)[N], size_t aNElem) MOZ_DELETE;
 
-/** Set the contents of the array |t| to zero. */
+/** Set the contents of the array |aT| to zero. */
 template <class T, size_t N>
 static MOZ_ALWAYS_INLINE void
-PodArrayZero(T (&t)[N])
+PodArrayZero(T (&aT)[N])
 {
-  memset(t, 0, N * sizeof(T));
+  memset(aT, 0, N * sizeof(T));
 }
 
 template <typename T, size_t N>
 static MOZ_ALWAYS_INLINE void
-PodArrayZero(Array<T, N>& arr)
+PodArrayZero(Array<T, N>& aArr)
 {
-  memset(&arr[0], 0, N * sizeof(T));
+  memset(&aArr[0], 0, N * sizeof(T));
 }
 
 /**
- * Assign |*src| to |*dst|.  The locations must not be the same and must not
+ * Assign |*aSrc| to |*aDst|.  The locations must not be the same and must not
  * overlap.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE void
-PodAssign(T* dst, const T* src)
+PodAssign(T* aDst, const T* aSrc)
 {
-  MOZ_ASSERT(dst != src);
-  MOZ_ASSERT_IF(src < dst, PointerRangeSize(src, static_cast<const T*>(dst)) >= 1);
-  MOZ_ASSERT_IF(dst < src, PointerRangeSize(static_cast<const T*>(dst), src) >= 1);
-  memcpy(reinterpret_cast<char*>(dst), reinterpret_cast<const char*>(src), sizeof(T));
+  MOZ_ASSERT(aDst != aSrc);
+  MOZ_ASSERT_IF(aSrc < aDst,
+                PointerRangeSize(aSrc, static_cast<const T*>(aDst)) >= 1);
+  MOZ_ASSERT_IF(aDst < aSrc,
+                PointerRangeSize(static_cast<const T*>(aDst), aSrc) >= 1);
+  memcpy(reinterpret_cast<char*>(aDst), reinterpret_cast<const char*>(aSrc),
+         sizeof(T));
 }
 
 /**
- * Copy |nelem| T elements from |src| to |dst|.  The two memory ranges must not
- * overlap!
+ * Copy |aNElem| T elements from |aSrc| to |aDst|.  The two memory ranges must
+ * not overlap!
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE void
-PodCopy(T* dst, const T* src, size_t nelem)
+PodCopy(T* aDst, const T* aSrc, size_t aNElem)
 {
-  MOZ_ASSERT(dst != src);
-  MOZ_ASSERT_IF(src < dst, PointerRangeSize(src, static_cast<const T*>(dst)) >= nelem);
-  MOZ_ASSERT_IF(dst < src, PointerRangeSize(static_cast<const T*>(dst), src) >= nelem);
+  MOZ_ASSERT(aDst != aSrc);
+  MOZ_ASSERT_IF(aSrc < aDst,
+                PointerRangeSize(aSrc, static_cast<const T*>(aDst)) >= aNElem);
+  MOZ_ASSERT_IF(aDst < aSrc,
+                PointerRangeSize(static_cast<const T*>(aDst), aSrc) >= aNElem);
 
-  if (nelem < 128) {
+  if (aNElem < 128) {
     /*
      * Avoid using operator= in this loop, as it may have been
      * intentionally deleted by the POD type.
      */
-    for (const T* srcend = src + nelem; src < srcend; src++, dst++)
-      PodAssign(dst, src);
+    for (const T* srcend = aSrc + aNElem; aSrc < srcend; aSrc++, aDst++) {
+      PodAssign(aDst, aSrc);
+    }
   } else {
-    memcpy(dst, src, nelem * sizeof(T));
+    memcpy(aDst, aSrc, aNElem * sizeof(T));
   }
 }
 
 template<typename T>
 static MOZ_ALWAYS_INLINE void
-PodCopy(volatile T* dst, const volatile T* src, size_t nelem)
+PodCopy(volatile T* aDst, const volatile T* aSrc, size_t aNElem)
 {
-  MOZ_ASSERT(dst != src);
-  MOZ_ASSERT_IF(src < dst,
-                PointerRangeSize(src, static_cast<const volatile T*>(dst)) >= nelem);
-  MOZ_ASSERT_IF(dst < src,
-                PointerRangeSize(static_cast<const volatile T*>(dst), src) >= nelem);
+  MOZ_ASSERT(aDst != aSrc);
+  MOZ_ASSERT_IF(aSrc < aDst,
+    PointerRangeSize(aSrc, static_cast<const volatile T*>(aDst)) >= aNElem);
+  MOZ_ASSERT_IF(aDst < aSrc,
+    PointerRangeSize(static_cast<const volatile T*>(aDst), aSrc) >= aNElem);
 
   /*
-   * Volatile |dst| requires extra work, because it's undefined behavior to
+   * Volatile |aDst| requires extra work, because it's undefined behavior to
    * modify volatile objects using the mem* functions.  Just write out the
    * loops manually, using operator= rather than memcpy for the same reason,
    * and let the compiler optimize to the extent it can.
    */
-  for (const volatile T* srcend = src + nelem; src < srcend; src++, dst++)
-    *dst = *src;
+  for (const volatile T* srcend = aSrc + aNElem;
+       aSrc < srcend;
+       aSrc++, aDst++) {
+    *aDst = *aSrc;
+  }
 }
 
 /*
- * Copy the contents of the array |src| into the array |dst|, both of size N.
+ * Copy the contents of the array |aSrc| into the array |aDst|, both of size N.
  * The arrays must not overlap!
  */
 template <class T, size_t N>
 static MOZ_ALWAYS_INLINE void
-PodArrayCopy(T (&dst)[N], const T (&src)[N])
+PodArrayCopy(T (&aDst)[N], const T (&aSrc)[N])
 {
-  PodCopy(dst, src, N);
+  PodCopy(aDst, aSrc, N);
 }
 
 /**
- * Copy the memory for |nelem| T elements from |src| to |dst|.  If the two
- * memory ranges overlap, then the effect is as if the |nelem| elements are
- * first copied from |src| to a temporary array, and then from the temporary
- * array to |dst|.
+ * Copy the memory for |aNElem| T elements from |aSrc| to |aDst|.  If the two
+ * memory ranges overlap, then the effect is as if the |aNElem| elements are
+ * first copied from |aSrc| to a temporary array, and then from the temporary
+ * array to |aDst|.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE void
-PodMove(T* dst, const T* src, size_t nelem)
+PodMove(T* aDst, const T* aSrc, size_t aNElem)
 {
-  MOZ_ASSERT(nelem <= SIZE_MAX / sizeof(T),
+  MOZ_ASSERT(aNElem <= SIZE_MAX / sizeof(T),
              "trying to move an impossible number of elements");
-  memmove(dst, src, nelem * sizeof(T));
+  memmove(aDst, aSrc, aNElem * sizeof(T));
 }
 
 /**
  * Determine whether the |len| elements at |one| are memory-identical to the
  * |len| elements at |two|.
  */
 template<typename T>
 static MOZ_ALWAYS_INLINE bool
 PodEqual(const T* one, const T* two, size_t len)
 {
   if (len < 128) {
     const T* p1end = one + len;
     const T* p1 = one;
     const T* p2 = two;
     for (; p1 < p1end; p1++, p2++) {
-      if (*p1 != *p2)
+      if (*p1 != *p2) {
         return false;
+      }
     }
     return true;
   }
 
   return !memcmp(one, two, len * sizeof(T));
 }
 
 } // namespace mozilla
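
The Pod* helpers are thin wrappers over memset/memcpy/memmove with overlap assertions, so the main thing to keep straight is which ones tolerate overlapping ranges (only PodMove does). A usage sketch, assuming an MFBT include path:

#include "mozilla/PodOperations.h"

#include <assert.h>
#include <stdint.h>

int
main()
{
  uint32_t src[4] = { 1, 2, 3, 4 };
  uint32_t dst[4];

  mozilla::PodArrayZero(dst);          /* dst == {0, 0, 0, 0} */
  assert(dst[3] == 0);

  mozilla::PodCopy(dst, src, 4);       /* ranges must not overlap */
  assert(mozilla::PodEqual(dst, src, 4));

  mozilla::PodMove(src + 1, src, 3);   /* overlap is fine for PodMove */
  assert(src[1] == 1 && src[2] == 2 && src[3] == 3);
  return 0;
}
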
--- a/mfbt/Poison.cpp
+++ b/mfbt/Poison.cpp
@@ -37,34 +37,34 @@ uintptr_t gMozillaPoisonSize;
 // space, or to a page that has been reserved and rendered
 // inaccessible via OS primitives.  See tests/TestPoisonArea.cpp for
 // extensive discussion of the requirements for this page.  The code
 // from here to 'class FreeList' needs to be kept in sync with that
 // file.
 
 #ifdef _WIN32
 static void *
-ReserveRegion(uintptr_t region, uintptr_t size)
+ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
 {
-  return VirtualAlloc((void *)region, size, MEM_RESERVE, PAGE_NOACCESS);
+  return VirtualAlloc((void *)aRegion, aSize, MEM_RESERVE, PAGE_NOACCESS);
 }
 
 static void
-ReleaseRegion(void *region, uintptr_t size)
+ReleaseRegion(void *aRegion, uintptr_t aSize)
 {
-  VirtualFree(region, size, MEM_RELEASE);
+  VirtualFree(aRegion, aSize, MEM_RELEASE);
 }
 
 static bool
-ProbeRegion(uintptr_t region, uintptr_t size)
+ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
 {
   SYSTEM_INFO sinfo;
   GetSystemInfo(&sinfo);
-  if (region >= (uintptr_t)sinfo.lpMaximumApplicationAddress &&
-      region + size >= (uintptr_t)sinfo.lpMaximumApplicationAddress) {
+  if (aRegion >= (uintptr_t)sinfo.lpMaximumApplicationAddress &&
+      aRegion + aSize >= (uintptr_t)sinfo.lpMaximumApplicationAddress) {
     return true;
   } else {
     return false;
   }
 }
 
 static uintptr_t
 GetDesiredRegionSize()
@@ -73,31 +73,31 @@ GetDesiredRegionSize()
   GetSystemInfo(&sinfo);
   return sinfo.dwAllocationGranularity;
 }
 
 #define RESERVE_FAILED 0
 
 #elif defined(__OS2__)
 static void *
-ReserveRegion(uintptr_t region, uintptr_t size)
+ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
 {
   // OS/2 doesn't support allocation at an arbitrary address,
   // so return an address that is known to be invalid.
   return (void*)0xFFFD0000;
 }
 
 static void
-ReleaseRegion(void *region, uintptr_t size)
+ReleaseRegion(void *aRegion, uintptr_t aSize)
 {
   return;
 }
 
 static bool
-ProbeRegion(uintptr_t region, uintptr_t size)
+ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
 {
   // There's no reliable way to probe an address in the system
   // arena other than by touching it and seeing if a trap occurs.
   return false;
 }
 
 static uintptr_t
 GetDesiredRegionSize()
@@ -108,31 +108,33 @@ GetDesiredRegionSize()
 
 #define RESERVE_FAILED 0
 
 #else // Unix
 
 #include "mozilla/TaggedAnonymousMemory.h"
 
 static void *
-ReserveRegion(uintptr_t region, uintptr_t size)
+ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
 {
-  return MozTaggedAnonymousMmap(reinterpret_cast<void*>(region), size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0, "poison");
+  return MozTaggedAnonymousMmap(reinterpret_cast<void*>(aRegion), aSize,
+                                PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0,
+                                "poison");
 }
 
 static void
-ReleaseRegion(void *region, uintptr_t size)
+ReleaseRegion(void *aRegion, uintptr_t aSize)
 {
-  munmap(region, size);
+  munmap(aRegion, aSize);
 }
 
 static bool
-ProbeRegion(uintptr_t region, uintptr_t size)
+ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
 {
-  if (madvise(reinterpret_cast<void*>(region), size, MADV_NORMAL)) {
+  if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
     return true;
   } else {
     return false;
   }
 }
 
 static uintptr_t
 GetDesiredRegionSize()
@@ -152,57 +154,57 @@ ReservePoisonArea(uintptr_t rgnsize)
 {
   if (sizeof(uintptr_t) == 8) {
     // Use the hardware-inaccessible region.
     // We have to avoid 64-bit constants and shifts by 32 bits, since this
     // code is compiled in 32-bit mode, although it is never executed there.
     return
       (((uintptr_t(0x7FFFFFFFu) << 31) << 1 | uintptr_t(0xF0DEAFFFu))
        & ~(rgnsize-1));
+  }
 
-  } else {
-    // First see if we can allocate the preferred poison address from the OS.
-    uintptr_t candidate = (0xF0DEAFFF & ~(rgnsize-1));
-    void *result = ReserveRegion(candidate, rgnsize);
-    if (result == (void *)candidate) {
-      // success - inaccessible page allocated
-      return candidate;
-    }
+  // First see if we can allocate the preferred poison address from the OS.
+  uintptr_t candidate = (0xF0DEAFFF & ~(rgnsize-1));
+  void *result = ReserveRegion(candidate, rgnsize);
+  if (result == (void *)candidate) {
+    // success - inaccessible page allocated
+    return candidate;
+  }
 
-    // That didn't work, so see if the preferred address is within a range
-    // of permanently inacessible memory.
-    if (ProbeRegion(candidate, rgnsize)) {
-      // success - selected page cannot be usable memory
-      if (result != RESERVE_FAILED)
-        ReleaseRegion(result, rgnsize);
-      return candidate;
+  // That didn't work, so see if the preferred address is within a range
+  // of permanently inacessible memory.
+  if (ProbeRegion(candidate, rgnsize)) {
+    // success - selected page cannot be usable memory
+    if (result != RESERVE_FAILED) {
+      ReleaseRegion(result, rgnsize);
     }
+    return candidate;
+  }
 
-    // The preferred address is already in use.  Did the OS give us a
-    // consolation prize?
-    if (result != RESERVE_FAILED) {
-      return uintptr_t(result);
-    }
+  // The preferred address is already in use.  Did the OS give us a
+  // consolation prize?
+  if (result != RESERVE_FAILED) {
+    return uintptr_t(result);
+  }
 
-    // It didn't, so try to allocate again, without any constraint on
-    // the address.
-    result = ReserveRegion(0, rgnsize);
-    if (result != RESERVE_FAILED) {
-      return uintptr_t(result);
-    }
+  // It didn't, so try to allocate again, without any constraint on
+  // the address.
+  result = ReserveRegion(0, rgnsize);
+  if (result != RESERVE_FAILED) {
+    return uintptr_t(result);
+  }
 
-    // no usable poison region identified
-    MOZ_CRASH();
-    return 0;
-  }
+  // no usable poison region identified
+  MOZ_CRASH();
+  return 0;
 }
 
 void
 mozPoisonValueInit()
 {
   gMozillaPoisonSize = GetDesiredRegionSize();
   gMozillaPoisonBase = ReservePoisonArea(gMozillaPoisonSize);
 
-  if (gMozillaPoisonSize == 0) // can't happen
+  if (gMozillaPoisonSize == 0) { // can't happen
     return;
-
-  gMozillaPoisonValue = gMozillaPoisonBase + gMozillaPoisonSize/2 - 1;
+  }
+  gMozillaPoisonValue = gMozillaPoisonBase + gMozillaPoisonSize / 2 - 1;
 }
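
The poison machinery above reserves a region that can never be dereferenced and points gMozillaPoisonValue into the middle of it. A Unix-only sketch of the same idea, using plain mmap() in place of MozTaggedAnonymousMmap(); this is illustrative, not the MFBT code:

#include <sys/mman.h>

#include <assert.h>
#include <stdint.h>
#include <unistd.h>

int
main()
{
  uintptr_t pageSize = uintptr_t(sysconf(_SC_PAGESIZE));

  /* PROT_NONE reserves the page in the address space but makes any load or
   * store through it fault, which is exactly what a poison page is for. */
  void* page = mmap(nullptr, pageSize, PROT_NONE,
                    MAP_PRIVATE | MAP_ANON, -1, 0);
  assert(page != MAP_FAILED);

  /* Point into the middle of the page, misaligned, like gMozillaPoisonValue:
   * writing this value into freed objects makes use-after-free crash fast. */
  uintptr_t poisonValue = uintptr_t(page) + pageSize / 2 - 1;
  (void)poisonValue;

  munmap(page, pageSize);
  return 0;
}
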
--- a/mfbt/tests/TestFloatingPoint.cpp
+++ b/mfbt/tests/TestFloatingPoint.cpp
@@ -185,43 +185,43 @@ TestAreIdentical()
 {
   TestDoublesAreIdentical();
   TestFloatsAreIdentical();
 }
 
 static void
 TestDoubleExponentComponent()
 {
-  MOZ_RELEASE_ASSERT(ExponentComponent(0.0) == -int_fast16_t(FloatingPoint<double>::ExponentBias));
-  MOZ_RELEASE_ASSERT(ExponentComponent(-0.0) == -int_fast16_t(FloatingPoint<double>::ExponentBias));
+  MOZ_RELEASE_ASSERT(ExponentComponent(0.0) == -int_fast16_t(FloatingPoint<double>::kExponentBias));
+  MOZ_RELEASE_ASSERT(ExponentComponent(-0.0) == -int_fast16_t(FloatingPoint<double>::kExponentBias));
   MOZ_RELEASE_ASSERT(ExponentComponent(0.125) == -3);
   MOZ_RELEASE_ASSERT(ExponentComponent(0.5) == -1);
   MOZ_RELEASE_ASSERT(ExponentComponent(1.0) == 0);
   MOZ_RELEASE_ASSERT(ExponentComponent(1.5) == 0);
   MOZ_RELEASE_ASSERT(ExponentComponent(2.0) == 1);
   MOZ_RELEASE_ASSERT(ExponentComponent(7.0) == 2);
-  MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<double>()) == FloatingPoint<double>::ExponentBias + 1);
-  MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<double>()) == FloatingPoint<double>::ExponentBias + 1);
-  MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<double>()) == FloatingPoint<double>::ExponentBias + 1);
+  MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<double>()) == FloatingPoint<double>::kExponentBias + 1);
+  MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<double>()) == FloatingPoint<double>::kExponentBias + 1);
+  MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<double>()) == FloatingPoint<double>::kExponentBias + 1);
 }
 
 static void
 TestFloatExponentComponent()
 {
-  MOZ_RELEASE_ASSERT(ExponentComponent(0.0f) == -int_fast16_t(FloatingPoint<float>::ExponentBias));
-  MOZ_RELEASE_ASSERT(ExponentComponent(-0.0f) == -int_fast16_t(FloatingPoint<float>::ExponentBias));
+  MOZ_RELEASE_ASSERT(ExponentComponent(0.0f) == -int_fast16_t(FloatingPoint<float>::kExponentBias));
+  MOZ_RELEASE_ASSERT(ExponentComponent(-0.0f) == -int_fast16_t(FloatingPoint<float>::kExponentBias));
   MOZ_RELEASE_ASSERT(ExponentComponent(0.125f) == -3);
   MOZ_RELEASE_ASSERT(ExponentComponent(0.5f) == -1);
   MOZ_RELEASE_ASSERT(ExponentComponent(1.0f) == 0);
   MOZ_RELEASE_ASSERT(ExponentComponent(1.5f) == 0);
   MOZ_RELEASE_ASSERT(ExponentComponent(2.0f) == 1);
   MOZ_RELEASE_ASSERT(ExponentComponent(7.0f) == 2);
-  MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<float>()) == FloatingPoint<float>::ExponentBias + 1);
-  MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<float>()) == FloatingPoint<float>::ExponentBias + 1);
-  MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<float>()) == FloatingPoint<float>::ExponentBias + 1);
+  MOZ_RELEASE_ASSERT(ExponentComponent(PositiveInfinity<float>()) == FloatingPoint<float>::kExponentBias + 1);
+  MOZ_RELEASE_ASSERT(ExponentComponent(NegativeInfinity<float>()) == FloatingPoint<float>::kExponentBias + 1);
+  MOZ_RELEASE_ASSERT(ExponentComponent(UnspecifiedNaN<float>()) == FloatingPoint<float>::kExponentBias + 1);
 }
 
 static void
 TestExponentComponent()
 {
   TestDoubleExponentComponent();
   TestFloatExponentComponent();
 }
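
ExponentComponent() as exercised above returns the unbiased exponent: -kExponentBias for zeroes, the true power of two for normal values, and kExponentBias + 1 for infinities and NaN. A standalone sketch of the underlying bit extraction for doubles (52-bit mantissa, bias 1023, matching the renamed FloatingPoint<double> constants); the names here are chosen for illustration only:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pull the 11-bit exponent field out of a double and remove the bias. */
static int_fast16_t
RawExponent(double aValue)
{
  const int kExponentShift = 52;        /* mantissa width */
  const uint64_t kExponentMask = 0x7FF; /* 11 exponent bits */
  const int_fast16_t kExponentBias = 1023;

  uint64_t bits;
  memcpy(&bits, &aValue, sizeof(bits)); /* type-pun without aliasing UB */
  return int_fast16_t((bits >> kExponentShift) & kExponentMask) -
         kExponentBias;
}

int
main()
{
  assert(RawExponent(1.0) == 0);
  assert(RawExponent(0.125) == -3);     /* 0.125 == 2^-3 */
  assert(RawExponent(7.0) == 2);        /* 7.0 == 1.75 * 2^2 */
  assert(RawExponent(0.0) == -1023);    /* zero stores a biased exponent of 0 */
  return 0;
}
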