--- a/content/base/src/nsDOMFile.cpp
+++ b/content/base/src/nsDOMFile.cpp
@@ -935,17 +935,17 @@ public:
size_t size = DOMMemoryFileDataOwnerMallocSizeOf(owner->mData);
if (size < LARGE_OBJECT_MIN_SIZE) {
smallObjectsTotal += size;
} else {
SHA1Sum sha1;
sha1.update(owner->mData, owner->mLength);
- uint8_t digest[SHA1Sum::HashSize]; // SHA1 digests are 20 bytes long.
+ uint8_t digest[SHA1Sum::kHashSize]; // SHA1 digests are 20 bytes long.
sha1.finish(digest);
nsAutoCString digestString;
for (size_t i = 0; i < sizeof(digest); i++) {
digestString.AppendPrintf("%02x", digest[i]);
}
nsresult rv = aCallback->Callback(
--- a/gfx/layers/LayerScope.cpp
+++ b/gfx/layers/LayerScope.cpp
@@ -238,19 +238,19 @@ private:
}
// Client request is valid. Start to generate and send server response.
nsAutoCString guid("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
nsAutoCString res;
SHA1Sum sha1;
nsCString combined(wsKey + guid);
sha1.update(combined.get(), combined.Length());
- uint8_t digest[SHA1Sum::HashSize]; // SHA1 digests are 20 bytes long.
+ uint8_t digest[SHA1Sum::kHashSize]; // SHA1 digests are 20 bytes long.
sha1.finish(digest);
- nsCString newString(reinterpret_cast<char*>(digest), SHA1Sum::HashSize);
+ nsCString newString(reinterpret_cast<char*>(digest), SHA1Sum::kHashSize);
Base64Encode(newString, res);
nsCString response("HTTP/1.1 101 Switching Protocols\r\n");
response.AppendLiteral("Upgrade: websocket\r\n");
response.AppendLiteral("Connection: Upgrade\r\n");
response.Append(nsCString("Sec-WebSocket-Accept: ") + res + nsCString("\r\n"));
response.AppendLiteral("Sec-WebSocket-Protocol: binary\r\n\r\n");
uint32_t written = 0;
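
The hunk above is the server side of the RFC 6455 opening handshake: the Sec-WebSocket-Accept token is the Base64 encoding of the SHA-1 of the client's key concatenated with a fixed GUID. A minimal standalone sketch of that computation follows, using the renamed SHA1Sum::kHashSize; the header names are assumed from typical Gecko usage and the helper itself is not part of the patch.

#include "mozilla/Base64.h"  // Base64Encode (assumed header location)
#include "mozilla/SHA1.h"    // SHA1Sum, SHA1Sum::kHashSize
#include "nsString.h"

static nsCString
ComputeWebSocketAccept(const nsCString& aClientKey)
{
  // RFC 6455: accept = Base64(SHA-1(client key + fixed GUID)).
  nsAutoCString guid("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
  nsCString combined(aClientKey + guid);

  mozilla::SHA1Sum sha1;
  sha1.update(combined.get(), combined.Length());
  uint8_t digest[mozilla::SHA1Sum::kHashSize];
  sha1.finish(digest);

  nsCString accept;
  mozilla::Base64Encode(
    nsCString(reinterpret_cast<char*>(digest), mozilla::SHA1Sum::kHashSize),
    accept);
  // For the RFC 6455 sample key "dGhlIHNhbXBsZSBub25jZQ==" this yields
  // "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".
  return accept;
}
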
--- a/js/public/HashTable.h
+++ b/js/public/HashTable.h
@@ -1011,17 +1011,17 @@ class HashTable : private AllocPolicy
uint32_t rehashes; // tombstone decontaminations
} stats;
# define METER(x) x
#else
# define METER(x)
#endif
friend class mozilla::ReentrancyGuard;
- mutable mozilla::DebugOnly<bool> entered;
+ mutable mozilla::DebugOnly<bool> mEntered;
mozilla::DebugOnly<uint64_t> mutationCount;
// The default initial capacity is 32 (enough to hold 16 elements), but it
// can be as low as 4.
static const unsigned sMinCapacityLog2 = 2;
static const unsigned sMinCapacity = 1 << sMinCapacityLog2;
static const unsigned sMaxInit = JS_BIT(23);
static const unsigned sMaxCapacity = JS_BIT(24);
@@ -1071,17 +1071,17 @@ class HashTable : private AllocPolicy
public:
explicit HashTable(AllocPolicy ap)
: AllocPolicy(ap),
hashShift(sHashBits),
entryCount(0),
gen(0),
removedCount(0),
table(nullptr),
- entered(false),
+ mEntered(false),
mutationCount(0)
{}
MOZ_WARN_UNUSED_RESULT bool init(uint32_t length)
{
MOZ_ASSERT(!initialized());
// Reject all lengths whose initial computed capacity would exceed
@@ -1447,17 +1447,17 @@ class HashTable : private AllocPolicy
}
removedCount = 0;
entryCount = 0;
mutationCount++;
}
void finish()
{
- MOZ_ASSERT(!entered);
+ MOZ_ASSERT(!mEntered);
if (!table)
return;
destroyTable(*this, table, capacity());
table = nullptr;
gen++;
entryCount = 0;
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -417,24 +417,24 @@ class StoreBuffer
RelocatableMonoTypeBuffer<CellPtrEdge> bufferRelocCell;
GenericBuffer bufferGeneric;
JSRuntime *runtime_;
const Nursery &nursery_;
bool aboutToOverflow_;
bool enabled_;
- mozilla::DebugOnly<bool> entered; /* For ReentrancyGuard. */
+ mozilla::DebugOnly<bool> mEntered; /* For ReentrancyGuard. */
public:
explicit StoreBuffer(JSRuntime *rt, const Nursery &nursery)
: bufferVal(), bufferCell(), bufferSlot(), bufferWholeCell(),
bufferRelocVal(), bufferRelocCell(), bufferGeneric(),
runtime_(rt), nursery_(nursery), aboutToOverflow_(false), enabled_(false),
- entered(false)
+ mEntered(false)
{
}
bool enable();
void disable();
bool isEnabled() const { return enabled_; }
bool clear();
--- a/mfbt/Alignment.h
+++ b/mfbt/Alignment.h
@@ -59,18 +59,18 @@ public:
* bytes.
*
* We support 1, 2, 4, 8, and 16-byte alignment.
*/
template<size_t Align>
struct AlignedElem;
/*
- * We have to specialize this template because GCC doesn't like __attribute__((aligned(foo))) where
- * foo is a template parameter.
+ * We have to specialize this template because GCC doesn't like
+ * __attribute__((aligned(foo))) where foo is a template parameter.
*/
template<>
struct AlignedElem<1>
{
MOZ_ALIGNED_DECL(uint8_t elem, 1);
};
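
The specialization above gives AlignedElem<N> a single byte member, elem, whose address is N-byte aligned, which makes it handy as raw aligned scratch storage. A minimal sketch, not part of the patch:

#include <stdint.h>

#include "mozilla/Alignment.h"
#include "mozilla/Assertions.h"

void
UseAlignedScratch()
{
  // slot.elem is one byte, but its address is guaranteed to be 16-byte aligned.
  mozilla::AlignedElem<16> slot;
  MOZ_ASSERT(reinterpret_cast<uintptr_t>(&slot.elem) % 16 == 0);
}
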
--- a/mfbt/ArrayUtils.h
+++ b/mfbt/ArrayUtils.h
@@ -139,17 +139,18 @@ IsInRange(T* aPtr, U* aBegin, U* aEnd)
* Convenience version of the above method when the valid range is specified as
* uintptr_t values. As above, |aPtr| must be aligned, and |aBegin| and |aEnd|
* must be aligned with respect to |T|.
*/
template<typename T>
inline bool
IsInRange(T* aPtr, uintptr_t aBegin, uintptr_t aEnd)
{
- return IsInRange(aPtr, reinterpret_cast<T*>(aBegin), reinterpret_cast<T*>(aEnd));
+ return IsInRange(aPtr,
+ reinterpret_cast<T*>(aBegin), reinterpret_cast<T*>(aEnd));
}
namespace detail {
/*
* Helper for the MOZ_ARRAY_LENGTH() macro to make the length a typesafe
* compile-time constant even on compilers lacking constexpr support.
*/
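
Both IsInRange overloads answer the same question; the uintptr_t form that the reflowed code above wraps is convenient when the bounds are already held as raw addresses. A minimal sketch, assuming nothing beyond the declarations shown:

#include "mozilla/ArrayUtils.h"

bool
PointsIntoBuffer(int* aPtr, int (&aBuf)[16])
{
  return mozilla::IsInRange(aPtr, aBuf, aBuf + 16) &&
         mozilla::IsInRange(aPtr,
                            reinterpret_cast<uintptr_t>(aBuf),
                            reinterpret_cast<uintptr_t>(aBuf + 16));
}
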
--- a/mfbt/Assertions.h
+++ b/mfbt/Assertions.h
@@ -144,17 +144,18 @@ MOZ_ReportAssertionFailure(const char* a
#ifdef MOZ_DUMP_ASSERTION_STACK
nsTraceRefcnt::WalkTheStack(stderr);
#endif
fflush(stderr);
#endif
}
static MOZ_ALWAYS_INLINE void
-MOZ_ReportCrash(const char* aStr, const char* aFilename, int aLine) MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS
+MOZ_ReportCrash(const char* aStr, const char* aFilename, int aLine)
+ MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS
{
#ifdef ANDROID
__android_log_print(ANDROID_LOG_FATAL, "MOZ_CRASH",
"Hit MOZ_CRASH(%s) at %s:%d\n", aStr, aFilename, aLine);
#else
fprintf(stderr, "Hit MOZ_CRASH(%s) at %s:%d\n", aStr, aFilename, aLine);
#ifdef MOZ_DUMP_ASSERTION_STACK
nsTraceRefcnt::WalkTheStack(stderr);
@@ -249,17 +250,17 @@ MOZ_ReportCrash(const char* aStr, const
*/
#ifndef DEBUG
# define MOZ_CRASH(...) MOZ_REALLY_CRASH()
#else
# define MOZ_CRASH(...) \
do { \
MOZ_ReportCrash("" __VA_ARGS__, __FILE__, __LINE__); \
MOZ_REALLY_CRASH(); \
- } while(0)
+ } while (0)
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
/*
* MOZ_ASSERT(expr [, explanation-string]) asserts that |expr| must be truthy in
@@ -379,17 +380,17 @@ void ValidateAssertConditionType()
#define MOZ_RELEASE_ASSERT(...) \
MOZ_RELEASE_ASSERT_GLUE( \
MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_ASSERT_HELPER, __VA_ARGS__), \
(__VA_ARGS__))
#ifdef DEBUG
# define MOZ_ASSERT(...) MOZ_RELEASE_ASSERT(__VA_ARGS__)
#else
-# define MOZ_ASSERT(...) do { } while(0)
+# define MOZ_ASSERT(...) do { } while (0)
#endif /* DEBUG */
/*
* MOZ_NIGHTLY_ASSERT is defined for both debug and release builds on the
* Nightly channel, but only debug builds on Aurora, Beta, and Release.
*/
#if defined(NIGHTLY_BUILD)
# define MOZ_NIGHTLY_ASSERT(...) MOZ_RELEASE_ASSERT(__VA_ARGS__)
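
The `} while (0)` spelling this hunk normalizes is not just whitespace taste: wrapping a multi-statement macro in do { ... } while (0) turns the expansion into a single statement that demands a trailing semicolon, so it nests safely under an unbraced if. A small illustration with hypothetical macro names:

#include <stdio.h>

static void LogMsg(const char* aMsg) { fprintf(stderr, "%s\n", aMsg); }

// Hypothetical macros, only to contrast the two expansions.
#define LOG_TWICE_BAD(msg)  LogMsg(msg); LogMsg(msg)
#define LOG_TWICE_GOOD(msg) do { LogMsg(msg); LogMsg(msg); } while (0)

void
Example(bool aVerbose)
{
  if (aVerbose)
    LOG_TWICE_GOOD("hello");  // expands to one statement, so the if guards both calls
  else
    LogMsg("quiet");
  // With LOG_TWICE_BAD the if would guard only the first LogMsg() call and the
  // dangling else would not even compile.
}
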
--- a/mfbt/Atomics.h
+++ b/mfbt/Atomics.h
@@ -280,17 +280,18 @@ struct IntrinsicAddSub<T*, Order> : publ
return aPtr.fetch_sub(fixupAddend(aVal), Base::OrderedOp::AtomicRMWOrder);
}
private:
/*
* GCC 4.6's <atomic> header has a bug where adding X to an
* atomic<T*> is not the same as adding X to a T*. Hence the need
* for this function to provide the correct addend.
*/
- static ptrdiff_t fixupAddend(ptrdiff_t aVal) {
+ static ptrdiff_t fixupAddend(ptrdiff_t aVal)
+ {
#if defined(__clang__) || defined(_MSC_VER)
return aVal;
#elif defined(__GNUC__) && MOZ_GCC_VERSION_AT_LEAST(4, 6, 0) && \
!MOZ_GCC_VERSION_AT_LEAST(4, 7, 0)
return aVal * sizeof(T);
#else
return aVal;
#endif
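
fixupAddend exists because, as the comment notes, GCC 4.6's atomic<T*> scaled pointer addends by bytes rather than by sizeof(T). The observable guarantee is that pointer arithmetic on mozilla::Atomic<T*> always advances in whole elements. A minimal sketch, assuming the pointer specialization's usual constructor and += operator:

#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"

void
AdvanceCursor(int* aArray)
{
  mozilla::Atomic<int*> cursor(aArray);
  // += advances by whole elements (sizeof(int) bytes), not by raw bytes,
  // regardless of the compiler quirk worked around above.
  cursor += 1;
  MOZ_ASSERT(cursor == aArray + 1);
}
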
--- a/mfbt/Attributes.h
+++ b/mfbt/Attributes.h
@@ -150,19 +150,20 @@
* this operator to allow only explicit type conversions, disallowing
* implicit conversions.
*
* Example:
*
* template<typename T>
* class Ptr
* {
- * T* ptr;
- * MOZ_EXPLICIT_CONVERSION operator bool() const {
- * return ptr != nullptr;
+ * T* mPtr;
+ * MOZ_EXPLICIT_CONVERSION operator bool() const
+ * {
+ * return mPtr != nullptr;
* }
* };
*
*/
#ifdef MOZ_HAVE_EXPLICIT_CONVERSION
# define MOZ_EXPLICIT_CONVERSION explicit
#else
# define MOZ_EXPLICIT_CONVERSION /* no support */
@@ -201,17 +202,18 @@
#endif
/*
* MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS, specified at the end of a function
* declaration, indicates that for the purposes of static analysis, this
* function does not return. (The function definition does not need to be
* annotated.)
*
- * MOZ_ReportCrash(const char* s, const char* file, int ln) MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS
+ * MOZ_ReportCrash(const char* s, const char* file, int ln)
+ * MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS
*
* Some static analyzers, like scan-build from clang, can use this information
* to eliminate false positives. From the upstream documentation of scan-build:
* "This attribute is useful for annotating assertion handlers that actually
* can return, but for the purpose of using the analyzer we want to pretend
* that such functions do not return."
*
*/
@@ -442,23 +444,23 @@
* Attributes that apply to variables or parameters follow the variable's name:
*
* int variable MOZ_VARIABLE_ATTRIBUTE;
*
* Attributes that apply to types follow the type name:
*
* typedef int MOZ_TYPE_ATTRIBUTE MagicInt;
* int MOZ_TYPE_ATTRIBUTE someVariable;
- * int * MOZ_TYPE_ATTRIBUTE magicPtrInt;
- * int MOZ_TYPE_ATTRIBUTE * ptrToMagicInt;
+ * int* MOZ_TYPE_ATTRIBUTE magicPtrInt;
+ * int MOZ_TYPE_ATTRIBUTE* ptrToMagicInt;
*
* Attributes that apply to statements precede the statement:
*
* MOZ_IF_ATTRIBUTE if (x == 0)
- * MOZ_DO_ATTRIBUTE do { } while(0);
+ * MOZ_DO_ATTRIBUTE do { } while (0);
*
* Attributes that apply to labels precede the label:
*
* MOZ_LABEL_ATTRIBUTE target:
* goto target;
* MOZ_CASE_ATTRIBUTE case 5:
* MOZ_DEFAULT_ATTRIBUTE default:
*
--- a/mfbt/BinarySearch.h
+++ b/mfbt/BinarySearch.h
@@ -21,18 +21,19 @@ namespace mozilla {
* BinarySearch returns |false| and the outparam returns the first index in
* [aBegin, aEnd] where |aTarget| can be inserted to maintain sorted order.
*
* Example:
*
* Vector<int> sortedInts = ...
*
* size_t match;
- * if (BinarySearch(sortedInts, 0, sortedInts.length(), 13, &match))
+ * if (BinarySearch(sortedInts, 0, sortedInts.length(), 13, &match)) {
* printf("found 13 at %lu\n", match);
+ * }
*/
template <typename Container, typename T>
bool
BinarySearch(const Container& aContainer, size_t aBegin, size_t aEnd,
T aTarget, size_t* aMatchOrInsertionPoint)
{
MOZ_ASSERT(aBegin <= aEnd);
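
The updated comment above shows the hit case; the miss case is equally useful, since the outparam then names the index at which the target could be inserted without breaking the ordering. A minimal sketch using only the signature shown:

#include <stdio.h>

#include "mozilla/BinarySearch.h"

void
FindOrInsertionPoint()
{
  const int sortedInts[] = { 2, 5, 8, 13, 21 };

  size_t idx;
  if (mozilla::BinarySearch(sortedInts, 0, 5, 13, &idx)) {
    printf("found 13 at %lu\n", (unsigned long)idx);         // idx == 3
  }
  if (!mozilla::BinarySearch(sortedInts, 0, 5, 10, &idx)) {
    printf("insert 10 at index %lu\n", (unsigned long)idx);  // idx == 3 keeps the order
  }
}
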
--- a/mfbt/Char16.h
+++ b/mfbt/Char16.h
@@ -102,63 +102,64 @@ public:
return const_cast<char16_t*>(mPtr);
}
explicit operator wchar_t*() const
{
return const_cast<wchar_t*>(static_cast<const wchar_t*>(*this));
}
/**
- * Some Windows API calls accept BYTE* but require that data actually be WCHAR*.
- * Supporting this requires explicit operators to support the requisite explicit
- * casts.
+ * Some Windows API calls accept BYTE* but require that data actually be
+ * WCHAR*. Supporting this requires explicit operators to support the
+ * requisite explicit casts.
*/
explicit operator const char*() const
{
return reinterpret_cast<const char*>(mPtr);
}
explicit operator const unsigned char*() const
{
return reinterpret_cast<const unsigned char*>(mPtr);
}
explicit operator unsigned char*() const
{
- return const_cast<unsigned char*>(reinterpret_cast<const unsigned char*>(mPtr));
+ return
+ const_cast<unsigned char*>(reinterpret_cast<const unsigned char*>(mPtr));
}
explicit operator void*() const
{
return const_cast<char16_t*>(mPtr);
}
/* Some operators used on pointers. */
char16_t operator[](size_t aIndex) const
{
return mPtr[aIndex];
}
- bool operator==(const char16ptr_t &aOther) const
+ bool operator==(const char16ptr_t& aOther) const
{
return mPtr == aOther.mPtr;
}
bool operator==(decltype(nullptr)) const
{
return mPtr == nullptr;
}
- bool operator!=(const char16ptr_t &aOther) const
+ bool operator!=(const char16ptr_t& aOther) const
{
return mPtr != aOther.mPtr;
}
bool operator!=(decltype(nullptr)) const
{
return mPtr != nullptr;
}
char16ptr_t operator+(size_t aValue) const
{
return char16ptr_t(mPtr + aValue);
}
- ptrdiff_t operator-(const char16ptr_t &aOther) const
+ ptrdiff_t operator-(const char16ptr_t& aOther) const
{
return mPtr - aOther.mPtr;
}
};
inline decltype((char*)0-(char*)0)
operator-(const char16_t* aX, const char16ptr_t aY)
{
--- a/mfbt/CheckedInt.h
+++ b/mfbt/CheckedInt.h
@@ -442,17 +442,17 @@ struct NegateImpl<T, true>
*
* The arithmetic operators in this class are guaranteed not to raise a signal
* (e.g. in case of a division by zero).
*
* For example, suppose that you want to implement a function that computes
* (aX+aY)/aZ, that doesn't crash if aZ==0, and that reports on error (divide by
* zero or integer overflow). You could code it as follows:
@code
- bool computeXPlusYOverZ(int aX, int aY, int aZ, int *aResult)
+ bool computeXPlusYOverZ(int aX, int aY, int aZ, int* aResult)
{
CheckedInt<int> checkedResult = (CheckedInt<int>(aX) + aY) / aZ;
if (checkedResult.isValid()) {
*aResult = checkedResult.value();
return true;
} else {
return false;
}
--- a/mfbt/Compression.cpp
+++ b/mfbt/Compression.cpp
@@ -43,17 +43,17 @@ LZ4::decompress(const char* aSource, cha
CheckedInt<int> outputSizeChecked = aOutputSize;
MOZ_ASSERT(outputSizeChecked.isValid());
int ret = LZ4_decompress_fast(aSource, aDest, outputSizeChecked.value());
return ret >= 0;
}
bool
LZ4::decompress(const char* aSource, size_t aInputSize, char* aDest,
- size_t aMaxOutputSize, size_t *aOutputSize)
+ size_t aMaxOutputSize, size_t* aOutputSize)
{
CheckedInt<int> maxOutputSizeChecked = aMaxOutputSize;
MOZ_ASSERT(maxOutputSizeChecked.isValid());
CheckedInt<int> inputSizeChecked = aInputSize;
MOZ_ASSERT(inputSizeChecked.isValid());
int ret = LZ4_decompress_safe(aSource, aDest, inputSizeChecked.value(),
maxOutputSizeChecked.value());
--- a/mfbt/Compression.h
+++ b/mfbt/Compression.h
@@ -89,17 +89,17 @@ public:
* @param aInputSize is the length of the input compressed data
* @param aMaxOutputSize is the size of the destination buffer (which must be
* already allocated)
* @param aOutputSize the actual number of bytes decoded in the destination
* buffer (necessarily <= aMaxOutputSize)
*/
static MFBT_API bool
decompress(const char* aSource, size_t aInputSize, char* aDest,
- size_t aMaxOutputSize, size_t *aOutputSize);
+ size_t aMaxOutputSize, size_t* aOutputSize);
/*
* Provides the maximum size that LZ4 may output in a "worst case"
* scenario (input data not compressible) primarily useful for memory
* allocation of output buffer.
* note : this function is limited by "int" range (2^31-1)
*
* @param aInputSize is the input size. Max supported value is ~1.9GB
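
A round-trip sketch of the two-outparam decompress() documented above. The compress() entry point and the mozilla::Compression namespace are assumed from the same header and are not shown in this hunk, so treat the sketch as illustrative rather than authoritative.

#include <string.h>

#include "mozilla/Compression.h"

using mozilla::Compression::LZ4;

bool
RoundTripExample()
{
  static const char input[] = "hello hello hello hello";
  const size_t inputSize = sizeof(input);

  char compressed[256];  // comfortably past the worst-case bound described above
  size_t compressedSize = LZ4::compress(input, inputSize, compressed);

  char decompressed[sizeof(input)];
  size_t outputSize = 0;
  if (!LZ4::decompress(compressed, compressedSize,
                       decompressed, sizeof(decompressed), &outputSize)) {
    return false;  // malformed or truncated compressed data
  }
  return outputSize == inputSize && memcmp(input, decompressed, inputSize) == 0;
}
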
--- a/mfbt/Endian.h
+++ b/mfbt/Endian.h
@@ -469,17 +469,18 @@ protected:
{
copyAndSwapTo<ThisEndian, Big>(aDest, aSrc, aCount);
}
/*
* Likewise, but converts values in place.
*/
template<typename T>
- static void swapToBigEndianInPlace(T* aPtr, size_t aCount) {
+ static void swapToBigEndianInPlace(T* aPtr, size_t aCount)
+ {
maybeSwapInPlace<ThisEndian, Big>(aPtr, aCount);
}
/*
* Synonyms for the big-endian functions, for better readability
* in network code.
*/
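
swapToBigEndianInPlace (and the network-order synonym the comment mentions) is what you reach for when serializing a whole buffer. A minimal sketch, assuming the public NativeEndian wrapper re-exports these helpers as it does the copy-and-swap ones:

#include <stddef.h>
#include <stdint.h>

#include "mozilla/Endian.h"

void
SerializeForNetwork(uint32_t* aValues, size_t aCount)
{
  // No-op on big-endian hosts; byte-swaps every element on little-endian ones.
  mozilla::NativeEndian::swapToBigEndianInPlace(aValues, aCount);
  // aValues is now in network byte order and can be written out directly.
}
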
--- a/mfbt/FloatingPoint.cpp
+++ b/mfbt/FloatingPoint.cpp
@@ -1,9 +1,10 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Implementations of FloatingPoint functions */
#include "mozilla/FloatingPoint.h"
--- a/mfbt/HashFunctions.cpp
+++ b/mfbt/HashFunctions.cpp
@@ -1,9 +1,10 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Implementations of hash functions. */
#include "mozilla/HashFunctions.h"
#include "mozilla/Types.h"
--- a/mfbt/IntegerPrintfMacros.h
+++ b/mfbt/IntegerPrintfMacros.h
@@ -1,12 +1,13 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Implements the C99 <inttypes.h> interface, minus the SCN* format macros. */
#ifndef mozilla_IntegerPrintfMacros_h_
#define mozilla_IntegerPrintfMacros_h_
/*
* MSVC++ doesn't include <inttypes.h>, even in versions shipping <stdint.h>, so
--- a/mfbt/IntegerTypeTraits.h
+++ b/mfbt/IntegerTypeTraits.h
@@ -77,31 +77,33 @@ struct StdintTypeForSizeAndSignedness<8,
template<size_t Size>
struct UnsignedStdintTypeForSize
: detail::StdintTypeForSizeAndSignedness<Size, false>
{};
template<typename IntegerType>
struct PositionOfSignBit
{
- static_assert(IsIntegral<IntegerType>::value, "PositionOfSignBit is only for integral types");
+ static_assert(IsIntegral<IntegerType>::value,
+ "PositionOfSignBit is only for integral types");
// 8 here should be CHAR_BIT from limits.h, but the world has moved on.
static const size_t value = 8 * sizeof(IntegerType) - 1;
};
/**
* MinValue returns the minimum value of the given integer type as a
* compile-time constant, which std::numeric_limits<IntegerType>::min()
* cannot do in c++98.
*/
template<typename IntegerType>
struct MinValue
{
private:
- static_assert(IsIntegral<IntegerType>::value, "MinValue is only for integral types");
+ static_assert(IsIntegral<IntegerType>::value,
+ "MinValue is only for integral types");
typedef typename MakeUnsigned<IntegerType>::Type UnsignedIntegerType;
static const size_t PosOfSignBit = PositionOfSignBit<IntegerType>::value;
public:
// Bitwise ops may return a larger type, that's why we cast explicitly.
// In C++, left bit shifts on signed values are undefined by the standard
// unless the shifted value is representable.
@@ -117,17 +119,18 @@ public:
/**
* MaxValue returns the maximum value of the given integer type as a
* compile-time constant, which std::numeric_limits<IntegerType>::max()
* cannot do in c++98.
*/
template<typename IntegerType>
struct MaxValue
{
- static_assert(IsIntegral<IntegerType>::value, "MaxValue is only for integral types");
+ static_assert(IsIntegral<IntegerType>::value,
+ "MaxValue is only for integral types");
// Tricksy, but covered by the CheckedInt unit test.
// Relies on the type of MinValue<IntegerType>::value
// being IntegerType.
static const IntegerType value = ~MinValue<IntegerType>::value;
};
} // namespace mozilla
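
For a signed type, MinValue sets only the sign bit identified by PositionOfSignBit, and MaxValue is its bitwise complement, so the two helpers reproduce the numeric_limits values without C++11. A couple of expected values as a sketch, assuming the two's-complement representation the rest of mfbt relies on:

#include <stdint.h>

#include "mozilla/IntegerTypeTraits.h"

// int8_t: the sign bit is bit 7, so MinValue is -(1 << 7) and MaxValue is ~MinValue.
static_assert(mozilla::MinValue<int8_t>::value == -128, "sign bit only");
static_assert(mozilla::MaxValue<int8_t>::value == 127, "bitwise complement of -128");
// For unsigned types the same pair is expected to yield 0 and the all-ones value.
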
--- a/mfbt/LinkedList.h
+++ b/mfbt/LinkedList.h
@@ -458,17 +458,18 @@ public:
cur = cur->mNext;
} while (cur != &sentinel);
#endif /* ifdef DEBUG */
}
private:
friend class LinkedListElement<T>;
- void assertContains(const T* aValue) const {
+ void assertContains(const T* aValue) const
+ {
#ifdef DEBUG
for (const T* elem = getFirst(); elem; elem = elem->getNext()) {
if (elem == aValue) {
return;
}
}
MOZ_CRASH("element wasn't found in this list!");
#endif
--- a/mfbt/MathAlgorithms.h
+++ b/mfbt/MathAlgorithms.h
@@ -97,17 +97,18 @@ struct AbsReturnTypeFixed;
template<> struct AbsReturnTypeFixed<int8_t> { typedef uint8_t Type; };
template<> struct AbsReturnTypeFixed<int16_t> { typedef uint16_t Type; };
template<> struct AbsReturnTypeFixed<int32_t> { typedef uint32_t Type; };
template<> struct AbsReturnTypeFixed<int64_t> { typedef uint64_t Type; };
template<typename T>
struct AbsReturnType : AbsReturnTypeFixed<T> {};
-template<> struct AbsReturnType<char> : EnableIf<char(-1) < char(0), unsigned char> {};
+template<> struct AbsReturnType<char> :
+ EnableIf<char(-1) < char(0), unsigned char> {};
template<> struct AbsReturnType<signed char> { typedef unsigned char Type; };
template<> struct AbsReturnType<short> { typedef unsigned short Type; };
template<> struct AbsReturnType<int> { typedef unsigned int Type; };
template<> struct AbsReturnType<long> { typedef unsigned long Type; };
template<> struct AbsReturnType<long long> { typedef unsigned long long Type; };
template<> struct AbsReturnType<float> { typedef float Type; };
template<> struct AbsReturnType<double> { typedef double Type; };
template<> struct AbsReturnType<long double> { typedef long double Type; };
@@ -140,17 +141,18 @@ template<>
inline long double
Abs<long double>(const long double aLongDouble)
{
return std::fabs(aLongDouble);
}
} // namespace mozilla
-#if defined(_WIN32) && (_MSC_VER >= 1300) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
+#if defined(_WIN32) && (_MSC_VER >= 1300) && \
+ (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
# define MOZ_BITSCAN_WINDOWS
# include <intrin.h>
# pragma intrinsic(_BitScanForward, _BitScanReverse)
# if defined(_M_AMD64) || defined(_M_X64)
# define MOZ_BITSCAN_WINDOWS64
# pragma intrinsic(_BitScanForward64, _BitScanReverse64)
--- a/mfbt/MaybeOneOf.h
+++ b/mfbt/MaybeOneOf.h
@@ -7,18 +7,17 @@
#ifndef mozilla_MaybeOneOf_h
#define mozilla_MaybeOneOf_h
#include "mozilla/Alignment.h"
#include "mozilla/Assertions.h"
#include "mozilla/Move.h"
#include "mozilla/TemplateLib.h"
-// For placement new
-#include <new>
+#include <new> // For placement new
namespace mozilla {
/*
* MaybeOneOf<T1, T2> is like Maybe, but it supports constructing either T1
* or T2. When a MaybeOneOf<T1, T2> is constructed, it is |empty()|, i.e.,
* no value has been constructed and no destructor will be called when the
* MaybeOneOf<T1, T2> is destroyed. Upon calling |construct<T1>()| or
@@ -30,23 +29,25 @@ template<class T1, class T2>
class MaybeOneOf
{
AlignedStorage<tl::Max<sizeof(T1), sizeof(T2)>::value> storage;
enum State { None, SomeT1, SomeT2 } state;
template <class T, class Ignored = void> struct Type2State {};
template <class T>
- T& as() {
+ T& as()
+ {
MOZ_ASSERT(state == Type2State<T>::result);
return *(T*)storage.addr();
}
template <class T>
- const T& as() const {
+ const T& as() const
+ {
MOZ_ASSERT(state == Type2State<T>::result);
return *(T*)storage.addr();
}
public:
MaybeOneOf() : state(None) {}
~MaybeOneOf() { destroyIfConstructed(); }
@@ -119,23 +120,25 @@ public:
private:
MaybeOneOf(const MaybeOneOf& aOther) MOZ_DELETE;
const MaybeOneOf& operator=(const MaybeOneOf& aOther) MOZ_DELETE;
};
template <class T1, class T2>
template <class Ignored>
-struct MaybeOneOf<T1, T2>::Type2State<T1, Ignored> {
+struct MaybeOneOf<T1, T2>::Type2State<T1, Ignored>
+{
typedef MaybeOneOf<T1, T2> Enclosing;
static const typename Enclosing::State result = Enclosing::SomeT1;
};
template <class T1, class T2>
template <class Ignored>
-struct MaybeOneOf<T1, T2>::Type2State<T2, Ignored> {
+struct MaybeOneOf<T1, T2>::Type2State<T2, Ignored>
+{
typedef MaybeOneOf<T1, T2> Enclosing;
static const typename Enclosing::State result = Enclosing::SomeT2;
};
} // namespace mozilla
#endif /* mozilla_MaybeOneOf_h */
--- a/mfbt/MemoryChecking.h
+++ b/mfbt/MemoryChecking.h
@@ -58,15 +58,15 @@ void MOZ_EXPORT __asan_unpoison_memory_r
#define MOZ_MAKE_MEM_UNDEFINED(addr, size) \
VALGRIND_MAKE_MEM_UNDEFINED((addr), (size))
#define MOZ_MAKE_MEM_DEFINED(addr, size) \
VALGRIND_MAKE_MEM_DEFINED((addr), (size))
#else
-#define MOZ_MAKE_MEM_NOACCESS(addr, size) do {} while(0)
-#define MOZ_MAKE_MEM_UNDEFINED(addr, size) do {} while(0)
-#define MOZ_MAKE_MEM_DEFINED(addr, size) do {} while(0)
+#define MOZ_MAKE_MEM_NOACCESS(addr, size) do {} while (0)
+#define MOZ_MAKE_MEM_UNDEFINED(addr, size) do {} while (0)
+#define MOZ_MAKE_MEM_DEFINED(addr, size) do {} while (0)
#endif
#endif /* mozilla_MemoryChecking_h */
--- a/mfbt/Move.h
+++ b/mfbt/Move.h
@@ -139,18 +139,18 @@ namespace mozilla {
*
* ("'Don't Repeat Yourself'? What's that?")
*
* This takes advantage of two new rules in C++11:
*
* - First, when a function template takes an argument that is an rvalue
* reference to a template argument (like 'XArg&& x' and 'YArg&& y' above),
* then when the argument is applied to an lvalue, the template argument
- * resolves to 'T &'; and when it is applied to an rvalue, the template
- * argument resolves to 'T &&'. Thus, in a call to C::C like:
+ * resolves to 'T&'; and when it is applied to an rvalue, the template
+ * argument resolves to 'T&&'. Thus, in a call to C::C like:
*
* X foo(int);
* Y yy;
*
* C(foo(5), yy)
*
* XArg would resolve to 'X&&', and YArg would resolve to 'Y&'.
*
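
A minimal sketch of the deduction rule the comment describes, using mozilla::Forward as the Pair.h hunks below do: with an lvalue argument XArg deduces to X& and the member is copied, with an rvalue it deduces to X and the member is moved.

#include "mozilla/Move.h"

struct X
{
  explicit X(int) {}
  X(const X&) {}  // copy constructor
  X(X&&) {}       // move constructor
};

struct C
{
  template<typename XArg>
  explicit C(XArg&& aX)
    : mX(mozilla::Forward<XArg>(aX))  // lvalue argument copies, rvalue moves
  {}
  X mX;
};

void
Demo()
{
  X lvalue(1);
  C copies(lvalue);  // XArg deduces to X&, so mX is copy-constructed
  C moves(X(2));     // XArg deduces to X, so mX is move-constructed
}
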
--- a/mfbt/Pair.h
+++ b/mfbt/Pair.h
@@ -33,105 +33,109 @@ template<typename A, typename B,
IsEmpty<B>::value && !IsBaseOf<A, B>::value && !IsBaseOf<B, A>::value
? detail::AsBase
: detail::AsMember>
struct PairHelper;
template<typename A, typename B>
struct PairHelper<A, B, AsMember, AsMember>
{
- protected:
- template<typename AArg, typename BArg>
- PairHelper(AArg&& a, BArg&& b)
- : firstA(Forward<AArg>(a)),
- secondB(Forward<BArg>(b))
- {}
+protected:
+ template<typename AArg, typename BArg>
+ PairHelper(AArg&& aA, BArg&& aB)
+ : mFirstA(Forward<AArg>(aA)),
+ mSecondB(Forward<BArg>(aB))
+ {}
- A& first() { return firstA; }
- const A& first() const { return firstA; }
- B& second() { return secondB; }
- const B& second() const { return secondB; }
+ A& first() { return mFirstA; }
+ const A& first() const { return mFirstA; }
+ B& second() { return mSecondB; }
+ const B& second() const { return mSecondB; }
- void swap(PairHelper& other) {
- Swap(firstA, other.firstA);
- Swap(secondB, other.secondB);
- }
+ void swap(PairHelper& aOther)
+ {
+ Swap(mFirstA, aOther.mFirstA);
+ Swap(mSecondB, aOther.mSecondB);
+ }
- private:
- A firstA;
- B secondB;
+private:
+ A mFirstA;
+ B mSecondB;
};
template<typename A, typename B>
struct PairHelper<A, B, AsMember, AsBase> : private B
{
- protected:
- template<typename AArg, typename BArg>
- PairHelper(AArg&& a, BArg&& b)
- : B(Forward<BArg>(b)),
- firstA(Forward<AArg>(a))
- {}
+protected:
+ template<typename AArg, typename BArg>
+ PairHelper(AArg&& aA, BArg&& aB)
+ : B(Forward<BArg>(aB)),
+ mFirstA(Forward<AArg>(aA))
+ {}
- A& first() { return firstA; }
- const A& first() const { return firstA; }
- B& second() { return *this; }
- const B& second() const { return *this; }
+ A& first() { return mFirstA; }
+ const A& first() const { return mFirstA; }
+ B& second() { return *this; }
+ const B& second() const { return *this; }
- void swap(PairHelper& other) {
- Swap(firstA, other.firstA);
- Swap(static_cast<B&>(*this), static_cast<B&>(other));
- }
+ void swap(PairHelper& aOther)
+ {
+ Swap(mFirstA, aOther.mFirstA);
+ Swap(static_cast<B&>(*this), static_cast<B&>(aOther));
+ }
- private:
- A firstA;
+private:
+ A mFirstA;
};
template<typename A, typename B>
struct PairHelper<A, B, AsBase, AsMember> : private A
{
- protected:
- template<typename AArg, typename BArg>
- PairHelper(AArg&& a, BArg&& b)
- : A(Forward<AArg>(a)),
- secondB(Forward<BArg>(b))
- {}
+protected:
+ template<typename AArg, typename BArg>
+ PairHelper(AArg&& aA, BArg&& aB)
+ : A(Forward<AArg>(aA)),
+ mSecondB(Forward<BArg>(aB))
+ {}
- A& first() { return *this; }
- const A& first() const { return *this; }
- B& second() { return secondB; }
- const B& second() const { return secondB; }
+ A& first() { return *this; }
+ const A& first() const { return *this; }
+ B& second() { return mSecondB; }
+ const B& second() const { return mSecondB; }
- void swap(PairHelper& other) {
- Swap(static_cast<A&>(*this), static_cast<A&>(other));
- Swap(secondB, other.secondB);
- }
+ void swap(PairHelper& aOther)
+ {
+ Swap(static_cast<A&>(*this), static_cast<A&>(aOther));
+ Swap(mSecondB, aOther.mSecondB);
+ }
- private:
- B secondB;
+private:
+ B mSecondB;
};
template<typename A, typename B>
struct PairHelper<A, B, AsBase, AsBase> : private A, private B
{
- protected:
- template<typename AArg, typename BArg>
- PairHelper(AArg&& a, BArg&& b)
- : A(Forward<AArg>(a)),
- B(Forward<BArg>(b))
- {}
+protected:
+ template<typename AArg, typename BArg>
+ PairHelper(AArg&& aA, BArg&& aB)
+ : A(Forward<AArg>(aA)),
+ B(Forward<BArg>(aB))
+ {}
- A& first() { return static_cast<A&>(*this); }
- const A& first() const { return static_cast<A&>(*this); }
- B& second() { return static_cast<B&>(*this); }
- const B& second() const { return static_cast<B&>(*this); }
+ A& first() { return static_cast<A&>(*this); }
+ const A& first() const { return static_cast<A&>(*this); }
+ B& second() { return static_cast<B&>(*this); }
+ const B& second() const { return static_cast<B&>(*this); }
- void swap(PairHelper& other) {
- Swap(static_cast<A&>(*this), static_cast<A&>(other));
- Swap(static_cast<B&>(*this), static_cast<B&>(other));
- }
+ void swap(PairHelper& aOther)
+ {
+ Swap(static_cast<A&>(*this), static_cast<A&>(aOther));
+ Swap(static_cast<B&>(*this), static_cast<B&>(aOther));
+ }
};
} // namespace detail
/**
* Pair is the logical concatenation of an instance of A with an instance B.
* Space is conserved when possible. Neither A nor B may be a final class.
*
@@ -143,40 +147,38 @@ struct PairHelper<A, B, AsBase, AsBase>
* initialization or destruction of A and B, and so on. (This is approximately
* required to optimize space usage.) The first/second names are merely
* conceptual!
*/
template<typename A, typename B>
struct Pair
: private detail::PairHelper<A, B>
{
- typedef typename detail::PairHelper<A, B> Base;
+ typedef typename detail::PairHelper<A, B> Base;
- public:
- template<typename AArg, typename BArg>
- Pair(AArg&& a, BArg&& b)
- : Base(Forward<AArg>(a), Forward<BArg>(b))
- {}
+public:
+ template<typename AArg, typename BArg>
+ Pair(AArg&& aA, BArg&& aB)
+ : Base(Forward<AArg>(aA), Forward<BArg>(aB))
+ {}
- /** The A instance. */
- using Base::first;
- /** The B instance. */
- using Base::second;
+ /** The A instance. */
+ using Base::first;
+ /** The B instance. */
+ using Base::second;
- /** Swap this pair with another pair. */
- void swap(Pair& other) {
- Base::swap(other);
- }
+ /** Swap this pair with another pair. */
+ void swap(Pair& aOther) { Base::swap(aOther); }
- private:
- Pair(const Pair&) MOZ_DELETE;
+private:
+ Pair(const Pair&) MOZ_DELETE;
};
template<typename A, class B>
void
-Swap(Pair<A, B>& x, Pair<A, B>& y)
+Swap(Pair<A, B>& aX, Pair<A, B>& aY)
{
- x.swap(y);
+ aX.swap(aY);
}
} // namespace mozilla
#endif /* mozilla_Pair_h */
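
A short usage sketch of the interface kept by the re-indentation above; space conservation means an empty B is folded in as a base class, so the pair is typically no larger than A alone.

#include "mozilla/Pair.h"

void
PairDemo()
{
  mozilla::Pair<int, float> p(1, 2.5f);
  mozilla::Pair<int, float> q(3, 4.5f);

  p.first() += q.first();  // first() and second() are the only accessors
  Swap(p, q);              // the free Swap overload above is found by ADL
}
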
--- a/mfbt/Poison.cpp
+++ b/mfbt/Poison.cpp
@@ -36,24 +36,24 @@ uintptr_t gMozillaPoisonSize;
// form a pointer either to an always-unmapped region of the address
// space, or to a page that has been reserved and rendered
// inaccessible via OS primitives. See tests/TestPoisonArea.cpp for
// extensive discussion of the requirements for this page. The code
// from here to 'class FreeList' needs to be kept in sync with that
// file.
#ifdef _WIN32
-static void *
+static void*
ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
{
- return VirtualAlloc((void *)aRegion, aSize, MEM_RESERVE, PAGE_NOACCESS);
+ return VirtualAlloc((void*)aRegion, aSize, MEM_RESERVE, PAGE_NOACCESS);
}
static void
-ReleaseRegion(void *aRegion, uintptr_t aSize)
+ReleaseRegion(void* aRegion, uintptr_t aSize)
{
VirtualFree(aRegion, aSize, MEM_RELEASE);
}
static bool
ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
{
SYSTEM_INFO sinfo;
@@ -72,26 +72,26 @@ GetDesiredRegionSize()
SYSTEM_INFO sinfo;
GetSystemInfo(&sinfo);
return sinfo.dwAllocationGranularity;
}
#define RESERVE_FAILED 0
#elif defined(__OS2__)
-static void *
+static void*
ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
{
// OS/2 doesn't support allocation at an arbitrary address,
// so return an address that is known to be invalid.
return (void*)0xFFFD0000;
}
static void
-ReleaseRegion(void *aRegion, uintptr_t aSize)
+ReleaseRegion(void* aRegion, uintptr_t aSize)
{
return;
}
static bool
ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
{
// There's no reliable way to probe an address in the system
@@ -107,26 +107,26 @@ GetDesiredRegionSize()
}
#define RESERVE_FAILED 0
#else // Unix
#include "mozilla/TaggedAnonymousMemory.h"
-static void *
+static void*
ReserveRegion(uintptr_t aRegion, uintptr_t aSize)
{
return MozTaggedAnonymousMmap(reinterpret_cast<void*>(aRegion), aSize,
PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0,
"poison");
}
static void
-ReleaseRegion(void *aRegion, uintptr_t aSize)
+ReleaseRegion(void* aRegion, uintptr_t aSize)
{
munmap(aRegion, aSize);
}
static bool
ProbeRegion(uintptr_t aRegion, uintptr_t aSize)
{
if (madvise(reinterpret_cast<void*>(aRegion), aSize, MADV_NORMAL)) {
@@ -142,34 +142,34 @@ GetDesiredRegionSize()
return sysconf(_SC_PAGESIZE);
}
#define RESERVE_FAILED MAP_FAILED
#endif // system dependencies
static_assert(sizeof(uintptr_t) == 4 || sizeof(uintptr_t) == 8, "");
-static_assert(sizeof(uintptr_t) == sizeof(void *), "");
+static_assert(sizeof(uintptr_t) == sizeof(void*), "");
static uintptr_t
ReservePoisonArea(uintptr_t rgnsize)
{
if (sizeof(uintptr_t) == 8) {
// Use the hardware-inaccessible region.
// We have to avoid 64-bit constants and shifts by 32 bits, since this
// code is compiled in 32-bit mode, although it is never executed there.
return
(((uintptr_t(0x7FFFFFFFu) << 31) << 1 | uintptr_t(0xF0DEAFFFu))
& ~(rgnsize-1));
}
// First see if we can allocate the preferred poison address from the OS.
uintptr_t candidate = (0xF0DEAFFF & ~(rgnsize-1));
- void *result = ReserveRegion(candidate, rgnsize);
- if (result == (void *)candidate) {
+ void* result = ReserveRegion(candidate, rgnsize);
+ if (result == (void*)candidate) {
// success - inaccessible page allocated
return candidate;
}
// That didn't work, so see if the preferred address is within a range
// of permanently inaccessible memory.
if (ProbeRegion(candidate, rgnsize)) {
// success - selected page cannot be usable memory
--- a/mfbt/Range.h
+++ b/mfbt/Range.h
@@ -13,35 +13,33 @@
#include <stddef.h>
namespace mozilla {
// Range<T> is a tuple containing a pointer and a length.
template <typename T>
class Range
{
- const RangedPtr<T> mStart;
- const RangedPtr<T> mEnd;
+ const RangedPtr<T> mStart;
+ const RangedPtr<T> mEnd;
- typedef void (Range::* ConvertibleToBool)();
- void nonNull() {}
+ typedef void (Range::* ConvertibleToBool)();
+ void nonNull() {}
- public:
- Range() : mStart(nullptr, 0), mEnd(nullptr, 0) {}
- Range(T* p, size_t len)
- : mStart(p, p, p + len),
- mEnd(p + len, p, p + len)
- {}
+public:
+ Range() : mStart(nullptr, 0), mEnd(nullptr, 0) {}
+ Range(T* aPtr, size_t aLength)
+ : mStart(aPtr, aPtr, aPtr + aLength),
+ mEnd(aPtr + aLength, aPtr, aPtr + aLength)
+ {}
- RangedPtr<T> start() const { return mStart; }
- RangedPtr<T> end() const { return mEnd; }
- size_t length() const { return mEnd - mStart; }
+ RangedPtr<T> start() const { return mStart; }
+ RangedPtr<T> end() const { return mEnd; }
+ size_t length() const { return mEnd - mStart; }
- T& operator[](size_t offset) const {
- return mStart[offset];
- }
+ T& operator[](size_t aOffset) const { return mStart[aOffset]; }
- operator ConvertibleToBool() const { return mStart ? &Range::nonNull : 0; }
+ operator ConvertibleToBool() const { return mStart ? &Range::nonNull : 0; }
};
} // namespace mozilla
#endif /* mozilla_Range_h */
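
A minimal sketch of Range<T> as reformatted above: it packages a pointer and a length, converts to bool like a pointer, and indexes through bounds-asserting RangedPtrs in DEBUG builds.

#include "mozilla/Range.h"

size_t
CountZeroBytes(char* aBuf, size_t aLength)
{
  mozilla::Range<char> range(aBuf, aLength);
  if (!range) {  // false when the start pointer is null
    return 0;
  }
  size_t zeros = 0;
  for (size_t i = 0; i < range.length(); i++) {
    if (range[i] == 0) {  // asserts i is within the range in DEBUG builds
      zeros++;
    }
  }
  return zeros;
}
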
--- a/mfbt/RangedPtr.h
+++ b/mfbt/RangedPtr.h
@@ -38,223 +38,247 @@ namespace mozilla {
*
* RangedPtr<T> intentionally does not implicitly convert to T*. Use get() to
* explicitly convert to T*. Keep in mind that the raw pointer of course won't
* implement bounds checking in debug builds.
*/
template<typename T>
class RangedPtr
{
- T* ptr;
+ T* mPtr;
#ifdef DEBUG
- T* const rangeStart;
- T* const rangeEnd;
+ T* const mRangeStart;
+ T* const mRangeEnd;
#endif
- typedef void (RangedPtr::* ConvertibleToBool)();
- void nonNull() {}
-
- void checkSanity() {
- MOZ_ASSERT(rangeStart <= ptr);
- MOZ_ASSERT(ptr <= rangeEnd);
- }
+ typedef void (RangedPtr::* ConvertibleToBool)();
+ void nonNull() {}
- /* Creates a new pointer for |p|, restricted to this pointer's range. */
- RangedPtr<T> create(T *p) const {
-#ifdef DEBUG
- return RangedPtr<T>(p, rangeStart, rangeEnd);
-#else
- return RangedPtr<T>(p, nullptr, size_t(0));
-#endif
- }
+ void checkSanity()
+ {
+ MOZ_ASSERT(mRangeStart <= mPtr);
+ MOZ_ASSERT(mPtr <= mRangeEnd);
+ }
- uintptr_t asUintptr() const { return uintptr_t(ptr); }
-
- public:
- RangedPtr(T* p, T* start, T* end)
- : ptr(p)
+ /* Creates a new pointer for |aPtr|, restricted to this pointer's range. */
+ RangedPtr<T> create(T* aPtr) const
+ {
#ifdef DEBUG
- , rangeStart(start), rangeEnd(end)
+ return RangedPtr<T>(aPtr, mRangeStart, mRangeEnd);
+#else
+ return RangedPtr<T>(aPtr, nullptr, size_t(0));
#endif
- {
- MOZ_ASSERT(rangeStart <= rangeEnd);
- checkSanity();
- }
- RangedPtr(T* p, T* start, size_t length)
- : ptr(p)
+ }
+
+ uintptr_t asUintptr() const { return reinterpret_cast<uintptr_t>(mPtr); }
+
+public:
+ RangedPtr(T* aPtr, T* aStart, T* aEnd)
+ : mPtr(aPtr)
#ifdef DEBUG
- , rangeStart(start), rangeEnd(start + length)
+ , mRangeStart(aStart), mRangeEnd(aEnd)
#endif
- {
- MOZ_ASSERT(length <= size_t(-1) / sizeof(T));
- MOZ_ASSERT(uintptr_t(rangeStart) + length * sizeof(T) >= uintptr_t(rangeStart));
- checkSanity();
- }
+ {
+ MOZ_ASSERT(mRangeStart <= mRangeEnd);
+ checkSanity();
+ }
+ RangedPtr(T* aPtr, T* aStart, size_t aLength)
+ : mPtr(aPtr)
+#ifdef DEBUG
+ , mRangeStart(aStart), mRangeEnd(aStart + aLength)
+#endif
+ {
+ MOZ_ASSERT(aLength <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT(reinterpret_cast<uintptr_t>(mRangeStart) + aLength * sizeof(T) >=
+ reinterpret_cast<uintptr_t>(mRangeStart));
+ checkSanity();
+ }
- /* Equivalent to RangedPtr(p, p, length). */
- RangedPtr(T* p, size_t length)
- : ptr(p)
+ /* Equivalent to RangedPtr(aPtr, aPtr, aLength). */
+ RangedPtr(T* aPtr, size_t aLength)
+ : mPtr(aPtr)
#ifdef DEBUG
- , rangeStart(p), rangeEnd(p + length)
+ , mRangeStart(aPtr), mRangeEnd(aPtr + aLength)
#endif
- {
- MOZ_ASSERT(length <= size_t(-1) / sizeof(T));
- MOZ_ASSERT(uintptr_t(rangeStart) + length * sizeof(T) >= uintptr_t(rangeStart));
- checkSanity();
- }
+ {
+ MOZ_ASSERT(aLength <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT(reinterpret_cast<uintptr_t>(mRangeStart) + aLength * sizeof(T) >=
+ reinterpret_cast<uintptr_t>(mRangeStart));
+ checkSanity();
+ }
- /* Equivalent to RangedPtr(arr, arr, N). */
- template<size_t N>
- RangedPtr(T (&arr)[N])
- : ptr(arr)
+ /* Equivalent to RangedPtr(aArr, aArr, N). */
+ template<size_t N>
+ RangedPtr(T (&aArr)[N])
+ : mPtr(aArr)
#ifdef DEBUG
- , rangeStart(arr), rangeEnd(arr + N)
+ , mRangeStart(aArr), mRangeEnd(aArr + N)
#endif
- {
- checkSanity();
- }
+ {
+ checkSanity();
+ }
- T* get() const {
- return ptr;
- }
+ T* get() const { return mPtr; }
- operator ConvertibleToBool() const { return ptr ? &RangedPtr::nonNull : 0; }
+ operator ConvertibleToBool() const { return mPtr ? &RangedPtr::nonNull : 0; }
- /*
- * You can only assign one RangedPtr into another if the two pointers have
- * the same valid range:
- *
- * char arr1[] = "hi";
- * char arr2[] = "bye";
- * RangedPtr<char> p1(arr1, 2);
- * p1 = RangedPtr<char>(arr1 + 1, arr1, arr1 + 2); // works
- * p1 = RangedPtr<char>(arr2, 3); // asserts
- */
- RangedPtr<T>& operator=(const RangedPtr<T>& other) {
- MOZ_ASSERT(rangeStart == other.rangeStart);
- MOZ_ASSERT(rangeEnd == other.rangeEnd);
- ptr = other.ptr;
- checkSanity();
- return *this;
- }
+ /*
+ * You can only assign one RangedPtr into another if the two pointers have
+ * the same valid range:
+ *
+ * char arr1[] = "hi";
+ * char arr2[] = "bye";
+ * RangedPtr<char> p1(arr1, 2);
+ * p1 = RangedPtr<char>(arr1 + 1, arr1, arr1 + 2); // works
+ * p1 = RangedPtr<char>(arr2, 3); // asserts
+ */
+ RangedPtr<T>& operator=(const RangedPtr<T>& aOther)
+ {
+ MOZ_ASSERT(mRangeStart == aOther.mRangeStart);
+ MOZ_ASSERT(mRangeEnd == aOther.mRangeEnd);
+ mPtr = aOther.mPtr;
+ checkSanity();
+ return *this;
+ }
- RangedPtr<T> operator+(size_t inc) {
- MOZ_ASSERT(inc <= size_t(-1) / sizeof(T));
- MOZ_ASSERT(asUintptr() + inc * sizeof(T) >= asUintptr());
- return create(ptr + inc);
- }
+ RangedPtr<T> operator+(size_t aInc)
+ {
+ MOZ_ASSERT(aInc <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT(asUintptr() + aInc * sizeof(T) >= asUintptr());
+ return create(mPtr + aInc);
+ }
- RangedPtr<T> operator-(size_t dec) {
- MOZ_ASSERT(dec <= size_t(-1) / sizeof(T));
- MOZ_ASSERT(asUintptr() - dec * sizeof(T) <= asUintptr());
- return create(ptr - dec);
- }
+ RangedPtr<T> operator-(size_t aDec)
+ {
+ MOZ_ASSERT(aDec <= size_t(-1) / sizeof(T));
+ MOZ_ASSERT(asUintptr() - aDec * sizeof(T) <= asUintptr());
+ return create(mPtr - aDec);
+ }
- /*
- * You can assign a raw pointer into a RangedPtr if the raw pointer is
- * within the range specified at creation.
- */
- template <typename U>
- RangedPtr<T>& operator=(U* p) {
- *this = create(p);
- return *this;
- }
+ /*
+ * You can assign a raw pointer into a RangedPtr if the raw pointer is
+ * within the range specified at creation.
+ */
+ template <typename U>
+ RangedPtr<T>& operator=(U* aPtr)
+ {
+ *this = create(aPtr);
+ return *this;
+ }
- template <typename U>
- RangedPtr<T>& operator=(const RangedPtr<U>& p) {
- MOZ_ASSERT(rangeStart <= p.ptr);
- MOZ_ASSERT(p.ptr <= rangeEnd);
- ptr = p.ptr;
- checkSanity();
- return *this;
- }
+ template <typename U>
+ RangedPtr<T>& operator=(const RangedPtr<U>& aPtr)
+ {
+ MOZ_ASSERT(mRangeStart <= aPtr.mPtr);
+ MOZ_ASSERT(aPtr.mPtr <= mRangeEnd);
+ mPtr = aPtr.mPtr;
+ checkSanity();
+ return *this;
+ }
- RangedPtr<T>& operator++() {
- return (*this += 1);
- }
+ RangedPtr<T>& operator++()
+ {
+ return (*this += 1);
+ }
- RangedPtr<T> operator++(int) {
- RangedPtr<T> rcp = *this;
- ++*this;
- return rcp;
- }
+ RangedPtr<T> operator++(int)
+ {
+ RangedPtr<T> rcp = *this;
+ ++*this;
+ return rcp;
+ }
- RangedPtr<T>& operator--() {
- return (*this -= 1);
- }
+ RangedPtr<T>& operator--()
+ {
+ return (*this -= 1);
+ }
- RangedPtr<T> operator--(int) {
- RangedPtr<T> rcp = *this;
- --*this;
- return rcp;
- }
+ RangedPtr<T> operator--(int)
+ {
+ RangedPtr<T> rcp = *this;
+ --*this;
+ return rcp;
+ }
- RangedPtr<T>& operator+=(size_t inc) {
- *this = *this + inc;
- return *this;
- }
+ RangedPtr<T>& operator+=(size_t aInc)
+ {
+ *this = *this + aInc;
+ return *this;
+ }
- RangedPtr<T>& operator-=(size_t dec) {
- *this = *this - dec;
- return *this;
- }
+ RangedPtr<T>& operator-=(size_t aDec)
+ {
+ *this = *this - aDec;
+ return *this;
+ }
- T& operator[](int index) const {
- MOZ_ASSERT(size_t(index > 0 ? index : -index) <= size_t(-1) / sizeof(T));
- return *create(ptr + index);
- }
+ T& operator[](int aIndex) const
+ {
+ MOZ_ASSERT(size_t(aIndex > 0 ? aIndex : -aIndex) <= size_t(-1) / sizeof(T));
+ return *create(mPtr + aIndex);
+ }
- T& operator*() const {
- MOZ_ASSERT(ptr >= rangeStart);
- MOZ_ASSERT(ptr < rangeEnd);
- return *ptr;
- }
+ T& operator*() const
+ {
+ MOZ_ASSERT(mPtr >= mRangeStart);
+ MOZ_ASSERT(mPtr < mRangeEnd);
+ return *mPtr;
+ }
- template <typename U>
- bool operator==(const RangedPtr<U>& other) const {
- return ptr == other.ptr;
- }
- template <typename U>
- bool operator!=(const RangedPtr<U>& other) const {
- return !(*this == other);
- }
+ template <typename U>
+ bool operator==(const RangedPtr<U>& aOther) const
+ {
+ return mPtr == aOther.mPtr;
+ }
+ template <typename U>
+ bool operator!=(const RangedPtr<U>& aOther) const
+ {
+ return !(*this == aOther);
+ }
- template<typename U>
- bool operator==(const U* u) const {
- return ptr == u;
- }
- template<typename U>
- bool operator!=(const U* u) const {
- return !(*this == u);
- }
-
- template <typename U>
- bool operator<(const RangedPtr<U>& other) const {
- return ptr < other.ptr;
- }
- template <typename U>
- bool operator<=(const RangedPtr<U>& other) const {
- return ptr <= other.ptr;
- }
+ template<typename U>
+ bool operator==(const U* u) const
+ {
+ return mPtr == u;
+ }
+ template<typename U>
+ bool operator!=(const U* u) const
+ {
+ return !(*this == u);
+ }
- template <typename U>
- bool operator>(const RangedPtr<U>& other) const {
- return ptr > other.ptr;
- }
- template <typename U>
- bool operator>=(const RangedPtr<U>& other) const {
- return ptr >= other.ptr;
- }
+ template <typename U>
+ bool operator<(const RangedPtr<U>& aOther) const
+ {
+ return mPtr < aOther.mPtr;
+ }
+ template <typename U>
+ bool operator<=(const RangedPtr<U>& aOther) const
+ {
+ return mPtr <= aOther.mPtr;
+ }
- size_t operator-(const RangedPtr<T>& other) const {
- MOZ_ASSERT(ptr >= other.ptr);
- return PointerRangeSize(other.ptr, ptr);
- }
+ template <typename U>
+ bool operator>(const RangedPtr<U>& aOther) const
+ {
+ return mPtr > aOther.mPtr;
+ }
+ template <typename U>
+ bool operator>=(const RangedPtr<U>& aOther) const
+ {
+ return mPtr >= aOther.mPtr;
+ }
- private:
- RangedPtr() MOZ_DELETE;
- T* operator&() MOZ_DELETE;
+ size_t operator-(const RangedPtr<T>& aOther) const
+ {
+ MOZ_ASSERT(mPtr >= aOther.mPtr);
+ return PointerRangeSize(aOther.mPtr, mPtr);
+ }
+
+private:
+ RangedPtr() MOZ_DELETE;
+ T* operator&() MOZ_DELETE;
};
} /* namespace mozilla */
#endif /* mozilla_RangedPtr_h */
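
A short usage sketch of the restyled class: the pointer carries its valid range, and in DEBUG builds every dereference and every arithmetic step is checked against it.

#include <stddef.h>

#include "mozilla/RangedPtr.h"

void
FillAscending(int (&aArr)[8])
{
  mozilla::RangedPtr<int> p(aArr);  // equivalent to RangedPtr(aArr, aArr, 8)
  for (size_t i = 0; i < 8; i++, p++) {
    *p = int(i);
  }
  // p now equals aArr + 8, the one-past-the-end position; dereferencing it
  // here would trip the MOZ_ASSERT(mPtr < mRangeEnd) in operator*.
}
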
--- a/mfbt/ReentrancyGuard.h
+++ b/mfbt/ReentrancyGuard.h
@@ -13,45 +13,45 @@
#include "mozilla/Attributes.h"
#include "mozilla/GuardObjects.h"
namespace mozilla {
/* Useful for implementing containers that assert non-reentrancy */
class ReentrancyGuard
{
- MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+ MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
#ifdef DEBUG
- bool& entered;
+ bool& mEntered;
#endif
- public:
- template<class T>
+public:
+ template<class T>
#ifdef DEBUG
- ReentrancyGuard(T& obj
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : entered(obj.entered)
+ ReentrancyGuard(T& aObj
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : mEntered(aObj.mEntered)
#else
- ReentrancyGuard(T&
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ ReentrancyGuard(T&
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
#endif
- {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
#ifdef DEBUG
- MOZ_ASSERT(!entered);
- entered = true;
+ MOZ_ASSERT(!mEntered);
+ mEntered = true;
#endif
- }
- ~ReentrancyGuard()
- {
+ }
+ ~ReentrancyGuard()
+ {
#ifdef DEBUG
- entered = false;
+ mEntered = false;
#endif
- }
+ }
- private:
- ReentrancyGuard(const ReentrancyGuard&) MOZ_DELETE;
- void operator=(const ReentrancyGuard&) MOZ_DELETE;
+private:
+ ReentrancyGuard(const ReentrancyGuard&) MOZ_DELETE;
+ void operator=(const ReentrancyGuard&) MOZ_DELETE;
};
} // namespace mozilla
#endif /* mozilla_ReentrancyGuard_h */
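
The member renames in HashTable.h and StoreBuffer.h earlier in this patch exist because ReentrancyGuard reaches into the guarded object by name; after this change it reads aObj.mEntered. A minimal sketch of the pattern:

#include "mozilla/DebugOnly.h"
#include "mozilla/ReentrancyGuard.h"

class Container
{
  friend class mozilla::ReentrancyGuard;
  mozilla::DebugOnly<bool> mEntered;

public:
  Container() : mEntered(false) {}

  void mutate()
  {
    mozilla::ReentrancyGuard g(*this);
    // Re-entering mutate() from here (say, via a callback) would hit the
    // MOZ_ASSERT(!mEntered) in the guard's constructor in DEBUG builds.
  }
};
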
--- a/mfbt/RefPtr.h
+++ b/mfbt/RefPtr.h
@@ -7,23 +7,25 @@
/* Helpers for defining and using refcounted objects. */
#ifndef mozilla_RefPtr_h
#define mozilla_RefPtr_h
#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
+#include "mozilla/NullPtr.h"
#include "mozilla/RefCountType.h"
#include "mozilla/TypeTraits.h"
#if defined(MOZILLA_INTERNAL_API)
#include "nsXPCOM.h"
#endif
-#if defined(MOZILLA_INTERNAL_API) && (defined(DEBUG) || defined(FORCE_BUILD_REFCNT_LOGGING))
+#if defined(MOZILLA_INTERNAL_API) && \
+ (defined(DEBUG) || defined(FORCE_BUILD_REFCNT_LOGGING))
#define MOZ_REFCOUNTED_LEAK_CHECKING
#endif
namespace mozilla {
template<typename T> class RefCounted;
template<typename T> class RefPtr;
template<typename T> class TemporaryRef;
@@ -60,103 +62,107 @@ namespace detail {
const MozRefCountType DEAD = 0xffffdead;
#endif
// When building code that gets compiled into Gecko, try to use the
// trace-refcount leak logging facilities.
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
class RefCountLogger
{
- public:
- static void logAddRef(const void* aPointer, MozRefCountType aRefCount,
- const char* aTypeName, uint32_t aInstanceSize)
- {
- MOZ_ASSERT(aRefCount != DEAD);
- NS_LogAddRef(const_cast<void*>(aPointer), aRefCount, aTypeName, aInstanceSize);
- }
+public:
+ static void logAddRef(const void* aPointer, MozRefCountType aRefCount,
+ const char* aTypeName, uint32_t aInstanceSize)
+ {
+ MOZ_ASSERT(aRefCount != DEAD);
+ NS_LogAddRef(const_cast<void*>(aPointer), aRefCount, aTypeName,
+ aInstanceSize);
+ }
- static void logRelease(const void* aPointer, MozRefCountType aRefCount,
- const char* aTypeName)
- {
- MOZ_ASSERT(aRefCount != DEAD);
- NS_LogRelease(const_cast<void*>(aPointer), aRefCount, aTypeName);
- }
+ static void logRelease(const void* aPointer, MozRefCountType aRefCount,
+ const char* aTypeName)
+ {
+ MOZ_ASSERT(aRefCount != DEAD);
+ NS_LogRelease(const_cast<void*>(aPointer), aRefCount, aTypeName);
+ }
};
#endif
// This is used WeakPtr.h as well as this file.
enum RefCountAtomicity
{
AtomicRefCount,
NonAtomicRefCount
};
template<typename T, RefCountAtomicity Atomicity>
class RefCounted
{
- friend class RefPtr<T>;
+ friend class RefPtr<T>;
+
+protected:
+ RefCounted() : mRefCnt(0) {}
+ ~RefCounted() { MOZ_ASSERT(mRefCnt == detail::DEAD); }
- protected:
- RefCounted() : refCnt(0) { }
- ~RefCounted() {
- MOZ_ASSERT(refCnt == detail::DEAD);
- }
-
- public:
- // Compatibility with nsRefPtr.
- void AddRef() const {
- // Note: this method must be thread safe for AtomicRefCounted.
- MOZ_ASSERT(int32_t(refCnt) >= 0);
+public:
+ // Compatibility with nsRefPtr.
+ void AddRef() const
+ {
+ // Note: this method must be thread safe for AtomicRefCounted.
+ MOZ_ASSERT(int32_t(mRefCnt) >= 0);
#ifndef MOZ_REFCOUNTED_LEAK_CHECKING
- ++refCnt;
+ ++mRefCnt;
#else
- const char* type = static_cast<const T*>(this)->typeName();
- uint32_t size = static_cast<const T*>(this)->typeSize();
- const void* ptr = static_cast<const T*>(this);
- MozRefCountType cnt = ++refCnt;
- detail::RefCountLogger::logAddRef(ptr, cnt, type, size);
+ const char* type = static_cast<const T*>(this)->typeName();
+ uint32_t size = static_cast<const T*>(this)->typeSize();
+ const void* ptr = static_cast<const T*>(this);
+ MozRefCountType cnt = ++mRefCnt;
+ detail::RefCountLogger::logAddRef(ptr, cnt, type, size);
#endif
- }
+ }
- void Release() const {
- // Note: this method must be thread safe for AtomicRefCounted.
- MOZ_ASSERT(int32_t(refCnt) > 0);
+ void Release() const
+ {
+ // Note: this method must be thread safe for AtomicRefCounted.
+ MOZ_ASSERT(int32_t(mRefCnt) > 0);
#ifndef MOZ_REFCOUNTED_LEAK_CHECKING
- MozRefCountType cnt = --refCnt;
+ MozRefCountType cnt = --mRefCnt;
#else
- const char* type = static_cast<const T*>(this)->typeName();
- const void* ptr = static_cast<const T*>(this);
- MozRefCountType cnt = --refCnt;
- // Note: it's not safe to touch |this| after decrementing the refcount,
- // except for below.
- detail::RefCountLogger::logRelease(ptr, cnt, type);
+ const char* type = static_cast<const T*>(this)->typeName();
+ const void* ptr = static_cast<const T*>(this);
+ MozRefCountType cnt = --mRefCnt;
+ // Note: it's not safe to touch |this| after decrementing the refcount,
+ // except for below.
+ detail::RefCountLogger::logRelease(ptr, cnt, type);
#endif
- if (0 == cnt) {
- // Because we have atomically decremented the refcount above, only
- // one thread can get a 0 count here, so as long as we can assume that
- // everything else in the system is accessing this object through
- // RefPtrs, it's safe to access |this| here.
+ if (0 == cnt) {
+ // Because we have atomically decremented the refcount above, only
+ // one thread can get a 0 count here, so as long as we can assume that
+ // everything else in the system is accessing this object through
+ // RefPtrs, it's safe to access |this| here.
#ifdef DEBUG
- refCnt = detail::DEAD;
+ mRefCnt = detail::DEAD;
#endif
- delete static_cast<const T*>(this);
- }
+ delete static_cast<const T*>(this);
}
+ }
- // Compatibility with wtf::RefPtr.
- void ref() { AddRef(); }
- void deref() { Release(); }
- MozRefCountType refCount() const { return refCnt; }
- bool hasOneRef() const {
- MOZ_ASSERT(refCnt > 0);
- return refCnt == 1;
- }
+ // Compatibility with wtf::RefPtr.
+ void ref() { AddRef(); }
+ void deref() { Release(); }
+ MozRefCountType refCount() const { return mRefCnt; }
+ bool hasOneRef() const
+ {
+ MOZ_ASSERT(mRefCnt > 0);
+ return mRefCnt == 1;
+ }
- private:
- mutable typename Conditional<Atomicity == AtomicRefCount, Atomic<MozRefCountType>, MozRefCountType>::Type refCnt;
+private:
+ mutable typename Conditional<Atomicity == AtomicRefCount,
+ Atomic<MozRefCountType>,
+ MozRefCountType>::Type mRefCnt;
};
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
#define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T) \
virtual const char* typeName() const { return #T; } \
virtual size_t typeSize() const { return sizeof(*this); }
#else
#define MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(T)
@@ -164,167 +170,181 @@ class RefCounted
// Note that this macro is expanded unconditionally because it declares only
// two small inline functions which will hopefully get eliminated by the linker
// in non-leak-checking builds.
#define MOZ_DECLARE_REFCOUNTED_TYPENAME(T) \
const char* typeName() const { return #T; } \
size_t typeSize() const { return sizeof(*this); }
-}
+} // namespace detail
template<typename T>
class RefCounted : public detail::RefCounted<T, detail::NonAtomicRefCount>
{
- public:
- ~RefCounted() {
- static_assert(IsBaseOf<RefCounted, T>::value,
- "T must derive from RefCounted<T>");
- }
+public:
+ ~RefCounted()
+ {
+ static_assert(IsBaseOf<RefCounted, T>::value,
+ "T must derive from RefCounted<T>");
+ }
};
namespace external {
/**
* AtomicRefCounted<T> is like RefCounted<T>, with an atomically updated
* reference counter.
*
* NOTE: Please do not use this class, use NS_INLINE_DECL_THREADSAFE_REFCOUNTING
* instead.
*/
template<typename T>
-class AtomicRefCounted : public mozilla::detail::RefCounted<T, mozilla::detail::AtomicRefCount>
+class AtomicRefCounted :
+ public mozilla::detail::RefCounted<T, mozilla::detail::AtomicRefCount>
{
- public:
- ~AtomicRefCounted() {
- static_assert(IsBaseOf<AtomicRefCounted, T>::value,
- "T must derive from AtomicRefCounted<T>");
- }
+public:
+ ~AtomicRefCounted()
+ {
+ static_assert(IsBaseOf<AtomicRefCounted, T>::value,
+ "T must derive from AtomicRefCounted<T>");
+ }
};
-}
+} // namespace external
/**
* RefPtr points to a refcounted thing that has AddRef and Release
* methods to increase/decrease the refcount, respectively. After a
* RefPtr<T> is assigned a T*, the T* can be used through the RefPtr
* as if it were a T*.
*
* A RefPtr can forget its underlying T*, which results in the T*
* being wrapped in a temporary object until the T* is either
* re-adopted from or released by the temporary.
*/
template<typename T>
class RefPtr
{
- // To allow them to use unref()
- friend class TemporaryRef<T>;
- friend class OutParamRef<T>;
+ // To allow them to use unref()
+ friend class TemporaryRef<T>;
+ friend class OutParamRef<T>;
- struct DontRef {};
+ struct DontRef {};
- public:
- RefPtr() : ptr(0) { }
- RefPtr(const RefPtr& o) : ptr(ref(o.ptr)) {}
- MOZ_IMPLICIT RefPtr(const TemporaryRef<T>& o) : ptr(o.drop()) {}
- MOZ_IMPLICIT RefPtr(T* t) : ptr(ref(t)) {}
+public:
+ RefPtr() : mPtr(0) {}
+ RefPtr(const RefPtr& aOther) : mPtr(ref(aOther.mPtr)) {}
+ MOZ_IMPLICIT RefPtr(const TemporaryRef<T>& aOther) : mPtr(aOther.drop()) {}
+ MOZ_IMPLICIT RefPtr(T* aVal) : mPtr(ref(aVal)) {}
- template<typename U>
- RefPtr(const RefPtr<U>& o) : ptr(ref(o.get())) {}
+ template<typename U>
+ RefPtr(const RefPtr<U>& aOther) : mPtr(ref(aOther.get())) {}
- ~RefPtr() { unref(ptr); }
+ ~RefPtr() { unref(mPtr); }
- RefPtr& operator=(const RefPtr& o) {
- assign(ref(o.ptr));
- return *this;
- }
- RefPtr& operator=(const TemporaryRef<T>& o) {
- assign(o.drop());
- return *this;
- }
- RefPtr& operator=(T* t) {
- assign(ref(t));
- return *this;
- }
+ RefPtr& operator=(const RefPtr& aOther)
+ {
+ assign(ref(aOther.mPtr));
+ return *this;
+ }
+ RefPtr& operator=(const TemporaryRef<T>& aOther)
+ {
+ assign(aOther.drop());
+ return *this;
+ }
+ RefPtr& operator=(T* aVal)
+ {
+ assign(ref(aVal));
+ return *this;
+ }
- template<typename U>
- RefPtr& operator=(const RefPtr<U>& o) {
- assign(ref(o.get()));
- return *this;
- }
+ template<typename U>
+ RefPtr& operator=(const RefPtr<U>& aOther)
+ {
+ assign(ref(aOther.get()));
+ return *this;
+ }
- TemporaryRef<T> forget() {
- T* tmp = ptr;
- ptr = 0;
- return TemporaryRef<T>(tmp, DontRef());
- }
+ TemporaryRef<T> forget()
+ {
+ T* tmp = mPtr;
+ mPtr = nullptr;
+ return TemporaryRef<T>(tmp, DontRef());
+ }
- T* get() const { return ptr; }
- operator T*() const { return ptr; }
- T* operator->() const { return ptr; }
- T& operator*() const { return *ptr; }
- template<typename U>
- operator TemporaryRef<U>() { return TemporaryRef<U>(ptr); }
+ T* get() const { return mPtr; }
+ operator T*() const { return mPtr; }
+ T* operator->() const { return mPtr; }
+ T& operator*() const { return *mPtr; }
+ template<typename U>
+ operator TemporaryRef<U>() { return TemporaryRef<U>(mPtr); }
- private:
- void assign(T* t) {
- unref(ptr);
- ptr = t;
- }
+private:
+ void assign(T* aVal)
+ {
+ unref(mPtr);
+ mPtr = aVal;
+ }
- T* ptr;
+ T* mPtr;
- static MOZ_ALWAYS_INLINE T* ref(T* t) {
- if (t)
- t->AddRef();
- return t;
+ static MOZ_ALWAYS_INLINE T* ref(T* aVal)
+ {
+ if (aVal) {
+ aVal->AddRef();
}
+ return aVal;
+ }
- static MOZ_ALWAYS_INLINE void unref(T* t) {
- if (t)
- t->Release();
+ static MOZ_ALWAYS_INLINE void unref(T* aVal)
+ {
+ if (aVal) {
+ aVal->Release();
}
+ }
};
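+/*
+ * A minimal usage sketch (Foo is a hypothetical RefCounted<Foo> subclass,
+ * shown only for illustration):
+ *
+ *   RefPtr<Foo> a = new Foo();         // AddRef()ed, owned by |a|
+ *   TemporaryRef<Foo> t = a.forget();  // |a| is now null; |t| holds the ref
+ *   RefPtr<Foo> b = t;                 // |b| re-adopts the reference;
+ *                                      // Release() happens when |b| dies
+ */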
/**
* TemporaryRef<T> represents an object that holds a temporary
* reference to a T. TemporaryRef objects can't be manually ref'd or
* unref'd (being temporaries, not lvalues), so can only relinquish
* references to other objects, or unref on destruction.
*/
template<typename T>
class TemporaryRef
{
- // To allow it to construct TemporaryRef from a bare T*
- friend class RefPtr<T>;
+ // To allow it to construct TemporaryRef from a bare T*
+ friend class RefPtr<T>;
- typedef typename RefPtr<T>::DontRef DontRef;
+ typedef typename RefPtr<T>::DontRef DontRef;
- public:
- MOZ_IMPLICIT TemporaryRef(T* t) : ptr(RefPtr<T>::ref(t)) {}
- TemporaryRef(const TemporaryRef& o) : ptr(o.drop()) {}
+public:
+ MOZ_IMPLICIT TemporaryRef(T* aVal) : mPtr(RefPtr<T>::ref(aVal)) {}
+ TemporaryRef(const TemporaryRef& aOther) : mPtr(aOther.drop()) {}
- template<typename U>
- TemporaryRef(const TemporaryRef<U>& o) : ptr(o.drop()) {}
+ template<typename U>
+ TemporaryRef(const TemporaryRef<U>& aOther) : mPtr(aOther.drop()) {}
- ~TemporaryRef() { RefPtr<T>::unref(ptr); }
+ ~TemporaryRef() { RefPtr<T>::unref(mPtr); }
- T* drop() const {
- T* tmp = ptr;
- ptr = 0;
- return tmp;
- }
+ T* drop() const
+ {
+ T* tmp = mPtr;
+ mPtr = nullptr;
+ return tmp;
+ }
- private:
- TemporaryRef(T* t, const DontRef&) : ptr(t) {}
+private:
+ TemporaryRef(T* aVal, const DontRef&) : mPtr(aVal) {}
- mutable T* ptr;
+ mutable T* mPtr;
- TemporaryRef() MOZ_DELETE;
- void operator=(const TemporaryRef&) MOZ_DELETE;
+ TemporaryRef() MOZ_DELETE;
+ void operator=(const TemporaryRef&) MOZ_DELETE;
};
/**
* OutParamRef is a wrapper that tracks a refcounted pointer passed as
* an outparam argument to a function. OutParamRef implements COM T**
* outparam semantics: this requires the callee to AddRef() the T*
* returned through the T** outparam on behalf of the caller. This
* means the caller (through OutParamRef) must Release() the old
@@ -334,72 +354,74 @@ class TemporaryRef
*
* Prefer returning TemporaryRef<T> from functions over creating T**
* outparams and passing OutParamRef<T> to T**. Prefer RefPtr<T>*
* outparams over T** outparams.
*/
template<typename T>
class OutParamRef
{
- friend OutParamRef byRef<T>(RefPtr<T>&);
+ friend OutParamRef byRef<T>(RefPtr<T>&);
- public:
- ~OutParamRef() {
- RefPtr<T>::unref(refPtr.ptr);
- refPtr.ptr = tmp;
- }
+public:
+ ~OutParamRef()
+ {
+ RefPtr<T>::unref(mRefPtr.mPtr);
+ mRefPtr.mPtr = mTmp;
+ }
- operator T**() { return &tmp; }
+ operator T**() { return &mTmp; }
- private:
- explicit OutParamRef(RefPtr<T>& p) : refPtr(p), tmp(p.get()) {}
+private:
+ explicit OutParamRef(RefPtr<T>& p) : mRefPtr(p), mTmp(p.get()) {}
- RefPtr<T>& refPtr;
- T* tmp;
+ RefPtr<T>& mRefPtr;
+ T* mTmp;
- OutParamRef() MOZ_DELETE;
- OutParamRef& operator=(const OutParamRef&) MOZ_DELETE;
+ OutParamRef() MOZ_DELETE;
+ OutParamRef& operator=(const OutParamRef&) MOZ_DELETE;
};
/**
* byRef cooperates with OutParamRef to implement COM outparam semantics.
*/
template<typename T>
OutParamRef<T>
-byRef(RefPtr<T>& ptr)
+byRef(RefPtr<T>& aPtr)
{
- return OutParamRef<T>(ptr);
+ return OutParamRef<T>(aPtr);
}
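+/*
+ * Usage sketch: |Handler| and |GetHandler| are hypothetical; GetHandler is
+ * assumed to follow COM outparam semantics, i.e. it AddRef()s the pointer it
+ * returns through the T** outparam.
+ *
+ *   void GetHandler(Handler** aOut);
+ *
+ *   RefPtr<Handler> h;
+ *   GetHandler(byRef(h));  // any old value of |h| is Release()d, and |h|
+ *                          // takes over the AddRef()ed pointer on return
+ */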
} // namespace mozilla
#if 0
// Command line that builds these tests
//
// cp RefPtr.h test.cc && g++ -g -Wall -pedantic -DDEBUG -o test test.cc && ./test
using namespace mozilla;
struct Foo : public RefCounted<Foo>
{
MOZ_DECLARE_REFCOUNTED_TYPENAME(Foo)
- Foo() : dead(false) { }
- ~Foo() {
- MOZ_ASSERT(!dead);
- dead = true;
- numDestroyed++;
+ Foo() : mDead(false) {}
+ ~Foo()
+ {
+ MOZ_ASSERT(!mDead);
+ mDead = true;
+ sNumDestroyed++;
}
- bool dead;
- static int numDestroyed;
+ bool mDead;
+ static int sNumDestroyed;
};
-int Foo::numDestroyed;
+int Foo::sNumDestroyed;
-struct Bar : public Foo { };
+struct Bar : public Foo {};
TemporaryRef<Foo>
NewFoo()
{
return RefPtr<Foo>(new Foo());
}
TemporaryRef<Foo>
@@ -440,91 +462,91 @@ GetNullFoo()
}
int
main(int argc, char** argv)
{
// This should blow up
// Foo* f = new Foo(); delete f;
- MOZ_ASSERT(0 == Foo::numDestroyed);
+ MOZ_ASSERT(0 == Foo::sNumDestroyed);
{
RefPtr<Foo> f = new Foo();
MOZ_ASSERT(f->refCount() == 1);
}
- MOZ_ASSERT(1 == Foo::numDestroyed);
+ MOZ_ASSERT(1 == Foo::sNumDestroyed);
{
RefPtr<Foo> f1 = NewFoo();
RefPtr<Foo> f2(NewFoo());
- MOZ_ASSERT(1 == Foo::numDestroyed);
+ MOZ_ASSERT(1 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(3 == Foo::numDestroyed);
+ MOZ_ASSERT(3 == Foo::sNumDestroyed);
{
RefPtr<Foo> b = NewBar();
- MOZ_ASSERT(3 == Foo::numDestroyed);
+ MOZ_ASSERT(3 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(4 == Foo::numDestroyed);
+ MOZ_ASSERT(4 == Foo::sNumDestroyed);
{
RefPtr<Foo> f1;
{
f1 = new Foo();
RefPtr<Foo> f2(f1);
RefPtr<Foo> f3 = f2;
- MOZ_ASSERT(4 == Foo::numDestroyed);
+ MOZ_ASSERT(4 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(4 == Foo::numDestroyed);
+ MOZ_ASSERT(4 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(5 == Foo::numDestroyed);
+ MOZ_ASSERT(5 == Foo::sNumDestroyed);
{
RefPtr<Foo> f = new Foo();
f.forget();
- MOZ_ASSERT(6 == Foo::numDestroyed);
+ MOZ_ASSERT(6 == Foo::sNumDestroyed);
}
{
RefPtr<Foo> f = new Foo();
GetNewFoo(byRef(f));
- MOZ_ASSERT(7 == Foo::numDestroyed);
+ MOZ_ASSERT(7 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(8 == Foo::numDestroyed);
+ MOZ_ASSERT(8 == Foo::sNumDestroyed);
{
RefPtr<Foo> f = new Foo();
GetPassedFoo(byRef(f));
- MOZ_ASSERT(8 == Foo::numDestroyed);
+ MOZ_ASSERT(8 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(9 == Foo::numDestroyed);
+ MOZ_ASSERT(9 == Foo::sNumDestroyed);
{
RefPtr<Foo> f = new Foo();
GetNewFoo(&f);
- MOZ_ASSERT(10 == Foo::numDestroyed);
+ MOZ_ASSERT(10 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(11 == Foo::numDestroyed);
+ MOZ_ASSERT(11 == Foo::sNumDestroyed);
{
RefPtr<Foo> f = new Foo();
GetPassedFoo(&f);
- MOZ_ASSERT(11 == Foo::numDestroyed);
+ MOZ_ASSERT(11 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(12 == Foo::numDestroyed);
+ MOZ_ASSERT(12 == Foo::sNumDestroyed);
{
RefPtr<Foo> f1 = new Bar();
}
- MOZ_ASSERT(13 == Foo::numDestroyed);
+ MOZ_ASSERT(13 == Foo::sNumDestroyed);
{
RefPtr<Foo> f = GetNullFoo();
- MOZ_ASSERT(13 == Foo::numDestroyed);
+ MOZ_ASSERT(13 == Foo::sNumDestroyed);
}
- MOZ_ASSERT(13 == Foo::numDestroyed);
+ MOZ_ASSERT(13 == Foo::sNumDestroyed);
return 0;
}
#endif
#endif /* mozilla_RefPtr_h */
--- a/mfbt/RollingMean.h
+++ b/mfbt/RollingMean.h
@@ -1,9 +1,10 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-/* A set abstraction for enumeration values. */
+/* A rolling mean of a series of values, over a fixed-size window. */
#ifndef mozilla_RollingMean_h_
#define mozilla_RollingMean_h_
@@ -23,85 +24,92 @@ namespace mozilla {
* type in order to maintain that the sum of all values in the average doesn't
* exceed the maximum input value.
*
* WARNING: Float types are not supported due to rounding errors.
*/
template<typename T, typename S>
class RollingMean
{
- private:
- size_t mInsertIndex;
- size_t mMaxValues;
- Vector<T> mValues;
- S mTotal;
+private:
+ size_t mInsertIndex;
+ size_t mMaxValues;
+ Vector<T> mValues;
+ S mTotal;
- public:
- static_assert(!IsFloatingPoint<T>::value,
- "floating-point types are unsupported due to rounding "
- "errors");
+public:
+ static_assert(!IsFloatingPoint<T>::value,
+ "floating-point types are unsupported due to rounding "
+ "errors");
+
+ explicit RollingMean(size_t aMaxValues)
+ : mInsertIndex(0),
+ mMaxValues(aMaxValues),
+ mTotal(0)
+ {
+ MOZ_ASSERT(aMaxValues > 0);
+ }
- explicit RollingMean(size_t aMaxValues)
- : mInsertIndex(0),
- mMaxValues(aMaxValues),
- mTotal(0)
- {
- MOZ_ASSERT(aMaxValues > 0);
- }
+ RollingMean& operator=(RollingMean&& aOther)
+ {
+ MOZ_ASSERT(this != &aOther, "self-assignment is forbidden");
+ this->~RollingMean();
+ new(this) RollingMean(aOther.mMaxValues);
+ mInsertIndex = aOther.mInsertIndex;
+ mTotal = aOther.mTotal;
+ mValues.swap(aOther.mValues);
+ return *this;
+ }
- RollingMean& operator=(RollingMean&& aOther) {
- MOZ_ASSERT(this != &aOther, "self-assignment is forbidden");
- this->~RollingMean();
- new(this) RollingMean(aOther.mMaxValues);
- mInsertIndex = aOther.mInsertIndex;
- mTotal = aOther.mTotal;
- mValues.swap(aOther.mValues);
- return *this;
+ /**
+ * Insert a value into the rolling mean.
+ */
+ bool insert(T aValue)
+ {
+ MOZ_ASSERT(mValues.length() <= mMaxValues);
+
+ if (mValues.length() == mMaxValues) {
+ mTotal = mTotal - mValues[mInsertIndex] + aValue;
+ mValues[mInsertIndex] = aValue;
+ } else {
+ if (!mValues.append(aValue)) {
+ return false;
+ }
+ mTotal = mTotal + aValue;
}
- /**
- * Insert a value into the rolling mean.
- */
- bool insert(T aValue) {
- MOZ_ASSERT(mValues.length() <= mMaxValues);
+ mInsertIndex = (mInsertIndex + 1) % mMaxValues;
+ return true;
+ }
- if (mValues.length() == mMaxValues) {
- mTotal = mTotal - mValues[mInsertIndex] + aValue;
- mValues[mInsertIndex] = aValue;
- } else {
- if (!mValues.append(aValue))
- return false;
- mTotal = mTotal + aValue;
- }
-
- mInsertIndex = (mInsertIndex + 1) % mMaxValues;
- return true;
- }
+ /**
+ * Calculate the rolling mean.
+ */
+ T mean()
+ {
+ MOZ_ASSERT(!empty());
+ return T(mTotal / mValues.length());
+ }
- /**
- * Calculate the rolling mean.
- */
- T mean() {
- MOZ_ASSERT(!empty());
- return T(mTotal / mValues.length());
- }
-
- bool empty() {
- return mValues.empty();
- }
+ bool empty()
+ {
+ return mValues.empty();
+ }
- /**
- * Remove all values from the rolling mean.
- */
- void clear() {
- mValues.clear();
- mInsertIndex = 0;
- mTotal = T(0);
- }
+ /**
+ * Remove all values from the rolling mean.
+ */
+ void clear()
+ {
+ mValues.clear();
+ mInsertIndex = 0;
+ mTotal = T(0);
+ }
- size_t maxValues() {
- return mMaxValues;
- }
+ size_t maxValues()
+ {
+ return mMaxValues;
+ }
};
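+/*
+ * Usage sketch: a rolling mean over the last four samples, with a wider
+ * accumulator type so the running total cannot overflow.
+ *
+ *   RollingMean<uint32_t, uint64_t> mean(4);
+ *   if (mean.insert(10) && mean.insert(20)) {
+ *     uint32_t m = mean.mean();  // (10 + 20) / 2 == 15
+ *   }
+ */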
} // namespace mozilla
#endif // mozilla_RollingMean_h_
--- a/mfbt/SHA1.cpp
+++ b/mfbt/SHA1.cpp
@@ -1,47 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/Assertions.h"
#include "mozilla/Endian.h"
#include "mozilla/SHA1.h"
#include <string.h>
using mozilla::NativeEndian;
using mozilla::SHA1Sum;
static inline uint32_t
-SHA_ROTL(uint32_t t, uint32_t n)
+SHA_ROTL(uint32_t aT, uint32_t aN)
{
- MOZ_ASSERT(n < 32);
- return (t << n) | (t >> (32 - n));
+ MOZ_ASSERT(aN < 32);
+ return (aT << aN) | (aT >> (32 - aN));
}
static void
-shaCompress(volatile unsigned* X, const uint32_t* datain);
+shaCompress(volatile unsigned* aX, const uint32_t* aBuf);
#define SHA_F1(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
#define SHA_F2(X, Y, Z) ((X) ^ (Y) ^ (Z))
#define SHA_F3(X, Y, Z) (((X) & (Y)) | ((Z) & ((X) | (Y))))
#define SHA_F4(X, Y, Z) ((X) ^ (Y) ^ (Z))
#define SHA_MIX(n, a, b, c) XW(n) = SHA_ROTL(XW(a) ^ XW(b) ^ XW(c) ^XW(n), 1)
SHA1Sum::SHA1Sum()
- : size(0), mDone(false)
+ : mSize(0), mDone(false)
{
// Initialize H with constants from FIPS180-1.
- H[0] = 0x67452301L;
- H[1] = 0xefcdab89L;
- H[2] = 0x98badcfeL;
- H[3] = 0x10325476L;
- H[4] = 0xc3d2e1f0L;
+ mH[0] = 0x67452301L;
+ mH[1] = 0xefcdab89L;
+ mH[2] = 0x98badcfeL;
+ mH[3] = 0x10325476L;
+ mH[4] = 0xc3d2e1f0L;
}
/*
* Explanation of H array and index values:
*
* The context's H array is actually the concatenation of two arrays
* defined by SHA1, the H array of state variables (5 elements),
* and the W array of intermediate values, of which there are 16 elements.
@@ -74,88 +76,92 @@ SHA1Sum::SHA1Sum()
*/
#define H2X 11 /* X[0] is H[11], and H[0] is X[-11] */
#define W2X 6 /* X[0] is W[6], and W[0] is X[-6] */
/*
* SHA: Add data to context.
*/
void
-SHA1Sum::update(const void* dataIn, uint32_t len)
+SHA1Sum::update(const void* aData, uint32_t aLen)
{
MOZ_ASSERT(!mDone, "SHA1Sum can only be used to compute a single hash.");
- const uint8_t* data = static_cast<const uint8_t*>(dataIn);
+ const uint8_t* data = static_cast<const uint8_t*>(aData);
- if (len == 0)
+ if (aLen == 0) {
return;
+ }
/* Accumulate the byte count. */
- unsigned int lenB = static_cast<unsigned int>(size) & 63U;
+ unsigned int lenB = static_cast<unsigned int>(mSize) & 63U;
- size += len;
+ mSize += aLen;
/* Read the data into W and process blocks as they get full. */
unsigned int togo;
if (lenB > 0) {
togo = 64U - lenB;
- if (len < togo)
- togo = len;
- memcpy(u.b + lenB, data, togo);
- len -= togo;
+ if (aLen < togo) {
+ togo = aLen;
+ }
+ memcpy(mU.mB + lenB, data, togo);
+ aLen -= togo;
data += togo;
lenB = (lenB + togo) & 63U;
- if (!lenB)
- shaCompress(&H[H2X], u.w);
+ if (!lenB) {
+ shaCompress(&mH[H2X], mU.mW);
+ }
}
- while (len >= 64U) {
- len -= 64U;
- shaCompress(&H[H2X], reinterpret_cast<const uint32_t*>(data));
+ while (aLen >= 64U) {
+ aLen -= 64U;
+ shaCompress(&mH[H2X], reinterpret_cast<const uint32_t*>(data));
data += 64U;
}
- if (len > 0)
- memcpy(u.b, data, len);
+ if (aLen > 0) {
+ memcpy(mU.mB, data, aLen);
+ }
}
/*
* SHA: Generate hash value
*/
void
-SHA1Sum::finish(SHA1Sum::Hash& hashOut)
+SHA1Sum::finish(SHA1Sum::Hash& aHashOut)
{
MOZ_ASSERT(!mDone, "SHA1Sum can only be used to compute a single hash.");
- uint64_t size2 = size;
- uint32_t lenB = uint32_t(size2) & 63;
+ uint64_t size = mSize;
+ uint32_t lenB = uint32_t(size) & 63;
static const uint8_t bulk_pad[64] =
{ 0x80,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
/* Pad with a binary 1 (e.g. 0x80), then zeroes, then length in bits. */
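+ /* For example, with lenB == 20 this call appends 36 pad bytes, leaving
+ * exactly 56 bytes buffered; the 8-byte bit count stored below then
+ * completes the 64-byte block. */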
update(bulk_pad, (((55 + 64) - lenB) & 63) + 1);
- MOZ_ASSERT((uint32_t(size) & 63) == 56);
+ MOZ_ASSERT((uint32_t(mSize) & 63) == 56);
/* Convert size from bytes to bits. */
- size2 <<= 3;
- u.w[14] = NativeEndian::swapToBigEndian(uint32_t(size2 >> 32));
- u.w[15] = NativeEndian::swapToBigEndian(uint32_t(size2));
- shaCompress(&H[H2X], u.w);
+ size <<= 3;
+ mU.mW[14] = NativeEndian::swapToBigEndian(uint32_t(size >> 32));
+ mU.mW[15] = NativeEndian::swapToBigEndian(uint32_t(size));
+ shaCompress(&mH[H2X], mU.mW);
/* Output hash. */
- u.w[0] = NativeEndian::swapToBigEndian(H[0]);
- u.w[1] = NativeEndian::swapToBigEndian(H[1]);
- u.w[2] = NativeEndian::swapToBigEndian(H[2]);
- u.w[3] = NativeEndian::swapToBigEndian(H[3]);
- u.w[4] = NativeEndian::swapToBigEndian(H[4]);
- memcpy(hashOut, u.w, 20);
+ mU.mW[0] = NativeEndian::swapToBigEndian(mH[0]);
+ mU.mW[1] = NativeEndian::swapToBigEndian(mH[1]);
+ mU.mW[2] = NativeEndian::swapToBigEndian(mH[2]);
+ mU.mW[3] = NativeEndian::swapToBigEndian(mH[3]);
+ mU.mW[4] = NativeEndian::swapToBigEndian(mH[4]);
+ memcpy(aHashOut, mU.mW, 20);
mDone = true;
}
/*
* SHA: Compression function, unrolled.
*
* Some operations in shaCompress are done as 5 groups of 16 operations.
* Others are done as 4 groups of 20 operations.
@@ -180,55 +186,55 @@ SHA1Sum::finish(SHA1Sum::Hash& hashOut)
* It is MUCH bigger code than simply using the context's W array, because
* all the offsets to the W array in the stack are 32-bit signed offsets,
* and it is no faster than storing the values in the context's W array.
*
* The original code for sha_fast.c prevented this creation of a separate
* W array in the stack by creating a W array of 80 members, each of
* whose elements is assigned only once. It also separated the computations
* of the W array values and the computations of the values for the 5
- * state variables into two separate passes, W's, then A-E's so that the
+ * state variables into two separate passes, W's, then A-E's so that the
* second pass could be done all in registers (except for accessing the W
* array) on machines with fewer registers. The method is suboptimal
* for machines with enough registers to do it all in one pass, and it
* necessitates using many instructions with 32-bit offsets.
*
* This code eliminates the separate W array on the stack by a completely
* different means: by declaring the X array volatile. This prevents
* the optimizer from trying to reduce the use of the X array by the
* creation of a MORE expensive W array on the stack. The result is
* that all instructions use signed 8-bit offsets and not 32-bit offsets.
*
* The combination of this code and the -O3 optimizer flag on GCC 3.4.3
* results in code that is 3 times faster than the previous NSS sha_fast
* code on AMD64.
*/
static void
-shaCompress(volatile unsigned *X, const uint32_t *inbuf)
+shaCompress(volatile unsigned* aX, const uint32_t* aBuf)
{
unsigned A, B, C, D, E;
-#define XH(n) X[n - H2X]
-#define XW(n) X[n - W2X]
+#define XH(n) aX[n - H2X]
+#define XW(n) aX[n - W2X]
#define K0 0x5a827999L
#define K1 0x6ed9eba1L
#define K2 0x8f1bbcdcL
#define K3 0xca62c1d6L
#define SHA_RND1(a, b, c, d, e, n) \
a = SHA_ROTL(b, 5) + SHA_F1(c, d, e) + a + XW(n) + K0; c = SHA_ROTL(c, 30)
#define SHA_RND2(a, b, c, d, e, n) \
a = SHA_ROTL(b, 5) + SHA_F2(c, d, e) + a + XW(n) + K1; c = SHA_ROTL(c, 30)
#define SHA_RND3(a, b, c, d, e, n) \
a = SHA_ROTL(b, 5) + SHA_F3(c, d, e) + a + XW(n) + K2; c = SHA_ROTL(c, 30)
#define SHA_RND4(a, b, c, d, e, n) \
a = SHA_ROTL(b, 5) + SHA_F4(c, d, e) + a + XW(n) + K3; c = SHA_ROTL(c, 30)
-#define LOAD(n) XW(n) = NativeEndian::swapToBigEndian(inbuf[n])
+#define LOAD(n) XW(n) = NativeEndian::swapToBigEndian(aBuf[n])
A = XH(0);
B = XH(1);
C = XH(2);
D = XH(3);
E = XH(4);
LOAD(0); SHA_RND1(E,A,B,C,D, 0);
--- a/mfbt/SHA1.h
+++ b/mfbt/SHA1.h
@@ -31,32 +31,33 @@ namespace mozilla {
* s.finish(hash);
* }
*
* The finish method may only be called once and cannot be followed by calls
* to update.
*/
class SHA1Sum
{
- union {
- uint32_t w[16]; /* input buffer */
- uint8_t b[64];
- } u;
- uint64_t size; /* count of hashed bytes. */
- unsigned H[22]; /* 5 state variables, 16 tmp values, 1 extra */
- bool mDone;
+ union
+ {
+ uint32_t mW[16]; /* input buffer */
+ uint8_t mB[64];
+ } mU;
+ uint64_t mSize; /* count of hashed bytes. */
+ unsigned mH[22]; /* 5 state variables, 16 tmp values, 1 extra */
+ bool mDone;
- public:
- MFBT_API SHA1Sum();
+public:
+ MFBT_API SHA1Sum();
- static const size_t HashSize = 20;
- typedef uint8_t Hash[HashSize];
+ static const size_t kHashSize = 20;
+ typedef uint8_t Hash[kHashSize];
- /* Add len bytes of dataIn to the data sequence being hashed. */
- MFBT_API void update(const void* dataIn, uint32_t len);
+ /* Add aLength bytes of aData to the data sequence being hashed. */
+ MFBT_API void update(const void* aData, uint32_t aLength);
- /* Compute the final hash of all data into hashOut. */
- MFBT_API void finish(SHA1Sum::Hash& hashOut);
+ /* Compute the final hash of all data into aHashOut. */
+ MFBT_API void finish(SHA1Sum::Hash& aHashOut);
};
} /* namespace mozilla */
#endif /* mozilla_SHA1_h */
--- a/mfbt/Scoped.h
+++ b/mfbt/Scoped.h
@@ -59,207 +59,209 @@
#include "mozilla/NullPtr.h"
namespace mozilla {
/*
* Scoped is a helper to create RAII wrappers
* Type argument |Traits| is expected to have the following structure:
*
- * struct Traits {
+ * struct Traits
+ * {
* // Define the type of the value stored in the wrapper
* typedef value_type type;
* // Returns the value corresponding to the uninitialized or freed state
* const static type empty();
* // Release resources corresponding to the wrapped value
* // This function is responsible for not releasing an |empty| value
* const static void release(type);
* }
*/
template<typename Traits>
class Scoped
{
- public:
- typedef typename Traits::type Resource;
+public:
+ typedef typename Traits::type Resource;
- explicit Scoped(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM)
- : value(Traits::empty())
- {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
- }
+ explicit Scoped(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM)
+ : mValue(Traits::empty())
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
- explicit Scoped(const Resource& v
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : value(v)
- {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
- }
+ explicit Scoped(const Resource& aValue
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : mValue(aValue)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
- /* Move constructor. */
- explicit Scoped(Scoped&& v
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : value(Move(v.value))
- {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
- v.value = Traits::empty();
- }
+ /* Move constructor. */
+ explicit Scoped(Scoped&& aOther
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : mValue(Move(aOther.mValue))
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ aOther.mValue = Traits::empty();
+ }
- ~Scoped() {
- Traits::release(value);
- }
+ ~Scoped() { Traits::release(mValue); }
- // Constant getter
- operator const Resource&() const { return value; }
- const Resource& operator->() const { return value; }
- const Resource& get() const { return value; }
- // Non-constant getter.
- Resource& rwget() { return value; }
+ // Constant getter
+ operator const Resource&() const { return mValue; }
+ const Resource& operator->() const { return mValue; }
+ const Resource& get() const { return mValue; }
+ // Non-constant getter.
+ Resource& rwget() { return mValue; }
- /*
- * Forget the resource.
- *
- * Once |forget| has been called, the |Scoped| is neutralized, i.e. it will
- * have no effect at destruction (unless it is reset to another resource by
- * |operator=|).
- *
- * @return The original resource.
- */
- Resource forget() {
- Resource tmp = value;
- value = Traits::empty();
- return tmp;
- }
+ /*
+ * Forget the resource.
+ *
+ * Once |forget| has been called, the |Scoped| is neutralized, i.e. it will
+ * have no effect at destruction (unless it is reset to another resource by
+ * |operator=|).
+ *
+ * @return The original resource.
+ */
+ Resource forget()
+ {
+ Resource tmp = mValue;
+ mValue = Traits::empty();
+ return tmp;
+ }
- /*
- * Perform immediate clean-up of this |Scoped|.
- *
- * If this |Scoped| is currently empty, this method has no effect.
- */
- void dispose() {
- Traits::release(value);
- value = Traits::empty();
- }
+ /*
+ * Perform immediate clean-up of this |Scoped|.
+ *
+ * If this |Scoped| is currently empty, this method has no effect.
+ */
+ void dispose()
+ {
+ Traits::release(mValue);
+ mValue = Traits::empty();
+ }
- bool operator==(const Resource& other) const {
- return value == other;
- }
+ bool operator==(const Resource& aOther) const { return mValue == aOther; }
- /*
- * Replace the resource with another resource.
- *
- * Calling |operator=| has the side-effect of triggering clean-up. If you do
- * not want to trigger clean-up, you should first invoke |forget|.
- *
- * @return this
- */
- Scoped& operator=(const Resource& other) {
- return reset(other);
- }
- Scoped& reset(const Resource& other) {
- Traits::release(value);
- value = other;
- return *this;
- }
+ /*
+ * Replace the resource with another resource.
+ *
+ * Calling |operator=| has the side-effect of triggering clean-up. If you do
+ * not want to trigger clean-up, you should first invoke |forget|.
+ *
+ * @return this
+ */
+ Scoped& operator=(const Resource& aOther) { return reset(aOther); }
+
+ Scoped& reset(const Resource& aOther)
+ {
+ Traits::release(mValue);
+ mValue = aOther;
+ return *this;
+ }
- /* Move assignment operator. */
- Scoped& operator=(Scoped&& rhs) {
- MOZ_ASSERT(&rhs != this, "self-move-assignment not allowed");
- this->~Scoped();
- new(this) Scoped(Move(rhs));
- return *this;
- }
+ /* Move assignment operator. */
+ Scoped& operator=(Scoped&& aRhs)
+ {
+ MOZ_ASSERT(&aRhs != this, "self-move-assignment not allowed");
+ this->~Scoped();
+ new(this) Scoped(Move(aRhs));
+ return *this;
+ }
- private:
- explicit Scoped(const Scoped& value) MOZ_DELETE;
- Scoped& operator=(const Scoped& value) MOZ_DELETE;
+private:
+ explicit Scoped(const Scoped& aValue) MOZ_DELETE;
+ Scoped& operator=(const Scoped& aValue) MOZ_DELETE;
- private:
- Resource value;
- MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+private:
+ Resource mValue;
+ MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
};
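+/*
+ * Usage sketch: ScopedFileTraits is a hypothetical traits type (not provided
+ * by this header) that closes a stdio FILE*.
+ *
+ * struct ScopedFileTraits
+ * {
+ *   typedef FILE* type;
+ *   static FILE* empty() { return nullptr; }
+ *   static void release(FILE* aFile) { if (aFile) { fclose(aFile); } }
+ * };
+ *
+ * Scoped<ScopedFileTraits> f(fopen("data.txt", "r"));
+ * // fclose() runs automatically when |f| goes out of scope.
+ */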
/*
* SCOPED_TEMPLATE defines a templated class derived from Scoped.
* This allows implementing templates such as ScopedFreePtr.
*
* @param name The name of the class to define.
* @param Traits A struct implementing clean-up. See the implementations
* for more details.
*/
-#define SCOPED_TEMPLATE(name, Traits) \
-template<typename Type> \
-struct name : public mozilla::Scoped<Traits<Type> > \
-{ \
- typedef mozilla::Scoped<Traits<Type> > Super; \
- typedef typename Super::Resource Resource; \
- name& operator=(Resource rhs) { \
- Super::operator=(rhs); \
- return *this; \
- } \
- name& operator=(name&& rhs) { \
- Super::operator=(Move(rhs)); \
- return *this; \
- } \
- explicit name(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM) \
- : Super(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM_TO_PARENT) \
- {} \
- explicit name(Resource rhs \
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM) \
- : Super(rhs \
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM_TO_PARENT) \
- {} \
- explicit name(name&& rhs \
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM) \
- : Super(Move(rhs) \
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM_TO_PARENT) \
- {} \
- private: \
- explicit name(name&) MOZ_DELETE; \
- name& operator=(name&) MOZ_DELETE; \
+#define SCOPED_TEMPLATE(name, Traits) \
+template<typename Type> \
+struct name : public mozilla::Scoped<Traits<Type> > \
+{ \
+ typedef mozilla::Scoped<Traits<Type> > Super; \
+ typedef typename Super::Resource Resource; \
+ name& operator=(Resource aRhs) \
+ { \
+ Super::operator=(aRhs); \
+ return *this; \
+ } \
+ name& operator=(name&& aRhs) \
+ { \
+ Super::operator=(Move(aRhs)); \
+ return *this; \
+ } \
+ explicit name(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM) \
+ : Super(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM_TO_PARENT) \
+ {} \
+ explicit name(Resource aRhs \
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM) \
+ : Super(aRhs \
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM_TO_PARENT) \
+ {} \
+ explicit name(name&& aRhs \
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM) \
+ : Super(Move(aRhs) \
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM_TO_PARENT) \
+ {} \
+private: \
+ explicit name(name&) MOZ_DELETE; \
+ name& operator=(name&) MOZ_DELETE; \
};
/*
* ScopedFreePtr is a RAII wrapper for pointers that need to be free()d.
*
* struct S { ... };
* ScopedFreePtr<S> foo = malloc(sizeof(S));
* ScopedFreePtr<char> bar = strdup(str);
*/
template<typename T>
struct ScopedFreePtrTraits
{
- typedef T* type;
- static T* empty() { return nullptr; }
- static void release(T* ptr) { free(ptr); }
+ typedef T* type;
+ static T* empty() { return nullptr; }
+ static void release(T* aPtr) { free(aPtr); }
};
SCOPED_TEMPLATE(ScopedFreePtr, ScopedFreePtrTraits)
/*
* ScopedDeletePtr is a RAII wrapper for pointers that need to be deleted.
*
* struct S { ... };
* ScopedDeletePtr<S> foo = new S();
*/
template<typename T>
struct ScopedDeletePtrTraits : public ScopedFreePtrTraits<T>
{
- static void release(T* ptr) { delete ptr; }
+ static void release(T* aPtr) { delete aPtr; }
};
SCOPED_TEMPLATE(ScopedDeletePtr, ScopedDeletePtrTraits)
/*
* ScopedDeleteArray is a RAII wrapper for pointers that need to be delete[]ed.
*
* struct S { ... };
* ScopedDeleteArray<S> foo = new S[42];
*/
template<typename T>
struct ScopedDeleteArrayTraits : public ScopedFreePtrTraits<T>
{
- static void release(T* ptr) { delete [] ptr; }
+ static void release(T* aPtr) { delete [] aPtr; }
};
SCOPED_TEMPLATE(ScopedDeleteArray, ScopedDeleteArrayTraits)
/*
* MOZ_TYPE_SPECIFIC_SCOPED_POINTER_TEMPLATE makes it easy to create scoped
* pointers for types with custom deleters; just overload
* TypeSpecificDelete(T*) in the same namespace as T to call the deleter for
* type T.
@@ -276,30 +278,31 @@ SCOPED_TEMPLATE(ScopedDeleteArray, Scope
* PR_Close)
* ...
* {
* ScopedPRFileDesc file(PR_OpenFile(...));
* ...
* } // file is closed with PR_Close here
*/
#define MOZ_TYPE_SPECIFIC_SCOPED_POINTER_TEMPLATE(name, Type, Deleter) \
-template <> inline void TypeSpecificDelete(Type * value) { Deleter(value); } \
+template <> inline void TypeSpecificDelete(Type* aValue) { Deleter(aValue); } \
typedef ::mozilla::TypeSpecificScopedPointer<Type> name;
-template <typename T> void TypeSpecificDelete(T * value);
+template <typename T> void TypeSpecificDelete(T* aValue);
template <typename T>
struct TypeSpecificScopedPointerTraits
{
- typedef T* type;
- static type empty() { return nullptr; }
- static void release(type value)
- {
- if (value)
- TypeSpecificDelete(value);
+ typedef T* type;
+ static type empty() { return nullptr; }
+ static void release(type aValue)
+ {
+ if (aValue) {
+ TypeSpecificDelete(aValue);
}
+ }
};
SCOPED_TEMPLATE(TypeSpecificScopedPointer, TypeSpecificScopedPointerTraits)
} /* namespace mozilla */
#endif /* mozilla_Scoped_h */
--- a/mfbt/TemplateLib.h
+++ b/mfbt/TemplateLib.h
@@ -23,88 +23,89 @@
namespace mozilla {
namespace tl {
/** Compute min/max. */
template<size_t I, size_t J>
struct Min
{
- static const size_t value = I < J ? I : J;
+ static const size_t value = I < J ? I : J;
};
template<size_t I, size_t J>
struct Max
{
- static const size_t value = I > J ? I : J;
+ static const size_t value = I > J ? I : J;
};
/** Compute floor(log2(i)). */
template<size_t I>
struct FloorLog2
{
- static const size_t value = 1 + FloorLog2<I / 2>::value;
+ static const size_t value = 1 + FloorLog2<I / 2>::value;
};
template<> struct FloorLog2<0> { /* Error */ };
template<> struct FloorLog2<1> { static const size_t value = 0; };
/** Compute ceiling(log2(i)). */
template<size_t I>
struct CeilingLog2
{
- static const size_t value = FloorLog2<2 * I - 1>::value;
+ static const size_t value = FloorLog2<2 * I - 1>::value;
};
/** Round up to the nearest power of 2. */
template<size_t I>
struct RoundUpPow2
{
- static const size_t value = size_t(1) << CeilingLog2<I>::value;
+ static const size_t value = size_t(1) << CeilingLog2<I>::value;
};
template<>
struct RoundUpPow2<0>
{
- static const size_t value = 1;
+ static const size_t value = 1;
};
/** Compute the number of bits in the given unsigned type. */
template<typename T>
struct BitSize
{
- static const size_t value = sizeof(T) * CHAR_BIT;
+ static const size_t value = sizeof(T) * CHAR_BIT;
};
/**
* Produce an N-bit mask, where N <= BitSize<size_t>::value. Handle the
* language-undefined edge case when N = BitSize<size_t>::value.
*/
template<size_t N>
struct NBitMask
{
- // Assert the precondition. On success this evaluates to 0. Otherwise it
- // triggers divide-by-zero at compile time: a guaranteed compile error in
- // C++11, and usually one in C++98. Add this value to |value| to assure
- // its computation.
- static const size_t checkPrecondition = 0 / size_t(N < BitSize<size_t>::value);
- static const size_t value = (size_t(1) << N) - 1 + checkPrecondition;
+ // Assert the precondition. On success this evaluates to 0. Otherwise it
+ // triggers divide-by-zero at compile time: a guaranteed compile error in
+ // C++11, and usually one in C++98. Add this value to |value| to assure
+ // its computation.
+ static const size_t checkPrecondition =
+ 0 / size_t(N < BitSize<size_t>::value);
+ static const size_t value = (size_t(1) << N) - 1 + checkPrecondition;
};
template<>
struct NBitMask<BitSize<size_t>::value>
{
- static const size_t value = size_t(-1);
+ static const size_t value = size_t(-1);
};
/**
* For the unsigned integral type size_t, compute a mask M for N such that
* for all X, !(X & M) implies X * N will not overflow (w.r.t. size_t).
*/
template<size_t N>
struct MulOverflowMask
{
- static const size_t value =
- ~NBitMask<BitSize<size_t>::value - CeilingLog2<N>::value>::value;
+ static const size_t value =
+ ~NBitMask<BitSize<size_t>::value - CeilingLog2<N>::value>::value;
};
template<> struct MulOverflowMask<0> { /* Error */ };
template<> struct MulOverflowMask<1> { static const size_t value = 0; };
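+/*
+ * Illustrative values, as computed from the definitions above:
+ *
+ * FloorLog2<10>::value == 3, CeilingLog2<10>::value == 4,
+ * RoundUpPow2<10>::value == 16, NBitMask<8>::value == 0xFF.
+ */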
} // namespace tl
} // namespace mozilla
--- a/mfbt/ThreadLocal.h
+++ b/mfbt/ThreadLocal.h
@@ -68,85 +68,84 @@ typedef sig_atomic_t sig_safe_t;
*
* // Get the TLS value
* int value = tlsKey.get();
*/
template<typename T>
class ThreadLocal
{
#if defined(XP_WIN)
- typedef unsigned long key_t;
+ typedef unsigned long key_t;
#else
- typedef pthread_key_t key_t;
+ typedef pthread_key_t key_t;
#endif
- union Helper {
- void* ptr;
- T value;
- };
+ union Helper
+ {
+ void* mPtr;
+ T mValue;
+ };
- public:
- MOZ_WARN_UNUSED_RESULT inline bool init();
-
- inline T get() const;
+public:
+ MOZ_WARN_UNUSED_RESULT inline bool init();
- inline void set(const T value);
+ inline T get() const;
+
+ inline void set(const T aValue);
- bool initialized() const {
- return inited;
- }
+ bool initialized() const { return mInited; }
- private:
- key_t key;
- bool inited;
+private:
+ key_t mKey;
+ bool mInited;
};
template<typename T>
inline bool
ThreadLocal<T>::init()
{
static_assert(sizeof(T) <= sizeof(void*),
"mozilla::ThreadLocal can't be used for types larger than "
"a pointer");
MOZ_ASSERT(!initialized());
#ifdef XP_WIN
- key = TlsAlloc();
- inited = key != 0xFFFFFFFFUL; // TLS_OUT_OF_INDEXES
+ mKey = TlsAlloc();
+ mInited = mKey != 0xFFFFFFFFUL; // TLS_OUT_OF_INDEXES
#else
- inited = !pthread_key_create(&key, nullptr);
+ mInited = !pthread_key_create(&mKey, nullptr);
#endif
- return inited;
+ return mInited;
}
template<typename T>
inline T
ThreadLocal<T>::get() const
{
MOZ_ASSERT(initialized());
Helper h;
#ifdef XP_WIN
- h.ptr = TlsGetValue(key);
+ h.mPtr = TlsGetValue(mKey);
#else
- h.ptr = pthread_getspecific(key);
+ h.mPtr = pthread_getspecific(mKey);
#endif
- return h.value;
+ return h.mValue;
}
template<typename T>
inline void
-ThreadLocal<T>::set(const T value)
+ThreadLocal<T>::set(const T aValue)
{
MOZ_ASSERT(initialized());
Helper h;
- h.value = value;
- bool succeeded;
+ h.mValue = aValue;
#ifdef XP_WIN
- succeeded = TlsSetValue(key, h.ptr);
+ bool succeeded = TlsSetValue(mKey, h.mPtr);
#else
- succeeded = !pthread_setspecific(key, h.ptr);
+ bool succeeded = !pthread_setspecific(mKey, h.mPtr);
#endif
- if (!succeeded)
+ if (!succeeded) {
MOZ_CRASH();
+ }
}
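+/*
+ * Usage sketch (|sTlsKey| is a hypothetical static key):
+ *
+ * static ThreadLocal<int> sTlsKey;
+ *
+ * if (!sTlsKey.initialized() && !sTlsKey.init()) {
+ *   MOZ_CRASH();
+ * }
+ * sTlsKey.set(42);
+ * int value = sTlsKey.get();  // 42, on this thread only
+ */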
} // namespace mozilla
#endif /* mozilla_ThreadLocal_h */
--- a/mfbt/ToString.h
+++ b/mfbt/ToString.h
@@ -15,18 +15,18 @@
namespace mozilla {
/**
* A convenience function for converting an object to a string representation.
* Supports any object which can be streamed to an std::ostream.
*/
template<typename T>
std::string
-ToString(const T& t)
+ToString(const T& aValue)
{
std::ostringstream stream;
- stream << t;
+ stream << aValue;
return stream.str();
}
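+/*
+ * For example, ToString(42) yields std::string("42"), and any class providing
+ * an operator<<(std::ostream&, ...) overload can be converted the same way.
+ */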
} // namespace mozilla
#endif /* mozilla_ToString_h */
--- a/mfbt/TypeTraits.h
+++ b/mfbt/TypeTraits.h
@@ -29,19 +29,19 @@ template<typename> struct RemoveCV;
/**
* Helper class used as a base for various type traits, exposed publicly
* because <type_traits> exposes it as well.
*/
template<typename T, T Value>
struct IntegralConstant
{
- static const T value = Value;
- typedef T ValueType;
- typedef IntegralConstant<T, Value> Type;
+ static const T value = Value;
+ typedef T ValueType;
+ typedef IntegralConstant<T, Value> Type;
};
/** Convenient aliases. */
typedef IntegralConstant<bool, true> TrueType;
typedef IntegralConstant<bool, false> FalseType;
/* 20.9.4 Unary type traits [meta.unary] */
@@ -517,45 +517,45 @@ struct BaseOfTester : IntegralConstant<b
// The trickery used to implement IsBaseOf here makes it possible to use it for
// the cases of private and multiple inheritance. This code was inspired by the
// sample code here:
//
// http://stackoverflow.com/questions/2910979/how-is-base-of-works
template<class Base, class Derived>
struct BaseOfHelper
{
- public:
- operator Base*() const;
- operator Derived*();
+public:
+ operator Base*() const;
+ operator Derived*();
};
template<class Base, class Derived>
struct BaseOfTester
{
- private:
- template<class T>
- static char test(Derived*, T);
- static int test(Base*, int);
+private:
+ template<class T>
+ static char test(Derived*, T);
+ static int test(Base*, int);
- public:
- static const bool value =
- sizeof(test(BaseOfHelper<Base, Derived>(), int())) == sizeof(char);
+public:
+ static const bool value =
+ sizeof(test(BaseOfHelper<Base, Derived>(), int())) == sizeof(char);
};
template<class Base, class Derived>
struct BaseOfTester<Base, const Derived>
{
- private:
- template<class T>
- static char test(Derived*, T);
- static int test(Base*, int);
+private:
+ template<class T>
+ static char test(Derived*, T);
+ static int test(Base*, int);
- public:
- static const bool value =
- sizeof(test(BaseOfHelper<Base, Derived>(), int())) == sizeof(char);
+public:
+ static const bool value =
+ sizeof(test(BaseOfHelper<Base, Derived>(), int())) == sizeof(char);
};
template<class Base, class Derived>
struct BaseOfTester<Base&, Derived&> : FalseType {};
template<class Type>
struct BaseOfTester<Type, Type> : TrueType {};
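+/*
+ * For example, given hypothetical classes
+ *
+ * class A {};
+ * class B : private A {};
+ *
+ * the IsBaseOf trait built on this tester reports IsBaseOf<A, B>::value as
+ * true (private inheritance is handled), and IsBaseOf<B, A>::value as false.
+ */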
@@ -583,28 +583,28 @@ struct IsBaseOf
: IntegralConstant<bool, detail::BaseOfTester<Base, Derived>::value>
{};
namespace detail {
template<typename From, typename To>
struct ConvertibleTester
{
- private:
- static From create();
+private:
+ static From create();
- template<typename From1, typename To1>
- static char test(To to);
+ template<typename From1, typename To1>
+ static char test(To to);
- template<typename From1, typename To1>
- static int test(...);
+ template<typename From1, typename To1>
+ static int test(...);
- public:
- static const bool value =
- sizeof(test<From, To>(create())) == sizeof(char);
+public:
+ static const bool value =
+ sizeof(test<From, To>(create())) == sizeof(char);
};
} // namespace detail
/**
* IsConvertible determines whether a value of type From will implicitly convert
* to a value of type To. For example:
*
@@ -640,107 +640,107 @@ struct IsConvertible
* mozilla::RemoveConst<int>::Type is int;
* mozilla::RemoveConst<const int>::Type is int;
* mozilla::RemoveConst<const int*>::Type is const int*;
* mozilla::RemoveConst<int* const>::Type is int*.
*/
template<typename T>
struct RemoveConst
{
- typedef T Type;
+ typedef T Type;
};
template<typename T>
struct RemoveConst<const T>
{
- typedef T Type;
+ typedef T Type;
};
/**
* RemoveVolatile removes top-level volatile qualifications on a type.
*
* mozilla::RemoveVolatile<int>::Type is int;
* mozilla::RemoveVolatile<volatile int>::Type is int;
* mozilla::RemoveVolatile<volatile int*>::Type is volatile int*;
* mozilla::RemoveVolatile<int* volatile>::Type is int*.
*/
template<typename T>
struct RemoveVolatile
{
- typedef T Type;
+ typedef T Type;
};
template<typename T>
struct RemoveVolatile<volatile T>
{
- typedef T Type;
+ typedef T Type;
};
/**
* RemoveCV removes top-level const and volatile qualifications on a type.
*
* mozilla::RemoveCV<int>::Type is int;
* mozilla::RemoveCV<const int>::Type is int;
* mozilla::RemoveCV<volatile int>::Type is int;
* mozilla::RemoveCV<int* const volatile>::Type is int*.
*/
template<typename T>
struct RemoveCV
{
- typedef typename RemoveConst<typename RemoveVolatile<T>::Type>::Type Type;
+ typedef typename RemoveConst<typename RemoveVolatile<T>::Type>::Type Type;
};
/* 20.9.7.2 Reference modifications [meta.trans.ref] */
/**
* Converts reference types to the underlying types.
*
* mozilla::RemoveReference<T>::Type is T;
* mozilla::RemoveReference<T&>::Type is T;
* mozilla::RemoveReference<T&&>::Type is T;
*/
template<typename T>
struct RemoveReference
{
- typedef T Type;
+ typedef T Type;
};
template<typename T>
struct RemoveReference<T&>
{
- typedef T Type;
+ typedef T Type;
};
template<typename T>
struct RemoveReference<T&&>
{
- typedef T Type;
+ typedef T Type;
};
template<bool Condition, typename A, typename B>
struct Conditional;
namespace detail {
enum Voidness { TIsVoid, TIsNotVoid };
template<typename T, Voidness V = IsVoid<T>::value ? TIsVoid : TIsNotVoid>
struct AddLvalueReferenceHelper;
template<typename T>
struct AddLvalueReferenceHelper<T, TIsVoid>
{
- typedef void Type;
+ typedef void Type;
};
template<typename T>
struct AddLvalueReferenceHelper<T, TIsNotVoid>
{
- typedef T& Type;
+ typedef T& Type;
};
} // namespace detail
/**
* AddLvalueReference adds an lvalue & reference to T if one isn't already
* present. (Note: adding an lvalue reference to an rvalue && reference in
* essence replaces the && with a &, per C++11 reference collapsing rules. For
@@ -799,17 +799,17 @@ template<typename T,
typename CVRemoved = typename RemoveCV<T>::Type,
bool IsSignedIntegerType = IsSigned<CVRemoved>::value &&
!IsSame<char, CVRemoved>::value>
struct MakeSigned;
template<typename T, typename CVRemoved>
struct MakeSigned<T, CVRemoved, true>
{
- typedef T Type;
+ typedef T Type;
};
template<typename T, typename CVRemoved>
struct MakeSigned<T, CVRemoved, false>
: WithCV<IsConst<T>::value, IsVolatile<T>::value,
typename CorrespondingSigned<CVRemoved>::Type>
{};
@@ -834,17 +834,18 @@ struct MakeSigned<T, CVRemoved, false>
* mozilla::MakeSigned<volatile int>::Type is volatile int;
* mozilla::MakeSigned<const unsigned short>::Type is const signed short;
* mozilla::MakeSigned<const char>::Type is const signed char;
* mozilla::MakeSigned<bool> is an error;
* mozilla::MakeSigned<void*> is an error.
*/
template<typename T>
struct MakeSigned
- : EnableIf<IsIntegral<T>::value && !IsSame<bool, typename RemoveCV<T>::Type>::value,
+ : EnableIf<IsIntegral<T>::value &&
+ !IsSame<bool, typename RemoveCV<T>::Type>::value,
typename detail::MakeSigned<T>
>::Type
{};
namespace detail {
template<typename T>
struct CorrespondingUnsigned;
@@ -867,17 +868,17 @@ template<typename T,
typename CVRemoved = typename RemoveCV<T>::Type,
bool IsUnsignedIntegerType = IsUnsigned<CVRemoved>::value &&
!IsSame<char, CVRemoved>::value>
struct MakeUnsigned;
template<typename T, typename CVRemoved>
struct MakeUnsigned<T, CVRemoved, true>
{
- typedef T Type;
+ typedef T Type;
};
template<typename T, typename CVRemoved>
struct MakeUnsigned<T, CVRemoved, false>
: WithCV<IsConst<T>::value, IsVolatile<T>::value,
typename CorrespondingUnsigned<CVRemoved>::Type>
{};
@@ -902,17 +903,18 @@ struct MakeUnsigned<T, CVRemoved, false>
* mozilla::MakeUnsigned<volatile unsigned int>::Type is volatile unsigned int;
* mozilla::MakeUnsigned<const signed short>::Type is const unsigned short;
* mozilla::MakeUnsigned<const char>::Type is const unsigned char;
* mozilla::MakeUnsigned<bool> is an error;
* mozilla::MakeUnsigned<void*> is an error.
*/
template<typename T>
struct MakeUnsigned
- : EnableIf<IsIntegral<T>::value && !IsSame<bool, typename RemoveCV<T>::Type>::value,
+ : EnableIf<IsIntegral<T>::value &&
+ !IsSame<bool, typename RemoveCV<T>::Type>::value,
typename detail::MakeUnsigned<T>
>::Type
{};
/* 20.9.7.4 Array modifications [meta.trans.arr] */
/**
* RemoveExtent produces either the type of the elements of the array T, or T
@@ -921,29 +923,29 @@ struct MakeUnsigned
* mozilla::RemoveExtent<int>::Type is int;
* mozilla::RemoveExtent<const int[]>::Type is const int;
* mozilla::RemoveExtent<volatile int[5]>::Type is volatile int;
* mozilla::RemoveExtent<long[][17]>::Type is long[17].
*/
template<typename T>
struct RemoveExtent
{
- typedef T Type;
+ typedef T Type;
};
template<typename T>
struct RemoveExtent<T[]>
{
- typedef T Type;
+ typedef T Type;
};
template<typename T, decltype(sizeof(1)) N>
struct RemoveExtent<T[N]>
{
- typedef T Type;
+ typedef T Type;
};
/* 20.9.7.5 Pointer modifications [meta.trans.ptr] */
/* 20.9.7.6 Other transformations [meta.trans.other] */
/**
* EnableIf is a struct containing a typedef of T if and only if B is true.
@@ -965,32 +967,32 @@ struct RemoveExtent<T[N]>
*/
template<bool B, typename T>
struct EnableIf
{};
template<typename T>
struct EnableIf<true, T>
{
- typedef T Type;
+ typedef T Type;
};
/**
* Conditional selects a class between two, depending on a given boolean value.
*
* mozilla::Conditional<true, A, B>::Type is A;
* mozilla::Conditional<false, A, B>::Type is B;
*/
template<bool Condition, typename A, typename B>
struct Conditional
{
- typedef A Type;
+ typedef A Type;
};
template<class A, class B>
struct Conditional<false, A, B>
{
- typedef B Type;
+ typedef B Type;
};
} /* namespace mozilla */
#endif /* mozilla_TypeTraits_h */
--- a/mfbt/TypedEnum.h
+++ b/mfbt/TypedEnum.h
@@ -64,23 +64,22 @@
* nothing on compilers that do not support it.
*
* MOZ_{BEGIN,END}_ENUM_CLASS doesn't work for defining enum classes nested
* inside classes. To define an enum class nested inside another class, use
* MOZ_{BEGIN,END}_NESTED_ENUM_CLASS, and place a MOZ_FINISH_NESTED_ENUM_CLASS
* in namespace scope to handle bits that can only be implemented with
* namespace-scoped code. For example:
*
- * class FooBar {
- *
+ * class FooBar
+ * {
* MOZ_BEGIN_NESTED_ENUM_CLASS(Enum, int32_t)
* A,
* B = 6
* MOZ_END_NESTED_ENUM_CLASS(Enum)
- *
* };
*
* MOZ_FINISH_NESTED_ENUM_CLASS(FooBar::Enum)
*/
#if defined(MOZ_HAVE_CXX11_STRONG_ENUMS)
/*
* All compilers that support strong enums also support an explicit
* underlying type, so no extra check is needed.
@@ -143,38 +142,38 @@
* return Enum::A;
* }
*/
/* Single-argument form. */
# define MOZ_BEGIN_NESTED_ENUM_CLASS_HELPER1(Name) \
class Name \
{ \
- public: \
- enum Enum \
- {
+ public: \
+ enum Enum \
+ {
/* Two-argument form. */
# define MOZ_BEGIN_NESTED_ENUM_CLASS_HELPER2(Name, type) \
class Name \
{ \
- public: \
- enum Enum MOZ_ENUM_TYPE(type) \
- {
+ public: \
+ enum Enum MOZ_ENUM_TYPE(type) \
+ {
# define MOZ_END_NESTED_ENUM_CLASS(Name) \
- }; \
- Name() {} \
- MOZ_CONSTEXPR Name(Enum aEnum) : mEnum(aEnum) {} \
- template<typename Other> \
- explicit MOZ_CONSTEXPR Name(Other num) : mEnum((Enum)num) {} \
- MOZ_CONSTEXPR operator Enum() const { return mEnum; } \
- explicit MOZ_CONSTEXPR Name(const mozilla::CastableTypedEnumResult<Name>& aOther) \
- : mEnum(aOther.get()) \
- {} \
- private: \
- Enum mEnum; \
+ }; \
+ Name() {} \
+ MOZ_CONSTEXPR Name(Enum aEnum) : mEnum(aEnum) {} \
+ template<typename Other> \
+ explicit MOZ_CONSTEXPR Name(Other num) : mEnum((Enum)num) {} \
+ MOZ_CONSTEXPR operator Enum() const { return mEnum; } \
+ explicit MOZ_CONSTEXPR Name(const mozilla::CastableTypedEnumResult<Name>& aOther) \
+ : mEnum(aOther.get()) \
+ {} \
+ private: \
+ Enum mEnum; \
};
# define MOZ_FINISH_NESTED_ENUM_CLASS(Name) \
inline int operator+(const int&, const Name::Enum&) MOZ_DELETE; \
inline int operator+(const Name::Enum&, const int&) MOZ_DELETE; \
inline int operator-(const int&, const Name::Enum&) MOZ_DELETE; \
inline int operator-(const Name::Enum&, const int&) MOZ_DELETE; \
inline int operator*(const int&, const Name::Enum&) MOZ_DELETE; \
inline int operator*(const Name::Enum&, const int&) MOZ_DELETE; \
@@ -248,24 +247,24 @@
*
* MOZ_BEGIN_ENUM_CLASS(E)
* Foo,
* Bar
* MOZ_END_ENUM_CLASS(E)
*
* S<E, E::Bar> s;
*
- * In this example, the second template parameter to S is meant to be of type T,
- * but on non-C++11 compilers, type T is a class type, not an integer type, so
- * it is not accepted as the type of a constant template parameter. One would
- * then want to use MOZ_ENUM_CLASS_ENUM_TYPE(T), but that doesn't work either
- * as T depends on template parameters (more specifically here, T _is_ a template
- * parameter) so as MOZ_ENUM_CLASS_ENUM_TYPE(T) expands to T::Enum, we are missing
- * the required "typename" keyword. So here, MOZ_TEMPLATE_ENUM_CLASS_ENUM_TYPE
- * is needed.
+ * In this example, the second template parameter to S is meant to be of type
+ * T, but on non-C++11 compilers, type T is a class type, not an integer
+ * type, so it is not accepted as the type of a constant template parameter.
+ * One would then want to use MOZ_ENUM_CLASS_ENUM_TYPE(T), but that doesn't
+ * work either as T depends on template parameters (more specifically here, T
+ * _is_ a template parameter) so as MOZ_ENUM_CLASS_ENUM_TYPE(T) expands to
+ * T::Enum, we are missing the required "typename" keyword. So here,
+ * MOZ_TEMPLATE_ENUM_CLASS_ENUM_TYPE is needed.
*/
# define MOZ_TEMPLATE_ENUM_CLASS_ENUM_TYPE(Name) typename Name::Enum
#endif
# define MOZ_BEGIN_NESTED_ENUM_CLASS_GLUE(a, b) a b
# define MOZ_BEGIN_NESTED_ENUM_CLASS(...) \
MOZ_BEGIN_NESTED_ENUM_CLASS_GLUE( \
MOZ_PASTE_PREFIX_AND_ARG_COUNT(MOZ_BEGIN_NESTED_ENUM_CLASS_HELPER, \
--- a/mfbt/TypedEnumBits.h
+++ b/mfbt/TypedEnumBits.h
@@ -1,91 +1,93 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-/* MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS allows using a typed enum as bit flags. */
+/*
+ * MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS allows using a typed enum as bit flags.
+ */
#ifndef mozilla_TypedEnumBits_h
#define mozilla_TypedEnumBits_h
#include "mozilla/IntegerTypeTraits.h"
#include "mozilla/TypedEnumInternal.h"
namespace mozilla {
#define MOZ_CASTABLETYPEDENUMRESULT_BINOP(Op, OtherType, ReturnType) \
template<typename E> \
MOZ_CONSTEXPR ReturnType \
-operator Op(const OtherType& e, const CastableTypedEnumResult<E>& r) \
+operator Op(const OtherType& aE, const CastableTypedEnumResult<E>& aR) \
{ \
- return ReturnType(e Op OtherType(r)); \
+ return ReturnType(aE Op OtherType(aR)); \
} \
template<typename E> \
MOZ_CONSTEXPR ReturnType \
-operator Op(const CastableTypedEnumResult<E>& r, const OtherType& e) \
+operator Op(const CastableTypedEnumResult<E>& aR, const OtherType& aE) \
{ \
- return ReturnType(OtherType(r) Op e); \
+ return ReturnType(OtherType(aR) Op aE); \
} \
template<typename E> \
MOZ_CONSTEXPR ReturnType \
-operator Op(const CastableTypedEnumResult<E>& r1, \
- const CastableTypedEnumResult<E>& r2) \
+operator Op(const CastableTypedEnumResult<E>& aR1, \
+ const CastableTypedEnumResult<E>& aR2) \
{ \
- return ReturnType(OtherType(r1) Op OtherType(r2)); \
+ return ReturnType(OtherType(aR1) Op OtherType(aR2)); \
}
MOZ_CASTABLETYPEDENUMRESULT_BINOP(|, E, CastableTypedEnumResult<E>)
MOZ_CASTABLETYPEDENUMRESULT_BINOP(&, E, CastableTypedEnumResult<E>)
MOZ_CASTABLETYPEDENUMRESULT_BINOP(^, E, CastableTypedEnumResult<E>)
MOZ_CASTABLETYPEDENUMRESULT_BINOP(==, E, bool)
MOZ_CASTABLETYPEDENUMRESULT_BINOP(!=, E, bool)
MOZ_CASTABLETYPEDENUMRESULT_BINOP(||, bool, bool)
MOZ_CASTABLETYPEDENUMRESULT_BINOP(&&, bool, bool)
template <typename E>
MOZ_CONSTEXPR CastableTypedEnumResult<E>
-operator ~(const CastableTypedEnumResult<E>& r)
+operator ~(const CastableTypedEnumResult<E>& aR)
{
- return CastableTypedEnumResult<E>(~(E(r)));
+ return CastableTypedEnumResult<E>(~(E(aR)));
}
#define MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(Op) \
template<typename E> \
E& \
-operator Op(E& r1, \
- const CastableTypedEnumResult<E>& r2) \
+operator Op(E& aR1, \
+ const CastableTypedEnumResult<E>& aR2) \
{ \
- return r1 Op E(r2); \
+ return aR1 Op E(aR2); \
}
MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(&=)
MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(|=)
MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP(^=)
#undef MOZ_CASTABLETYPEDENUMRESULT_COMPOUND_ASSIGN_OP
#undef MOZ_CASTABLETYPEDENUMRESULT_BINOP
#ifndef MOZ_HAVE_CXX11_STRONG_ENUMS
#define MOZ_CASTABLETYPEDENUMRESULT_BINOP_EXTRA_NON_CXX11(Op, ReturnType) \
template<typename E> \
MOZ_CONSTEXPR ReturnType \
-operator Op(typename E::Enum e, const CastableTypedEnumResult<E>& r) \
+operator Op(typename E::Enum aE, const CastableTypedEnumResult<E>& aR) \
{ \
- return ReturnType(e Op E(r)); \
+ return ReturnType(aE Op E(aR)); \
} \
template<typename E> \
MOZ_CONSTEXPR ReturnType \
-operator Op(const CastableTypedEnumResult<E>& r, typename E::Enum e) \
+operator Op(const CastableTypedEnumResult<E>& aR, typename E::Enum aE) \
{ \
- return ReturnType(E(r) Op e); \
+ return ReturnType(E(aR) Op aE); \
}
MOZ_CASTABLETYPEDENUMRESULT_BINOP_EXTRA_NON_CXX11(|, CastableTypedEnumResult<E>)
MOZ_CASTABLETYPEDENUMRESULT_BINOP_EXTRA_NON_CXX11(&, CastableTypedEnumResult<E>)
MOZ_CASTABLETYPEDENUMRESULT_BINOP_EXTRA_NON_CXX11(^, CastableTypedEnumResult<E>)
MOZ_CASTABLETYPEDENUMRESULT_BINOP_EXTRA_NON_CXX11(==, bool)
MOZ_CASTABLETYPEDENUMRESULT_BINOP_EXTRA_NON_CXX11(!=, bool)
--- a/mfbt/TypedEnumInternal.h
+++ b/mfbt/TypedEnumInternal.h
@@ -69,44 +69,42 @@ namespace mozilla {
* is inherently more readable, and to ease porting existing code to typed
* enums. We achieve this by having operator& and other binary bitwise
* operators have as return type a class, CastableTypedEnumResult,
* that wraps a typed enum but adds bool convertibility.
*/
template<typename E>
class CastableTypedEnumResult
{
- private:
- const E mValue;
+private:
+ const E mValue;
- public:
- explicit MOZ_CONSTEXPR CastableTypedEnumResult(E value)
- : mValue(value)
- {}
+public:
+ explicit MOZ_CONSTEXPR CastableTypedEnumResult(E aValue)
+ : mValue(aValue)
+ {}
- MOZ_CONSTEXPR operator E() const { return mValue; }
+ MOZ_CONSTEXPR operator E() const { return mValue; }
- template<typename DestinationType>
- MOZ_EXPLICIT_CONVERSION MOZ_CONSTEXPR
- operator DestinationType() const {
- return DestinationType(mValue);
- }
+ template<typename DestinationType>
+ MOZ_EXPLICIT_CONVERSION MOZ_CONSTEXPR
+ operator DestinationType() const { return DestinationType(mValue); }
- MOZ_CONSTEXPR bool operator !() const { return !bool(mValue); }
+ MOZ_CONSTEXPR bool operator !() const { return !bool(mValue); }
#ifndef MOZ_HAVE_CXX11_STRONG_ENUMS
- // This get() method is used to implement a constructor in the
- // non-c++11 fallback path for MOZ_BEGIN_ENUM_CLASS, taking a
- // CastableTypedEnumResult. If we try to implement it using the
- // above conversion operator E(), then at least clang 3.3
- // (when forced to take the non-c++11 fallback path) compiles
- // this constructor to an infinite recursion. So we introduce this
- // get() method, that does exactly the same as the conversion operator,
- // to work around this.
- MOZ_CONSTEXPR E get() const { return mValue; }
+ // This get() method is used to implement a constructor in the
+ // non-c++11 fallback path for MOZ_BEGIN_ENUM_CLASS, taking a
+ // CastableTypedEnumResult. If we try to implement it using the
+ // above conversion operator E(), then at least clang 3.3
+ // (when forced to take the non-c++11 fallback path) compiles
+ // this constructor to an infinite recursion. So we introduce this
+ // get() method, that does exactly the same as the conversion operator,
+ // to work around this.
+ MOZ_CONSTEXPR E get() const { return mValue; }
#endif
};
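+/*
+ * Usage sketch: |Flags| is a hypothetical typed enum defined with
+ * MOZ_BEGIN_ENUM_CLASS/MOZ_END_ENUM_CLASS and given bitwise operators via
+ * MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(Flags) (see TypedEnumBits.h):
+ *
+ * Flags f = Flags::A | Flags::B;  // CastableTypedEnumResult<Flags> -> Flags
+ * if (f & Flags::A) {             // converts to bool in a boolean context
+ *   ...
+ * }
+ */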
} // namespace mozilla
#endif // __cplusplus
#endif // mozilla_TypedEnumInternal_h
--- a/mfbt/Vector.h
+++ b/mfbt/Vector.h
@@ -34,684 +34,720 @@ namespace mozilla {
template<typename T, size_t N, class AllocPolicy, class ThisVector>
class VectorBase;
namespace detail {
/*
* Check that the given capacity wastes the minimal amount of space if
- * allocated on the heap. This means that cap*sizeof(T) is as close to a
- * power-of-two as possible. growStorageBy() is responsible for ensuring
- * this.
+ * allocated on the heap. This means that aCapacity*sizeof(T) is as close to a
+ * power-of-two as possible. growStorageBy() is responsible for ensuring this.
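+ *
+ * For example (illustrative), with sizeof(T) == 4 and aCapacity == 6:
+ * size == 24, RoundUpPow2(24) == 32, and 32 - 24 == 8 >= sizeof(T), so such
+ * a capacity would be reported as wasting excess space.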
*/
template<typename T>
-static bool CapacityHasExcessSpace(size_t cap)
+static bool CapacityHasExcessSpace(size_t aCapacity)
{
- size_t size = cap * sizeof(T);
+ size_t size = aCapacity * sizeof(T);
return RoundUpPow2(size) - size >= sizeof(T);
}
/*
* This template class provides a default implementation for vector operations
* when the element type is not known to be a POD, as judged by IsPod.
*/
template<typename T, size_t N, class AP, class ThisVector, bool IsPod>
struct VectorImpl
{
- /* Destroys constructed objects in the range [begin, end). */
- static inline void destroy(T* begin, T* end) {
- MOZ_ASSERT(begin <= end);
- for (T* p = begin; p < end; ++p)
- p->~T();
+ /* Destroys constructed objects in the range [aBegin, aEnd). */
+ static inline void destroy(T* aBegin, T* aEnd)
+ {
+ MOZ_ASSERT(aBegin <= aEnd);
+ for (T* p = aBegin; p < aEnd; ++p) {
+ p->~T();
}
-
- /* Constructs objects in the uninitialized range [begin, end). */
- static inline void initialize(T* begin, T* end) {
- MOZ_ASSERT(begin <= end);
- for (T* p = begin; p < end; ++p)
- new(p) T();
- }
+ }
- /*
- * Copy-constructs objects in the uninitialized range
- * [dst, dst+(srcend-srcbeg)) from the range [srcbeg, srcend).
- */
- template<typename U>
- static inline void copyConstruct(T* dst, const U* srcbeg, const U* srcend) {
- MOZ_ASSERT(srcbeg <= srcend);
- for (const U* p = srcbeg; p < srcend; ++p, ++dst)
- new(dst) T(*p);
+ /* Constructs objects in the uninitialized range [aBegin, aEnd). */
+ static inline void initialize(T* aBegin, T* aEnd)
+ {
+ MOZ_ASSERT(aBegin <= aEnd);
+ for (T* p = aBegin; p < aEnd; ++p) {
+ new(p) T();
}
+ }
- /*
- * Move-constructs objects in the uninitialized range
- * [dst, dst+(srcend-srcbeg)) from the range [srcbeg, srcend).
- */
- template<typename U>
- static inline void moveConstruct(T* dst, U* srcbeg, U* srcend) {
- MOZ_ASSERT(srcbeg <= srcend);
- for (U* p = srcbeg; p < srcend; ++p, ++dst)
- new(dst) T(Move(*p));
+ /*
+ * Copy-constructs objects in the uninitialized range
+ * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
+ */
+ template<typename U>
+ static inline void copyConstruct(T* aDst,
+ const U* aSrcStart, const U* aSrcEnd)
+ {
+ MOZ_ASSERT(aSrcStart <= aSrcEnd);
+ for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
+ new(aDst) T(*p);
}
+ }
- /*
- * Copy-constructs objects in the uninitialized range [dst, dst+n) from the
- * same object u.
- */
- template<typename U>
- static inline void copyConstructN(T* dst, size_t n, const U& u) {
- for (T* end = dst + n; dst < end; ++dst)
- new(dst) T(u);
+ /*
+ * Move-constructs objects in the uninitialized range
+ * [aDst, aDst+(aSrcEnd-aSrcStart)) from the range [aSrcStart, aSrcEnd).
+ */
+ template<typename U>
+ static inline void moveConstruct(T* aDst, U* aSrcStart, U* aSrcEnd)
+ {
+ MOZ_ASSERT(aSrcStart <= aSrcEnd);
+ for (U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
+ new(aDst) T(Move(*p));
}
+ }
+
+ /*
+ * Copy-constructs objects in the uninitialized range [aDst, aDst+aN) from
+ * the same object aU.
+ */
+ template<typename U>
+ static inline void copyConstructN(T* aDst, size_t aN, const U& aU)
+ {
+ for (T* end = aDst + aN; aDst < end; ++aDst) {
+ new(aDst) T(aU);
+ }
+ }
- /*
- * Grows the given buffer to have capacity newCap, preserving the objects
- * constructed in the range [begin, end) and updating v. Assumes that (1)
- * newCap has not overflowed, and (2) multiplying newCap by sizeof(T) will
- * not overflow.
- */
- static inline bool
- growTo(VectorBase<T, N, AP, ThisVector>& v, size_t newCap) {
- MOZ_ASSERT(!v.usingInlineStorage());
- MOZ_ASSERT(!CapacityHasExcessSpace<T>(newCap));
- T* newbuf = reinterpret_cast<T*>(v.malloc_(newCap * sizeof(T)));
- if (!newbuf)
- return false;
- T* dst = newbuf;
- T* src = v.beginNoCheck();
- for (; src < v.endNoCheck(); ++dst, ++src)
- new(dst) T(Move(*src));
- VectorImpl::destroy(v.beginNoCheck(), v.endNoCheck());
- v.free_(v.mBegin);
- v.mBegin = newbuf;
- /* v.mLength is unchanged. */
- v.mCapacity = newCap;
- return true;
+ /*
+ * Grows the given buffer to have capacity aNewCap, preserving the objects
+ * constructed in the range [begin, end) and updating aV. Assumes that (1)
+ * aNewCap has not overflowed, and (2) multiplying aNewCap by sizeof(T) will
+ * not overflow.
+ */
+ static inline bool
+ growTo(VectorBase<T, N, AP, ThisVector>& aV, size_t aNewCap)
+ {
+ MOZ_ASSERT(!aV.usingInlineStorage());
+ MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
+ T* newbuf = reinterpret_cast<T*>(aV.malloc_(aNewCap * sizeof(T)));
+ if (!newbuf) {
+ return false;
}
+ T* dst = newbuf;
+ T* src = aV.beginNoCheck();
+ for (; src < aV.endNoCheck(); ++dst, ++src) {
+ new(dst) T(Move(*src));
+ }
+ VectorImpl::destroy(aV.beginNoCheck(), aV.endNoCheck());
+ aV.free_(aV.mBegin);
+ aV.mBegin = newbuf;
+ /* aV.mLength is unchanged. */
+ aV.mCapacity = aNewCap;
+ return true;
+ }
};
/*
* This partial template specialization provides a default implementation for
* vector operations when the element type is known to be a POD, as judged by
* IsPod.
*/
template<typename T, size_t N, class AP, class ThisVector>
struct VectorImpl<T, N, AP, ThisVector, true>
{
- static inline void destroy(T*, T*) {}
+ static inline void destroy(T*, T*) {}
- static inline void initialize(T* begin, T* end) {
- /*
- * You would think that memset would be a big win (or even break even)
- * when we know T is a POD. But currently it's not. This is probably
- * because |append| tends to be given small ranges and memset requires
- * a function call that doesn't get inlined.
- *
- * memset(begin, 0, sizeof(T) * (end-begin));
- */
- MOZ_ASSERT(begin <= end);
- for (T* p = begin; p < end; ++p)
- new(p) T();
+ static inline void initialize(T* aBegin, T* aEnd)
+ {
+ /*
+ * You would think that memset would be a big win (or even break even)
+ * when we know T is a POD. But currently it's not. This is probably
+ * because |append| tends to be given small ranges and memset requires
+ * a function call that doesn't get inlined.
+ *
+ * memset(aBegin, 0, sizeof(T) * (aEnd - aBegin));
+ */
+ MOZ_ASSERT(aBegin <= aEnd);
+ for (T* p = aBegin; p < aEnd; ++p) {
+ new(p) T();
}
+ }
- template<typename U>
- static inline void copyConstruct(T* dst, const U* srcbeg, const U* srcend) {
- /*
- * See above memset comment. Also, notice that copyConstruct is
- * currently templated (T != U), so memcpy won't work without
- * requiring T == U.
- *
- * memcpy(dst, srcbeg, sizeof(T) * (srcend - srcbeg));
- */
- MOZ_ASSERT(srcbeg <= srcend);
- for (const U* p = srcbeg; p < srcend; ++p, ++dst)
- *dst = *p;
+ template<typename U>
+ static inline void copyConstruct(T* aDst,
+ const U* aSrcStart, const U* aSrcEnd)
+ {
+ /*
+ * See above memset comment. Also, notice that copyConstruct is
+ * currently templated (T != U), so memcpy won't work without
+ * requiring T == U.
+ *
+ * memcpy(aDst, aSrcStart, sizeof(T) * (aSrcEnd - aSrcStart));
+ */
+ MOZ_ASSERT(aSrcStart <= aSrcEnd);
+ for (const U* p = aSrcStart; p < aSrcEnd; ++p, ++aDst) {
+ *aDst = *p;
}
+ }
- template<typename U>
- static inline void moveConstruct(T* dst, const U* srcbeg, const U* srcend) {
- copyConstruct(dst, srcbeg, srcend);
- }
+ template<typename U>
+ static inline void moveConstruct(T* aDst,
+ const U* aSrcStart, const U* aSrcEnd)
+ {
+ copyConstruct(aDst, aSrcStart, aSrcEnd);
+ }
- static inline void copyConstructN(T* dst, size_t n, const T& t) {
- for (T* end = dst + n; dst < end; ++dst)
- *dst = t;
+ static inline void copyConstructN(T* aDst, size_t aN, const T& aT)
+ {
+ for (T* end = aDst + aN; aDst < end; ++aDst) {
+ *aDst = aT;
}
+ }
- static inline bool
- growTo(VectorBase<T, N, AP, ThisVector>& v, size_t newCap) {
- MOZ_ASSERT(!v.usingInlineStorage());
- MOZ_ASSERT(!CapacityHasExcessSpace<T>(newCap));
- size_t oldSize = sizeof(T) * v.mCapacity;
- size_t newSize = sizeof(T) * newCap;
- T* newbuf = reinterpret_cast<T*>(v.realloc_(v.mBegin, oldSize, newSize));
- if (!newbuf)
- return false;
- v.mBegin = newbuf;
- /* v.mLength is unchanged. */
- v.mCapacity = newCap;
- return true;
+ static inline bool
+ growTo(VectorBase<T, N, AP, ThisVector>& aV, size_t aNewCap)
+ {
+ MOZ_ASSERT(!aV.usingInlineStorage());
+ MOZ_ASSERT(!CapacityHasExcessSpace<T>(aNewCap));
+ size_t oldSize = sizeof(T) * aV.mCapacity;
+ size_t newSize = sizeof(T) * aNewCap;
+ T* newbuf = reinterpret_cast<T*>(aV.realloc_(aV.mBegin, oldSize, newSize));
+ if (!newbuf) {
+ return false;
}
+ aV.mBegin = newbuf;
+ /* aV.mLength is unchanged. */
+ aV.mCapacity = aNewCap;
+ return true;
+ }
};
} // namespace detail
/*
* A CRTP base class for vector-like classes. Unless you really really want
* your own vector class -- and you almost certainly don't -- you should use
* mozilla::Vector instead!
*
* See mozilla::Vector for interface requirements.
*/
template<typename T, size_t N, class AllocPolicy, class ThisVector>
class VectorBase : private AllocPolicy
{
- /* utilities */
+ /* utilities */
- static const bool sElemIsPod = IsPod<T>::value;
- typedef detail::VectorImpl<T, N, AllocPolicy, ThisVector, sElemIsPod> Impl;
- friend struct detail::VectorImpl<T, N, AllocPolicy, ThisVector, sElemIsPod>;
+ static const bool kElemIsPod = IsPod<T>::value;
+ typedef detail::VectorImpl<T, N, AllocPolicy, ThisVector, kElemIsPod> Impl;
+ friend struct detail::VectorImpl<T, N, AllocPolicy, ThisVector, kElemIsPod>;
- bool growStorageBy(size_t incr);
- bool convertToHeapStorage(size_t newCap);
+ bool growStorageBy(size_t aIncr);
+ bool convertToHeapStorage(size_t aNewCap);
- /* magic constants */
+ /* magic constants */
- static const int sMaxInlineBytes = 1024;
+ static const int kMaxInlineBytes = 1024;
- /* compute constants */
+ /* compute constants */
- /*
- * Consider element size to be 1 for buffer sizing if there are 0 inline
- * elements. This allows us to compile when the definition of the element
- * type is not visible here.
- *
- * Explicit specialization is only allowed at namespace scope, so in order
- * to keep everything here, we use a dummy template parameter with partial
- * specialization.
- */
- template<int M, int Dummy>
- struct ElemSize
- {
- static const size_t value = sizeof(T);
- };
- template<int Dummy>
- struct ElemSize<0, Dummy>
- {
- static const size_t value = 1;
- };
+ /*
+ * Consider element size to be 1 for buffer sizing if there are 0 inline
+ * elements. This allows us to compile when the definition of the element
+ * type is not visible here.
+ *
+ * Explicit specialization is only allowed at namespace scope, so in order
+ * to keep everything here, we use a dummy template parameter with partial
+ * specialization.
+ */
+ template<int M, int Dummy>
+ struct ElemSize
+ {
+ static const size_t value = sizeof(T);
+ };
+ template<int Dummy>
+ struct ElemSize<0, Dummy>
+ {
+ static const size_t value = 1;
+ };
- static const size_t sInlineCapacity =
- tl::Min<N, sMaxInlineBytes / ElemSize<N, 0>::value>::value;
+ static const size_t kInlineCapacity =
+ tl::Min<N, kMaxInlineBytes / ElemSize<N, 0>::value>::value;
- /* Calculate inline buffer size; avoid 0-sized array. */
- static const size_t sInlineBytes =
- tl::Max<1, sInlineCapacity * ElemSize<N, 0>::value>::value;
+ /* Calculate inline buffer size; avoid 0-sized array. */
+ static const size_t kInlineBytes =
+ tl::Max<1, kInlineCapacity * ElemSize<N, 0>::value>::value;
- /* member data */
+ /* member data */
- /*
- * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
- * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
- * mLength, mBegin + mCapacity) holds uninitialized memory. The range
- * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
- * previously allocated by a call to reserve().
- */
- T* mBegin;
+ /*
+ * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
+ * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
+ * mLength, mBegin + mCapacity) holds uninitialized memory. The range
+ * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
+ * previously allocated by a call to reserve().
+ */
+ T* mBegin;
- /* Number of elements in the vector. */
- size_t mLength;
+ /* Number of elements in the vector. */
+ size_t mLength;
- /* Max number of elements storable in the vector without resizing. */
- size_t mCapacity;
+ /* Max number of elements storable in the vector without resizing. */
+ size_t mCapacity;
#ifdef DEBUG
- /* Max elements of reserved or used space in this vector. */
- size_t mReserved;
+ /* Max elements of reserved or used space in this vector. */
+ size_t mReserved;
#endif
- /* Memory used for inline storage. */
- AlignedStorage<sInlineBytes> storage;
+ /* Memory used for inline storage. */
+ AlignedStorage<kInlineBytes> mStorage;
#ifdef DEBUG
- friend class ReentrancyGuard;
- bool entered;
+ friend class ReentrancyGuard;
+ bool mEntered;
#endif
- /* private accessors */
+ /* private accessors */
- bool usingInlineStorage() const {
- return mBegin == const_cast<VectorBase*>(this)->inlineStorage();
- }
+ bool usingInlineStorage() const
+ {
+ return mBegin == const_cast<VectorBase*>(this)->inlineStorage();
+ }
- T* inlineStorage() {
- return static_cast<T*>(storage.addr());
- }
+ T* inlineStorage()
+ {
+ return static_cast<T*>(mStorage.addr());
+ }
- T* beginNoCheck() const {
- return mBegin;
- }
+ T* beginNoCheck() const
+ {
+ return mBegin;
+ }
- T* endNoCheck() {
- return mBegin + mLength;
- }
+ T* endNoCheck()
+ {
+ return mBegin + mLength;
+ }
- const T* endNoCheck() const {
- return mBegin + mLength;
- }
+ const T* endNoCheck() const
+ {
+ return mBegin + mLength;
+ }
#ifdef DEBUG
- size_t reserved() const {
- MOZ_ASSERT(mReserved <= mCapacity);
- MOZ_ASSERT(mLength <= mReserved);
- return mReserved;
- }
+ size_t reserved() const
+ {
+ MOZ_ASSERT(mReserved <= mCapacity);
+ MOZ_ASSERT(mLength <= mReserved);
+ return mReserved;
+ }
#endif
- /* Append operations guaranteed to succeed due to pre-reserved space. */
- template<typename U> void internalAppend(U&& u);
- template<typename U, size_t O, class BP, class UV>
- void internalAppendAll(const VectorBase<U, O, BP, UV>& u);
- void internalAppendN(const T& t, size_t n);
- template<typename U> void internalAppend(const U* begin, size_t length);
+ /* Append operations guaranteed to succeed due to pre-reserved space. */
+ template<typename U> void internalAppend(U&& aU);
+ template<typename U, size_t O, class BP, class UV>
+ void internalAppendAll(const VectorBase<U, O, BP, UV>& aU);
+ void internalAppendN(const T& aT, size_t aN);
+ template<typename U> void internalAppend(const U* aBegin, size_t aLength);
+
+public:
+ static const size_t sMaxInlineStorage = N;
- public:
- static const size_t sMaxInlineStorage = N;
-
- typedef T ElementType;
+ typedef T ElementType;
- explicit VectorBase(AllocPolicy = AllocPolicy());
- explicit VectorBase(ThisVector&&); /* Move constructor. */
- ThisVector& operator=(ThisVector&&); /* Move assignment. */
- ~VectorBase();
+ explicit VectorBase(AllocPolicy = AllocPolicy());
+ explicit VectorBase(ThisVector&&); /* Move constructor. */
+ ThisVector& operator=(ThisVector&&); /* Move assignment. */
+ ~VectorBase();
+
+ /* accessors */
+
+ const AllocPolicy& allocPolicy() const { return *this; }
- /* accessors */
+ AllocPolicy& allocPolicy() { return *this; }
+
+ enum { InlineLength = N };
- const AllocPolicy& allocPolicy() const {
- return *this;
- }
+ size_t length() const { return mLength; }
- AllocPolicy& allocPolicy() {
- return *this;
- }
+ bool empty() const { return mLength == 0; }
+
+ size_t capacity() const { return mCapacity; }
- enum { InlineLength = N };
+ T* begin()
+ {
+ MOZ_ASSERT(!mEntered);
+ return mBegin;
+ }
- size_t length() const {
- return mLength;
- }
-
- bool empty() const {
- return mLength == 0;
- }
+ const T* begin() const
+ {
+ MOZ_ASSERT(!mEntered);
+ return mBegin;
+ }
- size_t capacity() const {
- return mCapacity;
- }
-
- T* begin() {
- MOZ_ASSERT(!entered);
- return mBegin;
- }
+ T* end()
+ {
+ MOZ_ASSERT(!mEntered);
+ return mBegin + mLength;
+ }
- const T* begin() const {
- MOZ_ASSERT(!entered);
- return mBegin;
- }
+ const T* end() const
+ {
+ MOZ_ASSERT(!mEntered);
+ return mBegin + mLength;
+ }
- T* end() {
- MOZ_ASSERT(!entered);
- return mBegin + mLength;
- }
+ T& operator[](size_t aIndex)
+ {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(aIndex < mLength);
+ return begin()[aIndex];
+ }
- const T* end() const {
- MOZ_ASSERT(!entered);
- return mBegin + mLength;
- }
+ const T& operator[](size_t aIndex) const
+ {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(aIndex < mLength);
+ return begin()[aIndex];
+ }
- T& operator[](size_t i) {
- MOZ_ASSERT(!entered);
- MOZ_ASSERT(i < mLength);
- return begin()[i];
- }
+ T& back()
+ {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(!empty());
+ return *(end() - 1);
+ }
- const T& operator[](size_t i) const {
- MOZ_ASSERT(!entered);
- MOZ_ASSERT(i < mLength);
- return begin()[i];
- }
+ const T& back() const
+ {
+ MOZ_ASSERT(!mEntered);
+ MOZ_ASSERT(!empty());
+ return *(end() - 1);
+ }
- T& back() {
- MOZ_ASSERT(!entered);
- MOZ_ASSERT(!empty());
- return *(end() - 1);
+ class Range
+ {
+ friend class VectorBase;
+ T* mCur;
+ T* mEnd;
+ Range(T* aCur, T* aEnd)
+ : mCur(aCur)
+ , mEnd(aEnd)
+ {
+ MOZ_ASSERT(aCur <= aEnd);
}
- const T& back() const {
- MOZ_ASSERT(!entered);
- MOZ_ASSERT(!empty());
- return *(end() - 1);
- }
+ public:
+ Range() {}
+ bool empty() const { return mCur == mEnd; }
+ size_t remain() const { return PointerRangeSize(mCur, mEnd); }
+ T& front() const { MOZ_ASSERT(!empty()); return *mCur; }
+ void popFront() { MOZ_ASSERT(!empty()); ++mCur; }
+ T popCopyFront() { MOZ_ASSERT(!empty()); return *mCur++; }
+ };
- class Range
- {
- friend class VectorBase;
- T* cur_;
- T* end_;
- Range(T* cur, T* end) : cur_(cur), end_(end) {
- MOZ_ASSERT(cur <= end);
- }
+ Range all() { return Range(begin(), end()); }
+
+ /* mutators */
- public:
- Range() {}
- bool empty() const { return cur_ == end_; }
- size_t remain() const { return PointerRangeSize(cur_, end_); }
- T& front() const { MOZ_ASSERT(!empty()); return *cur_; }
- void popFront() { MOZ_ASSERT(!empty()); ++cur_; }
- T popCopyFront() { MOZ_ASSERT(!empty()); return *cur_++; }
- };
-
- Range all() {
- return Range(begin(), end());
- }
+ /**
+ * Given that the vector is empty and has no inline storage, grow to
+   * |aRequest|.
+ */
+ bool initCapacity(size_t aRequest);
- /* mutators */
+ /**
+ * If reserve(length() + N) succeeds, the N next appends are guaranteed to
+ * succeed.
+ */
+ bool reserve(size_t aRequest);
- /**
- * Given that the vector is empty and has no inline storage, grow to
- * |capacity|.
- */
- bool initCapacity(size_t request);
+ /**
+ * Destroy elements in the range [end() - aIncr, end()). Does not deallocate
+ * or unreserve storage for those elements.
+ */
+ void shrinkBy(size_t aIncr);
- /**
- * If reserve(length() + N) succeeds, the N next appends are guaranteed to
- * succeed.
- */
- bool reserve(size_t request);
+ /** Grow the vector by aIncr elements. */
+ bool growBy(size_t aIncr);
- /**
- * Destroy elements in the range [end() - incr, end()). Does not deallocate
- * or unreserve storage for those elements.
- */
- void shrinkBy(size_t incr);
-
- /** Grow the vector by incr elements. */
- bool growBy(size_t incr);
+  /** Call shrinkBy or growBy based on whether aNewLength > length(). */
+ bool resize(size_t aNewLength);
- /** Call shrinkBy or growBy based on whether newSize > length(). */
- bool resize(size_t newLength);
+ /**
+ * Increase the length of the vector, but don't initialize the new elements
+ * -- leave them as uninitialized memory.
+ */
+ bool growByUninitialized(size_t aIncr);
+ bool resizeUninitialized(size_t aNewLength);
- /**
- * Increase the length of the vector, but don't initialize the new elements
- * -- leave them as uninitialized memory.
- */
- bool growByUninitialized(size_t incr);
- bool resizeUninitialized(size_t newLength);
+ /** Shorthand for shrinkBy(length()). */
+ void clear();
- /** Shorthand for shrinkBy(length()). */
- void clear();
+ /** Clears and releases any heap-allocated storage. */
+ void clearAndFree();
- /** Clears and releases any heap-allocated storage. */
- void clearAndFree();
+ /**
+ * If true, appending |aNeeded| elements won't reallocate elements storage.
+ * This *doesn't* mean that infallibleAppend may be used! You still must
+ * reserve the extra space, even if this method indicates that appends won't
+ * need to reallocate elements storage.
+ */
+ bool canAppendWithoutRealloc(size_t aNeeded) const;
- /**
- * If true, appending |needed| elements won't reallocate elements storage.
- * This *doesn't* mean that infallibleAppend may be used! You still must
- * reserve the extra space, even if this method indicates that appends won't
- * need to reallocate elements storage.
- */
- bool canAppendWithoutRealloc(size_t needed) const;
-
- /** Potentially fallible append operations. */
+ /** Potentially fallible append operations. */
- /**
- * This can take either a T& or a T&&. Given a T&&, it moves |u| into the
- * vector, instead of copying it. If it fails, |u| is left unmoved. ("We are
- * not amused.")
- */
- template<typename U> bool append(U&& u);
+ /**
+ * This can take either a T& or a T&&. Given a T&&, it moves |aU| into the
+ * vector, instead of copying it. If it fails, |aU| is left unmoved. ("We are
+ * not amused.")
+ */
+ template<typename U> bool append(U&& aU);
- template<typename U, size_t O, class BP, class UV>
- bool appendAll(const VectorBase<U, O, BP, UV>& u);
- bool appendN(const T& t, size_t n);
- template<typename U> bool append(const U* begin, const U* end);
- template<typename U> bool append(const U* begin, size_t length);
+ template<typename U, size_t O, class BP, class UV>
+ bool appendAll(const VectorBase<U, O, BP, UV>& aU);
+ bool appendN(const T& aT, size_t aN);
+ template<typename U> bool append(const U* aBegin, const U* aEnd);
+ template<typename U> bool append(const U* aBegin, size_t aLength);
- /*
- * Guaranteed-infallible append operations for use upon vectors whose
- * memory has been pre-reserved. Don't use this if you haven't reserved the
- * memory!
- */
- template<typename U> void infallibleAppend(U&& u) {
- internalAppend(Forward<U>(u));
- }
- void infallibleAppendN(const T& t, size_t n) {
- internalAppendN(t, n);
- }
- template<typename U> void infallibleAppend(const U* aBegin, const U* aEnd) {
- internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
- }
- template<typename U> void infallibleAppend(const U* aBegin, size_t aLength) {
- internalAppend(aBegin, aLength);
- }
+ /*
+ * Guaranteed-infallible append operations for use upon vectors whose
+ * memory has been pre-reserved. Don't use this if you haven't reserved the
+ * memory!
+ */
+ template<typename U> void infallibleAppend(U&& aU)
+ {
+ internalAppend(Forward<U>(aU));
+ }
+ void infallibleAppendN(const T& aT, size_t aN)
+ {
+ internalAppendN(aT, aN);
+ }
+ template<typename U> void infallibleAppend(const U* aBegin, const U* aEnd)
+ {
+ internalAppend(aBegin, PointerRangeSize(aBegin, aEnd));
+ }
+ template<typename U> void infallibleAppend(const U* aBegin, size_t aLength)
+ {
+ internalAppend(aBegin, aLength);
+ }
- void popBack();
-
- T popCopy();
+ void popBack();
- /**
- * Transfers ownership of the internal buffer used by this vector to the
- * caller. (It's the caller's responsibility to properly deallocate this
- * buffer, in accordance with this vector's AllocPolicy.) After this call,
- * the vector is empty. Since the returned buffer may need to be allocated
- * (if the elements are currently stored in-place), the call can fail,
- * returning nullptr.
- *
- * N.B. Although a T*, only the range [0, length()) is constructed.
- */
- T* extractRawBuffer();
+ T popCopy();
- /**
- * Transfer ownership of an array of objects into the vector. The caller
- * must have allocated the array in accordance with this vector's
- * AllocPolicy.
- *
- * N.B. This call assumes that there are no uninitialized elements in the
- * passed array.
- */
- void replaceRawBuffer(T* p, size_t length);
+ /**
+ * Transfers ownership of the internal buffer used by this vector to the
+ * caller. (It's the caller's responsibility to properly deallocate this
+ * buffer, in accordance with this vector's AllocPolicy.) After this call,
+ * the vector is empty. Since the returned buffer may need to be allocated
+ * (if the elements are currently stored in-place), the call can fail,
+ * returning nullptr.
+ *
+ * N.B. Although a T*, only the range [0, length()) is constructed.
+ */
+ T* extractRawBuffer();
+
+ /**
+ * Transfer ownership of an array of objects into the vector. The caller
+ * must have allocated the array in accordance with this vector's
+ * AllocPolicy.
+ *
+ * N.B. This call assumes that there are no uninitialized elements in the
+ * passed array.
+ */
+ void replaceRawBuffer(T* aP, size_t aLength);
- /**
- * Places |val| at position |p|, shifting existing elements from |p| onward
- * one position higher. On success, |p| should not be reused because it'll
- * be a dangling pointer if reallocation of the vector storage occurred; the
- * return value should be used instead. On failure, nullptr is returned.
- *
- * Example usage:
- *
- * if (!(p = vec.insert(p, val)))
- * <handle failure>
- * <keep working with p>
- *
- * This is inherently a linear-time operation. Be careful!
- */
- template<typename U>
- T* insert(T* p, U&& val);
+ /**
+ * Places |aVal| at position |aP|, shifting existing elements from |aP| onward
+ * one position higher. On success, |aP| should not be reused because it'll
+ * be a dangling pointer if reallocation of the vector storage occurred; the
+ * return value should be used instead. On failure, nullptr is returned.
+ *
+ * Example usage:
+ *
+ * if (!(p = vec.insert(p, val))) {
+ * <handle failure>
+ * }
+ * <keep working with p>
+ *
+ * This is inherently a linear-time operation. Be careful!
+ */
+ template<typename U>
+ T* insert(T* aP, U&& aVal);
- /**
- * Removes the element |t|, which must fall in the bounds [begin, end),
- * shifting existing elements from |t + 1| onward one position lower.
- */
- void erase(T* t);
+ /**
+ * Removes the element |aT|, which must fall in the bounds [begin, end),
+ * shifting existing elements from |aT + 1| onward one position lower.
+ */
+ void erase(T* aT);
- /**
- * Removes the elements [|b|, |e|), which must fall in the bounds [begin, end),
- * shifting existing elements from |e + 1| onward to b's old position.
- */
- void erase(T* b, T *e);
+ /**
+ * Removes the elements [|aBegin|, |aEnd|), which must fall in the bounds
+   * [begin, end), shifting existing elements from |aEnd| onward to aBegin's
+ * old position.
+ */
+ void erase(T* aBegin, T* aEnd);
- /**
- * Measure the size of the vector's heap-allocated storage.
- */
- size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const;
+ /**
+ * Measure the size of the vector's heap-allocated storage.
+ */
+ size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
- /**
- * Like sizeOfExcludingThis, but also measures the size of the vector
- * object (which must be heap-allocated) itself.
- */
- size_t sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const;
+ /**
+ * Like sizeOfExcludingThis, but also measures the size of the vector
+ * object (which must be heap-allocated) itself.
+ */
+ size_t sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
- void swap(ThisVector& other);
+ void swap(ThisVector& aOther);
- private:
- VectorBase(const VectorBase&) MOZ_DELETE;
- void operator=(const VectorBase&) MOZ_DELETE;
+private:
+ VectorBase(const VectorBase&) MOZ_DELETE;
+ void operator=(const VectorBase&) MOZ_DELETE;
- /* Move-construct/assign only from our derived class, ThisVector. */
- VectorBase(VectorBase&&) MOZ_DELETE;
- void operator=(VectorBase&&) MOZ_DELETE;
+ /* Move-construct/assign only from our derived class, ThisVector. */
+ VectorBase(VectorBase&&) MOZ_DELETE;
+ void operator=(VectorBase&&) MOZ_DELETE;
};
/* This does the re-entrancy check plus several other sanity checks. */
#define MOZ_REENTRANCY_GUARD_ET_AL \
ReentrancyGuard g(*this); \
- MOZ_ASSERT_IF(usingInlineStorage(), mCapacity == sInlineCapacity); \
+ MOZ_ASSERT_IF(usingInlineStorage(), mCapacity == kInlineCapacity); \
MOZ_ASSERT(reserved() <= mCapacity); \
MOZ_ASSERT(mLength <= reserved()); \
MOZ_ASSERT(mLength <= mCapacity)
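// The guard on the first line of this macro is easiest to read as a sketch.
// Assuming it mirrors mfbt/ReentrancyGuard.h (a debug-only flag flipped for
// the guarded scope), a minimal analogue looks like this:
class SketchReentrancyGuard
{
  bool& mEnteredFlag;

public:
  // In the real mfbt class the constructor is templated over the guarded
  // object and the mEntered flag only exists in DEBUG builds.
  template<class T>
  explicit SketchReentrancyGuard(T& aObj)
    : mEnteredFlag(aObj.mEntered)
  {
    MOZ_ASSERT(!mEnteredFlag, "reentrant call into a guarded method");
    mEnteredFlag = true;
  }

  ~SketchReentrancyGuard() { mEnteredFlag = false; }
};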
/* Vector Implementation */
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE
-VectorBase<T, N, AP, TV>::VectorBase(AP ap)
- : AP(ap),
- mLength(0),
- mCapacity(sInlineCapacity)
+VectorBase<T, N, AP, TV>::VectorBase(AP aAP)
+ : AP(aAP)
+ , mLength(0)
+ , mCapacity(kInlineCapacity)
#ifdef DEBUG
- , mReserved(sInlineCapacity),
- entered(false)
+ , mReserved(kInlineCapacity)
+ , mEntered(false)
#endif
{
- mBegin = static_cast<T*>(storage.addr());
+ mBegin = static_cast<T*>(mStorage.addr());
}
/* Move constructor. */
template<typename T, size_t N, class AllocPolicy, class TV>
MOZ_ALWAYS_INLINE
-VectorBase<T, N, AllocPolicy, TV>::VectorBase(TV&& rhs)
- : AllocPolicy(Move(rhs))
+VectorBase<T, N, AllocPolicy, TV>::VectorBase(TV&& aRhs)
+ : AllocPolicy(Move(aRhs))
#ifdef DEBUG
- , entered(false)
+ , mEntered(false)
#endif
{
- mLength = rhs.mLength;
- mCapacity = rhs.mCapacity;
+ mLength = aRhs.mLength;
+ mCapacity = aRhs.mCapacity;
#ifdef DEBUG
- mReserved = rhs.mReserved;
+ mReserved = aRhs.mReserved;
#endif
- if (rhs.usingInlineStorage()) {
+ if (aRhs.usingInlineStorage()) {
/* We can't move the buffer over in this case, so copy elements. */
- mBegin = static_cast<T*>(storage.addr());
- Impl::moveConstruct(mBegin, rhs.beginNoCheck(), rhs.endNoCheck());
+ mBegin = static_cast<T*>(mStorage.addr());
+ Impl::moveConstruct(mBegin, aRhs.beginNoCheck(), aRhs.endNoCheck());
/*
- * Leave rhs's mLength, mBegin, mCapacity, and mReserved as they are.
+ * Leave aRhs's mLength, mBegin, mCapacity, and mReserved as they are.
* The elements in its in-line storage still need to be destroyed.
*/
} else {
/*
* Take src's buffer, and turn src into an empty vector using
* in-line storage.
*/
- mBegin = rhs.mBegin;
- rhs.mBegin = static_cast<T*>(rhs.storage.addr());
- rhs.mCapacity = sInlineCapacity;
- rhs.mLength = 0;
+ mBegin = aRhs.mBegin;
+ aRhs.mBegin = static_cast<T*>(aRhs.mStorage.addr());
+ aRhs.mCapacity = kInlineCapacity;
+ aRhs.mLength = 0;
#ifdef DEBUG
- rhs.mReserved = sInlineCapacity;
+ aRhs.mReserved = kInlineCapacity;
#endif
}
}
/* Move assignment. */
template<typename T, size_t N, class AP, class TV>
-MOZ_ALWAYS_INLINE
-TV&
-VectorBase<T, N, AP, TV>::operator=(TV&& rhs)
+MOZ_ALWAYS_INLINE TV&
+VectorBase<T, N, AP, TV>::operator=(TV&& aRhs)
{
- MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+ MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
TV* tv = static_cast<TV*>(this);
tv->~TV();
- new(tv) TV(Move(rhs));
+ new(tv) TV(Move(aRhs));
return *tv;
}
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE
VectorBase<T, N, AP, TV>::~VectorBase()
{
MOZ_REENTRANCY_GUARD_ET_AL;
Impl::destroy(beginNoCheck(), endNoCheck());
- if (!usingInlineStorage())
+ if (!usingInlineStorage()) {
this->free_(beginNoCheck());
+ }
}
/*
- * This function will create a new heap buffer with capacity newCap,
+ * This function will create a new heap buffer with capacity aNewCap,
* move all elements in the inline buffer to this new buffer,
* and fail on OOM.
*/
template<typename T, size_t N, class AP, class TV>
inline bool
-VectorBase<T, N, AP, TV>::convertToHeapStorage(size_t newCap)
+VectorBase<T, N, AP, TV>::convertToHeapStorage(size_t aNewCap)
{
MOZ_ASSERT(usingInlineStorage());
/* Allocate buffer. */
- MOZ_ASSERT(!detail::CapacityHasExcessSpace<T>(newCap));
- T* newBuf = reinterpret_cast<T*>(this->malloc_(newCap * sizeof(T)));
- if (!newBuf)
+ MOZ_ASSERT(!detail::CapacityHasExcessSpace<T>(aNewCap));
+ T* newBuf = reinterpret_cast<T*>(this->malloc_(aNewCap * sizeof(T)));
+ if (!newBuf) {
return false;
+ }
/* Copy inline elements into heap buffer. */
Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
Impl::destroy(beginNoCheck(), endNoCheck());
/* Switch in heap buffer. */
mBegin = newBuf;
/* mLength is unchanged. */
- mCapacity = newCap;
+ mCapacity = aNewCap;
return true;
}
template<typename T, size_t N, class AP, class TV>
MOZ_NEVER_INLINE bool
-VectorBase<T, N, AP, TV>::growStorageBy(size_t incr)
+VectorBase<T, N, AP, TV>::growStorageBy(size_t aIncr)
{
- MOZ_ASSERT(mLength + incr > mCapacity);
+ MOZ_ASSERT(mLength + aIncr > mCapacity);
MOZ_ASSERT_IF(!usingInlineStorage(),
!detail::CapacityHasExcessSpace<T>(mCapacity));
/*
* When choosing a new capacity, its size should be as close to 2**N bytes
* as possible. 2**N-sized requests are best because they are unlikely to
* be rounded up by the allocator. Asking for a 2**N number of elements
* isn't as good, because if sizeof(T) is not a power-of-two that would
* result in a non-2**N request size.
*/
size_t newCap;
- if (incr == 1) {
+ if (aIncr == 1) {
if (usingInlineStorage()) {
/* This case occurs in ~70--80% of the calls to this function. */
size_t newSize =
- tl::RoundUpPow2<(sInlineCapacity + 1) * sizeof(T)>::value;
+ tl::RoundUpPow2<(kInlineCapacity + 1) * sizeof(T)>::value;
newCap = newSize / sizeof(T);
goto convert;
}
if (mLength == 0) {
/* This case occurs in ~0--10% of the calls to this function. */
newCap = 1;
goto grow;
@@ -734,145 +770,153 @@ VectorBase<T, N, AP, TV>::growStorageBy(
}
/*
* If we reach here, the existing capacity will have a size that is already
* as close to 2^N as sizeof(T) will allow. Just double the capacity, and
* then there might be space for one more element.
*/
newCap = mLength * 2;
- if (detail::CapacityHasExcessSpace<T>(newCap))
+ if (detail::CapacityHasExcessSpace<T>(newCap)) {
newCap += 1;
+ }
} else {
/* This case occurs in ~2% of the calls to this function. */
- size_t newMinCap = mLength + incr;
+ size_t newMinCap = mLength + aIncr;
- /* Did mLength + incr overflow? Will newCap * sizeof(T) overflow? */
+ /* Did mLength + aIncr overflow? Will newCap * sizeof(T) overflow? */
if (newMinCap < mLength ||
newMinCap & tl::MulOverflowMask<2 * sizeof(T)>::value)
{
this->reportAllocOverflow();
return false;
}
size_t newMinSize = newMinCap * sizeof(T);
size_t newSize = RoundUpPow2(newMinSize);
newCap = newSize / sizeof(T);
}
if (usingInlineStorage()) {
- convert:
+convert:
return convertToHeapStorage(newCap);
}
grow:
return Impl::growTo(*this, newCap);
}
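/*
 * Worked example of the sizing policy above (illustrative only): with
 * sizeof(T) == 12 and mLength == 5, appending 3 elements makes
 * newMinSize == 8 * 12 == 96 bytes; RoundUpPow2(96) == 128, so
 * newCap == 128 / 12 == 10 and the eventual heap request is 120 bytes,
 * the closest multiple of sizeof(T) to a power of two that still fits.
 */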
template<typename T, size_t N, class AP, class TV>
inline bool
-VectorBase<T, N, AP, TV>::initCapacity(size_t request)
+VectorBase<T, N, AP, TV>::initCapacity(size_t aRequest)
{
MOZ_ASSERT(empty());
MOZ_ASSERT(usingInlineStorage());
- if (request == 0)
+ if (aRequest == 0) {
return true;
- T* newbuf = reinterpret_cast<T*>(this->malloc_(request * sizeof(T)));
- if (!newbuf)
+ }
+ T* newbuf = reinterpret_cast<T*>(this->malloc_(aRequest * sizeof(T)));
+ if (!newbuf) {
return false;
+ }
mBegin = newbuf;
- mCapacity = request;
+ mCapacity = aRequest;
#ifdef DEBUG
- mReserved = request;
+ mReserved = aRequest;
#endif
return true;
}
template<typename T, size_t N, class AP, class TV>
inline bool
-VectorBase<T, N, AP, TV>::reserve(size_t request)
+VectorBase<T, N, AP, TV>::reserve(size_t aRequest)
{
MOZ_REENTRANCY_GUARD_ET_AL;
- if (request > mCapacity && !growStorageBy(request - mLength))
+ if (aRequest > mCapacity && !growStorageBy(aRequest - mLength)) {
return false;
-
+ }
#ifdef DEBUG
- if (request > mReserved)
- mReserved = request;
+ if (aRequest > mReserved) {
+ mReserved = aRequest;
+ }
MOZ_ASSERT(mLength <= mReserved);
MOZ_ASSERT(mReserved <= mCapacity);
#endif
return true;
}
template<typename T, size_t N, class AP, class TV>
inline void
-VectorBase<T, N, AP, TV>::shrinkBy(size_t incr)
+VectorBase<T, N, AP, TV>::shrinkBy(size_t aIncr)
{
MOZ_REENTRANCY_GUARD_ET_AL;
- MOZ_ASSERT(incr <= mLength);
- Impl::destroy(endNoCheck() - incr, endNoCheck());
- mLength -= incr;
+ MOZ_ASSERT(aIncr <= mLength);
+ Impl::destroy(endNoCheck() - aIncr, endNoCheck());
+ mLength -= aIncr;
}
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::growBy(size_t incr)
+VectorBase<T, N, AP, TV>::growBy(size_t aIncr)
{
MOZ_REENTRANCY_GUARD_ET_AL;
- if (incr > mCapacity - mLength && !growStorageBy(incr))
+ if (aIncr > mCapacity - mLength && !growStorageBy(aIncr)) {
return false;
-
- MOZ_ASSERT(mLength + incr <= mCapacity);
- T* newend = endNoCheck() + incr;
+ }
+ MOZ_ASSERT(mLength + aIncr <= mCapacity);
+ T* newend = endNoCheck() + aIncr;
Impl::initialize(endNoCheck(), newend);
- mLength += incr;
+ mLength += aIncr;
#ifdef DEBUG
- if (mLength > mReserved)
+ if (mLength > mReserved) {
mReserved = mLength;
+ }
#endif
return true;
}
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::growByUninitialized(size_t incr)
+VectorBase<T, N, AP, TV>::growByUninitialized(size_t aIncr)
{
MOZ_REENTRANCY_GUARD_ET_AL;
- if (incr > mCapacity - mLength && !growStorageBy(incr))
+ if (aIncr > mCapacity - mLength && !growStorageBy(aIncr)) {
return false;
-
- MOZ_ASSERT(mLength + incr <= mCapacity);
- mLength += incr;
+ }
+ MOZ_ASSERT(mLength + aIncr <= mCapacity);
+ mLength += aIncr;
#ifdef DEBUG
- if (mLength > mReserved)
+ if (mLength > mReserved) {
mReserved = mLength;
+ }
#endif
return true;
}
template<typename T, size_t N, class AP, class TV>
inline bool
-VectorBase<T, N, AP, TV>::resize(size_t newLength)
+VectorBase<T, N, AP, TV>::resize(size_t aNewLength)
{
size_t curLength = mLength;
- if (newLength > curLength)
- return growBy(newLength - curLength);
- shrinkBy(curLength - newLength);
+ if (aNewLength > curLength) {
+ return growBy(aNewLength - curLength);
+ }
+ shrinkBy(curLength - aNewLength);
return true;
}
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::resizeUninitialized(size_t newLength)
+VectorBase<T, N, AP, TV>::resizeUninitialized(size_t aNewLength)
{
size_t curLength = mLength;
- if (newLength > curLength)
- return growByUninitialized(newLength - curLength);
- shrinkBy(curLength - newLength);
+ if (aNewLength > curLength) {
+ return growByUninitialized(aNewLength - curLength);
+ }
+ shrinkBy(curLength - aNewLength);
return true;
}
template<typename T, size_t N, class AP, class TV>
inline void
VectorBase<T, N, AP, TV>::clear()
{
MOZ_REENTRANCY_GUARD_ET_AL;
@@ -881,188 +925,196 @@ VectorBase<T, N, AP, TV>::clear()
}
template<typename T, size_t N, class AP, class TV>
inline void
VectorBase<T, N, AP, TV>::clearAndFree()
{
clear();
- if (usingInlineStorage())
+ if (usingInlineStorage()) {
return;
-
+ }
this->free_(beginNoCheck());
- mBegin = static_cast<T*>(storage.addr());
- mCapacity = sInlineCapacity;
+ mBegin = static_cast<T*>(mStorage.addr());
+ mCapacity = kInlineCapacity;
#ifdef DEBUG
- mReserved = sInlineCapacity;
+ mReserved = kInlineCapacity;
#endif
}
template<typename T, size_t N, class AP, class TV>
inline bool
-VectorBase<T, N, AP, TV>::canAppendWithoutRealloc(size_t needed) const
+VectorBase<T, N, AP, TV>::canAppendWithoutRealloc(size_t aNeeded) const
{
- return mLength + needed <= mCapacity;
+ return mLength + aNeeded <= mCapacity;
}
template<typename T, size_t N, class AP, class TV>
template<typename U, size_t O, class BP, class UV>
MOZ_ALWAYS_INLINE void
-VectorBase<T, N, AP, TV>::internalAppendAll(const VectorBase<U, O, BP, UV>& other)
+VectorBase<T, N, AP, TV>::internalAppendAll(
+ const VectorBase<U, O, BP, UV>& aOther)
{
- internalAppend(other.begin(), other.length());
+ internalAppend(aOther.begin(), aOther.length());
}
template<typename T, size_t N, class AP, class TV>
template<typename U>
MOZ_ALWAYS_INLINE void
-VectorBase<T, N, AP, TV>::internalAppend(U&& u)
+VectorBase<T, N, AP, TV>::internalAppend(U&& aU)
{
MOZ_ASSERT(mLength + 1 <= mReserved);
MOZ_ASSERT(mReserved <= mCapacity);
- new(endNoCheck()) T(Forward<U>(u));
+ new(endNoCheck()) T(Forward<U>(aU));
++mLength;
}
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::appendN(const T& t, size_t needed)
+VectorBase<T, N, AP, TV>::appendN(const T& aT, size_t aNeeded)
{
MOZ_REENTRANCY_GUARD_ET_AL;
- if (mLength + needed > mCapacity && !growStorageBy(needed))
+ if (mLength + aNeeded > mCapacity && !growStorageBy(aNeeded)) {
return false;
-
+ }
#ifdef DEBUG
- if (mLength + needed > mReserved)
- mReserved = mLength + needed;
+ if (mLength + aNeeded > mReserved) {
+ mReserved = mLength + aNeeded;
+ }
#endif
- internalAppendN(t, needed);
+ internalAppendN(aT, aNeeded);
return true;
}
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE void
-VectorBase<T, N, AP, TV>::internalAppendN(const T& t, size_t needed)
+VectorBase<T, N, AP, TV>::internalAppendN(const T& aT, size_t aNeeded)
{
- MOZ_ASSERT(mLength + needed <= mReserved);
+ MOZ_ASSERT(mLength + aNeeded <= mReserved);
MOZ_ASSERT(mReserved <= mCapacity);
- Impl::copyConstructN(endNoCheck(), needed, t);
- mLength += needed;
+ Impl::copyConstructN(endNoCheck(), aNeeded, aT);
+ mLength += aNeeded;
}
template<typename T, size_t N, class AP, class TV>
template<typename U>
inline T*
-VectorBase<T, N, AP, TV>::insert(T* p, U&& val)
+VectorBase<T, N, AP, TV>::insert(T* aP, U&& aVal)
{
- MOZ_ASSERT(begin() <= p);
- MOZ_ASSERT(p <= end());
- size_t pos = p - begin();
+ MOZ_ASSERT(begin() <= aP);
+ MOZ_ASSERT(aP <= end());
+ size_t pos = aP - begin();
MOZ_ASSERT(pos <= mLength);
size_t oldLength = mLength;
if (pos == oldLength) {
- if (!append(Forward<U>(val)))
+ if (!append(Forward<U>(aVal))) {
return nullptr;
+ }
} else {
T oldBack = Move(back());
- if (!append(Move(oldBack))) /* Dup the last element. */
+ if (!append(Move(oldBack))) { /* Dup the last element. */
return nullptr;
- for (size_t i = oldLength; i > pos; --i)
+ }
+ for (size_t i = oldLength; i > pos; --i) {
(*this)[i] = Move((*this)[i - 1]);
- (*this)[pos] = Forward<U>(val);
+ }
+ (*this)[pos] = Forward<U>(aVal);
}
return begin() + pos;
}
template<typename T, size_t N, class AP, class TV>
inline void
-VectorBase<T, N, AP, TV>::erase(T* it)
+VectorBase<T, N, AP, TV>::erase(T* aIt)
{
- MOZ_ASSERT(begin() <= it);
- MOZ_ASSERT(it < end());
- while (it + 1 < end()) {
- *it = Move(*(it + 1));
- ++it;
+ MOZ_ASSERT(begin() <= aIt);
+ MOZ_ASSERT(aIt < end());
+ while (aIt + 1 < end()) {
+ *aIt = Move(*(aIt + 1));
+ ++aIt;
}
popBack();
}
template<typename T, size_t N, class AP, class TV>
inline void
-VectorBase<T, N, AP, TV>::erase(T* b, T *e)
+VectorBase<T, N, AP, TV>::erase(T* aBegin, T* aEnd)
{
- MOZ_ASSERT(begin() <= b);
- MOZ_ASSERT(b <= e);
- MOZ_ASSERT(e <= end());
- while (e < end())
- *b++ = Move(*e++);
- shrinkBy(e - b);
+ MOZ_ASSERT(begin() <= aBegin);
+ MOZ_ASSERT(aBegin <= aEnd);
+ MOZ_ASSERT(aEnd <= end());
+ while (aEnd < end()) {
+ *aBegin++ = Move(*aEnd++);
+ }
+ shrinkBy(aEnd - aBegin);
}
template<typename T, size_t N, class AP, class TV>
template<typename U>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::append(const U* insBegin, const U* insEnd)
+VectorBase<T, N, AP, TV>::append(const U* aInsBegin, const U* aInsEnd)
{
MOZ_REENTRANCY_GUARD_ET_AL;
- size_t needed = PointerRangeSize(insBegin, insEnd);
- if (mLength + needed > mCapacity && !growStorageBy(needed))
+ size_t aNeeded = PointerRangeSize(aInsBegin, aInsEnd);
+ if (mLength + aNeeded > mCapacity && !growStorageBy(aNeeded)) {
return false;
-
+ }
#ifdef DEBUG
- if (mLength + needed > mReserved)
- mReserved = mLength + needed;
+ if (mLength + aNeeded > mReserved) {
+ mReserved = mLength + aNeeded;
+ }
#endif
- internalAppend(insBegin, needed);
+ internalAppend(aInsBegin, aNeeded);
return true;
}
template<typename T, size_t N, class AP, class TV>
template<typename U>
MOZ_ALWAYS_INLINE void
-VectorBase<T, N, AP, TV>::internalAppend(const U* insBegin, size_t insLength)
+VectorBase<T, N, AP, TV>::internalAppend(const U* aInsBegin, size_t aInsLength)
{
- MOZ_ASSERT(mLength + insLength <= mReserved);
+ MOZ_ASSERT(mLength + aInsLength <= mReserved);
MOZ_ASSERT(mReserved <= mCapacity);
- Impl::copyConstruct(endNoCheck(), insBegin, insBegin + insLength);
- mLength += insLength;
+ Impl::copyConstruct(endNoCheck(), aInsBegin, aInsBegin + aInsLength);
+ mLength += aInsLength;
}
template<typename T, size_t N, class AP, class TV>
template<typename U>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::append(U&& u)
+VectorBase<T, N, AP, TV>::append(U&& aU)
{
MOZ_REENTRANCY_GUARD_ET_AL;
- if (mLength == mCapacity && !growStorageBy(1))
+ if (mLength == mCapacity && !growStorageBy(1)) {
return false;
-
+ }
#ifdef DEBUG
- if (mLength + 1 > mReserved)
+ if (mLength + 1 > mReserved) {
mReserved = mLength + 1;
+ }
#endif
- internalAppend(Forward<U>(u));
+ internalAppend(Forward<U>(aU));
return true;
}
template<typename T, size_t N, class AP, class TV>
template<typename U, size_t O, class BP, class UV>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::appendAll(const VectorBase<U, O, BP, UV>& other)
+VectorBase<T, N, AP, TV>::appendAll(const VectorBase<U, O, BP, UV>& aOther)
{
- return append(other.begin(), other.length());
+ return append(aOther.begin(), aOther.length());
}
template<typename T, size_t N, class AP, class TV>
template<class U>
MOZ_ALWAYS_INLINE bool
-VectorBase<T, N, AP, TV>::append(const U *insBegin, size_t insLength)
+VectorBase<T, N, AP, TV>::append(const U* aInsBegin, size_t aInsLength)
{
- return append(insBegin, insBegin + insLength);
+ return append(aInsBegin, aInsBegin + aInsLength);
}
template<typename T, size_t N, class AP, class TV>
MOZ_ALWAYS_INLINE void
VectorBase<T, N, AP, TV>::popBack()
{
MOZ_REENTRANCY_GUARD_ET_AL;
MOZ_ASSERT(!empty());
@@ -1081,106 +1133,108 @@ VectorBase<T, N, AP, TV>::popCopy()
template<typename T, size_t N, class AP, class TV>
inline T*
VectorBase<T, N, AP, TV>::extractRawBuffer()
{
T* ret;
if (usingInlineStorage()) {
ret = reinterpret_cast<T*>(this->malloc_(mLength * sizeof(T)));
- if (!ret)
+ if (!ret) {
return nullptr;
+ }
Impl::copyConstruct(ret, beginNoCheck(), endNoCheck());
Impl::destroy(beginNoCheck(), endNoCheck());
/* mBegin, mCapacity are unchanged. */
mLength = 0;
} else {
ret = mBegin;
- mBegin = static_cast<T*>(storage.addr());
+ mBegin = static_cast<T*>(mStorage.addr());
mLength = 0;
- mCapacity = sInlineCapacity;
+ mCapacity = kInlineCapacity;
#ifdef DEBUG
- mReserved = sInlineCapacity;
+ mReserved = kInlineCapacity;
#endif
}
return ret;
}
template<typename T, size_t N, class AP, class TV>
inline void
-VectorBase<T, N, AP, TV>::replaceRawBuffer(T* p, size_t aLength)
+VectorBase<T, N, AP, TV>::replaceRawBuffer(T* aP, size_t aLength)
{
MOZ_REENTRANCY_GUARD_ET_AL;
/* Destroy what we have. */
Impl::destroy(beginNoCheck(), endNoCheck());
- if (!usingInlineStorage())
+ if (!usingInlineStorage()) {
this->free_(beginNoCheck());
+ }
/* Take in the new buffer. */
- if (aLength <= sInlineCapacity) {
+ if (aLength <= kInlineCapacity) {
/*
- * We convert to inline storage if possible, even though p might
+ * We convert to inline storage if possible, even though aP might
* otherwise be acceptable. Maybe this behaviour should be
* specifiable with an argument to this function.
*/
- mBegin = static_cast<T*>(storage.addr());
+ mBegin = static_cast<T*>(mStorage.addr());
mLength = aLength;
- mCapacity = sInlineCapacity;
- Impl::moveConstruct(mBegin, p, p + aLength);
- Impl::destroy(p, p + aLength);
- this->free_(p);
+ mCapacity = kInlineCapacity;
+ Impl::moveConstruct(mBegin, aP, aP + aLength);
+ Impl::destroy(aP, aP + aLength);
+ this->free_(aP);
} else {
- mBegin = p;
+ mBegin = aP;
mLength = aLength;
mCapacity = aLength;
}
#ifdef DEBUG
mReserved = aLength;
#endif
}
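// Usage sketch for extractRawBuffer()/replaceRawBuffer() above (illustrative
// only; the helper name is hypothetical):
static bool
HandOffAndReclaim(mozilla::Vector<int>& aVec)
{
  size_t len = aVec.length();
  int* raw = aVec.extractRawBuffer();  // may return nullptr on OOM
  if (!raw) {
    return false;
  }
  // ... pass |raw| to code that expects a plain array of |len| ints ...
  aVec.replaceRawBuffer(raw, len);     // hand ownership back to the vector
  return true;
}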
template<typename T, size_t N, class AP, class TV>
inline size_t
-VectorBase<T, N, AP, TV>::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+VectorBase<T, N, AP, TV>::sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
- return usingInlineStorage() ? 0 : mallocSizeOf(beginNoCheck());
+ return usingInlineStorage() ? 0 : aMallocSizeOf(beginNoCheck());
}
template<typename T, size_t N, class AP, class TV>
inline size_t
-VectorBase<T, N, AP, TV>::sizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
+VectorBase<T, N, AP, TV>::sizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
- return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf);
}
template<typename T, size_t N, class AP, class TV>
inline void
-VectorBase<T, N, AP, TV>::swap(TV& other)
+VectorBase<T, N, AP, TV>::swap(TV& aOther)
{
static_assert(N == 0,
"still need to implement this for N != 0");
// This only works when inline storage is always empty.
- if (!usingInlineStorage() && other.usingInlineStorage()) {
- other.mBegin = mBegin;
+ if (!usingInlineStorage() && aOther.usingInlineStorage()) {
+ aOther.mBegin = mBegin;
mBegin = inlineStorage();
- } else if (usingInlineStorage() && !other.usingInlineStorage()) {
- mBegin = other.mBegin;
- other.mBegin = other.inlineStorage();
- } else if (!usingInlineStorage() && !other.usingInlineStorage()) {
- Swap(mBegin, other.mBegin);
+ } else if (usingInlineStorage() && !aOther.usingInlineStorage()) {
+ mBegin = aOther.mBegin;
+ aOther.mBegin = aOther.inlineStorage();
+ } else if (!usingInlineStorage() && !aOther.usingInlineStorage()) {
+ Swap(mBegin, aOther.mBegin);
} else {
// This case is a no-op, since we'd set both to use their inline storage.
}
- Swap(mLength, other.mLength);
- Swap(mCapacity, other.mCapacity);
+ Swap(mLength, aOther.mLength);
+ Swap(mCapacity, aOther.mCapacity);
#ifdef DEBUG
- Swap(mReserved, other.mReserved);
+ Swap(mReserved, aOther.mReserved);
#endif
}
/*
* STL-like container providing a short-lived, dynamic buffer. Vector calls the
* constructors/destructors of all elements stored in its internal buffer, so
* non-PODs may be safely used. Additionally, Vector will store the first N
* elements in-place before resorting to dynamic allocation.
@@ -1201,24 +1255,25 @@ template<typename T,
size_t MinInlineCapacity = 0,
class AllocPolicy = MallocAllocPolicy>
class Vector
: public VectorBase<T,
MinInlineCapacity,
AllocPolicy,
Vector<T, MinInlineCapacity, AllocPolicy> >
{
- typedef VectorBase<T, MinInlineCapacity, AllocPolicy, Vector> Base;
+ typedef VectorBase<T, MinInlineCapacity, AllocPolicy, Vector> Base;
- public:
- explicit Vector(AllocPolicy alloc = AllocPolicy()) : Base(alloc) {}
- Vector(Vector&& vec) : Base(Move(vec)) {}
- Vector& operator=(Vector&& vec) {
- return Base::operator=(Move(vec));
- }
+public:
+ explicit Vector(AllocPolicy alloc = AllocPolicy()) : Base(alloc) {}
+ Vector(Vector&& vec) : Base(Move(vec)) {}
+ Vector& operator=(Vector&& aOther)
+ {
+ return Base::operator=(Move(aOther));
+ }
};
} // namespace mozilla
#ifdef _MSC_VER
#pragma warning(pop)
#endif
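As a usage note, the reserve()/infallibleAppend() contract spelled out above is the main subtlety of this interface. The sketch below is a minimal illustration, not code from the patch, and only uses members declared in the header (reserve, infallibleAppend, length, operator[]); the function name is hypothetical.

#include <stdint.h>
#include "mozilla/Vector.h"

static bool
SumFirstN(size_t aCount, uint64_t* aOutSum)
{
  mozilla::Vector<uint32_t> values;

  // reserve() may reallocate and can fail; once it succeeds, the next aCount
  // appends are guaranteed to succeed, so infallibleAppend() is legal here.
  if (!values.reserve(aCount)) {
    return false;
  }
  for (size_t i = 0; i < aCount; ++i) {
    values.infallibleAppend(static_cast<uint32_t>(i));
  }

  uint64_t sum = 0;
  for (size_t i = 0; i < values.length(); ++i) {
    sum += values[i];
  }
  *aOutSum = sum;
  return true;
}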
--- a/mfbt/WeakPtr.h
+++ b/mfbt/WeakPtr.h
@@ -21,37 +21,38 @@
* class, where ClassName is the name of your class.
*
* The overhead of WeakPtr is that accesses to 'Foo' becomes an additional
* dereference, and an additional heap allocated pointer sized object shared
* between all of the WeakPtrs.
*
* Example of usage:
*
- * // To have a class C support weak pointers, inherit from SupportsWeakPtr<C>.
+ * // To have a class C support weak pointers, inherit from
+ * // SupportsWeakPtr<C>.
* class C : public SupportsWeakPtr<C>
* {
- * public:
- * MOZ_DECLARE_REFCOUNTED_TYPENAME(C)
- * int num;
- * void act();
+ * public:
+ * MOZ_DECLARE_REFCOUNTED_TYPENAME(C)
+ * int mNum;
+ * void act();
* };
*
- * C* ptr = new C();
+ * C* ptr = new C();
*
* // Get weak pointers to ptr. The first time asWeakPtr is called
* // a reference counted WeakReference object is created that
* // can live beyond the lifetime of 'ptr'. The WeakReference
* // object will be notified of 'ptr's destruction.
* WeakPtr<C> weak = ptr->asWeakPtr();
* WeakPtr<C> other = ptr->asWeakPtr();
*
* // Test a weak pointer for validity before using it.
* if (weak) {
- * weak->num = 17;
+ * weak->mNum = 17;
* weak->act();
* }
*
* // Destroying the underlying object clears weak pointers to it.
* delete ptr;
*
* MOZ_ASSERT(!weak, "Deleting |ptr| clears weak pointers to it.");
* MOZ_ASSERT(!other, "Deleting |ptr| clears all weak pointers to it.");
@@ -76,125 +77,126 @@
namespace mozilla {
template <typename T, class WeakReference> class WeakPtrBase;
template <typename T, class WeakReference> class SupportsWeakPtrBase;
namespace detail {
-// This can live beyond the lifetime of the class derived from SupportsWeakPtrBase.
+// This can live beyond the lifetime of the class derived from
+// SupportsWeakPtrBase.
template<class T>
class WeakReference : public ::mozilla::RefCounted<WeakReference<T> >
{
- public:
- explicit WeakReference(T* p) : ptr(p) {}
- T* get() const {
- return ptr;
- }
+public:
+ explicit WeakReference(T* p) : mPtr(p) {}
+
+ T* get() const { return mPtr; }
#ifdef MOZ_REFCOUNTED_LEAK_CHECKING
#ifdef XP_WIN
#define snprintf _snprintf
#endif
- const char* typeName() const {
- static char nameBuffer[1024];
- const char* innerType = ptr->typeName();
- // We could do fancier length checks at runtime, but innerType is
- // controlled by us so we can ensure that this never causes a buffer
- // overflow by this assertion.
- MOZ_ASSERT(strlen(innerType) + sizeof("WeakReference<>") < ArrayLength(nameBuffer),
- "Exceedingly large type name");
- snprintf(nameBuffer, ArrayLength(nameBuffer), "WeakReference<%s>", innerType);
- // This is usually not OK, but here we are returning a pointer to a static
- // buffer which will immediately be used by the caller.
- return nameBuffer;
- }
- size_t typeSize() const {
- return sizeof(*this);
- }
+ const char* typeName() const
+ {
+ static char nameBuffer[1024];
+ const char* innerType = mPtr->typeName();
+ // We could do fancier length checks at runtime, but innerType is
+ // controlled by us so we can ensure that this never causes a buffer
+ // overflow by this assertion.
+ MOZ_ASSERT(strlen(innerType) + sizeof("WeakReference<>") <
+ ArrayLength(nameBuffer),
+ "Exceedingly large type name");
+ snprintf(nameBuffer, ArrayLength(nameBuffer), "WeakReference<%s>",
+ innerType);
+ // This is usually not OK, but here we are returning a pointer to a static
+ // buffer which will immediately be used by the caller.
+ return nameBuffer;
+ }
+
+ size_t typeSize() const { return sizeof(*this); }
#undef snprintf
#endif
- private:
- friend class WeakPtrBase<T, WeakReference<T> >;
- friend class SupportsWeakPtrBase<T, WeakReference<T> >;
- void detach() {
- ptr = nullptr;
- }
- T* ptr;
+private:
+ friend class WeakPtrBase<T, WeakReference<T> >;
+ friend class SupportsWeakPtrBase<T, WeakReference<T> >;
+
+ void detach() { mPtr = nullptr; }
+
+ T* mPtr;
};
} // namespace detail
template <typename T, class WeakReference>
class SupportsWeakPtrBase
{
- public:
- WeakPtrBase<T, WeakReference> asWeakPtr() {
- if (!weakRef)
- weakRef = new WeakReference(static_cast<T*>(this));
- return WeakPtrBase<T, WeakReference>(weakRef);
+public:
+ WeakPtrBase<T, WeakReference> asWeakPtr()
+ {
+ if (!weakRef) {
+ weakRef = new WeakReference(static_cast<T*>(this));
}
+ return WeakPtrBase<T, WeakReference>(weakRef);
+ }
- protected:
- ~SupportsWeakPtrBase() {
- static_assert(IsBaseOf<SupportsWeakPtrBase<T, WeakReference>, T>::value,
- "T must derive from SupportsWeakPtrBase<T, WeakReference>");
- if (weakRef)
- weakRef->detach();
+protected:
+ ~SupportsWeakPtrBase()
+ {
+ static_assert(IsBaseOf<SupportsWeakPtrBase<T, WeakReference>, T>::value,
+ "T must derive from SupportsWeakPtrBase<T, WeakReference>");
+ if (weakRef) {
+ weakRef->detach();
}
+ }
- private:
- friend class WeakPtrBase<T, WeakReference>;
+private:
+ friend class WeakPtrBase<T, WeakReference>;
- RefPtr<WeakReference> weakRef;
+ RefPtr<WeakReference> weakRef;
};
template <typename T>
class SupportsWeakPtr : public SupportsWeakPtrBase<T, detail::WeakReference<T> >
{
};
template <typename T, class WeakReference>
class WeakPtrBase
{
- public:
- WeakPtrBase(const WeakPtrBase<T, WeakReference>& o) : ref(o.ref) {}
- // Ensure that ref is dereferenceable in the uninitialized state
- WeakPtrBase() : ref(new WeakReference(nullptr)) {}
+public:
+ WeakPtrBase(const WeakPtrBase<T, WeakReference>& aOther)
+ : mRef(aOther.mRef)
+ {}
- operator T*() const {
- return ref->get();
- }
- T& operator*() const {
- return *ref->get();
- }
+ // Ensure that mRef is dereferenceable in the uninitialized state.
+ WeakPtrBase() : mRef(new WeakReference(nullptr)) {}
+
+ operator T*() const { return mRef->get(); }
+ T& operator*() const { return *mRef->get(); }
- T* operator->() const {
- return ref->get();
- }
+ T* operator->() const { return mRef->get(); }
+
+ T* get() const { return mRef->get(); }
- T* get() const {
- return ref->get();
- }
+private:
+ friend class SupportsWeakPtrBase<T, WeakReference>;
- private:
- friend class SupportsWeakPtrBase<T, WeakReference>;
+  explicit WeakPtrBase(const RefPtr<WeakReference>& aOther) : mRef(aOther) {}
- explicit WeakPtrBase(const RefPtr<WeakReference> &o) : ref(o) {}
-
- RefPtr<WeakReference> ref;
+ RefPtr<WeakReference> mRef;
};
template <typename T>
class WeakPtr : public WeakPtrBase<T, detail::WeakReference<T> >
{
- typedef WeakPtrBase<T, detail::WeakReference<T> > Base;
- public:
- WeakPtr(const WeakPtr<T>& o) : Base(o) {}
- MOZ_IMPLICIT WeakPtr(const Base& o) : Base(o) {}
- WeakPtr() {}
+ typedef WeakPtrBase<T, detail::WeakReference<T> > Base;
+public:
+ WeakPtr(const WeakPtr<T>& aOther) : Base(aOther) {}
+ MOZ_IMPLICIT WeakPtr(const Base& aOther) : Base(aOther) {}
+ WeakPtr() {}
};
} // namespace mozilla
#endif /* mozilla_WeakPtr_h */
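One property worth calling out from the code above: asWeakPtr() allocates the reference-counted WeakReference only on first use, and every WeakPtr taken from the same object shares it, which is why deleting the object clears them all at once. A small sketch under those assumptions (the Observer class is hypothetical):

#include "mozilla/Assertions.h"
#include "mozilla/WeakPtr.h"

class Observer : public mozilla::SupportsWeakPtr<Observer>
{
public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(Observer)
  void Ping() {}
};

static void
Demo()
{
  Observer* obs = new Observer();
  mozilla::WeakPtr<Observer> a = obs->asWeakPtr();  // allocates the WeakReference
  mozilla::WeakPtr<Observer> b = obs->asWeakPtr();  // reuses the same one

  if (a) {
    a->Ping();
  }

  delete obs;            // detaches the shared WeakReference
  MOZ_ASSERT(!a && !b);  // both weak pointers observe the destruction
}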
--- a/mfbt/WindowsVersion.h
+++ b/mfbt/WindowsVersion.h
@@ -1,125 +1,144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_WindowsVersion_h
#define mozilla_WindowsVersion_h
#include "mozilla/Attributes.h"
#include <stdint.h>
#include <windows.h>
-namespace mozilla
+namespace mozilla {
+
+inline bool
+IsWindowsVersionOrLater(uint32_t aVersion)
{
- inline bool
- IsWindowsVersionOrLater(uint32_t aVersion)
- {
- static uint32_t minVersion = 0;
- static uint32_t maxVersion = UINT32_MAX;
+ static uint32_t minVersion = 0;
+ static uint32_t maxVersion = UINT32_MAX;
+
+ if (minVersion >= aVersion) {
+ return true;
+ }
- if (minVersion >= aVersion) {
- return true;
- }
+ if (aVersion >= maxVersion) {
+ return false;
+ }
- if (aVersion >= maxVersion) {
- return false;
- }
+ OSVERSIONINFOEX info;
+ ZeroMemory(&info, sizeof(OSVERSIONINFOEX));
+ info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+ info.dwMajorVersion = aVersion >> 24;
+ info.dwMinorVersion = (aVersion >> 16) & 0xFF;
+ info.wServicePackMajor = (aVersion >> 8) & 0xFF;
+ info.wServicePackMinor = aVersion & 0xFF;
- OSVERSIONINFOEX info;
- ZeroMemory(&info, sizeof(OSVERSIONINFOEX));
- info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
- info.dwMajorVersion = aVersion >> 24;
- info.dwMinorVersion = (aVersion >> 16) & 0xFF;
- info.wServicePackMajor = (aVersion >> 8) & 0xFF;
- info.wServicePackMinor = aVersion & 0xFF;
+ DWORDLONG conditionMask = 0;
+ VER_SET_CONDITION(conditionMask, VER_MAJORVERSION, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(conditionMask, VER_MINORVERSION, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(conditionMask, VER_SERVICEPACKMAJOR, VER_GREATER_EQUAL);
+ VER_SET_CONDITION(conditionMask, VER_SERVICEPACKMINOR, VER_GREATER_EQUAL);
+
+ if (VerifyVersionInfo(&info,
+ VER_MAJORVERSION | VER_MINORVERSION |
+ VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR,
+ conditionMask)) {
+ minVersion = aVersion;
+ return true;
+ }
- DWORDLONG conditionMask = 0;
- VER_SET_CONDITION(conditionMask, VER_MAJORVERSION, VER_GREATER_EQUAL);
- VER_SET_CONDITION(conditionMask, VER_MINORVERSION, VER_GREATER_EQUAL);
- VER_SET_CONDITION(conditionMask, VER_SERVICEPACKMAJOR, VER_GREATER_EQUAL);
- VER_SET_CONDITION(conditionMask, VER_SERVICEPACKMINOR, VER_GREATER_EQUAL);
+ maxVersion = aVersion;
+ return false;
+}
- if (VerifyVersionInfo(&info,
- VER_MAJORVERSION | VER_MINORVERSION |
- VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR,
- conditionMask)) {
- minVersion = aVersion;
- return true;
- }
+inline bool
+IsWindowsBuildOrLater(uint32_t aBuild)
+{
+ static uint32_t minBuild = 0;
+ static uint32_t maxBuild = UINT32_MAX;
- maxVersion = aVersion;
+ if (minBuild >= aBuild) {
+ return true;
+ }
+
+ if (aBuild >= maxBuild) {
return false;
}
- inline bool
- IsWindowsBuildOrLater(uint32_t aBuild)
- {
- static uint32_t minBuild = 0;
- static uint32_t maxBuild = UINT32_MAX;
-
- if (minBuild >= aBuild) {
- return true;
- }
-
- if (aBuild >= maxBuild) {
- return false;
- }
+ OSVERSIONINFOEX info;
+ ZeroMemory(&info, sizeof(OSVERSIONINFOEX));
+ info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+ info.dwBuildNumber = aBuild;
- OSVERSIONINFOEX info;
- ZeroMemory(&info, sizeof(OSVERSIONINFOEX));
- info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
- info.dwBuildNumber = aBuild;
-
- DWORDLONG conditionMask = 0;
- VER_SET_CONDITION(conditionMask, VER_BUILDNUMBER, VER_GREATER_EQUAL);
+ DWORDLONG conditionMask = 0;
+ VER_SET_CONDITION(conditionMask, VER_BUILDNUMBER, VER_GREATER_EQUAL);
- if (VerifyVersionInfo(&info, VER_BUILDNUMBER, conditionMask)) {
- minBuild = aBuild;
- return true;
- }
-
- maxBuild = aBuild;
- return false;
+ if (VerifyVersionInfo(&info, VER_BUILDNUMBER, conditionMask)) {
+ minBuild = aBuild;
+ return true;
}
- MOZ_ALWAYS_INLINE bool
- IsXPSP3OrLater()
- { return IsWindowsVersionOrLater(0x05010300ul); }
-
- MOZ_ALWAYS_INLINE bool
- IsWin2003OrLater()
- { return IsWindowsVersionOrLater(0x05020000ul); }
+ maxBuild = aBuild;
+ return false;
+}
- MOZ_ALWAYS_INLINE bool
- IsWin2003SP2OrLater()
- { return IsWindowsVersionOrLater(0x05020200ul); }
-
- MOZ_ALWAYS_INLINE bool
- IsVistaOrLater()
- { return IsWindowsVersionOrLater(0x06000000ul); }
+MOZ_ALWAYS_INLINE bool
+IsXPSP3OrLater()
+{
+ return IsWindowsVersionOrLater(0x05010300ul);
+}
- MOZ_ALWAYS_INLINE bool
- IsVistaSP1OrLater()
- { return IsWindowsVersionOrLater(0x06000100ul); }
-
- MOZ_ALWAYS_INLINE bool
- IsWin7OrLater()
- { return IsWindowsVersionOrLater(0x06010000ul); }
+MOZ_ALWAYS_INLINE bool
+IsWin2003OrLater()
+{
+ return IsWindowsVersionOrLater(0x05020000ul);
+}
- MOZ_ALWAYS_INLINE bool
- IsWin7SP1OrLater()
- { return IsWindowsVersionOrLater(0x06010100ul); }
+MOZ_ALWAYS_INLINE bool
+IsWin2003SP2OrLater()
+{
+ return IsWindowsVersionOrLater(0x05020200ul);
+}
- MOZ_ALWAYS_INLINE bool
- IsWin8OrLater()
- { return IsWindowsVersionOrLater(0x06020000ul); }
-
- MOZ_ALWAYS_INLINE bool
- IsNotWin7PreRTM()
- {
- return IsWin7SP1OrLater() || !IsWin7OrLater() ||
- IsWindowsBuildOrLater(7600);
- }
+MOZ_ALWAYS_INLINE bool
+IsVistaOrLater()
+{
+ return IsWindowsVersionOrLater(0x06000000ul);
}
+MOZ_ALWAYS_INLINE bool
+IsVistaSP1OrLater()
+{
+ return IsWindowsVersionOrLater(0x06000100ul);
+}
+
+MOZ_ALWAYS_INLINE bool
+IsWin7OrLater()
+{
+ return IsWindowsVersionOrLater(0x06010000ul);
+}
+
+MOZ_ALWAYS_INLINE bool
+IsWin7SP1OrLater()
+{
+ return IsWindowsVersionOrLater(0x06010100ul);
+}
+
+MOZ_ALWAYS_INLINE bool
+IsWin8OrLater()
+{
+ return IsWindowsVersionOrLater(0x06020000ul);
+}
+
+MOZ_ALWAYS_INLINE bool
+IsNotWin7PreRTM()
+{
+ return IsWin7SP1OrLater() || !IsWin7OrLater() ||
+ IsWindowsBuildOrLater(7600);
+}
+
+} // namespace mozilla
+
#endif /* mozilla_WindowsVersion_h */
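
The hex constants passed to IsWindowsVersionOrLater() pack one byte each of major version, minor version, service-pack major, and service-pack minor, matching the shifts applied to OSVERSIONINFOEX above. A small sketch of that encoding; MakeWindowsVersion is a hypothetical helper, not part of this header.

#include <stdint.h>

// Hypothetical helper mirroring the decoding in IsWindowsVersionOrLater():
// 0xMMmmSSss = major, minor, SP major, SP minor, one byte each.
constexpr uint32_t
MakeWindowsVersion(uint32_t aMajor, uint32_t aMinor,
                   uint32_t aSpMajor = 0, uint32_t aSpMinor = 0)
{
  return (aMajor << 24) | (aMinor << 16) | (aSpMajor << 8) | aSpMinor;
}

static_assert(MakeWindowsVersion(5, 1, 3) == 0x05010300ul, "XP SP3");
static_assert(MakeWindowsVersion(6, 0)    == 0x06000000ul, "Vista");
static_assert(MakeWindowsVersion(6, 1, 1) == 0x06010100ul, "Win7 SP1");
static_assert(MakeWindowsVersion(6, 2)    == 0x06020000ul, "Win8");

For example, 0x06010100ul is 6.1 SP1, i.e. Windows 7 SP1, which is exactly what IsWin7SP1OrLater() tests.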
--- a/mfbt/tests/TestSHA1.cpp
+++ b/mfbt/tests/TestSHA1.cpp
@@ -192,13 +192,14 @@ main()
static const uint8_t expected[20] =
{ 0xc8, 0xf2, 0x09, 0x59, 0x4e, 0x64, 0x40, 0xaa, 0x7b, 0xf7, 0xb8, 0xe0,
0xfa, 0x44, 0xb2, 0x31, 0x95, 0xad, 0x94, 0x81 };
static_assert(sizeof(expected) == sizeof(SHA1Sum::Hash),
"expected-data size should be the same as the actual hash "
"size");
- for (size_t i = 0; i < SHA1Sum::HashSize; i++)
+ for (size_t i = 0; i < SHA1Sum::kHashSize; i++) {
MOZ_RELEASE_ASSERT(hash[i] == expected[i]);
+ }
return 0;
}
--- a/mozglue/android/NativeCrypto.cpp
+++ b/mozglue/android/NativeCrypto.cpp
@@ -59,16 +59,16 @@ extern "C" JNIEXPORT jbyteArray JNICALL
SHA1Sum sha1;
SHA1Sum::Hash hashResult;
sha1.update((void *) str, (uint32_t) strLen);
sha1.finish(hashResult);
env->ReleaseByteArrayElements(jstr, str, JNI_ABORT);
- jbyteArray out = env->NewByteArray(SHA1Sum::HashSize);
+ jbyteArray out = env->NewByteArray(SHA1Sum::kHashSize);
if (out == NULL) {
return NULL;
}
- env->SetByteArrayRegion(out, 0, SHA1Sum::HashSize, (jbyte *) hashResult);
+ env->SetByteArrayRegion(out, 0, SHA1Sum::kHashSize, (jbyte *) hashResult);
return out;
}
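
Both SHA-1 call sites above follow the same update/finish pattern; a standalone sketch under the renamed constant (the input string and variable names are illustrative):

#include <stdio.h>
#include <string.h>

#include "mozilla/SHA1.h"

using mozilla::SHA1Sum;

int
main()
{
  const char* input = "hello world"; // illustrative input

  SHA1Sum sha1;
  sha1.update(input, static_cast<uint32_t>(strlen(input)));

  // SHA1Sum::Hash is uint8_t[SHA1Sum::kHashSize], i.e. 20 bytes.
  SHA1Sum::Hash digest;
  sha1.finish(digest);

  for (size_t i = 0; i < SHA1Sum::kHashSize; i++) {
    printf("%02x", digest[i]);
  }
  printf("\n");
  return 0;
}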
--- a/mozglue/linker/ElfLoader.h
+++ b/mozglue/linker/ElfLoader.h
@@ -73,17 +73,17 @@ class LibHandle;
namespace mozilla {
namespace detail {
template <> inline void RefCounted<LibHandle, AtomicRefCount>::Release() const;
template <> inline RefCounted<LibHandle, AtomicRefCount>::~RefCounted()
{
- MOZ_ASSERT(refCnt == 0x7fffdead);
+ MOZ_ASSERT(mRefCnt == 0x7fffdead);
}
} /* namespace detail */
} /* namespace mozilla */
/**
* Abstract class for loaded libraries. Libraries may be loaded through the
* system linker or this linker, both cases will be derived from this class.
@@ -214,37 +214,37 @@ private:
char *path;
/* Mappable object keeping the result of GetMappable() */
mutable mozilla::RefPtr<Mappable> mappable;
};
/**
* Specialized RefCounted<LibHandle>::Release. Under normal operation, when
- * refCnt reaches 0, the LibHandle is deleted. Its refCnt is however increased
- * to 1 on normal builds, and 0x7fffdead on debug builds so that the LibHandle
- * can still be referenced while the destructor is executing. The refCnt is
- * allowed to grow > 0x7fffdead, but not to decrease under that value, which
- * would mean too many Releases from within the destructor.
+ * mRefCnt reaches 0, the LibHandle is deleted. Its mRefCnt is however
+ * increased to 1 on normal builds, and 0x7fffdead on debug builds so that the
+ * LibHandle can still be referenced while the destructor is executing. The
+ * mRefCnt is allowed to grow > 0x7fffdead, but not to decrease under that
+ * value, which would mean too many Releases from within the destructor.
*/
namespace mozilla {
namespace detail {
template <> inline void RefCounted<LibHandle, AtomicRefCount>::Release() const {
#ifdef DEBUG
- if (refCnt > 0x7fff0000)
- MOZ_ASSERT(refCnt > 0x7fffdead);
+ if (mRefCnt > 0x7fff0000)
+ MOZ_ASSERT(mRefCnt > 0x7fffdead);
#endif
- MOZ_ASSERT(refCnt > 0);
- if (refCnt > 0) {
- if (0 == --refCnt) {
+ MOZ_ASSERT(mRefCnt > 0);
+ if (mRefCnt > 0) {
+ if (0 == --mRefCnt) {
#ifdef DEBUG
- refCnt = 0x7fffdead;
+ mRefCnt = 0x7fffdead;
#else
- refCnt = 1;
+ mRefCnt = 1;
#endif
delete static_cast<const LibHandle*>(this);
}
}
}
} /* namespace detail */
} /* namespace mozilla */
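
The specialized Release() above parks the reference count at a sentinel before the destructor runs. Below is a toy analogue of that trick, not the mfbt classes themselves, assuming only the behaviour described in the comment: releases issued while the destructor is executing decrement the parked count instead of reaching zero again.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Toy analogue of the sentinel trick: when the count hits zero it is
// parked at a large value before "delete this", so Release() calls made
// while the destructor is still running cannot reach zero a second time
// and trigger a second delete.
class ToyHandle
{
public:
  ToyHandle() : mRefCnt(0) {}

  void AddRef() { ++mRefCnt; }

  void Release()
  {
    assert(mRefCnt > 0);
    if (--mRefCnt == 0) {
      mRefCnt = 0x7fffdead; // park the count before destruction
      delete this;
    }
  }

private:
  ~ToyHandle()
  {
    // Something torn down here still holds a reference and releases it;
    // the parked count absorbs the decrement without re-deleting.
    Release();
    assert(mRefCnt >= 0x7fffdead - 1);
  }

  uint32_t mRefCnt;
};

int
main()
{
  ToyHandle* handle = new ToyHandle();
  handle->AddRef();
  handle->Release(); // destroys the handle exactly once
  printf("released cleanly\n");
  return 0;
}

In the real code the debug assert plays the same role as the assert in the toy destructor: it catches the case where so many Releases happen during destruction that the count would fall back under 0x7fffdead.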
--- a/netwerk/cache2/CacheFileIOManager.h
+++ b/netwerk/cache2/CacheFileIOManager.h
@@ -110,17 +110,17 @@ public:
{
public:
typedef const SHA1Sum::Hash& KeyType;
typedef const SHA1Sum::Hash* KeyTypePointer;
HandleHashKey(KeyTypePointer aKey)
{
MOZ_COUNT_CTOR(HandleHashKey);
- mHash = (SHA1Sum::Hash*)new uint8_t[SHA1Sum::HashSize];
+ mHash = (SHA1Sum::Hash*)new uint8_t[SHA1Sum::kHashSize];
memcpy(mHash, aKey, sizeof(SHA1Sum::Hash));
}
HandleHashKey(const HandleHashKey& aOther)
{
NS_NOTREACHED("HandleHashKey copy constructor is forbidden!");
}
~HandleHashKey()
{