Bug 1591132 - Make it easy to switch on and off these assertions in different build configurations. r=nika draft
author: Emilio Cobos Álvarez <emilio@crisal.io>
Thu, 24 Oct 2019 16:42:04 +0000
changeset 2396070 01599056406018f893caf9bd4aa59661bb3e93bb
parent 2396069 b1df5fb7a3eff6921ba1bd50c543f33294336fbd
child 2396071 c9282b78fc063915867d61fd09d43c2851f9fd0e
push id: 439690
push user: reviewbot
push date: Thu, 24 Oct 2019 16:42:25 +0000
treeherder: try@f953926e99b6 [default view] [failures only]
reviewers: nika
bugs: 1591132
milestone: 72.0a1
Bug 1591132 - Make it easy to switch on and off these assertions in different build configurations. r=nika Put them behind a MOZ_HASH_TABLE_CHECKS_ENABLED define, which right now is only defined in DEBUG builds, preserving behavior. MakeImmutable becomes an empty inline function when disabled, which should be zero-cost. Differential Diff: PHID-DIFF-t4kzjdkwaejb7jlc6xns
dom/indexedDB/ActorsParent.cpp
toolkit/components/telemetry/core/TelemetryOrigin.cpp
xpcom/ds/PLDHashTable.cpp
xpcom/ds/PLDHashTable.h
xpcom/ds/nsBaseHashtable.h
xpcom/ds/nsStaticNameTable.cpp
xpcom/ds/nsTHashtable.h
--- a/dom/indexedDB/ActorsParent.cpp
+++ b/dom/indexedDB/ActorsParent.cpp
@@ -14461,23 +14461,19 @@ void VersionChangeTransaction::UpdateMet
         MOZ_ASSERT(indexIter.Key());
         RefPtr<FullIndexMetadata>& index = indexIter.Data();
         MOZ_ASSERT(index);
 
         if (index->mDeleted) {
           indexIter.Remove();
         }
       }
-#ifdef DEBUG
       metadata->mIndexes.MarkImmutable();
-#endif
-    }
-#ifdef DEBUG
+    }
     info->mMetadata->mObjectStores.MarkImmutable();
-#endif
   } else {
     // Replace metadata pointers for all live databases.
     info->mMetadata = oldMetadata.forget();
 
     for (auto& liveDatabase : info->mLiveDatabases) {
       liveDatabase->mMetadata = info->mMetadata;
     }
   }
@@ -18604,20 +18600,17 @@ nsresult DatabaseOperationBase::GetUniqu
 
   if (NS_WARN_IF(aMaybeUniqueIndexTable.ref().Count() != indexCount)) {
     IDB_REPORT_INTERNAL_ERR();
     aMaybeUniqueIndexTable.reset();
     NS_WARNING("out of memory");
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
-#ifdef DEBUG
   aMaybeUniqueIndexTable.ref().MarkImmutable();
-#endif
-
   return NS_OK;
 }
 
 // static
 nsresult DatabaseOperationBase::IndexDataValuesFromUpdateInfos(
     const nsTArray<IndexUpdateInfo>& aUpdateInfos,
     const UniqueIndexTable& aUniqueIndexTable,
     nsTArray<IndexDataValue>& aIndexValues) {
@@ -24178,21 +24171,19 @@ bool ObjectStoreAddOrPutRequestOp::Init(
       if (NS_WARN_IF(!mUniqueIndexTable.ref().Put(indexId, unique, fallible))) {
         return false;
       }
     }
   } else if (mOverwrite) {
     mUniqueIndexTable.emplace();
   }
 
-#ifdef DEBUG
   if (mUniqueIndexTable.isSome()) {
     mUniqueIndexTable.ref().MarkImmutable();
   }
-#endif
 
   const nsTArray<FileAddInfo>& fileAddInfos = mParams.fileAddInfos();
 
   if (!fileAddInfos.IsEmpty()) {
     const uint32_t count = fileAddInfos.Length();
 
     if (NS_WARN_IF(!mStoredFileInfos.SetCapacity(count, fallible))) {
       return false;
--- a/toolkit/components/telemetry/core/TelemetryOrigin.cpp
+++ b/toolkit/components/telemetry/core/TelemetryOrigin.cpp
@@ -314,20 +314,18 @@ void TelemetryOrigin::InitializeGlobalSt
 
   // Add the meta-origin for tracking recordings to untracked origins.
   gOriginToIndexMap->Put(kUnknownOrigin, gOriginHashesList->Length());
 
   gMetricToOriginBag = MakeUnique<IdToOriginBag>();
 
   // This map shouldn't change at runtime, so make debug builds complain
   // if it tries.
-#ifdef DEBUG
   gOriginToIndexMap->MarkImmutable();
   gHashToIndexMap->MarkImmutable();
-#endif  // DEBUG
 
   gInitDone = true;
 }
 
 void TelemetryOrigin::DeInitializeGlobalState() {
   if (!XRE_IsParentProcess()) {
     return;
   }
--- a/xpcom/ds/PLDHashTable.cpp
+++ b/xpcom/ds/PLDHashTable.cpp
@@ -16,17 +16,17 @@
 #include "nsPointerHashKeys.h"
 #include "mozilla/Likely.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/ChaosMode.h"
 
 using namespace mozilla;
 
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
 
 class AutoReadOp {
   Checker& mChk;
 
  public:
   explicit AutoReadOp(Checker& aChk) : mChk(aChk) { mChk.StartReadOp(); }
   ~AutoReadOp() { mChk.EndReadOp(); }
 };
@@ -217,26 +217,26 @@ PLDHashTable& PLDHashTable::operator=(PL
   this->~PLDHashTable();
   new (KnownNotNull, this) PLDHashTable(ops, aOther.mEntrySize, 0);
 
   // Move non-const pieces over.
   mHashShift = std::move(aOther.mHashShift);
   mEntryCount = std::move(aOther.mEntryCount);
   mRemovedCount = std::move(aOther.mRemovedCount);
   mEntryStore.Set(aOther.mEntryStore.Get(), &mGeneration);
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   mChecker = std::move(aOther.mChecker);
 #endif
 
   recordreplay::MovePLDHashTableContents(aOther.mOps, mOps);
 
   // Clear up |aOther| so its destruction will be a no-op and it reports being
   // empty.
   {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
     AutoDestructorOp op(mChecker);
 #endif
     aOther.mEntryCount = 0;
     aOther.mEntryStore.Set(nullptr, &aOther.mGeneration);
   }
 
   return *this;
 }
@@ -280,17 +280,17 @@ bool PLDHashTable::MatchSlotKeyhash(Slot
 }
 
 // Compute the address of the indexed entry in table.
 auto PLDHashTable::SlotForIndex(uint32_t aIndex) const -> Slot {
   return mEntryStore.SlotForIndex(aIndex, mEntrySize, CapacityFromHashShift());
 }
 
 PLDHashTable::~PLDHashTable() {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   AutoDestructorOp op(mChecker);
 #endif
 
   if (!mEntryStore.Get()) {
     recordreplay::DestroyPLDHashTableCallbacks(mOps);
     return;
   }
 
@@ -490,33 +490,33 @@ PLDHashTable::ComputeKeyHash(const void*
     keyHash -= 2;
   }
   keyHash &= ~kCollisionFlag;
 
   return keyHash;
 }
 
 PLDHashEntryHdr* PLDHashTable::Search(const void* aKey) const {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   AutoReadOp op(mChecker);
 #endif
 
   if (!mEntryStore.Get()) {
     return nullptr;
   }
 
   return SearchTable<ForSearchOrRemove>(
       aKey, ComputeKeyHash(aKey),
       [&](Slot& slot) -> PLDHashEntryHdr* { return slot.ToEntry(); },
       [&]() -> PLDHashEntryHdr* { return nullptr; });
 }
 
 PLDHashEntryHdr* PLDHashTable::Add(const void* aKey,
                                    const mozilla::fallible_t&) {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   AutoWriteOp op(mChecker);
 #endif
 
   // Allocate the entry storage if it hasn't already been allocated.
   if (!mEntryStore.Get()) {
     uint32_t nbytes;
     // We already checked this in the constructor, so it must still be true.
     MOZ_RELEASE_ASSERT(
@@ -588,17 +588,17 @@ PLDHashEntryHdr* PLDHashTable::Add(const
       // which is double the current size.
       NS_ABORT_OOM(2 * EntrySize() * EntryCount());
     }
   }
   return entry;
 }
 
 void PLDHashTable::Remove(const void* aKey) {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   AutoWriteOp op(mChecker);
 #endif
 
   if (!mEntryStore.Get()) {
     return;
   }
 
   PLDHashNumber keyHash = ComputeKeyHash(aKey);
@@ -609,17 +609,17 @@ void PLDHashTable::Remove(const void* aK
         ShrinkIfAppropriate();
       },
       [&]() {
         // Do nothing.
       });
 }
 
 void PLDHashTable::RemoveEntry(PLDHashEntryHdr* aEntry) {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   AutoWriteOp op(mChecker);
 #endif
 
   RawRemove(aEntry);
   ShrinkIfAppropriate();
 }
 
 void PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry) {
@@ -664,17 +664,17 @@ void PLDHashTable::ShrinkIfAppropriate()
     MOZ_ASSERT(deltaLog2 <= 0);
 
     (void)ChangeTable(deltaLog2);
   }
 }
 
 size_t PLDHashTable::ShallowSizeOfExcludingThis(
     MallocSizeOf aMallocSizeOf) const {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   AutoReadOp op(mChecker);
 #endif
 
   return aMallocSizeOf(mEntryStore.Get());
 }
 
 size_t PLDHashTable::ShallowSizeOfIncludingThis(
     MallocSizeOf aMallocSizeOf) const {
@@ -700,17 +700,17 @@ PLDHashTable::Iterator::Iterator(Iterato
 PLDHashTable::Iterator::Iterator(PLDHashTable* aTable)
     : mTable(aTable),
       mCurrent(mTable->mEntryStore.SlotForIndex(0, mTable->mEntrySize,
                                                 mTable->Capacity())),
       mNexts(0),
       mNextsLimit(mTable->EntryCount()),
       mHaveRemoved(false),
       mEntrySize(aTable->mEntrySize) {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   mTable->mChecker.StartReadOp();
 #endif
 
   if (ChaosMode::isActive(ChaosFeature::HashTableIteration) &&
       mTable->Capacity() > 0) {
     // Start iterating at a random entry. It would be even more chaotic to
     // iterate in fully random order, but that's harder.
     uint32_t capacity = mTable->CapacityFromHashShift();
@@ -728,44 +728,44 @@ PLDHashTable::Iterator::Iterator(PLDHash
 PLDHashTable::Iterator::Iterator(PLDHashTable* aTable, EndIteratorTag aTag)
     : mTable(aTable),
       mCurrent(mTable->mEntryStore.SlotForIndex(0, mTable->mEntrySize,
                                                 mTable->Capacity())),
       mNexts(mTable->EntryCount()),
       mNextsLimit(mTable->EntryCount()),
       mHaveRemoved(false),
       mEntrySize(aTable->mEntrySize) {
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   mTable->mChecker.StartReadOp();
 #endif
 
   MOZ_ASSERT(Done());
 }
 
 PLDHashTable::Iterator::Iterator(const Iterator& aOther)
     : mTable(aOther.mTable),
       mCurrent(aOther.mCurrent),
       mNexts(aOther.mNexts),
       mNextsLimit(aOther.mNextsLimit),
       mHaveRemoved(aOther.mHaveRemoved),
       mEntrySize(aOther.mEntrySize) {
   // TODO: Is this necessary?
   MOZ_ASSERT(!mHaveRemoved);
 
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   mTable->mChecker.StartReadOp();
 #endif
 }
 
 PLDHashTable::Iterator::~Iterator() {
   if (mTable) {
     if (mHaveRemoved) {
       mTable->ShrinkIfAppropriate();
     }
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
     mTable->mChecker.EndReadOp();
 #endif
   }
 }
 
 MOZ_ALWAYS_INLINE bool PLDHashTable::Iterator::IsOnNonLiveEntry() const {
   MOZ_ASSERT(!Done());
   return !mCurrent.IsLive();
@@ -815,12 +815,8 @@ MOZ_ALWAYS_INLINE void PLDHashTable::Ite
 
   mCurrent = Slot(entry, &hashes[slotIndex]);
 }
 
 void PLDHashTable::Iterator::Remove() {
   mTable->RawRemove(mCurrent);
   mHaveRemoved = true;
 }
-
-#ifdef DEBUG
-void PLDHashTable::MarkImmutable() { mChecker.SetNonWritable(); }
-#endif
--- a/xpcom/ds/PLDHashTable.h
+++ b/xpcom/ds/PLDHashTable.h
@@ -18,16 +18,20 @@
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Move.h"
 #include "mozilla/Types.h"
 #include "nscore.h"
 
 using PLDHashNumber = mozilla::HashNumber;
 static const uint32_t kPLDHashNumberBits = mozilla::kHashNumberBits;
 
+#ifdef DEBUG
+#define MOZ_HASH_TABLE_CHECKS_ENABLED 1
+#endif
+
 class PLDHashTable;
 struct PLDHashTableOps;
 
 // Table entry header structure.
 //
 // In order to allow in-line allocation of key and value, we do not declare
 // either here. Instead, the API uses const void *key as a formal parameter.
 // The key need not be stored in the entry; it may be part of the value, but
@@ -47,17 +51,17 @@ struct PLDHashEntryHdr {
   PLDHashEntryHdr& operator=(const PLDHashEntryHdr&) = delete;
   PLDHashEntryHdr(PLDHashEntryHdr&&) = default;
   PLDHashEntryHdr& operator=(PLDHashEntryHdr&&) = default;
 
  private:
   friend class PLDHashTable;
 };
 
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
 
 // This class does three kinds of checking:
 //
 // - that calls to one of |mOps| or to an enumerator do not cause re-entry into
 //   the table in an unsafe way;
 //
 // - that multiple threads do not access the table in an unsafe way;
 //
@@ -373,17 +377,17 @@ class PLDHashTable {
   const PLDHashTableOps* const mOps;  // Virtual operations; see below.
   EntryStore mEntryStore;             // (Lazy) entry storage and generation.
   uint16_t mGeneration;               // The storage generation.
   uint8_t mHashShift;                 // Multiplicative hash shift.
   const uint8_t mEntrySize;           // Number of bytes in an entry.
   uint32_t mEntryCount;               // Number of entries in table.
   uint32_t mRemovedCount;             // Removed entry sentinels in table.
 
-#ifdef DEBUG
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
   mutable Checker mChecker;
 #endif
 
  public:
   // Table capacity limit; do not exceed. The max capacity used to be 1<<23 but
   // that occasionally that wasn't enough. Making it much bigger than 1<<26
   // probably isn't worthwhile -- tables that big are kind of ridiculous.
   // Also, the growth operation will (deliberately) fail if |capacity *
@@ -510,22 +514,24 @@ class PLDHashTable {
   // Measure the size of the table's entry storage. If the entries contain
   // pointers to other heap blocks, you have to iterate over the table and
   // measure those separately; hence the "Shallow" prefix.
   size_t ShallowSizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
   // Like ShallowSizeOfExcludingThis(), but includes sizeof(*this).
   size_t ShallowSizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
-#ifdef DEBUG
   // Mark a table as immutable for the remainder of its lifetime. This
   // changes the implementation from asserting one set of invariants to
   // asserting a different set.
-  void MarkImmutable();
+  void MarkImmutable() {
+#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
+    mChecker.SetNonWritable();
 #endif
+  }
 
   // If you use PLDHashEntryStub or a subclass of it as your entry struct, and
   // if your entries move via memcpy and clear via memset(0), you can use these
   // stub operations.
   static const PLDHashTableOps* StubOps();
 
   // The individual stub operations in StubOps().
   static PLDHashNumber HashVoidPtrKeyStub(const void* aKey);
--- a/xpcom/ds/nsBaseHashtable.h
+++ b/xpcom/ds/nsBaseHashtable.h
@@ -506,19 +506,17 @@ class nsBaseHashtable
 
   /**
    * Swap the elements in this hashtable with the elements in aOther.
    */
   void SwapElements(nsBaseHashtable& aOther) {
     nsTHashtable<EntryType>::SwapElements(aOther);
   }
 
-#ifdef DEBUG
   using nsTHashtable<EntryType>::MarkImmutable;
-#endif
 };
 
 //
 // nsBaseHashtableET definitions
 //
 
 template <class KeyClass, class DataType>
 nsBaseHashtableET<KeyClass, DataType>::nsBaseHashtableET(KeyTypePointer aKey)
--- a/xpcom/ds/nsStaticNameTable.cpp
+++ b/xpcom/ds/nsStaticNameTable.cpp
@@ -129,19 +129,17 @@ nsStaticCaseInsensitiveNameTable::nsStat
     // zero, in which case this assertion won't fail. But if the index is
     // non-zero (highly likely) then it will fail. In other words, this
     // assertion is likely but not guaranteed to detect if an entry is already
     // used.
     MOZ_ASSERT(entry->mIndex == 0, "Entry already exists!");
 
     entry->mIndex = index;
   }
-#ifdef DEBUG
   mNameTable.MarkImmutable();
-#endif
 }
 
 nsStaticCaseInsensitiveNameTable::~nsStaticCaseInsensitiveNameTable() {
   // manually call the destructor on placement-new'ed objects
   for (uint32_t index = 0; index < mNameTable.EntryCount(); index++) {
     mNameArray[index].~nsDependentCString();
   }
   free((void*)mNameArray);
--- a/xpcom/ds/nsTHashtable.h
+++ b/xpcom/ds/nsTHashtable.h
@@ -304,25 +304,23 @@ class MOZ_NEEDS_NO_VTABLE_TYPE nsTHashta
    * Swap the elements in this hashtable with the elements in aOther.
    */
   void SwapElements(nsTHashtable<EntryType>& aOther) {
     MOZ_ASSERT_IF(this->mTable.Ops() && aOther.mTable.Ops(),
                   this->mTable.Ops() == aOther.mTable.Ops());
     mozilla::Swap(this->mTable, aOther.mTable);
   }
 
-#ifdef DEBUG
   /**
    * Mark the table as constant after initialization.
    *
    * This will prevent assertions when a read-only hash is accessed on multiple
    * threads without synchronization.
    */
   void MarkImmutable() { mTable.MarkImmutable(); }
-#endif
 
  protected:
   PLDHashTable mTable;
 
   static PLDHashNumber s_HashKey(const void* aKey);
 
   static bool s_MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey);
 
@@ -510,20 +508,17 @@ class nsTHashtable<nsPtrHashKey<T>>
 
   using Base::Clear;
   using Base::Count;
   using Base::GetGeneration;
   using Base::IsEmpty;
 
   using Base::ShallowSizeOfExcludingThis;
   using Base::ShallowSizeOfIncludingThis;
-
-#ifdef DEBUG
   using Base::MarkImmutable;
-#endif
 
   /* Wrapper functions */
   EntryType* GetEntry(T* aKey) const {
     return reinterpret_cast<EntryType*>(Base::GetEntry(aKey));
   }
 
   bool Contains(T* aKey) const { return Base::Contains(aKey); }