author | Nicholas Nethercote <nnethercote@mozilla.com>
date | Mon, 25 Aug 2014 17:29:14 -0700
changeset 201815 | 9ebed11d8a2bf847913e14be0e9f33a6c3421040 |
parent 201814 | 2accec91fd6e1cae7e0d62185a6d56bc60098c96 |
child 201816 | 52b9465cc90b3141a46cd05daf52c7eded1c328d |
push id | 48265 |
push user | nnethercote@mozilla.com |
push date | Wed, 27 Aug 2014 05:20:30 +0000 |
treeherder | mozilla-inbound@52b9465cc90b
reviewers | roc |
bugs | 1058335 |
milestone | 34.0a1 |
files | xpcom/glue/pldhash.cpp, xpcom/glue/pldhash.h
--- a/xpcom/glue/pldhash.cpp
+++ b/xpcom/glue/pldhash.cpp
@@ -38,29 +38,29 @@
  * allowed (and therefore the level is 0 or 1, depending on whether they
  * incremented it).
  *
  * Only PL_DHashTableFinish needs to allow this special value.
  */
 #define IMMUTABLE_RECURSION_LEVEL ((uint16_t)-1)
 
 #define RECURSION_LEVEL_SAFE_TO_FINISH(table_)                \
-  (table_->recursionLevel == 0 ||                             \
-   table_->recursionLevel == IMMUTABLE_RECURSION_LEVEL)
+  (table_->mRecursionLevel == 0 ||                            \
+   table_->mRecursionLevel == IMMUTABLE_RECURSION_LEVEL)
 
 #define INCREMENT_RECURSION_LEVEL(table_)                     \
   do {                                                        \
-    if (table_->recursionLevel != IMMUTABLE_RECURSION_LEVEL)  \
-      ++table_->recursionLevel;                               \
+    if (table_->mRecursionLevel != IMMUTABLE_RECURSION_LEVEL) \
+      ++table_->mRecursionLevel;                              \
   } while(0)
 #define DECREMENT_RECURSION_LEVEL(table_)                     \
   do {                                                        \
-    if (table_->recursionLevel != IMMUTABLE_RECURSION_LEVEL) {  \
-      MOZ_ASSERT(table_->recursionLevel > 0);                 \
-      --table_->recursionLevel;                               \
+    if (table_->mRecursionLevel != IMMUTABLE_RECURSION_LEVEL) { \
+      MOZ_ASSERT(table_->mRecursionLevel > 0);                \
+      --table_->mRecursionLevel;                              \
     }                                                         \
   } while(0)
 
 #else
 
 #define INCREMENT_RECURSION_LEVEL(table_) do { } while(0)
 #define DECREMENT_RECURSION_LEVEL(table_) do { } while(0)
@@ -114,46 +114,46 @@ PL_DHashMatchStringKey(PLDHashTable* aTa
          (stub->key && aKey &&
           strcmp((const char*)stub->key, (const char*)aKey) == 0);
 }
 
 MOZ_ALWAYS_INLINE void
 PLDHashTable::MoveEntryStub(const PLDHashEntryHdr* aFrom,
                             PLDHashEntryHdr* aTo)
 {
-  memcpy(aTo, aFrom, entrySize);
+  memcpy(aTo, aFrom, mEntrySize);
 }
 
 void
 PL_DHashMoveEntryStub(PLDHashTable* aTable,
                       const PLDHashEntryHdr* aFrom,
                       PLDHashEntryHdr* aTo)
 {
   aTable->MoveEntryStub(aFrom, aTo);
 }
 
 MOZ_ALWAYS_INLINE void
 PLDHashTable::ClearEntryStub(PLDHashEntryHdr* aEntry)
 {
-  memset(aEntry, 0, entrySize);
+  memset(aEntry, 0, mEntrySize);
 }
 
 void
 PL_DHashClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
 {
   aTable->ClearEntryStub(aEntry);
 }
 
 MOZ_ALWAYS_INLINE void
 PLDHashTable::FreeStringKey(PLDHashEntryHdr* aEntry)
 {
   const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
 
   free((void*)stub->key);
-  memset(aEntry, 0, entrySize);
+  memset(aEntry, 0, mEntrySize);
 }
 
 void
 PL_DHashFreeStringKey(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
 {
   aTable->FreeStringKey(aEntry);
 }
@@ -266,34 +266,34 @@ PLDHashTable::Init(const PLDHashTableOps
   if (capacity < PL_DHASH_MIN_CAPACITY) {
     capacity = PL_DHASH_MIN_CAPACITY;
   }
 
   int log2 = CeilingLog2(capacity);
 
   capacity = 1u << log2;
   MOZ_ASSERT(capacity <= PL_DHASH_MAX_CAPACITY);
-  hashShift = PL_DHASH_BITS - log2;
-  entrySize = aEntrySize;
-  entryCount = removedCount = 0;
-  generation = 0;
+  mHashShift = PL_DHASH_BITS - log2;
+  mEntrySize = aEntrySize;
+  mEntryCount = mRemovedCount = 0;
+  mGeneration = 0;
   uint32_t nbytes;
   if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
     return false;  // overflowed
   }
 
-  entryStore = (char*)aOps->allocTable(this, nbytes);
-  if (!entryStore) {
+  mEntryStore = (char*)aOps->allocTable(this, nbytes);
+  if (!mEntryStore) {
     return false;
   }
-  memset(entryStore, 0, nbytes);
-  METER(memset(&stats, 0, sizeof(stats)));
+  memset(mEntryStore, 0, nbytes);
+  METER(memset(&mStats, 0, sizeof(mStats)));
 
 #ifdef DEBUG
-  recursionLevel = 0;
+  mRecursionLevel = 0;
 #endif
 
   return true;
 }
 
 bool
 PL_DHashTableInit(PLDHashTable* aTable, const PLDHashTableOps* aOps,
                   void* aData, uint32_t aEntrySize,
@@ -308,17 +308,17 @@ PL_DHashTableInit(PLDHashTable* aTable,
 {
   if (!PL_DHashTableInit(aTable, aOps, aData, aEntrySize, fallible_t(),
                          aLength)) {
     if (aLength > PL_DHASH_MAX_INITIAL_LENGTH) {
       MOZ_CRASH();          // the asked-for length was too big
     }
     uint32_t capacity = MinCapacity(aLength), nbytes;
     if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
-      MOZ_CRASH();          // the required entryStore size was too big
+      MOZ_CRASH();          // the required mEntryStore size was too big
     }
     NS_ABORT_OOM(nbytes);   // allocation failed
   }
 }
 
 /*
  * Double hashing needs the second hash code to be relatively prime to table
  * size, so we simply make hash2 odd.
@@ -342,109 +342,109 @@ PL_DHashTableInit(PLDHashTable* aTable,
 #define ENSURE_LIVE_KEYHASH(hash0) if (hash0 < 2) hash0 -= 2; else (void)0
 
 /* Match an entry's keyHash against an unstored one computed from a key. */
 #define MATCH_ENTRY_KEYHASH(entry,hash0) \
     (((entry)->keyHash & ~COLLISION_FLAG) == (hash0))
 
 /* Compute the address of the indexed entry in table. */
 #define ADDRESS_ENTRY(table, index) \
-    ((PLDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
+    ((PLDHashEntryHdr *)((table)->mEntryStore + (index) * (table)->mEntrySize))
 
 MOZ_ALWAYS_INLINE void
 PLDHashTable::Finish()
 {
   INCREMENT_RECURSION_LEVEL(this);
 
   /* Call finalize before clearing entries, so it can enumerate them. */
   ops->finalize(this);
 
   /* Clear any remaining live entries. */
-  char* entryAddr = entryStore;
-  char* entryLimit = entryAddr + Capacity() * entrySize;
+  char* entryAddr = mEntryStore;
+  char* entryLimit = entryAddr + Capacity() * mEntrySize;
   while (entryAddr < entryLimit) {
     PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
     if (ENTRY_IS_LIVE(entry)) {
-      METER(stats.removeEnums++);
+      METER(mStats.mRemoveEnums++);
       ops->clearEntry(this, entry);
     }
-    entryAddr += entrySize;
+    entryAddr += mEntrySize;
   }
 
   DECREMENT_RECURSION_LEVEL(this);
   MOZ_ASSERT(RECURSION_LEVEL_SAFE_TO_FINISH(this));
 
   /* Free entry storage last. */
-  ops->freeTable(this, entryStore);
+  ops->freeTable(this, mEntryStore);
 }
 
 void
 PL_DHashTableFinish(PLDHashTable* aTable)
 {
   aTable->Finish();
 }
 
 PLDHashEntryHdr* PL_DHASH_FASTCALL
 PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash,
                           PLDHashOperator aOp)
 {
-  METER(stats.searches++);
+  METER(mStats.mSearches++);
   NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
                "!(aKeyHash & COLLISION_FLAG)");
 
   /* Compute the primary hash address. */
-  PLDHashNumber hash1 = HASH1(aKeyHash, hashShift);
+  PLDHashNumber hash1 = HASH1(aKeyHash, mHashShift);
   PLDHashEntryHdr* entry = ADDRESS_ENTRY(this, hash1);
 
   /* Miss: return space for a new entry. */
   if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-    METER(stats.misses++);
+    METER(mStats.mMisses++);
     return entry;
   }
 
   /* Hit: return entry. */
   PLDHashMatchEntry matchEntry = ops->matchEntry;
   if (MATCH_ENTRY_KEYHASH(entry, aKeyHash) && matchEntry(this, entry, aKey)) {
-    METER(stats.hits++);
+    METER(mStats.mHits++);
     return entry;
   }
 
   /* Collision: double hash. */
-  int sizeLog2 = PL_DHASH_BITS - hashShift;
-  PLDHashNumber hash2 = HASH2(aKeyHash, sizeLog2, hashShift);
+  int sizeLog2 = PL_DHASH_BITS - mHashShift;
+  PLDHashNumber hash2 = HASH2(aKeyHash, sizeLog2, mHashShift);
   uint32_t sizeMask = (1u << sizeLog2) - 1;
 
   /* Save the first removed entry pointer so PL_DHASH_ADD can recycle it. */
   PLDHashEntryHdr* firstRemoved = nullptr;
 
   for (;;) {
     if (MOZ_UNLIKELY(ENTRY_IS_REMOVED(entry))) {
       if (!firstRemoved) {
         firstRemoved = entry;
       }
     } else {
       if (aOp == PL_DHASH_ADD) {
         entry->keyHash |= COLLISION_FLAG;
       }
     }
 
-    METER(stats.steps++);
+    METER(mStats.mSteps++);
     hash1 -= hash2;
     hash1 &= sizeMask;
 
     entry = ADDRESS_ENTRY(this, hash1);
     if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-      METER(stats.misses++);
+      METER(mStats.mMisses++);
       return (firstRemoved && aOp == PL_DHASH_ADD) ? firstRemoved : entry;
     }
 
     if (MATCH_ENTRY_KEYHASH(entry, aKeyHash) &&
         matchEntry(this, entry, aKey)) {
-      METER(stats.hits++);
+      METER(mStats.mHits++);
       return entry;
     }
   }
 
   /* NOTREACHED */
   return nullptr;
 }
@@ -456,212 +456,211 @@ PLDHashTable::SearchTable(const void* aK
  * structure.
  * Avoiding the need for |aKey| means we can avoid needing a way to map
  * entries to keys, which means callers can use complex key types more
  * easily.
  */
 PLDHashEntryHdr* PL_DHASH_FASTCALL
 PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash)
 {
-  METER(stats.searches++);
+  METER(mStats.mSearches++);
   NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
                "!(aKeyHash & COLLISION_FLAG)");
 
   /* Compute the primary hash address. */
-  PLDHashNumber hash1 = HASH1(aKeyHash, hashShift);
+  PLDHashNumber hash1 = HASH1(aKeyHash, mHashShift);
   PLDHashEntryHdr* entry = ADDRESS_ENTRY(this, hash1);
 
   /* Miss: return space for a new entry. */
   if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-    METER(stats.misses++);
+    METER(mStats.mMisses++);
     return entry;
   }
 
   /* Collision: double hash. */
-  int sizeLog2 = PL_DHASH_BITS - hashShift;
-  PLDHashNumber hash2 = HASH2(aKeyHash, sizeLog2, hashShift);
+  int sizeLog2 = PL_DHASH_BITS - mHashShift;
+  PLDHashNumber hash2 = HASH2(aKeyHash, sizeLog2, mHashShift);
   uint32_t sizeMask = (1u << sizeLog2) - 1;
 
   for (;;) {
     NS_ASSERTION(!ENTRY_IS_REMOVED(entry),
                  "!ENTRY_IS_REMOVED(entry)");
     entry->keyHash |= COLLISION_FLAG;
 
-    METER(stats.steps++);
+    METER(mStats.mSteps++);
     hash1 -= hash2;
     hash1 &= sizeMask;
 
     entry = ADDRESS_ENTRY(this, hash1);
     if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-      METER(stats.misses++);
+      METER(mStats.mMisses++);
       return entry;
     }
   }
 
   /* NOTREACHED */
   return nullptr;
 }
 
 bool
 PLDHashTable::ChangeTable(int aDeltaLog2)
 {
   /* Look, but don't touch, until we succeed in getting new entry store. */
-  int oldLog2 = PL_DHASH_BITS - hashShift;
+  int oldLog2 = PL_DHASH_BITS - mHashShift;
   int newLog2 = oldLog2 + aDeltaLog2;
   uint32_t newCapacity = 1u << newLog2;
   if (newCapacity > PL_DHASH_MAX_CAPACITY) {
     return false;
   }
 
   uint32_t nbytes;
-  if (!SizeOfEntryStore(newCapacity, entrySize, &nbytes)) {
+  if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
    return false;   // overflowed
   }
 
   char* newEntryStore = (char*)ops->allocTable(this, nbytes);
   if (!newEntryStore) {
     return false;
   }
 
   /* We can't fail from here on, so update table parameters. */
 #ifdef DEBUG
-  uint32_t recursionLevelTmp = recursionLevel;
+  uint32_t recursionLevelTmp = mRecursionLevel;
 #endif
-  hashShift = PL_DHASH_BITS - newLog2;
-  removedCount = 0;
-  generation++;
+  mHashShift = PL_DHASH_BITS - newLog2;
+  mRemovedCount = 0;
+  mGeneration++;
 
   /* Assign the new entry store to table. */
   memset(newEntryStore, 0, nbytes);
   char* oldEntryStore;
   char* oldEntryAddr;
-  oldEntryAddr = oldEntryStore = entryStore;
-  entryStore = newEntryStore;
+  oldEntryAddr = oldEntryStore = mEntryStore;
+  mEntryStore = newEntryStore;
   PLDHashMoveEntry moveEntry = ops->moveEntry;
 #ifdef DEBUG
-  recursionLevel = recursionLevelTmp;
+  mRecursionLevel = recursionLevelTmp;
 #endif
 
   /* Copy only live entries, leaving removed ones behind. */
   uint32_t oldCapacity = 1u << oldLog2;
   for (uint32_t i = 0; i < oldCapacity; ++i) {
     PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr;
     if (ENTRY_IS_LIVE(oldEntry)) {
       oldEntry->keyHash &= ~COLLISION_FLAG;
       PLDHashEntryHdr* newEntry = FindFreeEntry(oldEntry->keyHash);
       NS_ASSERTION(PL_DHASH_ENTRY_IS_FREE(newEntry),
                    "PL_DHASH_ENTRY_IS_FREE(newEntry)");
       moveEntry(this, oldEntry, newEntry);
       newEntry->keyHash = oldEntry->keyHash;
     }
-    oldEntryAddr += entrySize;
+    oldEntryAddr += mEntrySize;
   }
 
   ops->freeTable(this, oldEntryStore);
   return true;
 }
 
 MOZ_ALWAYS_INLINE PLDHashEntryHdr*
 PLDHashTable::Operate(const void* aKey, PLDHashOperator aOp)
 {
   PLDHashEntryHdr* entry;
 
-  MOZ_ASSERT(aOp == PL_DHASH_LOOKUP || recursionLevel == 0);
+  MOZ_ASSERT(aOp == PL_DHASH_LOOKUP || mRecursionLevel == 0);
   INCREMENT_RECURSION_LEVEL(this);
 
   PLDHashNumber keyHash = ops->hashKey(this, aKey);
   keyHash *= PL_DHASH_GOLDEN_RATIO;
 
   /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
   ENSURE_LIVE_KEYHASH(keyHash);
   keyHash &= ~COLLISION_FLAG;
 
   switch (aOp) {
     case PL_DHASH_LOOKUP:
-      METER(stats.lookups++);
+      METER(mStats.mLookups++);
      entry = SearchTable(aKey, keyHash, aOp);
      break;

    case PL_DHASH_ADD: {
      /*
       * If alpha is >= .75, grow or compress the table. If aKey is already
       * in the table, we may grow once more than necessary, but only if we
       * are on the edge of being overloaded.
       */
      uint32_t capacity = Capacity();
-      if (entryCount + removedCount >= MaxLoad(capacity)) {
+      if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
        /* Compress if a quarter or more of all entries are removed. */
        int deltaLog2;
-        if (removedCount >= capacity >> 2) {
-          METER(stats.compresses++);
+        if (mRemovedCount >= capacity >> 2) {
+          METER(mStats.mCompresses++);
          deltaLog2 = 0;
        } else {
-          METER(stats.grows++);
+          METER(mStats.mGrows++);
          deltaLog2 = 1;
        }

        /*
         * Grow or compress the table. If ChangeTable() fails, allow
         * overloading up to the secondary max. Once we hit the secondary
         * max, return null.
         */
        if (!ChangeTable(deltaLog2) &&
-            entryCount + removedCount >=
-            MaxLoadOnGrowthFailure(capacity)) {
-          METER(stats.addFailures++);
+            mEntryCount + mRemovedCount >= MaxLoadOnGrowthFailure(capacity)) {
+          METER(mStats.mAddFailures++);
          entry = nullptr;
          break;
        }
      }

      /*
       * Look for entry after possibly growing, so we don't have to add it,
       * then skip it while growing the table and re-add it after.
       */
      entry = SearchTable(aKey, keyHash, aOp);
      if (!ENTRY_IS_LIVE(entry)) {
        /* Initialize the entry, indicating that it's no longer free. */
-        METER(stats.addMisses++);
+        METER(mStats.mAddMisses++);
        if (ENTRY_IS_REMOVED(entry)) {
-          METER(stats.addOverRemoved++);
-          removedCount--;
+          METER(mStats.mAddOverRemoved++);
+          mRemovedCount--;
          keyHash |= COLLISION_FLAG;
        }
        if (ops->initEntry && !ops->initEntry(this, entry, aKey)) {
          /* We haven't claimed entry yet; fail with null return. */
-          memset(entry + 1, 0, entrySize - sizeof(*entry));
+          memset(entry + 1, 0, mEntrySize - sizeof(*entry));
          entry = nullptr;
          break;
        }
        entry->keyHash = keyHash;
-        entryCount++;
+        mEntryCount++;
      }
      METER(else {
-        stats.addHits++;
+        mStats.mAddHits++;
      });
      break;
    }

    case PL_DHASH_REMOVE:
      entry = SearchTable(aKey, keyHash, aOp);
      if (ENTRY_IS_LIVE(entry)) {
        /* Clear this entry and mark it as "removed". */
-        METER(stats.removeHits++);
+        METER(mStats.mRemoveHits++);
        PL_DHashTableRawRemove(this, entry);

        /* Shrink if alpha is <= .25 and the table isn't too small already. */
        uint32_t capacity = Capacity();
        if (capacity > PL_DHASH_MIN_CAPACITY &&
-            entryCount <= MinLoad(capacity)) {
-          METER(stats.shrinks++);
+            mEntryCount <= MinLoad(capacity)) {
+          METER(mStats.mShrinks++);
          (void) ChangeTable(-1);
        }
      }
      METER(else {
-        stats.removeMisses++;
+        mStats.mRemoveMisses++;
      });
      entry = nullptr;
      break;

    default:
      NS_NOTREACHED("0");
      entry = nullptr;
  }
@@ -675,102 +674,102 @@ PLDHashEntryHdr* PL_DHASH_FASTCALL
 PL_DHashTableOperate(PLDHashTable* aTable, const void* aKey,
                      PLDHashOperator aOp)
 {
   return aTable->Operate(aKey, aOp);
 }
 
 MOZ_ALWAYS_INLINE void
 PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry)
 {
-  MOZ_ASSERT(recursionLevel != IMMUTABLE_RECURSION_LEVEL);
+  MOZ_ASSERT(mRecursionLevel != IMMUTABLE_RECURSION_LEVEL);
 
   NS_ASSERTION(ENTRY_IS_LIVE(aEntry), "ENTRY_IS_LIVE(aEntry)");
 
   /* Load keyHash first in case clearEntry() goofs it. */
   PLDHashNumber keyHash = aEntry->keyHash;
   ops->clearEntry(this, aEntry);
   if (keyHash & COLLISION_FLAG) {
     MARK_ENTRY_REMOVED(aEntry);
-    removedCount++;
+    mRemovedCount++;
   } else {
-    METER(stats.removeFrees++);
+    METER(mStats.mRemoveFrees++);
     MARK_ENTRY_FREE(aEntry);
   }
-  entryCount--;
+  mEntryCount--;
 }
 
 void
 PL_DHashTableRawRemove(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
 {
   aTable->RawRemove(aEntry);
 }
 
 MOZ_ALWAYS_INLINE uint32_t
 PLDHashTable::Enumerate(PLDHashEnumerator aEtor, void* aArg)
 {
   INCREMENT_RECURSION_LEVEL(this);
 
-  char* entryAddr = entryStore;
+  char* entryAddr = mEntryStore;
   uint32_t capacity = Capacity();
-  uint32_t tableSize = capacity * entrySize;
+  uint32_t tableSize = capacity * mEntrySize;
   char* entryLimit = entryAddr + tableSize;
   uint32_t i = 0;
   bool didRemove = false;
 
   if (ChaosMode::isActive()) {
     // Start iterating at a random point in the hashtable. It would be
     // even more chaotic to iterate in fully random order, but that's a lot
     // more work.
-    entryAddr += ChaosMode::randomUint32LessThan(capacity) * entrySize;
+    entryAddr += ChaosMode::randomUint32LessThan(capacity) * mEntrySize;
     if (entryAddr >= entryLimit) {
       entryAddr -= tableSize;
     }
   }
 
   for (uint32_t e = 0; e < capacity; ++e) {
     PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
     if (ENTRY_IS_LIVE(entry)) {
       PLDHashOperator op = aEtor(this, entry, i++, aArg);
       if (op & PL_DHASH_REMOVE) {
-        METER(stats.removeEnums++);
+        METER(mStats.mRemoveEnums++);
         PL_DHashTableRawRemove(this, entry);
         didRemove = true;
       }
       if (op & PL_DHASH_STOP) {
         break;
       }
     }
-    entryAddr += entrySize;
+    entryAddr += mEntrySize;
     if (entryAddr >= entryLimit) {
       entryAddr -= tableSize;
     }
   }
 
-  MOZ_ASSERT(!didRemove || recursionLevel == 1);
+  MOZ_ASSERT(!didRemove || mRecursionLevel == 1);
 
   /*
    * Shrink or compress if a quarter or more of all entries are removed, or
    * if the table is underloaded according to the minimum alpha, and is not
    * minimal-size already. Do this only if we removed above, so non-removing
-   * enumerations can count on stable |entryStore| until the next
+   * enumerations can count on stable |mEntryStore| until the next
    * non-lookup-Operate or removing-Enumerate.
    */
   if (didRemove &&
-      (removedCount >= capacity >> 2 ||
+      (mRemovedCount >= capacity >> 2 ||
        (capacity > PL_DHASH_MIN_CAPACITY &&
-        entryCount <= MinLoad(capacity)))) {
-    METER(stats.enumShrinks++);
-    capacity = entryCount;
+        mEntryCount <= MinLoad(capacity)))) {
+    METER(mStats.mEnumShrinks++);
+    capacity = mEntryCount;
     capacity += capacity >> 1;
     if (capacity < PL_DHASH_MIN_CAPACITY) {
       capacity = PL_DHASH_MIN_CAPACITY;
     }
 
     uint32_t ceiling = CeilingLog2(capacity);
-    ceiling -= PL_DHASH_BITS - hashShift;
+    ceiling -= PL_DHASH_BITS - mHashShift;
 
     (void) ChangeTable(ceiling);
   }
 
   DECREMENT_RECURSION_LEVEL(this);
 
   return i;
 }
@@ -800,17 +799,17 @@ SizeOfEntryExcludingThisEnumerator(PLDHa
 }
 
 MOZ_ALWAYS_INLINE size_t
 PLDHashTable::SizeOfExcludingThis(
     PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
     MallocSizeOf aMallocSizeOf, void* aArg /* = nullptr */) const
 {
   size_t n = 0;
-  n += aMallocSizeOf(entryStore);
+  n += aMallocSizeOf(mEntryStore);
   if (aSizeOfEntryExcludingThis) {
     SizeOfEntryExcludingThisArg arg2 =
       { 0, aSizeOfEntryExcludingThis, aMallocSizeOf, aArg };
     PL_DHashTableEnumerate(const_cast<PLDHashTable*>(this),
                            SizeOfEntryExcludingThisEnumerator, &arg2);
     n += arg2.total;
   }
@@ -845,17 +844,17 @@ PL_DHashTableSizeOfIncludingThis(
   return aTable->SizeOfIncludingThis(aSizeOfEntryExcludingThis,
                                      aMallocSizeOf, aArg);
 }
 
 #ifdef DEBUG
 MOZ_ALWAYS_INLINE void
 PLDHashTable::MarkImmutable()
 {
-  recursionLevel = IMMUTABLE_RECURSION_LEVEL;
+  mRecursionLevel = IMMUTABLE_RECURSION_LEVEL;
 }
 
 void
 PL_DHashMarkTableImmutable(PLDHashTable* aTable)
 {
   aTable->MarkImmutable();
 }
 #endif
@@ -865,93 +864,92 @@ PL_DHashMarkTableImmutable(PLDHashTable*
 void
 PLDHashTable::DumpMeter(PLDHashEnumerator aDump, FILE* aFp)
 {
   PLDHashNumber hash1, hash2, maxChainHash1, maxChainHash2;
   double sqsum, mean, variance, sigma;
   PLDHashEntryHdr* entry;
 
-  char* entryAddr = entryStore;
-  int sizeLog2 = PL_DHASH_BITS - hashShift;
+  char* entryAddr = mEntryStore;
+  int sizeLog2 = PL_DHASH_BITS - mHashShift;
   uint32_t capacity = Capacity();
   uint32_t sizeMask = (1u << sizeLog2) - 1;
   uint32_t chainCount = 0, maxChainLen = 0;
   hash2 = 0;
   sqsum = 0;
 
   for (uint32_t i = 0; i < capacity; i++) {
     entry = (PLDHashEntryHdr*)entryAddr;
-    entryAddr += entrySize;
+    entryAddr += mEntrySize;
     if (!ENTRY_IS_LIVE(entry)) {
       continue;
     }
-    hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, hashShift);
+    hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, mHashShift);
     PLDHashNumber saveHash1 = hash1;
     PLDHashEntryHdr* probe = ADDRESS_ENTRY(this, hash1);
     uint32_t chainLen = 1;
     if (probe == entry) {
       /* Start of a (possibly unit-length) chain. */
       chainCount++;
     } else {
-      hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2,
-                    hashShift);
+      hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2, mHashShift);
       do {
         chainLen++;
         hash1 -= hash2;
         hash1 &= sizeMask;
         probe = ADDRESS_ENTRY(this, hash1);
       } while (probe != entry);
     }
     sqsum += chainLen * chainLen;
     if (chainLen > maxChainLen) {
       maxChainLen = chainLen;
       maxChainHash1 = saveHash1;
       maxChainHash2 = hash2;
     }
   }
 
-  if (entryCount && chainCount) {
-    mean = (double)entryCount / chainCount;
-    variance = chainCount * sqsum - entryCount * entryCount;
+  if (mEntryCount && chainCount) {
+    mean = (double)mEntryCount / chainCount;
+    variance = chainCount * sqsum - mEntryCount * mEntryCount;
     if (variance < 0 || chainCount == 1) {
       variance = 0;
     } else {
       variance /= chainCount * (chainCount - 1);
     }
     sigma = sqrt(variance);
   } else {
     mean = sigma = 0;
   }
 
   fprintf(aFp, "Double hashing statistics:\n");
-  fprintf(aFp, "    table size (in entries): %u\n", tableSize);
-  fprintf(aFp, "          number of entries: %u\n", entryCount);
-  fprintf(aFp, "  number of removed entries: %u\n", removedCount);
-  fprintf(aFp, "         number of searches: %u\n", stats.searches);
-  fprintf(aFp, "             number of hits: %u\n", stats.hits);
-  fprintf(aFp, "           number of misses: %u\n", stats.misses);
+  fprintf(aFp, "      capacity (in entries): %u\n", Capacity());
+  fprintf(aFp, "          number of entries: %u\n", mEntryCount);
+  fprintf(aFp, "  number of removed entries: %u\n", mRemovedCount);
+  fprintf(aFp, "         number of searches: %u\n", mStats.mSearches);
+  fprintf(aFp, "             number of hits: %u\n", mStats.mHits);
+  fprintf(aFp, "           number of misses: %u\n", mStats.mMisses);
   fprintf(aFp, "      mean steps per search: %g\n",
-          stats.searches ? (double)stats.steps / stats.searches : 0.);
+          mStats.mSearches ? (double)mStats.mSteps / mStats.mSearches : 0.);
   fprintf(aFp, "     mean hash chain length: %g\n", mean);
   fprintf(aFp, "         standard deviation: %g\n", sigma);
   fprintf(aFp, "  maximum hash chain length: %u\n", maxChainLen);
-  fprintf(aFp, "          number of lookups: %u\n", stats.lookups);
-  fprintf(aFp, " adds that made a new entry: %u\n", stats.addMisses);
-  fprintf(aFp, "adds that recycled removeds: %u\n", stats.addOverRemoved);
-  fprintf(aFp, "   adds that found an entry: %u\n", stats.addHits);
-  fprintf(aFp, "               add failures: %u\n", stats.addFailures);
-  fprintf(aFp, "             useful removes: %u\n", stats.removeHits);
-  fprintf(aFp, "            useless removes: %u\n", stats.removeMisses);
-  fprintf(aFp, "removes that freed an entry: %u\n", stats.removeFrees);
-  fprintf(aFp, "  removes while enumerating: %u\n", stats.removeEnums);
-  fprintf(aFp, "            number of grows: %u\n", stats.grows);
-  fprintf(aFp, "          number of shrinks: %u\n", stats.shrinks);
-  fprintf(aFp, "       number of compresses: %u\n", stats.compresses);
-  fprintf(aFp, "number of enumerate shrinks: %u\n", stats.enumShrinks);
+  fprintf(aFp, "          number of lookups: %u\n", mStats.mLookups);
+  fprintf(aFp, " adds that made a new entry: %u\n", mStats.mAddMisses);
+  fprintf(aFp, "adds that recycled removeds: %u\n", mStats.mAddOverRemoved);
+  fprintf(aFp, "   adds that found an entry: %u\n", mStats.mAddHits);
+  fprintf(aFp, "               add failures: %u\n", mStats.mAddFailures);
+  fprintf(aFp, "             useful removes: %u\n", mStats.mRemoveHits);
+  fprintf(aFp, "            useless removes: %u\n", mStats.mRemoveMisses);
+  fprintf(aFp, "removes that freed an entry: %u\n", mStats.mRemoveFrees);
+  fprintf(aFp, "  removes while enumerating: %u\n", mStats.mRemoveEnums);
+  fprintf(aFp, "            number of grows: %u\n", mStats.mGrows);
+  fprintf(aFp, "          number of shrinks: %u\n", mStats.mShrinks);
+  fprintf(aFp, "       number of compresses: %u\n", mStats.mCompresses);
+  fprintf(aFp, "number of enumerate shrinks: %u\n", mStats.mEnumShrinks);
 
   if (aDump && maxChainLen && hash2) {
     fputs("Maximum hash chain:\n", aFp);
     hash1 = maxChainHash1;
     hash2 = maxChainHash2;
     entry = ADDRESS_ENTRY(this, hash1);
     uint32_t i = 0;
     do {
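
Aside: the probing scheme that the renamed members feed into is ordinary double hashing. HASH1 selects the starting slot from the top bits of the scrambled key hash, and HASH2 derives an odd stride, which is relatively prime to the power-of-two capacity, so the probe sequence visits every slot exactly once. The standalone sketch below mirrors the walk that SearchTable() and FindFreeEntry() perform; Hash1, Hash2, kHashBits, and kGoldenRatio are illustrative stand-ins for the pldhash macros, not the actual Mozilla code.

// Illustrative sketch (not Mozilla code) of a pldhash-style probe walk.
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t kHashBits = 32;             // stands in for PL_DHASH_BITS
static const uint32_t kGoldenRatio = 0x9E3779B9u; // stands in for PL_DHASH_GOLDEN_RATIO

// Primary slot: the top sizeLog2 bits of the scrambled hash.
static uint32_t Hash1(uint32_t aHash0, uint32_t aShift)
{
  return aHash0 >> aShift;
}

// Secondary stride: forced odd, so it is relatively prime to the
// power-of-two capacity and the probe sequence covers the whole table.
static uint32_t Hash2(uint32_t aHash0, uint32_t aSizeLog2, uint32_t aShift)
{
  return ((aHash0 << aSizeLog2) >> aShift) | 1;
}

int main()
{
  const uint32_t sizeLog2 = 4;                    // capacity 16
  const uint32_t hashShift = kHashBits - sizeLog2;
  const uint32_t sizeMask = (1u << sizeLog2) - 1;

  uint32_t keyHash = 42u * kGoldenRatio;          // scramble the raw key hash
  uint32_t hash1 = Hash1(keyHash, hashShift);
  uint32_t hash2 = Hash2(keyHash, sizeLog2, hashShift);

  // A real search stops at a free slot (miss) or a matching entry (hit);
  // here we only demonstrate that the sequence visits every slot once.
  std::vector<bool> seen(1u << sizeLog2, false);
  for (uint32_t step = 0; step <= sizeMask; ++step) {
    printf("probe %2u -> slot %2u\n", step, hash1 & sizeMask);
    seen[hash1 & sizeMask] = true;
    hash1 -= hash2;                               // same step as SearchTable()
  }
  for (bool s : seen) {
    if (!s) { return 1; }                         // unreachable: stride is odd
  }
  return 0;
}

Subtracting the stride rather than adding it matches the code in the patch; either direction cycles through all slots because the stride is odd.
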
--- a/xpcom/glue/pldhash.h
+++ b/xpcom/glue/pldhash.h
@@ -9,30 +9,34 @@
 /*
  * Double hashing, a la Knuth 6.
  */
 #include "mozilla/fallible.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Types.h"
 #include "nscore.h"
 
+#ifdef PL_DHASHMETER
+#include <stdio.h>
+#endif
+
 #if defined(__GNUC__) && defined(__i386__)
 #define PL_DHASH_FASTCALL __attribute__ ((regparm (3),stdcall))
 #elif defined(XP_WIN)
 #define PL_DHASH_FASTCALL __fastcall
 #else
 #define PL_DHASH_FASTCALL
 #endif
 
 /*
  * Table capacity limit; do not exceed. The max capacity used to be 1<<23 but
  * that occasionally that wasn't enough. Making it much bigger than 1<<26
  * probably isn't worthwhile -- tables that big are kind of ridiculous. Also,
- * the growth operation will (deliberately) fail if |capacity * entrySize|
- * overflows a uint32_t, and entrySize is always at least 8 bytes.
+ * the growth operation will (deliberately) fail if |capacity * mEntrySize|
+ * overflows a uint32_t, and mEntrySize is always at least 8 bytes.
  */
 #define PL_DHASH_MAX_CAPACITY   ((uint32_t)1 << 26)
 
 #define PL_DHASH_MIN_CAPACITY   8
 
 /*
  * Making this half of the max capacity ensures it'll fit. Nobody should need
  * an initial length anywhere nearly this large, anyway.
@@ -117,42 +121,42 @@ typedef enum PLDHashOperator
  *
  *      count = PL_DHashTableEnumerate(table, etor, arg);
  *
  * PL_DHashTableEnumerate calls etor like so:
  *
  *      op = etor(table, entry, number, arg);
  *
  * where number is a zero-based ordinal assigned to live entries according to
- * their order in aTable->entryStore.
+ * their order in aTable->mEntryStore.
  *
  * The return value, op, is treated as a set of flags. If op is PL_DHASH_NEXT,
  * then continue enumerating. If op contains PL_DHASH_REMOVE, then clear (via
  * aTable->ops->clearEntry) and free entry. Then we check whether op contains
  * PL_DHASH_STOP; if so, stop enumerating and return the number of live entries
  * that were enumerated so far. Return the total number of live entries when
  * enumeration completes normally.
  *
  * If etor calls PL_DHashTableOperate on table with op != PL_DHASH_LOOKUP, it
  * must return PL_DHASH_STOP; otherwise undefined behavior results.
  *
- * If any enumerator returns PL_DHASH_REMOVE, aTable->entryStore may be shrunk
+ * If any enumerator returns PL_DHASH_REMOVE, aTable->mEntryStore may be shrunk
  * or compressed after enumeration, but before PL_DHashTableEnumerate returns.
  * Such an enumerator therefore can't safely set aside entry pointers, but an
  * enumerator that never returns PL_DHASH_REMOVE can set pointers to entries
  * aside, e.g., to avoid copying live entries into an array of the entry type.
  * Copying entry pointers is cheaper, and safe so long as the caller of such a
  * "stable" Enumerate doesn't use the set-aside pointers after any call either
  * to PL_DHashTableOperate, or to an "unstable" form of Enumerate, which might
- * grow or shrink entryStore.
+ * grow or shrink mEntryStore.
  *
  * If your enumerator wants to remove certain entries, but set aside pointers
  * to other entries that it retains, it can use PL_DHashTableRawRemove on the
  * entries to be removed, returning PL_DHASH_NEXT to skip them. Likewise, if
- * you want to remove entries, but for some reason you do not want entryStore
+ * you want to remove entries, but for some reason you do not want mEntryStore
  * to be shrunk or compressed, you can call PL_DHashTableRawRemove safely on
  * the entry being enumerated, rather than returning PL_DHASH_REMOVE.
  */
 
 typedef PLDHashOperator (*PLDHashEnumerator)(PLDHashTable* aTable,
                                              PLDHashEntryHdr* aHdr,
                                              uint32_t aNumber, void* aArg);
 
 typedef size_t (*PLDHashSizeOfEntryExcludingThisFun)(
@@ -217,86 +221,86 @@ typedef size_t (*PLDHashSizeOfEntryExclu
  *
  * The current implementation uses a lower bound of 0.25 for alpha when
  * deciding whether to shrink the table (while still respecting
  * PL_DHASH_MIN_CAPACITY).
  *
  * Note a qualitative difference between chaining and double hashing: under
  * chaining, entry addresses are stable across table shrinks and grows. With
  * double hashing, you can't safely hold an entry pointer and use it after an
- * ADD or REMOVE operation, unless you sample aTable->generation before adding
+ * ADD or REMOVE operation, unless you sample aTable->mGeneration before adding
  * or removing, and compare the sample after, dereferencing the entry pointer
- * only if aTable->generation has not changed.
+ * only if aTable->mGeneration has not changed.
  *
  * The moral of this story: there is no one-size-fits-all hash table scheme,
  * but for small table entry size, and assuming entry address stability is not
  * required, double hashing wins.
  */
 struct PLDHashTable
 {
   /*
    * Virtual operations; see below. This field is public because it's commonly
    * zeroed to indicate that a table is no longer live.
    */
   const PLDHashTableOps* ops;
   void* data;              /* ops- and instance-specific data */
 
 private:
-  int16_t hashShift;       /* multiplicative hash shift */
+  int16_t mHashShift;      /* multiplicative hash shift */
   /*
-   * |recursionLevel| is only used in debug builds, but is present in opt
+   * |mRecursionLevel| is only used in debug builds, but is present in opt
    * builds to avoid binary compatibility problems when mixing DEBUG and
    * non-DEBUG components. (Actually, even if it were removed,
    * sizeof(PLDHashTable) wouldn't change, due to struct padding.)
    */
-  uint16_t recursionLevel; /* used to detect unsafe re-entry */
-  uint32_t entrySize;      /* number of bytes in an entry */
-  uint32_t entryCount;     /* number of entries in table */
-  uint32_t removedCount;   /* removed entry sentinels in table */
-  uint32_t generation;     /* entry storage generation number */
-  char* entryStore;        /* entry storage */
+  uint16_t mRecursionLevel;/* used to detect unsafe re-entry */
+  uint32_t mEntrySize;     /* number of bytes in an entry */
+  uint32_t mEntryCount;    /* number of entries in table */
+  uint32_t mRemovedCount;  /* removed entry sentinels in table */
+  uint32_t mGeneration;    /* entry storage generation number */
+  char* mEntryStore;       /* entry storage */
 
 #ifdef PL_DHASHMETER
   struct PLDHashStats
   {
-    uint32_t searches;       /* total number of table searches */
-    uint32_t steps;          /* hash chain links traversed */
-    uint32_t hits;           /* searches that found key */
-    uint32_t misses;         /* searches that didn't find key */
-    uint32_t lookups;        /* number of PL_DHASH_LOOKUPs */
-    uint32_t addMisses;      /* adds that miss, and do work */
-    uint32_t addOverRemoved; /* adds that recycled a removed entry */
-    uint32_t addHits;        /* adds that hit an existing entry */
-    uint32_t addFailures;    /* out-of-memory during add growth */
-    uint32_t removeHits;     /* removes that hit, and do work */
-    uint32_t removeMisses;   /* useless removes that miss */
-    uint32_t removeFrees;    /* removes that freed entry directly */
-    uint32_t removeEnums;    /* removes done by Enumerate */
-    uint32_t grows;          /* table expansions */
-    uint32_t shrinks;        /* table contractions */
-    uint32_t compresses;     /* table compressions */
-    uint32_t enumShrinks;    /* contractions after Enumerate */
-  } stats;
+    uint32_t mSearches;      /* total number of table searches */
+    uint32_t mSteps;         /* hash chain links traversed */
+    uint32_t mHits;          /* searches that found key */
+    uint32_t mMisses;        /* searches that didn't find key */
+    uint32_t mLookups;       /* number of PL_DHASH_LOOKUPs */
+    uint32_t mAddMisses;     /* adds that miss, and do work */
+    uint32_t mAddOverRemoved;/* adds that recycled a removed entry */
+    uint32_t mAddHits;       /* adds that hit an existing entry */
+    uint32_t mAddFailures;   /* out-of-memory during add growth */
+    uint32_t mRemoveHits;    /* removes that hit, and do work */
+    uint32_t mRemoveMisses;  /* useless removes that miss */
+    uint32_t mRemoveFrees;   /* removes that freed entry directly */
+    uint32_t mRemoveEnums;   /* removes done by Enumerate */
+    uint32_t mGrows;         /* table expansions */
+    uint32_t mShrinks;       /* table contractions */
+    uint32_t mCompresses;    /* table compressions */
+    uint32_t mEnumShrinks;   /* contractions after Enumerate */
+  } mStats;
 #endif
 
 public:
   /*
    * Size in entries (gross, not net of free and removed sentinels) for table.
-   * We store hashShift rather than sizeLog2 to optimize the collision-free case
-   * in SearchTable.
+   * We store mHashShift rather than sizeLog2 to optimize the collision-free
+   * case in SearchTable.
    */
   uint32_t Capacity() const
   {
-    return ((uint32_t)1 << (PL_DHASH_BITS - hashShift));
+    return ((uint32_t)1 << (PL_DHASH_BITS - mHashShift));
   }
 
-  uint32_t EntrySize() const { return entrySize; }
-  uint32_t EntryCount() const { return entryCount; }
-  uint32_t Generation() const { return generation; }
+  uint32_t EntrySize() const { return mEntrySize; }
+  uint32_t EntryCount() const { return mEntryCount; }
+  uint32_t Generation() const { return mGeneration; }
 
   bool Init(const PLDHashTableOps* aOps, void* aData, uint32_t aEntrySize,
             const mozilla::fallible_t&, uint32_t aLength);
 
   void Finish();
 
   PLDHashEntryHdr* Operate(const void* aKey, PLDHashOperator aOp);
@@ -331,17 +335,17 @@ private:
     SearchTable(const void* aKey, PLDHashNumber aKeyHash, PLDHashOperator aOp);
 
   PLDHashEntryHdr* PL_DHASH_FASTCALL FindFreeEntry(PLDHashNumber aKeyHash);
 
   bool ChangeTable(int aDeltaLog2);
 };
 
 /*
- * Table space at entryStore is allocated and freed using these callbacks.
+ * Table space at mEntryStore is allocated and freed using these callbacks.
  * The allocator should return null on error only (not if called with aNBytes
  * equal to 0; but note that pldhash.c code will never call with 0 aNBytes).
  */
 typedef void* (*PLDHashAllocTable)(PLDHashTable* aTable, uint32_t aNBytes);
 
 typedef void (*PLDHashFreeTable)(PLDHashTable* aTable, void* aPtr);
 
 /*
@@ -482,17 +486,17 @@ NS_COM_GLUE void PL_DHashFinalizeStub(PL
  * if your entries move via memcpy and clear via memset(0), you can use these
  * stub operations.
  */
 NS_COM_GLUE const PLDHashTableOps* PL_DHashGetStubOps(void);
 
 /*
  * Dynamically allocate a new PLDHashTable using malloc, initialize it using
  * PL_DHashTableInit, and return its address. Return null on malloc failure.
- * Note that the entry storage at aTable->entryStore will be allocated using
+ * Note that the entry storage at aTable->mEntryStore will be allocated using
  * the aOps->allocTable callback.
  */
 NS_COM_GLUE PLDHashTable* PL_NewDHashTable(
   const PLDHashTableOps* aOps, void* aData, uint32_t aEntrySize,
   uint32_t aLength = PL_DHASH_DEFAULT_INITIAL_LENGTH);
 
 /*
  * Finalize aTable's data, free its entry storage (via aTable->ops->freeTable),
@@ -565,17 +569,17 @@ PL_DHashTableOperate(PLDHashTable* aTabl
                      PLDHashOperator aOp);
 
 /*
  * Remove an entry already accessed via LOOKUP or ADD.
  *
  * NB: this is a "raw" or low-level routine, intended to be used only where
  * the inefficiency of a full PL_DHashTableOperate (which rehashes in order
  * to find the entry given its key) is not tolerable. This function does not
- * shrink the table if it is underloaded. It does not update stats #ifdef
+ * shrink the table if it is underloaded. It does not update mStats #ifdef
  * PL_DHASHMETER, either.
  */
 NS_COM_GLUE void PL_DHashTableRawRemove(PLDHashTable* aTable,
                                         PLDHashEntryHdr* aEntry);
 
 NS_COM_GLUE uint32_t PL_DHashTableEnumerate(PLDHashTable* aTable,
                                             PLDHashEnumerator aEtor,
                                             void* aArg);
@@ -613,15 +617,13 @@ NS_COM_GLUE size_t PL_DHashTableSizeOfIn
  * When the table is marked as immutable, the re-entry assertions will
  * no longer trigger erroneously due to multi-threaded access. Instead,
  * mutations will cause assertions.
  */
 NS_COM_GLUE void PL_DHashMarkTableImmutable(PLDHashTable* aTable);
 #endif
 
 #ifdef PL_DHASHMETER
-#include <stdio.h>
-
 NS_COM_GLUE void PL_DHashTableDumpMeter(PLDHashTable* aTable,
                                         PLDHashEnumerator aDump, FILE* aFp);
 #endif
 
 #endif /* pldhash_h___ */
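
Aside: the header comment above on aTable->mGeneration deserves a concrete illustration. With double hashing, any ADD or REMOVE may reallocate mEntryStore, so a caller holding a saved entry pointer must sample Generation() first and re-fetch the entry if the sample no longer matches. The sketch below shows that pattern, assuming the stub ops and the entry points declared in this header behave as documented; it is illustrative usage, not code from this changeset.

// Illustrative sketch (assumed usage, not Mozilla code) of the
// Generation() pattern for detecting entry-store reallocation.
#include "pldhash.h"

void GenerationExample()
{
  PLDHashTable table;
  // Stub ops hash and compare entries by the key pointer itself.
  PL_DHashTableInit(&table, PL_DHashGetStubOps(), nullptr,
                    sizeof(PLDHashEntryStub), 16);

  const void* key = (const void*)0x1234;
  PLDHashEntryStub* stub =
    (PLDHashEntryStub*)PL_DHashTableOperate(&table, key, PL_DHASH_ADD);
  if (stub) {
    stub->key = key;      // claim the freshly added entry
  }

  uint32_t gen = table.Generation();   // sample before further mutation

  // ... other PL_DHASH_ADD / PL_DHASH_REMOVE calls here may grow or
  // shrink the entry store, leaving |stub| dangling ...

  if (table.Generation() != gen) {
    // The entry store was reallocated; re-fetch the entry before use.
    PLDHashEntryHdr* hdr = PL_DHashTableOperate(&table, key, PL_DHASH_LOOKUP);
    stub = PL_DHASH_ENTRY_IS_BUSY(hdr) ? (PLDHashEntryStub*)hdr : nullptr;
  }

  PL_DHashTableFinish(&table);
}
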