Bug 1050035 (part 1, attempt 2) - Lazily allocate PLDHashTable::mEntryStore. r=froydnj.
author: Nicholas Nethercote <nnethercote@mozilla.com>
Sun, 01 Feb 2015 14:56:33 -0800
changeset 231093 a9248b3d5c1f1be63b8f8377dc30a1c3db537c80
parent 231092 b590778549ab55bb903c38596c291fbf590dab1e
child 231094 60192399a18ef0e8863aa21aa48b4ace64a69e8d
push id: 28344
push user: ryanvm@gmail.com
push date: Fri, 27 Feb 2015 18:20:08 +0000
treeherder: mozilla-central@9dd9d1e5b43c
reviewers: froydnj
bugs: 1050035
milestone: 39.0a1
Bug 1050035 (part 1, attempt 2) - Lazily allocate PLDHashTable::mEntryStore. r=froydnj. This makes zero-element hash tables, which are common, smaller, and avoids unnecessary malloc/free pairs. I did some measurements during basic browsing of a few sites: 35% of all live tables were empty with a few tabs open, and cumulatively, over the whole session, 45% of tables never had an element added to them.
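A minimal standalone sketch of the pattern this patch introduces (the class name LazyStoreTable, its members, and the slot-based interface are hypothetical simplifications for illustration only, not the PLDHashTable API): entry storage stays null until the first Add(), read-only operations treat a null store as an empty table, and the store is allocated and zeroed on the first insertion.

#include <cstdint>
#include <cstdlib>

// Hypothetical, simplified illustration of lazy entry-store allocation.
class LazyStoreTable
{
  char*    mEntryStore;   // stays null until the first Add()
  uint32_t mEntrySize;    // bytes per entry
  uint32_t mCapacity;     // number of entry slots

public:
  LazyStoreTable(uint32_t aEntrySize, uint32_t aCapacity)
    : mEntryStore(nullptr), mEntrySize(aEntrySize), mCapacity(aCapacity) {}

  ~LazyStoreTable() { free(mEntryStore); }

  // An empty table reports zero capacity, matching the new Capacity() in the
  // patch below.
  uint32_t Capacity() const { return mEntryStore ? mCapacity : 0; }

  // Read-only operations never allocate: a null store means every search misses.
  void* Search(uint32_t aSlot) const
  {
    return mEntryStore ? mEntryStore + aSlot * mEntrySize : nullptr;
  }

  // The store is allocated and zeroed only when the first entry is added; on
  // allocation failure the caller sees null, like the fallible Add() in the
  // patch below.
  void* Add(uint32_t aSlot)
  {
    if (!mEntryStore) {
      mEntryStore = static_cast<char*>(calloc(mCapacity, mEntrySize));
      if (!mEntryStore) {
        return nullptr;
      }
    }
    return mEntryStore + aSlot * mEntrySize;
  }
};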
xpcom/glue/pldhash.cpp
xpcom/glue/pldhash.h
xpcom/tests/TestPLDHash.cpp
--- a/xpcom/glue/pldhash.cpp
+++ b/xpcom/glue/pldhash.cpp
@@ -245,21 +245,18 @@ PLDHashTable::Init(const PLDHashTableOps
   mEntrySize = aEntrySize;
   mEntryCount = mRemovedCount = 0;
   mGeneration = 0;
   uint32_t nbytes;
   if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
     return false;  // overflowed
   }
 
-  mEntryStore = (char*)malloc(nbytes);
-  if (!mEntryStore) {
-    return false;
-  }
-  memset(mEntryStore, 0, nbytes);
+  mEntryStore = nullptr;
+
   METER(memset(&mStats, 0, sizeof(mStats)));
 
   // Set this only once we reach a point where we know we can't fail.
   mOps = aOps;
 
 #ifdef DEBUG
   mRecursionLevel = 0;
 #endif
@@ -366,16 +363,17 @@ PL_DHashTableFinish(PLDHashTable* aTable
 // previously-removed entry. If |IsAdd| is false, the return value is null on a
 // miss, and will never be a previously-removed entry on a hit. This
 // distinction is a bit grotty but this function is hot enough that these
 // differences are worthwhile.
 template <PLDHashTable::SearchReason Reason>
 PLDHashEntryHdr* PL_DHASH_FASTCALL
 PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash)
 {
+  MOZ_ASSERT(mEntryStore);
   METER(mStats.mSearches++);
   NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
                "!(aKeyHash & COLLISION_FLAG)");
 
   /* Compute the primary hash address. */
   PLDHashNumber hash1 = HASH1(aKeyHash, mHashShift);
   PLDHashEntryHdr* entry = ADDRESS_ENTRY(this, hash1);
 
@@ -446,16 +444,17 @@ PLDHashTable::SearchTable(const void* aK
  * Avoiding the need for |aKey| means we can avoid needing a way to map
  * entries to keys, which means callers can use complex key types more
  * easily.
  */
 PLDHashEntryHdr* PL_DHASH_FASTCALL
 PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash)
 {
   METER(mStats.mSearches++);
+  MOZ_ASSERT(mEntryStore);
   NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
                "!(aKeyHash & COLLISION_FLAG)");
 
   /* Compute the primary hash address. */
   PLDHashNumber hash1 = HASH1(aKeyHash, mHashShift);
   PLDHashEntryHdr* entry = ADDRESS_ENTRY(this, hash1);
 
   /* Miss: return space for a new entry. */
@@ -487,16 +486,18 @@ PLDHashTable::FindFreeEntry(PLDHashNumbe
 
   /* NOTREACHED */
   return nullptr;
 }
 
 bool
 PLDHashTable::ChangeTable(int aDeltaLog2)
 {
+  MOZ_ASSERT(mEntryStore);
+
   /* Look, but don't touch, until we succeed in getting new entry store. */
   int oldLog2 = PL_DHASH_BITS - mHashShift;
   int newLog2 = oldLog2 + aDeltaLog2;
   uint32_t newCapacity = 1u << newLog2;
   if (newCapacity > PL_DHASH_MAX_CAPACITY) {
     return false;
   }
 
@@ -545,16 +546,18 @@ PLDHashTable::ChangeTable(int aDeltaLog2
 
   free(oldEntryStore);
   return true;
 }
 
 MOZ_ALWAYS_INLINE PLDHashNumber
 PLDHashTable::ComputeKeyHash(const void* aKey)
 {
+  MOZ_ASSERT(mEntryStore);
+
   PLDHashNumber keyHash = mOps->hashKey(this, aKey);
   keyHash *= PL_DHASH_GOLDEN_RATIO;
 
   /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
   ENSURE_LIVE_KEYHASH(keyHash);
   keyHash &= ~COLLISION_FLAG;
 
   return keyHash;
@@ -564,41 +567,58 @@ MOZ_ALWAYS_INLINE PLDHashEntryHdr*
 PLDHashTable::Search(const void* aKey)
 {
   MOZ_ASSERT(IsInitialized());
 
   INCREMENT_RECURSION_LEVEL(this);
 
   METER(mStats.mSearches++);
 
-  PLDHashNumber keyHash = ComputeKeyHash(aKey);
-  PLDHashEntryHdr* entry = SearchTable<ForSearchOrRemove>(aKey, keyHash);
+  PLDHashEntryHdr* entry =
+    mEntryStore ? SearchTable<ForSearchOrRemove>(aKey, ComputeKeyHash(aKey))
+                : nullptr;
 
   DECREMENT_RECURSION_LEVEL(this);
 
   return entry;
 }
 
 MOZ_ALWAYS_INLINE PLDHashEntryHdr*
 PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&)
 {
   MOZ_ASSERT(IsInitialized());
 
   PLDHashNumber keyHash;
   PLDHashEntryHdr* entry;
+  uint32_t capacity;
 
   MOZ_ASSERT(mRecursionLevel == 0);
   INCREMENT_RECURSION_LEVEL(this);
 
+  // Allocate the entry storage if it hasn't already been allocated.
+  if (!mEntryStore) {
+    uint32_t nbytes;
+    // We already checked this in Init(), so it must still be true.
+    MOZ_RELEASE_ASSERT(SizeOfEntryStore(CapacityFromHashShift(), mEntrySize,
+                                        &nbytes));
+    mEntryStore = (char*)malloc(nbytes);
+    if (!mEntryStore) {
+      METER(mStats.mAddFailures++);
+      entry = nullptr;
+      goto exit;
+    }
+    memset(mEntryStore, 0, nbytes);
+  }
+
   /*
    * If alpha is >= .75, grow or compress the table.  If aKey is already
    * in the table, we may grow once more than necessary, but only if we
    * are on the edge of being overloaded.
    */
-  uint32_t capacity = Capacity();
+  capacity = Capacity();
   if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
     /* Compress if a quarter or more of all entries are removed. */
     int deltaLog2;
     if (mRemovedCount >= capacity >> 2) {
       METER(mStats.mCompresses++);
       deltaLog2 = 0;
     } else {
       METER(mStats.mGrows++);
@@ -642,26 +662,48 @@ PLDHashTable::Add(const void* aKey, cons
     mStats.mAddHits++;
   });
 
 exit:
   DECREMENT_RECURSION_LEVEL(this);
   return entry;
 }
 
+MOZ_ALWAYS_INLINE PLDHashEntryHdr*
+PLDHashTable::Add(const void* aKey)
+{
+  PLDHashEntryHdr* entry = Add(aKey, fallible);
+  if (!entry) {
+    if (!mEntryStore) {
+      // We OOM'd while allocating the initial entry storage.
+      uint32_t nbytes;
+      (void) SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes);
+      NS_ABORT_OOM(nbytes);
+    } else {
+      // We failed to resize the existing entry storage, either due to OOM or
+      // because we exceeded the maximum table capacity or size; report it as
+      // an OOM. The multiplication by 2 gets us the size we tried to allocate,
+      // which is double the current size.
+      NS_ABORT_OOM(2 * EntrySize() * EntryCount());
+    }
+  }
+  return entry;
+}
+
 MOZ_ALWAYS_INLINE void
 PLDHashTable::Remove(const void* aKey)
 {
   MOZ_ASSERT(IsInitialized());
 
   MOZ_ASSERT(mRecursionLevel == 0);
   INCREMENT_RECURSION_LEVEL(this);
 
-  PLDHashNumber keyHash = ComputeKeyHash(aKey);
-  PLDHashEntryHdr* entry = SearchTable<ForSearchOrRemove>(aKey, keyHash);
+  PLDHashEntryHdr* entry =
+    mEntryStore ? SearchTable<ForSearchOrRemove>(aKey, ComputeKeyHash(aKey))
+                : nullptr;
   if (entry) {
     /* Clear this entry and mark it as "removed". */
     METER(mStats.mRemoveHits++);
     PL_DHashTableRawRemove(this, entry);
 
     /* Shrink if alpha is <= .25 and the table isn't too small already. */
     uint32_t capacity = Capacity();
     if (capacity > PL_DHASH_MIN_CAPACITY &&
@@ -688,34 +730,30 @@ PL_DHashTableAdd(PLDHashTable* aTable, c
                  const fallible_t& aFallible)
 {
   return aTable->Add(aKey, aFallible);
 }
 
 PLDHashEntryHdr* PL_DHASH_FASTCALL
 PL_DHashTableAdd(PLDHashTable* aTable, const void* aKey)
 {
-  PLDHashEntryHdr* entry = PL_DHashTableAdd(aTable, aKey, fallible);
-  if (!entry) {
-    // Entry storage reallocation failed.
-    NS_ABORT_OOM(aTable->EntrySize() * aTable->EntryCount());
-  }
-  return entry;
+  return aTable->Add(aKey);
 }
 
 void PL_DHASH_FASTCALL
 PL_DHashTableRemove(PLDHashTable* aTable, const void* aKey)
 {
   aTable->Remove(aKey);
 }
 
 MOZ_ALWAYS_INLINE void
 PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry)
 {
   MOZ_ASSERT(IsInitialized());
+  MOZ_ASSERT(mEntryStore);
 
   MOZ_ASSERT(mRecursionLevel != IMMUTABLE_RECURSION_LEVEL);
 
   NS_ASSERTION(ENTRY_IS_LIVE(aEntry), "ENTRY_IS_LIVE(aEntry)");
 
   /* Load keyHash first in case clearEntry() goofs it. */
   PLDHashNumber keyHash = aEntry->mKeyHash;
   mOps->clearEntry(this, aEntry);
@@ -735,16 +773,20 @@ PL_DHashTableRawRemove(PLDHashTable* aTa
   aTable->RawRemove(aEntry);
 }
 
 MOZ_ALWAYS_INLINE uint32_t
 PLDHashTable::Enumerate(PLDHashEnumerator aEtor, void* aArg)
 {
   MOZ_ASSERT(IsInitialized());
 
+  if (!mEntryStore) {
+    return 0;
+  }
+
   INCREMENT_RECURSION_LEVEL(this);
 
   // Please keep this method in sync with the PLDHashTable::Iterator constructor
   // and ::NextEntry methods below in this file.
   char* entryAddr = mEntryStore;
   uint32_t capacity = Capacity();
   uint32_t tableSize = capacity * mEntrySize;
   char* entryLimit = entryAddr + tableSize;
@@ -837,16 +879,20 @@ SizeOfEntryExcludingThisEnumerator(PLDHa
 
 MOZ_ALWAYS_INLINE size_t
 PLDHashTable::SizeOfExcludingThis(
     PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
     MallocSizeOf aMallocSizeOf, void* aArg /* = nullptr */) const
 {
   MOZ_ASSERT(IsInitialized());
 
+  if (!mEntryStore) {
+    return 0;
+  }
+
   size_t n = 0;
   n += aMallocSizeOf(mEntryStore);
   if (aSizeOfEntryExcludingThis) {
     SizeOfEntryExcludingThisArg arg2 = {
       0, aSizeOfEntryExcludingThis, aMallocSizeOf, aArg
     };
     PL_DHashTableEnumerate(const_cast<PLDHashTable*>(this),
                            SizeOfEntryExcludingThisEnumerator, &arg2);
@@ -959,16 +1005,17 @@ PLDHashEntryHdr* PLDHashTable::Iterator:
   char* entryLimit = mEntryAddr + tableSize;
 
   // Strictly speaking, we don't need to iterate over the full capacity each
   // time. However, it is simpler to do so rather than unnecessarily track the
   // current number of entries checked as opposed to only live entries. If debug
   // checks pass, then this method will only iterate through the full capacity
   // once. If they fail, then this loop may end up returning the early entries
   // more than once.
+  MOZ_ASSERT_IF(capacity > 0, mTable->mEntryStore);
   for (uint32_t e = 0; e < capacity; ++e) {
     PLDHashEntryHdr* entry = (PLDHashEntryHdr*)mEntryAddr;
 
     // Increment the count before returning so we don't keep returning the same
     // address. This may wrap around if ChaosMode is enabled.
     mEntryAddr += mTable->mEntrySize;
     if (mEntryAddr >= entryLimit) {
       mEntryAddr -= tableSize;
@@ -1016,16 +1063,17 @@ PLDHashTable::DumpMeter(PLDHashEnumerato
   char* entryAddr = mEntryStore;
   int sizeLog2 = PL_DHASH_BITS - mHashShift;
   uint32_t capacity = Capacity();
   uint32_t sizeMask = (1u << sizeLog2) - 1;
   uint32_t chainCount = 0, maxChainLen = 0;
   hash2 = 0;
   sqsum = 0;
 
+  MOZ_ASSERT_IF(capacity > 0, mEntryStore);
   for (uint32_t i = 0; i < capacity; i++) {
     entry = (PLDHashEntryHdr*)entryAddr;
     entryAddr += mEntrySize;
     if (!ENTRY_IS_LIVE(entry)) {
       continue;
     }
     hash1 = HASH1(entry->mKeyHash & ~COLLISION_FLAG, mHashShift);
     PLDHashNumber saveHash1 = hash1;
--- a/xpcom/glue/pldhash.h
+++ b/xpcom/glue/pldhash.h
@@ -142,16 +142,19 @@ typedef PLDHashOperator (*PLDHashEnumera
 typedef size_t (*PLDHashSizeOfEntryExcludingThisFun)(
   PLDHashEntryHdr* aHdr, mozilla::MallocSizeOf aMallocSizeOf, void* aArg);
 
 /*
  * A PLDHashTable is currently 8 words (without the PL_DHASHMETER overhead)
  * on most architectures, and may be allocated on the stack or within another
  * structure or class (see below for the Init and Finish functions to use).
  *
+ * No entry storage is allocated until the first element is added. This means
+ * that empty hash tables are cheap, which is good because they are common.
+ *
  * There used to be a long, math-heavy comment here about the merits of
  * double hashing vs. chaining; it was removed in bug 1058335. In short, double
  * hashing is more space-efficient unless the element size gets large (in which
  * case you should keep using double hashing but switch to using pointer
  * elements). Also, with double hashing, you can't safely hold an entry pointer
  * and use it after an ADD or REMOVE operation, unless you sample
  * aTable->mGeneration before adding or removing, and compare the sample after,
  * dereferencing the entry pointer only if aTable->mGeneration has not changed.
@@ -170,18 +173,17 @@ private:
    */
 protected:
   mutable uint16_t    mRecursionLevel;/* used to detect unsafe re-entry */
 private:
   uint32_t            mEntrySize;     /* number of bytes in an entry */
   uint32_t            mEntryCount;    /* number of entries in table */
   uint32_t            mRemovedCount;  /* removed entry sentinels in table */
   uint32_t            mGeneration;    /* entry storage generation number */
-  char*               mEntryStore;    /* entry storage */
-
+  char*               mEntryStore;    /* entry storage; allocated lazily */
 #ifdef PL_DHASHMETER
   struct PLDHashStats
   {
     uint32_t        mSearches;      /* total number of table searches */
     uint32_t        mSteps;         /* hash chain links traversed */
     uint32_t        mHits;          /* searches that found key */
     uint32_t        mMisses;        /* searches that didn't find key */
    uint32_t        mLookups;       /* number of Search() calls */
@@ -221,35 +223,36 @@ public:
   bool IsInitialized() const { return !!mOps; }
 
   // These should be used rarely.
   const PLDHashTableOps* const Ops() { return mOps; }
   void SetOps(const PLDHashTableOps* aOps) { mOps = aOps; }
 
   /*
    * Size in entries (gross, not net of free and removed sentinels) for table.
-   * We store mHashShift rather than sizeLog2 to optimize the collision-free
-   * case in SearchTable.
+   * This can be zero if no elements have been added yet, in which case the
+   * entry storage will not have yet been allocated.
    */
   uint32_t Capacity() const
   {
-    return ((uint32_t)1 << (PL_DHASH_BITS - mHashShift));
+    return mEntryStore ? CapacityFromHashShift() : 0;
   }
 
   uint32_t EntrySize()  const { return mEntrySize; }
   uint32_t EntryCount() const { return mEntryCount; }
   uint32_t Generation() const { return mGeneration; }
 
   bool Init(const PLDHashTableOps* aOps, uint32_t aEntrySize,
             const mozilla::fallible_t&, uint32_t aLength);
 
   void Finish();
 
   PLDHashEntryHdr* Search(const void* aKey);
   PLDHashEntryHdr* Add(const void* aKey, const mozilla::fallible_t&);
+  PLDHashEntryHdr* Add(const void* aKey);
   void Remove(const void* aKey);
 
   void RawRemove(PLDHashEntryHdr* aEntry);
 
   uint32_t Enumerate(PLDHashEnumerator aEtor, void* aArg);
 
   size_t SizeOfIncludingThis(
     PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
@@ -292,16 +295,23 @@ public:
     uint32_t mEntryOffset;            /* The number of the elements returned */
   };
 
   Iterator Iterate() const { return Iterator(this); }
 
 private:
   static bool EntryIsFree(PLDHashEntryHdr* aEntry);
 
+  // We store mHashShift rather than sizeLog2 to optimize the collision-free
+  // case in SearchTable.
+  uint32_t CapacityFromHashShift() const
+  {
+    return ((uint32_t)1 << (PL_DHASH_BITS - mHashShift));
+  }
+
   PLDHashNumber ComputeKeyHash(const void* aKey);
 
   enum SearchReason { ForSearchOrRemove, ForAdd };
 
   template <SearchReason Reason>
   PLDHashEntryHdr* PL_DHASH_FASTCALL
     SearchTable(const void* aKey, PLDHashNumber aKeyHash);
 
@@ -434,22 +444,24 @@ PLDHashTable* PL_NewDHashTable(
 /*
  * Free |aTable|'s entry storage and |aTable| itself (both via
  * aTable->mOps->freeTable). Use this function to destroy a PLDHashTable that
  * was allocated on the heap via PL_NewDHashTable().
  */
 void PL_DHashTableDestroy(PLDHashTable* aTable);
 
 /*
- * Initialize aTable with aOps, aEntrySize, and aCapacity. The table's initial
- * capacity will be chosen such that |aLength| elements can be inserted without
- * rehashing. If |aLength| is a power-of-two, this capacity will be |2*length|.
+ * Initialize aTable with aOps and aEntrySize. The table's initial capacity
+ * will be chosen such that |aLength| elements can be inserted without
+ * rehashing; if |aLength| is a power-of-two, this capacity will be |2*length|.
+ * However, because entry storage is allocated lazily, this initial capacity
+ * won't be relevant until the first element is added; prior to that the
+ * capacity will be zero.
  *
- * This function will crash if it can't allocate enough memory, or if
- * |aEntrySize| and/or |aLength| are too large.
+ * This function will crash if |aEntrySize| and/or |aLength| are too large.
  */
 void PL_DHashTableInit(
   PLDHashTable* aTable, const PLDHashTableOps* aOps,
   uint32_t aEntrySize, uint32_t aLength = PL_DHASH_DEFAULT_INITIAL_LENGTH);
 
 /*
  * Initialize aTable. This is the same as PL_DHashTableInit, except that it
  * returns a boolean indicating success, rather than crashing on failure.
--- a/xpcom/tests/TestPLDHash.cpp
+++ b/xpcom/tests/TestPLDHash.cpp
@@ -2,20 +2,18 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <stdio.h>
 #include "pldhash.h"
 
-// pldhash is very widely used and so any basic bugs in it are likely to be
-// exposed through normal usage.  Therefore, this test currently focusses on
-// extreme cases relating to maximum table capacity and potential overflows,
-// which are unlikely to be hit during normal execution.
+// This test mostly focuses on edge cases. But more coverage of normal
+// operations wouldn't be a bad thing.
 
 namespace TestPLDHash {
 
 static bool test_pldhash_Init_capacity_ok()
 {
   PLDHashTable t;
 
   // Check that the constructor nulls |ops|.
@@ -101,16 +99,77 @@ static bool test_pldhash_Init_overflow()
   // Check that |ops| is still null.
   if (t.IsInitialized()) {
     return false;
   }
 
   return true;
 }
 
+static bool test_pldhash_lazy_storage()
+{
+  PLDHashTable t;
+  PL_DHashTableInit(&t, PL_DHashGetStubOps(), sizeof(PLDHashEntryStub));
+
+  // PLDHashTable allocates entry storage lazily. Check that all the non-add
+  // operations work appropriately when the table is empty and the storage
+  // hasn't yet been allocated.
+
+  if (!t.IsInitialized()) {
+    return false;
+  }
+
+  if (t.Capacity() != 0) {
+    return false;
+  }
+
+  if (t.EntrySize() != sizeof(PLDHashEntryStub)) {
+    return false;
+  }
+
+  if (t.EntryCount() != 0) {
+    return false;
+  }
+
+  if (t.Generation() != 0) {
+    return false;
+  }
+
+  if (PL_DHashTableSearch(&t, (const void*)1)) {
+    return false;   // search succeeded?
+  }
+
+  // No result to check here, but call it to make sure it doesn't crash.
+  PL_DHashTableRemove(&t, (const void*)2);
+
+  // Using a null |enumerator| should be fine because it shouldn't be called
+  // for an empty table.
+  PLDHashEnumerator enumerator = nullptr;
+  if (PL_DHashTableEnumerate(&t, enumerator, nullptr) != 0) {
+    return false;   // enumeration count is non-zero?
+  }
+
+  for (PLDHashTable::Iterator iter = t.Iterate();
+       iter.HasMoreEntries();
+       iter.NextEntry()) {
+    return false; // shouldn't hit this on an empty table
+  }
+
+  // Using a null |mallocSizeOf| should be fine because it shouldn't be called
+  // for an empty table.
+  mozilla::MallocSizeOf mallocSizeOf = nullptr;
+  if (PL_DHashTableSizeOfExcludingThis(&t, nullptr, mallocSizeOf) != 0) {
+    return false;   // size is non-zero?
+  }
+
+  PL_DHashTableFinish(&t);
+
+  return true;
+}
+
 // See bug 931062, we skip this test on Android due to OOM.
 #ifndef MOZ_WIDGET_ANDROID
 // We insert the integers 0.., so this hash function is (a) as simple as
 // possible, and (b) collision-free.  Both of which are good, because we want
 // this test to be as fast as possible.
 static PLDHashNumber
 hash(PLDHashTable *table, const void *key)
 {
@@ -163,16 +222,17 @@ typedef bool (*TestFunc)();
 
 static const struct Test {
   const char* name;
   TestFunc    func;
 } tests[] = {
   DECL_TEST(test_pldhash_Init_capacity_ok),
   DECL_TEST(test_pldhash_Init_capacity_too_large),
   DECL_TEST(test_pldhash_Init_overflow),
+  DECL_TEST(test_pldhash_lazy_storage),
 // See bug 931062, we skip this test on Android due to OOM.
 #ifndef MOZ_WIDGET_ANDROID
   DECL_TEST(test_pldhash_grow_to_max_capacity),
 #endif
   { nullptr, nullptr }
 };
 
 } // namespace TestPLDHash