Bug 1232672 - Use MOZ_WARN_UNUSED_RESULT to make hash table clients check for failure r=luke r=billm r=njn
author:    Jon Coppeard <jcoppeard@mozilla.com>
date:      Tue, 22 Dec 2015 13:29:43 +0000
changeset: 277306 c46eebf3397eaff07cc45e7c9ecf925da4402c97
parent:    277305 f7f7f81ce57b8b8065633262ed98dbecaa4d5c0c
child:     277307 dd740170e03903c803aa8b7aff4fb45b77b162bd
push id:   69438
push user: jcoppeard@mozilla.com
push date: Tue, 22 Dec 2015 13:33:41 +0000
reviewers: luke, billm, njn
bugs:      1232672
milestone: 46.0a1
js/public/HashTable.h
js/src/builtin/Eval.cpp
js/src/gc/Verifier.cpp
js/src/jsapi-tests/testHashTable.cpp
js/src/vm/MemoryMetrics.cpp
js/src/vm/SPSProfiler.cpp
js/xpconnect/src/XPCWrappedNativeScope.cpp
js/xpconnect/src/xpcprivate.h
memory/replace/dmd/DMD.cpp
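
The substance of the change is the MOZ_WARN_UNUSED_RESULT annotation on every fallible HashMap/HashSet entry point (init, add, put, putNew, relookupOrAdd), which makes the compiler warn whenever a caller silently drops the returned bool. A rough sketch of how such an annotation is typically defined (the real macro lives in mfbt/Attributes.h; this is not its verbatim definition):

    // Sketch only: map the annotation onto the compiler attribute when available.
    #if defined(__GNUC__) || defined(__clang__)
    #  define MOZ_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
    #else
    #  define MOZ_WARN_UNUSED_RESULT
    #endif

    // An annotated fallible operation:
    MOZ_WARN_UNUSED_RESULT bool init(uint32_t len = 16);

    // Call sites that discard the result now warn:
    //   map.init();        // warning: ignoring return value
    //   (void)map.init();  // GCC warns even through a void cast
    //   if (!map.init())   // OK: result is checked
    //       return false;

Note that GCC keeps warning when the call itself is cast to void, which is why the deliberate-ignore call sites in this patch bind the result to a named bool first and cast that to void.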
--- a/js/public/HashTable.h
+++ b/js/public/HashTable.h
@@ -68,17 +68,17 @@ class HashMap
 
   public:
     typedef typename HashPolicy::Lookup Lookup;
     typedef TableEntry Entry;
 
     // HashMap construction is fallible (due to OOM); thus the user must call
     // init after constructing a HashMap and check the return value.
     explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a)  {}
-    bool init(uint32_t len = 16)                      { return impl.init(len); }
+    MOZ_WARN_UNUSED_RESULT bool init(uint32_t len = 16) { return impl.init(len); }
     bool initialized() const                          { return impl.initialized(); }
 
     // Return whether the given lookup value is present in the map. E.g.:
     //
     //   typedef HashMap<int,char> HM;
     //   HM h;
     //   if (HM::Ptr p = h.lookup(3)) {
     //     const HM::Entry& e = *p; // p acts like a pointer to Entry
@@ -131,29 +131,29 @@ class HashMap
     //    assert(p->key == 3);
     //    char val = p->value;
     typedef typename Impl::AddPtr AddPtr;
     AddPtr lookupForAdd(const Lookup& l) const {
         return impl.lookupForAdd(l);
     }
 
     template<typename KeyInput, typename ValueInput>
-    bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+    MOZ_WARN_UNUSED_RESULT bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
         return impl.add(p,
                         mozilla::Forward<KeyInput>(k),
                         mozilla::Forward<ValueInput>(v));
     }
 
     template<typename KeyInput>
-    bool add(AddPtr& p, KeyInput&& k) {
+    MOZ_WARN_UNUSED_RESULT bool add(AddPtr& p, KeyInput&& k) {
         return impl.add(p, mozilla::Forward<KeyInput>(k), Value());
     }
 
     template<typename KeyInput, typename ValueInput>
-    bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+    MOZ_WARN_UNUSED_RESULT bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
         return impl.relookupOrAdd(p, k,
                                   mozilla::Forward<KeyInput>(k),
                                   mozilla::Forward<ValueInput>(v));
     }
 
     // |all()| returns a Range containing |count()| elements. E.g.:
     //
     //   typedef HashMap<int,char> HM;
@@ -212,43 +212,45 @@ class HashMap
     /************************************************** Shorthand operations */
 
     bool has(const Lookup& l) const {
         return impl.lookup(l).found();
     }
 
     // Overwrite existing value with v. Return false on oom.
     template<typename KeyInput, typename ValueInput>
-    bool put(KeyInput&& k, ValueInput&& v) {
+    MOZ_WARN_UNUSED_RESULT bool put(KeyInput&& k, ValueInput&& v) {
         AddPtr p = lookupForAdd(k);
         if (p) {
             p->value() = mozilla::Forward<ValueInput>(v);
             return true;
         }
         return add(p, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
     }
 
     // Like put, but assert that the given key is not already present.
     template<typename KeyInput, typename ValueInput>
-    bool putNew(KeyInput&& k, ValueInput&& v) {
+    MOZ_WARN_UNUSED_RESULT bool putNew(KeyInput&& k, ValueInput&& v) {
         return impl.putNew(k, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
     }
 
     // Only call this to populate an empty map after reserving space with init().
     template<typename KeyInput, typename ValueInput>
     void putNewInfallible(KeyInput&& k, ValueInput&& v) {
         impl.putNewInfallible(k, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
     }
 
     // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
     Ptr lookupWithDefault(const Key& k, const Value& defaultValue) {
         AddPtr p = lookupForAdd(k);
         if (p)
             return p;
-        (void)add(p, k, defaultValue);  // p is left false-y on oom.
+        bool ok = add(p, k, defaultValue);
+        MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
+        (void)ok;
         return p;
     }
 
     // Remove if present.
     void remove(const Lookup& l) {
         if (Ptr p = lookup(l))
             remove(p);
     }
@@ -318,17 +320,17 @@ class HashSet
 
   public:
     typedef typename HashPolicy::Lookup Lookup;
     typedef T Entry;
 
     // HashSet construction is fallible (due to OOM); thus the user must call
     // init after constructing a HashSet and check the return value.
     explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a)  {}
-    bool init(uint32_t len = 16)                      { return impl.init(len); }
+    MOZ_WARN_UNUSED_RESULT bool init(uint32_t len = 16) { return impl.init(len); }
     bool initialized() const                          { return impl.initialized(); }
 
     // Return whether the given lookup value is present in the set. E.g.:
     //
     //   typedef HashSet<int> HS;
     //   HS h;
     //   if (HS::Ptr p = h.lookup(3)) {
     //     assert(*p == 3);   // p acts like a pointer to int
@@ -376,22 +378,22 @@ class HashSet
     //    assert(*p == 3);
     //
     // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
     // entry |t|, where the caller ensures match(l,t).
     typedef typename Impl::AddPtr AddPtr;
     AddPtr lookupForAdd(const Lookup& l) const        { return impl.lookupForAdd(l); }
 
     template <typename U>
-    bool add(AddPtr& p, U&& u) {
+    MOZ_WARN_UNUSED_RESULT bool add(AddPtr& p, U&& u) {
         return impl.add(p, mozilla::Forward<U>(u));
     }
 
     template <typename U>
-    bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) {
+    MOZ_WARN_UNUSED_RESULT bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) {
         return impl.relookupOrAdd(p, l, mozilla::Forward<U>(u));
     }
 
     // |all()| returns a Range containing |count()| elements:
     //
     //   typedef HashSet<int> HS;
     //   HS h;
     //   for (HS::Range r = h.all(); !r.empty(); r.popFront())
@@ -448,29 +450,29 @@ class HashSet
     /************************************************** Shorthand operations */
 
     bool has(const Lookup& l) const {
         return impl.lookup(l).found();
     }
 
     // Add |u| if it is not present already. Return false on oom.
     template <typename U>
-    bool put(U&& u) {
+    MOZ_WARN_UNUSED_RESULT bool put(U&& u) {
         AddPtr p = lookupForAdd(u);
         return p ? true : add(p, mozilla::Forward<U>(u));
     }
 
     // Like put, but assert that the given key is not already present.
     template <typename U>
-    bool putNew(U&& u) {
+    MOZ_WARN_UNUSED_RESULT bool putNew(U&& u) {
         return impl.putNew(u, mozilla::Forward<U>(u));
     }
 
     template <typename U>
-    bool putNew(const Lookup& l, U&& u) {
+    MOZ_WARN_UNUSED_RESULT bool putNew(const Lookup& l, U&& u) {
         return impl.putNew(l, mozilla::Forward<U>(u));
     }
 
     // Only call this to populate an empty set after reserving space with init().
     template <typename U>
     void putNewInfallible(const Lookup& l, U&& u) {
         impl.putNewInfallible(l, mozilla::Forward<U>(u));
     }
@@ -1630,17 +1632,17 @@ class HashTable : private AllocPolicy
         mozilla::ReentrancyGuard g(*this);
         HashNumber keyHash = prepareHash(l);
         Entry& entry = lookup(l, keyHash, sCollisionBit);
         AddPtr p(entry, *this, keyHash);
         return p;
     }
 
     template <typename... Args>
-    bool add(AddPtr& p, Args&&... args)
+    MOZ_WARN_UNUSED_RESULT bool add(AddPtr& p, Args&&... args)
     {
         mozilla::ReentrancyGuard g(*this);
         MOZ_ASSERT(table);
         MOZ_ASSERT(!p.found());
         MOZ_ASSERT(!(p.keyHash & sCollisionBit));
 
         // Changing an entry from removed to live does not affect whether we
         // are overloaded and can be handled separately.
@@ -1693,32 +1695,32 @@ class HashTable : private AllocPolicy
 #ifdef JS_DEBUG
         mutationCount++;
 #endif
     }
 
     // Note: |l| may alias arguments in |args|, so this function must take
     // care not to use |l| after moving |args|.
     template <typename... Args>
-    bool putNew(const Lookup& l, Args&&... args)
+    MOZ_WARN_UNUSED_RESULT bool putNew(const Lookup& l, Args&&... args)
     {
         if (!this->checkSimulatedOOM())
             return false;
 
         if (checkOverloaded() == RehashFailed)
             return false;
 
         putNewInfallible(l, mozilla::Forward<Args>(args)...);
         return true;
     }
 
     // Note: |l| may be a reference to a piece of |args|, so this function
     // must take care not to use |l| after moving |args|.
     template <typename... Args>
-    bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
+    MOZ_WARN_UNUSED_RESULT bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
     {
 #ifdef JS_DEBUG
         p.generation = generation();
         p.mutationCount = mutationCount;
 #endif
         {
             mozilla::ReentrancyGuard g(*this);
             MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
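
The caller updates below fall into three idioms, chosen by whether an OOM can be propagated, safely ignored, or cannot occur under the table's alloc policy. A condensed sketch of the three patterns (names illustrative):

    // 1. Propagate failure to the caller (gc/Verifier.cpp, xpcprivate.h,
    //    jsapi-tests):
    if (!map.add(p, key, value))
        return false;

    // 2. Deliberately ignore, via a named bool so the warning is actually
    //    silenced under GCC (vm/MemoryMetrics.cpp, builtin/Eval.cpp):
    bool ok = map.add(p, key, value);
    (void)ok; // Ignore failure -- the entry just won't be recorded.

    // 3. Assert success where the alloc policy is infallible
    //    (memory/replace/dmd/DMD.cpp):
    MOZ_ALWAYS_TRUE(map.add(p, key, value));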
--- a/js/src/builtin/Eval.cpp
+++ b/js/src/builtin/Eval.cpp
@@ -88,18 +88,20 @@ class EvalScriptGuard
     explicit EvalScriptGuard(JSContext* cx)
         : cx_(cx), script_(cx), lookup_(cx), lookupStr_(cx) {}
 
     ~EvalScriptGuard() {
         if (script_) {
             script_->cacheForEval();
             EvalCacheEntry cacheEntry = {lookupStr_, script_, lookup_.callerScript, lookup_.pc};
             lookup_.str = lookupStr_;
-            if (lookup_.str && IsEvalCacheCandidate(script_))
-                cx_->runtime()->evalCache.relookupOrAdd(p_, lookup_, cacheEntry);
+            if (lookup_.str && IsEvalCacheCandidate(script_)) {
+                bool ok = cx_->runtime()->evalCache.relookupOrAdd(p_, lookup_, cacheEntry);
+                (void)ok; // Ignore failure to add cache entry.
+            }
         }
     }
 
     void lookupInEvalCache(JSLinearString* str, JSScript* callerScript, jsbytecode* pc)
     {
         lookupStr_ = str;
         lookup_.str = str;
         lookup_.callerScript = callerScript;
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -141,17 +141,21 @@ MakeNode(VerifyPreTracer* trc, void* thi
         if (trc->edgeptr >= trc->term) {
             trc->edgeptr = trc->term;
             return nullptr;
         }
 
         node->thing = thing;
         node->count = 0;
         node->kind = kind;
-        trc->nodemap.add(p, thing, node);
+        if (!trc->nodemap.add(p, thing, node)) {
+            trc->edgeptr = trc->term;
+            return nullptr;
+        }
+
         return node;
     }
     return nullptr;
 }
 
 static VerifyNode*
 NextNode(VerifyNode* node)
 {
--- a/js/src/jsapi-tests/testHashTable.cpp
+++ b/js/src/jsapi-tests/testHashTable.cpp
@@ -103,93 +103,100 @@ AddLowKeys(IntMap* am, IntMap* bm, int s
 {
     size_t i = 0;
     srand(seed);
     while (i < TestSize) {
         uint32_t n = rand() & 0x0000FFFF;
         if (!am->has(n)) {
             if (bm->has(n))
                 return false;
-            am->putNew(n, n);
-            bm->putNew(n, n);
+
+            if (!am->putNew(n, n) || !bm->putNew(n, n))
+                return false;
             i++;
         }
     }
     return true;
 }
 
 static bool
 AddLowKeys(IntSet* as, IntSet* bs, int seed)
 {
     size_t i = 0;
     srand(seed);
     while (i < TestSize) {
         uint32_t n = rand() & 0x0000FFFF;
         if (!as->has(n)) {
             if (bs->has(n))
                 return false;
-            as->putNew(n);
-            bs->putNew(n);
+            if (!as->putNew(n) || !bs->putNew(n))
+                return false;
             i++;
         }
     }
     return true;
 }
 
 template <class NewKeyFunction>
 static bool
 SlowRekey(IntMap* m) {
     IntMap tmp;
-    tmp.init();
+    if (!tmp.init())
+        return false;
 
     for (IntMap::Range r = m->all(); !r.empty(); r.popFront()) {
         if (NewKeyFunction::shouldBeRemoved(r.front().key()))
             continue;
         uint32_t hi = NewKeyFunction::rekey(r.front().key());
         if (tmp.has(hi))
             return false;
-        tmp.putNew(hi, r.front().value());
+        if (!tmp.putNew(hi, r.front().value()))
+            return false;
     }
 
     m->clear();
     for (IntMap::Range r = tmp.all(); !r.empty(); r.popFront()) {
-        m->putNew(r.front().key(), r.front().value());
+        if (!m->putNew(r.front().key(), r.front().value()))
+            return false;
     }
 
     return true;
 }
 
 template <class NewKeyFunction>
 static bool
 SlowRekey(IntSet* s) {
     IntSet tmp;
-    tmp.init();
+    if (!tmp.init())
+        return false;
 
     for (IntSet::Range r = s->all(); !r.empty(); r.popFront()) {
         if (NewKeyFunction::shouldBeRemoved(r.front()))
             continue;
         uint32_t hi = NewKeyFunction::rekey(r.front());
         if (tmp.has(hi))
             return false;
-        tmp.putNew(hi);
+        if (!tmp.putNew(hi))
+            return false;
     }
 
     s->clear();
     for (IntSet::Range r = tmp.all(); !r.empty(); r.popFront()) {
-        s->putNew(r.front());
+        if (!s->putNew(r.front()))
+            return false;
     }
 
     return true;
 }
 
 BEGIN_TEST(testHashRekeyManual)
 {
     IntMap am, bm;
-    am.init();
-    bm.init();
+    CHECK(am.init());
+    CHECK(bm.init());
     for (size_t i = 0; i < TestIterations; ++i) {
 #ifdef FUZZ
         fprintf(stderr, "map1: %lu\n", i);
 #endif
         CHECK(AddLowKeys(&am, &bm, i));
         CHECK(MapsAreEqual(am, bm));
 
         for (IntMap::Enum e(am); !e.empty(); e.popFront()) {
@@ -200,18 +207,18 @@ BEGIN_TEST(testHashRekeyManual)
         CHECK(SlowRekey<LowToHigh>(&bm));
 
         CHECK(MapsAreEqual(am, bm));
         am.clear();
         bm.clear();
     }
 
     IntSet as, bs;
-    as.init();
-    bs.init();
+    CHECK(as.init());
+    CHECK(bs.init());
     for (size_t i = 0; i < TestIterations; ++i) {
 #ifdef FUZZ
         fprintf(stderr, "set1: %lu\n", i);
 #endif
         CHECK(AddLowKeys(&as, &bs, i));
         CHECK(SetsAreEqual(as, bs));
 
         for (IntSet::Enum e(as); !e.empty(); e.popFront()) {
@@ -228,18 +235,18 @@ BEGIN_TEST(testHashRekeyManual)
 
     return true;
 }
 END_TEST(testHashRekeyManual)
 
 BEGIN_TEST(testHashRekeyManualRemoval)
 {
     IntMap am, bm;
-    am.init();
-    bm.init();
+    CHECK(am.init());
+    CHECK(bm.init());
     for (size_t i = 0; i < TestIterations; ++i) {
 #ifdef FUZZ
         fprintf(stderr, "map2: %lu\n", i);
 #endif
         CHECK(AddLowKeys(&am, &bm, i));
         CHECK(MapsAreEqual(am, bm));
 
         for (IntMap::Enum e(am); !e.empty(); e.popFront()) {
@@ -254,18 +261,18 @@ BEGIN_TEST(testHashRekeyManualRemoval)
         CHECK(SlowRekey<LowToHighWithRemoval>(&bm));
 
         CHECK(MapsAreEqual(am, bm));
         am.clear();
         bm.clear();
     }
 
     IntSet as, bs;
-    as.init();
-    bs.init();
+    CHECK(as.init());
+    CHECK(bs.init());
     for (size_t i = 0; i < TestIterations; ++i) {
 #ifdef FUZZ
         fprintf(stderr, "set2: %lu\n", i);
 #endif
         CHECK(AddLowKeys(&as, &bs, i));
         CHECK(SetsAreEqual(as, bs));
 
         for (IntSet::Enum e(as); !e.empty(); e.popFront()) {
@@ -322,17 +329,17 @@ struct MoveOnlyType {
     MoveOnlyType& operator=(const MoveOnlyType&) = delete;
 };
 
 BEGIN_TEST(testHashSetOfMoveOnlyType)
 {
     typedef js::HashSet<MoveOnlyType, MoveOnlyType::HashPolicy, js::SystemAllocPolicy> Set;
 
     Set set;
-    set.init();
+    CHECK(set.init());
 
     MoveOnlyType a(1);
 
-    set.put(mozilla::Move(a)); // This shouldn't generate a compiler error.
+    CHECK(set.put(mozilla::Move(a))); // This shouldn't generate a compiler error.
 
     return true;
 }
 END_TEST(testHashSetOfMoveOnlyType)
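
The test updates lean on the jsapi-tests CHECK macro, which turns a false result into a recorded failure and an early return from the test, so the newly checked calls fold directly into the existing control flow. Roughly, simplified from the harness in js/src/jsapi-tests/tests.h:

    // Simplified sketch of the harness macro; the real one also records
    // the failing expression and location via the test fixture.
    #define CHECK(expr) \
        do { \
            if (!(expr)) \
                return fail(#expr, __FILE__, __LINE__); \
        } while (false)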
--- a/js/src/vm/MemoryMetrics.cpp
+++ b/js/src/vm/MemoryMetrics.cpp
@@ -384,19 +384,20 @@ AddClassInfo(Granularity granularity, Co
              JS::ClassInfo& info)
 {
     if (granularity == FineGrained) {
         if (!className)
             className = "<no class name>";
         CompartmentStats::ClassesHashMap::AddPtr p =
             cStats->allClasses->lookupForAdd(className);
         if (!p) {
+            bool ok = cStats->allClasses->add(p, className, info);
             // Ignore failure -- we just won't record the
             // object/shape/base-shape as notable.
-            (void)cStats->allClasses->add(p, className, info);
+            (void)ok;
         } else {
             p->value().add(info);
         }
     }
 }
 
 // The various kinds of hashing are expensive, and the results are unused when
 // doing coarse-grained measurements. Skipping them more than doubles the
@@ -439,34 +440,36 @@ StatsCellCallback(JSRuntime* rt, void* d
         cStats->typeInferenceTypeScripts += script->sizeOfTypeScript(rtStats->mallocSizeOf_);
         jit::AddSizeOfBaselineData(script, rtStats->mallocSizeOf_, &cStats->baselineData,
                                    &cStats->baselineStubsFallback);
         cStats->ionData += jit::SizeOfIonData(script, rtStats->mallocSizeOf_);
 
         ScriptSource* ss = script->scriptSource();
         SourceSet::AddPtr entry = closure->seenSources.lookupForAdd(ss);
         if (!entry) {
-            (void)closure->seenSources.add(entry, ss); // Not much to be done on failure.
+            bool ok = closure->seenSources.add(entry, ss);
+            (void)ok; // Not much to be done on failure.
 
             JS::ScriptSourceInfo info;  // This zeroes all the sizes.
             ss->addSizeOfIncludingThis(rtStats->mallocSizeOf_, &info);
             MOZ_ASSERT(info.compressed == 0 || info.uncompressed == 0);
 
             rtStats->runtime.scriptSourceInfo.add(info);
 
             if (granularity == FineGrained) {
                 const char* filename = ss->filename();
                 if (!filename)
                     filename = "<no filename>";
 
                 JS::RuntimeSizes::ScriptSourcesHashMap::AddPtr p =
                     rtStats->runtime.allScriptSources->lookupForAdd(filename);
                 if (!p) {
+                    bool ok = rtStats->runtime.allScriptSources->add(p, filename, info);
                     // Ignore failure -- we just won't record the script source as notable.
-                    (void)rtStats->runtime.allScriptSources->add(p, filename, info);
+                    (void)ok;
                 } else {
                     p->value().add(info);
                 }
             }
         }
 
         break;
       }
@@ -487,18 +490,19 @@ StatsCellCallback(JSRuntime* rt, void* d
         zStats->stringInfo.add(info);
 
         // The primary use case for anonymization is automated crash submission
         // (to help detect OOM crashes). In that case, we don't want to pay the
         // memory cost required to do notable string detection.
         if (granularity == FineGrained && !closure->anonymize) {
             ZoneStats::StringsHashMap::AddPtr p = zStats->allStrings->lookupForAdd(str);
             if (!p) {
+                bool ok = zStats->allStrings->add(p, str, info);
                 // Ignore failure -- we just won't record the string as notable.
-                (void)zStats->allStrings->add(p, str, info);
+                (void)ok;
             } else {
                 p->value().add(info);
             }
         }
         break;
       }
 
       case JS::TraceKind::Symbol:
--- a/js/src/vm/SPSProfiler.cpp
+++ b/js/src/vm/SPSProfiler.cpp
@@ -38,16 +38,19 @@ SPSProfiler::SPSProfiler(JSRuntime* rt)
 
 bool
 SPSProfiler::init()
 {
     lock_ = PR_NewLock();
     if (lock_ == nullptr)
         return false;
 
+    if (!strings.init())
+        return false;
+
     return true;
 }
 
 SPSProfiler::~SPSProfiler()
 {
     if (strings.initialized()) {
         for (ProfileStringMap::Enum e(strings); !e.empty(); e.popFront())
             js_free(const_cast<char*>(e.front().value()));
@@ -56,18 +59,18 @@ SPSProfiler::~SPSProfiler()
         PR_DestroyLock(lock_);
 }
 
 void
 SPSProfiler::setProfilingStack(ProfileEntry* stack, uint32_t* size, uint32_t max)
 {
     AutoSPSLock lock(lock_);
     MOZ_ASSERT_IF(size_ && *size_ != 0, !enabled());
-    if (!strings.initialized())
-        strings.init();
+    MOZ_ASSERT(strings.initialized());
+
     stack_ = stack;
     size_  = size;
     max_   = max;
 }
 
 void
 SPSProfiler::setEventMarker(void (*fn)(const char*))
 {
--- a/js/xpconnect/src/XPCWrappedNativeScope.cpp
+++ b/js/xpconnect/src/XPCWrappedNativeScope.cpp
@@ -695,17 +695,18 @@ XPCWrappedNativeScope::SetExpandoChain(J
 
 /* static */ bool
 XPCWrappedNativeScope::SetAddonInterposition(JSContext* cx,
                                              JSAddonId* addonId,
                                              nsIAddonInterposition* interp)
 {
     if (!gInterpositionMap) {
         gInterpositionMap = new InterpositionMap();
-        gInterpositionMap->init();
+        bool ok = gInterpositionMap->init();
+        NS_ENSURE_TRUE(ok, false);
 
         // Make sure to clear the map at shutdown.
         // Note: this will take care of gInterpositionWhitelists too.
         nsContentUtils::RegisterShutdownObserver(new ClearInterpositionsObserver());
     }
     if (interp) {
         bool ok = gInterpositionMap->put(addonId, interp);
         NS_ENSURE_TRUE(ok, false);
@@ -751,17 +752,21 @@ XPCWrappedNativeScope::UpdateInterpositi
     // need more interpositions, change the capacity of the array please.
     static const size_t MAX_INTERPOSITION = 8;
     if (!gInterpositionWhitelists)
         gInterpositionWhitelists = new InterpositionWhitelistArray(MAX_INTERPOSITION);
 
     MOZ_RELEASE_ASSERT(MAX_INTERPOSITION > gInterpositionWhitelists->Length() + 1);
     InterpositionWhitelistPair* newPair = gInterpositionWhitelists->AppendElement();
     newPair->interposition = interposition;
-    newPair->whitelist.init();
+    if (!newPair->whitelist.init()) {
+        JS_ReportOutOfMemory(cx);
+        return false;
+    }
+
     whitelist = &newPair->whitelist;
 
     RootedValue whitelistVal(cx);
     nsresult rv = interposition->GetWhitelist(&whitelistVal);
     if (NS_FAILED(rv)) {
         JS_ReportError(cx, "Could not get the whitelist from the interposition.");
         return false;
     }
@@ -813,17 +818,20 @@ XPCWrappedNativeScope::UpdateInterpositi
             if (!str) {
                 JS_ReportError(cx, "String internization failed.");
                 return false;
             }
 
             // By interning the ids we ensure that they won't get
             // GCed so we can use them as hash keys.
             jsid id = INTERNED_STRING_TO_JSID(cx, str);
-            whitelist->put(JSID_BITS(id));
+            if (!whitelist->put(JSID_BITS(id))) {
+                JS_ReportOutOfMemory(cx);
+                return false;
+            }
         }
     }
 
     return true;
 }
 
 /***************************************************************************/
 
--- a/js/xpconnect/src/xpcprivate.h
+++ b/js/xpconnect/src/xpcprivate.h
@@ -1123,17 +1123,18 @@ public:
                         js::MovableCellHasher<JS::Heap<JSObject*>>,
                         js::SystemAllocPolicy> DOMExpandoSet;
 
     bool RegisterDOMExpandoObject(JSObject* expando) {
         // Expandos are proxy objects, and proxies are always tenured.
         JS::AssertGCThingMustBeTenured(expando);
         if (!mDOMExpandoSet) {
             mDOMExpandoSet = new DOMExpandoSet();
-            mDOMExpandoSet->init(8);
+            if (!mDOMExpandoSet->init(8))
+                return false;
         }
         return mDOMExpandoSet->put(JS::Heap<JSObject*>(expando));
     }
     void RemoveDOMExpandoObject(JSObject* expando) {
         if (mDOMExpandoSet) {
             DOMExpandoSet::Ptr p = mDOMExpandoSet->lookup(JS::Heap<JSObject*>(expando));
             MOZ_ASSERT(p.found());
             mDOMExpandoSet->remove(p);
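
DMD's tables allocate through InfallibleAllocPolicy, so these operations cannot actually report OOM; the patch therefore wraps them in MOZ_ALWAYS_TRUE rather than threading failure upward. A sketch of that macro's intended semantics, assuming the era's definition in mfbt/Assertions.h: the expression is evaluated exactly once in all builds, and the result is asserted in debug builds.

    // Sketch, not the verbatim mfbt definition.
    #ifdef DEBUG
    #  define MOZ_ALWAYS_TRUE(expr) MOZ_ASSERT((expr))
    #else
    #  define MOZ_ALWAYS_TRUE(expr) ((void)(expr))
    #endif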
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -605,28 +605,31 @@ public:
 
 //---------------------------------------------------------------------------
 // Location service
 //---------------------------------------------------------------------------
 
 class StringTable
 {
 public:
-  StringTable() { (void)mSet.init(64); }
+  StringTable()
+  {
+    MOZ_ALWAYS_TRUE(mSet.init(64));
+  }
 
   const char*
   Intern(const char* aString)
   {
     StringHashSet::AddPtr p = mSet.lookupForAdd(aString);
     if (p) {
       return *p;
     }
 
     const char* newString = InfallibleAllocPolicy::strdup_(aString);
-    (void)mSet.add(p, newString);
+    MOZ_ALWAYS_TRUE(mSet.add(p, newString));
     return newString;
   }
 
   size_t
   SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
   {
     size_t n = 0;
     n += mSet.sizeOfExcludingThis(aMallocSizeOf);
@@ -780,17 +783,17 @@ StackTrace::Get(Thread* aT)
     } else {
       tmp.mLength = 0;
     }
   }
 
   StackTraceTable::AddPtr p = gStackTraceTable->lookupForAdd(&tmp);
   if (!p) {
     StackTrace* stnew = InfallibleAllocPolicy::new_<StackTrace>(tmp);
-    (void)gStackTraceTable->add(p, stnew);
+    MOZ_ALWAYS_TRUE(gStackTraceTable->add(p, stnew));
   }
   return *p;
 }
 
 //---------------------------------------------------------------------------
 // Heap blocks
 //---------------------------------------------------------------------------
 
@@ -929,24 +932,24 @@ public:
   bool ReportedOnAlloc2() const
   {
     MOZ_ASSERT(gOptions->IsDarkMatterMode());
     return mReportStackTrace_mReportedOnAlloc[1].Tag();
   }
 
   void AddStackTracesToTable(StackTraceSet& aStackTraces) const
   {
-    aStackTraces.put(AllocStackTrace());  // never null
+    MOZ_ALWAYS_TRUE(aStackTraces.put(AllocStackTrace()));  // never null
     if (gOptions->IsDarkMatterMode()) {
       const StackTrace* st;
       if ((st = ReportStackTrace1())) {     // may be null
-        aStackTraces.put(st);
+        MOZ_ALWAYS_TRUE(aStackTraces.put(st));
       }
       if ((st = ReportStackTrace2())) {     // may be null
-        aStackTraces.put(st);
+        MOZ_ALWAYS_TRUE(aStackTraces.put(st));
       }
     }
   }
 
   uint32_t NumReports() const
   {
     MOZ_ASSERT(gOptions->IsDarkMatterMode());
     if (ReportStackTrace2()) {
@@ -1047,17 +1050,17 @@ public:
 
   const StackTrace* AllocStackTrace() const
   {
     return mAllocStackTrace_mIsSampled.Ptr();
   }
 
   void AddStackTracesToTable(StackTraceSet& aStackTraces) const
   {
-    aStackTraces.put(AllocStackTrace());  // never null
+    MOZ_ALWAYS_TRUE(aStackTraces.put(AllocStackTrace()));  // never null
   }
 
   // Hash policy.
 
   typedef DeadBlock Lookup;
 
   static uint32_t hash(const DeadBlock& aB)
   {
@@ -1085,31 +1088,31 @@ static DeadBlockTable* gDeadBlockTable =
 // Add the dead block to the dead block table, if that's appropriate.
 void MaybeAddToDeadBlockTable(const DeadBlock& aDb)
 {
   if (gOptions->IsCumulativeMode() && aDb.AllocStackTrace()) {
     AutoLockState lock;
     if (DeadBlockTable::AddPtr p = gDeadBlockTable->lookupForAdd(aDb)) {
       p->value() += 1;
     } else {
-      gDeadBlockTable->add(p, aDb, 1);
+      MOZ_ALWAYS_TRUE(gDeadBlockTable->add(p, aDb, 1));
     }
   }
 }
 
 // Add a pointer to each live stack trace into the given StackTraceSet.  (A
 // stack trace is live if it's used by one of the live blocks.)
 static void
 GatherUsedStackTraces(StackTraceSet& aStackTraces)
 {
   MOZ_ASSERT(gStateLock->IsLocked());
   MOZ_ASSERT(Thread::Fetch()->InterceptsAreBlocked());
 
   aStackTraces.finish();
-  aStackTraces.init(512);
+  MOZ_ALWAYS_TRUE(aStackTraces.init(512));
 
   for (auto r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
     r.front().AddStackTracesToTable(aStackTraces);
   }
 
   for (auto r = gDeadBlockTable->all(); !r.empty(); r.popFront()) {
     r.front().key().AddStackTracesToTable(aStackTraces);
   }
@@ -1165,22 +1168,22 @@ AllocCallback(void* aPtr, size_t aReqSiz
     // blame this allocation for |sampleBelowSize| bytes.  This precludes the
     // measurement of slop.
     gSmallBlockActualSizeCounter += actualSize;
     if (gSmallBlockActualSizeCounter >= sampleBelowSize) {
       gSmallBlockActualSizeCounter -= sampleBelowSize;
 
       LiveBlock b(aPtr, sampleBelowSize, StackTrace::Get(aT),
                   /* isSampled */ true);
-      (void)gLiveBlockTable->putNew(aPtr, b);
+      MOZ_ALWAYS_TRUE(gLiveBlockTable->putNew(aPtr, b));
     }
   } else {
     // If this block size is larger than the sample size, record it exactly.
     LiveBlock b(aPtr, aReqSize, StackTrace::Get(aT), /* isSampled */ false);
-    (void)gLiveBlockTable->putNew(aPtr, b);
+    MOZ_ALWAYS_TRUE(gLiveBlockTable->putNew(aPtr, b));
   }
 }
 
 static void
 FreeCallback(void* aPtr, Thread* aT, DeadBlock* aDeadBlock)
 {
   if (!aPtr) {
     return;
@@ -1585,26 +1588,27 @@ Init(const malloc_table_t* aMallocTable)
   gSmallBlockActualSizeCounter = 0;
 
   DMD_CREATE_TLS_INDEX(gTlsIndex);
 
   {
     AutoLockState lock;
 
     gStackTraceTable = InfallibleAllocPolicy::new_<StackTraceTable>();
-    gStackTraceTable->init(8192);
+    MOZ_ALWAYS_TRUE(gStackTraceTable->init(8192));
 
     gLiveBlockTable = InfallibleAllocPolicy::new_<LiveBlockTable>();
-    gLiveBlockTable->init(8192);
+    MOZ_ALWAYS_TRUE(gLiveBlockTable->init(8192));
 
     // Create this even if the mode isn't Cumulative (albeit with a small
     // size), in case the mode is changed later on (as is done by SmokeDMD.cpp,
     // for example).
     gDeadBlockTable = InfallibleAllocPolicy::new_<DeadBlockTable>();
-    gDeadBlockTable->init(gOptions->IsCumulativeMode() ? 8192 : 4);
+    size_t tableSize = gOptions->IsCumulativeMode() ? 8192 : 4;
+    MOZ_ALWAYS_TRUE(gDeadBlockTable->init(tableSize));
   }
 
   gIsDMDInitialized = true;
 }
 
 //---------------------------------------------------------------------------
 // Block reporting and unreporting
 //---------------------------------------------------------------------------
@@ -1715,27 +1719,31 @@ DMDFuncs::ClearReports()
   for (auto r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
     r.front().UnreportIfNotReportedOnAlloc();
   }
 }
 
 class ToIdStringConverter final
 {
 public:
-  ToIdStringConverter() : mNextId(0) { mIdMap.init(512); }
+  ToIdStringConverter()
+    : mNextId(0)
+  {
+    MOZ_ALWAYS_TRUE(mIdMap.init(512));
+  }
 
   // Converts a pointer to a unique ID. Reuses the existing ID for the pointer
   // if it's been seen before.
   const char* ToIdString(const void* aPtr)
   {
     uint32_t id;
     PointerIdMap::AddPtr p = mIdMap.lookupForAdd(aPtr);
     if (!p) {
       id = mNextId++;
-      (void)mIdMap.add(p, aPtr, id);
+      MOZ_ALWAYS_TRUE(mIdMap.add(p, aPtr, id));
     } else {
       id = p->value();
     }
     return Base32(id);
   }
 
   size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
   {
@@ -1823,20 +1831,20 @@ AnalyzeImpl(UniquePtr<JSONWriteFunc> aWr
 {
   AutoBlockIntercepts block(Thread::Fetch());
   AutoLockState lock;
 
   // Allocate this on the heap instead of the stack because it's fairly large.
   auto locService = InfallibleAllocPolicy::new_<CodeAddressService>();
 
   StackTraceSet usedStackTraces;
-  usedStackTraces.init(512);
+  MOZ_ALWAYS_TRUE(usedStackTraces.init(512));
 
   PointerSet usedPcs;
-  usedPcs.init(512);
+  MOZ_ALWAYS_TRUE(usedPcs.init(512));
 
   size_t iscSize;
 
   static int analysisCount = 1;
   StatusMsg("Dump %d {\n", analysisCount++);
 
   JSONWriter writer(Move(aWriter));
   writer.Start();
@@ -1931,17 +1939,17 @@ AnalyzeImpl(UniquePtr<JSONWriteFunc> aWr
     {
       for (auto r = usedStackTraces.all(); !r.empty(); r.popFront()) {
         const StackTrace* const st = r.front();
         writer.StartArrayProperty(isc.ToIdString(st), writer.SingleLineStyle);
         {
           for (uint32_t i = 0; i < st->Length(); i++) {
             const void* pc = st->Pc(i);
             writer.StringElement(isc.ToIdString(pc));
-            usedPcs.put(pc);
+            MOZ_ALWAYS_TRUE(usedPcs.put(pc));
           }
         }
         writer.EndArray();
       }
     }
     writer.EndObject();
 
     StatusMsg("  Constructing the stack frame table...\n");