Bug 1207519 - Prevent HashTable shrink from ignoring allocation failures that may have been reported r=Waldo
author: Jon Coppeard <jcoppeard@mozilla.com>
date: Wed, 30 Sep 2015 11:34:49 +0100
changeset: 300385:2e82f6299d4a1084418f295c737be821b6074cdb
parent: 300384:3a2de2c3581ad331eaf8757f81dc78a31723caf1
child: 300386:4468b86a62039d9f63788845f357aeb76fa9ab5d
push id: 1001
push user: raliiev@mozilla.com
push date: Mon, 18 Jan 2016 19:06:03 +0000
treeherder: mozilla-release@8b89261f3ac4
reviewers: Waldo
bugs: 1207519
milestone: 44.0a1
js/public/HashTable.h
js/src/ds/LifoAlloc.h
js/src/jit/JitAllocPolicy.h
js/src/jsalloc.h
js/src/vm/MallocProvider.h
js/src/vm/Runtime.h
layout/style/nsNthIndexCache.h
memory/replace/dmd/DMD.cpp
mfbt/AllocPolicy.h
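
In summary, the patch adds a fallible, non-reporting tier of allocators (maybe_pod_malloc / maybe_pod_calloc / maybe_pod_realloc) to each affected allocation policy, layers the existing reporting pod_* methods on top of them, and switches HashTable's shrink and compaction paths to the non-reporting tier so that a failed shrink no longer leaves a reported-but-ignored OOM. The following is a minimal illustrative sketch of that two-tier layering; it is not part of the patch, the class name is hypothetical, and the shape mirrors the patched MallocAllocPolicy:

#include <stdlib.h>
#include "mozilla/TemplateLib.h"  // mozilla::tl::MulOverflowMask

// Sketch only: maybe_pod_* is fallible and silent; pod_* is the layer where a
// reporting policy (e.g. TempAllocPolicy) would do its error reporting.
class ExampleAllocPolicy
{
public:
  template <typename T>
  T* maybe_pod_malloc(size_t numElems)
  {
    if (numElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
      return nullptr;  // overflow: fail, but say nothing
    }
    return static_cast<T*>(malloc(numElems * sizeof(T)));
  }

  template <typename T>
  T* pod_malloc(size_t numElems)
  {
    T* p = maybe_pod_malloc<T>(numElems);
    // A reporting policy would report OOM/overflow here before returning
    // null; this silent sketch simply forwards the result.
    return p;
  }

  void free_(void* aPtr) { free(aPtr); }
  void reportAllocOverflow() const {}
};
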
--- a/js/public/HashTable.h
+++ b/js/public/HashTable.h
@@ -1119,21 +1119,34 @@ class HashTable : private AllocPolicy
         HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
 
         // Avoid reserved hash codes.
         if (!isLiveHash(keyHash))
             keyHash -= (sRemovedKey + 1);
         return keyHash & ~sCollisionBit;
     }
 
-    static Entry* createTable(AllocPolicy& alloc, uint32_t capacity)
+    enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+
+    static Entry* createTable(AllocPolicy& alloc, uint32_t capacity,
+                              FailureBehavior reportFailure = ReportFailure)
     {
         static_assert(sFreeKey == 0,
                       "newly-calloc'd tables have to be considered empty");
-        return alloc.template pod_calloc<Entry>(capacity);
+        if (reportFailure)
+            return alloc.template pod_calloc<Entry>(capacity);
+
+        return alloc.template maybe_pod_calloc<Entry>(capacity);
+    }
+
+    static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
+    {
+        static_assert(sFreeKey == 0,
+                      "newly-calloc'd tables have to be considered empty");
+        return alloc.template maybe_pod_calloc<Entry>(capacity);
     }
 
     static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity)
     {
         Entry* end = oldTable + capacity;
         for (Entry* e = oldTable; e < end; ++e)
             e->destroyIfLive();
         alloc.free_(oldTable);
@@ -1362,29 +1375,30 @@ class HashTable : private AllocPolicy
                 METER(stats.misses++);
                 return *entry;
             }
         }
     }
 
     enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
 
-    RebuildStatus changeTableSize(int deltaLog2)
+    RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
     {
         // Look, but don't touch, until we succeed in getting new entry store.
         Entry* oldTable = table;
         uint32_t oldCap = capacity();
         uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
         uint32_t newCapacity = JS_BIT(newLog2);
         if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
-            this->reportAllocOverflow();
+            if (reportFailure)
+                this->reportAllocOverflow();
             return RehashFailed;
         }
 
-        Entry* newTable = createTable(*this, newCapacity);
+        Entry* newTable = createTable(*this, newCapacity, reportFailure);
         if (!newTable)
             return RehashFailed;
 
         // We can't fail from here on, so update table parameters.
         setTableSizeLog2(newLog2);
         removedCount = 0;
         gen++;
         table = newTable;
@@ -1400,39 +1414,44 @@ class HashTable : private AllocPolicy
             }
         }
 
         // All entries have been destroyed, no need to destroyTable.
         this->free_(oldTable);
         return Rehashed;
     }
 
-    RebuildStatus checkOverloaded()
+    bool shouldCompressTable()
+    {
+        // Compress if a quarter or more of all entries are removed.
+        return removedCount >= (capacity() >> 2);
+    }
+
+    RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
     {
         if (!overloaded())
             return NotOverloaded;
 
-        // Compress if a quarter or more of all entries are removed.
         int deltaLog2;
-        if (removedCount >= (capacity() >> 2)) {
+        if (shouldCompressTable()) {
             METER(stats.compresses++);
             deltaLog2 = 0;
         } else {
             METER(stats.grows++);
             deltaLog2 = 1;
         }
 
-        return changeTableSize(deltaLog2);
+        return changeTableSize(deltaLog2, reportFailure);
     }
 
     // Infallibly rehash the table if we are overloaded with removals.
     void checkOverRemoved()
     {
         if (overloaded()) {
-            if (checkOverloaded() == RehashFailed)
+            if (checkOverloaded(DontReportFailure) == RehashFailed)
                 rehashTableInPlace();
         }
     }
 
     void remove(Entry& e)
     {
         MOZ_ASSERT(table);
         METER(stats.removes++);
@@ -1449,35 +1468,34 @@ class HashTable : private AllocPolicy
         mutationCount++;
 #endif
     }
 
     void checkUnderloaded()
     {
         if (underloaded()) {
             METER(stats.shrinks++);
-            (void) changeTableSize(-1);
+            (void) changeTableSize(-1, DontReportFailure);
         }
     }
 
     // Resize the table down to the largest capacity which doesn't underload the
     // table.  Since we call checkUnderloaded() on every remove, you only need
     // to call this after a bulk removal of items done without calling remove().
     void compactIfUnderloaded()
     {
         int32_t resizeLog2 = 0;
         uint32_t newCapacity = capacity();
         while (wouldBeUnderloaded(newCapacity, entryCount)) {
             newCapacity = newCapacity >> 1;
             resizeLog2--;
         }
 
-        if (resizeLog2 != 0) {
-            changeTableSize(resizeLog2);
-        }
+        if (resizeLog2 != 0)
+            (void) changeTableSize(resizeLog2, DontReportFailure);
     }
 
     // This is identical to changeTableSize(currentSize), but without requiring
     // a second table.  We do this by recycling the collision bits to tell us if
     // the element is already inserted or still waiting to be inserted.  Since
     // already-inserted elements win any conflicts, we get the same table as we
     // would have gotten through random insertion order.
     void rehashTableInPlace()
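
A note on the HashTable changes above: a failed shrink or compaction is benign, because the table simply keeps its current, larger entry store, so reporting the OOM would leave an error that no caller checks. The FailureBehavior parameter captures that distinction; here is the pattern in isolation (a sketch with hypothetical names, not code from the tree):

#include <stdint.h>

// Sketch: caller-selected reporting, as in createTable()/changeTableSize().
enum FailureBehavior { DontReportFailure = false, ReportFailure = true };

template <typename AllocPolicy, typename Entry>
static Entry*
AllocEntryStore(AllocPolicy& alloc, uint32_t capacity, FailureBehavior report)
{
    // Only the reporting tier may leave a pending error behind on failure.
    if (report == ReportFailure)
        return alloc.template pod_calloc<Entry>(capacity);
    return alloc.template maybe_pod_calloc<Entry>(capacity);
}

// A shrink that is allowed to fail quietly then looks like:
//   if (!AllocEntryStore<Policy, Entry>(alloc, newCapacity, DontReportFailure))
//       return;   // keep the old storage; nothing was reported
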
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -526,40 +526,52 @@ class LifoAllocPolicy
 {
     LifoAlloc& alloc_;
 
   public:
     MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc)
       : alloc_(alloc)
     {}
     template <typename T>
-    T* pod_malloc(size_t numElems) {
+    T* maybe_pod_malloc(size_t numElems) {
         if (MOZ_UNLIKELY(numElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value))
             return nullptr;
         size_t bytes = numElems * sizeof(T);
         void* p = fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
         return static_cast<T*>(p);
     }
     template <typename T>
-    T* pod_calloc(size_t numElems) {
+    T* maybe_pod_calloc(size_t numElems) {
         T* p = pod_malloc<T>(numElems);
         if (fb == Fallible && !p)
             return nullptr;
         memset(p, 0, numElems * sizeof(T));
         return p;
     }
     template <typename T>
-    T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+    T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
         T* n = pod_malloc<T>(newSize);
         if (fb == Fallible && !n)
             return nullptr;
         MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
         memcpy(n, p, Min(oldSize * sizeof(T), newSize * sizeof(T)));
         return n;
     }
+    template <typename T>
+    T* pod_malloc(size_t numElems) {
+        return maybe_pod_malloc<T>(numElems);
+    }
+    template <typename T>
+    T* pod_calloc(size_t numElems) {
+        return maybe_pod_calloc<T>(numElems);
+    }
+    template <typename T>
+    T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+        return maybe_pod_realloc<T>(p, oldSize, newSize);
+    }
     void free_(void* p) {
     }
     void reportAllocOverflow() const {
     }
     bool checkSimulatedOOM() const {
         return fb == Infallible || !js::oom::ShouldFailWithOOM();
     }
 };
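
For LifoAllocPolicy the new pod_* methods are plain forwarders: this policy never reports errors itself, and whether an allocation can fail at all is decided by its Fallible/Infallible template parameter rather than by which tier is called. A hedged usage sketch, assuming the usual js::Vector / LifoAlloc pairing used elsewhere in the engine:

#include "ds/LifoAlloc.h"  // js::LifoAlloc, js::LifoAllocPolicy
#include "js/Vector.h"     // js::Vector

// Sketch (hypothetical helper): vector storage drawn from a LifoAlloc arena.
// With the Fallible policy, append() can return false on OOM and nothing is
// reported; the Infallible variant crashes rather than returning null.
static bool
ExampleLifoVector(js::LifoAlloc& lifo)
{
    js::LifoAllocPolicy<js::Fallible> policy(lifo);
    js::Vector<uint32_t, 8, js::LifoAllocPolicy<js::Fallible>> values(policy);
    return values.append(42u);
}
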
--- a/js/src/jit/JitAllocPolicy.h
+++ b/js/src/jit/JitAllocPolicy.h
@@ -73,57 +73,73 @@ class JitAllocPolicy
 {
     TempAllocator& alloc_;
 
   public:
     MOZ_IMPLICIT JitAllocPolicy(TempAllocator& alloc)
       : alloc_(alloc)
     {}
     template <typename T>
-    T* pod_malloc(size_t numElems) {
+    T* maybe_pod_malloc(size_t numElems) {
         if (MOZ_UNLIKELY(numElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value))
             return nullptr;
         return static_cast<T*>(alloc_.allocate(numElems * sizeof(T)));
     }
     template <typename T>
-    T* pod_calloc(size_t numElems) {
+    T* maybe_pod_calloc(size_t numElems) {
         T* p = pod_malloc<T>(numElems);
         if (MOZ_LIKELY(p))
             memset(p, 0, numElems * sizeof(T));
         return p;
     }
     template <typename T>
-    T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+    T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
         T* n = pod_malloc<T>(newSize);
         if (MOZ_UNLIKELY(!n))
             return n;
         MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
         memcpy(n, p, Min(oldSize * sizeof(T), newSize * sizeof(T)));
         return n;
     }
+    template <typename T>
+    T* pod_malloc(size_t numElems) {
+        return maybe_pod_malloc<T>(numElems);
+    }
+    template <typename T>
+    T* pod_calloc(size_t numElems) {
+        return maybe_pod_calloc<T>(numElems);
+    }
+    template <typename T>
+    T* pod_realloc(T* ptr, size_t oldSize, size_t newSize) {
+        return maybe_pod_realloc<T>(ptr, oldSize, newSize);
+    }
     void free_(void* p) {
     }
     void reportAllocOverflow() const {
     }
     bool checkSimulatedOOM() const {
         return !js::oom::ShouldFailWithOOM();
     }
 };
 
 class OldJitAllocPolicy
 {
   public:
     OldJitAllocPolicy()
     {}
     template <typename T>
-    T* pod_malloc(size_t numElems) {
+    T* maybe_pod_malloc(size_t numElems) {
         if (MOZ_UNLIKELY(numElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value))
             return nullptr;
         return static_cast<T*>(GetJitContext()->temp->allocate(numElems * sizeof(T)));
     }
+    template <typename T>
+    T* pod_malloc(size_t numElems) {
+        return maybe_pod_malloc<T>(numElems);
+    }
     void free_(void* p) {
     }
     void reportAllocOverflow() const {
     }
     bool checkSimulatedOOM() const {
         return !js::oom::ShouldFailWithOOM();
     }
 };
--- a/js/src/jsalloc.h
+++ b/js/src/jsalloc.h
@@ -26,20 +26,25 @@ enum class AllocFunction {
 };
 
 struct ContextFriendFields;
 
 /* Policy for using system memory functions and doing no error reporting. */
 class SystemAllocPolicy
 {
   public:
-    template <typename T> T* pod_malloc(size_t numElems) { return js_pod_malloc<T>(numElems); }
-    template <typename T> T* pod_calloc(size_t numElems) { return js_pod_calloc<T>(numElems); }
+    template <typename T> T* maybe_pod_malloc(size_t numElems) { return js_pod_malloc<T>(numElems); }
+    template <typename T> T* maybe_pod_calloc(size_t numElems) { return js_pod_calloc<T>(numElems); }
+    template <typename T> T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+        return js_pod_realloc<T>(p, oldSize, newSize);
+    }
+    template <typename T> T* pod_malloc(size_t numElems) { return maybe_pod_malloc<T>(numElems); }
+    template <typename T> T* pod_calloc(size_t numElems) { return maybe_pod_calloc<T>(numElems); }
     template <typename T> T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
-        return js_pod_realloc<T>(p, oldSize, newSize);
+        return maybe_pod_realloc<T>(p, oldSize, newSize);
     }
     void free_(void* p) { js_free(p); }
     void reportAllocOverflow() const {}
     bool checkSimulatedOOM() const {
         return !js::oom::ShouldFailWithOOM();
     }
 };
 
@@ -66,34 +71,49 @@ class TempAllocPolicy
     JS_FRIEND_API(void*) onOutOfMemory(AllocFunction allocFunc, size_t nbytes,
                                        void* reallocPtr = nullptr);
 
   public:
     MOZ_IMPLICIT TempAllocPolicy(JSContext* cx) : cx_((ContextFriendFields*) cx) {} // :(
     MOZ_IMPLICIT TempAllocPolicy(ContextFriendFields* cx) : cx_(cx) {}
 
     template <typename T>
+    T* maybe_pod_malloc(size_t numElems) {
+        return js_pod_malloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* maybe_pod_calloc(size_t numElems) {
+        return js_pod_calloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* maybe_pod_realloc(T* prior, size_t oldSize, size_t newSize) {
+        return js_pod_realloc<T>(prior, oldSize, newSize);
+    }
+
+    template <typename T>
     T* pod_malloc(size_t numElems) {
-        T* p = js_pod_malloc<T>(numElems);
+        T* p = maybe_pod_malloc<T>(numElems);
         if (MOZ_UNLIKELY(!p))
             p = static_cast<T*>(onOutOfMemory(AllocFunction::Malloc, numElems * sizeof(T)));
         return p;
     }
 
     template <typename T>
     T* pod_calloc(size_t numElems) {
-        T* p = js_pod_calloc<T>(numElems);
+        T* p = maybe_pod_calloc<T>(numElems);
         if (MOZ_UNLIKELY(!p))
             p = static_cast<T*>(onOutOfMemory(AllocFunction::Calloc, numElems * sizeof(T)));
         return p;
     }
 
     template <typename T>
     T* pod_realloc(T* prior, size_t oldSize, size_t newSize) {
-        T* p2 = js_pod_realloc<T>(prior, oldSize, newSize);
+        T* p2 = maybe_pod_realloc<T>(prior, oldSize, newSize);
         if (MOZ_UNLIKELY(!p2))
             p2 = static_cast<T*>(onOutOfMemory(AllocFunction::Realloc, newSize * sizeof(T), prior));
         return p2;
     }
 
     void free_(void* p) {
         js_free(p);
     }
--- a/js/src/vm/MallocProvider.h
+++ b/js/src/vm/MallocProvider.h
@@ -18,18 +18,16 @@
  *     These allocators are for system memory whose lifetime is not associated
  *     with a GC thing. See js/src/jsalloc.h.
  *
  *       - SystemAllocPolicy: No extra functionality over bare allocators.
  *
  *       - TempAllocPolicy: Adds automatic error reporting to the provided
  *         Context when allocations fail.
  *
- *       - ContextAllocPolicy: forwards to the JSContext MallocProvider.
- *
  *       - RuntimeAllocPolicy: forwards to the JSRuntime MallocProvider.
  *
  *   - MallocProvider. A mixin base class that handles automatically updating
  *     the GC's state in response to allocations that are tied to a GC lifetime
  *     or are for a particular GC purpose. These allocators must only be used
  *     for memory that will be freed when a GC thing is swept.
  *
  *       - gc::Zone:  Automatically triggers zone GC.
@@ -48,32 +46,60 @@
 #include "js/Utility.h"
 
 namespace js {
 
 template<class Client>
 struct MallocProvider
 {
     template <class T>
+    T* maybe_pod_malloc(size_t numElems) {
+        size_t bytes = numElems * sizeof(T);
+        T* p = js_pod_malloc<T>(numElems);
+        if (MOZ_LIKELY(p))
+            client()->updateMallocCounter(bytes);
+        return p;
+    }
+
+    template <class T>
+    T* maybe_pod_calloc(size_t numElems) {
+        size_t bytes = numElems * sizeof(T);
+        T* p = js_pod_calloc<T>(numElems);
+        if (MOZ_LIKELY(p))
+            client()->updateMallocCounter(bytes);
+        return p;
+    }
+
+    template <class T>
+    T* maybe_pod_realloc(T* prior, size_t oldSize, size_t newSize) {
+        T* p = js_pod_realloc(prior, oldSize, newSize);
+        if (MOZ_LIKELY(p)) {
+            // For compatibility we do not account for realloc that decreases
+            // previously allocated memory.
+            if (newSize > oldSize)
+                client()->updateMallocCounter((newSize - oldSize) * sizeof(T));
+        }
+        return p;
+    }
+
+    template <class T>
     T* pod_malloc() {
         return pod_malloc<T>(1);
     }
 
     template <class T>
     T* pod_malloc(size_t numElems) {
-        size_t bytes = numElems * sizeof(T);
-        T* p = js_pod_malloc<T>(numElems);
-        if (MOZ_LIKELY(p)) {
-            client()->updateMallocCounter(bytes);
+        T* p = maybe_pod_malloc<T>(numElems);
+        if (MOZ_LIKELY(p))
             return p;
-        }
         if (numElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
             client()->reportAllocationOverflow();
             return nullptr;
         }
+        size_t bytes = numElems * sizeof(T);
         p = (T*)client()->onOutOfMemory(AllocFunction::Malloc, bytes);
         if (p)
             client()->updateMallocCounter(bytes);
         return p;
     }
 
     template <class T, class U>
     T* pod_malloc_with_extra(size_t numExtra) {
@@ -105,26 +131,24 @@ struct MallocProvider
 
     template <class T>
     T* pod_calloc() {
         return pod_calloc<T>(1);
     }
 
     template <class T>
     T* pod_calloc(size_t numElems) {
-        size_t bytes = numElems * sizeof(T);
-        T* p = js_pod_calloc<T>(numElems);
-        if (MOZ_LIKELY(p)) {
-            client()->updateMallocCounter(bytes);
+        T* p = maybe_pod_calloc<T>(numElems);
+        if (MOZ_LIKELY(p))
             return p;
-        }
         if (numElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
             client()->reportAllocationOverflow();
             return nullptr;
         }
+        size_t bytes = numElems * sizeof(T);
         p = (T*)client()->onOutOfMemory(AllocFunction::Calloc, bytes);
         if (p)
             client()->updateMallocCounter(bytes);
         return p;
     }
 
     template <class T, class U>
     T* pod_calloc_with_extra(size_t numExtra) {
@@ -152,24 +176,19 @@ struct MallocProvider
     mozilla::UniquePtr<T[], JS::FreePolicy>
     make_zeroed_pod_array(size_t numElems)
     {
         return mozilla::UniquePtr<T[], JS::FreePolicy>(pod_calloc<T>(numElems));
     }
 
     template <class T>
     T* pod_realloc(T* prior, size_t oldSize, size_t newSize) {
-        T* p = js_pod_realloc(prior, oldSize, newSize);
-        if (MOZ_LIKELY(p)) {
-            // For compatibility we do not account for realloc that decreases
-            // previously allocated memory.
-            if (newSize > oldSize)
-                client()->updateMallocCounter((newSize - oldSize) * sizeof(T));
+        T* p = maybe_pod_realloc(prior, oldSize, newSize);
+        if (MOZ_LIKELY(p))
             return p;
-        }
         if (newSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
             client()->reportAllocationOverflow();
             return nullptr;
         }
         p = (T*)client()->onOutOfMemory(AllocFunction::Realloc, newSize * sizeof(T), prior);
         if (p && newSize > oldSize)
             client()->updateMallocCounter((newSize - oldSize) * sizeof(T));
         return p;
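
The MallocProvider header comment above describes the policy hierarchy; with this patch, MallocProvider clients expose both tiers, so callers can choose whether an allocation failure is reported. A brief sketch, assuming a JSContext* cx (a MallocProvider client in the engine); the helper name is hypothetical:

// Sketch: the two tiers as seen by a MallocProvider client.
static bool
ExampleBothTiers(JSContext* cx, size_t nbytes)
{
    // Non-reporting tier: failure is recoverable and leaves no error on cx.
    uint8_t* quiet = cx->maybe_pod_malloc<uint8_t>(nbytes);
    if (!quiet) {
        // e.g. fall back to a smaller request here
    }

    // Reporting tier: on failure, onOutOfMemory() has already reported on cx.
    uint8_t* loud = cx->pod_malloc<uint8_t>(nbytes);
    if (!loud) {
        js_free(quiet);
        return false;
    }

    js_free(quiet);
    js_free(loud);
    return true;
}
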
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -2066,16 +2066,31 @@ SetValueRangeToNull(Value* vec, size_t l
 class RuntimeAllocPolicy
 {
     JSRuntime* const runtime;
 
   public:
     MOZ_IMPLICIT RuntimeAllocPolicy(JSRuntime* rt) : runtime(rt) {}
 
     template <typename T>
+    T* maybe_pod_malloc(size_t numElems) {
+        return runtime->maybe_pod_malloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* maybe_pod_calloc(size_t numElems) {
+        return runtime->maybe_pod_calloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+        return runtime->maybe_pod_realloc<T>(p, oldSize, newSize);
+    }
+
+    template <typename T>
     T* pod_malloc(size_t numElems) {
         return runtime->pod_malloc<T>(numElems);
     }
 
     template <typename T>
     T* pod_calloc(size_t numElems) {
         return runtime->pod_calloc<T>(numElems);
     }
--- a/layout/style/nsNthIndexCache.h
+++ b/layout/style/nsNthIndexCache.h
@@ -58,18 +58,23 @@ private:
   // If 0, the node is not at any index in its parent.
   typedef int32_t CacheEntry;
 
   class SystemAllocPolicy {
   public:
     void *malloc_(size_t bytes) { return ::malloc(bytes); }
 
     template <typename T>
+    T *maybe_pod_calloc(size_t numElems) {
+      return static_cast<T *>(::calloc(numElems, sizeof(T)));
+    }
+
+    template <typename T>
     T *pod_calloc(size_t numElems) {
-      return static_cast<T *>(::calloc(numElems, sizeof(T)));
+      return maybe_pod_calloc<T>(numElems);
     }
 
     void *realloc_(void *p, size_t bytes) { return ::realloc(p, bytes); }
     void free_(void *p) { ::free(p); }
     void reportAllocOverflow() const {}
     bool checkSimulatedOOM() const { return true; }
   };
 
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -129,63 +129,83 @@ static bool gIsDMDInitialized = false;
 // It would be nice if we could use the InfallibleAllocPolicy from mozalloc,
 // but DMD cannot use mozalloc.
 //
 class InfallibleAllocPolicy
 {
   static void ExitOnFailure(const void* aP);
 
 public:
+  template <typename T>
+  static T* maybe_pod_malloc(size_t aNumElems)
+  {
+    if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
+      return nullptr;
+    return (T*)gMallocTable->malloc(aNumElems * sizeof(T));
+  }
+
+  template <typename T>
+  static T* maybe_pod_calloc(size_t aNumElems)
+  {
+    return (T*)gMallocTable->calloc(aNumElems, sizeof(T));
+  }
+
+  template <typename T>
+  static T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+  {
+    if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
+      return nullptr;
+    return (T*)gMallocTable->realloc(aPtr, aNewSize * sizeof(T));
+  }
+
   static void* malloc_(size_t aSize)
   {
     void* p = gMallocTable->malloc(aSize);
     ExitOnFailure(p);
     return p;
   }
 
   template <typename T>
   static T* pod_malloc(size_t aNumElems)
   {
-    if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
-      return nullptr;
-    void* p = gMallocTable->malloc(aNumElems * sizeof(T));
+    T* p = maybe_pod_malloc<T>(aNumElems);
     ExitOnFailure(p);
-    return (T*)p;
+    return p;
   }
 
   static void* calloc_(size_t aSize)
   {
     void* p = gMallocTable->calloc(1, aSize);
     ExitOnFailure(p);
     return p;
   }
 
   template <typename T>
   static T* pod_calloc(size_t aNumElems)
   {
-    void* p = gMallocTable->calloc(aNumElems, sizeof(T));
+    T* p = maybe_pod_calloc<T>(aNumElems);
     ExitOnFailure(p);
-    return (T*)p;
+    return p;
   }
 
   // This realloc_ is the one we use for direct reallocs within DMD.
   static void* realloc_(void* aPtr, size_t aNewSize)
   {
     void* p = gMallocTable->realloc(aPtr, aNewSize);
     ExitOnFailure(p);
     return p;
   }
 
   // This realloc_ is required for this to be a JS container AllocPolicy.
   template <typename T>
   static T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
   {
-    if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value)
-      return nullptr;
-    return (T*)InfallibleAllocPolicy::realloc_((void *)aPtr, aNewSize * sizeof(T));
+    T* p = maybe_pod_realloc(aPtr, aOldSize, aNewSize);
+    ExitOnFailure(p);
+    return p;
   }
 
   static void* memalign_(size_t aAlignment, size_t aSize)
   {
     void* p = gMallocTable->memalign(aAlignment, aSize);
     ExitOnFailure(p);
     return p;
   }
--- a/mfbt/AllocPolicy.h
+++ b/mfbt/AllocPolicy.h
@@ -21,16 +21,23 @@ namespace mozilla {
 
 /*
  * Allocation policies are used to implement the standard allocation behaviors
  * in a customizable way.  Additionally, custom behaviors may be added to these
  * behaviors, such as additionally reporting an error through an out-of-band
  * mechanism when OOM occurs.  The concept modeled here is as follows:
  *
  *  - public copy constructor, assignment, destructor
+ *  - template <typename T> T* maybe_pod_malloc(size_t)
+ *      Fallible, but doesn't report an error on OOM.
+ *  - template <typename T> T* maybe_pod_calloc(size_t)
+ *      Fallible, but doesn't report an error on OOM.
+ *  - template <typename T> T* maybe_pod_realloc(T*, size_t, size_t)
+ *      Fallible, but doesn't report an error on OOM.  The old allocation
+ *      size is passed in, in addition to the new allocation size requested.
  *  - template <typename T> T* pod_malloc(size_t)
  *      Responsible for OOM reporting when null is returned.
  *  - template <typename T> T* pod_calloc(size_t)
  *      Responsible for OOM reporting when null is returned.
  *  - template <typename T> T* pod_realloc(T*, size_t, size_t)
  *      Responsible for OOM reporting when null is returned.  The old allocation
  *      size is passed in, in addition to the new allocation size requested.
  *  - void free_(void*)
@@ -59,39 +66,57 @@ namespace mozilla {
 /*
  * A policy that straightforwardly uses malloc/calloc/realloc/free and adds no
  * extra behaviors.
  */
 class MallocAllocPolicy
 {
 public:
   template <typename T>
-  T* pod_malloc(size_t aNumElems)
+  T* maybe_pod_malloc(size_t aNumElems)
   {
     if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
       return nullptr;
     }
     return static_cast<T*>(malloc(aNumElems * sizeof(T)));
   }
 
   template <typename T>
-  T* pod_calloc(size_t aNumElems)
+  T* maybe_pod_calloc(size_t aNumElems)
   {
     return static_cast<T*>(calloc(aNumElems, sizeof(T)));
   }
 
   template <typename T>
-  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+  T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
   {
     if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
       return nullptr;
     }
     return static_cast<T*>(realloc(aPtr, aNewSize * sizeof(T)));
   }
 
+  template <typename T>
+  T* pod_malloc(size_t aNumElems)
+  {
+    return maybe_pod_malloc<T>(aNumElems);
+  }
+
+  template <typename T>
+  T* pod_calloc(size_t aNumElems)
+  {
+    return maybe_pod_calloc<T>(aNumElems);
+  }
+
+  template <typename T>
+  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize)
+  {
+    return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
+  }
+
   void free_(void* aPtr)
   {
     free(aPtr);
   }
 
   void reportAllocOverflow() const
   {
   }
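
Taken together with the concept documentation added to this header, generic code written against an AllocPolicy can now pick the tier that matches how it handles failure: the reporting pod_* tier when the caller cannot make progress without the memory, and the silent maybe_pod_* tier when failure is recoverable. A concluding sketch (the container and its names are hypothetical, for a POD element type):

#include <stddef.h>

// Sketch: a toy container choosing between the two tiers of its AllocPolicy.
template <typename T, typename AllocPolicy>
class ExampleBuffer : private AllocPolicy
{
  T* mElems;
  size_t mCap;

public:
  explicit ExampleBuffer(AllocPolicy aPolicy = AllocPolicy())
    : AllocPolicy(aPolicy), mElems(nullptr), mCap(0)
  {}

  ~ExampleBuffer() { this->free_(mElems); }

  // Growing must succeed for the caller to make progress, so use the
  // reporting tier: if it fails, the policy has already reported the OOM.
  bool grow(size_t aNewCap)
  {
    T* p = this->template pod_realloc<T>(mElems, mCap, aNewCap);
    if (!p) {
      return false;
    }
    mElems = p;
    mCap = aNewCap;
    return true;
  }

  // Shrinking is only an optimization, so use the non-reporting tier and
  // keep the old storage on failure -- the same reasoning the HashTable
  // shrink path above now follows.
  void shrink(size_t aNewCap)
  {
    T* p = this->template maybe_pod_realloc<T>(mElems, mCap, aNewCap);
    if (!p) {
      return;
    }
    mElems = p;
    mCap = aNewCap;
  }
};

Instantiated with MallocAllocPolicy the two tiers behave identically; with a reporting policy such as SpiderMonkey's TempAllocPolicy, only grow() would surface an error to the caller's context.
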