Bug 1201057 - Use AutoEnterOOMUnsafeRegion in places where we can't handle OOM r=terrence
author     Jon Coppeard <jcoppeard@mozilla.com>
date       Mon, 21 Sep 2015 14:31:51 +0100
changeset 296995 40fae3130e1a816d0074f4c72cdc3e5e6ff5c1e6
parent 296994 1d051e09610645d5b33321293b52103641f40268
child 296996 ab53cb21bbbea9b7b1824633f4b34c42866e0985
push id    5392
push user  raliiev@mozilla.com
push date  Mon, 14 Dec 2015 20:08:23 +0000
reviewers  terrence
bugs       1201057
milestone  43.0a1
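The patch converts call sites that previously called CrashAtUnhandlableOOM() directly: an AutoEnterOOMUnsafeRegion is placed on the stack around code that cannot recover from allocation failure, and its crash() method is invoked when a fallible operation fails. A few helpers also change signature so the failure is either handled under a region held by the caller (LoopUnroller::makeReplacementInstruction, NativeObject::fillInAfterSwap) or crashed inside the helper itself (Debugger::recomputeDebuggeeZoneSet). A minimal sketch of the converted idiom, mirroring the call sites in the diff below (illustrative only; InfallibleAppend is a hypothetical helper and SpiderMonkey-internal includes are omitted):

    // Sketch of the idiom this patch introduces at each call site.
    static void
    InfallibleAppend(js::Vector<int, 0, js::SystemAllocPolicy>& vec, int value)
    {
        // Declare the region in which OOM cannot be handled; keep its scope
        // as tight as possible (compare the extra braces added in
        // TypedObject.cpp and RegExpEngine.cpp below).
        js::AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!vec.append(value))                  // fallible allocation
            oomUnsafe.crash("InfallibleAppend"); // reports the reason and aborts
    }
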
js/src/builtin/TestingFunctions.cpp
js/src/builtin/TypedObject.cpp
js/src/ds/Fifo.h
js/src/ds/LifoAlloc.h
js/src/gc/Marking.cpp
js/src/gc/Nursery.cpp
js/src/gc/StoreBuffer.h
js/src/gc/Zone.h
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/irregexp/RegExpEngine.cpp
js/src/irregexp/RegExpMacroAssembler.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonBuilder.cpp
js/src/jit/LoopUnroller.cpp
js/src/jit/MIRGraph.cpp
js/src/jit/arm/Simulator-arm.cpp
js/src/jscompartment.cpp
js/src/jsgc.cpp
js/src/jsobj.cpp
js/src/vm/ArgumentsObject.cpp
js/src/vm/ArrayBufferObject.cpp
js/src/vm/Debugger.cpp
js/src/vm/Debugger.h
js/src/vm/HelperThreads.cpp
js/src/vm/NativeObject.cpp
js/src/vm/NativeObject.h
js/src/vm/Runtime.h
js/src/vm/SavedStacks.cpp
js/src/vm/TypeInference.cpp
js/src/vm/UnboxedObject.cpp
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -1379,50 +1379,52 @@ DisplayName(JSContext* cx, unsigned argc
     JSString* str = fun->displayAtom();
     args.rval().setString(str ? str : cx->runtime()->emptyString);
     return true;
 }
 
 static JSObject*
 ShellObjectMetadataCallback(JSContext* cx, JSObject*)
 {
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+
     RootedObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx));
     if (!obj)
-        CrashAtUnhandlableOOM("ShellObjectMetadataCallback");
+        oomUnsafe.crash("ShellObjectMetadataCallback");
 
     RootedObject stack(cx, NewDenseEmptyArray(cx));
     if (!stack)
-        CrashAtUnhandlableOOM("ShellObjectMetadataCallback");
+        oomUnsafe.crash("ShellObjectMetadataCallback");
 
     static int createdIndex = 0;
     createdIndex++;
 
     if (!JS_DefineProperty(cx, obj, "index", createdIndex, 0,
                            JS_STUBGETTER, JS_STUBSETTER))
     {
-        CrashAtUnhandlableOOM("ShellObjectMetadataCallback");
+        oomUnsafe.crash("ShellObjectMetadataCallback");
     }
 
     if (!JS_DefineProperty(cx, obj, "stack", stack, 0,
                            JS_STUBGETTER, JS_STUBSETTER))
     {
-        CrashAtUnhandlableOOM("ShellObjectMetadataCallback");
+        oomUnsafe.crash("ShellObjectMetadataCallback");
     }
 
     int stackIndex = 0;
     RootedId id(cx);
     RootedValue callee(cx);
     for (NonBuiltinScriptFrameIter iter(cx); !iter.done(); ++iter) {
         if (iter.isFunctionFrame() && iter.compartment() == cx->compartment()) {
             id = INT_TO_JSID(stackIndex);
             RootedObject callee(cx, iter.callee(cx));
             if (!JS_DefinePropertyById(cx, stack, id, callee, 0,
                                        JS_STUBGETTER, JS_STUBSETTER))
             {
-                CrashAtUnhandlableOOM("ShellObjectMetadataCallback");
+                oomUnsafe.crash("ShellObjectMetadataCallback");
             }
             stackIndex++;
         }
     }
 
     return obj;
 }
 
--- a/js/src/builtin/TypedObject.cpp
+++ b/js/src/builtin/TypedObject.cpp
@@ -1515,18 +1515,21 @@ OutlineTypedObject::attach(JSContext* cx
     if (buffer.forInlineTypedObject()) {
         InlineTypedObject& realOwner = buffer.firstView()->as<InlineTypedObject>();
         attach(cx, realOwner, offset);
         return;
     }
 
     buffer.setHasTypedObjectViews();
 
-    if (!buffer.addView(cx, this))
-        CrashAtUnhandlableOOM("TypedObject::attach");
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (!buffer.addView(cx, this))
+            oomUnsafe.crash("TypedObject::attach");
+    }
 
     setOwnerAndData(&buffer, buffer.dataPointer() + offset);
 }
 
 void
 OutlineTypedObject::attach(JSContext* cx, TypedObject& typedObj, int32_t offset)
 {
     MOZ_ASSERT(!isAttached());
@@ -3021,18 +3024,19 @@ TraceListVisitor::visitReference(Referen
     VectorType* offsets;
     switch (descr.type()) {
       case ReferenceTypeDescr::TYPE_ANY: offsets = &valueOffsets; break;
       case ReferenceTypeDescr::TYPE_OBJECT: offsets = &objectOffsets; break;
       case ReferenceTypeDescr::TYPE_STRING: offsets = &stringOffsets; break;
       default: MOZ_CRASH("Invalid kind");
     }
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!offsets->append((uintptr_t) mem))
-        CrashAtUnhandlableOOM("TraceListVisitor::visitReference");
+        oomUnsafe.crash("TraceListVisitor::visitReference");
 }
 
 bool
 TraceListVisitor::fillList(Vector<int32_t>& entries)
 {
     return entries.appendAll(stringOffsets) &&
            entries.append(-1) &&
            entries.appendAll(objectOffsets) &&
--- a/js/src/ds/Fifo.h
+++ b/js/src/ds/Fifo.h
@@ -130,18 +130,19 @@ class Fifo
     bool popFront() {
         MOZ_ASSERT(!empty());
         T t(mozilla::Move(front()));
         front_.popBack();
         if (!fixup()) {
             // Attempt to remain in a valid state by reinserting the element
             // back at the front. If we can't remain in a valid state in the
             // face of OOMs, crash.
+            AutoEnterOOMUnsafeRegion oomUnsafe;
             if (!front_.append(mozilla::Move(t)))
-                CrashAtUnhandlableOOM("js::Fifo::popFront");
+                oomUnsafe.crash("js::Fifo::popFront");
             return false;
         }
         return true;
     }
 
     // Clear all elements from the queue.
     void clear() {
         front_.clear();
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -272,19 +272,20 @@ class LifoAlloc
     MOZ_ALWAYS_INLINE
     void* alloc(size_t n) {
         JS_OOM_POSSIBLY_FAIL();
         return allocImpl(n);
     }
 
     MOZ_ALWAYS_INLINE
     void* allocInfallible(size_t n) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         if (void* result = allocImpl(n))
             return result;
-        CrashAtUnhandlableOOM("LifoAlloc::allocInfallible");
+        oomUnsafe.crash("LifoAlloc::allocInfallible");
         return nullptr;
     }
 
     // Ensures that enough space exists to satisfy N bytes worth of
     // allocation requests, not necessarily contiguous. Note that this does
     // not guarantee a successful single allocation of N bytes.
     MOZ_ALWAYS_INLINE
     bool ensureUnusedApproximate(size_t n) {
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -2055,19 +2055,20 @@ js::TenuringTracer::moveToTenured(JSObje
 
     AllocKind dstKind = src->allocKindForTenure(nursery());
     Zone* zone = src->zone();
 
     TenuredCell* t = zone->arenas.allocateFromFreeList(dstKind, Arena::thingSize(dstKind));
     if (!t) {
         zone->arenas.checkEmptyFreeList(dstKind);
         AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         t = zone->arenas.allocateFromArena(zone, dstKind, maybeStartBackgroundAllocation);
         if (!t)
-            CrashAtUnhandlableOOM("Failed to allocate object while tenuring.");
+            oomUnsafe.crash("Failed to allocate object while tenuring.");
     }
     JSObject* dst = reinterpret_cast<JSObject*>(t);
     tenuredSize += moveObjectToTenured(dst, src, dstKind);
 
     RelocationOverlay* overlay = RelocationOverlay::fromCell(src);
     overlay->forwardTo(dst);
     insertIntoFixupList(overlay);
 
@@ -2207,19 +2208,24 @@ js::TenuringTracer::moveSlotsToTenured(N
 
     if (!nursery().isInside(src->slots_)) {
         nursery().removeMallocedBuffer(src->slots_);
         return 0;
     }
 
     Zone* zone = src->zone();
     size_t count = src->numDynamicSlots();
-    dst->slots_ = zone->pod_malloc<HeapSlot>(count);
-    if (!dst->slots_)
-        CrashAtUnhandlableOOM("Failed to allocate slots while tenuring.");
+
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        dst->slots_ = zone->pod_malloc<HeapSlot>(count);
+        if (!dst->slots_)
+            oomUnsafe.crash("Failed to allocate slots while tenuring.");
+    }
+
     PodCopy(dst->slots_, src->slots_, count);
     nursery().setSlotsForwardingPointer(src->slots_, dst->slots_, count);
     return count * sizeof(HeapSlot);
 }
 
 size_t
 js::TenuringTracer::moveElementsToTenured(NativeObject* dst, NativeObject* src, AllocKind dstKind)
 {
@@ -2244,19 +2250,24 @@ js::TenuringTracer::moveElementsToTenure
         dst->as<ArrayObject>().setFixedElements();
         dstHeader = dst->as<ArrayObject>().getElementsHeader();
         js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
         nursery().setElementsForwardingPointer(srcHeader, dstHeader, nslots);
         return nslots * sizeof(HeapSlot);
     }
 
     MOZ_ASSERT(nslots >= 2);
-    dstHeader = reinterpret_cast<ObjectElements*>(zone->pod_malloc<HeapSlot>(nslots));
-    if (!dstHeader)
-        CrashAtUnhandlableOOM("Failed to allocate elements while tenuring.");
+
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        dstHeader = reinterpret_cast<ObjectElements*>(zone->pod_malloc<HeapSlot>(nslots));
+        if (!dstHeader)
+            oomUnsafe.crash("Failed to allocate elements while tenuring.");
+    }
+
     js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
     nursery().setElementsForwardingPointer(srcHeader, dstHeader, nslots);
     dst->elements_ = dstHeader->elements();
     return nslots * sizeof(HeapSlot);
 }
 
 
 /*** IsMarked / IsAboutToBeFinalized **************************************************************/
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -310,24 +310,25 @@ Nursery::setForwardingPointer(void* oldD
     // Bug 1196210: If a zero-capacity header lands in the last 2 words of the
     // jemalloc chunk abutting the start of the nursery, the (invalid) newData
     // pointer will appear to be "inside" the nursery.
     MOZ_ASSERT(!isInside(newData) || uintptr_t(newData) == heapStart_);
 
     if (direct) {
         *reinterpret_cast<void**>(oldData) = newData;
     } else {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!forwardedBuffers.initialized() && !forwardedBuffers.init())
-            CrashAtUnhandlableOOM("Nursery::setForwardingPointer");
+            oomUnsafe.crash("Nursery::setForwardingPointer");
 #ifdef DEBUG
         if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData))
             MOZ_ASSERT(p->value() == newData);
 #endif
         if (!forwardedBuffers.put(oldData, newData))
-            CrashAtUnhandlableOOM("Nursery::setForwardingPointer");
+            oomUnsafe.crash("Nursery::setForwardingPointer");
     }
 }
 
 void
 Nursery::setSlotsForwardingPointer(HeapSlot* oldSlots, HeapSlot* newSlots, uint32_t nslots)
 {
     // Slot arrays always have enough space for a forwarding pointer, since the
     // number of slots is never zero.
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -102,18 +102,19 @@ class StoreBuffer
             }
             stores_.remove(v);
         }
 
         /* Move any buffered stores to the canonical store set. */
         void sinkStore(StoreBuffer* owner) {
             MOZ_ASSERT(stores_.initialized());
             if (last_) {
+                AutoEnterOOMUnsafeRegion oomUnsafe;
                 if (!stores_.put(last_))
-                    CrashAtUnhandlableOOM("Failed to allocate for MonoTypeBuffer::put.");
+                    oomUnsafe.crash("Failed to allocate for MonoTypeBuffer::put.");
             }
             last_ = T();
 
             if (MOZ_UNLIKELY(stores_.count() > MaxEntries))
                 owner->setAboutToOverflow();
         }
 
         bool has(StoreBuffer* owner, const T& v) {
@@ -162,25 +163,26 @@ class StoreBuffer
 
         template <typename T>
         void put(StoreBuffer* owner, const T& t) {
             MOZ_ASSERT(storage_);
 
             /* Ensure T is derived from BufferableRef. */
             (void)static_cast<const BufferableRef*>(&t);
 
+            AutoEnterOOMUnsafeRegion oomUnsafe;
             unsigned size = sizeof(T);
             unsigned* sizep = storage_->pod_malloc<unsigned>();
             if (!sizep)
-                CrashAtUnhandlableOOM("Failed to allocate for GenericBuffer::put.");
+                oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
             *sizep = size;
 
             T* tp = storage_->new_<T>(t);
             if (!tp)
-                CrashAtUnhandlableOOM("Failed to allocate for GenericBuffer::put.");
+                oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
 
             if (isAboutToOverflow())
                 owner->setAboutToOverflow();
         }
 
         size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
             return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
         }
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -253,18 +253,19 @@ struct Zone : public JS::shadow::Zone,
   public:
     bool hasDebuggers() const { return debuggers && debuggers->length(); }
     DebuggerVector* getDebuggers() const { return debuggers; }
     DebuggerVector* getOrCreateDebuggers(JSContext* cx);
 
     void enqueueForPromotionToTenuredLogging(JSObject& obj) {
         MOZ_ASSERT(hasDebuggers());
         MOZ_ASSERT(!IsInsideNursery(&obj));
+        js::AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!awaitingTenureLogging.append(&obj))
-            js::CrashAtUnhandlableOOM("Zone::enqueueForPromotionToTenuredLogging");
+            oomUnsafe.crash("Zone::enqueueForPromotionToTenuredLogging");
     }
     void logPromotionsToTenured();
 
     js::gc::ArenaLists arenas;
 
     js::TypeZone types;
 
     // The set of compartments in this zone.
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -982,18 +982,22 @@ NativeRegExpMacroAssembler::PopRegister(
 void
 NativeRegExpMacroAssembler::PushBacktrack(Label* label)
 {
     JitSpew(SPEW_PREFIX "PushBacktrack");
 
     CodeOffsetLabel patchOffset = masm.movWithPatch(ImmPtr(nullptr), temp0);
 
     MOZ_ASSERT(!label->bound());
-    if (!labelPatches.append(LabelPatch(label, patchOffset)))
-        CrashAtUnhandlableOOM("NativeRegExpMacroAssembler::PushBacktrack");
+
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (!labelPatches.append(LabelPatch(label, patchOffset)))
+            oomUnsafe.crash("NativeRegExpMacroAssembler::PushBacktrack");
+    }
 
     PushBacktrack(temp0);
     CheckBacktrackStackLimit();
 }
 
 void
 NativeRegExpMacroAssembler::BindBacktrack(Label* label)
 {
--- a/js/src/irregexp/RegExpEngine.cpp
+++ b/js/src/irregexp/RegExpEngine.cpp
@@ -1504,18 +1504,19 @@ class irregexp::RegExpCompiler
     }
 
     RegExpCode Assemble(JSContext* cx,
                         RegExpMacroAssembler* assembler,
                         RegExpNode* start,
                         int capture_count);
 
     inline void AddWork(RegExpNode* node) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!work_list_.append(node))
-            CrashAtUnhandlableOOM("AddWork");
+            oomUnsafe.crash("AddWork");
     }
 
     static const int kImplementationOffset = 0;
     static const int kNumberOfRegistersOffset = 0;
     static const int kCodeOffset = 1;
 
     RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
     EndNode* accept() { return accept_; }
@@ -2389,19 +2390,23 @@ BoyerMooreLookahead::EmitSkipInstruction
             masm->CheckCharacter(single_character, &cont);
         }
         masm->AdvanceCurrentPosition(lookahead_width);
         masm->JumpOrBacktrack(&again);
         masm->Bind(&cont);
         return true;
     }
 
-    uint8_t* boolean_skip_table = static_cast<uint8_t*>(js_malloc(kSize));
-    if (!boolean_skip_table || !masm->shared->addTable(boolean_skip_table))
-        CrashAtUnhandlableOOM("Table malloc");
+    uint8_t* boolean_skip_table;
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        boolean_skip_table = static_cast<uint8_t*>(js_malloc(kSize));
+        if (!boolean_skip_table || !masm->shared->addTable(boolean_skip_table))
+            oomUnsafe.crash("Table malloc");
+    }
 
     int skip_distance = GetSkipTable(min_lookahead, max_lookahead, boolean_skip_table);
     MOZ_ASSERT(skip_distance != 0);
 
     jit::Label cont, again;
     masm->Bind(&again);
     masm->LoadCurrentCharacter(max_lookahead, &cont, true);
     masm->CheckBitInTable(boolean_skip_table, &cont);
@@ -3096,19 +3101,23 @@ EmitUseLookupTable(RegExpMacroAssembler*
         }
         bit ^= 1;
     }
     for (int i = j; i < kSize; i++) {
         templ[i] = bit;
     }
 
     // TODO(erikcorry): Cache these.
-    uint8_t* ba = static_cast<uint8_t*>(js_malloc(kSize));
-    if (!ba || !masm->shared->addTable(ba))
-        CrashAtUnhandlableOOM("Table malloc");
+    uint8_t* ba;
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        ba = static_cast<uint8_t*>(js_malloc(kSize));
+        if (!ba || !masm->shared->addTable(ba))
+            oomUnsafe.crash("Table malloc");
+    }
 
     for (int i = 0; i < kSize; i++)
         ba[i] = templ[i];
 
     masm->CheckBitInTable(ba, on_bit_set);
     if (on_bit_clear != fall_through)
         masm->JumpOrBacktrack(on_bit_clear);
 }
--- a/js/src/irregexp/RegExpMacroAssembler.cpp
+++ b/js/src/irregexp/RegExpMacroAssembler.cpp
@@ -518,17 +518,19 @@ InterpretedRegExpMacroAssembler::Emit8(u
         Expand();
     *reinterpret_cast<unsigned char*>(buffer_ + pc_) = word;
     pc_ += 1;
 }
 
 void
 InterpretedRegExpMacroAssembler::Expand()
 {
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+
     int newLength = Max(100, length_ * 2);
     if (newLength < length_ + 4)
-        CrashAtUnhandlableOOM("InterpretedRegExpMacroAssembler::Expand");
+        oomUnsafe.crash("InterpretedRegExpMacroAssembler::Expand");
 
     buffer_ = (uint8_t*) js_realloc(buffer_, newLength);
     if (!buffer_)
-        CrashAtUnhandlableOOM("InterpretedRegExpMacroAssembler::Expand");
+        oomUnsafe.crash("InterpretedRegExpMacroAssembler::Expand");
     length_ = newLength;
 }
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -317,18 +317,21 @@ MaybeFoldConditionBlock(MIRGraph& graph,
         if (testBlock->isLoopBackedge())
             return;
         testBlock = testBlock->getSuccessor(0);
         if (testBlock->numPredecessors() != 1)
             return;
     }
 
     // Make sure the test block does not have any outgoing loop backedges.
-    if (!SplitCriticalEdgesForBlock(graph, testBlock))
-        CrashAtUnhandlableOOM("MaybeFoldConditionBlock");
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (!SplitCriticalEdgesForBlock(graph, testBlock))
+            oomUnsafe.crash("MaybeFoldConditionBlock");
+    }
 
     MPhi* phi;
     MTest* finalTest;
     if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest))
         return;
 
     MDefinition* trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
     MDefinition* falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
@@ -2927,18 +2930,19 @@ LinearSum::add(MDefinition* term, int32_
             if (terms_[i].scale == 0) {
                 terms_[i] = terms_.back();
                 terms_.popBack();
             }
             return true;
         }
     }
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!terms_.append(LinearTerm(term, scale)))
-        CrashAtUnhandlableOOM("LinearSum::add");
+        oomUnsafe.crash("LinearSum::add");
 
     return true;
 }
 
 bool
 LinearSum::add(int32_t constant)
 {
     return SafeAdd(constant, constant_, &constant_);
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -11099,18 +11099,19 @@ IonBuilder::convertUnboxedObjects(MDefin
 
     BaselineInspector::ObjectGroupVector list(alloc());
     for (size_t i = 0; i < types->getObjectCount(); i++) {
         TypeSet::ObjectKey* key = obj->resultTypeSet()->getObject(i);
         if (!key || !key->isGroup())
             continue;
 
         if (UnboxedLayout* layout = key->group()->maybeUnboxedLayout()) {
+            AutoEnterOOMUnsafeRegion oomUnsafe;
             if (layout->nativeGroup() && !list.append(key->group()))
-                CrashAtUnhandlableOOM("IonBuilder::convertUnboxedObjects");
+                oomUnsafe.crash("IonBuilder::convertUnboxedObjects");
         }
     }
 
     return convertUnboxedObjects(obj, list);
 }
 
 MDefinition*
 IonBuilder::convertUnboxedObjects(MDefinition* obj,
--- a/js/src/jit/LoopUnroller.cpp
+++ b/js/src/jit/LoopUnroller.cpp
@@ -41,17 +41,17 @@ struct LoopUnroller
     MBasicBlock* oldPreheader;
     MBasicBlock* newPreheader;
 
     // Map terms in the original loop to terms in the current unrolled iteration.
     DefinitionMap unrolledDefinitions;
 
     MDefinition* getReplacementDefinition(MDefinition* def);
     MResumePoint* makeReplacementResumePoint(MBasicBlock* block, MResumePoint* rp);
-    void makeReplacementInstruction(MInstruction* ins);
+    bool makeReplacementInstruction(MInstruction* ins);
 
     void go(LoopIterationBound* bound);
 };
 
 } // namespace
 
 MDefinition*
 LoopUnroller::getReplacementDefinition(MDefinition* def)
@@ -71,54 +71,56 @@ LoopUnroller::getReplacementDefinition(M
         MConstant* constant = MConstant::New(alloc, def->toConstant()->value());
         oldPreheader->insertBefore(*oldPreheader->begin(), constant);
         return constant;
     }
 
     return p->value();
 }
 
-void
+bool
 LoopUnroller::makeReplacementInstruction(MInstruction* ins)
 {
     MDefinitionVector inputs(alloc);
     for (size_t i = 0; i < ins->numOperands(); i++) {
         MDefinition* old = ins->getOperand(i);
         MDefinition* replacement = getReplacementDefinition(old);
         if (!inputs.append(replacement))
-            CrashAtUnhandlableOOM("LoopUnroller::makeReplacementDefinition");
+            return false;
     }
 
     MInstruction* clone = ins->clone(alloc, inputs);
 
     unrolledBackedge->add(clone);
 
     if (!unrolledDefinitions.putNew(ins, clone))
-        CrashAtUnhandlableOOM("LoopUnroller::makeReplacementDefinition");
+        return false;
 
     if (MResumePoint* old = ins->resumePoint()) {
         MResumePoint* rp = makeReplacementResumePoint(unrolledBackedge, old);
         clone->setResumePoint(rp);
     }
+
+    return true;
 }
 
 MResumePoint*
 LoopUnroller::makeReplacementResumePoint(MBasicBlock* block, MResumePoint* rp)
 {
     MDefinitionVector inputs(alloc);
     for (size_t i = 0; i < rp->numOperands(); i++) {
         MDefinition* old = rp->getOperand(i);
         MDefinition* replacement = old->isUnused() ? old : getReplacementDefinition(old);
         if (!inputs.append(replacement))
-            CrashAtUnhandlableOOM("LoopUnroller::makeReplacementResumePoint");
+            return nullptr;
     }
 
     MResumePoint* clone = MResumePoint::New(alloc, block, rp, inputs);
     if (!clone)
-        CrashAtUnhandlableOOM("LoopUnroller::makeReplacementResumePoint");
+        return nullptr;
 
     return clone;
 }
 
 void
 LoopUnroller::go(LoopIterationBound* bound)
 {
     // For now we always unroll loops the same number of times.
@@ -224,51 +226,57 @@ LoopUnroller::go(LoopIterationBound* bou
     newPreheader->discardAllResumePoints();
 
     // Insert new blocks at their RPO position, and update block ids.
     graph.insertBlockAfter(oldPreheader, unrolledHeader);
     graph.insertBlockAfter(unrolledHeader, unrolledBackedge);
     graph.insertBlockAfter(unrolledBackedge, newPreheader);
     graph.renumberBlocksAfter(oldPreheader);
 
+    // We don't tolerate allocation failure after this point.
+    // TODO: This is a bit drastic, is it possible to improve this?
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+
     if (!unrolledDefinitions.init())
-        CrashAtUnhandlableOOM("LoopUnroller::go");
+        oomUnsafe.crash("LoopUnroller::go");
 
     // Add phis to the unrolled loop header which correspond to the phis in the
     // original loop header.
     MOZ_ASSERT(header->getPredecessor(0) == oldPreheader);
     for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
         MPhi* old = *iter;
         MOZ_ASSERT(old->numOperands() == 2);
         MPhi* phi = MPhi::New(alloc);
         phi->setResultType(old->type());
         phi->setResultTypeSet(old->resultTypeSet());
         phi->setRange(old->range());
 
         unrolledHeader->addPhi(phi);
 
         if (!phi->reserveLength(2))
-            CrashAtUnhandlableOOM("LoopUnroller::go");
+            oomUnsafe.crash("LoopUnroller::go");
 
         // Set the first input for the phi for now. We'll set the second after
         // finishing the unroll.
         phi->addInput(old->getOperand(0));
 
         // The old phi will now take the value produced by the unrolled loop.
         old->replaceOperand(0, phi);
 
         if (!unrolledDefinitions.putNew(old, phi))
-            CrashAtUnhandlableOOM("LoopUnroller::go");
+            oomUnsafe.crash("LoopUnroller::go");
     }
 
     // The loop condition can bail out on e.g. integer overflow, so make a
     // resume point based on the initial resume point of the original header.
     MResumePoint* headerResumePoint = header->entryResumePoint();
     if (headerResumePoint) {
         MResumePoint* rp = makeReplacementResumePoint(unrolledHeader, headerResumePoint);
+        if (!rp)
+            oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
         unrolledHeader->setEntryResumePoint(rp);
 
         // Perform an interrupt check at the start of the unrolled loop.
         unrolledHeader->add(MInterruptCheck::New(alloc));
     }
 
     // Generate code for the test in the unrolled loop.
     for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
@@ -280,53 +288,58 @@ LoopUnroller::go(LoopIterationBound* bou
     MTest* unrolledTest = MTest::New(alloc, compare, unrolledBackedge, newPreheader);
     unrolledHeader->end(unrolledTest);
 
     // Make an entry resume point for the unrolled body. The unrolled header
     // does not have side effects on stack values, even if the original loop
     // header does, so use the same resume point as for the unrolled header.
     if (headerResumePoint) {
         MResumePoint* rp = makeReplacementResumePoint(unrolledBackedge, headerResumePoint);
+        if (!rp)
+            oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
         unrolledBackedge->setEntryResumePoint(rp);
     }
 
     // Make an entry resume point for the new preheader. There are no
     // instructions which use this but some other stuff wants one to be here.
     if (headerResumePoint) {
         MResumePoint* rp = makeReplacementResumePoint(newPreheader, headerResumePoint);
+        if (!rp)
+            oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
         newPreheader->setEntryResumePoint(rp);
     }
 
     // Generate the unrolled code.
     MOZ_ASSERT(UnrollCount > 1);
     size_t unrollIndex = 0;
     while (true) {
         // Clone the contents of the original loop into the unrolled loop body.
         for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
             MBasicBlock* block = bodyBlocks[i];
             for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                 MInstruction* ins = *iter;
                 if (ins->canClone()) {
-                    makeReplacementInstruction(*iter);
+                    if (!makeReplacementInstruction(*iter))
+                        oomUnsafe.crash("LoopUnroller::makeReplacementDefinition");
                 } else {
                     // Control instructions are handled separately.
                     MOZ_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
                 }
             }
         }
 
         // Compute the value of each loop header phi after the execution of
         // this unrolled iteration.
         MDefinitionVector phiValues(alloc);
         MOZ_ASSERT(header->getPredecessor(1) == backedge);
         for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
             MPhi* old = *iter;
             MDefinition* oldInput = old->getOperand(1);
             if (!phiValues.append(getReplacementDefinition(oldInput)))
-                CrashAtUnhandlableOOM("LoopUnroller::go");
+                oomUnsafe.crash("LoopUnroller::go");
         }
 
         unrolledDefinitions.clear();
 
         if (unrollIndex == UnrollCount - 1) {
             // We're at the end of the last unrolled iteration, set the
             // backedge input for the unrolled loop phis.
             size_t phiIndex = 0;
@@ -338,17 +351,17 @@ LoopUnroller::go(LoopIterationBound* bou
             break;
         }
 
         // Update the map for the phis in the next iteration.
         size_t phiIndex = 0;
         for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
             MPhi* old = *iter;
             if (!unrolledDefinitions.putNew(old, phiValues[phiIndex++]))
-                CrashAtUnhandlableOOM("LoopUnroller::go");
+                oomUnsafe.crash("LoopUnroller::go");
         }
         MOZ_ASSERT(phiIndex == phiValues.length());
 
         unrollIndex++;
     }
 
     MGoto* backedgeJump = MGoto::New(alloc, unrolledHeader);
     unrolledBackedge->end(backedgeJump);
@@ -358,17 +371,17 @@ LoopUnroller::go(LoopIterationBound* bou
     oldPreheader->discardLastIns();
     oldPreheader->end(MGoto::New(alloc, unrolledHeader));
 
     // Place the new preheader before the original loop.
     newPreheader->end(MGoto::New(alloc, header));
 
     // Cleanup the MIR graph.
     if (!unrolledHeader->addPredecessorWithoutPhis(unrolledBackedge))
-        CrashAtUnhandlableOOM("LoopUnroller::go");
+        oomUnsafe.crash("LoopUnroller::go");
     header->replacePredecessor(oldPreheader, newPreheader);
     oldPreheader->setSuccessorWithPhis(unrolledHeader, 0);
     newPreheader->setSuccessorWithPhis(header, 0);
     unrolledBackedge->setSuccessorWithPhis(unrolledHeader, 1);
 }
 
 bool
 jit::UnrollLoops(MIRGraph& graph, const LoopIterationBoundVector& bounds)
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -102,18 +102,19 @@ MIRGenerator::abort(const char* message,
 
 void
 MIRGenerator::addAbortedPreliminaryGroup(ObjectGroup* group)
 {
     for (size_t i = 0; i < abortedPreliminaryGroups_.length(); i++) {
         if (group == abortedPreliminaryGroups_[i])
             return;
     }
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!abortedPreliminaryGroups_.append(group))
-        CrashAtUnhandlableOOM("addAbortedPreliminaryGroup");
+        oomUnsafe.crash("addAbortedPreliminaryGroup");
 }
 
 bool
 MIRGenerator::needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const
 {
     // A heap access needs a bounds-check branch if we're not relying on signal
     // handlers to catch errors, and if it's not proven to be within bounds.
     // We use signal-handlers on x64, but on x86 there isn't enough address
@@ -1132,26 +1133,28 @@ MBasicBlock::addPredecessorSameInputsAs(
 {
     MOZ_ASSERT(pred);
     MOZ_ASSERT(predecessors_.length() > 0);
 
     // Predecessors must be finished, and at the correct stack depth.
     MOZ_ASSERT(pred->hasLastIns());
     MOZ_ASSERT(!pred->successorWithPhis());
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+
     if (!phisEmpty()) {
         size_t existingPosition = indexForPredecessor(existingPred);
         for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
             if (!iter->addInputSlow(iter->getOperand(existingPosition)))
-                CrashAtUnhandlableOOM("MBasicBlock::addPredecessorAdjustPhis");
+                oomUnsafe.crash("MBasicBlock::addPredecessorAdjustPhis");
         }
     }
 
     if (!predecessors_.append(pred))
-        CrashAtUnhandlableOOM("MBasicBlock::addPredecessorAdjustPhis");
+        oomUnsafe.crash("MBasicBlock::addPredecessorAdjustPhis");
 }
 
 bool
 MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred)
 {
     // Predecessors must be finished.
     MOZ_ASSERT(pred && pred->hasLastIns());
     return predecessors_.append(pred);
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -962,19 +962,20 @@ static CachePage*
 GetCachePageLocked(Simulator::ICacheMap& i_cache, void* page)
 {
     MOZ_ASSERT(Simulator::ICacheCheckingEnabled);
 
     Simulator::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
     if (p)
         return p->value();
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     CachePage* new_page = js_new<CachePage>();
     if (!new_page || !i_cache.add(p, page, new_page))
-        CrashAtUnhandlableOOM("Simulator CachePage");
+        oomUnsafe.crash("Simulator CachePage");
 
     return new_page;
 }
 
 // Flush from start up to and not including start + size.
 static void
 FlushOnePageLocked(Simulator::ICacheMap& i_cache, intptr_t start, int size)
 {
@@ -1192,19 +1193,20 @@ class Redirection
         Redirection* current = sim->redirection();
         for (; current != nullptr; current = current->next_) {
             if (current->nativeFunction_ == nativeFunction) {
                 MOZ_ASSERT(current->type() == type);
                 return current;
             }
         }
 
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
         if (!redir)
-            CrashAtUnhandlableOOM("Simulator redirection");
+            oomUnsafe.crash("Simulator redirection");
         new(redir) Redirection(nativeFunction, type, sim);
         return redir;
     }
 
     static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
         uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
         uint8_t* addrOfRedirection = addrOfSwi - offsetof(Redirection, swiInstruction_);
         return reinterpret_cast<Redirection*>(addrOfRedirection);
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -814,24 +814,25 @@ JSCompartment::clearObjectMetadata()
 }
 
 void
 JSCompartment::setNewObjectMetadata(JSContext* cx, JSObject* obj)
 {
     assertSameCompartment(cx, this, obj);
 
     if (JSObject* metadata = objectMetadataCallback(cx, obj)) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         assertSameCompartment(cx, metadata);
         if (!objectMetadataTable) {
             objectMetadataTable = cx->new_<ObjectWeakMap>(cx);
             if (!objectMetadataTable || !objectMetadataTable->init())
-                CrashAtUnhandlableOOM("setNewObjectMetadata");
+                oomUnsafe.crash("setNewObjectMetadata");
         }
         if (!objectMetadataTable->add(cx, obj, metadata))
-            CrashAtUnhandlableOOM("setNewObjectMetadata");
+            oomUnsafe.crash("setNewObjectMetadata");
     }
 }
 
 static bool
 AddInnerLazyFunctionsFromScript(JSScript* script, AutoObjectVector& lazyFunctions)
 {
     if (!script->hasObjects())
         return true;
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -2010,29 +2010,40 @@ ArenaList::pickArenasToRelocate(size_t& 
 #ifdef DEBUG
 inline bool
 PtrIsInRange(const void* ptr, const void* start, size_t length)
 {
     return uintptr_t(ptr) - uintptr_t(start) < length;
 }
 #endif
 
-static bool
+static TenuredCell*
+AllocRelocatedCell(Zone* zone, AllocKind thingKind, size_t thingSize)
+{
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+    void* dstAlloc = zone->arenas.allocateFromFreeList(thingKind, thingSize);
+    if (!dstAlloc)
+        dstAlloc = GCRuntime::refillFreeListInGC(zone, thingKind);
+    if (!dstAlloc) {
+        // This can only happen in zeal mode or debug builds as we don't
+        // otherwise relocate more cells than we have existing free space
+        // for.
+        oomUnsafe.crash("Could not allocate new arena while compacting");
+    }
+    return TenuredCell::fromPointer(dstAlloc);
+}
+
+static void
 RelocateCell(Zone* zone, TenuredCell* src, AllocKind thingKind, size_t thingSize)
 {
     JS::AutoSuppressGCAnalysis nogc(zone->runtimeFromMainThread());
 
     // Allocate a new cell.
     MOZ_ASSERT(zone == src->zone());
-    void* dstAlloc = zone->arenas.allocateFromFreeList(thingKind, thingSize);
-    if (!dstAlloc)
-        dstAlloc = GCRuntime::refillFreeListInGC(zone, thingKind);
-    if (!dstAlloc)
-        return false;
-    TenuredCell* dst = TenuredCell::fromPointer(dstAlloc);
+    TenuredCell* dst = AllocRelocatedCell(zone, thingKind, thingSize);
 
     // Copy source cell contents to destination.
     memcpy(dst, src, thingSize);
 
     if (IsObjectAllocKind(thingKind)) {
         JSObject* srcObj = static_cast<JSObject*>(static_cast<Cell*>(src));
         JSObject* dstObj = static_cast<JSObject*>(static_cast<Cell*>(dst));
 
@@ -2063,40 +2074,33 @@ RelocateCell(Zone* zone, TenuredCell* sr
     }
 
     // Copy the mark bits.
     dst->copyMarkBitsFrom(src);
 
     // Mark source cell as forwarded and leave a pointer to the destination.
     RelocationOverlay* overlay = RelocationOverlay::fromCell(src);
     overlay->forwardTo(dst);
-
-    return true;
 }
 
 static void
 RelocateArena(ArenaHeader* aheader, SliceBudget& sliceBudget)
 {
     MOZ_ASSERT(aheader->allocated());
     MOZ_ASSERT(!aheader->hasDelayedMarking);
     MOZ_ASSERT(!aheader->markOverflow);
     MOZ_ASSERT(!aheader->allocatedDuringIncremental);
 
     Zone* zone = aheader->zone;
 
     AllocKind thingKind = aheader->getAllocKind();
     size_t thingSize = aheader->getThingSize();
 
     for (ArenaCellIterUnderFinalize i(aheader); !i.done(); i.next()) {
-        if (!RelocateCell(zone, i.getCell(), thingKind, thingSize)) {
-            // This can only happen in zeal mode or debug builds as we don't
-            // otherwise relocate more cells than we have existing free space
-            // for.
-            CrashAtUnhandlableOOM("Could not allocate new arena while compacting");
-        }
+        RelocateCell(zone, i.getCell(), thingKind, thingSize);
         sliceBudget.step();
     }
 
 #ifdef DEBUG
     for (ArenaCellIterUnderFinalize i(aheader); !i.done(); i.next()) {
         TenuredCell* src = i.getCell();
         MOZ_ASSERT(RelocationOverlay::isCellForwarded(src));
         TenuredCell* dest = Forwarded(src);
@@ -3385,18 +3389,22 @@ GCHelperState::setState(State state)
 }
 
 void
 GCHelperState::startBackgroundThread(State newState)
 {
     MOZ_ASSERT(!thread && state() == IDLE && newState != IDLE);
     setState(newState);
 
-    if (!HelperThreadState().gcHelperWorklist().append(this))
-        CrashAtUnhandlableOOM("Could not add to pending GC helpers list");
+    {
+        AutoEnterOOMUnsafeRegion noOOM;
+        if (!HelperThreadState().gcHelperWorklist().append(this))
+            noOOM.crash("Could not add to pending GC helpers list");
+    }
+
     HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER);
 }
 
 void
 GCHelperState::waitForBackgroundThread()
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
@@ -4199,18 +4207,19 @@ js::gc::MarkingValidator::nonIncremental
             return;
     }
 
     gc::WeakKeyTable savedWeakKeys;
     if (!savedWeakKeys.init())
         return;
 
     for (gc::WeakKeyTable::Range r = gc->marker.weakKeys.all(); !r.empty(); r.popFront()) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!savedWeakKeys.put(Move(r.front().key), Move(r.front().value)))
-            CrashAtUnhandlableOOM("saving weak keys table for validator");
+            oomUnsafe.crash("saving weak keys table for validator");
     }
 
     /*
      * After this point, the function should run to completion, so we shouldn't
      * do anything fallible.
      */
     initialized = true;
 
@@ -4277,18 +4286,19 @@ js::gc::MarkingValidator::nonIncremental
     }
 
     for (GCCompartmentsIter c(runtime); !c.done(); c.next())
         WeakMapBase::unmarkCompartment(c);
     WeakMapBase::restoreMarkedWeakMaps(markedWeakMaps);
 
     gc->marker.weakKeys.clear();
     for (gc::WeakKeyTable::Range r = savedWeakKeys.all(); !r.empty(); r.popFront()) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!gc->marker.weakKeys.put(Move(r.front().key), Move(r.front().value)))
-            CrashAtUnhandlableOOM("restoring weak keys table for validator");
+            oomUnsafe.crash("restoring weak keys table for validator");
     }
 
     gc->incrementalState = state;
 }
 
 void
 js::gc::MarkingValidator::validate()
 {
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -1493,50 +1493,51 @@ js::XDRObjectLiteral(XDRState<mode>* xdr
 }
 
 template bool
 js::XDRObjectLiteral(XDRState<XDR_ENCODE>* xdr, MutableHandleObject obj);
 
 template bool
 js::XDRObjectLiteral(XDRState<XDR_DECODE>* xdr, MutableHandleObject obj);
 
-void
+bool
 NativeObject::fillInAfterSwap(JSContext* cx, const Vector<Value>& values, void* priv)
 {
     // This object has just been swapped with some other object, and its shape
     // no longer reflects its allocated size. Correct this information and
     // fill the slots in with the specified values.
     MOZ_ASSERT(slotSpan() == values.length());
 
     // Make sure the shape's numFixedSlots() is correct.
     size_t nfixed = gc::GetGCKindSlots(asTenured().getAllocKind(), getClass());
     if (nfixed != shape_->numFixedSlots()) {
         if (!generateOwnShape(cx))
-            CrashAtUnhandlableOOM("fillInAfterSwap");
+            return false;
         shape_->setNumFixedSlots(nfixed);
     }
 
     if (hasPrivate())
         setPrivate(priv);
     else
         MOZ_ASSERT(!priv);
 
     if (slots_) {
         js_free(slots_);
         slots_ = nullptr;
     }
 
     if (size_t ndynamic = dynamicSlotsCount(nfixed, values.length(), getClass())) {
         slots_ = cx->zone()->pod_malloc<HeapSlot>(ndynamic);
         if (!slots_)
-            CrashAtUnhandlableOOM("fillInAfterSwap");
+            return false;
         Debug_SetSlotRangeToCrashOnTouch(slots_, ndynamic);
     }
 
     initSlotRange(0, values.begin(), values.length());
+    return true;
 }
 
 void
 JSObject::fixDictionaryShapeAfterSwap()
 {
     // Dictionary shapes can point back to their containing objects, so after
     // swapping the guts of those objects fix the pointers up.
     if (isNative() && as<NativeObject>().inDictionaryMode())
@@ -1547,22 +1548,24 @@ JSObject::fixDictionaryShapeAfterSwap()
 bool
 JSObject::swap(JSContext* cx, HandleObject a, HandleObject b)
 {
     // Ensure swap doesn't cause a finalizer to not be run.
     MOZ_ASSERT(IsBackgroundFinalized(a->asTenured().getAllocKind()) ==
                IsBackgroundFinalized(b->asTenured().getAllocKind()));
     MOZ_ASSERT(a->compartment() == b->compartment());
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+
     AutoCompartment ac(cx, a);
 
     if (!a->getGroup(cx))
-        CrashAtUnhandlableOOM("JSObject::swap");
+        oomUnsafe.crash("JSObject::swap");
     if (!b->getGroup(cx))
-        CrashAtUnhandlableOOM("JSObject::swap");
+        oomUnsafe.crash("JSObject::swap");
 
     /*
      * Neither object may be in the nursery, but ensure we update any embedded
      * nursery pointers in either object.
      */
     MOZ_ASSERT(!IsInsideNursery(a) && !IsInsideNursery(b));
     cx->runtime()->gc.storeBuffer.putWholeCellFromMainThread(a);
     cx->runtime()->gc.storeBuffer.putWholeCellFromMainThread(b);
@@ -1611,42 +1614,42 @@ JSObject::swap(JSContext* cx, HandleObje
 
         // Remember the original values from the objects.
         Vector<Value> avals(cx);
         void* apriv = nullptr;
         if (na) {
             apriv = na->hasPrivate() ? na->getPrivate() : nullptr;
             for (size_t i = 0; i < na->slotSpan(); i++) {
                 if (!avals.append(na->getSlot(i)))
-                    CrashAtUnhandlableOOM("JSObject::swap");
+                    oomUnsafe.crash("JSObject::swap");
             }
         }
         Vector<Value> bvals(cx);
         void* bpriv = nullptr;
         if (nb) {
             bpriv = nb->hasPrivate() ? nb->getPrivate() : nullptr;
             for (size_t i = 0; i < nb->slotSpan(); i++) {
                 if (!bvals.append(nb->getSlot(i)))
-                    CrashAtUnhandlableOOM("JSObject::swap");
+                    oomUnsafe.crash("JSObject::swap");
             }
         }
 
         // Swap the main fields of the objects, whether they are native objects or proxies.
         char tmp[sizeof(JSObject_Slots0)];
         js_memcpy(&tmp, a, sizeof tmp);
         js_memcpy(a, b, sizeof tmp);
         js_memcpy(b, &tmp, sizeof tmp);
 
         a->fixDictionaryShapeAfterSwap();
         b->fixDictionaryShapeAfterSwap();
 
-        if (na)
-            b->as<NativeObject>().fillInAfterSwap(cx, avals, apriv);
-        if (nb)
-            a->as<NativeObject>().fillInAfterSwap(cx, bvals, bpriv);
+        if (na && !b->as<NativeObject>().fillInAfterSwap(cx, avals, apriv))
+            oomUnsafe.crash("fillInAfterSwap");
+        if (nb && !a->as<NativeObject>().fillInAfterSwap(cx, bvals, bpriv))
+            oomUnsafe.crash("fillInAfterSwap");
     }
 
     // Swapping the contents of two objects invalidates type sets which contain
     // either of the objects, so mark all such sets as unknown.
     MarkObjectGroupUnknownProperties(cx, a->group());
     MarkObjectGroupUnknownProperties(cx, b->group());
 
     /*
--- a/js/src/vm/ArgumentsObject.cpp
+++ b/js/src/vm/ArgumentsObject.cpp
@@ -606,20 +606,21 @@ ArgumentsObject::objectMovedDuringMinorG
 
     Nursery& nursery = trc->runtime()->gc.nursery;
 
     if (!nursery.isInside(nsrc->data())) {
         nursery.removeMallocedBuffer(nsrc->data());
         return 0;
     }
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     uint32_t nbytes = nsrc->data()->dataBytes;
     uint8_t* data = nsrc->zone()->pod_malloc<uint8_t>(nbytes);
     if (!data)
-        CrashAtUnhandlableOOM("Failed to allocate ArgumentsObject data while tenuring.");
+        oomUnsafe.crash("Failed to allocate ArgumentsObject data while tenuring.");
     ndst->initFixedSlot(DATA_SLOT, PrivateValue(data));
 
     mozilla::PodCopy(data, reinterpret_cast<uint8_t*>(nsrc->data()), nbytes);
 
     ArgumentsData* dstData = ndst->data();
     dstData->deletedBits = reinterpret_cast<size_t*>(dstData->args + dstData->numArgs);
 
     return nbytes;
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -518,18 +518,19 @@ ArrayBufferObject::neuter(JSContext* cx,
 
     // When neutering a buffer with typed object views, any jitcode which
     // accesses such views needs to be deoptimized so that neuter checks are
     // performed. This is done by setting a compartment wide flag indicating
     // that buffers with typed object views have been neutered.
     if (buffer->hasTypedObjectViews()) {
         // Make sure the global object's group has been instantiated, so the
         // flag change will be observed.
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!cx->global()->getGroup(cx))
-            CrashAtUnhandlableOOM("ArrayBufferObject::neuter");
+            oomUnsafe.crash("ArrayBufferObject::neuter");
         MarkObjectGroupFlags(cx, cx->global(), OBJECT_FLAG_TYPED_OBJECT_NEUTERED);
         cx->compartment()->neuteredTypedObjects = 1;
     }
 
     // Neuter all views on the buffer, clear out the list of views and the
     // buffer's data.
 
     if (InnerViewTable::ViewVector* views = cx->compartment()->innerViews.maybeViewsUnbarriered(buffer)) {
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -1708,22 +1708,24 @@ Debugger::TenurePromotionsLogEntry::Tenu
     frame(getObjectAllocationSite(obj)),
     size(JS::ubi::Node(&obj).size(rt->debuggerMallocSizeOf))
 { }
 
 
 void
 Debugger::logTenurePromotion(JSRuntime* rt, JSObject& obj, double when)
 {
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+
     if (!tenurePromotionsLog.emplaceBack(rt, obj, when))
-        CrashAtUnhandlableOOM("Debugger::logTenurePromotion");
+        oomUnsafe.crash("Debugger::logTenurePromotion");
 
     if (tenurePromotionsLog.length() > maxTenurePromotionsLogLength) {
         if (!tenurePromotionsLog.popFront())
-            CrashAtUnhandlableOOM("Debugger::logTenurePromotion");
+            oomUnsafe.crash("Debugger::logTenurePromotion");
         MOZ_ASSERT(tenurePromotionsLog.length() == maxTenurePromotionsLogLength);
         tenurePromotionsLogOverflowed = true;
     }
 }
 
 bool
 Debugger::appendAllocationSite(JSContext* cx, HandleObject obj, HandleSavedFrame frame,
                                double when)
@@ -3432,25 +3434,25 @@ Debugger::addDebuggeeGlobal(JSContext* c
     globalDebuggersGuard.release();
     debuggeesGuard.release();
     zoneDebuggersGuard.release();
     debuggeeZonesGuard.release();
     allocationsTrackingGuard.release();
     return true;
 }
 
-bool
+void
 Debugger::recomputeDebuggeeZoneSet()
 {
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     debuggeeZones.clear();
     for (auto range = debuggees.all(); !range.empty(); range.popFront()) {
         if (!debuggeeZones.put(range.front()->zone()))
-            return false;
-    }
-    return true;
+            oomUnsafe.crash("Debugger::removeDebuggeeGlobal");
+    }
 }
 
 template<typename V>
 static Debugger**
 findDebuggerInVector(Debugger* dbg, V* vec)
 {
     Debugger** p;
     for (p = vec->begin(); p != vec->end(); p++) {
@@ -3509,18 +3511,18 @@ Debugger::removeDebuggeeGlobal(FreeOp* f
      */
     globalDebuggersVector->erase(findDebuggerInVector(this, globalDebuggersVector));
 
     if (debugEnum)
         debugEnum->removeFront();
     else
         debuggees.remove(global);
 
-    if (!recomputeDebuggeeZoneSet())
-        CrashAtUnhandlableOOM("Debugger::removeDebuggeeGlobal");
+    recomputeDebuggeeZoneSet();
+
     if (!debuggeeZones.has(global->zone()))
         zoneDebuggersVector->erase(findDebuggerInVector(this, zoneDebuggersVector));
 
     /* Remove all breakpoints for the debuggee. */
     Breakpoint* nextbp;
     for (Breakpoint* bp = firstBreakpoint(); bp; bp = nextbp) {
         nextbp = bp->nextInDebugger();
         if (bp->site->script->compartment() == global->compartment())
--- a/js/src/vm/Debugger.h
+++ b/js/src/vm/Debugger.h
@@ -367,17 +367,17 @@ class Debugger : private mozilla::Linked
     static const size_t DEFAULT_MAX_LOG_LENGTH = 5000;
 
     bool appendAllocationSite(JSContext* cx, HandleObject obj, HandleSavedFrame frame,
                               double when);
 
     /*
      * Recompute the set of debuggee zones based on the set of debuggee globals.
      */
-    bool recomputeDebuggeeZoneSet();
+    void recomputeDebuggeeZoneSet();
 
     /*
      * Return true if there is an existing object metadata callback for the
      * given global's compartment that will prevent our instrumentation of
      * allocations.
      */
     static bool cannotTrackAllocations(const GlobalObject& global);
 
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -112,18 +112,19 @@ js::StartOffThreadIonCompile(JSContext* 
 /*
  * Move an IonBuilder for which compilation has either finished, failed, or
  * been cancelled into the global finished compilation list. All off thread
  * compilations which are started must eventually be finished.
  */
 static void
 FinishOffThreadIonCompile(jit::IonBuilder* builder)
 {
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!HelperThreadState().ionFinishedList().append(builder))
-        CrashAtUnhandlableOOM("FinishOffThreadIonCompile");
+        oomUnsafe.crash("FinishOffThreadIonCompile");
 }
 
 static inline bool
 CompiledScriptMatches(JSCompartment* compartment, JSScript* script, JSScript* target)
 {
     if (script)
         return target == script;
     return target->compartment() == compartment;
--- a/js/src/vm/NativeObject.cpp
+++ b/js/src/vm/NativeObject.cpp
@@ -359,18 +359,19 @@ NativeObject::setLastPropertyMakeNative(
 
     size_t oldSpan = shape->numFixedSlots();
     size_t newSpan = shape->slotSpan();
 
     initializeSlotRange(0, oldSpan);
 
     // A failure at this point will leave the object as a mutant, and we
     // can't recover.
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (oldSpan != newSpan && !updateSlotsForSpan(cx, oldSpan, newSpan))
-        CrashAtUnhandlableOOM("NativeObject::setLastPropertyMakeNative");
+        oomUnsafe.crash("NativeObject::setLastPropertyMakeNative");
 }
 
 bool
 NativeObject::setSlotSpan(ExclusiveContext* cx, uint32_t span)
 {
     MOZ_ASSERT(inDictionaryMode());
 
     size_t oldSpan = lastProperty()->base()->slotSpan();
--- a/js/src/vm/NativeObject.h
+++ b/js/src/vm/NativeObject.h
@@ -708,17 +708,17 @@ class NativeObject : public JSObject
      * 1. getter and setter must be normalized based on flags (see jsscope.cpp).
      * 2. Checks for non-extensibility must be done by callers.
      */
     static Shape*
     addPropertyInternal(ExclusiveContext* cx, HandleNativeObject obj, HandleId id,
                         JSGetterOp getter, JSSetterOp setter, uint32_t slot, unsigned attrs,
                         unsigned flags, ShapeTable::Entry* entry, bool allowDictionary);
 
-    void fillInAfterSwap(JSContext* cx, const Vector<Value>& values, void* priv);
+    bool fillInAfterSwap(JSContext* cx, const Vector<Value>& values, void* priv);
 
   public:
     // Return true if this object has been converted from shared-immutable
     // prototype-rooted shape storage to dictionary-shapes in a doubly-linked
     // list.
     bool inDictionaryMode() const {
         return lastProperty()->inDictionary();
     }
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -1849,18 +1849,19 @@ FreeOp::free_(void* p)
 
 inline void
 FreeOp::freeLater(void* p)
 {
     // FreeOps other than the defaultFreeOp() are constructed on the stack,
     // and won't hold onto the pointers to free indefinitely.
     MOZ_ASSERT(this != runtime()->defaultFreeOp());
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!freeLaterList.append(p))
-        CrashAtUnhandlableOOM("FreeOp::freeLater");
+        oomUnsafe.crash("FreeOp::freeLater");
 }
 
 /*
  * RAII class that takes the GC lock while it is live.
  *
  * Note that the lock may be temporarily released by use of AutoUnlockGC when
  * passed a non-const reference to this class.
  */
--- a/js/src/vm/SavedStacks.cpp
+++ b/js/src/vm/SavedStacks.cpp
@@ -1364,22 +1364,23 @@ SavedStacksMetadataCallback(JSContext* c
         // until we take the next sample. Any value for X less than (~P)^n
         // yields a skip count greater than n, so the likelihood of a skip count
         // greater than n is (~P)^n, as required.
         double notSamplingProb = 1.0 - stacks.allocationSamplingProbability;
         stacks.allocationSkipCount = std::floor(std::log(random_nextDouble(&stacks.rngState)) /
                                                 std::log(notSamplingProb));
     }
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     RootedSavedFrame frame(cx);
     if (!stacks.saveCurrentStack(cx, &frame))
-        CrashAtUnhandlableOOM("SavedStacksMetadataCallback");
+        oomUnsafe.crash("SavedStacksMetadataCallback");
 
     if (!Debugger::onLogAllocationSite(cx, obj, frame, JS_GetCurrentEmbedderTime()))
-        CrashAtUnhandlableOOM("SavedStacksMetadataCallback");
+        oomUnsafe.crash("SavedStacksMetadataCallback");
 
     MOZ_ASSERT_IF(frame, !frame->is<WrapperObject>());
     return frame;
 }
 
 #ifdef JS_CRASH_DIAGNOSTICS
 void
 CompartmentChecker::check(SavedStacks* stacks)
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -1272,19 +1272,22 @@ TypeSet::ObjectKey::ensureTrackedPropert
 
 void
 js::EnsureTrackPropertyTypes(JSContext* cx, JSObject* obj, jsid id)
 {
     id = IdToTypeId(id);
 
     if (obj->isSingleton()) {
         AutoEnterAnalysis enter(cx);
-        if (obj->hasLazyGroup() && !obj->getGroup(cx)) {
-            CrashAtUnhandlableOOM("Could not allocate ObjectGroup in EnsureTrackPropertyTypes");
-            return;
+        if (obj->hasLazyGroup()) {
+            AutoEnterOOMUnsafeRegion oomUnsafe;
+            if (!obj->getGroup(cx)) {
+                oomUnsafe.crash("Could not allocate ObjectGroup in EnsureTrackPropertyTypes");
+                return;
+            }
         }
         if (!obj->group()->unknownProperties() && !obj->group()->getProperty(cx, obj, id)) {
             MOZ_ASSERT(obj->group()->unknownProperties());
             return;
         }
     }
 
     MOZ_ASSERT(obj->group()->unknownProperties() || TrackPropertyTypes(cx, obj, id));
@@ -2455,18 +2458,19 @@ TypeZone::processPendingRecompiles(FreeO
     MOZ_ASSERT(!recompiles.empty());
 
     /*
      * Steal the list of scripts to recompile, to make sure we don't try to
      * recursively recompile them.
      */
     RecompileInfoVector pending;
     for (size_t i = 0; i < recompiles.length(); i++) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!pending.append(recompiles[i]))
-            CrashAtUnhandlableOOM("processPendingRecompiles");
+            oomUnsafe.crash("processPendingRecompiles");
     }
     recompiles.clear();
 
     jit::Invalidate(*this, fop, pending);
 
     MOZ_ASSERT(recompiles.empty());
 }
 
@@ -2477,18 +2481,19 @@ TypeZone::addPendingRecompile(JSContext*
     if (!co || !co->isValid() || co->pendingInvalidation())
         return;
 
     InferSpew(ISpewOps, "addPendingRecompile: %p:%s:%" PRIuSIZE,
               co->script(), co->script()->filename(), co->script()->lineno());
 
     co->setPendingInvalidation();
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!cx->zone()->types.activeAnalysis->pendingRecompiles.append(info))
-        CrashAtUnhandlableOOM("Could not update pendingRecompiles");
+        oomUnsafe.crash("Could not update pendingRecompiles");
 }
 
 void
 TypeZone::addPendingRecompile(JSContext* cx, JSScript* script)
 {
     MOZ_ASSERT(script);
 
     CancelOffThreadIonCompile(cx->compartment(), script);
@@ -3763,20 +3768,21 @@ TypeNewScript::maybeAnalyze(JSContext* c
         // been hooked into it.
         MOZ_ASSERT(group->unboxedLayout().newScript() == this);
         destroyNewScript.group = nullptr;
 
         // Clear out the template object, which is not used for TypeNewScripts
         // with an unboxed layout. Currently it is a mutant object with a
         // non-native group and native shape, so make it safe for GC by changing
         // its group to the default for its prototype.
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         ObjectGroup* plainGroup = ObjectGroup::defaultNewGroup(cx, &PlainObject::class_,
                                                                group->proto());
         if (!plainGroup)
-            CrashAtUnhandlableOOM("TypeNewScript::maybeAnalyze");
+            oomUnsafe.crash("TypeNewScript::maybeAnalyze");
         templateObject_->setGroup(plainGroup);
         templateObject_ = nullptr;
 
         return true;
     }
 
     if (prefixShape->slotSpan() == templateObject()->slotSpan()) {
         // The definite properties analysis found exactly the properties that
@@ -3853,20 +3859,20 @@ TypeNewScript::rollbackPartiallyInitiali
         Value thisv = iter.thisv(cx);
         if (!thisv.isObject() ||
             thisv.toObject().hasLazyGroup() ||
             thisv.toObject().group() != group)
         {
             continue;
         }
 
-        if (thisv.toObject().is<UnboxedPlainObject>() &&
-            !UnboxedPlainObject::convertToNative(cx, &thisv.toObject()))
-        {
-            CrashAtUnhandlableOOM("rollbackPartiallyInitializedObjects");
+        if (thisv.toObject().is<UnboxedPlainObject>()) {
+            AutoEnterOOMUnsafeRegion oomUnsafe;
+            if (!UnboxedPlainObject::convertToNative(cx, &thisv.toObject()))
+                oomUnsafe.crash("rollbackPartiallyInitializedObjects");
         }
 
         // Found a matching frame.
         RootedPlainObject obj(cx, &thisv.toObject().as<PlainObject>());
 
         // Whether all identified 'new' properties have been initialized.
         bool finished = false;
 
@@ -3990,23 +3996,25 @@ ConstraintTypeSet::trace(Zone* zone, JST
 
         clearObjects();
         objectCount = 0;
         for (unsigned i = 0; i < oldCapacity; i++) {
             ObjectKey* key = oldArray[i];
             if (!key)
                 continue;
             TraceObjectKey(trc, &key);
+
+            AutoEnterOOMUnsafeRegion oomUnsafe;
             ObjectKey** pentry =
                 TypeHashSet::Insert<ObjectKey*, ObjectKey, ObjectKey>
                     (zone->types.typeLifoAlloc, objectSet, objectCount, key);
-            if (pentry)
-                *pentry = key;
-            else
-                CrashAtUnhandlableOOM("ConstraintTypeSet::trace");
+            if (!pentry)
+                oomUnsafe.crash("ConstraintTypeSet::trace");
+
+            *pentry = key;
         }
         setBaseObjectCount(objectCount);
     } else if (objectCount == 1) {
         ObjectKey* key = (ObjectKey*) objectSet;
         TraceObjectKey(trc, &key);
         objectSet = reinterpret_cast<ObjectKey**>(key);
     }
 }
--- a/js/src/vm/UnboxedObject.cpp
+++ b/js/src/vm/UnboxedObject.cpp
@@ -1219,19 +1219,20 @@ UnboxedArrayObject::objectMovedDuringMin
     // possible, the nursery will have picked an allocation size that is large
     // enough.
     size_t nbytes = nsrc->capacity() * nsrc->elementSize();
     if (offsetOfInlineElements() + nbytes <= GetGCKindBytes(allocKind)) {
         ndst->setInlineElements();
     } else {
         MOZ_ASSERT(allocKind == gc::AllocKind::OBJECT0);
 
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         uint8_t* data = nsrc->zone()->pod_malloc<uint8_t>(nbytes);
         if (!data)
-            CrashAtUnhandlableOOM("Failed to allocate unboxed array elements while tenuring.");
+            oomUnsafe.crash("Failed to allocate unboxed array elements while tenuring.");
         ndst->elements_ = data;
     }
 
     PodCopy(ndst->elements(), nsrc->elements(), nsrc->initializedLength() * nsrc->elementSize());
 
     // Set a forwarding pointer for the element buffers in case they were
     // preserved on the stack by Ion.
     bool direct = nsrc->capacity() * nsrc->elementSize() >= sizeof(uintptr_t);
@@ -1878,18 +1879,20 @@ UnboxedArrayObject::fillAfterConvert(Exc
     setInlineElements();
 
     setLength(cx, NextValue(values, valueCursor).toInt32());
 
     int32_t initlen = NextValue(values, valueCursor).toInt32();
     if (!initlen)
         return;
 
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!growElements(cx, initlen))
-        CrashAtUnhandlableOOM("UnboxedArrayObject::fillAfterConvert");
+        oomUnsafe.crash("UnboxedArrayObject::fillAfterConvert");
+
     setInitializedLength(initlen);
 
     for (size_t i = 0; i < size_t(initlen); i++)
         JS_ALWAYS_TRUE(initElement(cx, i, NextValue(values, valueCursor)));
 }
 
 static bool
 GetValuesFromPreliminaryPlainObject(PlainObject* obj, AutoValueVector& values)