Merge inbound to mozilla-central. a=merge
author: Bogdan Tara <btara@mozilla.com>
Thu, 10 Jan 2019 19:20:47 +0200
changeset 510355 d0a6668cf2fe907399cff20030b7b8218d56f005
parent 510340 a51746f37520891be96522dbce8d1bade45d8b14 (current diff)
parent 510354 65174e3014705687e7ca1050d5d1ffbe81126ac0 (diff)
child 510379 be769aa73f883b218cf655519a08aa874500ee7e
child 510406 9fa45d06580879b4c8fc463142d5c1e04dbfb11a
push id: 10547
push user: ffxbld-merge
push date: Mon, 21 Jan 2019 13:03:58 +0000
treeherder: mozilla-beta@24ec1916bffe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge inbound to mozilla-central. a=merge
--- a/dom/security/nsCSPService.cpp
+++ b/dom/security/nsCSPService.cpp
@@ -263,25 +263,25 @@ CSPService::AsyncOnChannelRedirect(nsICh
   }
 
   nsCOMPtr<nsIURI> newUri;
   nsresult rv = newChannel->GetURI(getter_AddRefs(newUri));
   NS_ENSURE_SUCCESS(rv, rv);
 
   nsCOMPtr<nsILoadInfo> loadInfo = oldChannel->GetLoadInfo();
 
-  nsCOMPtr<nsICSPEventListener> cspEventListener;
-  rv = loadInfo->GetCspEventListener(getter_AddRefs(cspEventListener));
-  NS_ENSURE_SUCCESS(rv, rv);
-
   // if no loadInfo on the channel, nothing for us to do
   if (!loadInfo) {
     return NS_OK;
   }
 
+  nsCOMPtr<nsICSPEventListener> cspEventListener;
+  rv = loadInfo->GetCspEventListener(getter_AddRefs(cspEventListener));
+  NS_ENSURE_SUCCESS(rv, rv);
+
   // No need to continue processing if CSP is disabled or if the protocol
   // is *not* subject to CSP.
   // Please note, the correct way to opt-out of CSP using a custom
   // protocolHandler is to set one of the nsIProtocolHandler flags
   // that are whitelistet in subjectToCSP()
   nsContentPolicyType policyType = loadInfo->InternalContentPolicyType();
   if (!StaticPrefs::security_csp_enable() ||
       !subjectToCSP(newUri, policyType)) {
--- a/js/src/builtin/Array.cpp
+++ b/js/src/builtin/Array.cpp
@@ -4419,17 +4419,17 @@ void js::ArraySpeciesLookup::initialize(
   arraySpeciesShape_ = speciesShape;
   canonicalSpeciesFunc_ = speciesFun;
 #endif
   arrayProtoShape_ = arrayProto->lastProperty();
   arrayProtoConstructorSlot_ = ctorShape->slot();
 }
 
 void js::ArraySpeciesLookup::reset() {
-  JS_POISON(this, 0xBB, sizeof(*this), MemCheckKind::MakeUndefined);
+  AlwaysPoison(this, 0xBB, sizeof(*this), MemCheckKind::MakeUndefined);
   state_ = State::Uninitialized;
 }
 
 bool js::ArraySpeciesLookup::isArrayStateStillSane() {
   MOZ_ASSERT(state_ == State::Initialized);
 
   // Ensure that Array.prototype still has the expected shape.
   if (arrayProto_->lastProperty() != arrayProtoShape_) {
--- a/js/src/builtin/Promise.cpp
+++ b/js/src/builtin/Promise.cpp
@@ -4690,17 +4690,17 @@ void js::PromiseLookup::initialize(JSCon
 #endif
   promiseProtoShape_ = promiseProto->lastProperty();
   promiseResolveSlot_ = resolveShape->slot();
   promiseProtoConstructorSlot_ = ctorShape->slot();
   promiseProtoThenSlot_ = thenShape->slot();
 }
 
 void js::PromiseLookup::reset() {
-  JS_POISON(this, 0xBB, sizeof(*this), MemCheckKind::MakeUndefined);
+  AlwaysPoison(this, 0xBB, sizeof(*this), MemCheckKind::MakeUndefined);
   state_ = State::Uninitialized;
 }
 
 bool js::PromiseLookup::isPromiseStateStillSane(JSContext* cx) {
   MOZ_ASSERT(state_ == State::Initialized);
 
   NativeObject* promiseProto = getPromisePrototype(cx);
   MOZ_ASSERT(promiseProto);
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -5895,17 +5895,17 @@ gc::ZealModeHelpText),
 "  Returns whether asm.js compilation is currently available or whether it is disabled\n"
 "  (e.g., by the debugger)."),
 
     JS_FN_HELP("isSimdAvailable", IsSimdAvailable, 0, 0,
 "isSimdAvailable",
 "  Returns true if SIMD extensions are supported on this platform."),
 
     JS_FN_HELP("getJitCompilerOptions", GetJitCompilerOptions, 0, 0,
-"getCompilerOptions()",
+"getJitCompilerOptions()",
 "  Return an object describing some of the JIT compiler options.\n"),
 
     JS_FN_HELP("isAsmJSModule", IsAsmJSModule, 1, 0,
 "isAsmJSModule(fn)",
 "  Returns whether the given value is a function containing \"use asm\" that has been\n"
 "  validated according to the asm.js spec."),
 
     JS_FN_HELP("isAsmJSModuleLoadedFromCache", IsAsmJSModuleLoadedFromCache, 1, 0,
--- a/js/src/frontend/NameFunctions.cpp
+++ b/js/src/frontend/NameFunctions.cpp
@@ -980,18 +980,18 @@ class NameResolver {
 
     // It would be nice to common up the repeated |parents[initialParents]|
     // in a single variable, but the #if condition required to prevent an
     // unused-variable warning across three separate conditionally-expanded
     // macros would be super-ugly.  :-(
     MOZ_ASSERT(parents[initialParents] == cur,
                "pushed child shouldn't change underneath us");
 
-    JS_POISON(&parents[initialParents], 0xFF, sizeof(parents[initialParents]),
-              MemCheckKind::MakeUndefined);
+    AlwaysPoison(&parents[initialParents], 0xFF, sizeof(parents[initialParents]),
+                 MemCheckKind::MakeUndefined);
 
     return true;
   }
 };
 
 } /* anonymous namespace */
 
 bool frontend::NameFunctions(JSContext* cx, ParseNode* pn) {
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -729,18 +729,18 @@ void BackgroundAllocTask::run() {
 void Chunk::init(JSRuntime* rt) {
   /* The chunk may still have some regions marked as no-access. */
   MOZ_MAKE_MEM_UNDEFINED(this, ChunkSize);
 
   /*
    * Poison the chunk. Note that decommitAllArenas() below will mark the
    * arenas as inaccessible (for memory sanitizers).
    */
-  JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize,
-            MemCheckKind::MakeUndefined);
+  Poison(this, JS_FRESH_TENURED_PATTERN, ChunkSize,
+         MemCheckKind::MakeUndefined);
 
   /*
    * We clear the bitmap to guard against JS::GCThingIsMarkedGray being called
    * on uninitialized data, which would happen before the first GC cycle.
    */
   bitmap.clear();
 
   /*
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -585,26 +585,26 @@ inline size_t Arena::finalize(FreeOp* fo
         newListTail->initBounds(firstThingOrSuccessorOfLastMarkedThing,
                                 thing - thingSize, this);
         newListTail = newListTail->nextSpanUnchecked(this);
       }
       firstThingOrSuccessorOfLastMarkedThing = thing + thingSize;
       nmarked++;
     } else {
       t->finalize(fop);
-      JS_POISON(t, JS_SWEPT_TENURED_PATTERN, thingSize,
-                MemCheckKind::MakeUndefined);
+      AlwaysPoison(t, JS_SWEPT_TENURED_PATTERN, thingSize,
+             MemCheckKind::MakeUndefined);
       gcTracer.traceTenuredFinalize(t);
     }
   }
 
   if (nmarked == 0) {
     // Do nothing. The caller will update the arena appropriately.
     MOZ_ASSERT(newListTail == &newListHead);
-    JS_EXTRA_POISON(data, JS_SWEPT_TENURED_PATTERN, sizeof(data),
+    DebugOnlyPoison(data, JS_SWEPT_TENURED_PATTERN, sizeof(data),
                     MemCheckKind::MakeUndefined);
     return nmarked;
   }
 
   MOZ_ASSERT(firstThingOrSuccessorOfLastMarkedThing != firstThing);
   uint_fast16_t lastMarkedThing =
       firstThingOrSuccessorOfLastMarkedThing - thingSize;
   if (lastThing == lastMarkedThing) {
@@ -923,17 +923,18 @@ GCRuntime::GCRuntime(JSRuntime* rt)
       incrementalState(gc::State::NotActive),
       initialState(gc::State::NotActive),
 #ifdef JS_GC_ZEAL
       useZeal(false),
 #endif
       lastMarkSlice(false),
       safeToYield(true),
       sweepOnBackgroundThread(false),
-      blocksToFreeAfterSweeping(
+      lifoBlocksToFree((size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
+      lifoBlocksToFreeAfterMinorGC(
           (size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
       sweepGroupIndex(0),
       sweepGroups(nullptr),
       currentSweepGroup(nullptr),
       sweepZone(nullptr),
       hasMarkedGrayRoots(false),
       abortSweepAfterCurrentGroup(false),
       startedCompacting(false),
@@ -956,21 +957,20 @@ GCRuntime::GCRuntime(JSRuntime* rt)
       gcCallbackDepth(0),
       alwaysPreserveCode(false),
 #ifdef DEBUG
       arenasEmptyAtShutdown(true),
 #endif
       lock(mutexid::GCLock),
       allocTask(rt, emptyChunks_.ref()),
       sweepTask(rt),
+      freeTask(rt),
       decommitTask(rt),
       nursery_(rt),
-      storeBuffer_(rt, nursery()),
-      blocksToFreeAfterMinorGC(
-          (size_t)JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE) {
+      storeBuffer_(rt, nursery()) {
   setGCMode(JSGC_MODE_GLOBAL);
 }
 
 #ifdef JS_GC_ZEAL
 
 void GCRuntime::getZealBits(uint32_t* zealBits, uint32_t* frequency,
                             uint32_t* scheduled) {
   *zealBits = zealModeBits;
@@ -1299,24 +1299,24 @@ bool GCRuntime::init(uint32_t maxbytes, 
   }
 
   return true;
 }
 
 void GCRuntime::finish() {
   // Wait for nursery background free to end and disable it to release memory.
   if (nursery().isEnabled()) {
-    nursery().waitBackgroundFreeEnd();
     nursery().disable();
   }
 
   // Wait until the background finalization and allocation stops and the
   // helper thread shuts down before we forcefully release any remaining GC
   // memory.
   sweepTask.join();
+  freeTask.join();
   allocTask.cancelAndWait();
   decommitTask.cancelAndWait();
 
 #ifdef JS_GC_ZEAL
   // Free memory associated with GC verification.
   finishVerifier();
 #endif
 
@@ -2899,17 +2899,17 @@ void GCRuntime::updateRuntimePointersToR
   jit::JitRuntime::SweepJitcodeGlobalTable(rt);
   for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
     cache->sweep();
   }
 
   // Type inference may put more blocks here to free.
   {
     AutoLockHelperThreadState lock;
-    blocksToFreeAfterSweeping.ref().freeAll();
+    lifoBlocksToFree.ref().freeAll();
   }
 
   // Call callbacks to get the rest of the system to fixup other untraced
   // pointers.
   callWeakPointerZonesCallbacks();
 }
 
 void GCRuntime::protectAndHoldArenas(Arena* arenaList) {
@@ -2947,21 +2947,19 @@ void GCRuntime::releaseRelocatedArenasWi
     arenaList = arenaList->next;
 
     // Clear the mark bits
     arena->unmarkAll();
 
     // Mark arena as empty
     arena->setAsFullyUnused();
 
-#if defined(JS_CRASH_DIAGNOSTICS) || defined(JS_GC_ZEAL)
-    JS_POISON(reinterpret_cast<void*>(arena->thingsStart()),
-              JS_MOVED_TENURED_PATTERN, arena->getThingsSpan(),
-              MemCheckKind::MakeNoAccess);
-#endif
+    AlwaysPoison(reinterpret_cast<void*>(arena->thingsStart()),
+                 JS_MOVED_TENURED_PATTERN, arena->getThingsSpan(),
+                 MemCheckKind::MakeNoAccess);
 
     releaseArena(arena, lock);
     ++count;
   }
 }
 
 // In debug mode we don't always release relocated arenas straight away.
 // Sometimes protect them instead and hold onto them until the next GC sweep
@@ -3535,137 +3533,154 @@ void GCRuntime::sweepBackgroundThings(Zo
   }
 }
 
 void GCRuntime::assertBackgroundSweepingFinished() {
 #ifdef DEBUG
   {
     AutoLockHelperThreadState lock;
     MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
-    MOZ_ASSERT(blocksToFreeAfterSweeping.ref().computedSizeOfExcludingThis() ==
-               0);
   }
 
   for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
     for (auto i : AllAllocKinds()) {
       MOZ_ASSERT(!zone->arenas.arenaListsToSweep(i));
       MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(i));
     }
   }
 #endif
 }
 
-void GCRuntime::queueZonesForBackgroundSweep(ZoneList& zones) {
-  AutoLockHelperThreadState lock;
-  backgroundSweepZones.ref().transferFrom(zones);
-  if (sweepOnBackgroundThread) {
-    sweepTask.startIfIdle(lock);
-  }
-}
-
-void GCRuntime::freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo) {
-  MOZ_ASSERT(JS::RuntimeHeapIsBusy());
-  AutoLockHelperThreadState lock;
-  blocksToFreeAfterSweeping.ref().transferUnusedFrom(lifo);
-}
-
-void GCRuntime::freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo) {
-  MOZ_ASSERT(JS::RuntimeHeapIsBusy());
-  AutoLockHelperThreadState lock;
-  blocksToFreeAfterSweeping.ref().transferFrom(lifo);
-}
-
-void GCRuntime::freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo) {
-  blocksToFreeAfterMinorGC.ref().transferFrom(lifo);
-}
-
-BackgroundSweepTask::BackgroundSweepTask(JSRuntime* rt)
-    : GCParallelTaskHelper(rt), done(false) {}
-
-bool BackgroundSweepTask::isRunning() const {
-  AutoLockHelperThreadState lock;
-  return isRunningWithLockHeld(lock);
-}
-
-bool BackgroundSweepTask::isRunningWithLockHeld(
-    const AutoLockHelperThreadState& lock) const {
-  return Base::isRunningWithLockHeld(lock) && !done;
-}
-
-void BackgroundSweepTask::startIfIdle(AutoLockHelperThreadState& lock) {
-  MOZ_ASSERT(CanUseExtraThreads());
-
-  if (isRunningWithLockHeld(lock)) {
-    return;
-  }
-
-  // Join the previous invocation of the task. This will return immediately
-  // if the thread has never been started.
-  joinWithLockHeld(lock);
-
-  done = false;
-
-  if (!startWithLockHeld(lock)) {
-    AutoUnlockHelperThreadState unlock(lock);
-    runFromMainThread(runtime());
-  }
-}
-
-void BackgroundSweepTask::runFromMainThread(JSRuntime* rt) {
+void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList& zones) {
   {
     AutoLockHelperThreadState lock;
-    MOZ_ASSERT(!isRunningWithLockHeld(lock));
-    joinWithLockHeld(lock);
-    done = false;
-  }
-
-  Base::runFromMainThread(rt);
+    backgroundSweepZones.ref().transferFrom(zones);
+    if (sweepOnBackgroundThread) {
+      sweepTask.startOrRunIfIdle(lock);
+    }
+  }
+  if (!sweepOnBackgroundThread) {
+    sweepTask.joinAndRunFromMainThread(rt);
+  }
 }
 
 void BackgroundSweepTask::run() {
   AutoTraceLog logSweeping(TraceLoggerForCurrentThread(),
                            TraceLogger_GCSweeping);
 
   AutoLockHelperThreadState lock;
   AutoSetThreadIsSweeping threadIsSweeping;
 
-  MOZ_ASSERT(!done);
-
   runtime()->gc.sweepFromBackgroundThread(lock);
 
-  // Signal to the main thread that we're finished, because we release the
-  // lock again before GCParallelTask's state is changed to finished.
-  done = true;
+  // Signal to the main thread that we're about to finish, because we release
+  // the lock again before GCParallelTask's state is changed to finished.
+  setFinishing(lock);
 }
 
 void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState& lock) {
   do {
     ZoneList zones;
     zones.transferFrom(backgroundSweepZones.ref());
     LifoAlloc freeLifoAlloc(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
-    freeLifoAlloc.transferFrom(&blocksToFreeAfterSweeping.ref());
+    freeLifoAlloc.transferFrom(&lifoBlocksToFree.ref());
 
     AutoUnlockHelperThreadState unlock(lock);
     sweepBackgroundThings(zones, freeLifoAlloc);
 
-    // The main thread may call queueZonesForBackgroundSweep() while this is
+    // The main thread may call queueZonesAndStartBackgroundSweep() while this is
     // running so we must check there is no more work after releasing the
     // lock.
   } while (!backgroundSweepZones.ref().isEmpty());
 }
 
 void GCRuntime::waitBackgroundSweepEnd() {
   sweepTask.join();
 
   // TODO: Improve assertion to work in incremental GC?
   if (!isIncrementalGCInProgress()) {
     assertBackgroundSweepingFinished();
   }
 }
 
+void GCRuntime::queueUnusedLifoBlocksForFree(LifoAlloc* lifo) {
+  MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+  AutoLockHelperThreadState lock;
+  lifoBlocksToFree.ref().transferUnusedFrom(lifo);
+}
+
+void GCRuntime::queueAllLifoBlocksForFree(LifoAlloc* lifo) {
+  MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+  AutoLockHelperThreadState lock;
+  lifoBlocksToFree.ref().transferFrom(lifo);
+}
+
+void GCRuntime::queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo) {
+  lifoBlocksToFreeAfterMinorGC.ref().transferFrom(lifo);
+}
+
+void GCRuntime::queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers) {
+  AutoLockHelperThreadState lock;
+
+  if (!buffersToFreeAfterMinorGC.ref().empty()) {
+    // In the rare case that this hasn't processed the buffers from a previous
+    // minor GC we have to wait here.
+    MOZ_ASSERT(freeTask.isRunningWithLockHeld(lock));
+    freeTask.joinWithLockHeld(lock);
+  }
+
+  MOZ_ASSERT(buffersToFreeAfterMinorGC.ref().empty());
+  mozilla::Swap(buffersToFreeAfterMinorGC.ref(), buffers);
+}
+
+void GCRuntime::startBackgroundFree() {
+  if (CanUseExtraThreads()) {
+    AutoLockHelperThreadState lock;
+    freeTask.startOrRunIfIdle(lock);
+  } else {
+    freeTask.joinAndRunFromMainThread(rt);
+  }
+}
+
+void BackgroundFreeTask::run() {
+  AutoTraceLog logFreeing(TraceLoggerForCurrentThread(),
+                           TraceLogger_GCFree);
+
+  AutoLockHelperThreadState lock;
+
+  runtime()->gc.freeFromBackgroundThread(lock);
+
+  // Signal to the main thread that we're about to finish, because we release
+  // the lock again before GCParallelTask's state is changed to finished.
+  setFinishing(lock);
+}
+
+void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState& lock) {
+  do {
+    LifoAlloc lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
+    lifoBlocks.transferFrom(&lifoBlocksToFree.ref());
+
+    Nursery::BufferSet buffers;
+    mozilla::Swap(buffers, buffersToFreeAfterMinorGC.ref());
+
+    AutoUnlockHelperThreadState unlock(lock);
+
+    lifoBlocks.freeAll();
+
+    for (Nursery::BufferSet::Range r = buffers.all(); !r.empty(); r.popFront()) {
+      rt->defaultFreeOp()->free_(r.front());
+    }
+  } while (!lifoBlocksToFree.ref().isEmpty() ||
+           !buffersToFreeAfterMinorGC.ref().empty());
+}
+
+void GCRuntime::waitBackgroundFreeEnd() {
+  freeTask.join();
+}
+
 struct IsAboutToBeFinalizedFunctor {
   template <typename T>
   bool operator()(Cell** t) {
     mozilla::DebugOnly<const Cell*> prior = *t;
     bool result = IsAboutToBeFinalizedUnbarriered(reinterpret_cast<T**>(t));
     // Sweep should not have to deal with moved pointers, since moving GC
     // handles updating the UID table manually.
     MOZ_ASSERT(*t == prior);
@@ -3921,17 +3936,17 @@ void GCRuntime::purgeRuntime() {
 
   for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
     zone->purgeAtomCacheOrDefer();
     zone->externalStringCache().purge();
     zone->functionToStringCache().purge();
   }
 
   JSContext* cx = rt->mainContextFromOwnThread();
-  freeUnusedLifoBlocksAfterSweeping(&cx->tempLifoAlloc());
+  queueUnusedLifoBlocksForFree(&cx->tempLifoAlloc());
   cx->interpreterStack().purge(rt);
   cx->frontendCollectionPool().purge();
 
   rt->caches().purge();
 
   if (auto cache = rt->maybeThisRuntimeSharedImmutableStrings()) {
     cache->purge();
   }
@@ -4309,16 +4324,17 @@ bool GCRuntime::beginMarkPhase(JS::gcrea
                               gcstats::PhaseKind::BUFFER_GRAY_ROOTS,
                               helperLock);
     }
     AutoUnlockHelperThreadState unlock(helperLock);
 
     // Discard JIT code. For incremental collections, the sweep phase will
     // also discard JIT code.
     DiscardJITCodeForGC(rt);
+    startBackgroundFreeAfterMinorGC();
 
     /*
      * Relazify functions after discarding JIT code (we can't relazify
      * functions with JIT code) and before the actual mark phase, so that
      * the current GC can collect the JSScripts we're unlinking here.  We do
      * this only when we're performing a shrinking GC, as too much
      * relazification can cause performance issues when we have to reparse
      * the same functions over and over.
@@ -5447,17 +5463,17 @@ static void SweepWeakMaps(GCParallelTask
 static void SweepUniqueIds(GCParallelTask* task) {
   for (SweepGroupZonesIter zone(task->runtime()); !zone.done(); zone.next()) {
     zone->sweepUniqueIds();
   }
 }
 
 void GCRuntime::startTask(GCParallelTask& task, gcstats::PhaseKind phase,
                           AutoLockHelperThreadState& locked) {
-  if (!task.startWithLockHeld(locked)) {
+  if (!CanUseExtraThreads() || !task.startWithLockHeld(locked)) {
     AutoUnlockHelperThreadState unlock(locked);
     gcstats::AutoPhase ap(stats(), phase);
     task.runFromMainThread(rt);
   }
 }
 
 void GCRuntime::joinTask(GCParallelTask& task, gcstats::PhaseKind phase,
                          AutoLockHelperThreadState& locked) {
@@ -5755,16 +5771,19 @@ bool GCRuntime::shouldYieldForZeal(ZealM
 IncrementalProgress GCRuntime::endSweepingSweepGroup(FreeOp* fop,
                                                      SliceBudget& budget) {
   {
     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
     FreeOp fop(rt);
     callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
   }
 
+  /* Free LIFO blocks on a background thread if possible. */
+  startBackgroundFree();
+
   /* Update the GC state for zones we have swept. */
   for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
     AutoLockGC lock(rt);
     zone->changeGCState(Zone::Sweep, Zone::Finished);
     zone->threshold.updateAfterGC(zone->usage.gcBytes(), invocationKind,
                                   tunables, schedulingState, lock);
     zone->updateAllGCMallocCountersOnGCEnd(lock);
     zone->arenas.unmarkPreMarkedFreeCells();
@@ -5782,21 +5801,17 @@ IncrementalProgress GCRuntime::endSweepi
     } else {
       zones.append(zone);
     }
   }
   if (sweepAtomsZone) {
     zones.append(atomsZone);
   }
 
-  queueZonesForBackgroundSweep(zones);
-
-  if (!sweepOnBackgroundThread) {
-    sweepTask.runFromMainThread(rt);
-  }
+  queueZonesAndStartBackgroundSweep(zones);
 
   return Finished;
 }
 
 void GCRuntime::beginSweepPhase(JS::gcreason::Reason reason,
                                 AutoGCSession& session) {
   /*
    * Sweep phase.
@@ -5811,19 +5826,16 @@ void GCRuntime::beginSweepPhase(JS::gcre
   AutoSetThreadIsSweeping threadIsSweeping;
 
   releaseHeldRelocatedArenas();
 
   computeNonIncrementalMarkingForValidation(session);
 
   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
 
-  sweepOnBackgroundThread = reason != JS::gcreason::DESTROY_RUNTIME &&
-                            !gcTracer.traceEnabled() && CanUseExtraThreads();
-
   hasMarkedGrayRoots = false;
 
   AssertNoWrappersInGrayList(rt);
   DropStringWrappers(rt);
 
   groupZonesForSweeping(reason);
 
   sweepActions->assertFinished();
@@ -6625,16 +6637,17 @@ void GCRuntime::endSweepPhase(bool destr
   }
 #endif
 
   AssertNoWrappersInGrayList(rt);
 }
 
 void GCRuntime::beginCompactPhase() {
   MOZ_ASSERT(!isBackgroundSweeping());
+  assertBackgroundSweepingFinished();
 
   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
 
   MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
   for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
     if (CanRelocateZone(zone)) {
       zonesToMaybeCompact.ref().append(zone);
     }
@@ -6803,17 +6816,17 @@ GCRuntime::IncrementalResult GCRuntime::
       for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         zone->setNeedsIncrementalBarrier(false);
         zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
         zone->arenas.unmarkPreMarkedFreeCells();
       }
 
       {
         AutoLockHelperThreadState lock;
-        blocksToFreeAfterSweeping.ref().freeAll();
+        lifoBlocksToFree.ref().freeAll();
       }
 
       lastMarkSlice = false;
       incrementalState = State::Finish;
 
       MOZ_ASSERT(!marker.shouldCheckCompartments());
 
       break;
@@ -6915,16 +6928,21 @@ static bool IsShutdownGC(JS::gcreason::R
 static bool ShouldCleanUpEverything(JS::gcreason::Reason reason,
                                     JSGCInvocationKind gckind) {
   // During shutdown, we must clean everything up, for the sake of leak
   // detection. When a runtime has no contexts, or we're doing a GC before a
   // shutdown CC, those are strong indications that we're shutting down.
   return IsShutdownGC(reason) || gckind == GC_SHRINK;
 }
 
+static bool ShouldSweepOnBackgroundThread(JS::gcreason::Reason reason) {
+  return reason != JS::gcreason::DESTROY_RUNTIME &&
+         !gcTracer.traceEnabled() && CanUseExtraThreads();
+}
+
 void GCRuntime::incrementalSlice(SliceBudget& budget,
                                  JS::gcreason::Reason reason,
                                  AutoGCSession& session) {
   AutoDisableBarriers disableBarriers(rt);
 
   bool destroyingRuntime = (reason == JS::gcreason::DESTROY_RUNTIME);
 
   number++;
@@ -6972,16 +6990,17 @@ void GCRuntime::incrementalSlice(SliceBu
     budget.makeUnlimited();
   }
 
   switch (incrementalState) {
     case State::NotActive:
       incMajorGcNumber();
       initialReason = reason;
       cleanUpEverything = ShouldCleanUpEverything(reason, invocationKind);
+      sweepOnBackgroundThread = ShouldSweepOnBackgroundThread(reason);
       isCompacting = shouldCompact();
       MOZ_ASSERT(!lastMarkSlice);
       rootsRemoved = false;
 
       incrementalState = State::MarkRoots;
 
       MOZ_FALLTHROUGH;
 
@@ -7738,17 +7757,17 @@ void js::PrepareForDebugGC(JSRuntime* rt
 void GCRuntime::onOutOfMallocMemory() {
   // Stop allocating new chunks.
   allocTask.cancelAndWait();
 
   // Make sure we release anything queued for release.
   decommitTask.join();
 
   // Wait for background free of nursery huge slots to finish.
-  nursery().waitBackgroundFreeEnd();
+  sweepTask.join();
 
   AutoLockGC lock(rt);
   onOutOfMallocMemory(lock);
 }
 
 void GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock) {
   // Release any relocated arenas we may be holding on to, without releasing
   // the GC lock.
@@ -7785,32 +7804,49 @@ void GCRuntime::minorGC(JS::gcreason::Re
   gcstats::AutoPhase ap(stats(), phase);
 
   nursery().clearMinorGCRequest();
   TraceLoggerThread* logger = TraceLoggerForCurrentThread();
   AutoTraceLog logMinorGC(logger, TraceLogger_MinorGC);
   nursery().collect(reason);
   MOZ_ASSERT(nursery().isEmpty());
 
-  blocksToFreeAfterMinorGC.ref().freeAll();
+  startBackgroundFreeAfterMinorGC();
 
 #ifdef JS_GC_ZEAL
   if (hasZealMode(ZealMode::CheckHeapAfterGC)) {
     CheckHeapAfterGC(rt);
   }
 #endif
 
   {
     AutoLockGC lock(rt);
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
       maybeAllocTriggerZoneGC(zone, lock);
     }
   }
 }
 
+void GCRuntime::startBackgroundFreeAfterMinorGC() {
+  MOZ_ASSERT(nursery().isEmpty());
+
+  {
+    AutoLockHelperThreadState lock;
+
+    lifoBlocksToFree.ref().transferFrom(&lifoBlocksToFreeAfterMinorGC.ref());
+
+    if (lifoBlocksToFree.ref().isEmpty() &&
+        buffersToFreeAfterMinorGC.ref().empty()) {
+      return;
+    }
+  }
+
+  startBackgroundFree();
+}
+
 JS::AutoDisableGenerationalGC::AutoDisableGenerationalGC(JSContext* cx)
     : cx(cx) {
   if (!cx->generationalDisabled) {
     cx->runtime()->gc.evictNursery(JS::gcreason::API);
     cx->nursery().disable();
   }
   ++cx->generationalDisabled;
 }
@@ -7853,17 +7889,17 @@ bool GCRuntime::gcIfRequested() {
 }
 
 void js::gc::FinishGC(JSContext* cx) {
   if (JS::IsIncrementalGCInProgress(cx)) {
     JS::PrepareForIncrementalGC(cx);
     JS::FinishIncrementalGC(cx, JS::gcreason::API);
   }
 
-  cx->nursery().waitBackgroundFreeEnd();
+  cx->runtime()->gc.waitBackgroundFreeEnd();
 }
 
 Realm* js::NewRealm(JSContext* cx, JSPrincipals* principals,
                     const JS::RealmOptions& options) {
   JSRuntime* rt = cx->runtime();
   JS_AbortIfWrongThread(cx);
 
   UniquePtr<Zone> zoneHolder;
--- a/js/src/gc/GCParallelTask.h
+++ b/js/src/gc/GCParallelTask.h
@@ -26,17 +26,17 @@ class GCParallelTask {
  public:
   using TaskFunc = void (*)(GCParallelTask*);
 
  private:
   JSRuntime* const runtime_;
   TaskFunc func_;
 
   // The state of the parallel computation.
-  enum class State { NotStarted, Dispatched, Finished };
+  enum class State { NotStarted, Dispatched, Finishing, Finished };
   UnprotectedData<State> state_;
 
   // Amount of time this task took to execute.
   MainThreadOrGCTaskData<mozilla::TimeDuration> duration_;
 
   explicit GCParallelTask(const GCParallelTask&) = delete;
 
  protected:
@@ -74,24 +74,29 @@ class GCParallelTask {
 
   // If multiple tasks are to be started or joined at once, it is more
   // efficient to take the helper thread lock once and use these methods.
   MOZ_MUST_USE bool startWithLockHeld(AutoLockHelperThreadState& locked);
   void joinWithLockHeld(AutoLockHelperThreadState& locked);
 
   // Instead of dispatching to a helper, run the task on the current thread.
   void runFromMainThread(JSRuntime* rt);
+  void joinAndRunFromMainThread(JSRuntime* rt);
+
+  // If the task is not already running, either start it or run it on the main
+  // thread if that fails.
+  void startOrRunIfIdle(AutoLockHelperThreadState& lock);
 
   // Dispatch a cancelation request.
   void cancelAndWait() {
     cancel_ = true;
     join();
   }
 
-  // Check if a task is actively running.
+  // Check if a task is running and has not called setFinishing().
   bool isRunningWithLockHeld(const AutoLockHelperThreadState& lock) const {
     return isDispatched(lock);
   }
   bool isRunning() const;
 
  private:
   void assertNotStarted() const {
     // Don't lock here because that adds extra synchronization in debug
@@ -107,26 +112,36 @@ class GCParallelTask {
   bool isFinished(const AutoLockHelperThreadState& lock) const {
     return state_ == State::Finished;
   }
   void setDispatched(const AutoLockHelperThreadState& lock) {
     MOZ_ASSERT(state_ == State::NotStarted);
     state_ = State::Dispatched;
   }
   void setFinished(const AutoLockHelperThreadState& lock) {
-    MOZ_ASSERT(state_ == State::Dispatched);
+    MOZ_ASSERT(state_ == State::Dispatched || state_ == State::Finishing);
     state_ = State::Finished;
   }
   void setNotStarted(const AutoLockHelperThreadState& lock) {
     MOZ_ASSERT(state_ == State::Finished);
     state_ = State::NotStarted;
   }
 
   void runTask() { func_(this); }
 
+ protected:
+  // Can be called to indicate that although the task is still
+  // running, it is about to finish.
+  void setFinishing(const AutoLockHelperThreadState& lock) {
+    MOZ_ASSERT(state_ == State::NotStarted || state_ == State::Dispatched);
+    if (state_ == State::Dispatched) {
+      state_ = State::Finishing;
+    }
+  }
+
   // This should be friended to HelperThread, but cannot be because it
   // would introduce several circular dependencies.
  public:
   void runFromHelperThread(AutoLockHelperThreadState& locked);
 };
 
 // CRTP template to handle cast to derived type when calling run().
 template <typename Derived>
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -101,29 +101,24 @@ class ChunkPool {
     Chunk* operator->() const { return get(); }
 
    private:
     Chunk* current_;
   };
 };
 
 class BackgroundSweepTask : public GCParallelTaskHelper<BackgroundSweepTask> {
-  using Base = GCParallelTaskHelper<BackgroundSweepTask>;
-
-  HelperThreadLockData<bool> done;
-
  public:
-  explicit BackgroundSweepTask(JSRuntime* rt);
+  explicit BackgroundSweepTask(JSRuntime* rt) : GCParallelTaskHelper(rt) {}
+  void run();
+};
 
-  bool isRunning() const;
-  bool isRunningWithLockHeld(const AutoLockHelperThreadState& lock) const;
-
-  void startIfIdle(AutoLockHelperThreadState& lock);
-  void runFromMainThread(JSRuntime* rt);
-
+class BackgroundFreeTask : public GCParallelTaskHelper<BackgroundFreeTask> {
+ public:
+  explicit BackgroundFreeTask(JSRuntime* rt) : GCParallelTaskHelper(rt) {}
   void run();
 };
 
 // Performs extra allocation off thread so that when memory is required on the
 // main thread it will already be available and waiting.
 class BackgroundAllocTask : public GCParallelTaskHelper<BackgroundAllocTask> {
   // Guarded by the GC lock.
   GCLockData<ChunkPool&> chunkPool_;
@@ -314,16 +309,17 @@ class GCRuntime {
   bool isHeapCompacting() const { return state() == State::Compact; }
   bool isForegroundSweeping() const { return state() == State::Sweep; }
   bool isBackgroundSweeping() { return sweepTask.isRunning(); }
   void waitBackgroundSweepEnd();
   void waitBackgroundSweepOrAllocEnd() {
     waitBackgroundSweepEnd();
     allocTask.cancelAndWait();
   }
+  void waitBackgroundFreeEnd();
 
   void lockGC() { lock.lock(); }
 
   void unlockGC() { lock.unlock(); }
 
 #ifdef DEBUG
   bool currentThreadHasLockedGC() const { return lock.ownedByCurrentThread(); }
 #endif  // DEBUG
@@ -470,19 +466,21 @@ class GCRuntime {
   void endVerifyPreBarriers();
   void finishVerifier();
   bool isVerifyPreBarriersEnabled() const { return !!verifyPreData; }
   bool shouldYieldForZeal(ZealMode mode);
 #else
   bool isVerifyPreBarriersEnabled() const { return false; }
 #endif
 
-  // Free certain LifoAlloc blocks when it is safe to do so.
-  void freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo);
-  void freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo);
+  // Queue memory memory to be freed on a background thread if possible.
+  void queueUnusedLifoBlocksForFree(LifoAlloc* lifo);
+  void queueAllLifoBlocksForFree(LifoAlloc* lifo);
+  void queueAllLifoBlocksForFreeAfterMinorGC(LifoAlloc* lifo);
+  void queueBuffersForFreeAfterMinorGC(Nursery::BufferSet& buffers);
 
   // Public here for ReleaseArenaLists and FinalizeTypedArenas.
   void releaseArena(Arena* arena, const AutoLockGC& lock);
 
   void releaseHeldRelocatedArenas();
   void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
 
   // Allocator
@@ -596,16 +594,17 @@ class GCRuntime {
   void purgeRuntime();
   MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason,
                                    AutoGCSession& session);
   bool prepareZonesForCollection(JS::gcreason::Reason reason, bool* isFullOut);
   bool shouldPreserveJITCode(JS::Realm* realm,
                              const mozilla::TimeStamp& currentTime,
                              JS::gcreason::Reason reason,
                              bool canAllocateMoreCode);
+  void startBackgroundFreeAfterMinorGC();
   void traceRuntimeForMajorGC(JSTracer* trc, AutoGCSession& session);
   void traceRuntimeAtoms(JSTracer* trc, const AutoAccessAtomsZone& atomsAccess);
   void traceKeptAtoms(JSTracer* trc);
   void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark);
   void maybeDoCycleCollection();
   void markCompartments();
   IncrementalProgress markUntilBudgetExhausted(SliceBudget& sliceBudget,
                                                gcstats::PhaseKind phase);
@@ -643,19 +642,20 @@ class GCRuntime {
                                         Zone* zone, AllocKind kind);
   IncrementalProgress sweepShapeTree(FreeOp* fop, SliceBudget& budget,
                                      Zone* zone);
   void endSweepPhase(bool lastGC);
   bool allCCVisibleZonesWereCollected() const;
   void sweepZones(FreeOp* fop, bool destroyingRuntime);
   void decommitAllWithoutUnlocking(const AutoLockGC& lock);
   void startDecommit();
-  void queueZonesForBackgroundSweep(ZoneList& zones);
-  void maybeStartBackgroundSweep(AutoLockHelperThreadState& lock);
+  void queueZonesAndStartBackgroundSweep(ZoneList& zones);
   void sweepFromBackgroundThread(AutoLockHelperThreadState& lock);
+  void startBackgroundFree();
+  void freeFromBackgroundThread(AutoLockHelperThreadState& lock);
   void sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks);
   void assertBackgroundSweepingFinished();
   bool shouldCompact();
   void beginCompactPhase();
   IncrementalProgress compactPhase(JS::gcreason::Reason reason,
                                    SliceBudget& sliceBudget,
                                    AutoGCSession& session);
   void endCompactPhase();
@@ -858,20 +858,22 @@ class GCRuntime {
 
   /* Whether any sweeping will take place in the separate GC helper thread. */
   MainThreadData<bool> sweepOnBackgroundThread;
 
   /* Singly linked list of zones to be swept in the background. */
   HelperThreadLockData<ZoneList> backgroundSweepZones;
 
   /*
-   * Free LIFO blocks are transferred to this allocator before being freed on
-   * the background GC thread after sweeping.
+   * Free LIFO blocks are transferred to these allocators before being freed on
+   * a background thread.
    */
-  HelperThreadLockData<LifoAlloc> blocksToFreeAfterSweeping;
+  HelperThreadLockData<LifoAlloc> lifoBlocksToFree;
+  MainThreadData<LifoAlloc> lifoBlocksToFreeAfterMinorGC;
+  HelperThreadLockData<Nursery::BufferSet> buffersToFreeAfterMinorGC;
 
   /* Index of current sweep group (for stats). */
   MainThreadData<unsigned> sweepGroupIndex;
 
   /*
    * Incremental sweep state.
    */
 
@@ -1000,39 +1002,37 @@ class GCRuntime {
 #endif
 
   /* Synchronize GC heap access among GC helper threads and the main thread. */
   friend class js::AutoLockGC;
   friend class js::AutoLockGCBgAlloc;
   js::Mutex lock;
 
   friend class BackgroundSweepTask;
+  friend class BackgroundFreeTask;
 
   BackgroundAllocTask allocTask;
   BackgroundSweepTask sweepTask;
+  BackgroundFreeTask freeTask;
   BackgroundDecommitTask decommitTask;
 
   /*
    * During incremental sweeping, this field temporarily holds the arenas of
    * the current AllocKind being swept in order of increasing free space.
    */
   MainThreadData<SortedArenaList> incrementalSweepList;
 
  private:
   MainThreadData<Nursery> nursery_;
   MainThreadData<gc::StoreBuffer> storeBuffer_;
 
  public:
   Nursery& nursery() { return nursery_.ref(); }
   gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }
 
-  // Free LIFO blocks are transferred to this allocator before being freed
-  // after minor GC.
-  MainThreadData<LifoAlloc> blocksToFreeAfterMinorGC;
-
   void* addressOfNurseryPosition() {
     return nursery_.refNoCheck().addressOfPosition();
   }
   const void* addressOfNurseryCurrentEnd() {
     return nursery_.refNoCheck().addressOfCurrentEnd();
   }
   const void* addressOfStringNurseryCurrentEnd() {
     return nursery_.refNoCheck().addressOfCurrentStringEnd();
@@ -1042,17 +1042,16 @@ class GCRuntime {
   }
 
   void minorGC(JS::gcreason::Reason reason,
                gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC)
       JS_HAZ_GC_CALL;
   void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
     minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
   }
-  void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);
 
   friend class MarkingValidator;
   friend class AutoEnterIteration;
 };
 
 /* Prevent compartments and zones from being collected during iteration. */
 class MOZ_RAII AutoEnterIteration {
   GCRuntime* gc;
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -161,19 +161,18 @@ class FreeSpan {
       // The last space points to the next free span (which may be empty).
       const FreeSpan* next = nextSpan(arena);
       first = next->first;
       last = next->last;
     } else {
       return nullptr;  // The span is empty.
     }
     checkSpan(arena);
-    JS_EXTRA_POISON(reinterpret_cast<void*>(thing),
-                    JS_ALLOCATED_TENURED_PATTERN, thingSize,
-                    MemCheckKind::MakeUndefined);
+    DebugOnlyPoison(reinterpret_cast<void*>(thing), JS_ALLOCATED_TENURED_PATTERN,
+                    thingSize, MemCheckKind::MakeUndefined);
     return reinterpret_cast<TenuredCell*>(thing);
   }
 
   inline void checkSpan(const Arena* arena) const;
   inline void checkRange(uintptr_t first, uintptr_t last,
                          const Arena* arena) const;
 };
 
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -2282,18 +2282,18 @@ bool MarkStack::resize(size_t newCapacit
   return true;
 }
 
 inline void MarkStack::poisonUnused() {
   static_assert((JS_FRESH_MARK_STACK_PATTERN & TagMask) > LastTag,
                 "The mark stack poison pattern must not look like a valid "
                 "tagged pointer");
 
-  JS_POISON(stack().begin() + topIndex_, JS_FRESH_MARK_STACK_PATTERN,
-            stack().capacity() - topIndex_, MemCheckKind::MakeUndefined);
+  AlwaysPoison(stack().begin() + topIndex_, JS_FRESH_MARK_STACK_PATTERN,
+               stack().capacity() - topIndex_, MemCheckKind::MakeUndefined);
 }
 
 size_t MarkStack::sizeOfExcludingThis(
     mozilla::MallocSizeOf mallocSizeOf) const {
   return stack().sizeOfExcludingThis(mallocSizeOf);
 }
 
 MarkStackIter::MarkStackIter(MarkStack& stack)
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -40,31 +40,16 @@ using namespace gc;
 
 using mozilla::DebugOnly;
 using mozilla::PodCopy;
 using mozilla::TimeDuration;
 using mozilla::TimeStamp;
 
 constexpr uintptr_t CanaryMagicValue = 0xDEADB15D;
 
-struct js::Nursery::FreeMallocedBuffersTask
-    : public GCParallelTaskHelper<FreeMallocedBuffersTask> {
-  explicit FreeMallocedBuffersTask(FreeOp* fop)
-      : GCParallelTaskHelper(fop->runtime()), fop_(fop) {}
-  void transferBuffersToFree(MallocedBuffersSet& buffersToFree,
-                             const AutoLockHelperThreadState& lock);
-  ~FreeMallocedBuffersTask() { join(); }
-
-  void run();
-
- private:
-  FreeOp* fop_;
-  MallocedBuffersSet buffers_;
-};
-
 #ifdef JS_GC_ZEAL
 struct js::Nursery::Canary {
   uintptr_t magicValue;
   Canary* next;
 };
 #endif
 
 namespace js {
@@ -82,29 +67,29 @@ static_assert(sizeof(js::NurseryChunk) =
               "Nursery chunk size must match gc::Chunk size.");
 
 } /* namespace js */
 
 inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t extent) {
   MOZ_ASSERT(extent <= ChunkSize);
   MOZ_MAKE_MEM_UNDEFINED(this, extent);
 
-  JS_POISON(this, JS_FRESH_NURSERY_PATTERN, extent,
-            MemCheckKind::MakeUndefined);
+  Poison(this, JS_FRESH_NURSERY_PATTERN, extent,
+         MemCheckKind::MakeUndefined);
 
   new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer());
 }
 
 inline void js::NurseryChunk::poisonAfterSweep(size_t extent) {
   MOZ_ASSERT(extent <= ChunkSize);
   // We can poison the same chunk more than once, so first make sure memory
   // sanitizers will let us poison it.
   MOZ_MAKE_MEM_UNDEFINED(this, extent);
 
-  JS_POISON(this, JS_SWEPT_NURSERY_PATTERN, extent, MemCheckKind::MakeNoAccess);
+  Poison(this, JS_SWEPT_NURSERY_PATTERN, extent, MemCheckKind::MakeNoAccess);
 }
 
 /* static */ inline js::NurseryChunk* js::NurseryChunk::fromChunk(
     Chunk* chunk) {
   return reinterpret_cast<NurseryChunk*>(chunk);
 }
 
 inline Chunk* js::NurseryChunk::toChunk(JSRuntime* rt) {
@@ -123,36 +108,29 @@ js::Nursery::Nursery(JSRuntime* rt)
       currentChunk_(0),
       maxChunkCount_(0),
       chunkCountLimit_(0),
       timeInChunkAlloc_(0),
       profileThreshold_(0),
       enableProfiling_(false),
       canAllocateStrings_(false),
       reportTenurings_(0),
-      minorGCTriggerReason_(JS::gcreason::NO_REASON),
-      freeMallocedBuffersTask(nullptr)
+      minorGCTriggerReason_(JS::gcreason::NO_REASON)
 #ifdef JS_GC_ZEAL
       ,
       lastCanary_(nullptr)
 #endif
 {
   const char* env = getenv("MOZ_NURSERY_STRINGS");
   if (env && *env) {
     canAllocateStrings_ = (*env == '1');
   }
 }
 
 bool js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGCBgAlloc& lock) {
-  freeMallocedBuffersTask =
-      js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
-  if (!freeMallocedBuffersTask) {
-    return false;
-  }
-
   // The nursery is permanently disabled when recording or replaying. Nursery
   // collections may occur at non-deterministic points in execution.
   if (mozilla::recordreplay::IsRecordingOrReplaying()) {
     maxNurseryBytes = 0;
   }
 
   /* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
   chunkCountLimit_ = maxNurseryBytes >> ChunkShift;
@@ -201,17 +179,16 @@ bool js::Nursery::init(uint32_t maxNurse
   }
 
   MOZ_ASSERT(isEnabled());
   return true;
 }
 
 js::Nursery::~Nursery() {
   disable();
-  js_delete(freeMallocedBuffersTask);
 }
 
 void js::Nursery::enable() {
   MOZ_ASSERT(isEmpty());
   MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
   if (isEnabled() || !chunkCountLimit()) {
     return;
   }
@@ -392,17 +369,17 @@ void* js::Nursery::allocate(size_t size)
 
   void* thing = (void*)position();
   position_ = position() + size;
   // We count this regardless of the profiler's state, assuming that it costs
   // just as much to count it, as to check the profiler's state and decide not
   // to count it.
   stats().noteNurseryAlloc();
 
-  JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size,
+  DebugOnlyPoison(thing, JS_ALLOCATED_NURSERY_PATTERN, size,
                   MemCheckKind::MakeUndefined);
 
 #ifdef JS_GC_ZEAL
   if (runtime()->gc.hasZealMode(ZealMode::CheckNursery)) {
     auto canary = reinterpret_cast<Canary*>(position() - CanarySize);
     canary->magicValue = CanaryMagicValue;
     canary->next = nullptr;
     if (lastCanary_) {
@@ -984,17 +961,17 @@ void js::Nursery::doCollection(JS::gcrea
   endProfile(ProfileKey::UpdateJitActivations);
 
   startProfile(ProfileKey::ObjectsTenuredCallback);
   rt->gc.callObjectsTenuredCallback();
   endProfile(ProfileKey::ObjectsTenuredCallback);
 
   // Sweep.
   startProfile(ProfileKey::FreeMallocedBuffers);
-  freeMallocedBuffers();
+  rt->gc.queueBuffersForFreeAfterMinorGC(mallocedBuffers);
   endProfile(ProfileKey::FreeMallocedBuffers);
 
   startProfile(ProfileKey::ClearNursery);
   clear();
   endProfile(ProfileKey::ClearNursery);
 
   startProfile(ProfileKey::ClearStoreBuffer);
   runtime()->gc.storeBuffer().clear();
@@ -1012,67 +989,21 @@ void js::Nursery::doCollection(JS::gcrea
   previousGC.reason = reason;
   previousGC.nurseryCapacity = initialNurseryCapacity;
   previousGC.nurseryLazyCapacity = spaceToEnd(allocatedChunkCount());
   previousGC.nurseryUsedBytes = initialNurseryUsedBytes;
   previousGC.tenuredBytes = mover.tenuredSize;
   previousGC.tenuredCells = mover.tenuredCells;
 }
 
-void js::Nursery::FreeMallocedBuffersTask::transferBuffersToFree(
-    MallocedBuffersSet& buffersToFree, const AutoLockHelperThreadState& lock) {
-  // Transfer the contents of the source set to the task's buffers_ member by
-  // swapping the sets, which also clears the source.
-  MOZ_ASSERT(!isRunningWithLockHeld(lock));
-  MOZ_ASSERT(buffers_.empty());
-  mozilla::Swap(buffers_, buffersToFree);
-}
-
-void js::Nursery::FreeMallocedBuffersTask::run() {
-  for (MallocedBuffersSet::Range r = buffers_.all(); !r.empty(); r.popFront()) {
-    fop_->free_(r.front());
-  }
-  buffers_.clear();
-}
-
 bool js::Nursery::registerMallocedBuffer(void* buffer) {
   MOZ_ASSERT(buffer);
   return mallocedBuffers.putNew(buffer);
 }
 
-void js::Nursery::freeMallocedBuffers() {
-  if (mallocedBuffers.empty()) {
-    return;
-  }
-
-  bool started;
-  {
-    AutoLockHelperThreadState lock;
-    freeMallocedBuffersTask->joinWithLockHeld(lock);
-    freeMallocedBuffersTask->transferBuffersToFree(mallocedBuffers, lock);
-    started = freeMallocedBuffersTask->startWithLockHeld(lock);
-  }
-
-  if (!started) {
-    freeMallocedBuffersTask->runFromMainThread(runtime());
-  }
-
-  MOZ_ASSERT(mallocedBuffers.empty());
-}
-
-void js::Nursery::waitBackgroundFreeEnd() {
-  // We may finishRoots before nursery init if runtime init fails.
-  if (!isEnabled()) {
-    return;
-  }
-
-  MOZ_ASSERT(freeMallocedBuffersTask);
-  freeMallocedBuffersTask->join();
-}
-
 void js::Nursery::sweep(JSTracer* trc) {
   // Sweep unique IDs first before we sweep any tables that may be keyed based
   // on them.
   for (Cell* cell : cellsWithUid_) {
     JSObject* obj = static_cast<JSObject*>(cell);
     if (!IsForwarded(obj)) {
       obj->zone()->removeUniqueId(obj);
     } else {
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -145,16 +145,18 @@ class Nursery {
     char byte;
   };
 
   struct StringLayout {
     JS::Zone* zone;
     CellAlignedByte cell;
   };
 
+  using BufferSet = HashSet<void*, PointerHasher<void*>, SystemAllocPolicy>;
+
   explicit Nursery(JSRuntime* rt);
   ~Nursery();
 
   MOZ_MUST_USE bool init(uint32_t maxNurseryBytes, AutoLockGCBgAlloc& lock);
 
   unsigned chunkCountLimit() const { return chunkCountLimit_; }
 
   // Number of allocated (ready to use) chunks.
@@ -292,32 +294,30 @@ class Nursery {
    * should be freed at the end of a minor GC. Buffers are unregistered when
    * their owning objects are tenured.
    */
   bool registerMallocedBuffer(void* buffer);
 
   /* Mark a malloced buffer as no longer needing to be freed. */
   void removeMallocedBuffer(void* buffer) { mallocedBuffers.remove(buffer); }
 
-  void waitBackgroundFreeEnd();
-
   MOZ_MUST_USE bool addedUniqueIdToCell(gc::Cell* cell) {
     MOZ_ASSERT(IsInsideNursery(cell));
     MOZ_ASSERT(isEnabled());
     return cellsWithUid_.append(cell);
   }
 
   MOZ_MUST_USE bool queueDictionaryModeObjectToSweep(NativeObject* obj);
 
   size_t sizeOfHeapCommitted() const {
     return allocatedChunkCount() * gc::ChunkSize;
   }
   size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
     size_t total = 0;
-    for (MallocedBuffersSet::Range r = mallocedBuffers.all(); !r.empty();
+    for (BufferSet::Range r = mallocedBuffers.all(); !r.empty();
          r.popFront()) {
       total += mallocSizeOf(r.front());
     }
     total += mallocedBuffers.shallowSizeOfExcludingThis(mallocSizeOf);
     return total;
   }
 
   // The number of bytes from the start position to the end of the nursery.
@@ -484,23 +484,17 @@ class Nursery {
    */
   float calcPromotionRate(bool* validForTenuring) const;
 
   /*
    * The set of externally malloced buffers potentially kept live by objects
    * stored in the nursery. Any external buffers that do not belong to a
    * tenured thing at the end of a minor GC must be freed.
    */
-  typedef HashSet<void*, PointerHasher<void*>, SystemAllocPolicy>
-      MallocedBuffersSet;
-  MallocedBuffersSet mallocedBuffers;
-
-  // A task structure used to free the malloced bufers on a background thread.
-  struct FreeMallocedBuffersTask;
-  FreeMallocedBuffersTask* freeMallocedBuffersTask;
+  BufferSet mallocedBuffers;
 
   /*
    * During a collection most hoisted slot and element buffers indicate their
    * new location with a forwarding pointer at the base. This does not work
    * for buffers whose length is less than pointer width, or when different
    * buffers might overlap each other. For these, an entry in the following
    * table is used.
    */
@@ -586,19 +580,16 @@ class Nursery {
   void setIndirectForwardingPointer(void* oldData, void* newData);
 
   inline void setSlotsForwardingPointer(HeapSlot* oldSlots, HeapSlot* newSlots,
                                         uint32_t nslots);
   inline void setElementsForwardingPointer(ObjectElements* oldHeader,
                                            ObjectElements* newHeader,
                                            uint32_t capacity);
 
-  /* Free malloced pointers owned by freed things in the nursery. */
-  void freeMallocedBuffers();
-
   /*
    * Updates pointers to nursery objects that have been tenured and discards
    * pointers to objects that have been freed.
    */
   void sweep(JSTracer* trc);
 
   /*
    * Frees all non-live nursery-allocated things at the end of a minor
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -533,16 +533,20 @@ Zone* ZoneList::front() const {
 void ZoneList::append(Zone* zone) {
   ZoneList singleZone(zone);
   transferFrom(singleZone);
 }
 
 void ZoneList::transferFrom(ZoneList& other) {
   check();
   other.check();
+  if (!other.head) {
+    return;
+  }
+
   MOZ_ASSERT(tail != other.tail);
 
   if (tail) {
     tail->listNext_ = other.head;
   } else {
     head = other.head;
   }
   tail = other.tail;
--- a/js/src/jit-test/tests/wasm/gc/anyref-global-prebarrier.js
+++ b/js/src/jit-test/tests/wasm/gc/anyref-global-prebarrier.js
@@ -1,10 +1,17 @@
 // |jit-test| skip-if: !wasmGcEnabled()
 
+// Do not run the test if we're jit-compiling JS, since it's the wasm frames
+// we're interested in and eager JS compilation can upset the test.
+
+opts = getJitCompilerOptions();
+if (opts['ion.enable'] || opts['baseline.enable'])
+  quit();
+
 const { startProfiling, endProfiling, assertEqPreciseStacks, isSingleStepProfilingEnabled } = WasmHelpers;
 
 let e = wasmEvalText(`(module
     (gc_feature_opt_in 2)
     (global $g (mut anyref) (ref.null))
     (func (export "set") (param anyref) get_local 0 set_global $g)
 )`).exports;
 
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -50,18 +50,18 @@ using namespace js::jit;
   MOZ_ASSERT(stackVal->kind() != StackValue::Stack);
   return SlotIgnore;
 }
 
 void ICStubSpace::freeAllAfterMinorGC(Zone* zone) {
   if (zone->isAtomsZone()) {
     MOZ_ASSERT(allocator_.isEmpty());
   } else {
-    zone->runtimeFromMainThread()->gc.freeAllLifoBlocksAfterMinorGC(
-        &allocator_);
+    JSRuntime* rt = zone->runtimeFromMainThread();
+    rt->gc.queueAllLifoBlocksForFreeAfterMinorGC(&allocator_);
   }
 }
 
 static bool CheckFrame(InterpreterFrame* fp) {
   if (fp->isDebuggerEvalFrame()) {
     // Debugger eval-in-frame. These are likely short-running scripts so
     // don't bother compiling them for now.
     JitSpew(JitSpew_BaselineAbort, "debugger frame");
--- a/js/src/jit/ExecutableAllocator.cpp
+++ b/js/src/jit/ExecutableAllocator.cpp
@@ -288,19 +288,19 @@ void ExecutableAllocator::addSizeOfCode(
 
     // Use the pool's mark bit to indicate we made the pool writable.
     // This avoids reprotecting a pool multiple times.
     if (!pool->isMarked()) {
       reprotectPool(rt, pool, ProtectionSetting::Writable);
       pool->mark();
     }
 
-    // Note: we use memset instead of JS_POISON because we want to poison
+    // Note: we use memset instead of js::Poison because we want to poison
     // JIT code in release builds too. Furthermore, we don't want the
-    // invalid-ObjectValue poisoning JS_POISON does in debug builds.
+    // invalid-ObjectValue poisoning js::Poison does in debug builds.
     memset(ranges[i].start, JS_SWEPT_CODE_PATTERN, ranges[i].size);
     MOZ_MAKE_MEM_NOACCESS(ranges[i].start, ranges[i].size);
   }
 
   // Make the pools executable again and drop references.
   for (size_t i = 0; i < ranges.length(); i++) {
     ExecutablePool* pool = ranges[i].pool;
     if (pool->isMarked()) {
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -90,17 +90,18 @@ static bool DepthFirstSearchUse(MIRGener
 
       MDefinition* cdef = consumer->toDefinition();
       if (!cdef->isPhi()) {
         // The producer is explicitly used by a definition.
         return push(producer, use);
       }
 
       MPhi* cphi = cdef->toPhi();
-      if (cphi->getUsageAnalysis() == PhiUsage::Used || cphi->isUseRemoved()) {
+      if (cphi->getUsageAnalysis() == PhiUsage::Used || cphi->isUseRemoved() ||
+          cphi->isImplicitlyUsed()) {
         // The information got cached on the Phi the last time it
         // got visited, or when flagging operands of removed
         // instructions.
         return push(producer, use);
       }
 
       if (cphi->isInWorklist() || cphi == producer) {
         // We are already iterating over the uses of this Phi
@@ -220,17 +221,18 @@ static bool FlagPhiInputsAsHavingRemoved
     // between the |block| and its successor |succ|.
     MDefinition* def = phi->getOperand(predIndex);
     if (def->isUseRemoved()) {
       continue;
     }
 
     // If the Phi is either Used or Unused, set the UseRemoved flag
     // accordingly.
-    if (phi->getUsageAnalysis() == PhiUsage::Used || phi->isUseRemoved()) {
+    if (phi->getUsageAnalysis() == PhiUsage::Used || phi->isUseRemoved() ||
+        phi->isImplicitlyUsed()) {
       def->setUseRemoved();
       continue;
     } else if (phi->getUsageAnalysis() == PhiUsage::Unused) {
       continue;
     }
 
     // We do not know if the Phi was Used or Unused, iterate over all uses
     // with a depth-search of uses. Returns the matching stack in the
--- a/js/src/jsutil.h
+++ b/js/src/jsutil.h
@@ -18,16 +18,26 @@
 #include "mozilla/PodOperations.h"
 
 #include <limits.h>
 
 #include "js/Initialization.h"
 #include "js/Utility.h"
 #include "js/Value.h"
 
+/* Crash diagnostics by default in debug and on nightly channel. */
+#if defined(DEBUG) || defined(NIGHTLY_BUILD)
+#define JS_CRASH_DIAGNOSTICS 1
+#endif
+
+/* Enable poisoning in crash-diagnostics and zeal builds. */
+#if defined(JS_CRASH_DIAGNOSTICS) || defined(JS_GC_ZEAL)
+#define JS_GC_POISONING 1
+#endif
+
 #if defined(JS_DEBUG)
 #define JS_DIAGNOSTICS_ASSERT(expr) MOZ_ASSERT(expr)
 #elif defined(JS_CRASH_DIAGNOSTICS)
 #define JS_DIAGNOSTICS_ASSERT(expr)         \
   do {                                      \
     if (MOZ_UNLIKELY(!(expr))) MOZ_CRASH(); \
   } while (0)
 #else
@@ -282,16 +292,17 @@ static MOZ_ALWAYS_INLINE void SetMemChec
       MOZ_MAKE_MEM_NOACCESS(ptr, bytes);
       return;
   }
   MOZ_CRASH("Invalid kind");
 }
 
 namespace js {
 
+// Unconditionally poison a region on memory.
 static inline void AlwaysPoison(void* ptr, uint8_t value, size_t num,
                                 MemCheckKind kind) {
   // Without a valid Value tag, a poisoned Value may look like a valid
   // floating point number. To ensure that we crash more readily when
   // observing a poisoned Value, we make the poison an invalid ObjectValue.
   // Unfortunately, this adds about 2% more overhead, so we can only enable
   // it in debug.
 #if defined(DEBUG)
@@ -315,38 +326,32 @@ static inline void AlwaysPoison(void* pt
 #endif  // !DEBUG
 
   SetMemCheckKind(ptr, num, kind);
 }
 
 // JSGC_DISABLE_POISONING environment variable
 extern bool gDisablePoisoning;
 
+// Poison a region of memory in debug and nightly builds (plus builds where GC
+// zeal is configured). Can be disabled by setting the JSGC_DISABLE_POISONING
+// environment variable.
 static inline void Poison(void* ptr, uint8_t value, size_t num,
                           MemCheckKind kind) {
+#if defined(JS_CRASH_DIAGNOSTICS) || defined(JS_GC_ZEAL)
   if (!js::gDisablePoisoning) {
     AlwaysPoison(ptr, value, num, kind);
   }
+#endif
+}
+
+// Poison a region of memory in debug builds. Can be disabled by setting the
+// JSGC_DISABLE_POISONING environment variable.
+static inline void DebugOnlyPoison(void* ptr, uint8_t value, size_t num,
+                                   MemCheckKind kind) {
+#if defined(DEBUG)
+  Poison(ptr, value, num, kind);
+#endif
 }
 
 }  // namespace js
 
-/* Crash diagnostics by default in debug and on nightly channel. */
-#if defined(DEBUG) || defined(NIGHTLY_BUILD)
-#define JS_CRASH_DIAGNOSTICS 1
-#endif
-
-/* Enable poisoning in crash-diagnostics and zeal builds. */
-#if defined(JS_CRASH_DIAGNOSTICS) || defined(JS_GC_ZEAL)
-#define JS_POISON(p, val, size, kind) js::Poison(p, val, size, kind)
-#define JS_GC_POISONING 1
-#else
-#define JS_POISON(p, val, size, kind) ((void)0)
-#endif
-
-/* Enable even more poisoning in purely debug builds. */
-#if defined(DEBUG)
-#define JS_EXTRA_POISON(p, val, size, kind) js::Poison(p, val, size, kind)
-#else
-#define JS_EXTRA_POISON(p, val, size, kind) ((void)0)
-#endif
-
 #endif /* jsutil_h */
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -1523,16 +1523,17 @@ js::GCParallelTask::~GCParallelTask() {
   // Only most-derived classes' destructors may do the join: base class
   // destructors run after those for derived classes' members, so a join in a
   // base class can't ensure that the task is done using the members. All we
   // can do now is check that someone has previously stopped the task.
   assertNotStarted();
 }
 
 bool js::GCParallelTask::startWithLockHeld(AutoLockHelperThreadState& lock) {
+  MOZ_ASSERT(CanUseExtraThreads());
   assertNotStarted();
 
   // If we do the shutdown GC before running anything, we may never
   // have initialized the helper threads. Just use the serial path
   // since we cannot safely intialize them at this point.
   if (!HelperThreadState().threads) {
     return false;
   }
@@ -1547,16 +1548,31 @@ bool js::GCParallelTask::startWithLockHe
   return true;
 }
 
 bool js::GCParallelTask::start() {
   AutoLockHelperThreadState helperLock;
   return startWithLockHeld(helperLock);
 }
 
+void js::GCParallelTask::startOrRunIfIdle(AutoLockHelperThreadState& lock) {
+  if (isRunningWithLockHeld(lock)) {
+    return;
+  }
+
+  // Join the previous invocation of the task. This will return immediately
+  // if the thread has never been started.
+  joinWithLockHeld(lock);
+
+  if (!startWithLockHeld(lock)) {
+    AutoUnlockHelperThreadState unlock(lock);
+    runFromMainThread(runtime());
+  }
+}
+
 void js::GCParallelTask::joinWithLockHeld(AutoLockHelperThreadState& lock) {
   if (isNotStarted(lock)) {
     return;
   }
 
   while (!isFinished(lock)) {
     HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
   }
@@ -1575,16 +1591,26 @@ static inline TimeDuration TimeSince(Tim
   // Sadly this happens sometimes.
   MOZ_ASSERT(now >= prev);
   if (now < prev) {
     now = prev;
   }
   return now - prev;
 }
 
+void GCParallelTask::joinAndRunFromMainThread(JSRuntime* rt) {
+  {
+    AutoLockHelperThreadState lock;
+    MOZ_ASSERT(!isRunningWithLockHeld(lock));
+    joinWithLockHeld(lock);
+  }
+
+  runFromMainThread(rt);
+}
+
 void js::GCParallelTask::runFromMainThread(JSRuntime* rt) {
   assertNotStarted();
   MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(rt));
   TimeStamp timeStart = ReallyNow();
   runTask();
   duration_ = TimeSince(timeStart);
 }
 
--- a/js/src/vm/Iteration.cpp
+++ b/js/src/vm/Iteration.cpp
@@ -669,18 +669,18 @@ static PropertyIteratorObject* CreatePro
 /**
  * Initialize a sentinel NativeIterator whose purpose is only to act as the
  * start/end of the circular linked list of NativeIterators in
  * ObjectRealm::enumerators.
  */
 NativeIterator::NativeIterator() {
   // Do our best to enforce that nothing in |this| except the two fields set
   // below is ever observed.
-  JS_POISON(static_cast<void*>(this), 0xCC, sizeof(*this),
-            MemCheckKind::MakeUndefined);
+  AlwaysPoison(static_cast<void*>(this), 0xCC, sizeof(*this),
+               MemCheckKind::MakeUndefined);
 
   // These are the only two fields in sentinel NativeIterators that are
   // examined, in ObjectRealm::sweepNativeIterators.  Everything else is
   // only examined *if* it's a NativeIterator being traced by a
   // PropertyIteratorObject that owns it, and nothing owns this iterator.
   prev_ = next_ = this;
 }
 
--- a/js/src/vm/JSScript.cpp
+++ b/js/src/vm/JSScript.cpp
@@ -3560,17 +3560,17 @@ void JSScript::finalize(FreeOp* fop) {
 #ifdef MOZ_VTUNE
   if (realm()->scriptVTuneIdMap) {
     // Note: we should only get here if the VTune JIT profiler is running.
     realm()->scriptVTuneIdMap->remove(this);
   }
 #endif
 
   if (data_) {
-    JS_POISON(data_, 0xdb, computedSizeOfData(), MemCheckKind::MakeNoAccess);
+    AlwaysPoison(data_, 0xdb, computedSizeOfData(), MemCheckKind::MakeNoAccess);
     fop->free_(data_);
   }
 
   if (scriptData_) {
     scriptData_->decRefCount();
   }
 
   // In most cases, our LazyScript's script pointer will reference this
--- a/js/src/vm/Scope.h
+++ b/js/src/vm/Scope.h
@@ -171,18 +171,18 @@ class TrailingNamesArray {
 
  public:
   // Explicitly ensure no one accidentally allocates scope data without
   // poisoning its trailing names.
   TrailingNamesArray() = delete;
 
   explicit TrailingNamesArray(size_t nameCount) {
     if (nameCount) {
-      JS_POISON(&data_, 0xCC, sizeof(BindingName) * nameCount,
-                MemCheckKind::MakeUndefined);
+      AlwaysPoison(&data_, 0xCC, sizeof(BindingName) * nameCount,
+                   MemCheckKind::MakeUndefined);
     }
   }
 
   BindingName* start() { return reinterpret_cast<BindingName*>(ptr()); }
 
   BindingName& get(size_t i) { return start()[i]; }
   BindingName& operator[](size_t i) { return get(i); }
 };
--- a/js/src/vm/Stack-inl.h
+++ b/js/src/vm/Stack-inl.h
@@ -190,17 +190,17 @@ inline CallObject& InterpreterFrame::cal
 inline void InterpreterFrame::unsetIsDebuggee() {
   MOZ_ASSERT(!script()->isDebuggee());
   flags_ &= ~DEBUGGEE;
 }
 
 /*****************************************************************************/
 
 inline void InterpreterStack::purge(JSRuntime* rt) {
-  rt->gc.freeUnusedLifoBlocksAfterSweeping(&allocator_);
+  rt->gc.queueUnusedLifoBlocksForFree(&allocator_);
 }
 
 uint8_t* InterpreterStack::allocateFrame(JSContext* cx, size_t size) {
   size_t maxFrames;
   if (cx->realm()->principals() == cx->runtime()->trustedPrincipals()) {
     maxFrames = MAX_FRAMES_TRUSTED;
   } else {
     maxFrames = MAX_FRAMES;
--- a/js/src/vm/TraceLoggingTypes.h
+++ b/js/src/vm/TraceLoggingTypes.h
@@ -15,16 +15,17 @@
 #define TRACELOGGER_TREE_ITEMS(_)              \
   _(AnnotateScripts)                           \
   _(Baseline)                                  \
   _(BaselineCompilation)                       \
   _(Engine)                                    \
   _(GC)                                        \
   _(GCAllocation)                              \
   _(GCSweeping)                                \
+  _(GCFree)                                    \
   _(Interpreter)                               \
   _(InlinedScripts)                            \
   _(IonAnalysis)                               \
   _(IonCompilation)                            \
   _(IonLinking)                                \
   _(IonMonkey)                                 \
   _(IrregexpCompile)                           \
   _(IrregexpExecute)                           \
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -4464,19 +4464,19 @@ void ConstraintTypeSet::sweep(const Auto
         flags |= TYPE_FLAG_ANYOBJECT;
         clearObjects();
         objectCount = 0;
         break;
       }
     }
     setBaseObjectCount(objectCount);
     // Note: -1/+1 to also poison the capacity field.
-    JS_POISON(oldArray - 1, JS_SWEPT_TI_PATTERN,
-              (oldCapacity + 1) * sizeof(oldArray[0]),
-              MemCheckKind::MakeUndefined);
+    AlwaysPoison(oldArray - 1, JS_SWEPT_TI_PATTERN,
+                 (oldCapacity + 1) * sizeof(oldArray[0]),
+                 MemCheckKind::MakeUndefined);
   } else if (objectCount == 1) {
     ObjectKey* key = (ObjectKey*)objectSet;
     if (!IsObjectKeyAboutToBeFinalized(&key)) {
       objectSet = reinterpret_cast<ObjectKey**>(key);
     } else {
       // As above, mark type sets containing objects with unknown
       // properties as unknown.
       if (key->isGroup() &&
@@ -4502,18 +4502,18 @@ void ConstraintTypeSet::sweep(const Auto
         MOZ_ASSERT(zone->types.typeLifoAlloc().contains(copy));
         copy->setNext(constraintList_);
         constraintList_ = copy;
       } else {
         zone->types.setOOMSweepingTypes();
       }
     }
     TypeConstraint* next = constraint->next();
-    JS_POISON(constraint, JS_SWEPT_TI_PATTERN, sizeof(TypeConstraint),
-              MemCheckKind::MakeUndefined);
+    AlwaysPoison(constraint, JS_SWEPT_TI_PATTERN, sizeof(TypeConstraint),
+                 MemCheckKind::MakeUndefined);
     constraint = next;
   }
 }
 
 inline void ObjectGroup::clearProperties(const AutoSweepObjectGroup& sweep) {
   // We're about to remove edges from the group to property ids. Incremental
   // GC should know about these edges.
   if (zone()->needsIncrementalBarrier()) {
@@ -4578,16 +4578,22 @@ void ObjectGroup::sweep(const AutoSweepO
    */
   unsigned propertyCount = basePropertyCount(sweep);
   if (propertyCount >= 2) {
     unsigned oldCapacity = TypeHashSet::Capacity(propertyCount);
     Property** oldArray = propertySet;
 
     MOZ_RELEASE_ASSERT(uintptr_t(oldArray[-1]) == oldCapacity);
 
+    auto poisonArray = mozilla::MakeScopeExit([oldArray, oldCapacity] {
+      size_t size = sizeof(Property*) * (oldCapacity + 1);
+      AlwaysPoison(oldArray - 1, JS_SWEPT_TI_PATTERN, size,
+                   MemCheckKind::MakeUndefined);
+    });
+
     unsigned oldPropertyCount = propertyCount;
     unsigned oldPropertiesFound = 0;
 
     clearProperties(sweep);
     propertyCount = 0;
     for (unsigned i = 0; i < oldCapacity; i++) {
       Property* prop = oldArray[i];
       if (prop) {
@@ -4596,24 +4602,24 @@ void ObjectGroup::sweep(const AutoSweepO
         if (singleton() && !prop->types.constraintList(sweep) &&
             !zone()->isPreservingCode()) {
           /*
            * Don't copy over properties of singleton objects when their
            * presence will not be required by jitcode or type constraints
            * (i.e. for the definite properties analysis). The contents of
            * these type sets will be regenerated as necessary.
            */
-          JS_POISON(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
-                    MemCheckKind::MakeUndefined);
+          AlwaysPoison(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
+                       MemCheckKind::MakeUndefined);
           continue;
         }
 
         Property* newProp = typeLifoAlloc.new_<Property>(*prop);
-        JS_POISON(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
-                  MemCheckKind::MakeUndefined);
+        AlwaysPoison(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
+                     MemCheckKind::MakeUndefined);
         if (newProp) {
           Property** pentry = TypeHashSet::Insert<jsid, Property, Property>(
               typeLifoAlloc, propertySet, propertyCount, newProp->id);
           if (pentry) {
             *pentry = newProp;
             newProp->types.sweep(sweep, zone());
             continue;
           }
@@ -4629,23 +4635,23 @@ void ObjectGroup::sweep(const AutoSweepO
     MOZ_RELEASE_ASSERT(oldPropertyCount == oldPropertiesFound);
     setBasePropertyCount(sweep, propertyCount);
   } else if (propertyCount == 1) {
     Property* prop = (Property*)propertySet;
     prop->types.checkMagic();
     if (singleton() && !prop->types.constraintList(sweep) &&
         !zone()->isPreservingCode()) {
       // Skip, as above.
-      JS_POISON(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
-                MemCheckKind::MakeUndefined);
+      AlwaysPoison(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
+                   MemCheckKind::MakeUndefined);
       clearProperties(sweep);
     } else {
       Property* newProp = typeLifoAlloc.new_<Property>(*prop);
-      JS_POISON(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
-                MemCheckKind::MakeUndefined);
+      AlwaysPoison(prop, JS_SWEPT_TI_PATTERN, sizeof(Property),
+                   MemCheckKind::MakeUndefined);
       if (newProp) {
         propertySet = (Property**)newProp;
         newProp->types.sweep(sweep, zone());
       } else {
         zone()->types.setOOMSweepingTypes();
         addFlags(sweep,
                  OBJECT_FLAG_DYNAMIC_MASK | OBJECT_FLAG_UNKNOWN_PROPERTIES);
         clearProperties(sweep);
@@ -4766,17 +4772,17 @@ void TypeZone::beginSweep() {
   // Clear the analysis pool, but don't release its data yet. While sweeping
   // types any live data will be allocated into the pool.
   sweepTypeLifoAlloc.ref().steal(&typeLifoAlloc());
 
   generation = !generation;
 }
 
 void TypeZone::endSweep(JSRuntime* rt) {
-  rt->gc.freeAllLifoBlocksAfterSweeping(&sweepTypeLifoAlloc.ref());
+  rt->gc.queueAllLifoBlocksForFree(&sweepTypeLifoAlloc.ref());
 }
 
 void TypeZone::clearAllNewScriptsOnOOM() {
   for (auto iter = zone()->cellIter<ObjectGroup>(); !iter.done(); iter.next()) {
     ObjectGroup* group = iter;
     if (!IsAboutToBeFinalizedUnbarriered(&group)) {
       group->maybeClearNewScriptOnOOM();
     }
--- a/toolkit/components/clearsitedata/ClearSiteData.cpp
+++ b/toolkit/components/clearsitedata/ClearSiteData.cpp
@@ -169,19 +169,32 @@ ClearSiteData::Observe(nsISupports* aSub
     return NS_OK;
   }
 
   ClearDataFromChannel(channel);
   return NS_OK;
 }
 
 void ClearSiteData::ClearDataFromChannel(nsIHttpChannel* aChannel) {
+  MOZ_ASSERT(aChannel);
+
   nsresult rv;
   nsCOMPtr<nsIURI> uri;
 
+  uint32_t status;
+  rv = aChannel->GetResponseStatus(&status);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    return;
+  }
+
+  // We just care about 2xx response status.
+  if (status < 200 || status >= 300) {
+    return;
+  }
+
   nsIScriptSecurityManager* ssm = nsContentUtils::GetSecurityManager();
   if (NS_WARN_IF(!ssm)) {
     return;
   }
 
   nsCOMPtr<nsIPrincipal> principal;
   rv = ssm->GetChannelResultPrincipal(aChannel, getter_AddRefs(principal));
   if (NS_WARN_IF(NS_FAILED(rv))) {
--- a/toolkit/xre/nsXREDirProvider.cpp
+++ b/toolkit/xre/nsXREDirProvider.cpp
@@ -1439,31 +1439,47 @@ nsresult nsXREDirProvider::GetSysUserExt
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = AppendSysUserExtensionPath(localDir);
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = EnsureDirectoryExists(localDir);
   NS_ENSURE_SUCCESS(rv, rv);
 
+#if defined(XP_WIN) && defined(MOZ_SANDBOX)
+  // This is used in sandbox rules, so we need to make sure it doesn't contain
+  // any junction points or symlinks or the sandbox will reject those rules.
+  if (!mozilla::widget::WinUtils::ResolveJunctionPointsAndSymLinks(localDir)) {
+    NS_WARNING("Failed to resolve sys user extensions directory.");
+  }
+#endif
+
   localDir.forget(aFile);
   return NS_OK;
 }
 
 nsresult nsXREDirProvider::GetSysUserExtensionsDevDirectory(nsIFile** aFile) {
   nsCOMPtr<nsIFile> localDir;
   nsresult rv = GetUserDataDirectoryHome(getter_AddRefs(localDir), false);
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = AppendSysUserExtensionsDevPath(localDir);
   NS_ENSURE_SUCCESS(rv, rv);
 
   rv = EnsureDirectoryExists(localDir);
   NS_ENSURE_SUCCESS(rv, rv);
 
+#if defined(XP_WIN) && defined(MOZ_SANDBOX)
+  // This is used in sandbox rules, so we need to make sure it doesn't contain
+  // any junction points or symlinks or the sandbox will reject those rules.
+  if (!mozilla::widget::WinUtils::ResolveJunctionPointsAndSymLinks(localDir)) {
+    NS_WARNING("Failed to resolve sys user extensions dev directory.");
+  }
+#endif
+
   localDir.forget(aFile);
   return NS_OK;
 }
 
 #if defined(XP_UNIX) || defined(XP_MACOSX)
 nsresult nsXREDirProvider::GetSystemExtensionsDirectory(nsIFile** aFile) {
   nsresult rv;
   nsCOMPtr<nsIFile> localDir;