Merge mozilla-central to autoland. a=merge CLOSED TREE
author: Brindusan Cristian <cbrindusan@mozilla.com>
date: Sat, 16 Jun 2018 12:50:22 +0300
changeset: 477008 da2cbc08384489ec52701807ffce1061baba30d1
parent: 477007 70bab1329de06ec38a8fecd3e553b68007f4a6d2 (current diff)
parent: 476997 9a847d79eb8ea28ea2d7504ab7c65bf9ea598928 (diff)
child: 477009 3250ddd5dd08c26d8310ebcaf42544cbd4fc1f52
push id: 9374
push user: jlund@mozilla.com
push date: Mon, 18 Jun 2018 21:43:20 +0000
treeherder: mozilla-beta@160e085dfb0b
reviewers: merge
milestone: 62.0a1
--- a/dom/events/TouchEvent.cpp
+++ b/dom/events/TouchEvent.cpp
@@ -217,16 +217,42 @@ TouchEvent::PrefEnabled(JSContext* aCx, 
       docShell = win->GetDocShell();
     }
   }
   return PrefEnabled(docShell);
 }
 
 // static
 bool
+TouchEvent::PlatformSupportsTouch()
+{
+#if defined(MOZ_WIDGET_ANDROID)
+  // Touch support is always enabled on android.
+  return true;
+#elif defined(XP_WIN) || defined(MOZ_WIDGET_GTK)
+  static bool sDidCheckTouchDeviceSupport = false;
+  static bool sIsTouchDeviceSupportPresent = false;
+  // On Windows and GTK3 we auto-detect based on device support.
+  if (!sDidCheckTouchDeviceSupport) {
+    sDidCheckTouchDeviceSupport = true;
+    sIsTouchDeviceSupportPresent = WidgetUtils::IsTouchDeviceSupportPresent();
+    // But touch events are only actually supported if APZ is enabled. If
+    // APZ is disabled globally, we can check that once and incorporate that
+    // into the cached state. If APZ is enabled, we need to further check
+    // based on the widget, which we do below (and don't cache that result).
+    sIsTouchDeviceSupportPresent &= gfxPlatform::AsyncPanZoomEnabled();
+  }
+  return sIsTouchDeviceSupportPresent;
+#else
+  return false;
+#endif
+}
+
+// static
+bool
 TouchEvent::PrefEnabled(nsIDocShell* aDocShell)
 {
   static bool sPrefCached = false;
   static int32_t sPrefCacheValue = 0;
 
   uint32_t touchEventsOverride = nsIDocShell::TOUCHEVENTS_OVERRIDE_NONE;
   if (aDocShell) {
     aDocShell->GetTouchEventsOverride(&touchEventsOverride);
@@ -239,44 +265,39 @@ TouchEvent::PrefEnabled(nsIDocShell* aDo
 
   bool enabled = false;
   if (touchEventsOverride == nsIDocShell::TOUCHEVENTS_OVERRIDE_ENABLED) {
     enabled = true;
   } else if (touchEventsOverride == nsIDocShell::TOUCHEVENTS_OVERRIDE_DISABLED) {
     enabled = false;
   } else {
     if (sPrefCacheValue == 2) {
-#if defined(MOZ_WIDGET_ANDROID)
-      // Touch support is always enabled on B2G and android.
-      enabled = true;
-#elif defined(XP_WIN) || defined(MOZ_WIDGET_GTK)
-      static bool sDidCheckTouchDeviceSupport = false;
-      static bool sIsTouchDeviceSupportPresent = false;
-      // On Windows and GTK3 we auto-detect based on device support.
-      if (!sDidCheckTouchDeviceSupport) {
-        sDidCheckTouchDeviceSupport = true;
-        sIsTouchDeviceSupportPresent = WidgetUtils::IsTouchDeviceSupportPresent();
-        // But touch events are only actually supported if APZ is enabled. If
-        // APZ is disabled globally, we can check that once and incorporate that
-        // into the cached state. If APZ is enabled, we need to further check
-        // based on the widget, which we do below (and don't cache that result).
-        sIsTouchDeviceSupportPresent &= gfxPlatform::AsyncPanZoomEnabled();
+      enabled = PlatformSupportsTouch();
+
+      static bool firstTime = true;
+      // The touch screen data seems to be inaccurate in the parent process,
+      // and we really need the crash annotation in child processes.
+      if (firstTime && !XRE_IsParentProcess()) {
+        CrashReporter::AnnotateCrashReport(NS_LITERAL_CSTRING("HasDeviceTouchScreen"),
+                                           enabled ?
+                                             NS_LITERAL_CSTRING("1") :
+                                             NS_LITERAL_CSTRING("0"));
+        firstTime = false;
       }
-      enabled = sIsTouchDeviceSupportPresent;
+
+#if defined(XP_WIN) || defined(MOZ_WIDGET_GTK)
       if (enabled && aDocShell) {
         // APZ might be disabled on this particular widget, in which case
         // TouchEvent support will also be disabled. Try to detect that.
         RefPtr<nsPresContext> pc;
         aDocShell->GetPresContext(getter_AddRefs(pc));
         if (pc && pc->GetRootWidget()) {
           enabled &= pc->GetRootWidget()->AsyncPanZoomEnabled();
         }
       }
-#else
-      enabled = false;
 #endif
     } else {
       enabled = !!sPrefCacheValue;
     }
   }
 
   if (enabled) {
     nsContentUtils::InitializeTouchEventTable();
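
The hunk above factors the platform probe out of PrefEnabled() into a reusable PlatformSupportsTouch(). A minimal standalone sketch of the detect-once caching idiom it uses; DetectTouchHardware() and ApzEnabledGlobally() are hypothetical stand-ins for WidgetUtils::IsTouchDeviceSupportPresent() and gfxPlatform::AsyncPanZoomEnabled():

    // Sketch only: probe the hardware once, fold the global APZ switch into
    // the cached answer, and leave the per-widget APZ check to the caller.
    static bool PlatformSupportsTouchSketch()
    {
      static bool sChecked = false;
      static bool sSupported = false;
      if (!sChecked) {
        sChecked = true;
        sSupported = DetectTouchHardware();    // expensive platform query
        sSupported &= ApzEnabledGlobally();    // global switch, safe to cache
      }
      return sSupported;
    }
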
--- a/dom/events/TouchEvent.h
+++ b/dom/events/TouchEvent.h
@@ -123,16 +123,17 @@ public:
                       bool aCtrlKey,
                       bool aAltKey,
                       bool aShiftKey,
                       bool aMetaKey,
                       TouchList* aTouches,
                       TouchList* aTargetTouches,
                       TouchList* aChangedTouches);
 
+  static bool PlatformSupportsTouch();
   static bool PrefEnabled(JSContext* aCx, JSObject* aGlobal);
   static bool PrefEnabled(nsIDocShell* aDocShell);
 
   static already_AddRefed<TouchEvent> Constructor(const GlobalObject& aGlobal,
                                                   const nsAString& aType,
                                                   const TouchEventInit& aParam,
                                                   ErrorResult& aRv);
 
--- a/dom/notification/Notification.cpp
+++ b/dom/notification/Notification.cpp
@@ -419,16 +419,23 @@ public:
     , mNotification(aNotification)
   {}
 
   void
   WorkerRunInternal(WorkerPrivate* aWorkerPrivate) override
   {
     mNotification->ReleaseObject();
   }
+
+  nsresult
+  Cancel() override
+  {
+    mNotification->ReleaseObject();
+    return NS_OK;
+  }
 };
 
 // Create one whenever you require ownership of the notification. Use with
 // UniquePtr<>. See Notification.h for details.
 class NotificationRef final {
   friend class WorkerNotificationObserver;
 
 private:
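
The new Cancel() override matters because a runnable dispatched to a worker is not guaranteed to run: if the worker is shutting down, the runnable is cancelled instead and WorkerRunInternal() never fires, which previously leaked the notification's self-reference. A minimal sketch of the pattern, with Resource standing in for the owned notification:

    // Sketch: a cleanup runnable must release its resource on both the run
    // path and the cancel path. Names here are illustrative.
    class CleanupRunnable final : public WorkerRunnable
    {
      RefPtr<Resource> mResource;   // hypothetical owned resource
    public:
      void WorkerRunInternal(WorkerPrivate*) override { mResource = nullptr; }
      nsresult Cancel() override
      {
        mResource = nullptr;        // same cleanup when we never get to run
        return NS_OK;
      }
    };
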
--- a/dom/xbl/nsXBLBinding.cpp
+++ b/dom/xbl/nsXBLBinding.cpp
@@ -101,29 +101,31 @@ static const JSClass gPrototypeJSClass =
 // Implementation /////////////////////////////////////////////////////////////////
 
 // Constructors/Destructors
 nsXBLBinding::nsXBLBinding(nsXBLPrototypeBinding* aBinding)
   : mMarkedForDeath(false)
   , mUsingContentXBLScope(false)
   , mIsShadowRootBinding(false)
   , mPrototypeBinding(aBinding)
+  , mBoundElement(nullptr)
 {
   NS_ASSERTION(mPrototypeBinding, "Must have a prototype binding!");
   // Grab a ref to the document info so the prototype binding won't die
   NS_ADDREF(mPrototypeBinding->XBLDocumentInfo());
 }
 
 // Constructor used by web components.
 nsXBLBinding::nsXBLBinding(ShadowRoot* aShadowRoot, nsXBLPrototypeBinding* aBinding)
   : mMarkedForDeath(false),
     mUsingContentXBLScope(false),
     mIsShadowRootBinding(true),
     mPrototypeBinding(aBinding),
-    mContent(aShadowRoot)
+    mContent(aShadowRoot),
+    mBoundElement(nullptr)
 {
   NS_ASSERTION(mPrototypeBinding, "Must have a prototype binding!");
   // Grab a ref to the document info so the prototype binding won't die
   NS_ADDREF(mPrototypeBinding->XBLDocumentInfo());
 }
 
 nsXBLBinding::~nsXBLBinding(void)
 {
--- a/dom/xbl/nsXBLProtoImplField.cpp
+++ b/dom/xbl/nsXBLProtoImplField.cpp
@@ -42,16 +42,17 @@ nsXBLProtoImplField::nsXBLProtoImplField
     if (readOnly.LowerCaseEqualsLiteral("true"))
       mJSAttributes |= JSPROP_READONLY;
   }
 }
 
 
 nsXBLProtoImplField::nsXBLProtoImplField(const bool aIsReadOnly)
   : mNext(nullptr),
+    mName(nullptr),
     mFieldText(nullptr),
     mFieldTextLength(0),
     mLineNumber(0)
 {
   MOZ_COUNT_CTOR(nsXBLProtoImplField);
 
   mJSAttributes = JSPROP_ENUMERATE;
   if (aIsReadOnly)
--- a/dom/xbl/nsXBLPrototypeBinding.cpp
+++ b/dom/xbl/nsXBLPrototypeBinding.cpp
@@ -113,16 +113,17 @@ nsXBLPrototypeBinding::nsXBLPrototypeBin
   mBaseBinding(nullptr),
   mInheritStyle(true),
   mCheckedBaseProto(false),
   mKeyHandlersRegistered(false),
   mChromeOnlyContent(false),
   mBindToUntrustedContent(false),
   mSimpleScopeChain(false),
   mResources(nullptr),
+  mXBLDocInfoWeak(nullptr),
   mBaseNameSpaceID(kNameSpaceID_None)
 {
   MOZ_COUNT_CTOR(nsXBLPrototypeBinding);
 }
 
 nsresult
 nsXBLPrototypeBinding::Init(const nsACString& aID,
                             nsXBLDocumentInfo* aInfo,
--- a/dom/xbl/nsXBLPrototypeHandler.cpp
+++ b/dom/xbl/nsXBLPrototypeHandler.cpp
@@ -116,17 +116,22 @@ nsXBLPrototypeHandler::nsXBLPrototypeHan
 
   // Make sure our prototype is initialized.
   ConstructPrototype(aHandlerElement);
 }
 
 nsXBLPrototypeHandler::nsXBLPrototypeHandler(nsXBLPrototypeBinding* aBinding)
   : mHandlerText(nullptr),
     mLineNumber(0),
+    mPhase(0),
+    mType(0),
+    mMisc(0),
     mReserved(XBLReservedKey_False),
+    mKeyMask(0),
+    mDetail(0),
     mNextHandler(nullptr),
     mPrototypeBinding(aBinding)
 {
   Init();
 }
 
 nsXBLPrototypeHandler::~nsXBLPrototypeHandler()
 {
--- a/dom/xbl/nsXBLResourceLoader.cpp
+++ b/dom/xbl/nsXBLResourceLoader.cpp
@@ -64,17 +64,18 @@ struct nsXBLResource
 nsXBLResourceLoader::nsXBLResourceLoader(nsXBLPrototypeBinding* aBinding,
                                          nsXBLPrototypeResources* aResources)
 :mBinding(aBinding),
  mResources(aResources),
  mResourceList(nullptr),
  mLastResource(nullptr),
  mLoadingResources(false),
  mInLoadResourcesFunc(false),
- mPendingSheets(0)
+ mPendingSheets(0),
+ mBoundDocument(nullptr)
 {
 }
 
 nsXBLResourceLoader::~nsXBLResourceLoader()
 {
   delete mResourceList;
 }
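
The five XBL hunks above all fix the same class of bug: raw-pointer members that at least one constructor overload left uninitialized, holding garbage until first assignment. The patch adds the missing mem-initializers; an alternative sketch, assuming C++11 default member initializers are acceptable in this code, would cover every constructor from the header at once:

    // Alternative sketch (not what the patch does): a default member
    // initializer applies in every constructor that doesn't override it.
    class nsXBLBindingSketch
    {
      Element* mBoundElement = nullptr;   // initialized regardless of ctor
      nsCOMPtr<nsIContent> mContent;      // smart pointers default to null
    };
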
 
--- a/gfx/qcms/chain.c
+++ b/gfx/qcms/chain.c
@@ -967,16 +967,20 @@ static float* qcms_modular_transform_dat
 		    transform_fn != qcms_transform_module_clut_only &&
 		    transform_fn != qcms_transform_module_matrix &&
 		    transform_fn != qcms_transform_module_matrix_translate &&
 		    transform_fn != qcms_transform_module_LAB_to_XYZ &&
 		    transform_fn != qcms_transform_module_XYZ_to_LAB) {
 			assert(0 && "Unsupported transform module");
 			return NULL;
 		}
+		if (transform->grid_size <= 0) {
+			assert(0 && "Invalid transform");
+			return NULL;
+		}
                 transform->transform_module_fn(transform,src,dest,len);
                 dest = src;
                 src = new_src;
                 transform = transform->next_transform;
         }
         // The results end up in the src buffer because of the switching
         return src;
 }
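
The added guard rejects a transform whose CLUT grid dimension is not positive before the module function runs. A short illustration of why, assuming the usual CLUT sampling scheme: each input channel is scaled by (grid_size - 1) and used to index a table with grid_size entries per side, so a zero or negative grid_size would index out of bounds:

    /* Illustrative only: 1D slice of the CLUT indexing scheme. */
    static float clut_sample(const float* table, int grid_size, float x)
    {
            assert(grid_size > 0);                /* the new guard's invariant */
            int i = (int)(x * (grid_size - 1));   /* x assumed in [0, 1] */
            return table[i];
    }
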
--- a/gfx/thebes/gfxPlatform.cpp
+++ b/gfx/thebes/gfxPlatform.cpp
@@ -611,16 +611,19 @@ WebRenderDebugPrefChangeCallback(const c
   GFX_WEBRENDER_DEBUG(".profiler",           1 << 0)
   GFX_WEBRENDER_DEBUG(".render-targets",     1 << 1)
   GFX_WEBRENDER_DEBUG(".texture-cache",      1 << 2)
   GFX_WEBRENDER_DEBUG(".gpu-time-queries",   1 << 3)
   GFX_WEBRENDER_DEBUG(".gpu-sample-queries", 1 << 4)
   GFX_WEBRENDER_DEBUG(".disable-batching",   1 << 5)
   GFX_WEBRENDER_DEBUG(".epochs",             1 << 6)
   GFX_WEBRENDER_DEBUG(".compact-profiler",   1 << 7)
+  GFX_WEBRENDER_DEBUG(".echo-driver-messages", 1 << 8)
+  GFX_WEBRENDER_DEBUG(".new-frame-indicator", 1 << 9)
+  GFX_WEBRENDER_DEBUG(".new-scene-indicator", 1 << 10)
 #undef GFX_WEBRENDER_DEBUG
 
   gfx::gfxVars::SetWebRenderDebugFlags(flags);
 }
 
 
 #if defined(USE_SKIA)
 static uint32_t GetSkiaGlyphCacheSize()
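
Each GFX_WEBRENDER_DEBUG entry maps a boolean pref suffix to one bit of a flags word that is then published through gfxVars::SetWebRenderDebugFlags(). The macro's definition sits just above the excerpt; a hedged sketch of its likely shape, with PREF_ROOT assumed to be the "gfx.webrender.debug" prefix:

    // Sketch of the macro the hunk extends (assumption, not shown in the diff).
    #define GFX_WEBRENDER_DEBUG(suffix, bit)                 \
      if (Preferences::GetBool(PREF_ROOT suffix, false)) {   \
        flags |= (bit);                                      \
      }
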
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -586,20 +586,20 @@ namespace js {
 namespace gc {
 
 static MOZ_ALWAYS_INLINE bool
 IsIncrementalBarrierNeededOnTenuredGCThing(const JS::GCCellPtr thing)
 {
     MOZ_ASSERT(thing);
     MOZ_ASSERT(!js::gc::IsInsideNursery(thing.asCell()));
 
-    // TODO: I'd like to assert !CurrentThreadIsHeapBusy() here but this gets
+    // TODO: I'd like to assert !RuntimeHeapIsBusy() here but this gets
     // called while we are tracing the heap, e.g. during memory reporting
     // (see bug 1313318).
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
 
     JS::Zone* zone = JS::GetTenuredGCThingZone(thing);
     return JS::shadow::Zone::asShadowZone(zone)->needsIncrementalBarrier();
 }
 
 static MOZ_ALWAYS_INLINE void
 ExposeGCThingToActiveJS(JS::GCCellPtr thing)
 {
@@ -627,17 +627,17 @@ extern JS_PUBLIC_API(bool)
 EdgeNeedsSweepUnbarrieredSlow(T* thingp);
 
 static MOZ_ALWAYS_INLINE bool
 EdgeNeedsSweepUnbarriered(JSObject** objp)
 {
     // This function does not handle updating nursery pointers. Raw JSObject
     // pointers should be updated separately or replaced with
     // JS::Heap<JSObject*> which handles this automatically.
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
     if (IsInsideNursery(reinterpret_cast<Cell*>(*objp)))
         return false;
 
     auto zone = JS::shadow::Zone::asShadowZone(detail::GetGCThingZone(uintptr_t(*objp)));
     if (!zone->isGCSweepingOrCompacting())
         return false;
 
     return EdgeNeedsSweepUnbarrieredSlow(objp);
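
The renames here run through the whole changeset: heap-state predicates lose their CurrentThreadIsHeap* names in favor of RuntimeHeapIs*, because the state they report now lives on the JSRuntime rather than on a per-thread JSContext (see the AutoTraceSession hunks in js/src/gc/GC.cpp below). A sketch of how the predicates plausibly layer over the single RuntimeHeapState() accessor exported later in this patch; the inline definitions are inferred, not shown in the diff:

    // Sketch (inferred from the rename pattern; bodies are assumptions).
    namespace JS {
    extern JS_PUBLIC_API(HeapState) RuntimeHeapState();

    static inline bool RuntimeHeapIsBusy() {
        return RuntimeHeapState() != HeapState::Idle;
    }
    static inline bool RuntimeHeapIsMajorCollecting() {
        return RuntimeHeapState() == HeapState::MajorCollecting;
    }
    static inline bool RuntimeHeapIsMinorCollecting() {
        return RuntimeHeapState() == HeapState::MinorCollecting;
    }
    static inline bool RuntimeHeapIsCollecting() {
        return RuntimeHeapIsMajorCollecting() || RuntimeHeapIsMinorCollecting();
    }
    } // namespace JS
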
--- a/js/src/builtin/intl/SharedIntlData.cpp
+++ b/js/src/builtin/intl/SharedIntlData.cpp
@@ -398,17 +398,17 @@ js::intl::SharedIntlData::destroyInstanc
     ianaLinksCanonicalizedDifferentlyByICU.finish();
     upperCaseFirstLocales.finish();
 }
 
 void
 js::intl::SharedIntlData::trace(JSTracer* trc)
 {
     // Atoms are always tenured.
-    if (!JS::CurrentThreadIsHeapMinorCollecting()) {
+    if (!JS::RuntimeHeapIsMinorCollecting()) {
         availableTimeZones.trace(trc);
         ianaZonesTreatedAsLinksByICU.trace(trc);
         ianaLinksCanonicalizedDifferentlyByICU.trace(trc);
         upperCaseFirstLocales.trace(trc);
     }
 }
 
 size_t
--- a/js/src/ds/SplayTree.h
+++ b/js/src/ds/SplayTree.h
@@ -77,17 +77,17 @@ class SplayTree
     }
 
     bool contains(const T& v, T* res)
     {
         if (!root)
             return false;
         Node* last = lookup(v);
         splay(last);
-        checkCoherency(root, nullptr);
+        checkCoherency();
         if (C::compare(v, last->item) == 0) {
             *res = last->item;
             return true;
         }
         return false;
     }
 
     MOZ_MUST_USE bool insert(const T& v)
@@ -107,17 +107,17 @@ class SplayTree
         MOZ_ASSERT(cmp);
 
         Node*& parentPointer = (cmp < 0) ? last->left : last->right;
         MOZ_ASSERT(!parentPointer);
         parentPointer = element;
         element->parent = last;
 
         splay(element);
-        checkCoherency(root, nullptr);
+        checkCoherency();
         return true;
     }
 
     void remove(const T& v)
     {
         Node* last = lookup(v);
         MOZ_ASSERT(last && C::compare(v, last->item) == 0);
 
@@ -152,17 +152,17 @@ class SplayTree
         else
             swap->parent->right = swapChild;
         if (swapChild)
             swapChild->parent = swap->parent;
 
         root->item = swap->item;
         freeNode(swap);
 
-        checkCoherency(root, nullptr);
+        checkCoherency();
     }
 
     template <class Op>
     void forEach(Op op)
     {
         forEachInner<Op>(op, root);
     }
 
@@ -271,38 +271,85 @@ class SplayTree
         if (!node)
             return;
 
         forEachInner<Op>(op, node->left);
         op(node->item);
         forEachInner<Op>(op, node->right);
     }
 
-    Node* checkCoherency(Node* node, Node* minimum)
+    void checkCoherency() const
     {
 #ifdef DEBUG
         if (!enableCheckCoherency)
-            return nullptr;
-        if (!node) {
-            MOZ_ASSERT(!root);
-            return nullptr;
+            return;
+        if (!root)
+            return;
+        MOZ_ASSERT(root->parent == nullptr);
+        const Node* node = root;
+        const Node* minimum = nullptr;
+        MOZ_ASSERT_IF(node->left, node->left->parent == node);
+        MOZ_ASSERT_IF(node->right, node->right->parent == node);
+
+
+        // This does a depth-first search and checks that the values are
+        // ordered properly.
+        while (true) {
+            // Go to the left-most child.
+            while (node->left) {
+                MOZ_ASSERT_IF(node->left, node->left->parent == node);
+                MOZ_ASSERT_IF(node->right, node->right->parent == node);
+                node = node->left;
+            }
+
+            MOZ_ASSERT_IF(minimum, C::compare(minimum->item, node->item) < 0);
+            minimum = node;
+
+            if (node->right) {
+                // Go once to the right and try again.
+                MOZ_ASSERT_IF(node->left, node->left->parent == node);
+                MOZ_ASSERT_IF(node->right, node->right->parent == node);
+                node = node->right;
+            } else {
+                // We reached a leaf node, move to the first branch to the right of
+                // our current left-most sub-tree.
+                MOZ_ASSERT(!node->left && !node->right);
+                const Node* prev = nullptr;
+
+                // Visit the parent node, to find the right branch which we have
+                // not visited yet. Either we are coming back from the right
+                // branch, or we are coming back from the left branch with no
+                // right branch to visit.
+                while (node->parent) {
+                    prev = node;
+                    node = node->parent;
+
+                    // If we came back from the left branch, visit the value.
+                    if (node->left == prev) {
+                        MOZ_ASSERT_IF(minimum, C::compare(minimum->item, node->item) < 0);
+                        minimum = node;
+                    }
+
+                    if (node->right != prev && node->right != nullptr)
+                        break;
+                }
+
+                if (!node->parent) {
+                    MOZ_ASSERT(node == root);
+                    // We reached the root node either because we came back from
+                    // the right hand side, or because the root node had a
+                    // single child.
+                    if (node->right == prev || node->right == nullptr)
+                        return;
+                }
+
+                // Go to the right node which we have not visited yet.
+                MOZ_ASSERT(node->right != prev && node->right != nullptr);
+                node = node->right;
+            }
         }
-        MOZ_ASSERT_IF(!node->parent, node == root);
-        MOZ_ASSERT_IF(minimum, C::compare(minimum->item, node->item) < 0);
-        if (node->left) {
-            MOZ_ASSERT(node->left->parent == node);
-            Node* leftMaximum = checkCoherency(node->left, minimum);
-            MOZ_ASSERT(C::compare(leftMaximum->item, node->item) < 0);
-        }
-        if (node->right) {
-            MOZ_ASSERT(node->right->parent == node);
-            return checkCoherency(node->right, node);
-        }
-        return node;
-#else
-        return nullptr;
 #endif
     }
 };
 
 }  /* namespace js */
 
 #endif /* ds_SplayTree_h */
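
checkCoherency() is rewritten from recursion into an explicit parent-pointer walk: a splay tree can degenerate into a linear chain, so the recursive check's depth was O(n) and could overflow the stack on large trees in debug builds. The traversal generalizes to any in-order walk over nodes with left/right/parent links; a compact sketch using the same item, left, right, and parent fields as the tree above:

    // Sketch: in-order walk using parent pointers, no recursion, no stack.
    template <typename Node, typename Visit>
    void InOrderWalk(const Node* curr, Visit visit)
    {
        const Node* prev = curr ? curr->parent : nullptr;
        while (curr) {
            const Node* next;
            if (prev == curr->parent) {
                // Arrived from above: descend left first if we can.
                if (curr->left) {
                    next = curr->left;
                } else {
                    visit(curr->item);
                    next = curr->right ? curr->right : curr->parent;
                }
            } else if (prev == curr->left) {
                // Left subtree done: visit, then descend right.
                visit(curr->item);
                next = curr->right ? curr->right : curr->parent;
            } else {
                // Right subtree done: climb back up.
                next = curr->parent;
            }
            prev = curr;
            curr = next;
        }
    }
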
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -279,17 +279,17 @@ GCRuntime::checkAllocatorState(JSContext
                   kind == AllocKind::ATOM ||
                   kind == AllocKind::FAT_INLINE_ATOM ||
                   kind == AllocKind::SYMBOL ||
                   kind == AllocKind::JITCODE ||
                   kind == AllocKind::SCOPE);
     MOZ_ASSERT_IF(!cx->zone()->isAtomsZone(),
                   kind != AllocKind::ATOM &&
                   kind != AllocKind::FAT_INLINE_ATOM);
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
     MOZ_ASSERT(cx->isAllocAllowed());
 #endif
 
     // Crash if we perform a GC action when it is not safe.
     if (allowGC && !cx->suppressGC)
         cx->verifyIsSafeToGC();
 
     // For testing out of memory conditions
@@ -375,17 +375,17 @@ GCRuntime::refillFreeListFromAnyThread(J
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind)
 {
     // It should not be possible to allocate on the main thread while we are
     // inside a GC.
     Zone *zone = cx->zone();
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy(), "allocating while under GC");
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy(), "allocating while under GC");
 
     return cx->arenas()->allocateFromArena(zone, thingKind, ShouldCheckThresholds::CheckThresholds);
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListFromHelperThread(JSContext* cx, AllocKind thingKind)
 {
     // A GC may be happening on the main thread, but zones used by off thread
@@ -400,18 +400,18 @@ GCRuntime::refillFreeListFromHelperThrea
 GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
 {
     /*
      * Called by compacting GC to refill a free list while we are in a GC.
      */
 
     zone->arenas.checkEmptyFreeList(thingKind);
     mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
-    MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
-    MOZ_ASSERT_IF(!JS::CurrentThreadIsHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());
+    MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+    MOZ_ASSERT_IF(!JS::RuntimeHeapIsMinorCollecting(), !rt->gc.isBackgroundSweeping());
 
     return zone->arenas.allocateFromArena(zone, thingKind, ShouldCheckThresholds::DontCheckThresholds);
 }
 
 TenuredCell*
 ArenaLists::allocateFromArena(JS::Zone* zone, AllocKind thingKind,
                               ShouldCheckThresholds checkThresholds)
 {
--- a/js/src/gc/Barrier.cpp
+++ b/js/src/gc/Barrier.cpp
@@ -19,17 +19,17 @@
 #include "wasm/WasmJS.h"
 
 namespace js {
 
 bool
 RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(shadowZone->runtimeFromMainThread()));
-    return JS::CurrentThreadIsHeapMajorCollecting();
+    return JS::RuntimeHeapIsMajorCollecting();
 }
 
 #ifdef DEBUG
 
 bool
 IsMarkedBlack(JSObject* obj)
 {
     return obj->isMarkedBlack();
--- a/js/src/gc/Cell.h
+++ b/js/src/gc/Cell.h
@@ -388,17 +388,17 @@ TenuredCell::readBarrier(TenuredCell* th
         Cell* tmp = thing;
         TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "read barrier");
         MOZ_ASSERT(tmp == thing);
     }
 
     if (thing->isMarkedGray()) {
         // There shouldn't be anything marked grey unless we're on the main thread.
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()));
-        if (!JS::CurrentThreadIsHeapCollecting())
+        if (!JS::RuntimeHeapIsCollecting())
             JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr(thing, thing->getTraceKind()));
     }
 }
 
 void
 AssertSafeToSkipBarrier(TenuredCell* thing);
 
 /* static */ MOZ_ALWAYS_INLINE void
--- a/js/src/gc/DeletePolicy.h
+++ b/js/src/gc/DeletePolicy.h
@@ -69,17 +69,17 @@ IsClearEdgesTracer(JSTracer *trc)
  * into the object and make it safe to delete.
  */
 template <typename T>
 struct GCManagedDeletePolicy
 {
     void operator()(const T* constPtr) {
         if (constPtr) {
             auto ptr = const_cast<T*>(constPtr);
-            if (JS::CurrentThreadIsHeapCollecting()) {
+            if (JS::RuntimeHeapIsCollecting()) {
                 MOZ_ASSERT(js::CurrentThreadIsGCSweeping());
                 // Do not attempt to clear out storebuffer edges.
             } else {
                 gc::ClearEdgesTracer trc;
                 ptr->trace(&trc);
             }
             js_delete(ptr);
         }
--- a/js/src/gc/GC-inl.h
+++ b/js/src/gc/GC-inl.h
@@ -125,17 +125,17 @@ class ArenaCellIterImpl
     void init(Arena* arena, CellIterNeedsBarrier mayNeedBarrier) {
         MOZ_ASSERT(!initialized);
         MOZ_ASSERT(arena);
         initialized = true;
         AllocKind kind = arena->getAllocKind();
         firstThingOffset = Arena::firstThingOffset(kind);
         thingSize = Arena::thingSize(kind);
         traceKind = MapAllocToTraceKind(kind);
-        needsBarrier = mayNeedBarrier && !JS::CurrentThreadIsHeapCollecting();
+        needsBarrier = mayNeedBarrier && !JS::RuntimeHeapIsCollecting();
         reset(arena);
     }
 
     // Use this to move from an Arena of a particular kind to another Arena of
     // the same kind.
     void reset(Arena* arena) {
         MOZ_ASSERT(initialized);
         MOZ_ASSERT(arena);
@@ -183,17 +183,17 @@ JSObject*
 ArenaCellIterImpl::get<JSObject>() const;
 
 class ArenaCellIter : public ArenaCellIterImpl
 {
   public:
     explicit ArenaCellIter(Arena* arena)
       : ArenaCellIterImpl(arena, CellIterMayNeedBarrier)
     {
-        MOZ_ASSERT(JS::CurrentThreadIsHeapTracing());
+        MOZ_ASSERT(JS::RuntimeHeapIsTracing());
     }
 };
 
 template <typename T>
 class ZoneCellIter;
 
 template <>
 class ZoneCellIter<TenuredCell> {
@@ -212,17 +212,17 @@ class ZoneCellIter<TenuredCell> {
         initForTenuredIteration(zone, kind);
     }
 
     void initForTenuredIteration(JS::Zone* zone, AllocKind kind) {
         JSRuntime* rt = zone->runtimeFromAnyThread();
 
         // If called from outside a GC, ensure that the heap is in a state
         // that allows us to iterate.
-        if (!JS::CurrentThreadIsHeapBusy()) {
+        if (!JS::RuntimeHeapIsBusy()) {
             // Assert that no GCs can occur while a ZoneCellIter is live.
             nogc.emplace();
         }
 
         // We have a single-threaded runtime, so there's no need to protect
         // against other threads iterating or allocating. However, we do have
         // background finalization; we may have to wait for this to finish if
         // it's currently active.
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -1729,17 +1729,17 @@ GCRuntime::getParameter(JSGCParamKey key
         MOZ_ASSERT(key == JSGC_NUMBER);
         return uint32_t(number);
     }
 }
 
 void
 GCRuntime::setMarkStackLimit(size_t limit, AutoLockGC& lock)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
     AutoUnlockGC unlock(lock);
     AutoStopVerifyingBarriers pauseVerification(rt, false);
     marker.setMaxCapacity(limit);
 }
 
 bool
 GCRuntime::addBlackRootsTracer(JSTraceDataOp traceOp, void* data)
 {
@@ -3325,35 +3325,35 @@ GCRuntime::triggerGC(JS::gcreason::Reaso
     /*
      * Don't trigger GCs if this is being called off the main thread from
      * onTooMuchMalloc().
      */
     if (!CurrentThreadCanAccessRuntime(rt))
         return false;
 
     /* GC is already running. */
-    if (JS::CurrentThreadIsHeapCollecting())
+    if (JS::RuntimeHeapIsCollecting())
         return false;
 
     JS::PrepareForFullGC(rt->mainContextFromOwnThread());
     requestMajorGC(reason);
     return true;
 }
 
 void
 GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
-
     if (!CurrentThreadCanAccessRuntime(rt)) {
         // Zones in use by a helper thread can't be collected.
         MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());
         return;
     }
 
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+
     size_t usedBytes = zone->usage.gcBytes();
     size_t thresholdBytes = zone->threshold.gcTriggerBytes();
 
     if (usedBytes >= thresholdBytes) {
         // The threshold has been surpassed, immediately trigger a GC, which
         // will be done non-incrementally.
         triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER, usedBytes, thresholdBytes);
         return;
@@ -3389,17 +3389,17 @@ GCRuntime::maybeAllocTriggerZoneGC(Zone*
 }
 
 bool
 GCRuntime::triggerZoneGC(Zone* zone, JS::gcreason::Reason reason, size_t used, size_t threshold)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     /* GC is already running. */
-    if (JS::CurrentThreadIsHeapBusy())
+    if (JS::RuntimeHeapIsBusy())
         return false;
 
 #ifdef JS_GC_ZEAL
     if (hasZealMode(ZealMode::Alloc)) {
         MOZ_RELEASE_ASSERT(triggerGC(reason));
         return true;
     }
 #endif
@@ -3449,17 +3449,17 @@ GCRuntime::maybeGC(Zone* zone)
     }
 }
 
 void
 GCRuntime::triggerFullGCForAtoms(JSContext* cx)
 {
     MOZ_ASSERT(fullGCForAtomsRequested_);
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
     MOZ_ASSERT(cx->canCollectAtoms());
     fullGCForAtomsRequested_ = false;
     MOZ_RELEASE_ASSERT(triggerGC(JS::gcreason::DELAYED_ATOMS_GC));
 }
 
 // Do all possible decommit immediately from the current thread without
 // releasing the GC lock or allocating any memory.
 void
@@ -3700,25 +3700,25 @@ GCRuntime::queueZonesForBackgroundSweep(
     AutoLockGC lock(rt);
     backgroundSweepZones.ref().transferFrom(zones);
     helperState.maybeStartBackgroundSweep(lock, helperLock);
 }
 
 void
 GCRuntime::freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo)
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(JS::RuntimeHeapIsBusy());
     AutoLockGC lock(rt);
     blocksToFreeAfterSweeping.ref().transferUnusedFrom(lifo);
 }
 
 void
 GCRuntime::freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo)
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(JS::RuntimeHeapIsBusy());
     AutoLockGC lock(rt);
     blocksToFreeAfterSweeping.ref().transferFrom(lifo);
 }
 
 void
 GCRuntime::freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo)
 {
     blocksToFreeAfterMinorGC.ref().transferFrom(lifo);
@@ -4400,20 +4400,18 @@ GCRuntime::beginMarkPhase(JS::gcreason::
     if (!atomsZone->isCollecting())
         session.maybeLock.reset();
 
     /*
     * In an incremental GC, clear the arena free lists to ensure that subsequent
      * allocations refill them and end up marking new cells back. See
      * arenaAllocatedDuringGC().
      */
-    if (isIncremental) {
-        for (GCZonesIter zone(rt); !zone.done(); zone.next())
-            zone->arenas.clearFreeLists();
-    }
+    for (GCZonesIter zone(rt); !zone.done(); zone.next())
+        zone->arenas.clearFreeLists();
 
     marker.start();
     GCMarker* gcmarker = &marker;
 
     {
         gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::PREPARE);
         AutoLockHelperThreadState helperLock;
 
@@ -5724,18 +5722,17 @@ GCRuntime::beginSweepingSweepGroup(FreeO
     AutoSCC scc(stats(), sweepGroupIndex);
 
     bool sweepingAtoms = false;
     for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
         /* Set the GC state to sweeping. */
         zone->changeGCState(Zone::Mark, Zone::Sweep);
 
         /* Purge the ArenaLists before sweeping. */
-        if (isIncremental)
-            zone->arenas.unmarkPreMarkedFreeCells();
+        zone->arenas.unmarkPreMarkedFreeCells();
         zone->arenas.clearFreeLists();
 
         if (zone->isAtomsZone())
             sweepingAtoms = true;
 
 #ifdef DEBUG
         zone->gcLastSweepGroupIndex = sweepGroupIndex;
 #endif
@@ -5855,18 +5852,17 @@ GCRuntime::endSweepingSweepGroup(FreeOp*
 
     /* Update the GC state for zones we have swept. */
     for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
         AutoLockGC lock(rt);
         zone->changeGCState(Zone::Sweep, Zone::Finished);
         zone->threshold.updateAfterGC(zone->usage.gcBytes(), invocationKind, tunables,
                                       schedulingState, lock);
         zone->updateAllGCMallocCountersOnGCEnd(lock);
-        if (isIncremental)
-            zone->arenas.unmarkPreMarkedFreeCells();
+        zone->arenas.unmarkPreMarkedFreeCells();
     }
 
     /*
      * Start background thread to sweep zones if required, sweeping the atoms
      * zone last if present.
      */
     bool sweepAtomsZone = false;
     ZoneList zones;
@@ -5888,17 +5884,17 @@ GCRuntime::endSweepingSweepGroup(FreeOp*
 }
 
 void
 GCRuntime::beginSweepPhase(JS::gcreason::Reason reason, AutoTraceSession& session)
 {
     /*
      * Sweep phase.
      *
-     * Finalize as we sweep, outside of lock but with CurrentThreadIsHeapBusy()
+     * Finalize as we sweep, outside of lock but with RuntimeHeapIsBusy()
      * true so that any attempt to allocate a GC-thing from a finalizer will
      * fail, rather than nest badly and leave the unmarked newborn to be swept.
      */
 
     MOZ_ASSERT(!abortSweepAfterCurrentGroup);
 
     AutoSetThreadIsSweeping threadIsSweeping;
 
@@ -6871,40 +6867,41 @@ HeapStateToLabel(JS::HeapState heapState
     }
     MOZ_ASSERT_UNREACHABLE("Should have exhausted every JS::HeapState variant!");
     return nullptr;
 }
 
 /* Start a new heap session. */
 AutoTraceSession::AutoTraceSession(JSRuntime* rt, JS::HeapState heapState)
   : runtime(rt),
-    prevState(rt->mainContextFromOwnThread()->heapState),
+    prevState(rt->heapState_),
     profilingStackFrame(rt->mainContextFromOwnThread(), HeapStateToLabel(heapState),
                         ProfilingStackFrame::Category::GCCC)
 {
+    MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
     MOZ_ASSERT(prevState == JS::HeapState::Idle);
     MOZ_ASSERT(heapState != JS::HeapState::Idle);
     MOZ_ASSERT_IF(heapState == JS::HeapState::MajorCollecting, rt->gc.nursery().isEmpty());
 
     // Session always begins with lock held, see comment in class definition.
     maybeLock.emplace(rt);
 
-    rt->mainContextFromOwnThread()->heapState = heapState;
+    rt->heapState_ = heapState;
 }
 
 AutoTraceSession::~AutoTraceSession()
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapBusy());
-    runtime->mainContextFromOwnThread()->heapState = prevState;
+    MOZ_ASSERT(JS::RuntimeHeapIsBusy());
+    runtime->heapState_ = prevState;
 }
 
 JS_PUBLIC_API(JS::HeapState)
-JS::CurrentThreadHeapState()
-{
-    return TlsContext.get()->heapState;
+JS::RuntimeHeapState()
+{
+    return TlsContext.get()->runtime()->heapState();
 }
 
 GCRuntime::IncrementalResult
 GCRuntime::resetIncrementalGC(gc::AbortReason reason, AutoTraceSession& session)
 {
     MOZ_ASSERT(reason != gc::AbortReason::None);
 
     switch (incrementalState) {
@@ -6940,16 +6937,21 @@ GCRuntime::resetIncrementalGC(gc::AbortR
       }
 
       case State::Sweep: {
         marker.reset();
 
         for (CompartmentsIter c(rt); !c.done(); c.next())
             c->gcState.scheduledForDestruction = false;
 
+        for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
+            if (zone->isGCMarking())
+                zone->arenas.unmarkPreMarkedFreeCells();
+        }
+
         /* Finish sweeping the current sweep group, then abort. */
         abortSweepAfterCurrentGroup = true;
 
         /* Don't perform any compaction after sweeping. */
         bool wasCompacting = isCompacting;
         isCompacting = false;
 
         auto unlimited = SliceBudget::unlimited();
@@ -7068,27 +7070,16 @@ GCRuntime::pushZealSelectedObjects()
 {
 #ifdef JS_GC_ZEAL
     /* Push selected objects onto the mark stack and clear the list. */
     for (JSObject** obj = selectedForMarking.ref().begin(); obj != selectedForMarking.ref().end(); obj++)
         TraceManuallyBarrieredEdge(&marker, obj, "selected obj");
 #endif
 }
 
-void
-GCRuntime::changeToNonIncrementalGC()
-{
-    MOZ_ASSERT(isIncremental);
-
-    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-        if (zone->isGCMarking() || zone->isGCSweeping())
-            zone->arenas.unmarkPreMarkedFreeCells();
-    }
-}
-
 static bool
 IsShutdownGC(JS::gcreason::Reason reason)
 {
     return reason == JS::gcreason::SHUTDOWN_CC || reason == JS::gcreason::DESTROY_RUNTIME;
 }
 
 static bool
 ShouldCleanUpEverything(JS::gcreason::Reason reason, JSGCInvocationKind gckind)
@@ -7131,18 +7122,16 @@ GCRuntime::incrementalCollectSlice(Slice
     {
         char budgetBuffer[32];
         budget.describe(budgetBuffer, 32);
         stats().writeLogMessage("Incremental: %d, useZeal: %d, budget: %s",
             bool(isIncremental), bool(useZeal), budgetBuffer);
     }
 #endif
     MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
-    if (isIncrementalGCInProgress() && budget.isUnlimited())
-        changeToNonIncrementalGC();
 
     isIncremental = !budget.isUnlimited();
 
     if (useZeal && hasIncrementalTwoSliceZealMode()) {
         /*
          * Yields between slices occurs at predetermined points in these modes;
          * the budget is not used.
          */
@@ -7632,17 +7621,17 @@ GCRuntime::maybeDoCycleCollection()
 }
 
 void
 GCRuntime::checkCanCallAPI()
 {
     MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     /* If we attempt to invoke the GC while we are running in the GC, assert. */
-    MOZ_RELEASE_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_RELEASE_ASSERT(!JS::RuntimeHeapIsBusy());
 
     MOZ_ASSERT(rt->mainContextFromOwnThread()->isAllocAllowed());
 }
 
 bool
 GCRuntime::checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason)
 {
     if (rt->mainContextFromOwnThread()->suppressGC)
@@ -7889,17 +7878,17 @@ GCRuntime::onOutOfMallocMemory(const Aut
     // might let the OS scrape together enough pages to satisfy the failing
     // malloc request.
     decommitAllWithoutUnlocking(lock);
 }
 
 void
 GCRuntime::minorGC(JS::gcreason::Reason reason, gcstats::PhaseKind phase)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
 
     MOZ_ASSERT_IF(reason == JS::gcreason::EVICT_NURSERY,
                   !rt->mainContextFromOwnThread()->suppressGC);
     if (rt->mainContextFromOwnThread()->suppressGC)
         return;
 
     gcstats::AutoPhase ap(rt->gc.stats(), phase);
 
@@ -8288,17 +8277,17 @@ GCRuntime::runDebugGC()
     }
 
 #endif
 }
 
 void
 GCRuntime::setFullCompartmentChecks(bool enabled)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMajorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
     fullCompartmentChecks = enabled;
 }
 
 void
 GCRuntime::notifyRootsRemoved()
 {
     rootsRemoved = true;
 
@@ -8308,30 +8297,30 @@ GCRuntime::notifyRootsRemoved()
         nextScheduled = 1;
 #endif
 }
 
 #ifdef JS_GC_ZEAL
 bool
 GCRuntime::selectForMarking(JSObject* object)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMajorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
     return selectedForMarking.ref().append(object);
 }
 
 void
 GCRuntime::clearSelectedForMarking()
 {
     selectedForMarking.ref().clearAndFree();
 }
 
 void
 GCRuntime::setDeterministic(bool enabled)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMajorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
     deterministicOnly = enabled;
 }
 #endif
 
 #ifdef ENABLE_WASM_GC
 /* static */ bool
 GCRuntime::temporaryAbortIfWasmGc(JSContext* cx) {
     return cx->options().wasmGc() && cx->suppressGC;
@@ -8493,31 +8482,33 @@ AutoAssertNoNurseryAlloc::AutoAssertNoNu
 }
 
 AutoAssertNoNurseryAlloc::~AutoAssertNoNurseryAlloc()
 {
     TlsContext.get()->allowNurseryAlloc();
 }
 
 JS::AutoEnterCycleCollection::AutoEnterCycleCollection(JSRuntime* rt)
-{
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
-    TlsContext.get()->heapState = HeapState::CycleCollecting;
+  : runtime_(rt)
+{
+    MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+    runtime_->heapState_ = HeapState::CycleCollecting;
 }
 
 JS::AutoEnterCycleCollection::~AutoEnterCycleCollection()
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapCycleCollecting());
-    TlsContext.get()->heapState = HeapState::Idle;
+    MOZ_ASSERT(JS::RuntimeHeapIsCycleCollecting());
+    runtime_->heapState_ = HeapState::Idle;
 }
 
 JS::AutoAssertGCCallback::AutoAssertGCCallback()
   : AutoSuppressGCAnalysis()
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
 }
 
 #endif // DEBUG
 
 JS_FRIEND_API(const char*)
 JS::GCTraceKindToAscii(JS::TraceKind kind)
 {
     switch(kind) {
@@ -8802,44 +8793,44 @@ JS_PUBLIC_API(bool)
 JS::IsIncrementalGCInProgress(JSRuntime* rt)
 {
     return rt->gc.isIncrementalGCInProgress() && !rt->gc.isVerifyPreBarriersEnabled();
 }
 
 JS_PUBLIC_API(bool)
 JS::IsIncrementalBarrierNeeded(JSContext* cx)
 {
-    if (JS::CurrentThreadIsHeapBusy())
+    if (JS::RuntimeHeapIsBusy())
         return false;
 
     auto state = cx->runtime()->gc.state();
     return state != gc::State::NotActive && state <= gc::State::Sweep;
 }
 
 JS_PUBLIC_API(void)
 JS::IncrementalPreWriteBarrier(JSObject* obj)
 {
     if (!obj)
         return;
 
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMajorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
     JSObject::writeBarrierPre(obj);
 }
 
 struct IncrementalReadBarrierFunctor {
     template <typename T> void operator()(T* t) { T::readBarrier(t); }
 };
 
 JS_PUBLIC_API(void)
 JS::IncrementalReadBarrier(GCCellPtr thing)
 {
     if (!thing)
         return;
 
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMajorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMajorCollecting());
     DispatchTyped(IncrementalReadBarrierFunctor(), thing);
 }
 
 JS_PUBLIC_API(bool)
 JS::WasIncrementalGC(JSRuntime* rt)
 {
     return rt->gc.isIncrementalGc();
 }
@@ -9192,22 +9183,22 @@ JS_PUBLIC_API(bool)
 js::gc::detail::CellIsNotGray(const Cell* cell)
 {
     // Check that a cell is not marked gray.
     //
     // Since this is a debug-only check, take account of the eventual mark state
     // of cells that will be marked black by the next GC slice in an incremental
     // GC. For performance reasons we don't do this in CellIsMarkedGrayIfKnown.
 
+    if (!CanCheckGrayBits(cell))
+        return true;
+
     // TODO: I'd like to AssertHeapIsIdle() here, but this ends up getting
     // called during GC and while iterating the heap for memory reporting.
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCycleCollecting());
-
-    if (!CanCheckGrayBits(cell))
-        return true;
+    MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());
 
     auto tc = &cell->asTenured();
     if (!detail::CellIsMarkedGray(tc))
         return true;
 
     // The cell is gray, but may eventually be marked black if we are in an
     // incremental GC and the cell is reachable by something on the mark stack.
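
Taken together, the GC.cpp hunks move the heap-state field from the JSContext onto the JSRuntime: AutoTraceSession and AutoEnterCycleCollection now flip rt->heapState_ directly (asserting CurrentThreadCanAccessRuntime first), and RuntimeHeapState() reads it back through the context's runtime. Reduced to the state handling alone, the RAII shape visible in the diff is:

    // Reduced sketch of AutoTraceSession's state flip; locking, profiling
    // and the other members of the real class are omitted.
    class AutoTraceSessionSketch
    {
        JSRuntime* runtime;
        JS::HeapState prevState;
      public:
        AutoTraceSessionSketch(JSRuntime* rt, JS::HeapState state)
          : runtime(rt), prevState(rt->heapState_)
        {
            MOZ_ASSERT(prevState == JS::HeapState::Idle);
            rt->heapState_ = state;   // runtime-wide, not per-context
        }
        ~AutoTraceSessionSketch() { runtime->heapState_ = prevState; }
    };
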
 
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -557,17 +557,16 @@ class GCRuntime
                                            JS::gcreason::Reason reason);
     bool shouldRepeatForDeadZone(JS::gcreason::Reason reason);
     void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
                                  AutoTraceSession& session);
 
     friend class AutoCallGCCallbacks;
     void maybeCallGCCallback(JSGCStatus status);
 
-    void changeToNonIncrementalGC();
     void pushZealSelectedObjects();
     void purgeRuntime();
     MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason, AutoTraceSession& session);
     bool prepareZonesForCollection(JS::gcreason::Reason reason, bool* isFullOut,
                                    AutoLockForExclusiveAccess& lock);
     bool shouldPreserveJITCode(JS::Realm* realm, int64_t currentTime,
                                JS::gcreason::Reason reason, bool canAllocateMoreCode);
     void traceRuntimeForMajorGC(JSTracer* trc, AutoTraceSession& session);
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -263,17 +263,17 @@ js::CheckTracedThing(JSTracer* trc, T* t
      * if it has not then we assume it is allocated, but if it has then it is
      * either free or uninitialized in which case we check the free list.
      *
      * Further complications are that background sweeping may be running and
     * concurrently modifying the free list and that tracing is done off
      * thread during compacting GC and reading the contents of the thing by
      * IsThingPoisoned would be racy in this case.
      */
-    MOZ_ASSERT_IF(JS::CurrentThreadIsHeapBusy() &&
+    MOZ_ASSERT_IF(JS::RuntimeHeapIsBusy() &&
                   !zone->isGCCompacting() &&
                   !rt->gc.isBackgroundSweeping(),
                   !IsThingPoisoned(thing) || !InFreeList(thing->asTenured().arena(), thing));
 #endif
 }
 
 template <typename S>
 struct CheckTracedFunctor : public VoidDefaultAdaptor<S> {
@@ -2570,17 +2570,17 @@ GCMarker::sizeOfExcludingThis(mozilla::M
         size += zone->gcGrayRoots().sizeOfExcludingThis(mallocSizeOf);
     return size;
 }
 
 #ifdef DEBUG
 Zone*
 GCMarker::stackContainsCrossZonePointerTo(const Cell* target) const
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
 
     Zone* targetZone = target->asTenured().zone();
 
     for (MarkStackIter iter(stack); !iter.done(); iter.next()) {
         if (iter.peekTag() != MarkStack::ObjectTag)
             continue;
 
         auto source = iter.peekPtr().as<JSObject>();
@@ -3209,17 +3209,17 @@ CheckIsMarkedThing(T* thingp)
 
 #ifdef DEBUG
     MOZ_ASSERT(thingp);
     MOZ_ASSERT(*thingp);
     JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
     MOZ_ASSERT_IF(!ThingIsPermanentAtomOrWellKnownSymbol(*thingp),
                   CurrentThreadCanAccessRuntime(rt) ||
                   CurrentThreadCanAccessZone((*thingp)->zoneFromAnyThread()) ||
-                  (JS::CurrentThreadIsHeapCollecting() && rt->gc.state() == State::Sweep));
+                  (JS::RuntimeHeapIsCollecting() && rt->gc.state() == State::Sweep));
 #endif
 }
 
 template <typename T>
 static bool
 IsMarkedInternalCommon(T* thingp)
 {
     CheckIsMarkedThing(thingp);
@@ -3294,17 +3294,17 @@ js::gc::IsAboutToBeFinalizedInternal(T**
     T* thing = *thingp;
     JSRuntime* rt = thing->runtimeFromAnyThread();
 
     /* Permanent atoms are never finalized by non-owning runtimes. */
     if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && TlsContext.get()->runtime() != rt)
         return false;
 
     if (IsInsideNursery(thing)) {
-        return JS::CurrentThreadIsHeapMinorCollecting() &&
+        return JS::RuntimeHeapIsMinorCollecting() &&
                !Nursery::getForwardedPointer(reinterpret_cast<Cell**>(thingp));
     }
 
     Zone* zone = thing->asTenured().zoneFromAnyThread();
     if (zone->isGCSweeping()) {
         return IsAboutToBeFinalizedDuringSweep(thing->asTenured());
     } else if (zone->isGCCompacting() && IsForwarded(thing)) {
         *thingp = Forwarded(thing);
@@ -3509,18 +3509,18 @@ UnmarkGrayGCThing(JSRuntime* rt, JS::GCC
     gcstats::AutoPhase innerPhase(rt->gc.stats(), gcstats::PhaseKind::UNMARK_GRAY);
     unmarker.unmark(thing);
     return unmarker.unmarkedAny;
 }
 
 JS_FRIEND_API(bool)
 JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr thing)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCycleCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());
 
     JSRuntime* rt = thing.asCell()->runtimeFromMainThread();
     gcstats::AutoPhase outerPhase(rt->gc.stats(), gcstats::PhaseKind::BARRIER);
     return UnmarkGrayGCThing(rt, thing);
 }
 
 bool
 js::UnmarkGrayShapeRecursively(Shape* shape)
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -349,17 +349,17 @@ js::Nursery::allocateString(Zone* zone, 
     gcTracer.traceNurseryAlloc(cell, kind);
     return cell;
 }
 
 void*
 js::Nursery::allocate(size_t size)
 {
     MOZ_ASSERT(isEnabled());
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
     MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
     MOZ_ASSERT(position() % CellAlignBytes == 0);
     MOZ_ASSERT(size % CellAlignBytes == 0);
 
 #ifdef JS_GC_ZEAL
     static const size_t CanarySize = (sizeof(Nursery::Canary) + CellAlignBytes - 1) & ~CellAlignMask;
     if (runtime()->gc.hasZealMode(ZealMode::CheckNursery))
--- a/js/src/gc/PrivateIterators-inl.h
+++ b/js/src/gc/PrivateIterators-inl.h
@@ -58,17 +58,17 @@ class GrayObjectIter : public ZoneCellIt
 };
 
 class GCZonesIter
 {
     ZonesIter zone;
 
   public:
     explicit GCZonesIter(JSRuntime* rt, ZoneSelector selector = WithAtoms) : zone(rt, selector) {
-        MOZ_ASSERT(JS::CurrentThreadIsHeapBusy());
+        MOZ_ASSERT(JS::RuntimeHeapIsBusy());
         MOZ_ASSERT_IF(rt->gc.atomsZone->isCollectingFromAnyThread(),
                       !rt->hasHelperThreadZones());
 
         if (!done() && !zone->isCollectingFromAnyThread())
             next();
     }
 
     bool done() const { return zone.done(); }
--- a/js/src/gc/PublicIterators.cpp
+++ b/js/src/gc/PublicIterators.cpp
@@ -115,26 +115,26 @@ IterateGrayObjects(Zone* zone, GCThingCa
                 cellCallback(data, JS::GCCellPtr(obj.get()));
         }
     }
 }
 
 void
 js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
     AutoPrepareForTracing prep(TlsContext.get());
     ::IterateGrayObjects(zone, cellCallback, data);
 }
 
 void
 js::IterateGrayObjectsUnderCC(Zone* zone, GCThingCallback cellCallback, void* data)
 {
     mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
-    MOZ_ASSERT(JS::CurrentThreadIsHeapCycleCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsCycleCollecting());
     MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
     ::IterateGrayObjects(zone, cellCallback, data);
 }
 
 JS_PUBLIC_API(void)
 JS_IterateCompartments(JSContext* cx, void* data,
                        JSIterateCompartmentCallback compartmentCallback)
 {
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -384,17 +384,17 @@ js::gc::GCRuntime::traceRuntimeCommon(JS
     // parent pointer if traceRoots actually traces anything.
     for (RealmsIter r(rt); !r.done(); r.next())
         r->traceRoots(trc, traceOrMark);
 
     // Trace helper thread roots.
     HelperThreadState().trace(trc, session);
 
     // Trace the embedding's black and gray roots.
-    if (!JS::CurrentThreadIsHeapMinorCollecting()) {
+    if (!JS::RuntimeHeapIsMinorCollecting()) {
         gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_EMBEDDING);
 
         /*
          * The embedding can register additional roots here.
          *
          * We don't need to trace these in a minor GC because all pointers into
          * the nursery should be in the store buffer, and we want to avoid the
          * time taken to trace all these roots.
@@ -523,17 +523,17 @@ js::gc::GCRuntime::bufferGrayRoots()
       grayBufferState = GrayBufferState::Okay;
     }
 }
 
 template <typename T>
 inline void
 BufferGrayRootsTracer::bufferRoot(T* thing)
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(JS::RuntimeHeapIsBusy());
     MOZ_ASSERT(thing);
     // Check if |thing| is corrupt by calling a method that touches the heap.
     MOZ_ASSERT(thing->getTraceKind() <= JS::TraceKind::Null);
 
     TenuredCell* tenured = &thing->asTenured();
 
     // This is run from a helper thread while the mutator is paused so we have
     // to use *FromAnyThread methods here.
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -401,27 +401,27 @@ class StoreBuffer
             static bool match(const SlotsEdge& k, const Lookup& l) { return k == l; }
         } Hasher;
 
         static const auto FullBufferReason = JS::gcreason::FULL_SLOT_BUFFER;
     };
 
     template <typename Buffer, typename Edge>
     void unput(Buffer& buffer, const Edge& edge) {
-        MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+        MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
         if (!isEnabled())
             return;
         mozilla::ReentrancyGuard g(*this);
         buffer.unput(this, edge);
     }
 
     template <typename Buffer, typename Edge>
     void put(Buffer& buffer, const Edge& edge) {
-        MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+        MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
         if (!isEnabled())
             return;
         mozilla::ReentrancyGuard g(*this);
         if (edge.maybeInRememberedSet(nursery_))
             buffer.put(this, edge);
     }
 
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -713,17 +713,17 @@ CheckGrayMarkingTracer::check(AutoTraceS
         return true; // Ignore failure.
 
     return failures == 0;
 }
 
 JS_FRIEND_API(bool)
 js::CheckGrayMarkingState(JSRuntime* rt)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
     MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
     if (!rt->gc.areGrayBitsValid())
         return true;
 
     gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
     AutoTraceSession session(rt, JS::HeapState::Tracing);
     CheckGrayMarkingTracer tracer(rt);
     if (!tracer.init())
--- a/js/src/gc/WeakMap.h
+++ b/js/src/gc/WeakMap.h
@@ -195,17 +195,17 @@ class WeakMap : public HashMap<Key, Valu
             TraceEdge(marker, &p->value(), "WeakMap ephemeron value");
             TraceEdge(marker, &key, "proxy-preserved WeakMap ephemeron key");
             MOZ_ASSERT(key == p->key()); // No moving
         }
         key.unsafeSet(nullptr); // Prevent destructor from running barriers.
     }
 
     void trace(JSTracer* trc) override {
-        MOZ_ASSERT_IF(JS::CurrentThreadIsHeapBusy(), isInList());
+        MOZ_ASSERT_IF(JS::RuntimeHeapIsBusy(), isInList());
 
         TraceNullableEdge(trc, &memberOf, "WeakMap owner");
 
         if (!Base::initialized())
             return;
 
         if (trc->isMarkingTracer()) {
             MOZ_ASSERT(trc->weakMapAction() == ExpandWeakMaps);
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -221,51 +221,51 @@ class Zone : public JS::shadow::Zone,
         return runtimeFromMainThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
     }
     void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }
 
     void beginSweepTypes(bool releaseTypes);
 
     bool hasMarkedRealms();
 
-    void scheduleGC() { MOZ_ASSERT(!CurrentThreadIsHeapBusy()); gcScheduled_ = true; }
+    void scheduleGC() { MOZ_ASSERT(!RuntimeHeapIsBusy()); gcScheduled_ = true; }
     void unscheduleGC() { gcScheduled_ = false; }
     bool isGCScheduled() { return gcScheduled_; }
 
     void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
     bool isPreservingCode() const { return gcPreserveCode_; }
 
     // Whether this zone can currently be collected. This doesn't take account
     // of AutoKeepAtoms for the atoms zone.
     bool canCollect();
 
     void changeGCState(GCState prev, GCState next) {
-        MOZ_ASSERT(CurrentThreadIsHeapBusy());
+        MOZ_ASSERT(RuntimeHeapIsBusy());
         MOZ_ASSERT(gcState() == prev);
         MOZ_ASSERT_IF(next != NoGC, canCollect());
         gcState_ = next;
     }
 
     bool isCollecting() const {
         MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
         return isCollectingFromAnyThread();
     }
 
     bool isCollectingFromAnyThread() const {
-        if (CurrentThreadIsHeapCollecting())
+        if (RuntimeHeapIsCollecting())
             return gcState_ != NoGC;
         else
             return needsIncrementalBarrier();
     }
 
     // If this returns true, all object tracing must be done with a GC marking
     // tracer.
     bool requireGCTracer() const {
         JSRuntime* rt = runtimeFromAnyThread();
-        return CurrentThreadIsHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
+        return RuntimeHeapIsMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
     }
 
     bool shouldMarkInZone() const {
         return needsIncrementalBarrier() || isGCMarking();
     }
 
     // Get a number that is incremented whenever this zone is collected, and
     // possibly at other times too.
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -579,17 +579,17 @@ jit::LazyLinkTopActivation(JSContext* cx
     MOZ_ASSERT(calleeScript->jitCodeRaw());
 
     return calleeScript->jitCodeRaw();
 }
 
 /* static */ void
 JitRuntime::Trace(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
 
     // Shared stubs are allocated in the atoms zone, so do not iterate
     // them after the atoms heap has been "finished."
     if (trc->runtime()->atomsAreFinished())
         return;
 
     Zone* zone = trc->runtime()->atomsZone(lock);
     for (auto i = zone->cellIter<JitCode>(); !i.done(); i.next()) {
@@ -773,17 +773,17 @@ JitCode::traceChildren(JSTracer* trc)
 
     if (jumpRelocTableBytes_) {
         uint8_t* start = code_ + jumpRelocTableOffset();
         CompactBufferReader reader(start, start + jumpRelocTableBytes_);
         MacroAssembler::TraceJumpRelocations(trc, this, reader);
     }
     if (dataRelocTableBytes_) {
         // If we're moving objects, we need writable JIT code.
-        bool movingObjects = JS::CurrentThreadIsHeapMinorCollecting() || zone()->isGCCompacting();
+        bool movingObjects = JS::RuntimeHeapIsMinorCollecting() || zone()->isGCCompacting();
         MaybeAutoWritableJitCode awjc(this, movingObjects ? Reprotect : DontReprotect);
 
         uint8_t* start = code_ + dataRelocTableOffset();
         CompactBufferReader reader(start, start + dataRelocTableBytes_);
         MacroAssembler::TraceDataRelocations(trc, this, reader);
     }
 }
 
--- a/js/src/jit/JSJitFrameIter.cpp
+++ b/js/src/jit/JSJitFrameIter.cpp
@@ -404,17 +404,17 @@ JSJitFrameIter::verifyReturnAddressUsing
     // Don't verify while off thread.
     if (!CurrentThreadCanAccessRuntime(rt))
         return true;
 
     // Don't verify if sampling is being suppressed.
     if (!TlsContext.get()->isProfilerSamplingEnabled())
         return true;
 
-    if (JS::CurrentThreadIsHeapMinorCollecting())
+    if (JS::RuntimeHeapIsMinorCollecting())
         return true;
 
     JitRuntime* jitrt = rt->jitRuntime();
 
     // Look up and print bytecode info for the native address.
     const JitcodeGlobalEntry* entry = jitrt->getJitcodeGlobalTable()->lookup(returnAddressToFp_);
     if (!entry)
         return true;
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -1335,17 +1335,17 @@ TraceJitActivations(JSContext* cx, JSTra
 {
     for (JitActivationIterator activations(cx); !activations.done(); ++activations)
         TraceJitActivation(trc, activations->asJit());
 }
 
 void
 UpdateJitActivationsForMinorGC(JSRuntime* rt)
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapMinorCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
     JSContext* cx = rt->mainContextFromOwnThread();
     for (JitActivationIterator activations(cx); !activations.done(); ++activations) {
         for (OnlyJSJitFrameIter iter(activations); !iter.done(); ++iter) {
             if (iter.frame().type() == JitFrame_IonJS)
                 UpdateIonJSFrameForMinorGC(rt, iter.frame());
         }
     }
 }
--- a/js/src/jit/JitcodeMap.cpp
+++ b/js/src/jit/JitcodeMap.cpp
@@ -735,17 +735,17 @@ struct Unconditionally
 };
 
 void
 JitcodeGlobalTable::traceForMinorGC(JSTracer* trc)
 {
     // Trace only entries that can directly contain nursery pointers.
 
     MOZ_ASSERT(trc->runtime()->geckoProfiler().enabled());
-    MOZ_ASSERT(JS::CurrentThreadIsHeapMinorCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
 
     JSContext* cx = trc->runtime()->mainContextFromOwnThread();
     AutoSuppressProfilerSampling suppressSampling(cx);
     JitcodeGlobalEntry::IonEntry* entry = nurseryEntries_;
     while (entry) {
         entry->trace<Unconditionally>(trc);
         JitcodeGlobalEntry::IonEntry* prev = entry;
         entry = entry->nextNursery_;
@@ -785,17 +785,17 @@ JitcodeGlobalTable::markIteratively(GCMa
     // the frame was on-stack at the beginning of the sweep phase, or 2) the
     // frame was pushed between incremental sweep slices. Frames of case 1)
     // are already marked. Frames of case 2) must have been reachable to have
     // been newly pushed, and thus are already marked.
     //
     // The approach above obviates the need for read barriers. The assumption
     // above is checked in JitcodeGlobalTable::lookupForSampler.
 
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
 
     AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
 
     // If the profiler is off, rangeStart will be Nothing() and all entries are
     // considered to be expired.
     Maybe<uint64_t> rangeStart = marker->runtime()->profilerSampleBufferRangeStart();
 
     bool markedAny = false;
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -318,35 +318,35 @@ JS_GetEmptyString(JSContext* cx)
     return cx->emptyString();
 }
 
 namespace js {
 
 void
 AssertHeapIsIdle()
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
 }
 
 } // namespace js
 
 static void
 AssertHeapIsIdleOrIterating()
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
 }
 
 static void
 AssertHeapIsIdleOrStringIsFlat(JSString* str)
 {
     /*
      * We allow some functions to be called during a GC as long as the argument
      * is a flat string, since that will not cause allocation.
      */
-    MOZ_ASSERT_IF(JS::CurrentThreadIsHeapBusy(), str->isFlat());
+    MOZ_ASSERT_IF(JS::RuntimeHeapIsBusy(), str->isFlat());
 }
 
 JS_PUBLIC_API(bool)
 JS_ValueToObject(JSContext* cx, HandleValue value, MutableHandleObject objp)
 {
     AssertHeapIsIdle();
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, value);
--- a/js/src/jspubtd.h
+++ b/js/src/jspubtd.h
@@ -113,60 +113,62 @@ enum class HeapState {
     Idle,             // doing nothing with the GC heap
     Tracing,          // tracing the GC heap without collecting, e.g. IterateCompartments()
     MajorCollecting,  // doing a GC of the major heap
     MinorCollecting,  // doing a GC of the minor heap (nursery)
     CycleCollecting   // in the "Unlink" phase of cycle collection
 };
 
 JS_PUBLIC_API(HeapState)
-CurrentThreadHeapState();
+RuntimeHeapState();
 
 static inline bool
-CurrentThreadIsHeapBusy()
+RuntimeHeapIsBusy()
 {
-    return CurrentThreadHeapState() != HeapState::Idle;
+    return RuntimeHeapState() != HeapState::Idle;
 }
 
 static inline bool
-CurrentThreadIsHeapTracing()
+RuntimeHeapIsTracing()
 {
-    return CurrentThreadHeapState() == HeapState::Tracing;
+    return RuntimeHeapState() == HeapState::Tracing;
 }
 
 static inline bool
-CurrentThreadIsHeapMajorCollecting()
+RuntimeHeapIsMajorCollecting()
 {
-    return CurrentThreadHeapState() == HeapState::MajorCollecting;
+    return RuntimeHeapState() == HeapState::MajorCollecting;
 }
 
 static inline bool
-CurrentThreadIsHeapMinorCollecting()
+RuntimeHeapIsMinorCollecting()
 {
-    return CurrentThreadHeapState() == HeapState::MinorCollecting;
+    return RuntimeHeapState() == HeapState::MinorCollecting;
 }
 
 static inline bool
-CurrentThreadIsHeapCollecting()
+RuntimeHeapIsCollecting()
 {
-    HeapState state = CurrentThreadHeapState();
+    HeapState state = RuntimeHeapState();
     return state == HeapState::MajorCollecting || state == HeapState::MinorCollecting;
 }
 
 static inline bool
-CurrentThreadIsHeapCycleCollecting()
+RuntimeHeapIsCycleCollecting()
 {
-    return CurrentThreadHeapState() == HeapState::CycleCollecting;
+    return RuntimeHeapState() == HeapState::CycleCollecting;
 }
 
 // Decorates the Unlinking phase of CycleCollection so that accidental use
 // of barriered accessors results in assertions instead of leaks.
 class MOZ_STACK_CLASS JS_PUBLIC_API(AutoEnterCycleCollection)
 {
 #ifdef DEBUG
+    JSRuntime* runtime_;
+
   public:
     explicit AutoEnterCycleCollection(JSRuntime* rt);
     ~AutoEnterCycleCollection();
 #else
   public:
     explicit AutoEnterCycleCollection(JSRuntime* rt) {}
     ~AutoEnterCycleCollection() {}
 #endif
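
The predicate family keeps its old semantics under the new names; only the backing state has moved from the current thread to the runtime. A minimal usage sketch (the guard function below is hypothetical):

    // Hypothetical guard: code relying on barriered accessors must not run
    // while the runtime is collecting or in the cycle-collector Unlink phase.
    static void TouchBarrieredThing()
    {
        MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
        MOZ_ASSERT(!JS::RuntimeHeapIsCycleCollecting());
        // ... safe to use barriered reads/writes here ...
    }
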
--- a/js/src/proxy/Wrapper.cpp
+++ b/js/src/proxy/Wrapper.cpp
@@ -370,17 +370,17 @@ js::UncheckedUnwrapWithoutExpose(JSObjec
             wrapped = MaybeForwarded(wrapped);
     }
     return wrapped;
 }
 
 JS_FRIEND_API(JSObject*)
 js::UncheckedUnwrap(JSObject* wrapped, bool stopAtWindowProxy, unsigned* flagsp)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(wrapped->runtimeFromAnyThread()));
 
     unsigned flags = 0;
     while (true) {
         if (!wrapped->is<WrapperObject>() ||
             MOZ_UNLIKELY(stopAtWindowProxy && IsWindowProxy(wrapped)))
         {
             break;
@@ -402,17 +402,17 @@ js::CheckedUnwrap(JSObject* obj, bool st
         if (!obj || obj == wrapper)
             return obj;
     }
 }
 
 JS_FRIEND_API(JSObject*)
 js::UnwrapOneChecked(JSObject* obj, bool stopAtWindowProxy)
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(obj->runtimeFromAnyThread()));
 
     if (!obj->is<WrapperObject>() ||
         MOZ_UNLIKELY(stopAtWindowProxy && IsWindowProxy(obj)))
     {
         return obj;
     }
 
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -1658,17 +1658,17 @@ ParseCompileOptions(JSContext* cx, Compi
 
 static void
 my_LargeAllocFailCallback()
 {
     JSContext* cx = TlsContext.get();
     if (!cx || cx->helperThread())
         return;
 
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
 
     JS::PrepareForFullGC(cx);
     cx->runtime()->gc.gc(GC_NORMAL, JS::gcreason::SHARED_MEMORY_LIMIT);
 }
 
 static const uint32_t CacheEntry_SOURCE = 0;
 static const uint32_t CacheEntry_BYTECODE = 1;
 
--- a/js/src/vm/BytecodeUtil.cpp
+++ b/js/src/vm/BytecodeUtil.cpp
@@ -1144,17 +1144,17 @@ ToDisassemblySource(JSContext* cx, Handl
         if (!copy) {
             ReportOutOfMemory(cx);
             return false;
         }
         bytes->initBytes(std::move(copy));
         return true;
     }
 
-    if (JS::CurrentThreadIsHeapBusy() || !cx->isAllocAllowed()) {
+    if (JS::RuntimeHeapIsBusy() || !cx->isAllocAllowed()) {
         UniqueChars source = JS_smprintf("<value>");
         if (!source) {
             ReportOutOfMemory(cx);
             return false;
         }
         bytes->initBytes(std::move(source));
         return true;
     }
--- a/js/src/vm/Compartment.cpp
+++ b/js/src/vm/Compartment.cpp
@@ -380,17 +380,17 @@ Compartment::wrap(JSContext* cx, Mutable
             return false;
     }
     return true;
 }
 
 void
 Compartment::traceOutgoingCrossCompartmentWrappers(JSTracer* trc)
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapMajorCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsMajorCollecting());
     MOZ_ASSERT(!zone()->isCollectingFromAnyThread() || trc->runtime()->gc.isHeapCompacting());
 
     for (NonStringWrapperEnum e(this); !e.empty(); e.popFront()) {
         if (e.front().key().is<JSObject*>()) {
             Value v = e.front().value().unbarrieredGet();
             ProxyObject* wrapper = &v.toObject().as<ProxyObject>();
 
             /*
@@ -401,17 +401,17 @@ Compartment::traceOutgoingCrossCompartme
         }
     }
 }
 
 /* static */ void
 Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(JSTracer* trc)
 {
     gcstats::AutoPhase ap(trc->runtime()->gc.stats(), gcstats::PhaseKind::MARK_CCWS);
-    MOZ_ASSERT(JS::CurrentThreadIsHeapMajorCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsMajorCollecting());
     for (CompartmentsIter c(trc->runtime()); !c.done(); c.next()) {
         if (!c->zone()->isCollecting())
             c->traceOutgoingCrossCompartmentWrappers(trc);
     }
     Debugger::traceIncomingCrossCompartmentEdges(trc);
 }
 
 void
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -1586,19 +1586,17 @@ js::GCParallelTask::runFromHelperThread(
     MOZ_ASSERT(isDispatched(lock));
 
     AutoSetContextRuntime ascr(runtime());
     gc::AutoSetThreadIsPerformingGC performingGC;
 
     {
         AutoUnlockHelperThreadState parallelSection(lock);
         TimeStamp timeStart = TimeStamp::Now();
-        TlsContext.get()->heapState = JS::HeapState::MajorCollecting;
         runTask();
-        TlsContext.get()->heapState = JS::HeapState::Idle;
         duration_ = TimeSince(timeStart);
     }
 
     setFinished(lock);
     HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, lock);
 }
 
 bool
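
The removed assignments are the point of this hunk: with heapState owned by JSRuntime, a parallel GC task already runs inside a session that put the whole runtime into MajorCollecting, so toggling a per-thread flag around runTask() would be redundant. A sketch of the runtime-wide guard this implies (AutoHeapSession is a hypothetical name; the real patch drives heapState_ from the GC's existing session machinery, and the assignment below assumes friend access to the private atomic):

    class MOZ_RAII AutoHeapSession
    {
        JSRuntime* rt_;
      public:
        AutoHeapSession(JSRuntime* rt, JS::HeapState state) : rt_(rt) {
            MOZ_ASSERT(rt_->heapState() == JS::HeapState::Idle);
            rt_->heapState_ = state;   // assumes friendship with JSRuntime
        }
        ~AutoHeapSession() { rt_->heapState_ = JS::HeapState::Idle; }
    };
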
--- a/js/src/vm/JSContext-inl.h
+++ b/js/src/vm/JSContext-inl.h
@@ -176,17 +176,17 @@ class CompartmentChecker
     }
 };
 
 /*
  * Don't perform these checks when called from a finalizer. The checking
  * depends on other objects not having been swept yet.
  */
 #define START_ASSERT_SAME_COMPARTMENT()                                 \
-    if (cx->heapState != JS::HeapState::Idle)                           \
+    if (JS::RuntimeHeapIsCollecting())                                  \
         return;                                                         \
     CompartmentChecker c(cx)
 
 template <class T1> inline void
 releaseAssertSameCompartment(JSContext* cx, const T1& t1)
 {
     START_ASSERT_SAME_COMPARTMENT();
     c.check(t1);
--- a/js/src/vm/JSContext.cpp
+++ b/js/src/vm/JSContext.cpp
@@ -1237,17 +1237,16 @@ JSContext::JSContext(JSRuntime* runtime,
 #ifdef JS_SIMULATOR
     simulator_(nullptr),
 #endif
 #ifdef JS_TRACE_LOGGING
     traceLogger(nullptr),
 #endif
     autoFlushICache_(nullptr),
     dtoaState(nullptr),
-    heapState(JS::HeapState::Idle),
     suppressGC(0),
 #ifdef DEBUG
     ionCompiling(false),
     ionCompilingSafeForMinorGC(false),
     performingGC(false),
     gcSweeping(false),
     gcHelperStateThread(false),
     isTouchingGrayThings(false),
@@ -1545,17 +1544,17 @@ JSContext::updateMallocCounter(size_t nb
 }
 
 #ifdef DEBUG
 
 JS::AutoCheckRequestDepth::AutoCheckRequestDepth(JSContext* cxArg)
   : cx(cxArg->helperThread() ? nullptr : cxArg)
 {
     if (cx) {
-        MOZ_ASSERT(cx->requestDepth || JS::CurrentThreadIsHeapBusy());
+        MOZ_ASSERT(cx->requestDepth || JS::RuntimeHeapIsBusy());
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
         cx->checkRequestDepth++;
     }
 }
 
 JS::AutoCheckRequestDepth::~AutoCheckRequestDepth()
 {
     if (cx) {
--- a/js/src/vm/JSContext.h
+++ b/js/src/vm/JSContext.h
@@ -439,19 +439,16 @@ struct JSContext : public JS::RootingCon
   public:
 
     js::jit::AutoFlushICache* autoFlushICache() const;
     void setAutoFlushICache(js::jit::AutoFlushICache* afc);
 
     // State used by util/DoubleToString.cpp.
     js::ThreadData<DtoaState*> dtoaState;
 
-    // Any GC activity occurring on this thread.
-    js::ThreadData<JS::HeapState> heapState;
-
     /*
      * When this flag is non-zero, any attempt to GC will be skipped. It is used
      * to suppress GC when reporting an OOM (see ReportOutOfMemory) and in
      * debugging facilities that cannot tolerate a GC and would rather OOM
      * immediately, such as utilities exposed to GDB. Setting this flag is
      * extremely dangerous and should only be used when in an OOM situation or
      * in non-exposed debugging facilities.
      */
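
The counter is conventionally manipulated through an RAII helper rather than written directly. A sketch of the pattern the comment describes (treat the js::AutoSuppressGC name as an assumption here; it is the conventional SpiderMonkey helper for this flag):

    // Sketch: report an OOM without risking a GC underneath us.
    static void ReportErrorNoGC(JSContext* cx)
    {
        js::AutoSuppressGC suppress(cx);  // bumps cx->suppressGC for this scope
        // ... build and report the error; any GC attempt is skipped ...
    }
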
--- a/js/src/vm/Realm.cpp
+++ b/js/src/vm/Realm.cpp
@@ -277,17 +277,17 @@ void
 Realm::traceGlobal(JSTracer* trc)
 {
     // Trace things reachable from the realm's global. Note that these edges
     // must be swept too in case the realm is live but the global is not.
 
     savedStacks_.trace(trc);
 
     // Atoms are always tenured.
-    if (!JS::CurrentThreadIsHeapMinorCollecting())
+    if (!JS::RuntimeHeapIsMinorCollecting())
         varNames_.trace(trc);
 }
 
 void
 ObjectRealm::trace(JSTracer* trc)
 {
     if (lazyArrayBuffers)
         lazyArrayBuffers->trace(trc);
@@ -303,17 +303,17 @@ void
 Realm::traceRoots(JSTracer* trc, js::gc::GCRuntime::TraceOrMarkRuntime traceOrMark)
 {
     if (objectMetadataState_.is<PendingMetadata>()) {
         TraceRoot(trc,
                   &objectMetadataState_.as<PendingMetadata>(),
                   "on-stack object pending metadata");
     }
 
-    if (!JS::CurrentThreadIsHeapMinorCollecting()) {
+    if (!JS::RuntimeHeapIsMinorCollecting()) {
         // The global is never nursery allocated, so we don't need to
         // trace it when doing a minor collection.
         //
         // If a compartment is on-stack, we mark its global so that
         // JSContext::global() remains valid.
         if (shouldTraceGlobal() && global_.unbarrieredGet())
             TraceRoot(trc, global_.unsafeUnbarrieredForTracing(), "on-stack compartment global");
     }
@@ -338,17 +338,17 @@ Realm::traceRoots(JSTracer* trc, js::gc:
     // keys of the HashMap to avoid adding a strong reference to the JSScript
     // pointers.
     //
     // If the code coverage is either enabled with the --dump-bytecode command
     // line option, or with the PCCount JSFriend API functions, then we mark the
     // keys of the map to hold the JSScript alive.
     if (scriptCountsMap &&
         trc->runtime()->profilingScripts &&
-        !JS::CurrentThreadIsHeapMinorCollecting())
+        !JS::RuntimeHeapIsMinorCollecting())
     {
         MOZ_ASSERT_IF(!trc->runtime()->isBeingDestroyed(), collectCoverage());
         for (ScriptCountsMap::Range r = scriptCountsMap->all(); !r.empty(); r.popFront()) {
             JSScript* script = const_cast<JSScript*>(r.front().key());
             MOZ_ASSERT(script->hasScriptCounts());
             TraceRoot(trc, &script, "profilingScripts");
             MOZ_ASSERT(script == r.front().key(), "const_cast is only a work-around");
         }
--- a/js/src/vm/RegExpObject.cpp
+++ b/js/src/vm/RegExpObject.cpp
@@ -139,22 +139,22 @@ RegExpObject::trace(JSTracer* trc, JSObj
 }
 
 static inline bool
 IsMarkingTrace(JSTracer* trc)
 {
     // Determine whether tracing is happening during normal marking.  We need to
     // test all the following conditions, since:
     //
-    //   1. During TraceRuntime, CurrentThreadIsHeapBusy() is true, but the
+    //   1. During TraceRuntime, RuntimeHeapIsBusy() is true, but the
     //      tracer might not be a marking tracer.
     //   2. When a write barrier executes, IsMarkingTracer is true, but
-    //      CurrentThreadIsHeapBusy() will be false.
+    //      RuntimeHeapIsBusy() will be false.
 
-    return JS::CurrentThreadIsHeapCollecting() && trc->isMarkingTracer();
+    return JS::RuntimeHeapIsCollecting() && trc->isMarkingTracer();
 }
 
 void
 RegExpObject::trace(JSTracer* trc)
 {
     TraceNullableEdge(trc, &sharedRef(), "RegExpObject shared");
 }
 
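
Cases 1 and 2 in the comment are why neither check suffices on its own; together they select exactly the normal-marking traces. A hypothetical trace hook showing the intended use (the class and its members are illustrative, with shared_ assumed to be a barriered field):

    void MyObject::trace(JSTracer* trc)
    {
        // Drop a purgeable cache only when the GC itself is marking, never
        // from a write barrier or from TraceRuntime.
        if (IsMarkingTrace(trc))
            cache_ = nullptr;
        TraceNullableEdge(trc, &shared_, "MyObject shared");
    }
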
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -127,16 +127,17 @@ JSRuntime::JSRuntime(JSRuntime* parentRu
 #ifdef DEBUG
     activeThreadHasExclusiveAccess(false),
 #endif
     scriptDataLock(mutexid::RuntimeScriptData),
 #ifdef DEBUG
     activeThreadHasScriptDataAccess(false),
 #endif
     numActiveHelperThreadZones(0),
+    heapState_(JS::HeapState::Idle),
     numRealms(0),
     localeCallbacks(nullptr),
     defaultLocale(nullptr),
     profilingScripts(false),
     scriptAndCountsVector(nullptr),
     lcovOutput_(),
     jitRuntime_(nullptr),
     selfHostingGlobal_(nullptr),
@@ -250,17 +251,17 @@ JSRuntime::init(JSContext* cx, uint32_t 
         return false;
 
     return true;
 }
 
 void
 JSRuntime::destroyRuntime()
 {
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
     MOZ_ASSERT(childRuntimeCount == 0);
     MOZ_ASSERT(initialized_);
 
     sharedIntlData.ref().destroyInstance();
 
     if (gcInitialized) {
         /*
          * Finish any in-progress GCs first. This ensures the parseWaitingOnGC
@@ -721,17 +722,17 @@ JSRuntime::updateMallocCounter(size_t nb
     gc.updateMallocCounter(nbytes);
 }
 
 JS_FRIEND_API(void*)
 JSRuntime::onOutOfMemory(AllocFunction allocFunc, size_t nbytes, void* reallocPtr, JSContext* maybecx)
 {
     MOZ_ASSERT_IF(allocFunc != AllocFunction::Realloc, !reallocPtr);
 
-    if (JS::CurrentThreadIsHeapBusy())
+    if (JS::RuntimeHeapIsBusy())
         return nullptr;
 
     if (!oom::IsSimulatedOOMAllocation()) {
         /*
          * Retry when we are done with the background sweeping and have stopped
          * all the allocations and released the empty GC chunks.
          */
         gc.onOutOfMallocMemory();
@@ -772,17 +773,17 @@ JSRuntime::activeGCInAtomsZone()
     Zone* zone = unsafeAtomsZone();
     return (zone->needsIncrementalBarrier() && !gc.isVerifyPreBarriersEnabled()) ||
            zone->wasGCStarted();
 }
 
 bool
 JSRuntime::createAtomsAddedWhileSweepingTable()
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
     MOZ_ASSERT(!atomsAddedWhileSweeping_);
 
     atomsAddedWhileSweeping_ = js_new<AtomSet>();
     if (!atomsAddedWhileSweeping_)
         return false;
 
     if (!atomsAddedWhileSweeping_->init()) {
         destroyAtomsAddedWhileSweepingTable();
@@ -790,17 +791,17 @@ JSRuntime::createAtomsAddedWhileSweeping
     }
 
     return true;
 }
 
 void
 JSRuntime::destroyAtomsAddedWhileSweepingTable()
 {
-    MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
     MOZ_ASSERT(atomsAddedWhileSweeping_);
 
     js_delete(atomsAddedWhileSweeping_.ref());
     atomsAddedWhileSweeping_ = nullptr;
 }
 
 void
 JSRuntime::setUsedByHelperThread(Zone* zone)
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -476,16 +476,19 @@ struct JSRuntime : public js::MallocProv
     js::Mutex scriptDataLock;
 #ifdef DEBUG
     bool activeThreadHasScriptDataAccess;
 #endif
 
     // Number of zones which may be operated on by helper threads.
     mozilla::Atomic<size_t> numActiveHelperThreadZones;
 
+    // Any GC activity occurring on this runtime.
+    mozilla::Atomic<JS::HeapState> heapState_;
+
     friend class js::AutoLockForExclusiveAccess;
     friend class js::AutoLockScriptData;
 
   public:
     void setUsedByHelperThread(JS::Zone* zone);
     void clearUsedByHelperThread(JS::Zone* zone);
 
     bool hasHelperThreadZones() const {
@@ -503,16 +506,20 @@ struct JSRuntime : public js::MallocProv
     bool currentThreadHasScriptDataAccess() const {
         if (!hasHelperThreadZones())
             return CurrentThreadCanAccessRuntime(this) && activeThreadHasScriptDataAccess;
 
         return scriptDataLock.ownedByCurrentThread();
     }
 #endif
 
+    JS::HeapState heapState() const {
+        return heapState_;
+    }
+
     // How many realms there are across all zones. This number includes
     // off-thread context realms, so it isn't necessarily equal to the
     // number of realms visited by RealmsIter.
     js::MainThreadData<size_t> numRealms;
 
     /* Locale-specific callbacks for string conversion. */
     js::MainThreadData<const JSLocaleCallbacks*> localeCallbacks;
 
@@ -707,17 +714,17 @@ struct JSRuntime : public js::MallocProv
     js::ExclusiveAccessLockOrGCTaskData<js::SymbolRegistry> symbolRegistry_;
 
   public:
     bool initializeAtoms(JSContext* cx);
     void finishAtoms();
     bool atomsAreFinished() const { return !atoms_; }
 
     js::AtomSet* atomsForSweeping() {
-        MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
+        MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
         return atoms_;
     }
 
     js::AtomSet& atoms(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(atoms_);
         return *atoms_;
     }
     js::AtomSet& unsafeAtoms() {
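
With this accessor in place, the public JS::RuntimeHeapState() query renamed in jspubtd.h above can presumably reduce to a read of the owning runtime's atomic. A sketch of the expected implementation (the TLS lookup of the current context is an assumption):

    JS_PUBLIC_API(JS::HeapState)
    JS::RuntimeHeapState()
    {
        // Runtime-wide, not per-thread, state.
        return js::TlsContext.get()->runtime()->heapState();
    }
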
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -4140,17 +4140,17 @@ TypeNewScript::trace(JSTracer* trc)
     TraceNullableEdge(trc, &templateObject_, "TypeNewScript_templateObject");
     TraceNullableEdge(trc, &initializedShape_, "TypeNewScript_initializedShape");
     TraceNullableEdge(trc, &initializedGroup_, "TypeNewScript_initializedGroup");
 }
 
 /* static */ void
 TypeNewScript::writeBarrierPre(TypeNewScript* newScript)
 {
-    if (JS::CurrentThreadIsHeapCollecting())
+    if (JS::RuntimeHeapIsCollecting())
         return;
 
     JS::Zone* zone = newScript->function()->zoneFromAnyThread();
     if (zone->needsIncrementalBarrier())
         newScript->trace(zone->barrierTracer());
 }
 
 void
@@ -4180,17 +4180,17 @@ TraceObjectKey(JSTracer* trc, TypeSet::O
 }
 
 void
 ConstraintTypeSet::trace(Zone* zone, JSTracer* trc)
 {
     checkMagic();
 
     // ConstraintTypeSets only hold strong references during minor collections.
-    MOZ_ASSERT(JS::CurrentThreadIsHeapMinorCollecting());
+    MOZ_ASSERT(JS::RuntimeHeapIsMinorCollecting());
 
     unsigned objectCount = baseObjectCount();
     if (objectCount >= 2) {
         unsigned oldCapacity = TypeHashSet::Capacity(objectCount);
         ObjectKey** oldArray = objectSet;
 
         MOZ_RELEASE_ASSERT(uintptr_t(oldArray[-1]) == oldCapacity);
 
@@ -4258,17 +4258,17 @@ ConstraintTypeSet::trace(Zone* zone, JST
 
 static inline void
 AssertGCStateForSweep(Zone* zone)
 {
     MOZ_ASSERT(zone->isGCSweepingOrCompacting());
 
     // IsAboutToBeFinalized doesn't work right on tenured objects when called
     // during a minor collection.
-    MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting());
+    MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());
 }
 
 void
 ConstraintTypeSet::sweep(const AutoSweepBase& sweep, Zone* zone,
                          AutoClearTypeInferenceStateOnOOM& oom)
 {
     AssertGCStateForSweep(zone);
 
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -876,16 +876,19 @@ pref("gfx.webrender.debug.texture-cache"
 pref("gfx.webrender.debug.render-targets", false);
 pref("gfx.webrender.debug.alpha-primitives", false);
 pref("gfx.webrender.debug.profiler", false);
 pref("gfx.webrender.debug.gpu-time-queries", false);
 pref("gfx.webrender.debug.gpu-sample-queries", false);
 pref("gfx.webrender.debug.disable-batching", false);
 pref("gfx.webrender.debug.epochs", false);
 pref("gfx.webrender.debug.compact-profiler", false);
+pref("gfx.webrender.debug.echo-driver-messages", false);
+pref("gfx.webrender.debug.new-frame-indicator", false);
+pref("gfx.webrender.debug.new-scene-indicator", false);
 
 pref("accessibility.browsewithcaret", false);
 pref("accessibility.warn_on_browsewithcaret", true);
 
 pref("accessibility.browsewithcaret_shortcut.enabled", true);
 
 #ifndef XP_MACOSX
 // Tab focus model bit field: