Bug 1575175 - Rename memory counter classes now they're used for both GC and malloc heaps r=sfink
author: Jon Coppeard <jcoppeard@mozilla.com>
date: Wed, 21 Aug 2019 15:14:31 +0000
changeset 489231 8a10768ff3853e7c052e87b97cb5d319020ede4b
parent 489230 eb21f2893c911f0849545fc69d55545443539442
child 489232 fca2c4d16bc5886dc38d2a66288280bcddbc5467
push id: 93206
push user: jcoppeard@mozilla.com
push date: Wed, 21 Aug 2019 15:25:47 +0000
treeherder: autoland@8a10768ff385
reviewers: sfink
bugs: 1575175
milestone: 70.0a1
Bug 1575175 - Rename memory counter classes now they're used for both GC and malloc heaps r=sfink

This renames:

  HeapSize::gcBytes -> bytes (it's not just for GC heaps any more)
  ZoneThreshold -> HeapThreshold (to go with HeapSize)
  HeapThreshold::triggerBytes -> bytes (what else could it be?)

I renamed the ZoneAllocator members to make them more uniform/consistent, so we now have gcHeapSize/gcHeapThreshold, mallocHeapSize/mallocHeapThreshold etc. I also renamed the heap threshold classes.

Differential Revision: https://phabricator.services.mozilla.com/D42868
js/public/GCAPI.h
js/src/builtin/TestingFunctions.cpp
js/src/gc/Allocator.cpp
js/src/gc/GC.cpp
js/src/gc/GCRuntime.h
js/src/gc/Nursery.cpp
js/src/gc/Scheduling.h
js/src/gc/Statistics.cpp
js/src/gc/Zone.cpp
js/src/gc/ZoneAllocator.h
js/src/jsfriendapi.cpp
--- a/js/public/GCAPI.h
+++ b/js/public/GCAPI.h
@@ -140,18 +140,18 @@ typedef enum JSGCParamKey {
    *                 following parameters.
    *                 See computeZoneHeapGrowthFactorForHeapSize() in GC.cpp
    *   ThresholdFactor: 1.0 for incremental collections or
    *                    JSGC_NON_INCREMENTAL_FACTOR or
    *                    JSGC_AVOID_INTERRUPT_FACTOR for non-incremental
    *                    collections.
    *
    * The RHS of the equation above is calculated and sets
-   * zone->threshold.gcTriggerBytes(). When usage.gcBytes() surpasses
-   * threshold.gcTriggerBytes() for a zone, the zone may be scheduled for a GC.
+   * zone->gcHeapThreshold.bytes(). When gcHeapSize.bytes() exceeds
+   * gcHeapThreshold.bytes() for a zone, the zone may be scheduled for a GC.
    */
 
   /**
    * GCs less than this far apart in milliseconds will be considered
    * 'high-frequency GCs'.
    *
    * Pref: javascript.options.mem.gc_high_frequency_time_limit_ms
    * Default: HighFrequencyThreshold
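
As a concrete illustration of the relation described in the comment above, here is a minimal standalone sketch; the helper names and the clamping are assumptions for illustration, not SpiderMonkey API:

    #include <algorithm>
    #include <cstddef>

    // Sketch: the trigger is the last heap size scaled by a growth factor and
    // a threshold factor (1.0 for incremental collections), clamped so a zone
    // cannot out-trigger the runtime-wide maximum.
    size_t computeTriggerBytesSketch(size_t lastBytes, float growthFactor,
                                     float thresholdFactor, size_t maxBytes) {
      float trigger = float(lastBytes) * growthFactor * thresholdFactor;
      return std::min(size_t(trigger), maxBytes);
    }

    // After this patch: a zone may be scheduled for GC once
    // gcHeapSize.bytes() exceeds gcHeapThreshold.bytes().
    bool mayScheduleZoneGC(size_t gcHeapBytes, size_t triggerBytes) {
      return gcHeapBytes >= triggerBytes;
    }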
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -456,32 +456,32 @@ static bool GC(JSContext* cx, unsigned a
     if (arg.isString()) {
       if (!JS_StringEqualsAscii(cx, arg.toString(), "shrinking", &shrinking)) {
         return false;
       }
     }
   }
 
 #ifndef JS_MORE_DETERMINISTIC
-  size_t preBytes = cx->runtime()->gc.heapSize.gcBytes();
+  size_t preBytes = cx->runtime()->gc.heapSize.bytes();
 #endif
 
   if (zone) {
     PrepareForDebugGC(cx->runtime());
   } else {
     JS::PrepareForFullGC(cx);
   }
 
   JSGCInvocationKind gckind = shrinking ? GC_SHRINK : GC_NORMAL;
   JS::NonIncrementalGC(cx, gckind, JS::GCReason::API);
 
   char buf[256] = {'\0'};
 #ifndef JS_MORE_DETERMINISTIC
   SprintfLiteral(buf, "before %zu, after %zu\n", preBytes,
-                 cx->runtime()->gc.heapSize.gcBytes());
+                 cx->runtime()->gc.heapSize.bytes());
 #endif
   return ReturnStringCopy(cx, args, buf);
 }
 
 static bool MinorGC(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   if (args.get(0) == BooleanValue(true)) {
     cx->runtime()->gc.storeBuffer().setAboutToOverflow(
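
The GC() testing function above just samples the renamed counter around a collection. A minimal sketch of that pattern, with a hypothetical collect callable standing in for JS::NonIncrementalGC:

    #include <cstddef>
    #include <cstdio>

    // Sketch: report the heap delta across a collection, as GC() does.
    template <typename HeapSizeT, typename CollectFn>
    void reportGCDelta(const HeapSizeT& heapSize, CollectFn collect) {
      size_t preBytes = heapSize.bytes();  // was heapSize.gcBytes()
      collect();
      std::printf("before %zu, after %zu\n", preBytes, heapSize.bytes());
    }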
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -368,17 +368,17 @@ bool GCRuntime::gcIfNeededAtAllocation(J
     gcIfRequested();
   }
 
   // If we have grown past our GC heap threshold while in the middle of
   // an incremental GC, we're growing faster than we're GCing, so stop
   // the world and do a full, non-incremental GC right now, if possible.
   Zone* zone = cx->zone();
   if (isIncrementalGCInProgress() &&
-      zone->zoneSize.gcBytes() > zone->threshold.gcTriggerBytes()) {
+      zone->gcHeapSize.bytes() > zone->gcHeapThreshold.bytes()) {
     PrepareZoneForGC(cx->zone());
     gc(GC_NORMAL, JS::GCReason::INCREMENTAL_TOO_SLOW);
   }
 
   return true;
 }
 
 template <typename T>
@@ -590,21 +590,21 @@ bool GCRuntime::wantBackgroundAllocation
 
 Arena* GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind,
                                 ShouldCheckThresholds checkThresholds,
                                 const AutoLockGC& lock) {
   MOZ_ASSERT(chunk->hasAvailableArenas());
 
   // Fail the allocation if we are over our heap size limits.
   if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
-      (heapSize.gcBytes() >= tunables.gcMaxBytes()))
+      (heapSize.bytes() >= tunables.gcMaxBytes()))
     return nullptr;
 
   Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
-  zone->zoneSize.addGCArena();
+  zone->gcHeapSize.addGCArena();
 
   // Trigger an incremental slice if needed.
   if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
     maybeAllocTriggerZoneGC(zone, ArenaSize);
   }
 
   return arena;
 }
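
Note that zone->gcHeapSize.addGCArena() above also updates the runtime-wide heapSize, because each zone's HeapSize chains to a parent (see the class in Scheduling.h below). A simplified standalone sketch of that roll-up:

    #include <cstddef>

    // Simplified stand-in for js::gc::HeapSize: additions propagate to the
    // parent, so per-zone accounting rolls up into the runtime total.
    struct HeapSizeSketch {
      explicit HeapSizeSketch(HeapSizeSketch* parent) : parent_(parent) {}
      size_t bytes() const { return bytes_; }
      void addBytes(size_t n) {
        bytes_ += n;
        if (parent_) {
          parent_->addBytes(n);
        }
      }
     private:
      HeapSizeSketch* parent_;
      size_t bytes_ = 0;
    };

    // Usage: HeapSizeSketch runtimeHeap(nullptr), zoneHeap(&runtimeHeap);
    // zoneHeap.addBytes(4096) leaves runtimeHeap.bytes() == 4096 as well.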
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -952,17 +952,17 @@ void Chunk::updateChunkListAfterFree(JSR
     rt->gc.recycleChunk(this, lock);
   }
 }
 
 void GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock) {
   MOZ_ASSERT(arena->allocated());
   MOZ_ASSERT(!arena->onDelayedMarkingList());
 
-  arena->zone->zoneSize.removeGCArena();
+  arena->zone->gcHeapSize.removeGCArena();
   arena->release(lock);
   arena->chunk()->releaseArena(rt, arena, lock);
 }
 
 GCRuntime::GCRuntime(JSRuntime* rt)
     : rt(rt),
       systemZone(nullptr),
       atomsZone(nullptr),
@@ -1792,17 +1792,17 @@ uint32_t GCRuntime::getParameter(JSGCPar
       return uint32_t(tunables.gcMaxBytes());
     case JSGC_MIN_NURSERY_BYTES:
       MOZ_ASSERT(tunables.gcMinNurseryBytes() < UINT32_MAX);
       return uint32_t(tunables.gcMinNurseryBytes());
     case JSGC_MAX_NURSERY_BYTES:
       MOZ_ASSERT(tunables.gcMaxNurseryBytes() < UINT32_MAX);
       return uint32_t(tunables.gcMaxNurseryBytes());
     case JSGC_BYTES:
-      return uint32_t(heapSize.gcBytes());
+      return uint32_t(heapSize.bytes());
     case JSGC_NURSERY_BYTES:
       return nursery().capacity();
     case JSGC_NUMBER:
       return uint32_t(number);
     case JSGC_MODE:
       return uint32_t(mode);
     case JSGC_UNUSED_CHUNKS:
       return uint32_t(emptyChunks(lock).count());
@@ -2049,25 +2049,25 @@ extern JS_FRIEND_API bool js::AddRawValu
   }
   return ok;
 }
 
 extern JS_FRIEND_API void js::RemoveRawValueRoot(JSContext* cx, Value* vp) {
   cx->runtime()->gc.removeRoot(vp);
 }
 
-float ZoneThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
+float HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
   float eagerTriggerFactor = highFrequencyGC
                                  ? HighFrequencyEagerAllocTriggerFactor
                                  : LowFrequencyEagerAllocTriggerFactor;
-  return eagerTriggerFactor * gcTriggerBytes();
+  return eagerTriggerFactor * bytes();
 }
 
 /* static */
-float ZoneHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
+float GCHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
     size_t lastBytes, const GCSchedulingTunables& tunables,
     const GCSchedulingState& state) {
   if (!tunables.isDynamicHeapGrowthEnabled()) {
     return 3.0f;
   }
 
   // For small zones, our collection heuristics do not matter much: favor
   // something simple in this case.
@@ -2109,53 +2109,51 @@ float ZoneHeapThreshold::computeZoneHeap
                              ((lastBytes - lowLimit) / (highLimit - lowLimit)));
 
   MOZ_ASSERT(factor >= minRatio);
   MOZ_ASSERT(factor <= maxRatio);
   return factor;
 }
 
 /* static */
-size_t ZoneHeapThreshold::computeZoneTriggerBytes(
+size_t GCHeapThreshold::computeZoneTriggerBytes(
     float growthFactor, size_t lastBytes, JSGCInvocationKind gckind,
     const GCSchedulingTunables& tunables, const AutoLockGC& lock) {
   size_t baseMin = gckind == GC_SHRINK
                        ? tunables.minEmptyChunkCount(lock) * ChunkSize
                        : tunables.gcZoneAllocThresholdBase();
   size_t base = Max(lastBytes, baseMin);
   float trigger = float(base) * growthFactor;
   float triggerMax =
       float(tunables.gcMaxBytes()) / tunables.nonIncrementalFactor();
   return size_t(Min(triggerMax, trigger));
 }
 
-void ZoneHeapThreshold::updateAfterGC(size_t lastBytes,
-                                      JSGCInvocationKind gckind,
-                                      const GCSchedulingTunables& tunables,
-                                      const GCSchedulingState& state,
-                                      const AutoLockGC& lock) {
+void GCHeapThreshold::updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
+                                    const GCSchedulingTunables& tunables,
+                                    const GCSchedulingState& state,
+                                    const AutoLockGC& lock) {
   float growthFactor =
       computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);
-  gcTriggerBytes_ =
+  bytes_ =
       computeZoneTriggerBytes(growthFactor, lastBytes, gckind, tunables, lock);
 }
 
 /* static */
-size_t ZoneMallocThreshold::computeZoneTriggerBytes(float growthFactor,
+size_t MallocHeapThreshold::computeZoneTriggerBytes(float growthFactor,
                                                     size_t lastBytes,
                                                     size_t baseBytes,
                                                     const AutoLockGC& lock) {
   return size_t(float(Max(lastBytes, baseBytes)) * growthFactor);
 }
 
-void ZoneMallocThreshold::updateAfterGC(size_t lastBytes, size_t baseBytes,
+void MallocHeapThreshold::updateAfterGC(size_t lastBytes, size_t baseBytes,
                                         float growthFactor,
                                         const AutoLockGC& lock) {
-  gcTriggerBytes_ =
-      computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
+  bytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
 }
 
 /* Compacting GC */
 
 bool js::gc::IsCurrentlyAnimating(const TimeStamp& lastAnimationTime,
                                   const TimeStamp& currentTime) {
   // Assume that we're currently animating if js::NotifyAnimationActivity has
   // been called in the last second.
@@ -3041,17 +3039,17 @@ void GCRuntime::clearRelocatedArenasWith
     AlwaysPoison(reinterpret_cast<void*>(arena->thingsStart()),
                  JS_MOVED_TENURED_PATTERN, arena->getThingsSpan(),
                  MemCheckKind::MakeNoAccess);
 
     // Don't count arenas as being freed by the GC if we purposely moved
     // everything to new arenas, as that will already have allocated a similar
     // number of arenas. This only happens for collections triggered by GC zeal.
     bool allArenasRelocated = ShouldRelocateAllArenas(reason);
-    arena->zone->zoneSize.removeBytes(ArenaSize, !allArenasRelocated);
+    arena->zone->gcHeapSize.removeBytes(ArenaSize, !allArenasRelocated);
 
     // Release the arena but don't return it to the chunk yet.
     arena->release(lock);
   }
 }
 
 void GCRuntime::protectAndHoldArenas(Arena* arenaList) {
   for (Arena* arena = arenaList; arena;) {
@@ -3408,18 +3406,18 @@ void GCRuntime::maybeAllocTriggerZoneGC(
     // Zones in use by a helper thread can't be collected.
     MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());
     return;
   }
 
   MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());
 
   size_t usedBytes =
-      zone->zoneSize.gcBytes();  // This already includes |nbytes|.
-  size_t thresholdBytes = zone->threshold.gcTriggerBytes();
+      zone->gcHeapSize.bytes();  // This already includes |nbytes|.
+  size_t thresholdBytes = zone->gcHeapThreshold.bytes();
   if (usedBytes < thresholdBytes) {
     return;
   }
 
   size_t niThreshold = thresholdBytes * tunables.nonIncrementalFactor();
   if (usedBytes >= niThreshold) {
     // We have passed the non-incremental threshold: immediately trigger a
     // non-incremental GC.
@@ -3454,49 +3452,49 @@ void GCRuntime::maybeAllocTriggerZoneGC(
     // has been performed.
     zone->gcDelayBytes = tunables.zoneAllocDelayBytes();
     return;
   }
 }
 
 void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
                                       const HeapSize& heap,
-                                      const ZoneThreshold& threshold,
+                                      const HeapThreshold& threshold,
                                       JS::GCReason reason) {
   rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc), heap, threshold,
                                   reason);
 }
 
 void GCRuntime::maybeMallocTriggerZoneGC(Zone* zone) {
-  if (maybeMallocTriggerZoneGC(zone, zone->gcMallocBytes,
-                               zone->gcMallocThreshold,
+  if (maybeMallocTriggerZoneGC(zone, zone->mallocHeapSize,
+                               zone->mallocHeapThreshold,
                                JS::GCReason::TOO_MUCH_MALLOC)) {
     return;
   }
 
-  maybeMallocTriggerZoneGC(zone, zone->gcJitBytes, zone->gcJitThreshold,
+  maybeMallocTriggerZoneGC(zone, zone->jitHeapSize, zone->jitHeapThreshold,
                            JS::GCReason::TOO_MUCH_JIT_CODE);
 }
 
 bool GCRuntime::maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
-                                         const ZoneThreshold& threshold,
+                                         const HeapThreshold& threshold,
                                          JS::GCReason reason) {
   if (!CurrentThreadCanAccessRuntime(rt)) {
     // Zones in use by a helper thread can't be collected.
     MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone() ||
                JS::RuntimeHeapIsBusy());
     return false;
   }
 
   if (rt->heapState() != JS::HeapState::Idle) {
     return false;
   }
 
-  size_t usedBytes = heap.gcBytes();
-  size_t thresholdBytes = threshold.gcTriggerBytes();
+  size_t usedBytes = heap.bytes();
+  size_t thresholdBytes = threshold.bytes();
   if (usedBytes < thresholdBytes) {
     return false;
   }
 
   size_t niThreshold = thresholdBytes * tunables.nonIncrementalFactor();
   if (usedBytes >= niThreshold) {
     // We have passed the non-incremental threshold: immediately trigger a
     // non-incremental GC.
@@ -3571,33 +3569,34 @@ void GCRuntime::maybeGC() {
   }
 
   if (isIncrementalGCInProgress()) {
     return;
   }
 
   bool scheduledZones = false;
   for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
-    if (checkEagerAllocTrigger(zone->zoneSize, zone->threshold) ||
-        checkEagerAllocTrigger(zone->gcMallocBytes, zone->gcMallocThreshold)) {
+    if (checkEagerAllocTrigger(zone->gcHeapSize, zone->gcHeapThreshold) ||
+        checkEagerAllocTrigger(zone->mallocHeapSize,
+                               zone->mallocHeapThreshold)) {
       zone->scheduleGC();
       scheduledZones = true;
     }
   }
 
   if (scheduledZones) {
     startGC(GC_NORMAL, JS::GCReason::EAGER_ALLOC_TRIGGER);
   }
 }
 
 bool GCRuntime::checkEagerAllocTrigger(const HeapSize& size,
-                                       const ZoneThreshold& threshold) {
+                                       const HeapThreshold& threshold) {
   float thresholdBytes =
       threshold.eagerAllocTrigger(schedulingState.inHighFrequencyGCMode());
-  float usedBytes = size.gcBytes();
+  float usedBytes = size.bytes();
   if (usedBytes <= 1024 * 1024 || usedBytes < thresholdBytes) {
     return false;
   }
 
   stats().recordTrigger(usedBytes, thresholdBytes);
   return true;
 }
 
@@ -7365,38 +7364,38 @@ GCRuntime::IncrementalResult GCRuntime::
   }
 
   AbortReason resetReason = AbortReason::None;
   for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
     if (!zone->canCollect()) {
       continue;
     }
 
-    if (zone->zoneSize.gcBytes() >=
-        zone->threshold.nonIncrementalTriggerBytes(tunables)) {
+    if (zone->gcHeapSize.bytes() >=
+        zone->gcHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
       CheckZoneIsScheduled(zone, reason, "GC bytes");
       budget.makeUnlimited();
       stats().nonincremental(AbortReason::GCBytesTrigger);
       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
         resetReason = AbortReason::GCBytesTrigger;
       }
     }
 
-    if (zone->gcMallocBytes.gcBytes() >=
-        zone->gcMallocThreshold.nonIncrementalTriggerBytes(tunables)) {
+    if (zone->mallocHeapSize.bytes() >=
+        zone->mallocHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
       CheckZoneIsScheduled(zone, reason, "malloc bytes");
       budget.makeUnlimited();
       stats().nonincremental(AbortReason::MallocBytesTrigger);
       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
         resetReason = AbortReason::MallocBytesTrigger;
       }
     }
 
-    if (zone->gcJitBytes.gcBytes() >=
-        zone->gcJitThreshold.nonIncrementalTriggerBytes(tunables)) {
+    if (zone->jitHeapSize.bytes() >=
+        zone->jitHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
       CheckZoneIsScheduled(zone, reason, "JIT code bytes");
       budget.makeUnlimited();
       stats().nonincremental(AbortReason::JitCodeBytesTrigger);
       if (zone->wasGCStarted() && zone->gcState() > Zone::Sweep) {
         resetReason = AbortReason::JitCodeBytesTrigger;
       }
     }
 
@@ -7430,21 +7429,21 @@ static void ScheduleZones(GCRuntime* gc)
     // To avoid resets, continue to collect any zones that were being
     // collected in a previous slice.
     if (gc->isIncrementalGCInProgress() && zone->wasGCStarted()) {
       zone->scheduleGC();
     }
 
     // This is a heuristic to reduce the total number of collections.
     bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
-    if (zone->zoneSize.gcBytes() >=
-            zone->threshold.eagerAllocTrigger(inHighFrequencyMode) ||
-        zone->gcMallocBytes.gcBytes() >=
-            zone->gcMallocThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
-        zone->gcJitBytes.gcBytes() >= zone->gcJitThreshold.gcTriggerBytes()) {
+    if (zone->gcHeapSize.bytes() >=
+            zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+        zone->mallocHeapSize.bytes() >=
+            zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
+        zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.bytes()) {
       zone->scheduleGC();
     }
   }
 }
 
 static void UnscheduleZones(GCRuntime* gc) {
   for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
     zone->unscheduleGC();
@@ -8252,17 +8251,17 @@ void GCRuntime::mergeRealms(Realm* sourc
     MOZ_ASSERT(r.get() == source);
   }
 
   // Merge the allocator, stats and UIDs in source's zone into target's zone.
   target->zone()->arenas.adoptArenas(&source->zone()->arenas,
                                      targetZoneIsCollecting);
   target->zone()->addTenuredAllocsSinceMinorGC(
       source->zone()->getAndResetTenuredAllocsSinceMinorGC());
-  target->zone()->zoneSize.adopt(source->zone()->zoneSize);
+  target->zone()->gcHeapSize.adopt(source->zone()->gcHeapSize);
   target->zone()->adoptUniqueIds(source->zone());
   target->zone()->adoptMallocBytes(source->zone());
 
   // Merge other info in source's zone into target's zone.
   target->zone()->types.typeLifoAlloc().transferFrom(
       &source->zone()->types.typeLifoAlloc());
   MOZ_RELEASE_ASSERT(source->zone()->types.sweepTypeLifoAlloc.ref().isEmpty());
 
@@ -8856,17 +8855,17 @@ uint64_t js::gc::NextCellUniqueId(JSRunt
 }
 
 namespace js {
 namespace gc {
 namespace MemInfo {
 
 static bool GCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->runtime()->gc.heapSize.gcBytes()));
+  args.rval().setNumber(double(cx->runtime()->gc.heapSize.bytes()));
   return true;
 }
 
 static bool GCMaxBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   args.rval().setNumber(double(cx->runtime()->gc.tunables.gcMaxBytes()));
   return true;
 }
@@ -8899,45 +8898,45 @@ static bool MinorGCCountGetter(JSContext
 static bool GCSliceCountGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   args.rval().setNumber(double(cx->runtime()->gc.gcSliceCount()));
   return true;
 }
 
 static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->zoneSize.gcBytes()));
+  args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
   return true;
 }
 
 static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->threshold.gcTriggerBytes()));
+  args.rval().setNumber(double(cx->zone()->gcHeapThreshold.bytes()));
   return true;
 }
 
 static bool ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   bool highFrequency =
       cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
   args.rval().setNumber(
-      double(cx->zone()->threshold.eagerAllocTrigger(highFrequency)));
+      double(cx->zone()->gcHeapThreshold.eagerAllocTrigger(highFrequency)));
   return true;
 }
 
 static bool ZoneMallocBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->gcMallocBytes.gcBytes()));
+  args.rval().setNumber(double(cx->zone()->mallocHeapSize.bytes()));
   return true;
 }
 
 static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc,
                                          Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->gcMallocThreshold.gcTriggerBytes()));
+  args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.bytes()));
   return true;
 }
 
 static bool ZoneGCDelayBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   args.rval().setNumber(double(cx->zone()->gcDelayBytes));
   return true;
 }
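
checkEagerAllocTrigger above combines a frequency-dependent factor with a 1 MiB floor. A sketch of that decision; the factor values here are assumptions for illustration (the real constants live in gc/Scheduling.h):

    #include <cstddef>

    // Sketch of GCRuntime::checkEagerAllocTrigger's decision. The factors
    // below are assumed values, not the real tunables.
    bool wouldEagerTrigger(size_t usedBytes, size_t thresholdBytes,
                           bool highFrequencyGC) {
      const float factor = highFrequencyGC ? 0.85f   // assumed high-freq factor
                                           : 0.90f;  // assumed low-freq factor
      float eagerTrigger = factor * float(thresholdBytes);
      // Zones holding no more than 1 MiB never eager-trigger.
      return usedBytes > 1024 * 1024 && float(usedBytes) >= eagerTrigger;
    }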
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -262,24 +262,24 @@ class GCRuntime {
   MOZ_MUST_USE bool triggerGC(JS::GCReason reason);
   // Check whether to trigger a zone GC after allocating GC cells. During an
   // incremental GC, optionally count |nbytes| towards the threshold for
   // performing the next slice.
   void maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes = 0);
   // Check whether to trigger a zone GC after malloc memory.
   void maybeMallocTriggerZoneGC(Zone* zone);
   bool maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
-                                const ZoneThreshold& threshold,
+                                const HeapThreshold& threshold,
                                 JS::GCReason reason);
   // The return value indicates if we were able to do the GC.
   bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
                      size_t thresholdBytes);
   void maybeGC();
   bool checkEagerAllocTrigger(const HeapSize& size,
-                              const ZoneThreshold& threshold);
+                              const HeapThreshold& threshold);
   // The return value indicates whether a major GC was performed.
   bool gcIfRequested();
   void gc(JSGCInvocationKind gckind, JS::GCReason reason);
   void startGC(JSGCInvocationKind gckind, JS::GCReason reason,
                int64_t millis = 0);
   void gcSlice(JS::GCReason reason, int64_t millis = 0);
   void finishGC(JS::GCReason reason);
   void abortGC();
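
For reference, call sites now pair each renamed HeapSize member with its matching HeapThreshold, as in this sketch taken from the GC.cpp hunk above:

    // Each counter travels with its threshold and a dedicated reason:
    // maybeMallocTriggerZoneGC(zone, zone->mallocHeapSize,
    //                          zone->mallocHeapThreshold,
    //                          JS::GCReason::TOO_MUCH_MALLOC);
    // maybeMallocTriggerZoneGC(zone, zone->jitHeapSize, zone->jitHeapThreshold,
    //                          JS::GCReason::TOO_MUCH_JIT_CODE);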
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -952,18 +952,18 @@ void js::Nursery::collect(JS::GCReason r
     // also clamp the parameter.
     poisonAndInitCurrentChunk(previousGC.nurseryUsedBytes);
   }
 
   const float promotionRate = doPretenuring(rt, reason, tenureCounts);
 
   // We ignore gcMaxBytes when allocating for minor collection. However, if we
   // overflowed, we disable the nursery. The next time we allocate, we'll fail
-  // because gcBytes >= gcMaxBytes.
-  if (rt->gc.heapSize.gcBytes() >= tunables().gcMaxBytes()) {
+  // because bytes >= gcMaxBytes.
+  if (rt->gc.heapSize.bytes() >= tunables().gcMaxBytes()) {
     disable();
   }
 
   endProfile(ProfileKey::Total);
   rt->gc.incMinorGcNumber();
 
   TimeDuration totalTime = profileDurations_[ProfileKey::Total];
   rt->addTelemetry(JS_TELEMETRY_GC_MINOR_US, totalTime.ToMicroseconds());
--- a/js/src/gc/Scheduling.h
+++ b/js/src/gc/Scheduling.h
@@ -255,17 +255,17 @@
  *      If we do not return to the event loop before getting all the way to our
  *      gc trigger bytes then MAYBEGC will never fire. To avoid OOMing, we
  *      succeed the current allocation and set the script interrupt so that we
  *      will (hopefully) do a GC before we overflow our max and have to raise
  *      an OOM exception for the script.
  *
  *          Assumptions:
  *            -> Common web scripts will return to the event loop before using
- *               10% of the current gcTriggerBytes worth of GC memory.
+ *               10% of the current triggerBytes worth of GC memory.
  *
  *      ALLOC_TRIGGER (incremental)
  *      ---------------------------
  *      In practice the above trigger is rough: if a website is just on the
  *      cusp, sometimes it will trigger a non-incremental GC moments before
  *      returning to the event loop, where it could have done an incremental
  *      GC. Thus, we recently added an incremental version of the above with a
  *      substantially lower threshold, so that we have a soft limit here. If
@@ -343,40 +343,40 @@ class GCSchedulingTunables {
    * Minimum and maximum nursery size for each runtime.
    */
   MainThreadData<size_t> gcMinNurseryBytes_;
   MainThreadData<size_t> gcMaxNurseryBytes_;
 
   /*
    * JSGC_ALLOCATION_THRESHOLD
    *
-   * The base value used to compute zone->threshold.gcTriggerBytes(). When
-   * usage.gcBytes() surpasses threshold.gcTriggerBytes() for a zone, the
-   * zone may be scheduled for a GC, depending on the exact circumstances.
+   * The base value used to compute zone->gcHeapThreshold.bytes(). When
+   * gcHeapSize.bytes() exceeds gcHeapThreshold.bytes() for a zone, the zone
+   * may be scheduled for a GC, depending on the exact circumstances.
    */
   MainThreadOrGCTaskData<size_t> gcZoneAllocThresholdBase_;
 
   /*
    * JSGC_NON_INCREMENTAL_FACTOR
    *
-   * Multiple of threshold.gcBytes() which triggers a non-incremental GC.
+   * Multiple of threshold.bytes() which triggers a non-incremental GC.
    */
   UnprotectedData<float> nonIncrementalFactor_;
 
   /*
    * JSGC_AVOID_INTERRUPT_FACTOR
    *
-   * Multiple of threshold.gcBytes() which triggers a new incremental GC when
+   * Multiple of threshold.bytes() which triggers a new incremental GC when
    * doing so would interrupt an ongoing incremental GC.
    */
   UnprotectedData<float> avoidInterruptFactor_;
 
   /*
-   * Number of bytes to allocate between incremental slices in GCs triggered
-   * by the zone allocation threshold.
+   * Number of bytes to allocate between incremental slices in GCs triggered by
+   * the zone allocation threshold.
    *
    * This value does not have a JSGCParamKey parameter yet.
    */
   UnprotectedData<size_t> zoneAllocDelayBytes_;
 
   /*
    * JSGC_DYNAMIC_HEAP_GROWTH
    *
@@ -564,144 +564,146 @@ class GCSchedulingState {
                                const mozilla::TimeStamp& currentTime,
                                const GCSchedulingTunables& tunables) {
     inHighFrequencyGCMode_ =
         tunables.isDynamicHeapGrowthEnabled() && !lastGCTime.IsNull() &&
         lastGCTime + tunables.highFrequencyThreshold() > currentTime;
   }
 };
 
+using AtomicByteCount =
+    mozilla::Atomic<size_t, mozilla::ReleaseAcquire,
+                    mozilla::recordreplay::Behavior::DontPreserve>;
+
 /*
- * Tracks the used sizes for owned heap data and automatically maintains the
- * memory usage relationship between GCRuntime and Zones.
+ * Tracks the size of allocated data. This is used for both GC and malloc data.
+ * It automatically maintains the memory usage relationship between parent and
+ * child instances, i.e. between those in a GCRuntime and its Zones.
  */
 class HeapSize {
   /*
-   * A heap usage that contains our parent's heap usage, or null if this is
-   * the top-level usage container.
+   * An instance that contains our parent's heap usage, or null if this is the
+   * top-level usage container.
    */
   HeapSize* const parent_;
 
   /*
-   * The approximate number of bytes in use on the GC heap, to the nearest
-   * ArenaSize. This does not include any malloc data. It also does not
-   * include not-actively-used addresses that are still reserved at the OS
-   * level for GC usage. It is atomic because it is updated by both the active
-   * and GC helper threads.
+   * The number of bytes in use. For GC heaps this is approximate to the nearest
+   * ArenaSize. It is atomic because it is updated by both the active and GC
+   * helper threads.
    */
-  mozilla::Atomic<size_t, mozilla::ReleaseAcquire,
-                  mozilla::recordreplay::Behavior::DontPreserve>
-      gcBytes_;
+  AtomicByteCount bytes_;
 
   /*
    * The number of bytes retained after the last collection. This is updated
    * dynamically during incremental GC. It does not include allocations that
    * happen during a GC.
    */
-  mozilla::Atomic<size_t, mozilla::ReleaseAcquire,
-                  mozilla::recordreplay::Behavior::DontPreserve>
-      retainedBytes_;
+  AtomicByteCount retainedBytes_;
 
  public:
-  explicit HeapSize(HeapSize* parent) : parent_(parent), gcBytes_(0) {}
+  explicit HeapSize(HeapSize* parent) : parent_(parent), bytes_(0) {}
 
-  size_t gcBytes() const { return gcBytes_; }
+  size_t bytes() const { return bytes_; }
   size_t retainedBytes() const { return retainedBytes_; }
 
-  void updateOnGCStart() { retainedBytes_ = size_t(gcBytes_); }
+  void updateOnGCStart() { retainedBytes_ = size_t(bytes_); }
 
   void addGCArena() { addBytes(ArenaSize); }
   void removeGCArena() {
     MOZ_ASSERT(retainedBytes_ >= ArenaSize);
     removeBytes(ArenaSize, true /* only sweeping removes arenas */);
   }
 
   void addBytes(size_t nbytes) {
-    mozilla::DebugOnly<size_t> initialBytes(gcBytes_);
+    mozilla::DebugOnly<size_t> initialBytes(bytes_);
     MOZ_ASSERT(initialBytes + nbytes > initialBytes);
-    gcBytes_ += nbytes;
+    bytes_ += nbytes;
     if (parent_) {
       parent_->addBytes(nbytes);
     }
   }
   void removeBytes(size_t nbytes, bool wasSwept) {
     if (wasSwept) {
      // TODO: We would like to assert that retainedBytes_ >= nbytes here, but
       // we can't do that yet, so clamp the result to zero.
       retainedBytes_ = nbytes <= retainedBytes_ ? retainedBytes_ - nbytes : 0;
     }
-    MOZ_ASSERT(gcBytes_ >= nbytes);
-    gcBytes_ -= nbytes;
+    MOZ_ASSERT(bytes_ >= nbytes);
+    bytes_ -= nbytes;
     if (parent_) {
       parent_->removeBytes(nbytes, wasSwept);
     }
   }
 
   /* Pair to adoptArenas. Adopts the attendant usage statistics. */
   void adopt(HeapSize& source) {
     // Skip retainedBytes_: we never adopt zones that are currently being
     // collected.
-    gcBytes_ += source.gcBytes_;
+    bytes_ += source.bytes_;
     source.retainedBytes_ = 0;
-    source.gcBytes_ = 0;
+    source.bytes_ = 0;
   }
 };
 
-// Base class for GC heap and malloc thresholds.
-class ZoneThreshold {
+// A heap size threshold used to trigger GC. This is an abstract base class for
+// GC heap and malloc thresholds defined below.
+class HeapThreshold {
  protected:
+  HeapThreshold() = default;
+
   // GC trigger threshold.
-  mozilla::Atomic<size_t, mozilla::Relaxed,
-                  mozilla::recordreplay::Behavior::DontPreserve>
-      gcTriggerBytes_;
+  AtomicByteCount bytes_;
 
  public:
-  size_t gcTriggerBytes() const { return gcTriggerBytes_; }
+  size_t bytes() const { return bytes_; }
   size_t nonIncrementalTriggerBytes(GCSchedulingTunables& tunables) const {
-    return gcTriggerBytes_ * tunables.nonIncrementalFactor();
+    return bytes_ * tunables.nonIncrementalFactor();
   }
   float eagerAllocTrigger(bool highFrequencyGC) const;
 };
 
-// This class encapsulates the data that determines when we need to do a zone GC
-// base on GC heap size.
-class ZoneHeapThreshold : public ZoneThreshold {
+// A heap threshold that is based on a multiple of the retained size after the
+// last collection, adjusted for collection frequency and retained
+// size. This is used to determine when to do a zone GC based on GC heap size.
+class GCHeapThreshold : public HeapThreshold {
  public:
   void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
                      const GCSchedulingTunables& tunables,
                      const GCSchedulingState& state, const AutoLockGC& lock);
 
  private:
   static float computeZoneHeapGrowthFactorForHeapSize(
       size_t lastBytes, const GCSchedulingTunables& tunables,
       const GCSchedulingState& state);
   static size_t computeZoneTriggerBytes(float growthFactor, size_t lastBytes,
                                         JSGCInvocationKind gckind,
                                         const GCSchedulingTunables& tunables,
                                         const AutoLockGC& lock);
 };
 
-// This class encapsulates the data that determines when we need to do a zone
+// A heap threshold that is calculated as a constant multiple of the retained
+// size after the last collection. This is used to determine when to do a zone
 // GC based on malloc data.
-class ZoneMallocThreshold : public ZoneThreshold {
+class MallocHeapThreshold : public HeapThreshold {
  public:
   void updateAfterGC(size_t lastBytes, size_t baseBytes, float growthFactor,
                      const AutoLockGC& lock);
 
  private:
   static size_t computeZoneTriggerBytes(float growthFactor, size_t lastBytes,
                                         size_t baseBytes,
                                         const AutoLockGC& lock);
 };
 
-// A fixed threshold that determines when we need to do a zone GC based on
-// allocated JIT code.
-class ZoneFixedThreshold : public ZoneThreshold {
+// A fixed threshold that's used to determine when we need to do a zone GC based
+// on allocated JIT code.
+class JitHeapThreshold : public HeapThreshold {
  public:
-  explicit ZoneFixedThreshold(size_t bytes) { gcTriggerBytes_ = bytes; }
+  explicit JitHeapThreshold(size_t bytes) { bytes_ = bytes; }
 };
 
 #ifdef DEBUG
 
 // Counts memory associated with GC things in a zone.
 //
 // This records details of the cell the memory allocations is associated with to
 // check the correctness of the information provided. This is not present in opt
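
A condensed standalone view of the renamed hierarchy may help; this is a simplification (the real classes use mozilla::Atomic and the GC's tunables):

    #include <atomic>
    #include <cstddef>

    // HeapThreshold is the shared base; only the JIT variant fixes its value
    // at construction, mirroring the classes above.
    class HeapThresholdSketch {
     protected:
      HeapThresholdSketch() = default;
      std::atomic<size_t> bytes_{0};

     public:
      size_t bytes() const { return bytes_; }
    };

    class JitHeapThresholdSketch : public HeapThresholdSketch {
     public:
      explicit JitHeapThresholdSketch(size_t bytes) { bytes_ = bytes; }
    };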
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -985,44 +985,44 @@ void Statistics::printStats() {
 void Statistics::beginGC(JSGCInvocationKind kind,
                          const TimeStamp& currentTime) {
   slices_.clearAndFree();
   sccTimes.clearAndFree();
   gckind = kind;
   nonincrementalReason_ = gc::AbortReason::None;
 
   GCRuntime& gc = runtime->gc;
-  preTotalHeapBytes = gc.heapSize.gcBytes();
+  preTotalHeapBytes = gc.heapSize.bytes();
 
   preCollectedHeapBytes = 0;
 
   startingMajorGCNumber = gc.majorGCCount();
   startingSliceNumber = gc.gcNumber();
 
   if (gc.lastGCTime()) {
     timeSinceLastGC = currentTime - gc.lastGCTime();
   }
 }
 
 void Statistics::measureInitialHeapSize() {
   MOZ_ASSERT(preCollectedHeapBytes == 0);
   for (GCZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
-    preCollectedHeapBytes += zone->zoneSize.gcBytes();
+    preCollectedHeapBytes += zone->gcHeapSize.bytes();
   }
 }
 
 void Statistics::adoptHeapSizeDuringIncrementalGC(Zone* mergedZone) {
   // A zone is being merged into a zone that's currently being collected so we
   // need to adjust our record of the total size of heap for collected zones.
   MOZ_ASSERT(runtime->gc.isIncrementalGCInProgress());
-  preCollectedHeapBytes += mergedZone->zoneSize.gcBytes();
+  preCollectedHeapBytes += mergedZone->gcHeapSize.bytes();
 }
 
 void Statistics::endGC() {
-  postTotalHeapBytes = runtime->gc.heapSize.gcBytes();
+  postTotalHeapBytes = runtime->gc.heapSize.bytes();
 
   sendGCTelemetry();
 
   thresholdTriggered = false;
 }
 
 void Statistics::sendGCTelemetry() {
   runtime->addTelemetry(JS_TELEMETRY_GC_IS_ZONE_GC,
@@ -1086,17 +1086,17 @@ void Statistics::sendGCTelemetry() {
     if (!nonincremental()) {
       runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_COUNT, slices_.length());
     }
   }
 
   size_t bytesSurvived = 0;
   for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
     if (zone->wasCollected()) {
-      bytesSurvived += zone->zoneSize.retainedBytes();
+      bytesSurvived += zone->gcHeapSize.retainedBytes();
     }
   }
 
   MOZ_ASSERT(preCollectedHeapBytes >= bytesSurvived);
   double survivalRate =
       100.0 * double(bytesSurvived) / double(preCollectedHeapBytes);
   runtime->addTelemetry(JS_TELEMETRY_GC_TENURED_SURVIVAL_RATE,
                         uint32_t(survivalRate));
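
The survival-rate telemetry above is simply the retained fraction expressed as a percentage; a self-contained example with made-up numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Hypothetical collection: 8 MiB of collected-zone heap before GC,
      // 2 MiB retained after sweeping.
      size_t bytesSurvived = 2 * 1024 * 1024;
      size_t preCollectedHeapBytes = 8 * 1024 * 1024;
      double survivalRate =
          100.0 * double(bytesSurvived) / double(preCollectedHeapBytes);
      std::printf("%u\n", unsigned(survivalRate));  // prints 25
      return 0;
    }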
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -26,54 +26,54 @@
 
 using namespace js;
 using namespace js::gc;
 
 Zone* const Zone::NotOnList = reinterpret_cast<Zone*>(1);
 
 ZoneAllocator::ZoneAllocator(JSRuntime* rt)
     : JS::shadow::Zone(rt, &rt->gc.marker),
-      zoneSize(&rt->gc.heapSize),
-      gcMallocBytes(nullptr),
-      gcJitBytes(nullptr),
-      gcJitThreshold(jit::MaxCodeBytesPerProcess * 0.8) {
+      gcHeapSize(&rt->gc.heapSize),
+      mallocHeapSize(nullptr),
+      jitHeapSize(nullptr),
+      jitHeapThreshold(jit::MaxCodeBytesPerProcess * 0.8) {
   AutoLockGC lock(rt);
   updateGCThresholds(rt->gc, GC_NORMAL, lock);
 }
 
 ZoneAllocator::~ZoneAllocator() {
 #ifdef DEBUG
-  gcMallocTracker.checkEmptyOnDestroy();
-  MOZ_ASSERT(zoneSize.gcBytes() == 0);
-  MOZ_ASSERT(gcMallocBytes.gcBytes() == 0);
-  MOZ_ASSERT(gcJitBytes.gcBytes() == 0);
+  mallocTracker.checkEmptyOnDestroy();
+  MOZ_ASSERT(gcHeapSize.bytes() == 0);
+  MOZ_ASSERT(mallocHeapSize.bytes() == 0);
+  MOZ_ASSERT(jitHeapSize.bytes() == 0);
 #endif
 }
 
 void ZoneAllocator::fixupAfterMovingGC() {
 #ifdef DEBUG
-  gcMallocTracker.fixupAfterMovingGC();
+  mallocTracker.fixupAfterMovingGC();
 #endif
 }
 
 void js::ZoneAllocator::updateMemoryCountersOnGCStart() {
-  zoneSize.updateOnGCStart();
-  gcMallocBytes.updateOnGCStart();
+  gcHeapSize.updateOnGCStart();
+  mallocHeapSize.updateOnGCStart();
 }
 
 void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
                                            JSGCInvocationKind invocationKind,
                                            const js::AutoLockGC& lock) {
   // This is called repeatedly during a GC to update thresholds as memory is
   // freed.
-  threshold.updateAfterGC(zoneSize.retainedBytes(), invocationKind, gc.tunables,
-                          gc.schedulingState, lock);
-  gcMallocThreshold.updateAfterGC(gcMallocBytes.retainedBytes(),
-                                  gc.tunables.mallocThresholdBase(),
-                                  gc.tunables.mallocGrowthFactor(), lock);
+  gcHeapThreshold.updateAfterGC(gcHeapSize.retainedBytes(), invocationKind,
+                                gc.tunables, gc.schedulingState, lock);
+  mallocHeapThreshold.updateAfterGC(mallocHeapSize.retainedBytes(),
+                                    gc.tunables.mallocThresholdBase(),
+                                    gc.tunables.mallocGrowthFactor(), lock);
 }
 
 void ZoneAllocPolicy::decMemory(size_t nbytes) {
   // Unfortunately we don't have enough context here to know whether we're being
   // called on behalf of the collector so we have to do a TLS lookup to find
   // out.
   JSContext* cx = TlsContext.get();
   zone_->decPolicyMemory(this, nbytes, cx->defaultFreeOp()->isCollecting());
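
The threshold updates above follow a simple lifecycle: snapshot the counters at GC start, then recompute each threshold from what survived. A hypothetical driver, sketched as comments:

    // At GC start, retained counts are snapshotted from the live counters:
    // zone->updateMemoryCountersOnGCStart();  // gcHeapSize, mallocHeapSize
    //
    // During and after sweeping, thresholds are derived from retained bytes:
    // zone->updateGCThresholds(gc, GC_NORMAL, lock);
    //   -> gcHeapThreshold.updateAfterGC(gcHeapSize.retainedBytes(), ...)
    //   -> mallocHeapThreshold.updateAfterGC(mallocHeapSize.retainedBytes(), ...)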
--- a/js/src/gc/ZoneAllocator.h
+++ b/js/src/gc/ZoneAllocator.h
@@ -25,17 +25,17 @@ class ZoneAllocator;
 
 #ifdef DEBUG
 bool CurrentThreadIsGCSweeping();
 #endif
 
 namespace gc {
 void MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
                               const HeapSize& heap,
-                              const ZoneThreshold& threshold,
+                              const HeapThreshold& threshold,
                               JS::GCReason reason);
 }
 
 // Base class of JS::Zone that provides malloc memory allocation and accounting.
 class ZoneAllocator : public JS::shadow::Zone,
                       public js::MallocProvider<JS::Zone> {
  protected:
   explicit ZoneAllocator(JSRuntime* rt);
@@ -49,150 +49,150 @@ class ZoneAllocator : public JS::shadow:
   }
 
   MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc,
                                    arena_id_t arena, size_t nbytes,
                                    void* reallocPtr = nullptr);
   void reportAllocationOverflow() const;
 
   void adoptMallocBytes(ZoneAllocator* other) {
-    gcMallocBytes.adopt(other->gcMallocBytes);
-    gcJitBytes.adopt(other->gcJitBytes);
+    mallocHeapSize.adopt(other->mallocHeapSize);
+    jitHeapSize.adopt(other->jitHeapSize);
 #ifdef DEBUG
-    gcMallocTracker.adopt(other->gcMallocTracker);
+    mallocTracker.adopt(other->mallocTracker);
 #endif
   }
 
   void updateMemoryCountersOnGCStart();
   void updateGCThresholds(gc::GCRuntime& gc, JSGCInvocationKind invocationKind,
                           const js::AutoLockGC& lock);
 
   // Memory accounting APIs for malloc memory owned by GC cells.
 
   void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
     MOZ_ASSERT(cell);
     MOZ_ASSERT(nbytes);
-    gcMallocBytes.addBytes(nbytes);
+    mallocHeapSize.addBytes(nbytes);
 
     // We don't currently check GC triggers here.
 
 #ifdef DEBUG
-    gcMallocTracker.trackMemory(cell, nbytes, use);
+    mallocTracker.trackMemory(cell, nbytes, use);
 #endif
   }
 
   void removeCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use,
                         bool wasSwept = false) {
     MOZ_ASSERT(cell);
     MOZ_ASSERT(nbytes);
     MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept);
 
-    gcMallocBytes.removeBytes(nbytes, wasSwept);
+    mallocHeapSize.removeBytes(nbytes, wasSwept);
 
 #ifdef DEBUG
-    gcMallocTracker.untrackMemory(cell, nbytes, use);
+    mallocTracker.untrackMemory(cell, nbytes, use);
 #endif
   }
 
   void swapCellMemory(js::gc::Cell* a, js::gc::Cell* b, js::MemoryUse use) {
 #ifdef DEBUG
-    gcMallocTracker.swapMemory(a, b, use);
+    mallocTracker.swapMemory(a, b, use);
 #endif
   }
 
 #ifdef DEBUG
   void registerPolicy(js::ZoneAllocPolicy* policy) {
-    return gcMallocTracker.registerPolicy(policy);
+    return mallocTracker.registerPolicy(policy);
   }
   void unregisterPolicy(js::ZoneAllocPolicy* policy) {
-    return gcMallocTracker.unregisterPolicy(policy);
+    return mallocTracker.unregisterPolicy(policy);
   }
   void movePolicy(js::ZoneAllocPolicy* dst, js::ZoneAllocPolicy* src) {
-    return gcMallocTracker.movePolicy(dst, src);
+    return mallocTracker.movePolicy(dst, src);
   }
 #endif
 
   void incPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes) {
     MOZ_ASSERT(nbytes);
-    gcMallocBytes.addBytes(nbytes);
+    mallocHeapSize.addBytes(nbytes);
 
 #ifdef DEBUG
-    gcMallocTracker.incPolicyMemory(policy, nbytes);
+    mallocTracker.incPolicyMemory(policy, nbytes);
 #endif
 
     maybeMallocTriggerZoneGC();
   }
   void decPolicyMemory(js::ZoneAllocPolicy* policy, size_t nbytes,
                        bool wasSwept) {
     MOZ_ASSERT(nbytes);
     MOZ_ASSERT_IF(CurrentThreadIsGCSweeping(), wasSwept);
 
-    gcMallocBytes.removeBytes(nbytes, wasSwept);
+    mallocHeapSize.removeBytes(nbytes, wasSwept);
 
 #ifdef DEBUG
-    gcMallocTracker.decPolicyMemory(policy, nbytes);
+    mallocTracker.decPolicyMemory(policy, nbytes);
 #endif
   }
 
   void incJitMemory(size_t nbytes) {
     MOZ_ASSERT(nbytes);
-    gcJitBytes.addBytes(nbytes);
-    maybeTriggerZoneGC(gcJitBytes, gcJitThreshold,
+    jitHeapSize.addBytes(nbytes);
+    maybeTriggerZoneGC(jitHeapSize, jitHeapThreshold,
                        JS::GCReason::TOO_MUCH_JIT_CODE);
   }
   void decJitMemory(size_t nbytes) {
     MOZ_ASSERT(nbytes);
-    gcJitBytes.removeBytes(nbytes, true);
+    jitHeapSize.removeBytes(nbytes, true);
   }
 
   // Check malloc allocation threshold and trigger a zone GC if necessary.
   void maybeMallocTriggerZoneGC() {
-    maybeTriggerZoneGC(gcMallocBytes, gcMallocThreshold,
+    maybeTriggerZoneGC(mallocHeapSize, mallocHeapThreshold,
                        JS::GCReason::TOO_MUCH_MALLOC);
   }
 
  private:
   void maybeTriggerZoneGC(const js::gc::HeapSize& heap,
-                          const js::gc::ZoneThreshold& threshold,
+                          const js::gc::HeapThreshold& threshold,
                           JS::GCReason reason) {
-    if (heap.gcBytes() >= threshold.gcTriggerBytes()) {
+    if (heap.bytes() >= threshold.bytes()) {
       gc::MaybeMallocTriggerZoneGC(runtimeFromAnyThread(), this, heap,
                                    threshold, reason);
     }
   }
 
  public:
-  // Track GC heap size under this Zone.
-  js::gc::HeapSize zoneSize;
+  // The size of allocated GC arenas in this zone.
+  js::gc::HeapSize gcHeapSize;
 
-  // Thresholds used to trigger GC based on heap size.
-  js::gc::ZoneHeapThreshold threshold;
+  // Threshold used to trigger GC based on GC heap size.
+  js::gc::GCHeapThreshold gcHeapThreshold;
 
   // Amount of data to allocate before triggering a new incremental slice for
   // the current GC.
   js::MainThreadData<size_t> gcDelayBytes;
 
-  // Malloc counter used for allocations where size information is
-  // available. Used for some internal and all tracked external allocations.
-  js::gc::HeapSize gcMallocBytes;
+  // Amount of malloc data owned by GC things in this zone, including external
+  // allocations supplied by JS::AddAssociatedMemory.
+  js::gc::HeapSize mallocHeapSize;
 
-  // Thresholds used to trigger GC based on malloc allocations.
-  js::gc::ZoneMallocThreshold gcMallocThreshold;
+  // Threshold used to trigger GC based on malloc allocations.
+  js::gc::MallocHeapThreshold mallocHeapThreshold;
 
-  // Malloc counter used for JIT code allocation.
-  js::gc::HeapSize gcJitBytes;
+  // Amount of executable JIT code owned by GC things in this zone.
+  js::gc::HeapSize jitHeapSize;
 
-  // Thresholds used to trigger GC based on JIT allocations.
-  js::gc::ZoneFixedThreshold gcJitThreshold;
+  // Threshold used to trigger GC based on JIT allocations.
+  js::gc::JitHeapThreshold jitHeapThreshold;
 
  private:
 #ifdef DEBUG
   // In debug builds, malloc allocations can be tracked to make debugging easier
   // (possible?) if allocation and free sizes don't balance.
-  js::gc::MemoryTracker gcMallocTracker;
+  js::gc::MemoryTracker mallocTracker;
 #endif
 
   friend class js::gc::GCRuntime;
 };
 
 /*
  * Allocation policy that performs precise memory tracking on the zone. This
  * should be used for all containers associated with a GC thing or a zone.
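
A hedged usage sketch of the renamed accounting entry points; the cell, size, and MemoryUse value here are hypothetical:

    // When attaching a malloc buffer to a GC thing:
    // zone->addCellMemory(cell, nbytes, js::MemoryUse::ObjectElements);
    //
    // When the buffer is released (wasSwept is true if freed by the collector):
    // zone->removeCellMemory(cell, nbytes, js::MemoryUse::ObjectElements,
    //                        wasSwept);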
--- a/js/src/jsfriendapi.cpp
+++ b/js/src/jsfriendapi.cpp
@@ -1396,17 +1396,17 @@ JS_FRIEND_API JS::Value js::MaybeGetScri
   if (!object->is<ScriptSourceObject>()) {
     return UndefinedValue();
   }
 
   return object->as<ScriptSourceObject>().canonicalPrivate();
 }
 
 JS_FRIEND_API uint64_t js::GetGCHeapUsageForObjectZone(JSObject* obj) {
-  return obj->zone()->zoneSize.gcBytes();
+  return obj->zone()->gcHeapSize.bytes();
 }
 
 #ifdef DEBUG
 JS_FRIEND_API bool js::RuntimeIsBeingDestroyed() {
   JSRuntime* runtime = TlsContext.get()->runtime();
   MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtime));
   return runtime->isBeingDestroyed();
 }
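
Finally, embedders reach the per-zone counter through the friend API above; a minimal hypothetical caller:

    #include <cstdint>
    #include "jsfriendapi.h"

    // Sketch: report GC heap bytes for the zone owning |obj| (a live object).
    uint64_t zoneGCHeapBytes(JSObject* obj) {
      return js::GetGCHeapUsageForObjectZone(obj);
    }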