Bug 1594054 - Move ExecutableAllocator from JitRuntime to JitZone. r=jonco,erahm
author: Jan de Mooij <jdemooij@mozilla.com>
Thu, 14 Nov 2019 10:20:02 +0000
changeset 501914 ba1518c4b5e817c3cce93185afc5cebf3092dfde
parent 501913 b8a5f2a349bc8429c935dd0af40dbc360bead67c
child 501915 d21a794140eaf8f83113afa6346d352596292e9b
push id: 114172
push user: dluca@mozilla.com
push date: Tue, 19 Nov 2019 11:31:10 +0000
treeherder: mozilla-inbound@b5c5ba07d3db [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: jonco, erahm
bugs: 1594054
milestone: 72.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1594054 - Move ExecutableAllocator from JitRuntime to JitZone. r=jonco,erahm This matches the JitCode GC-thing lifetime and will hopefully help avoid fragmentation. Differential Revision: https://phabricator.services.mozilla.com/D52823
js/public/MemoryMetrics.h
js/src/gc/GC.cpp
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/jit/Ion.cpp
js/src/jit/JitRealm.h
js/src/jit/Linker.cpp
js/src/vm/MemoryMetrics.cpp
js/src/vm/Realm.cpp
js/src/vm/Runtime.cpp
js/xpconnect/src/XPCJSRuntime.cpp
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -502,28 +502,26 @@ struct RuntimeSizes {
   MACRO(_, MallocHeap, wasmRuntime)                 \
   MACRO(_, MallocHeap, jitLazyLink)
 
   RuntimeSizes() { allScriptSources.emplace(); }
 
   void addToServoSizes(ServoSizes* sizes) const {
     FOR_EACH_SIZE(ADD_TO_SERVO_SIZES);
     scriptSourceInfo.addToServoSizes(sizes);
-    code.addToServoSizes(sizes);
     gc.addToServoSizes(sizes);
   }
 
   FOR_EACH_SIZE(DECL_SIZE_ZERO);
 
   // The script source measurements in |scriptSourceInfo| are initially for
   // all script sources.  At the end, if the measurement granularity is
   // FineGrained, we subtract the measurements of the notable script sources
   // and move them into |notableScriptSources|.
   ScriptSourceInfo scriptSourceInfo;
-  CodeSizes code;
   GCSizes gc;
 
   typedef js::HashMap<const char*, ScriptSourceInfo, mozilla::CStringHasher,
                       js::SystemAllocPolicy>
       ScriptSourcesHashMap;
 
   // |allScriptSources| is only used transiently.  During the reporting phase
   // it is filled with info about every script source in the runtime.  It's
@@ -678,27 +676,29 @@ struct ZoneStats {
   }
 
   void addToServoSizes(JS::ServoSizes* sizes) const {
     MOZ_ASSERT(isTotals);
     FOR_EACH_SIZE(ADD_TO_SERVO_SIZES);
     unusedGCThings.addToServoSizes(sizes);
     stringInfo.addToServoSizes(sizes);
     shapeInfo.addToServoSizes(sizes);
+    code.addToServoSizes(sizes);
   }
 
   FOR_EACH_SIZE(DECL_SIZE_ZERO);
 
   // These string measurements are initially for all strings.  At the end,
   // if the measurement granularity is FineGrained, we subtract the
   // measurements of the notable script sources and move them into
   // |notableStrings|.
   UnusedGCThingSizes unusedGCThings;
   StringInfo stringInfo;
   ShapeInfo shapeInfo;
+  CodeSizes code;
   void* extra = nullptr;  // This field can be used by embedders.
 
   typedef js::HashMap<JSString*, StringInfo,
                       js::InefficientNonFlatteningStringHashPolicy,
                       js::SystemAllocPolicy>
       StringsHashMap;
 
   // |allStrings| is only used transiently.  During the zone traversal it is
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -5547,16 +5547,20 @@ IncrementalProgress GCRuntime::endSweepi
     callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
   }
 
   /* Free LIFO blocks on a background thread if possible. */
   startBackgroundFree();
 
   /* Update the GC state for zones we have swept. */
   for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
+    if (jit::JitZone* jitZone = zone->jitZone()) {
+      // Clear out any small pools that we're hanging on to.
+      jitZone->execAlloc().purge();
+    }
     AutoLockGC lock(this);
     zone->changeGCState(Zone::Sweep, Zone::Finished);
     zone->updateGCThresholds(*this, invocationKind, lock);
     zone->arenas.unmarkPreMarkedFreeCells();
   }
 
   /*
    * Start background thread to sweep zones if required, sweeping the atoms
@@ -6352,21 +6356,16 @@ void GCRuntime::endSweepPhase(bool destr
 
     /*
      * Sweep script filenames after sweeping functions in the generic loop
      * above. In this way when a scripted function's finalizer destroys the
      * script and calls rt->destroyScriptHook, the hook can still access the
      * script's filename. See bug 323267.
      */
     SweepScriptData(rt);
-
-    /* Clear out any small pools that we're hanging on to. */
-    if (rt->hasJitRuntime()) {
-      rt->jitRuntime()->execAlloc().purge();
-    }
   }
 
   {
     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
     callFinalizeCallbacks(&fop, JSFINALIZE_COLLECTION_END);
 
     if (allCCVisibleZonesWereCollected()) {
       grayBitsValid = true;
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -575,25 +575,26 @@ void Zone::traceAtomCache(JSTracer* trc)
   for (auto r = atomCache().all(); !r.empty(); r.popFront()) {
     JSAtom* atom = r.front().asPtrUnbarriered();
     TraceRoot(trc, &atom, "kept atom");
     MOZ_ASSERT(r.front().asPtrUnbarriered() == atom);
   }
 }
 
 void Zone::addSizeOfIncludingThis(
-    mozilla::MallocSizeOf mallocSizeOf, size_t* typePool, size_t* regexpZone,
-    size_t* jitZone, size_t* baselineStubsOptimized, size_t* cachedCFG,
-    size_t* uniqueIdMap, size_t* shapeCaches, size_t* atomsMarkBitmaps,
-    size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
-    size_t* compartmentsPrivateData, size_t* scriptCountsMapArg) {
+    mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* typePool,
+    size_t* regexpZone, size_t* jitZone, size_t* baselineStubsOptimized,
+    size_t* cachedCFG, size_t* uniqueIdMap, size_t* shapeCaches,
+    size_t* atomsMarkBitmaps, size_t* compartmentObjects,
+    size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData,
+    size_t* scriptCountsMapArg) {
   *typePool += types.typeLifoAlloc().sizeOfExcludingThis(mallocSizeOf);
   *regexpZone += regExps().sizeOfExcludingThis(mallocSizeOf);
   if (jitZone_) {
-    jitZone_->addSizeOfIncludingThis(mallocSizeOf, jitZone,
+    jitZone_->addSizeOfIncludingThis(mallocSizeOf, code, jitZone,
                                      baselineStubsOptimized, cachedCFG);
   }
   *uniqueIdMap += uniqueIds().shallowSizeOfExcludingThis(mallocSizeOf);
   *shapeCaches += baseShapes().sizeOfExcludingThis(mallocSizeOf) +
                   initialShapes().sizeOfExcludingThis(mallocSizeOf);
   *atomsMarkBitmaps += markedAtoms().sizeOfExcludingThis(mallocSizeOf);
   *crossCompartmentWrappersTables +=
       crossZoneStringWrappers().sizeOfExcludingThis(mallocSizeOf);
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -205,21 +205,22 @@ class Zone : public js::ZoneAllocator, p
   };
 
   void discardJitCode(
       JSFreeOp* fop,
       ShouldDiscardBaselineCode discardBaselineCode = DiscardBaselineCode,
       ShouldDiscardJitScripts discardJitScripts = KeepJitScripts);
 
   void addSizeOfIncludingThis(
-      mozilla::MallocSizeOf mallocSizeOf, size_t* typePool, size_t* regexpZone,
-      size_t* jitZone, size_t* baselineStubsOptimized, size_t* cachedCFG,
-      size_t* uniqueIdMap, size_t* shapeCaches, size_t* atomsMarkBitmaps,
-      size_t* compartmentObjects, size_t* crossCompartmentWrappersTables,
-      size_t* compartmentsPrivateData, size_t* scriptCountsMapArg);
+      mozilla::MallocSizeOf mallocSizeOf, JS::CodeSizes* code, size_t* typePool,
+      size_t* regexpZone, size_t* jitZone, size_t* baselineStubsOptimized,
+      size_t* cachedCFG, size_t* uniqueIdMap, size_t* shapeCaches,
+      size_t* atomsMarkBitmaps, size_t* compartmentObjects,
+      size_t* crossCompartmentWrappersTables, size_t* compartmentsPrivateData,
+      size_t* scriptCountsMapArg);
 
   // Iterate over all cells in the zone. See the definition of ZoneCellIter
   // in gc/GC-inl.h for the possible arguments and documentation.
   template <typename T, typename... Args>
   js::gc::ZoneCellIter<T> cellIter(Args&&... args) {
     return js::gc::ZoneCellIter<T>(const_cast<Zone*>(this),
                                    std::forward<Args>(args)...);
   }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -151,18 +151,17 @@ bool jit::InitializeJit() {
   JitOptions.supportsUnalignedAccesses =
       MacroAssembler::SupportsUnalignedAccesses();
 
   CheckPerf();
   return true;
 }
 
 JitRuntime::JitRuntime()
-    : execAlloc_(),
-      nextCompilationId_(0),
+    : nextCompilationId_(0),
       exceptionTailOffset_(0),
       bailoutTailOffset_(0),
       profilerExitFrameTailOffset_(0),
       enterJITOffset_(0),
       bailoutHandlerOffset_(0),
       argumentsRectifierOffset_(0),
       argumentsRectifierReturnOffset_(0),
       invalidatorOffset_(0),
@@ -604,24 +603,26 @@ size_t JitRealm::sizeOfIncludingThis(moz
   size_t n = mallocSizeOf(this);
   if (stubCodes_) {
     n += stubCodes_->shallowSizeOfIncludingThis(mallocSizeOf);
   }
   return n;
 }
 
 void JitZone::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
-                                     size_t* jitZone,
+                                     JS::CodeSizes* code, size_t* jitZone,
                                      size_t* baselineStubsOptimized,
                                      size_t* cachedCFG) const {
   *jitZone += mallocSizeOf(this);
   *jitZone +=
       baselineCacheIRStubCodes_.shallowSizeOfExcludingThis(mallocSizeOf);
   *jitZone += ionCacheIRStubInfoSet_.shallowSizeOfExcludingThis(mallocSizeOf);
 
+  execAlloc().addSizeOfCode(code);
+
   *baselineStubsOptimized +=
       optimizedStubSpace_.sizeOfExcludingThis(mallocSizeOf);
   *cachedCFG += cfgSpace_.sizeOfExcludingThis(mallocSizeOf);
 }
 
 TrampolinePtr JitRuntime::getBailoutTable(
     const FrameSizeClass& frameClass) const {
   MOZ_ASSERT(frameClass != FrameSizeClass::None());
--- a/js/src/jit/JitRealm.h
+++ b/js/src/jit/JitRealm.h
@@ -127,19 +127,16 @@ typedef void (*EnterJitCode)(void* code,
                              Value* vp);
 
 class JitcodeGlobalTable;
 
 class JitRuntime {
  private:
   friend class JitRealm;
 
-  // Executable allocator for all code except wasm code.
-  MainThreadData<ExecutableAllocator> execAlloc_;
-
   MainThreadData<uint64_t> nextCompilationId_;
 
   // Buffer for OSR from baseline to Ion. To avoid holding on to this for too
   // long it's also freed in EnterBaseline and EnterJit (after returning from
   // JIT code).
   MainThreadData<js::UniquePtr<uint8_t>> ionOsrTempData_;
 
   // Shared exception-handler tail.
@@ -301,18 +298,16 @@ class JitRuntime {
   ~JitRuntime();
   MOZ_MUST_USE bool initialize(JSContext* cx);
 
   static void Trace(JSTracer* trc, const js::AutoAccessAtomsZone& access);
   static void TraceJitcodeGlobalTableForMinorGC(JSTracer* trc);
   static MOZ_MUST_USE bool MarkJitcodeGlobalTableIteratively(GCMarker* marker);
   static void TraceWeakJitcodeGlobalTable(JSRuntime* rt, JSTracer* trc);
 
-  ExecutableAllocator& execAlloc() { return execAlloc_.ref(); }
-
   const BaselineICFallbackCode& baselineICFallbackCode() const {
     return baselineICFallbackCode_.ref();
   }
 
   IonCompilationId nextCompilationId() {
     return IonCompilationId(nextCompilationId_++);
   }
 
@@ -495,21 +490,25 @@ class JitZone {
   IonCacheIRStubInfoSet ionCacheIRStubInfoSet_;
 
   // Map CacheIRStubKey to shared JitCode objects.
   using BaselineCacheIRStubCodeMap =
       GCHashMap<CacheIRStubKey, WeakHeapPtrJitCode, CacheIRStubKey,
                 SystemAllocPolicy, IcStubCodeMapGCPolicy<CacheIRStubKey>>;
   BaselineCacheIRStubCodeMap baselineCacheIRStubCodes_;
 
+  // Executable allocator for all code except wasm code.
+  MainThreadData<ExecutableAllocator> execAlloc_;
+
  public:
   void traceWeak(JSTracer* trc);
 
   void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
-                              size_t* jitZone, size_t* baselineStubsOptimized,
+                              JS::CodeSizes* code, size_t* jitZone,
+                              size_t* baselineStubsOptimized,
                               size_t* cachedCFG) const;
 
   OptimizedICStubSpace* optimizedStubSpace() { return &optimizedStubSpace_; }
   CFGSpace* cfgSpace() { return &cfgSpace_; }
 
   JitCode* getBaselineCacheIRStubCode(const CacheIRStubKey::Lookup& key,
                                       CacheIRStubInfo** stubInfo) {
     auto p = baselineCacheIRStubCodes_.lookup(key);
@@ -535,16 +534,19 @@ class JitZone {
   MOZ_MUST_USE bool putIonCacheIRStubInfo(const CacheIRStubKey::Lookup& lookup,
                                           CacheIRStubKey& key) {
     IonCacheIRStubInfoSet::AddPtr p =
         ionCacheIRStubInfoSet_.lookupForAdd(lookup);
     MOZ_ASSERT(!p);
     return ionCacheIRStubInfoSet_.add(p, std::move(key));
   }
   void purgeIonCacheIRStubInfo() { ionCacheIRStubInfoSet_.clearAndCompact(); }
+
+  ExecutableAllocator& execAlloc() { return execAlloc_.ref(); }
+  const ExecutableAllocator& execAlloc() const { return execAlloc_.ref(); }
 };
 
 class JitRealm {
   friend class JitActivation;
 
   // Map ICStub keys to ICStub shared code objects.
   using ICStubCodeMap =
       GCHashMap<uint32_t, WeakHeapPtrJitCode, DefaultHasher<uint32_t>,
--- a/js/src/jit/Linker.cpp
+++ b/js/src/jit/Linker.cpp
@@ -32,19 +32,25 @@ JitCode* Linker::newCode(JSContext* cx, 
                        (CodeAlignment - ExecutableAllocatorAlignment);
   if (bytesNeeded >= MAX_BUFFER_SIZE) {
     return fail(cx);
   }
 
   // ExecutableAllocator requires bytesNeeded to be aligned.
   bytesNeeded = AlignBytes(bytesNeeded, ExecutableAllocatorAlignment);
 
+  JitZone* jitZone = cx->zone()->getJitZone(cx);
+  if (!jitZone) {
+    // Note: don't call fail(cx) here, getJitZone reports OOM.
+    return nullptr;
+  }
+
   ExecutablePool* pool;
-  uint8_t* result = (uint8_t*)cx->runtime()->jitRuntime()->execAlloc().alloc(
-      cx, bytesNeeded, &pool, kind);
+  uint8_t* result =
+      (uint8_t*)jitZone->execAlloc().alloc(cx, bytesNeeded, &pool, kind);
   if (!result) {
     return fail(cx);
   }
 
   // The JitCodeHeader will be stored right before the code buffer.
   uint8_t* codeStart = result + sizeof(JitCodeHeader);
 
   // Bump the code up to a nice alignment.
--- a/js/src/vm/MemoryMetrics.cpp
+++ b/js/src/vm/MemoryMetrics.cpp
@@ -208,19 +208,19 @@ static void StatsZoneCallback(JSRuntime*
   // CollectRuntimeStats reserves enough space.
   MOZ_ALWAYS_TRUE(rtStats->zoneStatsVector.growBy(1));
   ZoneStats& zStats = rtStats->zoneStatsVector.back();
   zStats.initStrings();
   rtStats->initExtraZoneStats(zone, &zStats);
   rtStats->currZoneStats = &zStats;
 
   zone->addSizeOfIncludingThis(
-      rtStats->mallocSizeOf_, &zStats.typePool, &zStats.regexpZone,
-      &zStats.jitZone, &zStats.baselineStubsOptimized, &zStats.cachedCFG,
-      &zStats.uniqueIdMap, &zStats.shapeTables,
+      rtStats->mallocSizeOf_, &zStats.code, &zStats.typePool,
+      &zStats.regexpZone, &zStats.jitZone, &zStats.baselineStubsOptimized,
+      &zStats.cachedCFG, &zStats.uniqueIdMap, &zStats.shapeTables,
       &rtStats->runtime.atomsMarkBitmaps, &zStats.compartmentObjects,
       &zStats.crossCompartmentWrappersTables, &zStats.compartmentsPrivateData,
       &zStats.scriptCountsMap);
 }
 
 static void StatsRealmCallback(JSContext* cx, void* data,
                                Handle<Realm*> realm) {
   // Append a new RealmStats to the vector.
--- a/js/src/vm/Realm.cpp
+++ b/js/src/vm/Realm.cpp
@@ -111,42 +111,35 @@ bool Realm::init(JSContext* cx, JSPrinci
 }
 
 bool JSRuntime::createJitRuntime(JSContext* cx) {
   using namespace js::jit;
 
   MOZ_ASSERT(!jitRuntime_);
 
   if (!CanLikelyAllocateMoreExecutableMemory()) {
-    // Report OOM instead of potentially hitting the MOZ_CRASH below, but first
-    // try to release memory.
+    // Try to release memory first instead of potentially reporting OOM below.
     if (OnLargeAllocationFailure) {
       OnLargeAllocationFailure();
     }
-    if (!CanLikelyAllocateMoreExecutableMemory()) {
-      ReportOutOfMemory(cx);
-      return false;
-    }
   }
 
   jit::JitRuntime* jrt = cx->new_<jit::JitRuntime>();
   if (!jrt) {
     return false;
   }
 
   // Unfortunately, initialization depends on jitRuntime_ being non-null, so
   // we can't just wait to assign jitRuntime_.
   jitRuntime_ = jrt;
 
-  AutoEnterOOMUnsafeRegion noOOM;
   if (!jitRuntime_->initialize(cx)) {
-    // Handling OOM here is complicated: if we delete jitRuntime_ now, we
-    // will destroy the ExecutableAllocator, even though there may still be
-    // JitCode instances holding references to ExecutablePools.
-    noOOM.crash("OOM in createJitRuntime");
+    js_delete(jitRuntime_.ref());
+    jitRuntime_ = nullptr;
+    return false;
   }
 
   return true;
 }
 
 bool Realm::ensureJitRealmExists(JSContext* cx) {
   using namespace js::jit;
 
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -381,18 +381,16 @@ void JSRuntime::addSizeOfIncludingThis(m
         scriptDataTable(lock).shallowSizeOfExcludingThis(mallocSizeOf);
     for (RuntimeScriptDataTable::Range r = scriptDataTable(lock).all();
          !r.empty(); r.popFront()) {
       rtSizes->scriptData += r.front()->sizeOfIncludingThis(mallocSizeOf);
     }
   }
 
   if (jitRuntime_) {
-    jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
-
     // Sizes of the IonBuilders we are holding for lazy linking
     for (auto builder : jitRuntime_->ionLazyLinkList(this)) {
       rtSizes->jitLazyLink += builder->sizeOfExcludingThis(mallocSizeOf);
     }
   }
 
   rtSizes->wasmRuntime +=
       wasmInstances.lock()->sizeOfExcludingThis(mallocSizeOf);
--- a/js/xpconnect/src/XPCJSRuntime.cpp
+++ b/js/xpconnect/src/XPCJSRuntime.cpp
@@ -1349,16 +1349,30 @@ NS_IMPL_ISUPPORTS(JSMainRuntimeTemporary
                              nsIMemoryReporter::UNITS_BYTES, amount, \
                              NS_LITERAL_CSTRING(_desc), data);       \
       gcTotal += amount;                                             \
     } else {                                                         \
       sundriesGCHeap += amount;                                      \
     }                                                                \
   } while (0)
 
+// Report realm/zone non-heap bytes.
+#define ZRREPORT_NONHEAP_BYTES(_path, _amount, _desc)                \
+  do {                                                               \
+    size_t amount = _amount; /* evaluate _amount only once */        \
+    if (amount >= SUNDRIES_THRESHOLD) {                              \
+      handleReport->Callback(EmptyCString(), _path,                  \
+                             nsIMemoryReporter::KIND_NONHEAP,        \
+                             nsIMemoryReporter::UNITS_BYTES, amount, \
+                             NS_LITERAL_CSTRING(_desc), data);       \
+    } else {                                                         \
+      sundriesNonHeap += amount;                                     \
+    }                                                                \
+  } while (0)
+
 // Report runtime bytes.
 #define RREPORT_BYTES(_path, _kind, _amount, _desc)                         \
   do {                                                                      \
     size_t amount = _amount; /* evaluate _amount only once */               \
     handleReport->Callback(EmptyCString(), _path, nsIMemoryReporter::_kind, \
                            nsIMemoryReporter::UNITS_BYTES, amount,          \
                            NS_LITERAL_CSTRING(_desc), data);                \
     rtTotal += amount;                                                      \
@@ -1379,17 +1393,20 @@ MOZ_DEFINE_MALLOC_SIZE_OF(JSMallocSizeOf
 namespace xpc {
 
 static void ReportZoneStats(const JS::ZoneStats& zStats,
                             const xpc::ZoneStatsExtras& extras,
                             nsIHandleReportCallback* handleReport,
                             nsISupports* data, bool anonymize,
                             size_t* gcTotalOut = nullptr) {
   const nsCString& pathPrefix = extras.pathPrefix;
-  size_t gcTotal = 0, sundriesGCHeap = 0, sundriesMallocHeap = 0;
+  size_t gcTotal = 0;
+  size_t sundriesGCHeap = 0;
+  size_t sundriesMallocHeap = 0;
+  size_t sundriesNonHeap = 0;
 
   MOZ_ASSERT(!gcTotalOut == zStats.isTotals);
 
   ZRREPORT_GC_BYTES(pathPrefix + NS_LITERAL_CSTRING("symbols/gc-heap"),
                     zStats.symbolsGCHeap, "Symbols.");
 
   ZRREPORT_GC_BYTES(
       pathPrefix + NS_LITERAL_CSTRING("gc-heap-arena-admin"),
@@ -1473,16 +1490,37 @@ static void ReportZoneStats(const JS::Zo
   ZRREPORT_BYTES(pathPrefix + NS_LITERAL_CSTRING("jit-cached-cfg"),
                  zStats.cachedCFG,
                  "The cached CFG to construct Ion code out of it.");
 
   ZRREPORT_BYTES(pathPrefix + NS_LITERAL_CSTRING("script-counts-map"),
                  zStats.scriptCountsMap,
                  "Profiling-related information for scripts.");
 
+  ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/ion"),
+                         zStats.code.ion,
+                         "Code generated by the IonMonkey JIT.");
+
+  ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/baseline"),
+                         zStats.code.baseline,
+                         "Code generated by the Baseline JIT.");
+
+  ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/regexp"),
+                         zStats.code.regexp,
+                         "Code generated by the regexp JIT.");
+
+  ZRREPORT_NONHEAP_BYTES(
+      pathPrefix + NS_LITERAL_CSTRING("code/other"), zStats.code.other,
+      "Code generated by the JITs for wrappers and trampolines.");
+
+  ZRREPORT_NONHEAP_BYTES(pathPrefix + NS_LITERAL_CSTRING("code/unused"),
+                         zStats.code.unused,
+                         "Memory allocated by one of the JITs to hold code, "
+                         "but which is currently unused.");
+
   size_t stringsNotableAboutMemoryGCHeap = 0;
   size_t stringsNotableAboutMemoryMallocHeap = 0;
 
 #define MAYBE_INLINE "The characters may be inline or on the malloc heap."
 #define MAYBE_OVERALLOCATED \
   "Sometimes over-allocated to simplify string concatenation."
 
   for (size_t i = 0; i < zStats.notableStrings.length(); i++) {
@@ -1660,16 +1698,24 @@ static void ReportZoneStats(const JS::Zo
     // We deliberately don't use ZRREPORT_BYTES here.
     REPORT_BYTES(
         pathPrefix + NS_LITERAL_CSTRING("sundries/malloc-heap"), KIND_HEAP,
         sundriesMallocHeap,
         "The sum of all 'malloc-heap' measurements that are too small to "
         "be worth showing individually.");
   }
 
+  if (sundriesNonHeap > 0) {
+    // We deliberately don't use ZRREPORT_NONHEAP_BYTES here.
+    REPORT_BYTES(pathPrefix + NS_LITERAL_CSTRING("sundries/other-heap"),
+                 KIND_NONHEAP, sundriesNonHeap,
+                 "The sum of non-malloc/gc measurements that are too small to "
+                 "be worth showing individually.");
+  }
+
   if (gcTotalOut) {
     *gcTotalOut += gcTotal;
   }
 
 #undef STRING_LENGTH
 }
 
 static void ReportClassStats(const ClassInfo& classInfo, const nsACString& path,
@@ -2009,38 +2055,16 @@ void ReportJSRuntimeExplicitTreeStats(co
         rtPath +
         nsPrintfCString("runtime/script-sources/source(scripts=%d, %s)/",
                         scriptSourceInfo.numScripts, escapedFilename.get());
 
     ReportScriptSourceStats(scriptSourceInfo, notablePath, handleReport, data,
                             rtTotal);
   }
 
-  RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/ion"), KIND_NONHEAP,
-                rtStats.runtime.code.ion,
-                "Code generated by the IonMonkey JIT.");
-
-  RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/baseline"),
-                KIND_NONHEAP, rtStats.runtime.code.baseline,
-                "Code generated by the Baseline JIT.");
-
-  RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/regexp"),
-                KIND_NONHEAP, rtStats.runtime.code.regexp,
-                "Code generated by the regexp JIT.");
-
-  RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/code/other"), KIND_NONHEAP,
-                rtStats.runtime.code.other,
-                "Code generated by the JITs for wrappers and trampolines.");
-
-  RREPORT_BYTES(
-      rtPath + NS_LITERAL_CSTRING("runtime/code/unused"), KIND_NONHEAP,
-      rtStats.runtime.code.unused,
-      "Memory allocated by one of the JITs to hold code, but which is "
-      "currently unused.");
-
   RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/gc/marker"), KIND_HEAP,
                 rtStats.runtime.gc.marker, "The GC mark stack and gray roots.");
 
   RREPORT_BYTES(rtPath + NS_LITERAL_CSTRING("runtime/gc/nursery-committed"),
                 KIND_NONHEAP, rtStats.runtime.gc.nurseryCommitted,
                 "Memory being used by the GC's nursery.");
 
   RREPORT_BYTES(