Bug 1386511 - Part 1: Revise the format of the GC profiler markers. r=jonco, r=mstange
author: Paul Bone <pbone@mozilla.com>
date: Tue, 31 Oct 2017 14:32:49 +1100
changeset 443162 7ae1ff4001aa375e3b5511fad566e7bf8e0cec96
parent 443161 2e51201d771e101b4954d524e7e97fbb8f7e5f9b
child 443163 463e74189171d39a14a748c8af6b50456879ec06
push id: 1618
push user: Callek@gmail.com
push date: Thu, 11 Jan 2018 17:45:48 +0000
reviewers: jonco, mstange
bugs: 1386511
milestone: 58.0a1
Bug 1386511 - Part 1: Revise the format of the GC profiler markers. r=jonco, r=mstange

This change also updates the documentation to correct the semantics of GC slice numbers.
js/src/gc/Nursery.cpp
js/src/gc/Statistics.cpp
js/src/gc/Statistics.h
toolkit/components/telemetry/GCTelemetry.jsm
toolkit/components/telemetry/docs/data/main-ping.rst
toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js
tools/profiler/core/platform.cpp
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -505,30 +505,32 @@ js::Nursery::renderProfileJSON(JSONPrint
     }
 
     if (previousGC.reason == JS::gcreason::NO_REASON) {
         // If the nursery was empty when the last minorGC was requested, then
         // no nursery collection will have been performed but JSON may still be
         // requested. (And as a public API, this function should not crash in
         // such a case.)
         json.beginObject();
-        json.property("status", "no collection");
+        json.property("status", "nursery empty");
         json.endObject();
         return;
     }
 
     json.beginObject();
 
+    json.property("status", "complete");
+
     json.property("reason", JS::gcreason::ExplainReason(previousGC.reason));
     json.property("bytes_tenured", previousGC.tenuredBytes);
-    json.floatProperty("promotion_rate", calcPromotionRate(nullptr), 0);
-    json.property("nursery_bytes", previousGC.nurseryUsedBytes);
-    json.property("new_nursery_bytes", numChunks() * ChunkSize);
+    json.property("bytes_used", previousGC.nurseryUsedBytes);
+    json.property("cur_capacity", previousGC.nurseryCapacity);
+    json.property("new_capacity", spaceToEnd());
 
-    json.beginObjectProperty("timings");
+    json.beginObjectProperty("phase_times");
 
 #define EXTRACT_NAME(name, text) #name,
     static const char* names[] = {
 FOR_EACH_NURSERY_PROFILE_TIME(EXTRACT_NAME)
 #undef EXTRACT_NAME
     "" };
 
     size_t i = 0;
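
To make the revised nursery output concrete, here is a hand-written sketch of the JSON that renderProfileJSON() emits after this change. It is not taken from a real profile: the property names come from the hunk above, but every value, the reason string, and the single phase_times entry are invented.

// Hypothetical nursery-collection JSON after this patch; all values are invented.
const gcMinorExample = {
  status: "complete",            // "nursery empty" when no collection was performed
  reason: "OUT_OF_NURSERY",      // whatever JS::gcreason::ExplainReason() returned
  bytes_tenured: 40960,          // previousGC.tenuredBytes
  bytes_used: 1048576,           // previousGC.nurseryUsedBytes
  cur_capacity: 16777216,        // previousGC.nurseryCapacity
  new_capacity: 16777216,        // spaceToEnd()
  phase_times: { total: 1.2 },   // one entry per FOR_EACH_NURSERY_PROFILE_TIME phase
};
console.log(JSON.stringify(gcMinorExample, null, 2));
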
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -549,29 +549,38 @@ Statistics::renderNurseryJson(JSRuntime*
     JSONPrinter json(printer);
     rt->gc.nursery().renderProfileJSON(json);
     return UniqueChars(printer.release());
 }
 
 UniqueChars
 Statistics::renderJsonMessage(uint64_t timestamp, bool includeSlices) const
 {
+    /*
+     * The format of the JSON message is specified by the GCMajorMarkerPayload
+     * type in perf.html
+     * https://github.com/devtools-html/perf.html/blob/master/src/types/markers.js#L62
+     *
+     * All the properties listed here are created within the timings property
+     * of the GCMajor marker.
+     */
     if (aborted)
         return DuplicateString("{status:\"aborted\"}"); // May return nullptr
 
     Sprinter printer(nullptr, false);
     if (!printer.init())
         return UniqueChars(nullptr);
     JSONPrinter json(printer);
 
     json.beginObject();
+    json.property("status", "completed");
     formatJsonDescription(timestamp, json);
 
     if (includeSlices) {
-        json.beginListProperty("slices");
+        json.beginListProperty("slices_list");
         for (unsigned i = 0; i < slices_.length(); i++)
             formatJsonSlice(i, json);
         json.endList();
     }
 
     json.beginObjectProperty("totals");
     formatJsonPhaseTimes(phaseTimes, json);
     json.endObject();
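
For comparison, a similar hand-written sketch of a major-GC message in the revised format follows. The property names are taken from this patch and from the main-ping.rst update further down; the values are invented and most per-phase and MMU fields are omitted.

// Hypothetical major-GC JSON message in the revised format; values are invented
// and the field list is abbreviated.
const gcMajorExample = {
  status: "completed",           // an OOM during collection yields {status:"aborted"} instead
  max_pause: 23.2,
  total_time: 80.3,
  nonincremental_reason: "None",
  allocated: 37,                 // megabytes, as before
  allocated_bytes: 38853696,     // the same figure in bytes (new in this patch)
  major_gc_number: 41,
  minor_gc_number: 1953,
  slice_number: 218,             // gc.gcNumber() at the start of the collection (new)
  slices_list: [                 // renamed from "slices"
    { slice: 218, pause: 23.2, when: 0, reason: "SET_NEW_DOCUMENT", times: {} },
  ],
  totals: {},                    // per-phase totals from formatJsonPhaseTimes()
};
console.log(JSON.stringify(gcMajorExample, null, 2));
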
@@ -605,21 +614,23 @@ Statistics::formatJsonDescription(uint64
     json.property("mmu_50ms", int(mmu50 * 100));
 
     TimeDuration sccTotal, sccLongest;
     sccDurations(&sccTotal, &sccLongest);
     json.property("scc_sweep_total", sccTotal, JSONPrinter::MILLISECONDS);
     json.property("scc_sweep_max_pause", sccLongest, JSONPrinter::MILLISECONDS);
 
     json.property("nonincremental_reason", ExplainAbortReason(nonincrementalReason_));
-    json.property("allocated", uint64_t(preBytes) / 1024 / 1024);
+    json.property("allocated", uint64_t(preBytes)/1024/1024);
+    json.property("allocated_bytes", preBytes);
     json.property("added_chunks", getCount(STAT_NEW_CHUNK));
     json.property("removed_chunks", getCount(STAT_DESTROY_CHUNK));
     json.property("major_gc_number", startingMajorGCNumber);
     json.property("minor_gc_number", startingMinorGCNumber);
+    json.property("slice_number", startingSliceNumber);
 }
 
 void
 Statistics::formatJsonSliceDescription(unsigned i, const SliceData& slice, JSONPrinter& json) const
 {
     TimeDuration when = slice.start - slices_[0].start;
     char budgetDescription[200];
     slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
@@ -875,16 +886,17 @@ Statistics::beginGC(JSGCInvocationKind k
 {
     slices_.clearAndFree();
     sccTimes.clearAndFree();
     gckind = kind;
     nonincrementalReason_ = gc::AbortReason::None;
 
     preBytes = runtime->gc.usage.gcBytes();
     startingMajorGCNumber = runtime->gc.majorGCCount();
+    startingSliceNumber = runtime->gc.gcNumber();
 }
 
 void
 Statistics::endGC()
 {
     TimeDuration sccTotal, sccLongest;
     sccDurations(&sccTotal, &sccLongest);
 
--- a/js/src/gc/Statistics.h
+++ b/js/src/gc/Statistics.h
@@ -312,16 +312,17 @@ struct Statistics
      * threshold and the value that exceeded it. */
     bool thresholdTriggered;
     double triggerAmount;
     double triggerThreshold;
 
     /* GC numbers as of the beginning of the collection. */
     uint64_t startingMinorGCNumber;
     uint64_t startingMajorGCNumber;
+    uint64_t startingSliceNumber;
 
     /* Records the maximum GC pause in an API-controlled interval (in us). */
     mutable TimeDuration maxPauseInInterval;
 
     /* Phases that are currently on stack. */
     Vector<Phase, MAX_PHASE_NESTING, SystemAllocPolicy> phaseStack;
 
     /*
--- a/toolkit/components/telemetry/GCTelemetry.jsm
+++ b/toolkit/components/telemetry/GCTelemetry.jsm
@@ -47,18 +47,18 @@ class GCData {
   // milliseconds since startup.
   rebaseTimes(data) {
     function fixup(t) {
       return t / 1000.0 - BASE_TIME;
     }
 
     data.timestamp = fixup(data.timestamp);
 
-    for (let i = 0; i < data.slices.length; i++) {
-      let slice = data.slices[i];
+    for (let i = 0; i < data.slices_list.length; i++) {
+      let slice = data.slices_list[i];
       slice.start_timestamp = fixup(slice.start_timestamp);
       slice.end_timestamp = fixup(slice.end_timestamp);
     }
   }
 
   // Records a GC (represented by |data|) in the randomlySelected or
   // worst batches depending on the criteria above.
   record(data) {
@@ -105,53 +105,53 @@ class GCData {
     };
   }
 }
 
 // If you adjust any of the constants here (slice limit, number of keys, etc.)
 // make sure to update the JSON schema at:
 // https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/telemetry/main.schema.json
 // You should also adjust browser_TelemetryGC.js.
-const MAX_GC_KEYS = 25;
+const MAX_GC_KEYS = 30;
 const MAX_SLICES = 4;
 const MAX_SLICE_KEYS = 15;
 const MAX_PHASES = 65;
 
 function limitProperties(obj, count) {
   // If there are too many properties, just delete them all. We don't
   // expect this ever to happen.
   if (Object.keys(obj).length > count) {
     for (let key of Object.keys(obj)) {
       delete obj[key];
     }
   }
 }
 
 function limitSize(data) {
   // Store the number of slices so we know if we lost any at the end.
-  data.num_slices = data.slices.length;
+  data.num_slices = data.slices_list.length;
 
-  data.slices.sort((a, b) => b.pause - a.pause);
+  data.slices_list.sort((a, b) => b.pause - a.pause);
 
-  if (data.slices.length > MAX_SLICES) {
+  if (data.slices_list.length > MAX_SLICES) {
     // Make sure we always keep the first slice since it has the
     // reason the GC was started.
-    let firstSliceIndex = data.slices.findIndex(s => s.slice == 0);
+    let firstSliceIndex = data.slices_list.findIndex(s => s.slice == 0);
     if (firstSliceIndex >= MAX_SLICES) {
-      data.slices[MAX_SLICES - 1] = data.slices[firstSliceIndex];
+      data.slices_list[MAX_SLICES - 1] = data.slices_list[firstSliceIndex];
     }
 
-    data.slices.length = MAX_SLICES;
+    data.slices_list.length = MAX_SLICES;
   }
 
-  data.slices.sort((a, b) => a.slice - b.slice);
+  data.slices_list.sort((a, b) => a.slice - b.slice);
 
   limitProperties(data, MAX_GC_KEYS);
 
-  for (let slice of data.slices) {
+  for (let slice of data.slices_list) {
     limitProperties(slice, MAX_SLICE_KEYS);
     limitProperties(slice.times, MAX_PHASES);
   }
 
   limitProperties(data.totals, MAX_PHASES);
 }
 
 let processData = new Map();
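
As a usage illustration of the renamed property handled by limitSize() above, the sketch below builds a hypothetical report with more slices than MAX_SLICES. limitSize() itself is private to this module, so the expected effect is only described in the trailing comments.

// Hypothetical GC report with 15 slices; slice 0 has the shortest pause.
const sampleGC = {
  status: "completed",
  totals: {},
  slices_list: Array.from({ length: 15 }, (_, i) => ({
    slice: i,
    pause: i === 0 ? 1.0 : 10 + i,
    times: {},
  })),
};
// Expected after limitSize(sampleGC):
//   sampleGC.num_slices == 15           (the original count is recorded first)
//   sampleGC.slices_list.length == 4    (the MAX_SLICES longest pauses, re-sorted by slice)
//   sampleGC.slices_list.some(s => s.slice === 0)  (slice 0 is kept because it records
//                                                   the reason the GC was started)
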
--- a/toolkit/components/telemetry/docs/data/main-ping.rst
+++ b/toolkit/components/telemetry/docs/data/main-ping.rst
@@ -462,16 +462,18 @@ GC has a C/T probablility of being selec
 
 Structure:
 
 .. code-block:: js
 
     "gc": {
       "random": [
         {
+          // "completed", or "aborted" if an OOM occurred.
+          "status": "completed",
           // Timestamps are in milliseconds since startup. All the times here
           // are wall-clock times, which may not be monotonically increasing.
           "timestamp": 294872.2,
           // All durations are in milliseconds.
           "max_pause": 73.629,
           "total_time": 364.951, // Sum of all slice times.
           "zones_collected": 9,
           "total_zones": 9,
@@ -481,25 +483,27 @@ Structure:
           "mmu_20ms": 0,
           "mmu_50ms": 0,
           // Reasons include "None", "NonIncrementalRequested",
           // "AbortRequested", "KeepAtomsSet", "IncrementalDisabled",
           // "ModeChange", "MallocBytesTrigger", "GCBytesTrigger",
           // "ZoneChange", "CompartmentRevived".
           "nonincremental_reason": "None",
           "allocated": 37, // In megabytes.
+          "allocated_bytes": 38853696, // In bytes.
           "added_chunks": 54,
           "removed_chunks": 12,
           // Total number of slices (some of which may not appear
           // in the "slices" array).
           "num_slices": 15,
           // We record at most 4 slices.
-          "slices": [
+          "slice_number": 218, // The first slice number for this GC event.
+          "slices_list": [
             {
-              "slice": 0,  // The index of this slice.
+              "slice": 218,  // The global index of this slice.
               "pause": 23.221,  // How long the slice took.
               "when": 0,  // Milliseconds since the start of the GC.
               "reason": "SET_NEW_DOCUMENT",
               // GC state when the slice started
               "initial_state": "NotActive",
               // GC state when the slice ended
               "final_state": "Mark",
               // Budget is either "Xms", "work(Y)", or
--- a/toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js
+++ b/toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js
@@ -36,41 +36,43 @@ function check(entries) {
 
     ok(entries[f].length <= 2, "not too many GCs");
 
     for (let gc of entries[f]) {
       ok(gc !== null, "GC is non-null");
 
       foundGCs++;
 
-      ok(Object.keys(gc).length <= 25, "number of keys in GC is not too large");
+      ok(Object.keys(gc).length <= 30, "number of keys in GC is not too large");
 
       // Sanity check the GC data.
+      ok("status" in gc, "status field present");
+      is(gc.status, "completed", "status field correct");
       ok("total_time" in gc, "total_time field present");
       ok("max_pause" in gc, "max_pause field present");
 
-      ok("slices" in gc, "slices field present");
-      ok(Array.isArray(gc.slices), "slices is an array");
-      ok(gc.slices.length > 0, "slices array non-empty");
-      ok(gc.slices.length <= 4, "slices array is not too long");
+      ok("slices_list" in gc, "slices_list field present");
+      ok(Array.isArray(gc.slices_list), "slices_list is an array");
+      ok(gc.slices_list.length > 0, "slices_list array non-empty");
+      ok(gc.slices_list.length <= 4, "slices_list array is not too long");
 
       ok("totals" in gc, "totals field present");
       ok(typeof(gc.totals) == "object", "totals is an object");
       ok(Object.keys(gc.totals).length <= 65, "totals array is not too long");
 
       // Make sure we don't skip any big objects.
       for (let key in gc) {
-        if (key != "slices" && key != "totals") {
+        if (key != "slices_list" && key != "totals") {
           ok(typeof(gc[key]) != "object", `${key} property should be primitive`);
         }
       }
 
       let phases = new Set();
 
-      for (let slice of gc.slices) {
+      for (let slice of gc.slices_list) {
         ok(Object.keys(slice).length <= 15, "slice is not too large");
 
         ok("pause" in slice, "pause field present in slice");
         ok("reason" in slice, "reason field present in slice");
         ok("times" in slice, "times field present in slice");
 
         // Make sure we don't skip any big objects.
         for (let key in slice) {
--- a/tools/profiler/core/platform.cpp
+++ b/tools/profiler/core/platform.cpp
@@ -1442,17 +1442,17 @@ StreamTaskTracer(PSLockRef aLock, Splice
 }
 
 static void
 StreamMetaJSCustomObject(PSLockRef aLock, SpliceableJSONWriter& aWriter,
                          const TimeStamp& aShutdownTime)
 {
   MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock));
 
-  aWriter.IntProperty("version", 8);
+  aWriter.IntProperty("version", 9);
 
   // The "startTime" field holds the number of milliseconds since midnight
   // January 1, 1970 GMT. This grotty code computes (Now - (Now -
   // ProcessStartTime)) to convert CorePS::ProcessStartTime() into that form.
   TimeDuration delta = TimeStamp::Now() - CorePS::ProcessStartTime();
   aWriter.DoubleProperty(
     "startTime", static_cast<double>(PR_Now()/1000.0 - delta.ToMilliseconds()));