Bug 1578327 - Discard old data just before streaming - r=gregtatum
author Gerald Squelart <gsquelart@mozilla.com>
Wed, 18 Sep 2019 01:21:32 +0000
changeset 493673 226b570cb964aee4fb9711209b0a5429dc71b57e
parent 493672 d762521ac7eb5b2a184699c30976f27990e2b412
child 493674 96d6cfce93e10d5ab6f974968a66cfab4dad9b72
push id 36585
push user malexandru@mozilla.com
push date Wed, 18 Sep 2019 09:56:40 +0000
treeherder mozilla-central@e71921b729c6
reviewers gregtatum
bugs 1578327
milestone 71.0a1
Bug 1578327 - Discard old data just before streaming - r=gregtatum

Since all profiler data is now stored inside ProfileBuffer, there is no real need to continuously discard old data during sampling (that was particularly useful for reclaiming memory taken by old markers and their payloads). Instead, we can now discard the old data once, just before starting to stream it to JSON.

Differential Revision: https://phabricator.services.mozilla.com/D44433
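For orientation, a minimal sketch of the new discard-before-streaming logic follows (simplified from the patch below; FakeBuffer, FakeProfilerTimeMs and DiscardOldDataBeforeStreaming are illustrative stand-ins for ProfileBuffer, profiler_time() and locked_profiler_stream_json_for_this_process, and it is assumed that profiler_time() returns milliseconds since process start):

    #include <cstdio>
    #include <optional>

    // Illustrative stand-in for ProfileBuffer; only the call used here is modeled.
    struct FakeBuffer {
      void DiscardSamplesBeforeTime(double aTimeMs) {
        std::printf("discarding samples recorded before %.1f ms\n", aTimeMs);
      }
    };

    // Assumption: like profiler_time(), this returns milliseconds since process start.
    static double FakeProfilerTimeMs() { return 120000.0; }

    // Mirrors the patched streaming function: discard once, right before streaming,
    // instead of on every iteration of the sampler loop.
    static void DiscardOldDataBeforeStreaming(FakeBuffer& aBuffer,
                                              std::optional<double> aDurationS) {
      const double collectionStartMs = FakeProfilerTimeMs();
      if (aDurationS) {
        // Keep only the last aDurationS seconds of data (the "window length").
        const double durationStartMs = collectionStartMs - *aDurationS * 1000;
        aBuffer.DiscardSamplesBeforeTime(durationStartMs);
      }
      // ... JSON streaming of the remaining buffer contents would follow here ...
    }

    int main() {
      FakeBuffer buffer;
      DiscardOldDataBeforeStreaming(buffer, std::optional<double>{30.0});
      return 0;
    }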
mozglue/baseprofiler/core/platform.cpp
tools/profiler/core/platform.cpp
--- a/mozglue/baseprofiler/core/platform.cpp
+++ b/mozglue/baseprofiler/core/platform.cpp
@@ -1617,20 +1617,27 @@ static void locked_profiler_stream_json_
     PSLockRef aLock, SpliceableJSONWriter& aWriter, double aSinceTime,
     bool aIsShuttingDown, bool aOnlyThreads = false) {
   LOG("locked_profiler_stream_json_for_this_process");
 
   MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock));
 
   AUTO_PROFILER_STATS(base_locked_profiler_stream_json_for_this_process);
 
-  double collectionStart = profiler_time();
+  const double collectionStartMs = profiler_time();
 
   ProfileBuffer& buffer = ActivePS::Buffer(aLock);
 
+  // If a duration limit ("window length") was set, discard data older than that.
+  Maybe<double> durationS = ActivePS::Duration(aLock);
+  if (durationS.isSome()) {
+    const double durationStartMs = collectionStartMs - *durationS * 1000;
+    buffer.DiscardSamplesBeforeTime(durationStartMs);
+  }
+
   if (!aOnlyThreads) {
     // Put shared library info
     aWriter.StartArrayProperty("libs");
     AppendSharedLibraries(aWriter);
     aWriter.EndArray();
 
     // Put meta data
     aWriter.StartObjectProperty("meta");
@@ -1667,25 +1674,25 @@ static void locked_profiler_stream_json_
   if (!aOnlyThreads) {
     aWriter.EndArray();
 
     aWriter.StartArrayProperty("pausedRanges");
     { buffer.StreamPausedRangesToJSON(aWriter, aSinceTime); }
     aWriter.EndArray();
   }
 
-  double collectionEnd = profiler_time();
+  const double collectionEndMs = profiler_time();
 
   // Record timestamps for the collection into the buffer, so that consumers
   // know why we didn't collect any samples for its duration.
   // We put these entries into the buffer after we've collected the profile,
   // so they'll be visible for the *next* profile collection (if they haven't
   // been overwritten due to buffer wraparound by then).
-  buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStart));
-  buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEnd));
+  buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStartMs));
+  buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEndMs));
 }
 
 bool profiler_stream_json_for_this_process(SpliceableJSONWriter& aWriter,
                                            double aSinceTime,
                                            bool aIsShuttingDown,
                                            bool aOnlyThreads) {
   LOG("profiler_stream_json_for_this_process");
 
@@ -2088,24 +2095,16 @@ void SamplerThread::Run() {
 #  endif
         TimeStamp threadsSampled = TimeStamp::NowUnfuzzed();
 
         buffer.CollectOverheadStats(delta, lockAcquired - sampleStart,
                                     expiredMarkersCleaned - lockAcquired,
                                     countersSampled - expiredMarkersCleaned,
                                     threadsSampled - countersSampled);
       }
-
-      Maybe<double> duration = ActivePS::Duration(lock);
-      if (duration) {
-        ActivePS::Buffer(lock).DiscardSamplesBeforeTime(
-            (TimeStamp::NowUnfuzzed() - TimeDuration::FromSeconds(*duration) -
-             CorePS::ProcessStartTime())
-                .ToMilliseconds());
-      }
     }
     // gPSMutex is not held after this point.
 
     // Calculate how long a sleep to request.  After the sleep, measure how
     // long we actually slept and take the difference into account when
     // calculating the sleep interval for the next iteration.  This is an
     // attempt to keep "to schedule" in the presence of inaccuracy of the
     // actual sleep intervals.
--- a/tools/profiler/core/platform.cpp
+++ b/tools/profiler/core/platform.cpp
@@ -2121,20 +2121,27 @@ static void locked_profiler_stream_json_
     PSLockRef aLock, SpliceableJSONWriter& aWriter, double aSinceTime,
     bool aIsShuttingDown, ProfilerCodeAddressService* aService) {
   LOG("locked_profiler_stream_json_for_this_process");
 
   MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock));
 
   AUTO_PROFILER_STATS(locked_profiler_stream_json_for_this_process);
 
-  double collectionStart = profiler_time();
+  const double collectionStartMs = profiler_time();
 
   ProfileBuffer& buffer = ActivePS::Buffer(aLock);
 
+  // If a duration limit ("window length") was set, discard data older than that.
+  Maybe<double> durationS = ActivePS::Duration(aLock);
+  if (durationS.isSome()) {
+    const double durationStartMs = collectionStartMs - *durationS * 1000;
+    buffer.DiscardSamplesBeforeTime(durationStartMs);
+  }
+
   // Put shared library info
   aWriter.StartArrayProperty("libs");
   AppendSharedLibraries(aWriter);
   aWriter.EndArray();
 
   // Put meta data
   aWriter.StartObjectProperty("meta");
   { StreamMetaJSCustomObject(aLock, aWriter, aIsShuttingDown); }
@@ -2219,25 +2226,25 @@ static void locked_profiler_stream_json_
     }
     aWriter.EndArray();
   }
 
   aWriter.StartArrayProperty("pausedRanges");
   { buffer.StreamPausedRangesToJSON(aWriter, aSinceTime); }
   aWriter.EndArray();
 
-  double collectionEnd = profiler_time();
+  const double collectionEndMs = profiler_time();
 
   // Record timestamps for the collection into the buffer, so that consumers
   // know why we didn't collect any samples for its duration.
   // We put these entries into the buffer after we've collected the profile,
   // so they'll be visible for the *next* profile collection (if they haven't
   // been overwritten due to buffer wraparound by then).
-  buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStart));
-  buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEnd));
+  buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStartMs));
+  buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEndMs));
 }
 
 bool profiler_stream_json_for_this_process(
     SpliceableJSONWriter& aWriter, double aSinceTime, bool aIsShuttingDown,
     ProfilerCodeAddressService* aService) {
   LOG("profiler_stream_json_for_this_process");
 
   MOZ_RELEASE_ASSERT(CorePS::Exists());
@@ -2654,24 +2661,16 @@ void SamplerThread::Run() {
 #endif
         TimeStamp threadsSampled = TimeStamp::NowUnfuzzed();
 
         buffer.CollectOverheadStats(delta, lockAcquired - sampleStart,
                                     expiredMarkersCleaned - lockAcquired,
                                     countersSampled - expiredMarkersCleaned,
                                     threadsSampled - countersSampled);
       }
-
-      Maybe<double> duration = ActivePS::Duration(lock);
-      if (duration) {
-        ActivePS::Buffer(lock).DiscardSamplesBeforeTime(
-            (TimeStamp::NowUnfuzzed() - TimeDuration::FromSeconds(*duration) -
-             CorePS::ProcessStartTime())
-                .ToMilliseconds());
-      }
     }
     // gPSMutex is not held after this point.
 
     // Calculate how long a sleep to request.  After the sleep, measure how
     // long we actually slept and take the difference into account when
     // calculating the sleep interval for the next iteration.  This is an
     // attempt to keep "to schedule" in the presence of inaccuracy of the
     // actual sleep intervals.
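
As a hedged sanity check on the timestamp arithmetic (not part of the patch): the removed sampler-loop cutoff and the new streaming-time cutoff compute the same value, assuming profiler_time() returns milliseconds elapsed since CorePS::ProcessStartTime(), which matches how the removed code converted timestamps. The numbers below are made up for illustration.

    #include <cassert>

    int main() {
      // All values in milliseconds on an arbitrary absolute clock (illustrative).
      const double processStartMs = 1000.0;   // stands in for CorePS::ProcessStartTime()
      const double nowMs = 121000.0;          // stands in for TimeStamp::NowUnfuzzed()
      const double durationS = 30.0;          // stands in for *ActivePS::Duration()

      // Removed sampler-loop form:
      //   (Now - TimeDuration::FromSeconds(duration) - ProcessStartTime()).ToMilliseconds()
      const double oldCutoffMs = (nowMs - durationS * 1000) - processStartMs;

      // New streaming-time form, with profiler_time() assumed to be
      // (Now - ProcessStartTime()) in milliseconds:
      const double profilerTimeMs = nowMs - processStartMs;
      const double newCutoffMs = profilerTimeMs - durationS * 1000;

      // Both express "durationS seconds before now", relative to process start.
      assert(oldCutoffMs == newCutoffMs);
      return 0;
    }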