Bug 773428 - Part 1: Support dynamic profiler markers. r=ehsan
author Benoit Girard <b56girard@gmail.com>
date Tue, 11 Dec 2012 14:10:56 -0500
changeset 115797 0b7e244b7ac87db93bc9f7e23562aa04ad136962
parent 115796 d75e7bfff5544fced13ce6a950a31d019f846fde
child 115798 973a98b6265fa585833ecef11404105a1236b110
push id 24028
push user emorley@mozilla.com
push date Thu, 13 Dec 2012 15:56:02 +0000
treeherder mozilla-central@9db79b97abbb
reviewers ehsan
bugs 773428
milestone 20.0a1
Bug 773428 - Part 1: Support dynamic profiler markers. r=ehsan Add support for dynamic markers. All markers are copied and a signal lock is introduced to guard the stack usage. Because the marker stack is guarded by the signal lock the variables are no longer volatile.
.gdbinit
tools/profiler/TableTicker.cpp
tools/profiler/sps_sampler.h
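
Before the per-file diffs, a standalone sketch of the packing scheme that the new addDynamicTag() helper implements may be useful: the marker string is copied into pointer-sized chunks, and each chunk rides in the void* payload of a 'd' (dynamic) entry appended after the leading tag. Everything below is illustrative only: FakeEntry, packDynamicTag, unpackDynamicTags, and main() are stand-ins rather than the real ProfileEntry/ThreadProfile API, and the unpack side only approximates what a profile consumer has to do with the 'd' entries.

// Standalone sketch of the 'd'-tag packing scheme (illustrative types only).
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

struct FakeEntry {
  char  mTagName;   // 'm', 'c', 'd', ...
  void* mTagData;   // either a static string pointer or packed characters
};

// Copy a null-terminated string into pointer-sized chunks, each carried in
// the void* payload of a 'd' entry that follows the leading tag entry.
static void packDynamicTag(std::vector<FakeEntry>& profile,
                           char aTagName, const char* aStr)
{
  profile.push_back(FakeEntry{aTagName, nullptr});
  size_t strLen = strlen(aStr) + 1;                 // include the terminator
  for (size_t j = 0; j < strLen; j += sizeof(void*)) {
    char text[sizeof(void*)] = {0};                 // zero-fill the tail bytes
    size_t len = sizeof(void*);
    if (j + len > strLen)
      len = strLen - j;
    memcpy(text, &aStr[j], len);
    void* data;
    memcpy(&data, text, sizeof(void*));             // reinterpret the bytes as a void*
    profile.push_back(FakeEntry{'d', data});
  }
}

// A consumer rebuilds the string by concatenating the 'd' payloads until it
// reaches the embedded terminator.
static std::string unpackDynamicTags(const std::vector<FakeEntry>& profile,
                                     size_t firstDynamicEntry)
{
  std::string out;
  for (size_t i = firstDynamicEntry;
       i < profile.size() && profile[i].mTagName == 'd'; i++) {
    char text[sizeof(void*) + 1] = {0};
    memcpy(text, &profile[i].mTagData, sizeof(void*));
    out += text;                                    // stops at the first '\0'
  }
  return out;
}

int main()
{
  std::vector<FakeEntry> profile;
  packDynamicTag(profile, 'm', "GC slice 12ms");
  printf("%s\n", unpackDynamicTags(profile, 1).c_str());  // prints: GC slice 12ms
}
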
--- a/tools/profiler/TableTicker.cpp
+++ b/tools/profiler/TableTicker.cpp
@@ -701,41 +701,49 @@ void TableTicker::BuildJSObject(JSAObjec
   SetPaused(true);
   JSCustomObject* threadSamples = b.CreateObject();
   GetPrimaryThreadProfile()->BuildJSObject(b, threadSamples);
   b.ArrayPush(threads, threadSamples);
   SetPaused(false);
 }
 
 static
+void addDynamicTag(ThreadProfile &aProfile, char aTagName, const char *aStr)
+{
+  aProfile.addTag(ProfileEntry(aTagName, ""));
+  // Add one to store the null termination
+  size_t strLen = strlen(aStr) + 1;
+  for (size_t j = 0; j < strLen;) {
+    // Store as many characters in the void* as the platform allows
+    char text[sizeof(void*)];
+    int len = sizeof(void*)/sizeof(char);
+    if (j+len >= strLen) {
+      len = strLen - j;
+    }
+    memcpy(text, &aStr[j], len);
+    j += sizeof(void*)/sizeof(char);
+    // Use *((void**)&text[0]) to pass the packed characters as a void*
+    aProfile.addTag(ProfileEntry('d', *((void**)(&text[0]))));
+  }
+}
+
+static
 void addProfileEntry(volatile StackEntry &entry, ThreadProfile &aProfile,
                      ProfileStack *stack, void *lastpc)
 {
   int lineno = -1;
 
   // First entry has tagName 's' (start)
   // Check for magic pointer bit 1 to indicate copy
   const char* sampleLabel = entry.label();
   if (entry.isCopyLabel()) {
     // Store the string using 1 or more 'd' (dynamic) tags
     // that will be appended to the preceding tag
 
-    aProfile.addTag(ProfileEntry('c', ""));
-    // Add one to store the null termination
-    size_t strLen = strlen(sampleLabel) + 1;
-    for (size_t j = 0; j < strLen;) {
-      // Store as many characters in the void* as the platform allows
-      char text[sizeof(void*)];
-      for (size_t pos = 0; pos < sizeof(void*) && j+pos < strLen; pos++) {
-        text[pos] = sampleLabel[j+pos];
-      }
-      j += sizeof(void*)/sizeof(char);
-      // Cast to *((void**) to pass the text data to a void*
-      aProfile.addTag(ProfileEntry('d', *((void**)(&text[0]))));
-    }
+    addDynamicTag(aProfile, 'c', sampleLabel);
     if (entry.js()) {
       if (!entry.pc()) {
         // The JIT only allows the top-most entry to have a NULL pc
         MOZ_ASSERT(&entry == &stack->mStack[stack->stackSize() - 1]);
         // If stack-walking was disabled, then that's just unfortunate
         if (lastpc) {
           jsbytecode *jspc = js::ProfilingGetPC(stack->mRuntime, entry.script(),
                                                 lastpc);
@@ -902,17 +910,17 @@ unsigned int sCurrentEventGeneration = 0
  * a problem if 2^32 events happen between samples that we need
  * to know are associated with different events */
 
 void TableTicker::Tick(TickSample* sample)
 {
   // Marker(s) come before the sample
   ProfileStack* stack = mPrimaryThreadProfile.GetStack();
   for (int i = 0; stack->getMarker(i) != NULL; i++) {
-    mPrimaryThreadProfile.addTag(ProfileEntry('m', stack->getMarker(i)));
+    addDynamicTag(mPrimaryThreadProfile, 'm', stack->getMarker(i));
   }
   stack->mQueueClearMarker = true;
 
   bool recordSample = true;
   if (mJankOnly) {
     // if we are on a different event we can discard any temporary samples
     // we've kept around
     if (sLastSampledEventGeneration != sCurrentEventGeneration) {
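
In terms of buffer cost, Tick() now emits one header entry plus ceil((strlen(marker) + 1) / sizeof(void*)) 'd' entries per pending marker, instead of a single 'm' entry that stored only the caller's string pointer. A small standalone check of that arithmetic (dynamicChunkCount and main() are illustrative, not part of the patch):

// Illustrative only: how many 'd' entries a marker string costs on this
// platform, matching the loop structure of addDynamicTag().
#include <cstdio>
#include <cstring>

static size_t dynamicChunkCount(const char* aStr)
{
  size_t strLen = strlen(aStr) + 1;                    // the '\0' is stored too
  return (strLen + sizeof(void*) - 1) / sizeof(void*); // round up to whole words
}

int main()
{
  // On a 64-bit build "DOMEvent" is 9 bytes with its terminator, so Tick()
  // would emit one 'm' header entry plus 2 'd' entries for it.
  printf("chunks for \"DOMEvent\": %zu\n", dynamicChunkCount("DOMEvent"));
}
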
--- a/tools/profiler/sps_sampler.h
+++ b/tools/profiler/sps_sampler.h
@@ -269,46 +269,59 @@ public:
     , mMarkerPointer(0)
     , mQueueClearMarker(false)
     , mRuntime(NULL)
     , mStartJSSampling(false)
   { }
 
   void addMarker(const char *aMarker)
   {
+    char* markerCopy = strdup(aMarker);
+    mSignalLock = true;
+    STORE_SEQUENCER();
+
     if (mQueueClearMarker) {
       clearMarkers();
     }
     if (!aMarker) {
       return; //discard
     }
     if (size_t(mMarkerPointer) == mozilla::ArrayLength(mMarkers)) {
       return; //array full, silently drop
     }
-    mMarkers[mMarkerPointer] = aMarker;
+    mMarkers[mMarkerPointer] = markerCopy;
+    mMarkerPointer++;
+
+    mSignalLock = false;
     STORE_SEQUENCER();
-    mMarkerPointer++;
   }
 
   // called within signal. Function must be reentrant
   const char* getMarker(int aMarkerId)
   {
-    if (mQueueClearMarker) {
-      clearMarkers();
-    }
-    if (aMarkerId < 0 ||
+    // If mSignalLock is set, the stack is inconsistent because it is being
+    // modified by the profiled thread. Postpone these markers until the
+    // next sample. A livelock is practically impossible and would show up
+    // in a profile as many samples inside 'addMarker', so we ignore that
+    // scenario.
+    // If mQueueClearMarker is set, the sampler thread has already read the
+    // markers and they are pending deletion.
+    if (mSignalLock || mQueueClearMarker || aMarkerId < 0 ||
       static_cast<mozilla::sig_safe_t>(aMarkerId) >= mMarkerPointer) {
       return NULL;
     }
     return mMarkers[aMarkerId];
   }
 
   // called within signal. Function must be reentrant
   void clearMarkers()
   {
+    for (mozilla::sig_safe_t i = 0; i < mMarkerPointer; i++) {
+      free(mMarkers[i]);
+    }
     mMarkerPointer = 0;
     mQueueClearMarker = false;
   }
 
   void push(const char *aName, uint32_t line)
   {
     push(aName, NULL, false, line);
   }
@@ -365,21 +378,23 @@ public:
     mStartJSSampling = false;
     if (mRuntime)
       js::EnableRuntimeProfilingStack(mRuntime, false);
   }
 
   // Keep a list of active checkpoints
   StackEntry volatile mStack[1024];
   // Keep a list of active markers to be applied to the next sample taken
-  char const * volatile mMarkers[1024];
+  char* mMarkers[1024];
  private:
   // This may exceed the length of mStack, so instead use the stackSize() method
   // to determine the number of valid samples in mStack
-  volatile mozilla::sig_safe_t mStackPointer;
+  mozilla::sig_safe_t mStackPointer;
+  // If this is set then it's not safe to read mStackPointer from the signal handler
+  volatile bool mSignalLock;
  public:
   volatile mozilla::sig_safe_t mMarkerPointer;
   // We don't want to modify _markers from within the signal so we allow
   // it to queue a clear operation.
   volatile mozilla::sig_safe_t mQueueClearMarker;
   // The runtime which is being sampled
   JSRuntime *mRuntime;
   // Start JS Profiling when possible
@@ -428,15 +443,21 @@ inline void mozilla_sampler_call_exit(vo
   stack->pop();
 }
 
 inline void mozilla_sampler_add_marker(const char *aMarker)
 {
   if (!stack_key_initialized)
     return;
 
+  // Don't insert a marker if we're not profiling to avoid
+  // the heap copy (malloc).
+  if (!mozilla_sampler_is_active()) {
+    return;
+  }
+
   ProfileStack *stack = tlsStack.get();
   if (!stack) {
     return;
   }
   stack->addMarker(aMarker);
 }
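
To see the whole handshake in one place, here is a condensed model of the marker queue this patch builds into ProfileStack. It is only a sketch under stated assumptions: std::atomic stands in for the STORE_SEQUENCER() barriers and mozilla::sig_safe_t types, and, unlike the code above, it checks the argument before calling strdup and frees the copy when the array is full. It is meant to show the ordering, not to replace the real ProfileStack.

// Condensed model of the addMarker()/getMarker()/clearMarkers() handshake.
// All names below are sketch-local; only the protocol mirrors the patch.
#include <atomic>
#include <cstdlib>
#include <cstring>

class MarkerQueueSketch {
public:
  // Called on the profiled thread (never from the signal handler).
  void addMarker(const char* aMarker)
  {
    if (!aMarker)
      return;                                  // nothing to copy
    char* markerCopy = strdup(aMarker);        // heap copy owned by the queue
    mSignalLock.store(true, std::memory_order_release);

    if (mQueueClearMarker)
      clearMarkers();                          // free entries the sampler has consumed
    if (mMarkerPointer == kMaxMarkers)
      free(markerCopy);                        // array full: silently drop
    else
      mMarkers[mMarkerPointer++] = markerCopy;

    mSignalLock.store(false, std::memory_order_release);
  }

  // Called from the sampler's signal handler; must not block or allocate.
  const char* getMarker(int aMarkerId)
  {
    // If the profiled thread is mid-update, or the previous batch is still
    // pending deletion, postpone the markers until the next sample.
    if (mSignalLock.load(std::memory_order_acquire) || mQueueClearMarker ||
        aMarkerId < 0 || static_cast<size_t>(aMarkerId) >= mMarkerPointer)
      return nullptr;
    return mMarkers[aMarkerId];
  }

  // The sampler sets this after draining the queue; the owning thread frees
  // the copies on its next addMarker() call.
  void queueClear() { mQueueClearMarker = true; }

private:
  void clearMarkers()
  {
    for (size_t i = 0; i < mMarkerPointer; i++)
      free(mMarkers[i]);
    mMarkerPointer = 0;
    mQueueClearMarker = false;
  }

  static constexpr size_t kMaxMarkers = 1024;
  char* mMarkers[kMaxMarkers];
  size_t mMarkerPointer = 0;
  std::atomic<bool> mSignalLock{false};
  volatile bool mQueueClearMarker = false;
};

In this model the sampler's tick drains getMarker(i) until it returns nullptr, emits each string as dynamic tags, and then calls queueClear(); the heap copies are released the next time the profiled thread adds a marker, which is exactly the deferral mQueueClearMarker exists for.
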