Bug 1476405: Part 1 - Allow enumerating non-native nsThread threads. r=erahm
☠☠ backed out by 77b07565d021 ☠☠
author Kris Maglione <maglione.k@gmail.com>
date Fri, 20 Jul 2018 13:48:50 -0700
changeset 483789 c767b1b618fbdc8bc894719f5ed7ecdcc9fc5165
parent 483788 03c1386d08390bb8d983162e1181f7d5d5780788
child 483790 236b366fdf3731ef95e0ba75b8f24f03181343ee
push id 9719
push user ffxbld-merge
push date Fri, 24 Aug 2018 17:49:46 +0000
reviewers erahm
bugs 1476405
milestone 63.0a1
Bug 1476405: Part 1 - Allow enumerating non-native nsThread threads. r=erahm MozReview-Commit-ID: 1JKxWeejqzi
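
Editorial note: the patch makes enumeration possible by registering every nsThread in a process-wide ThreadList() guarded by ThreadListMutex() (see the InitCommon() and destructor hunks below). The hunk context shows an nsThread::Enumerate() method but not its body, so the following is only a hedged sketch of what such an enumeration helper could look like; the return type and the snapshot-under-lock approach are assumptions, not the actual implementation.

// Hypothetical sketch only -- not code from this changeset. Snapshot the
// global thread list while holding its mutex so callers can iterate the
// result without keeping ThreadListMutex() held.
/* static */ nsTArray<RefPtr<nsThread>>
nsThread::Enumerate()
{
  nsTArray<RefPtr<nsThread>> threads;
  OffTheBooksMutexAutoLock mal(ThreadListMutex());
  for (nsThread* thread = ThreadList().getFirst(); thread;
       thread = thread->getNext()) {
    threads.AppendElement(thread);
  }
  return threads;
}
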
xpcom/threads/nsThread.cpp
xpcom/threads/nsThread.h
--- a/xpcom/threads/nsThread.cpp
+++ b/xpcom/threads/nsThread.cpp
@@ -415,90 +415,25 @@ nsThread::Enumerate()
 nsThread::ThreadFunc(void* aArg)
 {
   using mozilla::ipc::BackgroundChild;
 
   ThreadInitData* initData = static_cast<ThreadInitData*>(aArg);
   nsThread* self = initData->thread;  // strong reference
 
   self->mThread = PR_GetCurrentThread();
-  self->mThreadId = uint32_t(PlatformThread::CurrentId());
   self->mVirtualThread = GetCurrentVirtualThread();
   self->mEventTarget->SetCurrentThread();
   SetupCurrentThreadForChaosMode();
 
   if (!initData->name.IsEmpty()) {
     NS_SetCurrentThreadName(initData->name.BeginReading());
   }
 
-  {
-#if defined(XP_LINUX)
-    pthread_attr_t attr;
-    pthread_attr_init(&attr);
-    pthread_getattr_np(pthread_self(), &attr);
-
-    size_t stackSize;
-    pthread_attr_getstack(&attr, &self->mStackBase, &stackSize);
-
-    // Glibc prior to 2.27 reports the stack size and base including the guard
-    // region, so we need to compensate for it to get accurate accounting.
-    // Also, this behavior difference isn't guarded by a versioned symbol, so we
-    // actually need to check the runtime glibc version, not the version we were
-    // compiled against.
-    static bool sAdjustForGuardSize = ({
-#ifdef __GLIBC__
-      unsigned major, minor;
-      sscanf(gnu_get_libc_version(), "%u.%u", &major, &minor) < 2 ||
-        major < 2 || (major == 2 && minor < 27);
-#else
-      false;
-#endif
-    });
-    if (sAdjustForGuardSize) {
-      size_t guardSize;
-      pthread_attr_getguardsize(&attr, &guardSize);
-
-      // Note: This assumes that the stack grows down, as is the case on all of
-      // our tier 1 platforms. On platforms where the stack grows up, the
-      // mStackBase adjustment is unnecessary, but doesn't cause any harm other
-      // than under-counting stack memory usage by one page.
-      self->mStackBase = reinterpret_cast<char*>(self->mStackBase) + guardSize;
-      stackSize -= guardSize;
-    }
-
-    self->mStackSize = stackSize;
-
-    // This is a bit of a hack.
-    //
-    // We really do want the NOHUGEPAGE flag on our thread stacks, since we
-    // don't expect any of them to need anywhere near 2MB of space. But setting
-    // it here is too late to have an effect, since the first stack page has
-    // already been faulted into existence, and NSPR doesn't give us a way to set
-    // it beforehand.
-    //
-    // What this does get us, however, is a different set of VM flags on our
-    // thread stacks compared to normal heap memory. Which makes the Linux
-    // kernel report them as separate regions, even when they are adjacent to
-    // heap memory. This allows us to accurately track the actual memory
-    // consumption of our allocated stacks.
-    madvise(self->mStackBase, stackSize, MADV_NOHUGEPAGE);
-
-    pthread_attr_destroy(&attr);
-#elif defined(XP_WIN)
-    static const DynamicallyLinkedFunctionPtr<GetCurrentThreadStackLimitsFn>
-      sGetStackLimits(L"kernel32.dll", "GetCurrentThreadStackLimits");
-
-    if (sGetStackLimits) {
-      ULONG_PTR stackBottom, stackTop;
-      sGetStackLimits(&stackBottom, &stackTop);
-      self->mStackBase = reinterpret_cast<void*>(stackBottom);
-      self->mStackSize = stackTop - stackBottom;
-    }
-#endif
-  }
+  self->InitCommon();
 
   // Inform the ThreadManager
   nsThreadManager::get().RegisterCurrentThread(*self);
 
   mozilla::IOInterposer::RegisterCurrentThread();
 
   // This must come after the call to nsThreadManager::RegisterCurrentThread(),
   // because that call is needed to properly set up this thread as an nsThread,
@@ -569,16 +504,91 @@ nsThread::ThreadFunc(void* aArg)
 
 #ifdef MOZ_TASK_TRACER
   FreeTraceInfo();
 #endif
 
   NS_RELEASE(self);
 }
 
+void
+nsThread::InitCommon()
+{
+  mThreadId = uint32_t(PlatformThread::CurrentId());
+
+  {
+#if defined(XP_LINUX)
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_getattr_np(pthread_self(), &attr);
+
+    size_t stackSize;
+    pthread_attr_getstack(&attr, &mStackBase, &stackSize);
+
+    // Glibc prior to 2.27 reports the stack size and base including the guard
+    // region, so we need to compensate for it to get accurate accounting.
+    // Also, this behavior difference isn't guarded by a versioned symbol, so we
+    // actually need to check the runtime glibc version, not the version we were
+    // compiled against.
+    static bool sAdjustForGuardSize = ({
+#ifdef __GLIBC__
+      unsigned major, minor;
+      sscanf(gnu_get_libc_version(), "%u.%u", &major, &minor) < 2 ||
+        major < 2 || (major == 2 && minor < 27);
+#else
+      false;
+#endif
+    });
+    if (sAdjustForGuardSize) {
+      size_t guardSize;
+      pthread_attr_getguardsize(&attr, &guardSize);
+
+      // Note: This assumes that the stack grows down, as is the case on all of
+      // our tier 1 platforms. On platforms where the stack grows up, the
+      // mStackBase adjustment is unnecessary, but doesn't cause any harm other
+      // than under-counting stack memory usage by one page.
+      mStackBase = reinterpret_cast<char*>(mStackBase) + guardSize;
+      stackSize -= guardSize;
+    }
+
+    mStackSize = stackSize;
+
+    // This is a bit of a hack.
+    //
+    // We really do want the NOHUGEPAGE flag on our thread stacks, since we
+    // don't expect any of them to need anywhere near 2MB of space. But setting
+    // it here is too late to have an effect, since the first stack page has
+    // already been faulted into existence, and NSPR doesn't give us a way to set
+    // it beforehand.
+    //
+    // What this does get us, however, is a different set of VM flags on our
+    // thread stacks compared to normal heap memory. Which makes the Linux
+    // kernel report them as separate regions, even when they are adjacent to
+    // heap memory. This allows us to accurately track the actual memory
+    // consumption of our allocated stacks.
+    madvise(mStackBase, stackSize, MADV_NOHUGEPAGE);
+
+    pthread_attr_destroy(&attr);
+#elif defined(XP_WIN)
+    static const DynamicallyLinkedFunctionPtr<GetCurrentThreadStackLimitsFn>
+      sGetStackLimits(L"kernel32.dll", "GetCurrentThreadStackLimits");
+
+    if (sGetStackLimits) {
+      ULONG_PTR stackBottom, stackTop;
+      sGetStackLimits(&stackBottom, &stackTop);
+      mStackBase = reinterpret_cast<void*>(stackBottom);
+      mStackSize = stackTop - stackBottom;
+    }
+#endif
+  }
+
+  OffTheBooksMutexAutoLock mal(ThreadListMutex());
+  ThreadList().insertBack(this);
+}
+
 //-----------------------------------------------------------------------------
 
 // Tell the crash reporter to save a memory report if our heuristics determine
 // that an OOM failure is likely to occur soon.
 // Memory usage will not be checked more than every 30 seconds or saved more
 // than every 3 minutes
 // If |aShouldSave == kForceReport|, a report will be saved regardless of
 // whether the process is low on memory or not. However, it will still not be
@@ -665,17 +675,27 @@ nsThread::nsThread(NotNull<SynchronizedE
   , mCurrentPerformanceCounter(nullptr)
 {
 }
 
 nsThread::~nsThread()
 {
   NS_ASSERTION(mRequestedShutdownContexts.IsEmpty(),
                "shouldn't be waiting on other threads to shutdown");
-  MOZ_ASSERT(!isInList());
+
+  // We shouldn't need to lock before checking isInList at this point. We're
+  // destroying the last reference to this object, so there's no way for anyone
+  // else to remove it in the middle of our check. And the not-in-list state is
+  // determined by the element's next and previous members pointing to itself, so a
+  // non-atomic update to an adjacent member won't affect the outcome either.
+  if (isInList()) {
+    OffTheBooksMutexAutoLock mal(ThreadListMutex());
+    removeFrom(ThreadList());
+  }
+
 #ifdef DEBUG
   // We deliberately leak these so they can be tracked by the leak checker.
   // If you're having nsThreadShutdownContext leaks, you can set:
   //   XPCOM_MEM_LOG_CLASSES=nsThreadShutdownContext
   // during a test run and that will at least tell you what thread is
   // requesting shutdown on another, which can be helpful for diagnosing
   // the leak.
   for (size_t i = 0; i < mRequestedShutdownContexts.Length(); ++i) {
@@ -699,21 +719,16 @@ nsThread::Init(const nsACString& aName)
   // ThreadFunc is responsible for setting mThread
   if (!PR_CreateThread(PR_USER_THREAD, ThreadFunc, &initData,
                        PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                        PR_JOINABLE_THREAD, mStackSize)) {
     NS_RELEASE_THIS();
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
-  {
-    OffTheBooksMutexAutoLock mal(ThreadListMutex());
-    ThreadList().insertBack(this);
-  }
-
   // ThreadFunc will wait for this event to be run before it tries to access
   // mThread.  By delaying insertion of this event into the queue, we ensure
   // that mThread is set properly.
   {
     mEvents->PutEvent(do_AddRef(startup), EventPriority::Normal); // retain a reference
   }
 
   // Wait for thread to call ThreadManager::SetupCurrentThread, which completes
@@ -723,16 +738,17 @@ nsThread::Init(const nsACString& aName)
 }
 
 nsresult
 nsThread::InitCurrentThread()
 {
   mThread = PR_GetCurrentThread();
   mVirtualThread = GetCurrentVirtualThread();
   SetupCurrentThreadForChaosMode();
+  InitCommon();
 
   nsThreadManager::get().RegisterCurrentThread(*this);
   return NS_OK;
 }
 
 //-----------------------------------------------------------------------------
 // nsIEventTarget
 
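
Editorial note: taken together, the nsThread.cpp changes above mean that threads which were not created through nsThread::Init() (the "non-native" threads from the commit message) also end up in ThreadList() once they are wrapped, because InitCurrentThread() now calls InitCommon(). A hedged usage sketch, assuming the usual NS_GetCurrentThread() entry point from nsThreadUtils.h:

// Hypothetical usage sketch, not part of this changeset. A thread created
// outside of nsThread/NSPR becomes enumerable as soon as something on that
// thread asks for an nsThread wrapper, since InitCurrentThread() now runs
// InitCommon() and therefore registers the wrapper in ThreadList().
void NoteForeignThread()
{
  // Creates (and, with this patch, registers) an nsThread wrapper for the
  // calling thread if one does not already exist.
  nsIThread* current = NS_GetCurrentThread();
  mozilla::Unused << current;
}
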
--- a/xpcom/threads/nsThread.h
+++ b/xpcom/threads/nsThread.h
@@ -61,16 +61,22 @@ public:
            uint32_t aStackSize);
 
   // Initialize this as a wrapper for a new PRThread, and optionally give it a name.
   nsresult Init(const nsACString& aName = NS_LITERAL_CSTRING(""));
 
   // Initialize this as a wrapper for the current PRThread.
   nsresult InitCurrentThread();
 
+private:
+  // Initializes the mThreadId and stack base/size members, and adds the thread
+  // to the ThreadList().
+  void InitCommon();
+
+public:
   // The PRThread corresponding to this thread.
   PRThread* GetPRThread()
   {
     return mThread;
   }
 
   const void* StackBase() const { return mStackBase; }
   size_t StackSize() const { return mStackSize; }
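
Editorial note: the header continues to expose StackBase() and StackSize(), which InitCommon() now fills in for both native and wrapped threads on Linux and Windows. As a final hedged sketch, building on the assumed Enumerate() shape above, this is roughly how a caller could combine enumeration with those accessors; the function and its name are illustrative only.

// Hypothetical sketch, not part of this changeset: total stack space of all
// registered nsThreads. StackSize() stays 0 on platforms where InitCommon()
// could not determine the stack bounds.
static size_t
TotalNsThreadStackSize()
{
  size_t total = 0;
  for (const RefPtr<nsThread>& thread : nsThread::Enumerate()) {
    total += thread->StackSize();
  }
  return total;
}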