Bug 1461555 - Rename ProfileEntry to ProfilingStackFrame. r=njn
author Markus Stange <mstange@themasta.com>
Tue, 15 May 2018 01:14:03 -0400
changeset 472639 d3ca79e708a53fd8fc71960cbccb8e1445b777dd
parent 472638 c1e7592188cb953ca10f267dc3fdb0724f34960e
child 472640 589325c8b1a3cf88a7a1f66fd0c0d8864c515044
push id 9374
push user jlund@mozilla.com
push date Mon, 18 Jun 2018 21:43:20 +0000
treeherder mozilla-beta@160e085dfb0b
reviewers njn
bugs 1461555
milestone 62.0a1
Bug 1461555 - Rename ProfileEntry to ProfilingStackFrame. r=njn

The term "entry" is already used for elements in the profile buffer.

MozReview-Commit-ID: 1aB22V6veQh
devtools/client/performance/modules/categories.js
dom/base/nsJSUtils.cpp
js/public/ProfilingStack.h
js/src/gc/GC.cpp
js/src/vm/GeckoProfiler-inl.h
js/src/vm/GeckoProfiler.cpp
js/src/vm/GeckoProfiler.h
js/src/vm/ProfilingStack.cpp
mozglue/misc/AutoProfilerLabel.h
toolkit/components/backgroundhangmonitor/ThreadStackHelper.cpp
toolkit/components/backgroundhangmonitor/ThreadStackHelper.h
tools/profiler/core/ProfileBuffer.cpp
tools/profiler/core/ProfileBuffer.h
tools/profiler/core/ProfileBufferEntry.cpp
tools/profiler/core/platform.cpp
tools/profiler/public/GeckoProfiler.h
tools/profiler/tests/gtest/GeckoProfiler.cpp
--- a/devtools/client/performance/modules/categories.js
+++ b/devtools/client/performance/modules/categories.js
@@ -44,36 +44,36 @@ const CATEGORIES = [{
 }, {
   color: "#8fa1b2",
   abbrev: "tools",
   label: L10N.getStr("category.tools")
 }];
 
 /**
  * Mapping from category bitmasks in the profiler data to additional details.
- * To be kept in sync with the js::ProfileEntry::Category in ProfilingStack.h
+ * To be kept in sync with the js::ProfilingStackFrame::Category in ProfilingStack.h
  */
 const CATEGORY_MAPPINGS = {
-  // js::ProfileEntry::Category::OTHER
+  // js::ProfilingStackFrame::Category::OTHER
   "16": CATEGORIES[0],
-  // js::ProfileEntry::Category::CSS
+  // js::ProfilingStackFrame::Category::CSS
   "32": CATEGORIES[1],
-  // js::ProfileEntry::Category::JS
+  // js::ProfilingStackFrame::Category::JS
   "64": CATEGORIES[2],
-  // js::ProfileEntry::Category::GC
+  // js::ProfilingStackFrame::Category::GC
   "128": CATEGORIES[3],
-  // js::ProfileEntry::Category::CC
+  // js::ProfilingStackFrame::Category::CC
   "256": CATEGORIES[3],
-  // js::ProfileEntry::Category::NETWORK
+  // js::ProfilingStackFrame::Category::NETWORK
   "512": CATEGORIES[4],
-  // js::ProfileEntry::Category::GRAPHICS
+  // js::ProfilingStackFrame::Category::GRAPHICS
   "1024": CATEGORIES[5],
-  // js::ProfileEntry::Category::STORAGE
+  // js::ProfilingStackFrame::Category::STORAGE
   "2048": CATEGORIES[6],
-  // js::ProfileEntry::Category::EVENTS
+  // js::ProfilingStackFrame::Category::EVENTS
   "4096": CATEGORIES[7],
   // non-bitmasks for specially-assigned categories
   "9000": CATEGORIES[8],
 };
 
 /**
  * Get the numeric bitmask (or set of masks) for the given category
  * abbreviation. See `CATEGORIES` and `CATEGORY_MAPPINGS` above.
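
The numeric keys in CATEGORY_MAPPINGS above are the raw values of the js::ProfilingStackFrame::Category enum, which ProfilingStack.h (further down in this patch) packs into the kindAndCategory_ field: "Bits 0...1 hold the Kind. Bits 4...12 hold the Category." A minimal C++ sketch of that packing, using illustrative helper names (pack, kindOf, categoryOf) that are not part of the profiler API; only the Kind values that appear in this patch are listed:

#include <cstdint>

enum class Kind : uint32_t { LABEL = 0, SP_MARKER = 1, JS_NORMAL = 2 };
enum class Category : uint32_t {
    OTHER = 1u << 4,   // 16, matching the "16" key above
    CSS   = 1u << 5,   // 32
    JS    = 1u << 6,   // 64
    // ... continues up to EVENTS = 1u << 12 (4096)
};

constexpr uint32_t kKindMask     = 0x3;      // bits 0..1
constexpr uint32_t kCategoryMask = 0x1ff0;   // bits 4..12

constexpr uint32_t pack(Kind kind, Category category) {
    return uint32_t(kind) | uint32_t(category);
}
constexpr Kind kindOf(uint32_t bits) { return Kind(bits & kKindMask); }
constexpr Category categoryOf(uint32_t bits) { return Category(bits & kCategoryMask); }

static_assert(categoryOf(pack(Kind::JS_NORMAL, Category::JS)) == Category::JS,
              "category bits survive packing");
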
--- a/dom/base/nsJSUtils.cpp
+++ b/dom/base/nsJSUtils.cpp
@@ -134,17 +134,17 @@ EvaluationExceptionToNSResult(JSContext*
   return NS_SUCCESS_DOM_SCRIPT_EVALUATION_THREW_UNCATCHABLE;
 }
 
 nsJSUtils::ExecutionContext::ExecutionContext(JSContext* aCx,
                                               JS::Handle<JSObject*> aGlobal)
   :
 #ifdef MOZ_GECKO_PROFILER
     mAutoProfilerLabel("nsJSUtils::ExecutionContext", /* dynamicStr */ nullptr,
-                       __LINE__, js::ProfileEntry::Category::JS),
+                       __LINE__, js::ProfilingStackFrame::Category::JS),
 #endif
     mCx(aCx)
   , mCompartment(aCx, aGlobal)
   , mRetValue(aCx)
   , mScopeChain(aCx)
   , mRv(NS_OK)
   , mSkip(false)
   , mCoerceToString(false)
--- a/js/public/ProfilingStack.h
+++ b/js/public/ProfilingStack.h
@@ -23,157 +23,157 @@
 class JS_PUBLIC_API(JSTracer);
 
 #ifdef JS_BROKEN_GCC_ATTRIBUTE_WARNING
 #pragma GCC diagnostic pop
 #endif // JS_BROKEN_GCC_ATTRIBUTE_WARNING
 
 class PseudoStack;
 
-// This file defines the classes PseudoStack and ProfileEntry.
-// The PseudoStack manages an array of ProfileEntries.
+// This file defines the classes PseudoStack and ProfilingStackFrame.
+// The PseudoStack manages an array of ProfilingStackFrames.
 // Usage:
 //
 //  PseudoStack* pseudoStack = ...;
 //
 //  // For label frames:
 //  pseudoStack->pushLabelFrame(...);
-//  // Execute some code. When finished, pop the entry:
+//  // Execute some code. When finished, pop the frame:
 //  pseudoStack->pop();
 //
 //  // For JS stack frames:
 //  pseudoStack->pushJSFrame(...);
-//  // Execute some code. When finished, pop the entry:
+//  // Execute some code. When finished, pop the frame:
 //  pseudoStack->pop();
 //
 //
 // Concurrency considerations
 //
-// A thread's pseudo stack (and the entries inside it) is only modified by
+// A thread's pseudo stack (and the frames inside it) is only modified by
 // that thread. However, the pseudo stack can be *read* by a different thread,
 // the sampler thread: Whenever the profiler wants to sample a given thread A,
 // the following happens:
 //  (1) Thread A is suspended.
 //  (2) The sampler thread (thread S) reads the PseudoStack of thread A,
-//      including all ProfileEntries that are currently in that stack
-//      (pseudoStack->entries[0..pseudoStack->stackSize()]).
+//      including all ProfilingStackFrames that are currently in that stack
+//      (pseudoStack->frames[0..pseudoStack->stackSize()]).
 //  (3) Thread A is resumed.
 //
 // Thread suspension is achieved using platform-specific APIs; refer to each
 // platform's Sampler::SuspendAndSampleAndResumeThread implementation in
 // platform-*.cpp for details.
 //
 // When the thread is suspended, the values in pseudoStack->stackPointer and in
-// the entry range pseudoStack->entries[0..pseudoStack->stackPointer] need to
-// be in a consistent state, so that thread S does not read partially-
-// constructed profile entries. More specifically, we have two requirements:
-//  (1) When adding a new entry at the top of the stack, its ProfileEntry data
-//      needs to be put in place *before* the stackPointer is incremented, and
-//      the compiler + CPU need to know that this order matters.
-//  (2) When popping an entry from the stack and then preparing the
-//      ProfileEntry data for the next frame that is about to be pushed, the
-//      decrement of the stackPointer in pop() needs to happen *before* the
-//      ProfileEntry for the new frame is being popuplated, and the compiler +
-//      CPU need to know that this order matters.
+// the stack frame range pseudoStack->frames[0..pseudoStack->stackPointer] need
+// to be in a consistent state, so that thread S does not read partially-
+// constructed stack frames. More specifically, we have two requirements:
+//  (1) When adding a new frame at the top of the stack, its ProfilingStackFrame
+//      data needs to be put in place *before* the stackPointer is incremented,
+//      and the compiler + CPU need to know that this order matters.
+//  (2) When popping a frame from the stack and then preparing the
+//      ProfilingStackFrame data for the next frame that is about to be pushed,
+//      the decrement of the stackPointer in pop() needs to happen *before* the
+//      ProfilingStackFrame for the new frame is being populated, and the
+//      compiler + CPU need to know that this order matters.
 //
 // We can express the relevance of these orderings in multiple ways.
 // Option A is to make stackPointer an atomic with SequentiallyConsistent
 // memory ordering. This would ensure that no writes in thread A would be
 // reordered across any writes to stackPointer, which satisfies requirements
 // (1) and (2) at the same time. Option A is the simplest.
 // Option B is to use ReleaseAcquire memory ordering both for writes to
-// stackPointer *and* for writes to ProfileEntry fields. Release-stores ensure
-// that all writes that happened *before this write in program order* are not
-// reordered to happen after this write. ReleaseAcquire ordering places no
+// stackPointer *and* for writes to ProfilingStackFrame fields. Release-stores
+// ensure that all writes that happened *before this write in program order* are
+// not reordered to happen after this write. ReleaseAcquire ordering places no
 // requirements on the ordering of writes that happen *after* this write in
 // program order.
 // Using release-stores for writes to stackPointer expresses requirement (1),
-// and using release-stores for writes to the ProfileEntry fields expresses
-// requirement (2).
+// and using release-stores for writes to the ProfilingStackFrame fields
+// expresses requirement (2).
 //
 // Option B is more complicated than option A, but has much better performance
 // on x86/64: In a microbenchmark run on a Macbook Pro from 2017, switching
 // from option A to option B reduced the overhead of pushing+popping a
-// ProfileEntry by 10 nanoseconds.
+// ProfilingStackFrame by 10 nanoseconds.
 // On x86/64, release-stores require no explicit hardware barriers or lock
 // instructions.
 // On ARM/64, option B may be slower than option A, because the compiler will
 // generate hardware barriers for every single release-store instead of just
 // for the writes to stackPointer. However, the actual performance impact of
 // this has not yet been measured on ARM, so we're currently using option B
 // everywhere. This is something that we may want to change in the future once
 // we've done measurements.
 
 namespace js {
 
 // A call stack can be specified to the JS engine such that all JS entry/exits
-// to functions push/pop an entry to/from the specified stack.
+// to functions push/pop a stack frame to/from the specified stack.
 //
 // For more detailed information, see vm/GeckoProfiler.h.
 //
-class ProfileEntry
+class ProfilingStackFrame
 {
-    // A ProfileEntry represents either a C++ profile entry or a JS one.
+    // A ProfilingStackFrame represents either a label frame or a JS frame.
 
     // WARNING WARNING WARNING
     //
     // All the fields below are Atomic<...,ReleaseAcquire>. This is needed so
     // that writes to these fields are release-writes, which ensures that
     // earlier writes in this thread don't get reordered after the writes to
     // these fields. In particular, the decrement of the stack pointer in
     // PseudoStack::pop() is a write that *must* happen before the values in
-    // this ProfileEntry are changed. Otherwise, the sampler thread might see
-    // an inconsistent state where the stack pointer still points to a
-    // ProfileEntry which has already been popped off the stack and whose
+    // this ProfilingStackFrame are changed. Otherwise, the sampler thread might
+    // see an inconsistent state where the stack pointer still points to a
+    // ProfilingStackFrame which has already been popped off the stack and whose
     // fields have now been partially repopulated with new values.
     // See the "Concurrency considerations" paragraph at the top of this file
     // for more details.
 
-    // Descriptive label for this entry. Must be a static string! Can be an
-    // empty string, but not a null pointer.
+    // Descriptive label for this stack frame. Must be a static string! Can be
+    // an empty string, but not a null pointer.
     mozilla::Atomic<const char*, mozilla::ReleaseAcquire> label_;
 
-    // An additional descriptive string of this entry which is combined with
+    // An additional descriptive string of this frame which is combined with
     // |label_| in profiler output. Need not be (and usually isn't) static. Can
     // be null.
     mozilla::Atomic<const char*, mozilla::ReleaseAcquire> dynamicString_;
 
-    // Stack pointer for non-JS entries, the script pointer otherwise.
+    // Stack pointer for non-JS stack frames, the script pointer otherwise.
     mozilla::Atomic<void*, mozilla::ReleaseAcquire> spOrScript;
 
-    // Line number for non-JS entries, the bytecode offset otherwise.
+    // Line number for non-JS stack frames, the bytecode offset otherwise.
     mozilla::Atomic<int32_t, mozilla::ReleaseAcquire> lineOrPcOffset;
 
     // Bits 0...1 hold the Kind. Bits 2...3 are unused. Bits 4...12 hold the
     // Category.
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> kindAndCategory_;
 
     static int32_t pcToOffset(JSScript* aScript, jsbytecode* aPc);
 
   public:
-    ProfileEntry() = default;
-    ProfileEntry& operator=(const ProfileEntry& other)
+    ProfilingStackFrame() = default;
+    ProfilingStackFrame& operator=(const ProfilingStackFrame& other)
     {
         label_ = other.label();
         dynamicString_ = other.dynamicString();
         void* spScript = other.spOrScript;
         spOrScript = spScript;
         int32_t offset = other.lineOrPcOffset;
         lineOrPcOffset = offset;
         uint32_t kindAndCategory = other.kindAndCategory_;
         kindAndCategory_ = kindAndCategory;
         return *this;
     }
 
     enum class Kind : uint32_t {
         // A regular label frame. These usually come from AutoProfilerLabel.
         LABEL = 0,
 
-        // A special label frame indicating the start of a run of JS pseudostack
-        // entries. SP_MARKER frames are ignored, except for the sp field.
+        // A special frame indicating the start of a run of JS profiling stack
+        // frames. SP_MARKER frames are ignored, except for the sp field.
         // These frames are needed to get correct ordering between JS and LABEL
         // frames because JS frames don't carry sp information.
         // SP is short for "stack pointer".
         SP_MARKER = 1,
 
         // A normal JS frame.
         JS_NORMAL = 2,
 
@@ -239,17 +239,17 @@ class ProfileEntry
     }
 
     void initSpMarkerFrame(void* sp)
     {
         label_ = "";
         dynamicString_ = nullptr;
         spOrScript = sp;
         lineOrPcOffset = 0;
-        kindAndCategory_ = uint32_t(Kind::SP_MARKER) | uint32_t(ProfileEntry::Category::OTHER);
+        kindAndCategory_ = uint32_t(Kind::SP_MARKER) | uint32_t(ProfilingStackFrame::Category::OTHER);
         MOZ_ASSERT(isSpMarkerFrame());
     }
 
     void initJsFrame(const char* aLabel, const char* aDynamicString, JSScript* aScript,
                      jsbytecode* aPc)
     {
         label_ = aLabel;
         dynamicString_ = aDynamicString;
@@ -320,129 +320,115 @@ RegisterContextProfilingEventMarker(JSCo
 //
 // The PseudoStack is also read periodically by the profiler's sampler thread.
 // This happens only when the thread that owns the PseudoStack is suspended. So
 // there are no genuine parallel accesses.
 //
 // However, it is possible for pushing/popping to be interrupted by a periodic
 // sample. Because of this, we need pushing/popping to be effectively atomic.
 //
-// - When pushing a new entry, we increment the stack pointer -- making the new
-//   entry visible to the sampler thread -- only after the new entry has been
+// - When pushing a new frame, we increment the stack pointer -- making the new
+//   frame visible to the sampler thread -- only after the new frame has been
 //   fully written. The stack pointer is Atomic<uint32_t,ReleaseAcquire>, so
 //   the increment is a release-store, which ensures that this store is not
-//   reordered before the writes of the entry.
+//   reordered before the writes of the frame.
 //
-// - When popping an old entry, the only operation is the decrementing of the
+// - When popping an old frame, the only operation is the decrementing of the
 //   stack pointer, which is obviously atomic.
 //
 class PseudoStack final
 {
   public:
     PseudoStack()
       : stackPointer(0)
     {}
 
     ~PseudoStack();
 
     void pushLabelFrame(const char* label, const char* dynamicString, void* sp,
-                        uint32_t line, js::ProfileEntry::Category category) {
+                        uint32_t line, js::ProfilingStackFrame::Category category) {
         uint32_t oldStackPointer = stackPointer;
 
-        if (MOZ_LIKELY(entryCapacity > oldStackPointer) || MOZ_LIKELY(ensureCapacitySlow()))
-            entries[oldStackPointer].initLabelFrame(label, dynamicString, sp, line, category);
+        if (MOZ_LIKELY(capacity > oldStackPointer) || MOZ_LIKELY(ensureCapacitySlow()))
+            frames[oldStackPointer].initLabelFrame(label, dynamicString, sp, line, category);
 
         // This must happen at the end! The compiler will not reorder this
         // update because stackPointer is Atomic<..., ReleaseAcquire>, so the
         // writes above will not be reordered below the stackPointer store.
         // Do the read and the write as two separate statements, in order to
         // make it clear that we don't need an atomic increment, which would be
         // more expensive on x86 than the separate operations done here.
         // This thread is the only one that ever changes the value of
         // stackPointer.
         stackPointer = oldStackPointer + 1;
     }
 
     void pushSpMarkerFrame(void* sp) {
         uint32_t oldStackPointer = stackPointer;
 
-        if (MOZ_LIKELY(entryCapacity > oldStackPointer) || MOZ_LIKELY(ensureCapacitySlow()))
-            entries[oldStackPointer].initSpMarkerFrame(sp);
+        if (MOZ_LIKELY(capacity > oldStackPointer) || MOZ_LIKELY(ensureCapacitySlow()))
+            frames[oldStackPointer].initSpMarkerFrame(sp);
 
-        // This must happen at the end! The compiler will not reorder this
-        // update because stackPointer is Atomic<..., ReleaseAcquire>, so the
-        // writes above will not be reordered below the stackPointer store.
-        // Do the read and the write as two separate statements, in order to
-        // make it clear that we don't need an atomic increment, which would be
-        // more expensive on x86 than the separate operations done here.
-        // This thread is the only one that ever changes the value of
-        // stackPointer.
+        // This must happen at the end, see the comment in pushLabelFrame.
         stackPointer = oldStackPointer + 1;
     }
 
     void pushJsFrame(const char* label, const char* dynamicString, JSScript* script,
                      jsbytecode* pc) {
         uint32_t oldStackPointer = stackPointer;
 
-        if (MOZ_LIKELY(entryCapacity > oldStackPointer) || MOZ_LIKELY(ensureCapacitySlow()))
-            entries[oldStackPointer].initJsFrame(label, dynamicString, script, pc);
+        if (MOZ_LIKELY(capacity > oldStackPointer) || MOZ_LIKELY(ensureCapacitySlow()))
+            frames[oldStackPointer].initJsFrame(label, dynamicString, script, pc);
 
-        // This must happen at the end! The compiler will not reorder this
-        // update because stackPointer is Atomic<..., ReleaseAcquire>, which
-        // makes this assignment a release-store, so the writes above will not
-        // be reordered to occur after the stackPointer store.
-        // Do the read and the write as two separate statements, in order to
-        // make it clear that we don't need an atomic increment, which would be
-        // more expensive on x86 than the separate operations done here.
-        // This thread is the only one that ever changes the value of
-        // stackPointer.
+        // This must happen at the end, see the comment in pushLabelFrame.
         stackPointer = oldStackPointer + 1;
     }
 
     void pop() {
         MOZ_ASSERT(stackPointer > 0);
         // Do the read and the write as two separate statements, in order to
         // make it clear that we don't need an atomic decrement, which would be
         // more expensive on x86 than the separate operations done here.
         // This thread is the only one that ever changes the value of
         // stackPointer.
         uint32_t oldStackPointer = stackPointer;
         stackPointer = oldStackPointer - 1;
     }
 
     uint32_t stackSize() const { return std::min(uint32_t(stackPointer), stackCapacity()); }
-    uint32_t stackCapacity() const { return entryCapacity; }
+    uint32_t stackCapacity() const { return capacity; }
 
   private:
     // Out of line path for expanding the buffer, since otherwise this would get inlined in every
     // DOM WebIDL call.
     MOZ_COLD MOZ_MUST_USE bool ensureCapacitySlow();
 
     // No copying.
     PseudoStack(const PseudoStack&) = delete;
     void operator=(const PseudoStack&) = delete;
 
     // No moving either.
     PseudoStack(PseudoStack&&) = delete;
     void operator=(PseudoStack&&) = delete;
 
-    uint32_t entryCapacity = 0;
+    uint32_t capacity = 0;
 
   public:
 
-    // The pointer to the stack entries, this is read from the profiler thread and written from the
+    // The pointer to the stack frames; this is read from the profiler thread and written from the
     // current thread.
     //
     // This is effectively a unique pointer.
-    mozilla::Atomic<js::ProfileEntry*> entries { nullptr };
+    mozilla::Atomic<js::ProfilingStackFrame*> frames { nullptr };
 
-    // This may exceed the entry capacity, so instead use the stackSize() method to
-    // determine the number of valid samples in entries. When this is less
-    // than MaxEntries, it refers to the first free entry past the top of the
-    // in-use stack (i.e. entries[stackPointer - 1] is the top stack entry).
+    // This may exceed the capacity, so instead use the stackSize() method to
+    // determine the number of valid frames in `frames`. When this is less
+    // than stackCapacity(), it refers to the first free stack frame past the top
+    // of the in-use stack (i.e. frames[stackPointer - 1] is the top stack
+    // frame).
     //
     // WARNING WARNING WARNING
     //
     // This is an atomic variable that uses ReleaseAcquire memory ordering.
     // See the "Concurrency considerations" paragraph at the top of this file
     // for more details.
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> stackPointer;
 };
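
The "option B" memory-ordering scheme described in the header comment above is easier to see in code. Below is a minimal, self-contained sketch using std::atomic with explicit release stores; the real code gets the same effect from mozilla::Atomic<..., ReleaseAcquire>, and the MiniFrame/MiniStack names are illustrative, not Gecko types:

#include <atomic>
#include <cstdint>

struct MiniFrame {
    std::atomic<const char*> label{nullptr};
    std::atomic<int32_t> line{0};
};

struct MiniStack {
    static constexpr uint32_t kCapacity = 128;
    MiniFrame frames[kCapacity];
    std::atomic<uint32_t> stackPointer{0};

    void push(const char* label, int32_t line) {
        // Only this thread writes stackPointer, so a relaxed load is enough.
        uint32_t sp = stackPointer.load(std::memory_order_relaxed);
        if (sp < kCapacity) {
            // Requirement (1): the frame data is written with release stores,
            // before the stack pointer is incremented.
            frames[sp].label.store(label, std::memory_order_release);
            frames[sp].line.store(line, std::memory_order_release);
        }
        // Publishing store: a sampler that acquire-loads stackPointer after
        // this point sees fully initialized frames[0..sp].
        stackPointer.store(sp + 1, std::memory_order_release);
    }

    void pop() {
        uint32_t sp = stackPointer.load(std::memory_order_relaxed);
        // Requirement (2): this decrement precedes, in program order, any
        // release-store that repopulates the slot on the next push, so it
        // cannot be reordered after those writes.
        stackPointer.store(sp - 1, std::memory_order_release);
    }
};
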
@@ -460,17 +446,17 @@ class GeckoProfilerThread
     friend class GeckoProfilerBaselineOSRMarker;
 
     PseudoStack*         pseudoStack_;
 
   public:
     GeckoProfilerThread();
 
     uint32_t stackPointer() { MOZ_ASSERT(installed()); return pseudoStack_->stackPointer; }
-    ProfileEntry* stack() { return pseudoStack_->entries; }
+    ProfilingStackFrame* stack() { return pseudoStack_->frames; }
     PseudoStack* getPseudoStack() { return pseudoStack_; }
 
     /* management of whether instrumentation is on or off */
     bool installed() { return pseudoStack_ != nullptr; }
 
     void setProfilingStack(PseudoStack* pseudoStack);
     void trace(JSTracer* trc);
 
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -6808,17 +6808,17 @@ HeapStateToLabel(JS::HeapState heapState
     return nullptr;
 }
 
 /* Start a new heap session. */
 AutoTraceSession::AutoTraceSession(JSRuntime* rt, JS::HeapState heapState)
   : runtime(rt),
     prevState(rt->mainContextFromOwnThread()->heapState),
     pseudoFrame(rt->mainContextFromOwnThread(), HeapStateToLabel(heapState),
-                ProfileEntry::Category::GC)
+                ProfilingStackFrame::Category::GC)
 {
     MOZ_ASSERT(prevState == JS::HeapState::Idle);
     MOZ_ASSERT(heapState != JS::HeapState::Idle);
     MOZ_ASSERT_IF(heapState == JS::HeapState::MajorCollecting, rt->gc.nursery().isEmpty());
 
     // Session always begins with lock held, see comment in class definition.
     maybeLock.emplace(rt);
 
--- a/js/src/vm/GeckoProfiler-inl.h
+++ b/js/src/vm/GeckoProfiler-inl.h
@@ -18,18 +18,18 @@ inline void
 GeckoProfilerThread::updatePC(JSContext* cx, JSScript* script, jsbytecode* pc)
 {
     if (!cx->runtime()->geckoProfiler().enabled())
         return;
 
     uint32_t sp = pseudoStack_->stackPointer;
     if (sp - 1 < pseudoStack_->stackCapacity()) {
         MOZ_ASSERT(sp > 0);
-        MOZ_ASSERT(pseudoStack_->entries[sp - 1].rawScript() == script);
-        pseudoStack_->entries[sp - 1].setPC(pc);
+        MOZ_ASSERT(pseudoStack_->frames[sp - 1].rawScript() == script);
+        pseudoStack_->frames[sp - 1].setPC(pc);
     }
 }
 
 /*
  * This class is used to suppress profiler sampling during
  * critical sections where stack state is not valid.
  */
 class MOZ_RAII AutoSuppressProfilerSampling
@@ -76,17 +76,17 @@ GeckoProfilerEntryMarker::~GeckoProfiler
 
     profiler_->pseudoStack_->pop();    // the JS frame
     profiler_->pseudoStack_->pop();    // the BEGIN_PSEUDO_JS frame
     MOZ_ASSERT(spBefore_ == profiler_->stackPointer());
 }
 
 MOZ_ALWAYS_INLINE
 AutoGeckoProfilerEntry::AutoGeckoProfilerEntry(JSContext* cx, const char* label,
-                                               ProfileEntry::Category category
+                                               ProfilingStackFrame::Category category
                                                MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
   : profiler_(&cx->geckoProfiler())
 {
     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
     if (MOZ_LIKELY(!profiler_->installed())) {
         profiler_ = nullptr;
         return;
     }
--- a/js/src/vm/GeckoProfiler.cpp
+++ b/js/src/vm/GeckoProfiler.cpp
@@ -213,17 +213,17 @@ GeckoProfilerThread::enter(JSContext* cx
 #ifdef DEBUG
     // In debug builds, assert the JS pseudo frames already on the stack
     // have a non-null pc. Only look at the top frames to avoid quadratic
     // behavior.
     uint32_t sp = pseudoStack_->stackPointer;
     if (sp > 0 && sp - 1 < pseudoStack_->stackCapacity()) {
         size_t start = (sp > 4) ? sp - 4 : 0;
         for (size_t i = start; i < sp - 1; i++)
-            MOZ_ASSERT_IF(pseudoStack_->entries[i].isJsFrame(), pseudoStack_->entries[i].pc());
+            MOZ_ASSERT_IF(pseudoStack_->frames[i].isJsFrame(), pseudoStack_->frames[i].pc());
     }
 #endif
 
     pseudoStack_->pushJsFrame("", dynamicString, script, script->code());
     return true;
 }
 
 void
@@ -236,35 +236,35 @@ GeckoProfilerThread::exit(JSScript* scri
     uint32_t sp = pseudoStack_->stackPointer;
     if (sp < pseudoStack_->stackCapacity()) {
         JSRuntime* rt = script->runtimeFromMainThread();
         const char* dynamicString = rt->geckoProfiler().profileString(script, maybeFun);
         /* Can't fail lookup because we should already be in the set */
         MOZ_ASSERT(dynamicString);
 
         // Bug 822041
-        if (!pseudoStack_->entries[sp].isJsFrame()) {
+        if (!pseudoStack_->frames[sp].isJsFrame()) {
             fprintf(stderr, "--- ABOUT TO FAIL ASSERTION ---\n");
-            fprintf(stderr, " entries=%p size=%u/%u\n",
-                            (void*) pseudoStack_->entries,
+            fprintf(stderr, " frames=%p size=%u/%u\n",
+                            (void*) pseudoStack_->frames,
                             uint32_t(pseudoStack_->stackPointer),
                             pseudoStack_->stackCapacity());
             for (int32_t i = sp; i >= 0; i--) {
-                ProfileEntry& entry = pseudoStack_->entries[i];
-                if (entry.isJsFrame())
-                    fprintf(stderr, "  [%d] JS %s\n", i, entry.dynamicString());
+                ProfilingStackFrame& frame = pseudoStack_->frames[i];
+                if (frame.isJsFrame())
+                    fprintf(stderr, "  [%d] JS %s\n", i, frame.dynamicString());
                 else
-                    fprintf(stderr, "  [%d] C line %d %s\n", i, entry.line(), entry.dynamicString());
+                    fprintf(stderr, "  [%d] C line %d %s\n", i, frame.line(), frame.dynamicString());
             }
         }
 
-        ProfileEntry& entry = pseudoStack_->entries[sp];
-        MOZ_ASSERT(entry.isJsFrame());
-        MOZ_ASSERT(entry.script() == script);
-        MOZ_ASSERT(strcmp((const char*) entry.dynamicString(), dynamicString) == 0);
+        ProfilingStackFrame& frame = pseudoStack_->frames[sp];
+        MOZ_ASSERT(frame.isJsFrame());
+        MOZ_ASSERT(frame.script() == script);
+        MOZ_ASSERT(strcmp((const char*) frame.dynamicString(), dynamicString) == 0);
     }
 #endif
 }
 
 /*
  * Serializes the script/function pair into a "descriptive string" which is
  * allowed to fail. This function cannot trigger a GC because it could finalize
  * some scripts, resize the hash table of profile strings, and invalidate the
@@ -319,17 +319,17 @@ GeckoProfilerRuntime::allocProfileString
 }
 
 void
 GeckoProfilerThread::trace(JSTracer* trc)
 {
     if (pseudoStack_) {
         size_t size = pseudoStack_->stackSize();
         for (size_t i = 0; i < size; i++)
-            pseudoStack_->entries[i].trace(trc);
+            pseudoStack_->frames[i].trace(trc);
     }
 }
 
 void
 GeckoProfilerRuntime::fixupStringsMapAfterMovingGC()
 {
     auto locked = strings.lock();
     if (!locked->initialized())
@@ -357,21 +357,21 @@ GeckoProfilerRuntime::checkStringsMapAft
         CheckGCThingAfterMovingGC(script);
         auto ptr = locked->lookup(script);
         MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
     }
 }
 #endif
 
 void
-ProfileEntry::trace(JSTracer* trc)
+ProfilingStackFrame::trace(JSTracer* trc)
 {
     if (isJsFrame()) {
         JSScript* s = rawScript();
-        TraceNullableRoot(trc, &s, "ProfileEntry script");
+        TraceNullableRoot(trc, &s, "ProfilingStackFrame script");
         spOrScript = s;
     }
 }
 
 GeckoProfilerBaselineOSRMarker::GeckoProfilerBaselineOSRMarker(JSContext* cx, bool hasProfilerFrame
                                                                MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
     : profiler(&cx->geckoProfiler())
 {
@@ -386,38 +386,38 @@ GeckoProfilerBaselineOSRMarker::GeckoPro
         profiler = nullptr;
         return;
     }
 
     spBefore_ = sp;
     if (sp == 0)
         return;
 
-    ProfileEntry& entry = profiler->pseudoStack_->entries[sp - 1];
-    MOZ_ASSERT(entry.kind() == ProfileEntry::Kind::JS_NORMAL);
-    entry.setKind(ProfileEntry::Kind::JS_OSR);
+    ProfilingStackFrame& frame = profiler->pseudoStack_->frames[sp - 1];
+    MOZ_ASSERT(frame.kind() == ProfilingStackFrame::Kind::JS_NORMAL);
+    frame.setKind(ProfilingStackFrame::Kind::JS_OSR);
 }
 
 GeckoProfilerBaselineOSRMarker::~GeckoProfilerBaselineOSRMarker()
 {
     if (profiler == nullptr)
         return;
 
     uint32_t sp = profiler->stackPointer();
     MOZ_ASSERT(spBefore_ == sp);
     if (sp == 0)
         return;
 
-    ProfileEntry& entry = profiler->stack()[sp - 1];
-    MOZ_ASSERT(entry.kind() == ProfileEntry::Kind::JS_OSR);
-    entry.setKind(ProfileEntry::Kind::JS_NORMAL);
+    ProfilingStackFrame& frame = profiler->stack()[sp - 1];
+    MOZ_ASSERT(frame.kind() == ProfilingStackFrame::Kind::JS_OSR);
+    frame.setKind(ProfilingStackFrame::Kind::JS_NORMAL);
 }
 
 JS_PUBLIC_API(JSScript*)
-ProfileEntry::script() const
+ProfilingStackFrame::script() const
 {
     MOZ_ASSERT(isJsFrame());
     auto script = reinterpret_cast<JSScript*>(spOrScript.operator void*());
     if (!script)
         return nullptr;
 
     // If profiling is suppressed then we can't trust the script pointers to be
     // valid as they could be in the process of being moved by a compacting GC
@@ -426,33 +426,33 @@ ProfileEntry::script() const
     if (!cx->isProfilerSamplingEnabled())
         return nullptr;
 
     MOZ_ASSERT(!IsForwarded(script));
     return script;
 }
 
 JS_FRIEND_API(jsbytecode*)
-ProfileEntry::pc() const
+ProfilingStackFrame::pc() const
 {
     MOZ_ASSERT(isJsFrame());
     if (lineOrPcOffset == NullPCOffset)
         return nullptr;
 
     JSScript* script = this->script();
     return script ? script->offsetToPC(lineOrPcOffset) : nullptr;
 }
 
 /* static */ int32_t
-ProfileEntry::pcToOffset(JSScript* aScript, jsbytecode* aPc) {
+ProfilingStackFrame::pcToOffset(JSScript* aScript, jsbytecode* aPc) {
     return aPc ? aScript->pcToOffset(aPc) : NullPCOffset;
 }
 
 void
-ProfileEntry::setPC(jsbytecode* pc)
+ProfilingStackFrame::setPC(jsbytecode* pc)
 {
     MOZ_ASSERT(isJsFrame());
     JSScript* script = this->script();
     MOZ_ASSERT(script); // This should not be called while profiling is suppressed.
     lineOrPcOffset = pcToOffset(script, pc);
 }
 
 JS_FRIEND_API(void)
--- a/js/src/vm/GeckoProfiler.h
+++ b/js/src/vm/GeckoProfiler.h
@@ -27,31 +27,31 @@
  * profiler needs integration with the engine because otherwise it is very
  * difficult to figure out what javascript is executing.
  *
  * The current method of integration with the profiler is a form of
  * instrumentation: every time a JS function is entered, a bit of information
  * is pushed onto a stack that the profiler owns and maintains. This
  * information is then popped at the end of the JS function. The profiler
  * informs the JS engine of this stack at runtime, and it can by turned on/off
- * dynamically. Each stack entry has type ProfileEntry.
+ * dynamically. Each stack frame has type ProfilingStackFrame.
  *
  * Throughout execution, the size of the stack recorded in memory may exceed the
  * maximum. The JS engine will not write any information past the maximum limit,
  * but it will still maintain the size of the stack. Profiler code is aware of
  * this and iterates the stack accordingly.
  *
  * There is some information pushed on the profiler stack for every JS function
  * that is entered. First is a char* label with a description of what function
  * was entered. Currently this string is of the form "function (file:line)" if
  * there's a function name, or just "file:line" if there's no function name
  * available. The other bit of information is the relevant C++ (native) stack
  * pointer. This stack pointer is what enables the interleaving of the C++ and
  * the JS stack. Finally, throughout execution of the function, some extra
- * information may be updated on the ProfileEntry structure.
+ * information may be updated on the ProfilingStackFrame structure.
  *
  * = Profile Strings
  *
  * The profile strings' allocations and deallocation must be carefully
  * maintained, and ideally at a very low overhead cost. For this reason, the JS
  * engine maintains a mapping of all known profile strings. These strings are
  * keyed in lookup by a JSScript*, but are serialized with a JSFunction*,
  * JSScript* pair. A JSScript will destroy its corresponding profile string when
@@ -77,28 +77,28 @@
  * nullptr native stack pointer on the profiler stack, it looks backwards for
  * the first non-nullptr pointer and uses that for all subsequent nullptr
  * native stack pointers.
  *
  * = Line Numbers
  *
  * One goal of sampling is to get both a backtrace of the JS stack, but also
  * know where within each function on the stack execution currently is. For
- * this, each ProfileEntry has a 'pc' field to tell where its execution
+ * this, each ProfilingStackFrame has a 'pc' field to tell where its execution
  * currently is. This field is updated whenever a call is made to another JS
  * function, and for the JIT it is also updated whenever the JIT is left.
  *
  * This field is in a union with a uint32_t 'line' so that C++ can make use of
  * the field as well. It was observed that tracking 'line' via PCToLineNumber in
  * JS was far too expensive, so that is why the pc instead of the translated
  * line number is stored.
  *
  * As an invariant, if the pc is nullptr, then the JIT is currently executing
  * generated code. Otherwise execution is in another JS function or in C++. With
- * this in place, only the top entry of the stack can ever have nullptr as its
+ * this in place, only the top frame of the stack can ever have nullptr as its
  * pc. Additionally with this invariant, it is possible to maintain mappings of
  * JIT code to pc which can be accessed safely because they will only be
  * accessed from a signal handler when the JIT code is executing.
  */
 
 namespace js {
 
 // The `ProfileStringMap` weakly holds its `JSScript*` keys and owns its string
@@ -180,40 +180,40 @@ class MOZ_RAII GeckoProfilerEntryMarker
     GeckoProfilerThread* profiler_;
 #ifdef DEBUG
     uint32_t spBefore_;
 #endif
     MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 };
 
 /*
- * RAII class to automatically add Gecko Profiler pseudo frame entries.
+ * RAII class to automatically add Gecko Profiler profiling stack frames.
  *
  * NB: The `label` string must be statically allocated.
  */
 class MOZ_NONHEAP_CLASS AutoGeckoProfilerEntry
 {
   public:
     explicit MOZ_ALWAYS_INLINE
     AutoGeckoProfilerEntry(JSContext* cx, const char* label,
-                           ProfileEntry::Category category = ProfileEntry::Category::JS
+                           ProfilingStackFrame::Category category = ProfilingStackFrame::Category::JS
                            MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
     MOZ_ALWAYS_INLINE ~AutoGeckoProfilerEntry();
 
   private:
     GeckoProfilerThread* profiler_;
 #ifdef DEBUG
     uint32_t spBefore_;
 #endif
     MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 };
 
 /*
  * This class is used in the interpreter to bound regions where the baseline JIT
- * being entered via OSR.  It marks the current top pseudostack entry as
+ * is being entered via OSR.  It marks the current top profiling stack frame as
  * OSR-ed
  */
 class MOZ_RAII GeckoProfilerBaselineOSRMarker
 {
   public:
     explicit GeckoProfilerBaselineOSRMarker(JSContext* cx, bool hasProfilerFrame
                                             MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
     ~GeckoProfilerBaselineOSRMarker();
--- a/js/src/vm/ProfilingStack.cpp
+++ b/js/src/vm/ProfilingStack.cpp
@@ -16,37 +16,37 @@ using namespace js;
 
 PseudoStack::~PseudoStack()
 {
     // The label macros keep a reference to the PseudoStack to avoid a TLS
     // access. If these are somehow not all cleared we will get a
     // use-after-free so better to crash now.
     MOZ_RELEASE_ASSERT(stackPointer == 0);
 
-    delete[] entries;
+    delete[] frames;
 }
 
 bool
 PseudoStack::ensureCapacitySlow()
 {
-    MOZ_ASSERT(stackPointer >= entryCapacity);
+    MOZ_ASSERT(stackPointer >= capacity);
     const uint32_t kInitialCapacity = 128;
 
     uint32_t sp = stackPointer;
-    auto newCapacity = std::max(sp + 1,  entryCapacity ? entryCapacity * 2 : kInitialCapacity);
+    auto newCapacity = std::max(sp + 1,  capacity ? capacity * 2 : kInitialCapacity);
 
-    auto* newEntries =
-        new (mozilla::fallible) js::ProfileEntry[newCapacity];
-    if (MOZ_UNLIKELY(!newEntries))
+    auto* newFrames =
+        new (mozilla::fallible) js::ProfilingStackFrame[newCapacity];
+    if (MOZ_UNLIKELY(!newFrames))
         return false;
 
-    // It's important that `entries` / `entryCapacity` / `stackPointer` remain consistent here at
+    // It's important that `frames` / `capacity` / `stackPointer` remain consistent here at
     // all times.
-    for (auto i : mozilla::IntegerRange(entryCapacity))
-        newEntries[i] = entries[i];
+    for (auto i : mozilla::IntegerRange(capacity))
+        newFrames[i] = frames[i];
 
-    js::ProfileEntry* oldEntries = entries;
-    entries = newEntries;
-    entryCapacity = newCapacity;
-    delete[] oldEntries;
+    js::ProfilingStackFrame* oldFrames = frames;
+    frames = newFrames;
+    capacity = newCapacity;
+    delete[] oldFrames;
 
     return true;
 }
--- a/mozglue/misc/AutoProfilerLabel.h
+++ b/mozglue/misc/AutoProfilerLabel.h
@@ -7,26 +7,26 @@
 #ifndef mozilla_AutoProfilerLabel_h
 #define mozilla_AutoProfilerLabel_h
 
 #include "mozilla/Attributes.h"
 #include "mozilla/GuardObjects.h"
 #include "mozilla/Types.h"
 
 // The Gecko Profiler defines AutoProfilerLabel, an RAII class for
-// pushing/popping entries to/from the PseudoStack.
+// pushing/popping frames to/from the PseudoStack.
 //
 // This file defines a class of the same name that does much the same thing,
 // but which can be used in (and only in) mozglue. A different class is
 // necessary because mozglue cannot directly access sPseudoStack.
 //
 // Note that this class is slightly slower than the other AutoProfilerLabel,
 // and it lacks the macro wrappers. It also is effectively hardwired to use
-// js::ProfileEntry::Category::OTHER as the category, because that's what the
-// callbacks provided by the profiler use. (Specifying the category in
+// js::ProfilingStackFrame::Category::OTHER as the category, because that's what
+// the callbacks provided by the profiler use. (Specifying the category in
 // this file would require #including ProfilingStack.h in mozglue, which we
 // don't want to do.)
 
 class PseudoStack;
 
 namespace mozilla {
 
 typedef PseudoStack* (*ProfilerLabelEnter)(const char*, const char*, void*,
--- a/toolkit/components/backgroundhangmonitor/ThreadStackHelper.cpp
+++ b/toolkit/components/backgroundhangmonitor/ThreadStackHelper.cpp
@@ -260,62 +260,62 @@ GetPathAfterComponent(const char* filena
     next = strstr(found - 1, component);
   }
   return found;
 }
 
 } // namespace
 
 void
-ThreadStackHelper::CollectPseudoEntry(const js::ProfileEntry& aEntry)
+ThreadStackHelper::CollectProfilingStackFrame(const js::ProfilingStackFrame& aFrame)
 {
   // For non-js frames we just include the raw label.
-  if (!aEntry.isJsFrame()) {
-    const char* entryLabel = aEntry.label();
+  if (!aFrame.isJsFrame()) {
+    const char* frameLabel = aFrame.label();
 
-    // entryLabel is a statically allocated string, so we want to store a
+    // frameLabel is a statically allocated string, so we want to store a
     // reference to it without performing any allocations. This is important, as
     // we aren't allowed to allocate within this function.
     //
     // The variant for this kind of label in our HangStack object is a
     // `nsCString`, which normally contains heap allocated string data. However,
     // `nsCString` has an optimization for literal strings which causes the
     // backing data to not be copied when being copied between nsCString
     // objects.
     //
     // We take advantage of that optimization by creating a nsCString object
     // which has the LITERAL flag set. Without this optimization, this code
     // would be incorrect.
     nsCString label;
-    label.AssignLiteral(entryLabel, strlen(entryLabel));
+    label.AssignLiteral(frameLabel, strlen(frameLabel));
 
     // Let's make sure we don't deadlock here, by asserting that `label`'s
     // backing data matches.
-    MOZ_RELEASE_ASSERT(label.BeginReading() == entryLabel,
-        "String copy performed during ThreadStackHelper::CollectPseudoEntry");
+    MOZ_RELEASE_ASSERT(label.BeginReading() == frameLabel,
+        "String copy performed during ThreadStackHelper::CollectProfilingStackFrame");
     TryAppendFrame(label);
     return;
   }
 
-  if (!aEntry.script()) {
+  if (!aFrame.script()) {
     TryAppendFrame(HangEntrySuppressed());
     return;
   }
 
-  if (!IsChromeJSScript(aEntry.script())) {
+  if (!IsChromeJSScript(aFrame.script())) {
     TryAppendFrame(HangEntryContent());
     return;
   }
 
   // Rather than using the profiler's dynamic string, we compute our own string.
   // This is because we want to do some size-saving strategies, and throw out
   // information which won't help us as much.
   // XXX: We currently don't collect the function name which hung.
-  const char* filename = JS_GetScriptFilename(aEntry.script());
-  unsigned lineno = JS_PCToLineNumber(aEntry.script(), aEntry.pc());
+  const char* filename = JS_GetScriptFilename(aFrame.script());
+  unsigned lineno = JS_PCToLineNumber(aFrame.script(), aFrame.pc());
 
   // Some script names are in the form "foo -> bar -> baz".
   // Here we find the origin of these redirected scripts.
   const char* basename = GetPathAfterComponent(filename, " -> ");
   if (basename) {
     filename = basename;
   }
 
--- a/toolkit/components/backgroundhangmonitor/ThreadStackHelper.h
+++ b/toolkit/components/backgroundhangmonitor/ThreadStackHelper.h
@@ -88,17 +88,17 @@ public:
 protected:
   /**
    * ProfilerStackCollector
    */
   virtual void SetIsMainThread() override;
   virtual void CollectNativeLeafAddr(void* aAddr) override;
   virtual void CollectJitReturnAddr(void* aAddr) override;
   virtual void CollectWasmFrame(const char* aLabel) override;
-  virtual void CollectPseudoEntry(const js::ProfileEntry& aEntry) override;
+  virtual void CollectProfilingStackFrame(const js::ProfilingStackFrame& aEntry) override;
 
 private:
   void TryAppendFrame(mozilla::HangEntry aFrame);
 
   // The profiler's unique thread identifier for the target thread.
   int mThreadId;
 };
 
--- a/tools/profiler/core/ProfileBuffer.cpp
+++ b/tools/profiler/core/ProfileBuffer.cpp
@@ -64,17 +64,17 @@ ProfileBuffer::AddStoredMarker(ProfilerM
 {
   aStoredMarker->SetPositionInBuffer(mRangeEnd);
   mStoredMarkers.insert(aStoredMarker);
 }
 
 void
 ProfileBuffer::CollectCodeLocation(
   const char* aLabel, const char* aStr, int aLineNumber,
-  const Maybe<js::ProfileEntry::Category>& aCategory)
+  const Maybe<js::ProfilingStackFrame::Category>& aCategory)
 {
   AddEntry(ProfileBufferEntry::Label(aLabel));
 
   if (aStr) {
     // Store the string using one or more DynamicStringFragment entries.
     size_t strLen = strlen(aStr) + 1;   // +1 for the null terminator
     for (size_t j = 0; j < strLen; ) {
       // Store up to kNumChars characters in the entry.
@@ -148,60 +148,60 @@ ProfileBufferCollector::CollectJitReturn
 
 void
 ProfileBufferCollector::CollectWasmFrame(const char* aLabel)
 {
   mBuf.CollectCodeLocation("", aLabel, -1, Nothing());
 }
 
 void
-ProfileBufferCollector::CollectPseudoEntry(const js::ProfileEntry& aEntry)
+ProfileBufferCollector::CollectProfilingStackFrame(const js::ProfilingStackFrame& aFrame)
 {
   // WARNING: this function runs within the profiler's "critical section".
 
-  MOZ_ASSERT(aEntry.kind() == js::ProfileEntry::Kind::LABEL ||
-             aEntry.kind() == js::ProfileEntry::Kind::JS_NORMAL);
+  MOZ_ASSERT(aFrame.kind() == js::ProfilingStackFrame::Kind::LABEL ||
+             aFrame.kind() == js::ProfilingStackFrame::Kind::JS_NORMAL);
 
-  const char* label = aEntry.label();
-  const char* dynamicString = aEntry.dynamicString();
+  const char* label = aFrame.label();
+  const char* dynamicString = aFrame.dynamicString();
   bool isChromeJSEntry = false;
   int lineno = -1;
 
-  if (aEntry.isJsFrame()) {
+  if (aFrame.isJsFrame()) {
     // There are two kinds of JS frames that get pushed onto the PseudoStack.
     //
     // - label = "", dynamic string = <something>
     // - label = "js::RunScript", dynamic string = nullptr
     //
     // The line number is only interesting in the first case.
 
     if (label[0] == '\0') {
       MOZ_ASSERT(dynamicString);
 
-      // We call aEntry.script() repeatedly -- rather than storing the result in
+      // We call aFrame.script() repeatedly -- rather than storing the result in
       // a local variable -- in order to avoid rooting hazards.
-      if (aEntry.script()) {
-        isChromeJSEntry = IsChromeJSScript(aEntry.script());
-        if (aEntry.pc()) {
-          lineno = JS_PCToLineNumber(aEntry.script(), aEntry.pc());
+      if (aFrame.script()) {
+        isChromeJSEntry = IsChromeJSScript(aFrame.script());
+        if (aFrame.pc()) {
+          lineno = JS_PCToLineNumber(aFrame.script(), aFrame.pc());
         }
       }
 
     } else {
       MOZ_ASSERT(strcmp(label, "js::RunScript") == 0 && !dynamicString);
     }
   } else {
-    MOZ_ASSERT(aEntry.isLabelFrame());
-    lineno = aEntry.line();
+    MOZ_ASSERT(aFrame.isLabelFrame());
+    lineno = aFrame.line();
   }
 
   if (dynamicString) {
     // Adjust the dynamic string as necessary.
     if (ProfilerFeature::HasPrivacy(mFeatures) && !isChromeJSEntry) {
       dynamicString = "(private)";
     } else if (strlen(dynamicString) >= ProfileBuffer::kMaxFrameKeyLength) {
       dynamicString = "(too long)";
     }
   }
 
   mBuf.CollectCodeLocation(label, dynamicString, lineno,
-                           Some(aEntry.category()));
+                           Some(aFrame.category()));
 }
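
For context on the dynamic-string handling above: CollectCodeLocation stores aStr as a run of DynamicStringFragment entries, a fixed number of characters per entry, including the terminating '\0' (see the example entry streams in ProfileBufferEntry.cpp further down). A small sketch of that splitting, assuming 8 characters of payload per fragment (the real constant lives in ProfileBufferEntry.h); splitIntoFragments is an illustrative helper, not a profiler function:

#include <algorithm>
#include <cstring>
#include <string>
#include <vector>

static std::vector<std::string> splitIntoFragments(const char* str) {
    constexpr size_t kNumChars = 8;           // assumed fragment payload size
    std::vector<std::string> fragments;
    size_t len = std::strlen(str) + 1;        // +1: the '\0' is stored too
    for (size_t j = 0; j < len; j += kNumChars) {
        size_t n = std::min(kNumChars, len - j);
        fragments.emplace_back(str + j, n);   // last fragment carries the '\0'
    }
    return fragments;
}

// e.g. splitIntoFragments("domwindowopened") yields "domwindo" and "wopened\0",
// matching the DynamicStringFragment example stream shown in
// ProfileBufferEntry.cpp below.
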
--- a/tools/profiler/core/ProfileBuffer.h
+++ b/tools/profiler/core/ProfileBuffer.h
@@ -40,17 +40,17 @@ public:
   void AddEntry(const ProfileBufferEntry& aEntry);
 
   // Add to the buffer a sample start (ThreadId) entry for aThreadId.
   // Returns the position of the entry.
   uint64_t AddThreadIdEntry(int aThreadId);
 
   void CollectCodeLocation(
     const char* aLabel, const char* aStr, int aLineNumber,
-    const mozilla::Maybe<js::ProfileEntry::Category>& aCategory);
+    const mozilla::Maybe<js::ProfilingStackFrame::Category>& aCategory);
 
   // Maximum size of a frameKey string that we'll handle.
   static const size_t kMaxFrameKeyLength = 512;
 
   // Add JIT frame information to aJITFrameInfo for any JitReturnAddr entries
   // that are currently in the buffer at or after aRangeStart, in samples
   // for the given thread.
   void AddJITInfoForRange(uint64_t aRangeStart,
@@ -157,17 +157,17 @@ public:
   mozilla::Maybe<uint64_t> BufferRangeStart() override
   {
     return mozilla::Some(mBuf.mRangeStart);
   }
 
   virtual void CollectNativeLeafAddr(void* aAddr) override;
   virtual void CollectJitReturnAddr(void* aAddr) override;
   virtual void CollectWasmFrame(const char* aLabel) override;
-  virtual void CollectPseudoEntry(const js::ProfileEntry& aEntry) override;
+  virtual void CollectProfilingStackFrame(const js::ProfilingStackFrame& aFrame) override;
 
 private:
   ProfileBuffer& mBuf;
   uint64_t mSamplePositionInBuffer;
   uint32_t mFeatures;
 };
 
 #endif
--- a/tools/profiler/core/ProfileBufferEntry.cpp
+++ b/tools/profiler/core/ProfileBufferEntry.cpp
@@ -801,89 +801,89 @@ private:
 //   | CollectionEnd
 //   | Pause
 //   | Resume
 // )*
 //
 // The most complicated part is the stack entry sequence that begins with
 // Label. Here are some examples.
 //
-// - PseudoStack entries without a dynamic string:
+// - PseudoStack frames without a dynamic string:
 //
 //     Label("js::RunScript")
-//     Category(ProfileEntry::Category::JS)
+//     Category(ProfilingStackFrame::Category::JS)
 //
 //     Label("XREMain::XRE_main")
 //     LineNumber(4660)
-//     Category(ProfileEntry::Category::OTHER)
+//     Category(ProfilingStackFrame::Category::OTHER)
 //
 //     Label("ElementRestyler::ComputeStyleChangeFor")
 //     LineNumber(3003)
-//     Category(ProfileEntry::Category::CSS)
+//     Category(ProfilingStackFrame::Category::CSS)
 //
-// - PseudoStack entries with a dynamic string:
+// - PseudoStack frames with a dynamic string:
 //
 //     Label("nsObserverService::NotifyObservers")
 //     DynamicStringFragment("domwindo")
 //     DynamicStringFragment("wopened")
 //     LineNumber(291)
-//     Category(ProfileEntry::Category::OTHER)
+//     Category(ProfilingStackFrame::Category::OTHER)
 //
 //     Label("")
 //     DynamicStringFragment("closeWin")
 //     DynamicStringFragment("dow (chr")
 //     DynamicStringFragment("ome://gl")
 //     DynamicStringFragment("obal/con")
 //     DynamicStringFragment("tent/glo")
 //     DynamicStringFragment("balOverl")
 //     DynamicStringFragment("ay.js:5)")
 //     DynamicStringFragment("")          # this string holds the closing '\0'
 //     LineNumber(25)
-//     Category(ProfileEntry::Category::JS)
+//     Category(ProfilingStackFrame::Category::JS)
 //
 //     Label("")
 //     DynamicStringFragment("bound (s")
 //     DynamicStringFragment("elf-host")
 //     DynamicStringFragment("ed:914)")
 //     LineNumber(945)
-//     Category(ProfileEntry::Category::JS)
+//     Category(ProfilingStackFrame::Category::JS)
 //
-// - A pseudoStack entry with a dynamic string, but with privacy enabled:
+// - A pseudoStack frame with a dynamic string, but with privacy enabled:
 //
 //     Label("nsObserverService::NotifyObservers")
 //     DynamicStringFragment("(private")
 //     DynamicStringFragment(")")
 //     LineNumber(291)
-//     Category(ProfileEntry::Category::OTHER)
+//     Category(ProfilingStackFrame::Category::OTHER)
 //
-// - A pseudoStack entry with an overly long dynamic string:
+// - A pseudoStack frame with an overly long dynamic string:
 //
 //     Label("")
 //     DynamicStringFragment("(too lon")
 //     DynamicStringFragment("g)")
 //     LineNumber(100)
-//     Category(ProfileEntry::Category::NETWORK)
+//     Category(ProfilingStackFrame::Category::NETWORK)
 //
-// - A wasm JIT frame entry:
+// - A wasm JIT frame:
 //
 //     Label("")
 //     DynamicStringFragment("wasm-fun")
 //     DynamicStringFragment("ction[87")
 //     DynamicStringFragment("36] (blo")
 //     DynamicStringFragment("b:http:/")
 //     DynamicStringFragment("/webasse")
 //     DynamicStringFragment("mbly.org")
 //     DynamicStringFragment("/3dc5759")
 //     DynamicStringFragment("4-ce58-4")
 //     DynamicStringFragment("626-975b")
 //     DynamicStringFragment("-08ad116")
 //     DynamicStringFragment("30bc1:38")
 //     DynamicStringFragment("29856)")
 //
-// - A JS frame entry in a synchronous sample:
+// - A JS frame in a synchronous sample:
 //
 //     Label("")
 //     DynamicStringFragment("u (https")
 //     DynamicStringFragment("://perf-")
 //     DynamicStringFragment("html.io/")
 //     DynamicStringFragment("ac0da204")
 //     DynamicStringFragment("aaa44d75")
 //     DynamicStringFragment("a800.bun")
--- a/tools/profiler/core/platform.cpp
+++ b/tools/profiler/core/platform.cpp
@@ -797,26 +797,24 @@ public:
   // This contains all the registers, which means it duplicates the four fields
   // above. This is ok.
   ucontext_t* mContext; // The context from the signal handler.
 #endif
 };
 
 // Setting MAX_NATIVE_FRAMES too high risks the unwinder wasting a lot of time
 // looping on corrupted stacks.
-//
-// The PseudoStack frame size is found in PseudoStack::MaxEntries.
 static const size_t MAX_NATIVE_FRAMES = 1024;
 static const size_t MAX_JS_FRAMES     = 1024;
 
 struct NativeStack
 {
   void* mPCs[MAX_NATIVE_FRAMES];
   void* mSPs[MAX_NATIVE_FRAMES];
-  size_t mCount;  // Number of entries filled.
+  size_t mCount;  // Number of frames filled.
 
   NativeStack()
     : mPCs(), mSPs(), mCount(0)
   {}
 };
 
 Atomic<bool> WALKING_JS_STACK(false);
 
@@ -844,27 +842,27 @@ MergeStacks(uint32_t aFeatures, bool aIs
             ProfilerStackCollector& aCollector)
 {
   // WARNING: this function runs within the profiler's "critical section".
   // WARNING: this function might be called while the profiler is inactive, and
   //          cannot rely on ActivePS.
 
   const PseudoStack& pseudoStack =
     aRegisteredThread.RacyRegisteredThread().PseudoStack();
-  const js::ProfileEntry* pseudoEntries = pseudoStack.entries;
+  const js::ProfilingStackFrame* pseudoEntries = pseudoStack.frames;
   uint32_t pseudoCount = pseudoStack.stackSize();
   JSContext* context = aRegisteredThread.GetJSContext();
 
   // Make a copy of the JS stack into a JSFrame array. This is necessary since,
   // like the native stack, the JS stack is iterated youngest-to-oldest and we
-  // need to iterate oldest-to-youngest when adding entries to aInfo.
+  // need to iterate oldest-to-youngest when adding frames to aInfo.
 
   // Non-periodic sampling passes Nothing() as the buffer write position to
   // ProfilingFrameIterator to avoid incorrectly resetting the buffer position
-  // of sampled JIT entries inside the JS engine.
+  // of sampled JIT frames inside the JS engine.
   Maybe<uint64_t> samplePosInBuffer;
   if (!aIsSynchronous) {
     // aCollector.SamplePositionInBuffer() will return Nothing() when
     // profiler_suspend_and_sample_thread is called from the background hang
     // reporter.
     samplePosInBuffer = aCollector.SamplePositionInBuffer();
   }
   uint32_t jsCount = 0;
@@ -918,28 +916,28 @@ MergeStacks(uint32_t aFeatures, bool aIs
   while (pseudoIndex != pseudoCount || jsIndex >= 0 || nativeIndex >= 0) {
     // There are 1 to 3 frames available. Find and add the oldest.
     uint8_t* pseudoStackAddr = nullptr;
     uint8_t* jsStackAddr = nullptr;
     uint8_t* nativeStackAddr = nullptr;
     uint8_t* jsActivationAddr = nullptr;
 
     if (pseudoIndex != pseudoCount) {
-      const js::ProfileEntry& pseudoEntry = pseudoEntries[pseudoIndex];
-
-      if (pseudoEntry.isLabelFrame() || pseudoEntry.isSpMarkerFrame()) {
-        lastLabelFrameStackAddr = (uint8_t*) pseudoEntry.stackAddress();
+      const js::ProfilingStackFrame& profilingStackFrame = pseudoEntries[pseudoIndex];
+
+      if (profilingStackFrame.isLabelFrame() || profilingStackFrame.isSpMarkerFrame()) {
+        lastLabelFrameStackAddr = (uint8_t*) profilingStackFrame.stackAddress();
       }
 
       // Skip any JS_OSR frames. Such frames are used when the JS interpreter
       // enters a jit frame on a loop edge (via on-stack-replacement, or OSR).
       // To avoid both the pseudoframe and jit frame being recorded (and
       // showing up twice), the interpreter marks the interpreter pseudostack
       // frame as JS_OSR to ensure that it doesn't get counted.
-      if (pseudoEntry.kind() == js::ProfileEntry::Kind::JS_OSR) {
+      if (profilingStackFrame.kind() == js::ProfilingStackFrame::Kind::JS_OSR) {
           pseudoIndex++;
           continue;
       }
 
       MOZ_ASSERT(lastLabelFrameStackAddr);
       pseudoStackAddr = lastLabelFrameStackAddr;
     }
 
@@ -947,20 +945,21 @@ MergeStacks(uint32_t aFeatures, bool aIs
       jsStackAddr = (uint8_t*) jsFrames[jsIndex].stackAddress;
       jsActivationAddr = (uint8_t*) jsFrames[jsIndex].activation;
     }
 
     if (nativeIndex >= 0) {
       nativeStackAddr = (uint8_t*) aNativeStack.mSPs[nativeIndex];
     }
 
-    // If there's a native stack entry which has the same SP as a pseudo stack
-    // entry, pretend we didn't see the native stack entry.  Ditto for a native
-    // stack entry which has the same SP as a JS stack entry.  In effect this
-    // means pseudo or JS entries trump conflicting native entries.
+    // If there's a native stack frame which has the same SP as a profiling
+    // stack frame, pretend we didn't see the native stack frame.  Ditto for a
+    // native stack frame which has the same SP as a JS stack frame.  In effect
+    // this means profiling stack frames or JS frames trump conflicting native
+    // frames.
     if (nativeStackAddr && (pseudoStackAddr == nativeStackAddr ||
                             jsStackAddr == nativeStackAddr)) {
       nativeStackAddr = nullptr;
       nativeIndex--;
       MOZ_ASSERT(pseudoStackAddr || jsStackAddr);
     }
 
     // Sanity checks.
@@ -969,25 +968,26 @@ MergeStacks(uint32_t aFeatures, bool aIs
     MOZ_ASSERT_IF(jsStackAddr, jsStackAddr != pseudoStackAddr &&
                                jsStackAddr != nativeStackAddr);
     MOZ_ASSERT_IF(nativeStackAddr, nativeStackAddr != pseudoStackAddr &&
                                    nativeStackAddr != jsStackAddr);
 
     // Check to see if pseudoStack frame is top-most.
     if (pseudoStackAddr > jsStackAddr && pseudoStackAddr > nativeStackAddr) {
       MOZ_ASSERT(pseudoIndex < pseudoCount);
-      const js::ProfileEntry& pseudoEntry = pseudoEntries[pseudoIndex];
+      const js::ProfilingStackFrame& profilingStackFrame = pseudoEntries[pseudoIndex];
 
       // Sp marker frames are just annotations and should not be recorded in
       // the profile.
-      if (!pseudoEntry.isSpMarkerFrame()) {
-        // The JIT only allows the top-most entry to have a nullptr pc.
-        MOZ_ASSERT_IF(pseudoEntry.isJsFrame() && pseudoEntry.script() && !pseudoEntry.pc(),
-                      &pseudoEntry == &pseudoStack.entries[pseudoStack.stackSize() - 1]);
-        aCollector.CollectPseudoEntry(pseudoEntry);
+      if (!profilingStackFrame.isSpMarkerFrame()) {
+        // The JIT only allows the top-most frame to have a nullptr pc.
+        MOZ_ASSERT_IF(profilingStackFrame.isJsFrame() &&
+                      profilingStackFrame.script() && !profilingStackFrame.pc(),
+                      &profilingStackFrame == &pseudoStack.frames[pseudoStack.stackSize() - 1]);
+        aCollector.CollectProfilingStackFrame(profilingStackFrame);
       }
       pseudoIndex++;
       continue;
     }
 
     // Check to see if JS jit stack frame is top-most
     if (jsStackAddr > nativeStackAddr) {
       MOZ_ASSERT(jsIndex >= 0);
@@ -1014,18 +1014,18 @@ MergeStacks(uint32_t aFeatures, bool aIs
                    jsFrame.kind == JS::ProfilingFrameIterator::Frame_Baseline);
         aCollector.CollectJitReturnAddr(jsFrame.returnAddress);
       }
 
       jsIndex--;
       continue;
     }
 
-    // If we reach here, there must be a native stack entry and it must be the
-    // greatest entry.
+    // If we reach here, there must be a native stack frame and it must be the
+    // greatest frame.
     if (nativeStackAddr &&
         // If the latest JS frame was JIT, this could be the native frame that
         // corresponds to it. In that case, skip the native frame, because there's
         // no need for the same frame to be present twice in the stack. The JS
         // frame can be considered the symbolicated version of the native frame.
         (!jitEndStackAddr || nativeStackAddr < jitEndStackAddr ) &&
        // This might still be a JIT operation; check to make sure that it is
        // not in range of the NEXT JavaScript stack's activation address.
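
Taken together, the checks above boil down to one rule: at each step, compare the stack addresses of the candidate profiling-stack, JS, and native frames and take whichever is greatest, because on a downward-growing stack the greatest address belongs to the oldest frame. A minimal sketch of that selection, under the assumption that a null address means "no frame left from that source" (the enum and helper are hypothetical, not part of this changeset):

enum class FrameSource { ProfilingStack, Js, Native };

// Mirrors the ordering of the checks in MergeStacks: profiling stack frames
// are considered first, then JS frames, then native frames.
static FrameSource
OldestFrame(uint8_t* aProfilingStackAddr, uint8_t* aJsStackAddr,
            uint8_t* aNativeStackAddr)
{
  if (aProfilingStackAddr > aJsStackAddr &&
      aProfilingStackAddr > aNativeStackAddr) {
    return FrameSource::ProfilingStack;
  }
  if (aJsStackAddr > aNativeStackAddr) {
    return FrameSource::Js;
  }
  return FrameSource::Native;
}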
@@ -1134,23 +1134,23 @@ DoEHABIBacktrace(PSLockRef aLock, const 
 
   // The pseudostack contains an "EnterJIT" frame whenever we enter
   // JIT code with profiling enabled; the stack pointer value points to
   // the saved registers.  We use this to resume unwinding after
   // encountering JIT code.
   for (uint32_t i = pseudoStack.stackSize(); i > 0; --i) {
     // The pseudostack grows towards higher indices, so we iterate
     // backwards (from callee to caller).
-    const js::ProfileEntry& entry = pseudoStack.entries[i - 1];
-    if (!entry.isJsFrame() && strcmp(entry.label(), "EnterJIT") == 0) {
+    const js::ProfilingStackFrame& frame = pseudoStack.frames[i - 1];
+    if (!frame.isJsFrame() && strcmp(frame.label(), "EnterJIT") == 0) {
       // Found JIT entry frame.  Unwind up to that point (i.e., force
       // the stack walk to stop before the block of saved registers;
       // note that it yields nondecreasing stack pointers), then restore
       // the saved state.
-      uint32_t* vSP = reinterpret_cast<uint32_t*>(entry.stackAddress());
+      uint32_t* vSP = reinterpret_cast<uint32_t*>(frame.stackAddress());
 
       aNativeStack.mCount +=
         EHABIStackWalk(*mcontext, /* stackBase = */ vSP,
                        aNativeStack.mSPs + aNativeStack.mCount,
                        aNativeStack.mPCs + aNativeStack.mCount,
                        MAX_NATIVE_FRAMES - aNativeStack.mCount);
 
       memset(&savedContext, 0, sizeof(savedContext));
@@ -2324,17 +2324,17 @@ locked_profiler_start(PSLockRef aLock, u
 // This basically duplicates AutoProfilerLabel's constructor.
 PseudoStack*
 MozGlueLabelEnter(const char* aLabel, const char* aDynamicString, void* aSp,
                   uint32_t aLine)
 {
   PseudoStack* pseudoStack = AutoProfilerLabel::sPseudoStack.get();
   if (pseudoStack) {
     pseudoStack->pushLabelFrame(aLabel, aDynamicString, aSp, aLine,
-                                js::ProfileEntry::Category::OTHER);
+                                js::ProfilingStackFrame::Category::OTHER);
   }
   return pseudoStack;
 }
 
 // This basically duplicates AutoProfilerLabel's destructor.
 void
 MozGlueLabelExit(PseudoStack* aPseudoStack)
 {
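
A mozglue-side caller would pair these two hooks RAII-style so that the frame pushed by MozGlueLabelEnter is always popped, even on early return. A rough sketch with a hypothetical class name (the real wrapper in mozglue is analogous):

class MOZ_RAII AutoMozGlueLabel
{
public:
  AutoMozGlueLabel(const char* aLabel, const char* aDynamicString, uint32_t aLine)
    // Pass |this| as the stack address, as AutoProfilerLabel does.
    : mPseudoStack(MozGlueLabelEnter(aLabel, aDynamicString, this, aLine))
  {}

  ~AutoMozGlueLabel()
  {
    if (mPseudoStack) {
      MozGlueLabelExit(mPseudoStack);
    }
  }

private:
  PseudoStack* mPseudoStack;  // Null if no PseudoStack was registered in TLS.
};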
--- a/tools/profiler/public/GeckoProfiler.h
+++ b/tools/profiler/public/GeckoProfiler.h
@@ -391,17 +391,17 @@ public:
   // returns.
 
   virtual void CollectNativeLeafAddr(void* aAddr) = 0;
 
   virtual void CollectJitReturnAddr(void* aAddr) = 0;
 
   virtual void CollectWasmFrame(const char* aLabel) = 0;
 
-  virtual void CollectPseudoEntry(const js::ProfileEntry& aEntry) = 0;
+  virtual void CollectProfilingStackFrame(const js::ProfilingStackFrame& aFrame) = 0;
 };
 
 // This method suspends the thread identified by aThreadId, samples its
 // pseudo-stack, JS stack, and (optionally) native stack, passing the collected
 // frames into aCollector. aFeatures dictates which profiler features are used.
 // |Privacy| and |Leaf| are the only relevant ones.
 void profiler_suspend_and_sample_thread(int aThreadId, uint32_t aFeatures,
                                         ProfilerStackCollector& aCollector,
@@ -445,17 +445,17 @@ mozilla::Maybe<ProfilerBufferInfo> profi
 // that for us, but __func__ gives us the function name without the class
 // name.) If the label applies to only part of a function, you can qualify it
 // like this: "ClassName::FunctionName:PartName".
 //
 // Use AUTO_PROFILER_LABEL_DYNAMIC_* if you want to add additional / dynamic
 // information to the pseudo stack frame.
 #define AUTO_PROFILER_LABEL(label, category) \
   mozilla::AutoProfilerLabel PROFILER_RAII(label, nullptr, __LINE__, \
-                                           js::ProfileEntry::Category::category)
+                                           js::ProfilingStackFrame::Category::category)
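
As a usage sketch of the convention described above (the class and method names are illustrative):

void PaintManager::FlushPendingInvalidations()
{
  // Samples taken while this scope is live get a
  // "PaintManager::FlushPendingInvalidations" frame in the GRAPHICS category.
  AUTO_PROFILER_LABEL("PaintManager::FlushPendingInvalidations", GRAPHICS);

  // ... painting work ...
}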
 
 // Similar to AUTO_PROFILER_LABEL, but with an additional string. The inserted
 // RAII object stores the cStr pointer in a field; it does not copy the string.
 //
 // WARNING: This means that the string you pass to this macro needs to live at
 // least until the end of the current scope. Be careful using this macro with
 // ns[C]String; the other AUTO_PROFILER_LABEL_DYNAMIC_* macros below are
 // preferred because they avoid this problem.
@@ -468,60 +468,60 @@ mozilla::Maybe<ProfilerBufferInfo> profi
 // Compare this to the plain AUTO_PROFILER_LABEL macro, which only accepts
 // literal strings: When the pseudo stack frames generated by
 // AUTO_PROFILER_LABEL are sampled, no string copy needs to be made because the
 // profile buffer can just store the raw pointers to the literal strings.
 // Consequently, AUTO_PROFILER_LABEL frames take up considerably less space in
 // the profile buffer than AUTO_PROFILER_LABEL_DYNAMIC_* frames.
 #define AUTO_PROFILER_LABEL_DYNAMIC_CSTR(label, category, cStr) \
   mozilla::AutoProfilerLabel \
-    PROFILER_RAII(label, cStr, __LINE__, js::ProfileEntry::Category::category)
+    PROFILER_RAII(label, cStr, __LINE__, js::ProfilingStackFrame::Category::category)
 
 // Similar to AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but takes an nsACString.
 //
 // Note: The use of the Maybe<>s ensures the scopes for the dynamic string and
 // the AutoProfilerLabel are appropriate, while also not incurring the runtime
 // cost of the string assignment unless the profiler is active. Therefore,
 // unlike AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC_CSTR, this macro
 // doesn't push/pop a label when the profiler is inactive.
 #define AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING(label, category, nsCStr) \
   mozilla::Maybe<nsAutoCString> autoCStr; \
   mozilla::Maybe<AutoProfilerLabel> raiiObjectNsCString; \
   if (profiler_is_active()) { \
     autoCStr.emplace(nsCStr); \
     raiiObjectNsCString.emplace(label, autoCStr->get(), __LINE__, \
-                                js::ProfileEntry::Category::category); \
+                                js::ProfilingStackFrame::Category::category); \
   }
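
A usage sketch of the nsACString variant (class, method, and parameter names are illustrative):

void ImageLoader::LoadImage(const nsCString& aURISpec)
{
  // The spec is copied into the Maybe<nsAutoCString> only when the profiler
  // is active, so an inactive profiler costs almost nothing here.
  AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING("ImageLoader::LoadImage", NETWORK,
                                        aURISpec);

  // ... decode / network work ...
}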
 
 // Similar to AUTO_PROFILER_LABEL_DYNAMIC_CSTR, but takes an nsString that is
 // lossily converted to an ASCII string.
 //
 // Note: The use of the Maybe<>s ensures the scopes for the converted dynamic
 // string and the AutoProfilerLabel are appropriate, while also not incurring
 // the runtime cost of the string conversion unless the profiler is active.
 // Therefore, unlike AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC_CSTR,
 // this macro doesn't push/pop a label when the profiler is inactive.
 #define AUTO_PROFILER_LABEL_DYNAMIC_LOSSY_NSSTRING(label, category, nsStr) \
   mozilla::Maybe<NS_LossyConvertUTF16toASCII> asciiStr; \
   mozilla::Maybe<AutoProfilerLabel> raiiObjectLossyNsString; \
   if (profiler_is_active()) { \
     asciiStr.emplace(nsStr); \
     raiiObjectLossyNsString.emplace(label, asciiStr->get(), __LINE__, \
-                                    js::ProfileEntry::Category::category); \
+                                    js::ProfilingStackFrame::Category::category); \
   }
 
 // Similar to AUTO_PROFILER_LABEL, but accepting a JSContext* parameter, and a
 // no-op if the profiler is disabled.
 // Used to annotate functions for which overhead in the range of nanoseconds is
 // noticeable. It avoids overhead from the TLS lookup because it can get the
 // PseudoStack from the JS context, and avoids almost all overhead in the case
 // where the profiler is disabled.
 #define AUTO_PROFILER_LABEL_FAST(label, category, ctx) \
   mozilla::AutoProfilerLabel PROFILER_RAII(ctx, label, nullptr, __LINE__, \
-                                           js::ProfileEntry::Category::category)
+                                           js::ProfilingStackFrame::Category::category)
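
A usage sketch of the fast variant, with a hypothetical engine callback:

static bool
InterruptCallback(JSContext* aCx)
{
  // Reads the PseudoStack off aCx, avoiding the TLS lookup, and is a no-op
  // when the profiler is disabled.
  AUTO_PROFILER_LABEL_FAST("InterruptCallback", JS, aCx);

  // ... per-interrupt bookkeeping ...
  return true;
}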
 
 // Insert a marker in the profile timeline. This is useful to delimit something
 // important happening such as the first paint. Unlike labels, which are only
 // recorded in the profile buffer if a sample is collected while the label is
 // on the pseudostack, markers will always be recorded in the profile buffer.
 // aMarkerName is copied, so the caller does not need to ensure it lives for a
 // certain length of time. A no-op if the profiler is inactive or in privacy
 // mode.
@@ -690,44 +690,44 @@ private:
 // are stack-allocated, and so exist within a thread, and are thus bounded by
 // the lifetime of the thread, which ensures that the references held can't be
 // used after the PseudoStack is destroyed.
 class MOZ_RAII AutoProfilerLabel
 {
 public:
   // This is the AUTO_PROFILER_LABEL and AUTO_PROFILER_LABEL_DYNAMIC variant.
   AutoProfilerLabel(const char* aLabel, const char* aDynamicString,
-                    uint32_t aLine, js::ProfileEntry::Category aCategory
+                    uint32_t aLine, js::ProfilingStackFrame::Category aCategory
                     MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
   {
     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
 
     // Get the PseudoStack from TLS.
     Push(sPseudoStack.get(), aLabel, aDynamicString, aLine, aCategory);
   }
 
   // This is the AUTO_PROFILER_LABEL_FAST variant. It's guarded on
   // profiler_is_active() and retrieves the PseudoStack from the JSContext.
   AutoProfilerLabel(JSContext* aJSContext,
                     const char* aLabel, const char* aDynamicString,
-                    uint32_t aLine, js::ProfileEntry::Category aCategory
+                    uint32_t aLine, js::ProfilingStackFrame::Category aCategory
                     MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
   {
     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
     if (profiler_is_active()) {
       Push(js::GetContextProfilingStack(aJSContext),
            aLabel, aDynamicString, aLine, aCategory);
     } else {
       mPseudoStack = nullptr;
     }
   }
 
   void Push(PseudoStack* aPseudoStack,
             const char* aLabel, const char* aDynamicString,
-            uint32_t aLine, js::ProfileEntry::Category aCategory)
+            uint32_t aLine, js::ProfilingStackFrame::Category aCategory)
   {
     // This function runs both on and off the main thread.
 
     mPseudoStack = aPseudoStack;
     if (mPseudoStack) {
       mPseudoStack->pushLabelFrame(aLabel, aDynamicString, this, aLine,
                                    aCategory);
     }
--- a/tools/profiler/tests/gtest/GeckoProfiler.cpp
+++ b/tools/profiler/tests/gtest/GeckoProfiler.cpp
@@ -697,19 +697,19 @@ TEST(GeckoProfiler, PseudoStack)
 
     profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL,
                    features, filters, MOZ_ARRAY_LENGTH(filters));
 
     ASSERT_TRUE(profiler_get_backtrace());
   }
 
   AutoProfilerLabel label1("A", nullptr, 888,
-                           js::ProfileEntry::Category::STORAGE);
+                           js::ProfilingStackFrame::Category::STORAGE);
   AutoProfilerLabel label2("A", dynamic.get(), 888,
-                           js::ProfileEntry::Category::NETWORK);
+                           js::ProfilingStackFrame::Category::NETWORK);
   ASSERT_TRUE(profiler_get_backtrace());
 
   profiler_stop();
 
   ASSERT_TRUE(!profiler_get_profile());
 }
 
 TEST(GeckoProfiler, Bug1355807)
@@ -742,17 +742,17 @@ public:
     , mFrames(0)
   {}
 
   virtual void SetIsMainThread() { mSetIsMainThread++; }
 
   virtual void CollectNativeLeafAddr(void* aAddr) { mFrames++; }
   virtual void CollectJitReturnAddr(void* aAddr) { mFrames++; }
   virtual void CollectWasmFrame(const char* aLabel) { mFrames++; }
-  virtual void CollectPseudoEntry(const js::ProfileEntry& aEntry) { mFrames++; }
+  virtual void CollectProfilingStackFrame(const js::ProfilingStackFrame& aFrame) { mFrames++; }
 
   int mSetIsMainThread;
   int mFrames;
 };
 
 void DoSuspendAndSample(int aTid, nsIThread* aThread)
 {
   aThread->Dispatch(