Backed out changeset a5280e37401b (bug 1208747) for bustage on a CLOSED TREE
author Carsten "Tomcat" Book <cbook@mozilla.com>
Wed, 21 Oct 2015 11:40:59 +0200
changeset 303856 7e8966f583f3a9563239347176e91ccc96b5e1fd
parent 303855 10f906d69805a39c1f99fbc78f3a6b87a70225c0
child 303857 fd4bf5ebb3cde0183028e2d0bfdaa66aa262f1d1
push id 1001
push user raliiev@mozilla.com
push date Mon, 18 Jan 2016 19:06:03 +0000
treeherder mozilla-release@8b89261f3ac4
bugs 1208747
milestone 44.0a1
backs out a5280e37401b35f0e15e8471385eb7f6b86e1635
js/src/jsapi.cpp
js/src/jsapi.h
js/src/moz.build
js/src/vm/Interpreter.cpp
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/Stopwatch.cpp
js/src/vm/Stopwatch.h
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -295,16 +295,104 @@ JS_PUBLIC_API(JSString*)
 JS_GetEmptyString(JSRuntime* rt)
 {
     MOZ_ASSERT(rt->hasContexts());
     return rt->emptyString;
 }
 
 namespace js {
 
+JS_PUBLIC_API(bool)
+IterPerformanceStats(JSContext* cx,
+                     PerformanceStatsWalker walker,
+                     PerformanceData* processStats,
+                     void* closure)
+{
+    // As a PerformanceGroup is typically associated with several
+    // compartments, use a HashSet to make sure that we only report
+    // each PerformanceGroup once.
+    typedef HashSet<js::PerformanceGroup*,
+                    js::DefaultHasher<js::PerformanceGroup*>,
+                    js::SystemAllocPolicy> Set;
+    Set set;
+    if (!set.init(100)) {
+        return false;
+    }
+
+    JSRuntime* rt = JS_GetRuntime(cx);
+
+    // First report the shared groups
+    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
+        JSCompartment* compartment = c.get();
+        if (!c->principals()) {
+            // Compartments without principals could show up here, but
+            // reporting them doesn't really make sense.
+            continue;
+        }
+        if (!c->performanceMonitoring.hasSharedGroup()) {
+            // Don't report compartments that do not even have a PerformanceGroup.
+            continue;
+        }
+        js::AutoCompartment autoCompartment(cx, compartment);
+        RefPtr<PerformanceGroup> group = compartment->performanceMonitoring.getSharedGroup(cx);
+        if (group->data.ticks == 0) {
+            // Don't report compartments that have never been used.
+            continue;
+        }
+
+        Set::AddPtr ptr = set.lookupForAdd(group);
+        if (ptr) {
+            // Don't report the same group twice.
+            continue;
+        }
+
+        if (!(*walker)(cx,
+                       group->data, group->uid, nullptr,
+                       closure)) {
+            // Issue in callback
+            return false;
+        }
+        if (!set.add(ptr, group)) {
+            // Memory issue
+            return false;
+        }
+    }
+
+    // Then report the own groups
+    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
+        JSCompartment* compartment = c.get();
+        if (!c->principals()) {
+            // Compartments without principals could show up here, but
+            // reporting them doesn't really make sense.
+            continue;
+        }
+        if (!c->performanceMonitoring.hasOwnGroup()) {
+            // Don't report compartments that do not even have a PerformanceGroup.
+            continue;
+        }
+        js::AutoCompartment autoCompartment(cx, compartment);
+        RefPtr<PerformanceGroup> ownGroup = compartment->performanceMonitoring.getOwnGroup();
+        if (ownGroup->data.ticks == 0) {
+            // Don't report compartments that have never been used.
+            continue;
+        }
+        RefPtr<PerformanceGroup> sharedGroup = compartment->performanceMonitoring.getSharedGroup(cx);
+        if (!(*walker)(cx,
+                       ownGroup->data, ownGroup->uid, &sharedGroup->uid,
+                       closure)) {
+            // Issue in callback
+            return false;
+        }
+    }
+
+    // Finally, report the process stats
+    *processStats = rt->stopwatch.performance.getOwnGroup()->data;
+    return true;
+}
+
 void
 AssertHeapIsIdle(JSRuntime* rt)
 {
     MOZ_ASSERT(!rt->isHeapBusy());
 }
 
 void
 AssertHeapIsIdle(JSContext* cx)
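
The restored `IterPerformanceStats` API above is driven entirely by the `PerformanceStatsWalker` callback declared in jsapi.h. The following is a rough, hypothetical sketch of how an embedder might consume it; the names `CollectedStat`, `CollectWalker` and `CollectAllStats` are invented for illustration and are not part of this patch.

    #include "jsapi.h"
    #include "mozilla/Vector.h"

    // One record per PerformanceGroup reported by the walker.
    struct CollectedStat {
        uint64_t uid;             // unique id of the group
        bool hasParent;           // true for "own" groups that have a shared parent
        uint64_t parentUid;       // uid of the shared group, if hasParent
        js::PerformanceData data; // copy of the group's counters
    };

    static bool
    CollectWalker(JSContext* cx, const js::PerformanceData& stats, uint64_t uid,
                  const uint64_t* parentId, void* closure)
    {
        auto* out = static_cast<mozilla::Vector<CollectedStat>*>(closure);
        CollectedStat entry = { uid, parentId != nullptr,
                                parentId ? *parentId : 0, stats };
        // Returning false aborts the iteration and makes IterPerformanceStats
        // return false as well.
        return out->append(entry);
    }

    static bool
    CollectAllStats(JSContext* cx, mozilla::Vector<CollectedStat>& out,
                    js::PerformanceData& processStats)
    {
        // The walker is invoked once per shared group, then once per own group,
        // and `processStats` receives the process-wide totals.
        return js::IterPerformanceStats(cx, CollectWalker, &processStats, &out);
    }
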
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 /* JavaScript API. */
 
 #ifndef jsapi_h
 #define jsapi_h
 
-#include "mozilla/AlreadyAddRefed.h"
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Range.h"
 #include "mozilla/RangedPtr.h"
 #include "mozilla/RefPtr.h"
 
 #include <stdarg.h>
 #include <stddef.h>
@@ -686,16 +685,28 @@ typedef void
 
 typedef void
 (* JSZoneCallback)(JS::Zone* zone);
 
 typedef void
 (* JSCompartmentNameCallback)(JSRuntime* rt, JSCompartment* compartment,
                               char* buf, size_t bufsize);
 
+/**
+ * Callback used to ask the embedding to determine to which
+ * Performance Group the current execution belongs. Typically, this is
+ * used to group the JSCompartments of several iframes of the same
+ * page, or of several compartments of the same add-on, into a single
+ * Performance Group.
+ *
+ * Returns an opaque key.
+ */
+typedef void*
+(* JSCurrentPerfGroupCallback)(JSContext*);
+
 /************************************************************************/
 
 static MOZ_ALWAYS_INLINE JS::Value
 JS_NumberValue(double d)
 {
     int32_t i;
     d = JS::CanonicalizeNaN(d);
     if (mozilla::NumberIsInt32(d, &i))
@@ -5627,160 +5638,279 @@ GetSavedFrameParent(JSContext* cx, Handl
  * each line.
  */
 extern JS_PUBLIC_API(bool)
 BuildStackString(JSContext* cx, HandleObject stack, MutableHandleString stringp, size_t indent = 0);
 
 } /* namespace JS */
 
 
-/* Stopwatch-based performance monitoring. */
+/* Stopwatch-based CPU monitoring. */
 
 namespace js {
 
 class AutoStopwatch;
 
 /**
- * Abstract base class for a representation of the performance of a
- * component. Embeddings interested in performance monitoring should
- * provide a concrete implementation of this class, as well as the
- * relevant callbacks (see below).
+ * Container for performance data
+ * All values are monotonic.
+ * All values are updated after running to completion.
+ */
+struct PerformanceData {
+    // Number of times we have spent at least 2^n consecutive
+    // milliseconds executing code in this group.
+    // durations[0] is increased whenever we spend at least 1 ms
+    // executing code in this group,
+    // durations[1] whenever we spend at least 2 ms,
+    // ...
+    // durations[i] whenever we spend at least 2^i ms.
+    uint64_t durations[10];
+
+    // Total amount of time spent executing code in this group, in
+    // microseconds.
+    uint64_t totalUserTime;
+    uint64_t totalSystemTime;
+    uint64_t totalCPOWTime;
+
+    // Total number of times code execution entered this group,
+    // since process launch. This may be greater than the number
+    // of times we have entered the event loop.
+    uint64_t ticks;
+
+    PerformanceData()
+      : totalUserTime(0)
+      , totalSystemTime(0)
+      , totalCPOWTime(0)
+      , ticks(0)
+    {
+        mozilla::PodArrayZero(durations);
+    }
+    PerformanceData(const PerformanceData& from)
+      : totalUserTime(from.totalUserTime)
+      , totalSystemTime(from.totalSystemTime)
+      , totalCPOWTime(from.totalCPOWTime)
+      , ticks(from.ticks)
+    {
+        mozilla::PodArrayCopy(durations, from.durations);
+    }
+    PerformanceData& operator=(const PerformanceData& from)
+    {
+        mozilla::PodArrayCopy(durations, from.durations);
+        totalUserTime = from.totalUserTime;
+        totalSystemTime = from.totalSystemTime;
+        totalCPOWTime = from.totalCPOWTime;
+        ticks = from.ticks;
+        return *this;
+    }
+};
+
+/**
+ * A group of compartments forming a single unit in terms of
+ * performance monitoring.
+ *
+ * Two compartments belong to the same group if either:
+ * - they are part of the same add-on;
+ * - they are part of the same webpage;
+ * - they are both system built-ins.
+ *
+ * This class is refcounted by instances of `JSCompartment`.
+ * Do not attempt to hold on to a pointer to a `PerformanceGroup`.
  */
 struct PerformanceGroup {
-    PerformanceGroup();
+
+    // Performance data for this group.
+    PerformanceData data;
+
+    // An id unique to this runtime.
+    const uint64_t uid;
+
+    // The number of cycles spent in this group during this iteration
+    // of the event loop. Note that cycles are not a reliable measure,
+    // especially over short intervals. See Runtime.cpp for a more
+    // complete discussion on the imprecision of cycle measurement.
+    uint64_t recentCycles;
+
+    // The number of times this group has been activated during this
+    // iteration of the event loop.
+    uint64_t recentTicks;
+
+    // The number of microseconds spent doing CPOW during this
+    // iteration of the event loop.
+    uint64_t recentCPOW;
 
     // The current iteration of the event loop.
-    uint64_t iteration() const;
+    uint64_t iteration() const {
+        return iteration_;
+    }
 
     // `true` if an instance of `AutoStopwatch` is already monitoring
     // the performance of this performance group for this iteration
     // of the event loop, `false` otherwise.
-    bool isAcquired(uint64_t it) const;
+    bool hasStopwatch(uint64_t it) const {
+        return stopwatch_ != nullptr && iteration_ == it;
+    }
 
     // `true` if a specific instance of `AutoStopwatch` is already monitoring
     // the performance of this performance group for this iteration
     // of the event loop, `false` otherwise.
-    bool isAcquired(uint64_t it, const AutoStopwatch* owner) const;
+    bool hasStopwatch(uint64_t it, const AutoStopwatch* stopwatch) const {
+        return stopwatch_ == stopwatch && iteration_ == it;
+    }
 
     // Mark that an instance of `AutoStopwatch` is monitoring
     // the performance of this group for a given iteration.
-    void acquire(uint64_t it, const AutoStopwatch* owner);
+    void acquireStopwatch(uint64_t it, const AutoStopwatch* stopwatch) {
+        if (iteration_ != it) {
+            // Any data that pretends to be recent is actually bound
+            // to an older iteration and therefore stale.
+            resetRecentData();
+        }
+        iteration_ = it;
+        stopwatch_ = stopwatch;
+    }
 
     // Mark that no `AutoStopwatch` is monitoring the
     // performance of this group for the iteration.
-    void release(uint64_t it, const AutoStopwatch* owner);
-
-    // The number of cycles spent in this group during this iteration
-    // of the event loop. Note that cycles are not a reliable measure,
-    // especially over short intervals. See Stopwatch.* for a more
-    // complete discussion on the imprecision of cycle measurement.
-    uint64_t recentCycles(uint64_t iteration) const;
-    void addRecentCycles(uint64_t iteration, uint64_t cycles);
-
-    // The number of times this group has been activated during this
-    // iteration of the event loop.
-    uint64_t recentTicks(uint64_t iteration) const;
-    void addRecentTicks(uint64_t iteration, uint64_t ticks);
-
-    // The number of microseconds spent doing CPOW during this
-    // iteration of the event loop.
-    uint64_t recentCPOW(uint64_t iteration) const;
-    void addRecentCPOW(uint64_t iteration, uint64_t CPOW);
+    void releaseStopwatch(uint64_t it, const AutoStopwatch* stopwatch) {
+        if (iteration_ != it)
+            return;
+
+        MOZ_ASSERT(stopwatch == stopwatch_ || stopwatch_ == nullptr);
+        stopwatch_ = nullptr;
+    }
 
     // Get rid of any data that pretends to be recent.
-    void resetRecentData();
-
-    // `true` if new measures should be added to this group, `false`
-    // otherwise.
-    bool isActive() const;
-    void setIsActive(bool);
-
-    // `true` if this group has been used in the current iteration,
-    // `false` otherwise.
-    bool isUsedInThisIteration() const;
-    void setIsUsedInThisIteration(bool);
-  protected:
-    // An implementation of `delete` for this object. Must be provided
-    // by the embedding.
-    virtual void Delete() = 0;
-
-  private:
-    // The number of cycles spent in this group during this iteration
-    // of the event loop. Note that cycles are not a reliable measure,
-    // especially over short intervals. See Runtime.cpp for a more
-    // complete discussion on the imprecision of cycle measurement.
-    uint64_t recentCycles_;
-
-    // The number of times this group has been activated during this
-    // iteration of the event loop.
-    uint64_t recentTicks_;
-
-    // The number of microseconds spent doing CPOW during this
-    // iteration of the event loop.
-    uint64_t recentCPOW_;
+    void resetRecentData() {
+        recentCycles = 0;
+        recentTicks = 0;
+        recentCPOW = 0;
+    }
+
+    // Refcounting. For use with nsRefPtr.
+    void AddRef();
+    void Release();
+
+    // Construct a PerformanceGroup for a single compartment.
+    explicit PerformanceGroup(JSRuntime* rt);
+
+    // Construct a PerformanceGroup for a group of compartments.
+    explicit PerformanceGroup(JSContext* cx, void* key);
+
+private:
+    PerformanceGroup& operator=(const PerformanceGroup&) = delete;
+    PerformanceGroup(const PerformanceGroup&) = delete;
+
+    JSRuntime* runtime_;
+
+    // The stopwatch currently monitoring the group,
+    // or `nullptr` if none. Used only for comparison.
+    const AutoStopwatch* stopwatch_;
 
     // The current iteration of the event loop. If necessary,
     // may safely overflow.
     uint64_t iteration_;
 
-    // `true` if new measures should be added to this group, `false`
-    // otherwise.
-    bool isActive_;
-
-    // `true` if this group has been used in the current iteration,
-    // `false` otherwise.
-    bool isUsedInThisIteration_;
-
-    // The stopwatch currently monitoring the group,
-    // or `nullptr` if none. Used ony for comparison.
-    const AutoStopwatch* owner_;
-
-  public:
-    // Compatibility with RefPtr<>
-    void AddRef();
-    void Release();
+    // The hash key for this PerformanceGroup.
+    void* const key_;
+
+    // Refcounter.
     uint64_t refCount_;
+
+    // `true` if this PerformanceGroup may be shared by several
+    // compartments, `false` if it is dedicated to a single
+    // compartment.
+    const bool isSharedGroup_;
+};
+
+/**
+ * Each PerformanceGroupHolder handles:
+ * - a reference-counted indirection towards a PerformanceGroup shared
+ *   by several compartments
+ * - an owned PerformanceGroup representing the performance of a single
+ *   compartment.
+ */
+struct PerformanceGroupHolder {
+    // Get the shared group.
+    // On first call, this causes a single Hashtable lookup.
+    // Successive calls do not require further lookups.
+    js::PerformanceGroup* getSharedGroup(JSContext*);
+
+    // Get the own group.
+    js::PerformanceGroup* getOwnGroup();
+
+    // `true` if this holder is currently associated with a shared
+    // PerformanceGroup, `false` otherwise. Use this method to avoid
+    // instantiating a PerformanceGroup if you only need to get
+    // available performance data.
+    inline bool hasSharedGroup() const {
+        return sharedGroup_ != nullptr;
+    }
+    inline bool hasOwnGroup() const {
+        return ownGroup_ != nullptr;
+    }
+
+    // Remove the link to the PerformanceGroup. This method is designed
+    // as an invalidation mechanism if the JSCompartment changes nature
+    // (new values of `isSystem()`, `principals()` or `addonId`).
+    void unlink();
+
+    explicit PerformanceGroupHolder(JSRuntime* runtime)
+      : runtime_(runtime)
+    {   }
+    ~PerformanceGroupHolder();
+
+  private:
+    // Return the key representing this PerformanceGroup in
+    // Runtime::Stopwatch.
+    // Do not deallocate the key.
+    void* getHashKey(JSContext* cx);
+
+    JSRuntime *runtime_;
+
+    // The PerformanceGroups held by this object.
+    // Initially set to `nullptr` until the first call to `getSharedGroup()` / `getOwnGroup()`.
+    // May be reset to `nullptr` by a call to `unlink`.
+    RefPtr<js::PerformanceGroup> sharedGroup_;
+    RefPtr<js::PerformanceGroup> ownGroup_;
 };
 
 /**
  * Commit any Performance Monitoring data.
  *
  * Until `FlushMonitoring` has been called, all PerformanceMonitoring data is invisible
 * to the outside world and can be cancelled with a call to `ResetMonitoring`.
  */
-extern JS_PUBLIC_API(bool)
+extern JS_PUBLIC_API(void)
 FlushPerformanceMonitoring(JSRuntime*);
 
 /**
  * Cancel any measurement that hasn't been committed.
  */
 extern JS_PUBLIC_API(void)
 ResetPerformanceMonitoring(JSRuntime*);
 
-/**
- * Cleanup any memory used by performance monitoring.
- */
-extern JS_PUBLIC_API(void)
-DisposePerformanceMonitoring(JSRuntime*);
-
-/**
+/*
  * Turn on/off stopwatch-based CPU monitoring.
  *
  * `SetStopwatchIsMonitoringCPOW` or `SetStopwatchIsMonitoringJank`
  * may return `false` if monitoring could not be activated, which may
  * happen if we are out of memory.
  */
 extern JS_PUBLIC_API(bool)
 SetStopwatchIsMonitoringCPOW(JSRuntime*, bool);
 extern JS_PUBLIC_API(bool)
 GetStopwatchIsMonitoringCPOW(JSRuntime*);
 extern JS_PUBLIC_API(bool)
 SetStopwatchIsMonitoringJank(JSRuntime*, bool);
 extern JS_PUBLIC_API(bool)
 GetStopwatchIsMonitoringJank(JSRuntime*);
+extern JS_PUBLIC_API(bool)
+SetStopwatchIsMonitoringPerCompartment(JSRuntime*, bool);
+extern JS_PUBLIC_API(bool)
+GetStopwatchIsMonitoringPerCompartment(JSRuntime*);
 
 extern JS_PUBLIC_API(bool)
 IsStopwatchActive(JSRuntime*);
 
 // Extract the CPU rescheduling data.
 extern JS_PUBLIC_API(void)
 GetPerfMonitoringTestCpuRescheduling(JSRuntime*, uint64_t* stayed, uint64_t* moved);
 
@@ -5788,26 +5918,36 @@ GetPerfMonitoringTestCpuRescheduling(JSR
 /**
  * Add a number of microseconds to the time spent waiting on CPOWs
  * since process start.
  */
 extern JS_PUBLIC_API(void)
 AddCPOWPerformanceDelta(JSRuntime*, uint64_t delta);
 
 typedef bool
-(*StopwatchStartCallback)(uint64_t, void*);
-extern JS_PUBLIC_API(bool)
-SetStopwatchStartCallback(JSRuntime*, StopwatchStartCallback, void*);
-
-typedef bool
-(*StopwatchCommitCallback)(uint64_t, mozilla::Vector<RefPtr<PerformanceGroup>>&, void*);
-extern JS_PUBLIC_API(bool)
-SetStopwatchCommitCallback(JSRuntime*, StopwatchCommitCallback, void*);
-
-typedef bool
-(*GetGroupsCallback)(JSContext*, mozilla::Vector<RefPtr<PerformanceGroup>>&, void*);
-extern JS_PUBLIC_API(bool)
-SetGetPerformanceGroupsCallback(JSRuntime*, GetGroupsCallback, void*);
+(PerformanceStatsWalker)(JSContext* cx,
+                         const PerformanceData& stats, uint64_t uid,
+                         const uint64_t* parentId, void* closure);
+
+/**
+ * Extract the performance statistics.
+ *
+ * Note that before calling `walker`, we enter the corresponding compartment.
+ */
+extern JS_PUBLIC_API(bool)
+IterPerformanceStats(JSContext* cx, PerformanceStatsWalker* walker, js::PerformanceData* process, void* closure);
 
 } /* namespace js */
 
+/**
+ * Callback used to ask the embedding to determine to which
+ * Performance Group a compartment belongs. Typically, this is used to
+ * group the JSCompartments of several iframes of the same page, or of
+ * several compartments of the same add-on, into a single
+ * Performance Group.
+ *
+ * Returns an opaque key.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetCurrentPerfGroupCallback(JSRuntime *rt, JSCurrentPerfGroupCallback cb);
+
 
 #endif /* jsapi_h */
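
For the `JSCurrentPerfGroupCallback` / `JS_SetCurrentPerfGroupCallback` pair above, a minimal, hypothetical embedding-side implementation might key groups on the compartment's principals, so that all iframes of a page end up in the same Performance Group. This sketch assumes the friend APIs `js::GetContextCompartment` and `JS_GetCompartmentPrincipals` from jsfriendapi.h; it is not part of this patch.

    #include "jsapi.h"
    #include "jsfriendapi.h"

    static void*
    CurrentPerfGroupKey(JSContext* cx)
    {
        JSCompartment* compartment = js::GetContextCompartment(cx);
        if (!compartment)
            return nullptr; // fall back to a single, shared group
        // The principals pointer is used purely as an opaque hash key;
        // compartments sharing principals share a PerformanceGroup.
        return JS_GetCompartmentPrincipals(compartment);
    }

    // Installed once per runtime, typically at startup:
    //     JS_SetCurrentPerfGroupCallback(rt, CurrentPerfGroupKey);
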
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -318,17 +318,16 @@ UNIFIED_SOURCES += [
     'vm/SavedStacks.cpp',
     'vm/ScopeObject.cpp',
     'vm/SelfHosting.cpp',
     'vm/Shape.cpp',
     'vm/SharedArrayObject.cpp',
     'vm/SharedTypedArrayObject.cpp',
     'vm/SPSProfiler.cpp',
     'vm/Stack.cpp',
-    'vm/Stopwatch.cpp',
     'vm/String.cpp',
     'vm/StringBuffer.cpp',
     'vm/StructuredClone.cpp',
     'vm/Symbol.cpp',
     'vm/TaggedProto.cpp',
     'vm/Time.cpp',
     'vm/TypedArrayObject.cpp',
     'vm/TypeInference.cpp',
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -36,31 +36,35 @@
 #include "jit/AtomicOperations.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Ion.h"
 #include "jit/IonAnalysis.h"
 #include "vm/Debugger.h"
 #include "vm/GeneratorObject.h"
 #include "vm/Opcodes.h"
 #include "vm/Shape.h"
-#include "vm/Stopwatch.h"
 #include "vm/TraceLogging.h"
 
 #include "jsatominlines.h"
 #include "jsboolinlines.h"
 #include "jsfuninlines.h"
 #include "jsscriptinlines.h"
 
 #include "jit/JitFrames-inl.h"
 #include "vm/Debugger-inl.h"
 #include "vm/NativeObject-inl.h"
 #include "vm/Probes-inl.h"
 #include "vm/ScopeObject-inl.h"
 #include "vm/Stack-inl.h"
 
+#if defined(XP_WIN)
+#include <processthreadsapi.h>
+#include <windows.h>
+#endif // defined(XP_WIN)
+
 using namespace js;
 using namespace js::gc;
 
 using mozilla::ArrayLength;
 using mozilla::DebugOnly;
 using mozilla::NumberEqualsInt32;
 using mozilla::PodCopy;
 using JS::ForOfIterator;
@@ -380,16 +384,305 @@ InvokeState::pushInterpreterFrame(JSCont
 }
 
 InterpreterFrame*
 ExecuteState::pushInterpreterFrame(JSContext* cx)
 {
     return cx->runtime()->interpreterStack().pushExecuteFrame(cx, script_, thisv_, newTargetValue_,
                                                               scopeChain_, type_, evalInFrame_);
 }
+namespace js {
+// Implementation of per-PerformanceGroup performance measurement.
+//
+// All mutable state is stored in `JSRuntime::stopwatch` (per-process
+// performance stats and logistics) and in `PerformanceGroup` (per-group
+// performance stats).
+class MOZ_RAII AutoStopwatch final
+{
+    // The context with which this object was initialized.
+    // Non-null.
+    JSContext* const cx_;
+
+    // An indication of the number of times we have entered the event
+    // loop.  Used only for comparison.
+    uint64_t iteration_;
+
+    // `true` if we are monitoring jank, `false` otherwise.
+    bool isMonitoringJank_;
+    // `true` if we are monitoring CPOW, `false` otherwise.
+    bool isMonitoringCPOW_;
+
+    // Timestamps captured while starting the stopwatch.
+    uint64_t cyclesStart_;
+    uint64_t CPOWTimeStart_;
+
+    // The CPU on which we started the measure. Defined only
+    // if `isMonitoringJank_` is `true`.
+#if defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA
+    struct cpuid_t {
+        WORD group_;
+        BYTE number_;
+        cpuid_t(WORD group, BYTE number)
+          : group_(group),
+            number_(number)
+        { }
+        cpuid_t()
+          : group_(0),
+            number_(0)
+        { }
+    };
+#elif defined(XP_LINUX)
+    typedef int cpuid_t;
+#else
+    typedef struct {} cpuid_t;
+#endif // defined(XP_WIN) || defined(XP_LINUX)
+
+    cpuid_t cpuStart_;
+
+    // The performance group shared by this compartment and possibly
+    // others, or `nullptr` if another AutoStopwatch is already in
+    // charge of monitoring that group.
+    RefPtr<js::PerformanceGroup> sharedGroup_;
+
+    // The toplevel group, representing the entire process, or `nullptr`
+    // if another AutoStopwatch is already in charge of monitoring that group.
+    RefPtr<js::PerformanceGroup> topGroup_;
+
+    // The performance group specific to this compartment, or
+    // `nullptr` if another AutoStopwatch is already in charge of
+    // monitoring that group.
+    RefPtr<js::PerformanceGroup> ownGroup_;
+
+ public:
+    // If the stopwatch is active, constructing an instance of
+    // AutoStopwatch causes it to become the current owner of the
+    // stopwatch.
+    //
+    // Previous owner is restored upon destruction.
+    explicit inline AutoStopwatch(JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : cx_(cx)
+      , iteration_(0)
+      , isMonitoringJank_(false)
+      , isMonitoringCPOW_(false)
+      , cyclesStart_(0)
+      , CPOWTimeStart_(0)
+    {
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+
+        JSCompartment* compartment = cx_->compartment();
+        if (compartment->scheduledForDestruction)
+            return;
+
+        JSRuntime* runtime = cx_->runtime();
+        iteration_ = runtime->stopwatch.iteration();
+
+        sharedGroup_ = acquireGroup(compartment->performanceMonitoring.getSharedGroup(cx));
+        if (sharedGroup_)
+            topGroup_ = acquireGroup(runtime->stopwatch.performance.getOwnGroup());
+
+        if (runtime->stopwatch.isMonitoringPerCompartment())
+            ownGroup_ = acquireGroup(compartment->performanceMonitoring.getOwnGroup());
+
+        if (!sharedGroup_ && !ownGroup_) {
+            // We are not in charge of monitoring anything.
+            return;
+        }
+
+        // Now that we are sure that JS code is being executed,
+        // initialize the stopwatch for this iteration, lazily.
+        runtime->stopwatch.start();
+        enter();
+    }
+    ~AutoStopwatch()
+    {
+        if (!sharedGroup_ && !ownGroup_) {
+            // We are not in charge of monitoring anything.
+            return;
+        }
+
+        JSCompartment* compartment = cx_->compartment();
+        if (compartment->scheduledForDestruction)
+            return;
+
+        JSRuntime* runtime = cx_->runtime();
+        if (iteration_ != runtime->stopwatch.iteration()) {
+            // We have entered a nested event loop at some point.
+            // Any information we may have is obsolete.
+            return;
+        }
+
+        // Finish and commit measures
+        exit();
+
+        releaseGroup(sharedGroup_);
+        releaseGroup(topGroup_);
+        releaseGroup(ownGroup_);
+    }
+   private:
+    void enter() {
+        JSRuntime* runtime = cx_->runtime();
+
+        if (runtime->stopwatch.isMonitoringCPOW()) {
+            CPOWTimeStart_ = runtime->stopwatch.totalCPOWTime;
+            isMonitoringCPOW_ = true;
+        }
+
+        if (runtime->stopwatch.isMonitoringJank()) {
+            cyclesStart_ = this->getCycles();
+            cpuStart_ = this->getCPU();
+            isMonitoringJank_ = true;
+        }
+
+    }
+
+    void exit() {
+        JSRuntime* runtime = cx_->runtime();
+
+        uint64_t cyclesDelta = 0;
+        if (isMonitoringJank_ && runtime->stopwatch.isMonitoringJank()) {
+            // We were monitoring jank when we entered and we still are.
+
+            // If possible, discard results when we don't end on the
+            // same CPU as we started.  Note that we can be
+            // rescheduled to another CPU beween `getCycles()` and
+            // `getCPU()`.  We hope that this will happen rarely
+            // enough that the impact on our statistics will remain
+            // limited.
+            const cpuid_t cpuEnd = this->getCPU();
+            if (isSameCPU(cpuStart_, cpuEnd)) {
+                const uint64_t cyclesEnd = getCycles();
+                cyclesDelta = getDelta(cyclesEnd, cyclesStart_);
+            }
+#if (defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA) || defined(XP_LINUX)
+            if (isSameCPU(cpuStart_, cpuEnd))
+                runtime->stopwatch.testCpuRescheduling.stayed += 1;
+            else
+                runtime->stopwatch.testCpuRescheduling.moved += 1;
+#endif // defined(XP_WIN) || defined(XP_LINUX)
+        }
+
+        uint64_t CPOWTimeDelta = 0;
+        if (isMonitoringCPOW_ && runtime->stopwatch.isMonitoringCPOW()) {
+            // We were monitoring CPOW when we entered and we still are.
+            const uint64_t CPOWTimeEnd = runtime->stopwatch.totalCPOWTime;
+            CPOWTimeDelta = getDelta(CPOWTimeEnd, CPOWTimeStart_);
+
+        }
+        addToGroups(cyclesDelta, CPOWTimeDelta);
+    }
+
+    // Attempt to acquire a group
+    // If the group is `null` or if the group already has a stopwatch,
+    // do nothing and return `null`.
+    // Otherwise, bind the group to `this` for the current iteration
+    // and return `group`.
+    PerformanceGroup* acquireGroup(PerformanceGroup* group) {
+        if (!group)
+            return nullptr;
+
+        if (group->hasStopwatch(iteration_))
+            return nullptr;
+
+        group->acquireStopwatch(iteration_, this);
+        return group;
+    }
+
+    // Release a group.
+    // Noop if `group` is null or if `this` is not the stopwatch
+    // of `group` for the current iteration.
+    void releaseGroup(PerformanceGroup* group) {
+        if (group)
+            group->releaseStopwatch(iteration_, this);
+    }
+
+    // Add recent changes to all the groups owned by this stopwatch.
+    // Mark the groups as changed recently.
+    void addToGroups(uint64_t cyclesDelta, uint64_t CPOWTimeDelta) {
+        addToGroup(cyclesDelta, CPOWTimeDelta, sharedGroup_);
+        addToGroup(cyclesDelta, CPOWTimeDelta, topGroup_);
+        addToGroup(cyclesDelta, CPOWTimeDelta, ownGroup_);
+    }
+
+    // Add recent changes to a single group. Mark the group as changed recently.
+    void addToGroup(uint64_t cyclesDelta, uint64_t CPOWTimeDelta, PerformanceGroup* group) {
+        if (!group)
+            return;
+
+        MOZ_ASSERT(group->hasStopwatch(iteration_, this));
+
+        if (group->recentTicks == 0) {
+            // First time we meet this group during the tick,
+            // mark it as needing updates.
+            JSRuntime* runtime = cx_->runtime();
+            runtime->stopwatch.addChangedGroup(group);
+        }
+        group->recentTicks++;
+        group->recentCycles += cyclesDelta;
+        group->recentCPOW += CPOWTimeDelta;
+    }
+
+    // Perform a subtraction for a quantity that should be monotonic
+    // but is not guaranteed to be so.
+    //
+    // If `start <= end`, return `end - start`.
+    // Otherwise, return `0`.
+    uint64_t getDelta(const uint64_t end, const uint64_t start) const
+    {
+        if (start >= end)
+            return 0;
+        return end - start;
+    }
+
+    // Return the value of the Timestamp Counter, as provided by the CPU.
+    // 0 on platforms for which we do not have access to a Timestamp Counter.
+    uint64_t getCycles() const
+    {
+#if defined(MOZ_HAVE_RDTSC)
+        return ReadTimestampCounter();
+#else
+        return 0;
+#endif // defined(MOZ_HAVE_RDTSC)
+    }
+
+
+    // Return the identifier of the current CPU, on platforms for which we have
+    // access to the current CPU.
+    cpuid_t inline getCPU() const
+    {
+#if defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA
+        PROCESSOR_NUMBER proc;
+        GetCurrentProcessorNumberEx(&proc);
+
+        cpuid_t result(proc.Group, proc.Number);
+        return result;
+#elif defined(XP_LINUX)
+        return sched_getcpu();
+#else
+        return {};
+#endif // defined(XP_WIN) || defined(XP_LINUX)
+    }
+
+    // Compare two CPU identifiers.
+    bool inline isSameCPU(const cpuid_t& a, const cpuid_t& b) const
+    {
+#if defined(XP_WIN)  && WINVER >= _WIN32_WINNT_VISTA
+        return a.group_ == b.group_ && a.number_ == b.number_;
+#elif defined(XP_LINUX)
+        return a == b;
+#else
+        return true;
+#endif
+    }
+ private:
+    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
+};
+
+} // namespace js
+
 // MSVC with PGO inlines a lot of functions in RunScript, resulting in large
 // stack frames and stack overflow issues, see bug 1167883. Turn off PGO to
 // avoid this.
 #ifdef _MSC_VER
 # pragma optimize("g", off)
 #endif
 bool
 js::RunScript(JSContext* cx, RunState& state)
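
Since `AutoStopwatch` is an RAII guard, the intended usage pattern (the actual call site touched by this patch lies outside the excerpt shown here) is simply to place an instance on the stack around the code to be measured. A generic, illustrative sketch, with `doExecute` standing in for whatever engine-internal entry point actually runs the script:

    // Illustration only; `ExecuteWithMeasurement` and `doExecute` are
    // hypothetical names, not part of this patch.
    template <typename ExecuteFn>
    static bool
    ExecuteWithMeasurement(JSContext* cx, ExecuteFn doExecute)
    {
        // Constructor: acquires the compartment's shared/own PerformanceGroups
        // (unless another stopwatch already owns them for this iteration) and
        // lazily starts the runtime-wide measure.
        js::AutoStopwatch stopwatch(cx);

        bool ok = doExecute(cx);

        // Destructor (runs on scope exit): computes the cycle and CPOW deltas,
        // adds them to every acquired group, then releases the groups.
        return ok;
    }
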
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -229,17 +229,17 @@ JSRuntime::JSRuntime(JSRuntime* parentRu
     autoWritableJitCodeActive_(false),
 #ifdef DEBUG
     enteredPolicy(nullptr),
 #endif
     largeAllocationFailureCallback(nullptr),
     oomCallback(nullptr),
     debuggerMallocSizeOf(ReturnZeroSize),
     lastAnimationTime(0),
-    performanceMonitoring(thisFromCtor())
+    stopwatch(thisFromCtor())
 {
     setGCStoreBufferPtr(&gc.storeBuffer);
 
     liveRuntimesCount++;
 
     /* Initialize infallibly first, so we can goto bad and JS_DestroyRuntime. */
     JS_INIT_CLIST(&onNewGlobalObjectWatchers);
 
@@ -887,8 +887,461 @@ JS::UpdateJSRuntimeProfilerSampleBufferG
 }
 
 JS_FRIEND_API(bool)
 JS::IsProfilingEnabledForRuntime(JSRuntime* runtime)
 {
     MOZ_ASSERT(runtime);
     return runtime->spsProfiler.enabled();
 }
+
+JS_PUBLIC_API(void)
+js::FlushPerformanceMonitoring(JSRuntime* runtime)
+{
+    MOZ_ASSERT(runtime);
+    return runtime->stopwatch.commit();
+}
+JS_PUBLIC_API(void)
+js::ResetPerformanceMonitoring(JSRuntime* runtime)
+{
+    MOZ_ASSERT(runtime);
+    return runtime->stopwatch.reset();
+}
+
+void
+JSRuntime::Stopwatch::reset()
+{
+    // All ongoing measures are dependent on the current iteration#.
+    // By incrementing it, we mark all data as stale. Stale data will
+    // be overwritten progressively during the execution.
+    ++iteration_;
+    touchedGroups.clear();
+}
+
+void
+JSRuntime::Stopwatch::start()
+{
+    if (!isMonitoringJank_) {
+        return;
+    }
+
+    if (iteration_ == startedAtIteration_) {
+        // The stopwatch is already started for this iteration.
+        return;
+    }
+
+    startedAtIteration_ = iteration_;
+    if (!getResources(&userTimeStart_, &systemTimeStart_))
+        return;
+}
+
+// Commit the data that has been collected during the iteration
+// into the actual `PerformanceData`.
+//
+// We use the proportion of cycles-spent-in-group over
+// cycles-spent-in-toplevel-group as an approximation to allocate
+// system (kernel) time and user (CPU) time to each group. Note
+// that cycles are not an exact measure:
+//
+// 1. if the computer has gone to sleep, the clock may be reset to 0;
+// 2. if the process is moved between CPUs/cores, it may end up on a CPU
+//    or core with an unsynchronized clock;
+// 3. the mapping between clock cycles and walltime varies with the current
+//    frequency of the CPU;
+// 4. other threads/processes using the same CPU will also increment
+//    the counter.
+//
+// ** Effect of 1. (computer going to sleep)
+//
+// We assume that this will happen very seldom. Since the final numbers
+// are bounded by the CPU time and Kernel time reported by `getresources`,
+// the effect will be contained to a single iteration of the event loop.
+//
+// ** Effect of 2. (moving between CPUs/cores)
+//
+// On platforms that support it, we only measure the number of cycles
+// if we start and end execution of a group on the same
+// CPU/core. While there is a small window (a few cycles) during which
+// the thread can be migrated without us noticing, we expect that this
+// will happen rarely enough that this won't affect the statistics
+// meaningfully.
+//
+// On other platforms, assuming that the probability of jumping
+// between CPUs/cores during a given (real) cycle is constant, and
+// that the distribution of differences between clocks is even, the
+// probability that the number of cycles reported by a measure is
+// modified by X cycles should be a gaussian distribution, with groups
+// with longer execution having a larger amplitude than groups with
+// shorter execution. Since we discard measures that result in a
+// negative number of cycles, this distribution is actually skewed
+// towards over-estimating the number of cycles of groups that already
+// have many cycles and under-estimating the number of cycles that
+// already have fewer cycles.
+//
+// Since the final numbers are bounded by the CPU time and Kernel time
+// reported by `getresources`, we accept this bias.
+//
+// ** Effect of 3. (mapping between clock cycles and walltime)
+//
+// Assuming that this is evenly distributed, we expect that this will
+// eventually balance out.
+//
+// ** Effect of 4. (cycles increase with system activity)
+//
+// Assuming that, within an iteration of the event loop, this happens
+// uniformly over time, this will skew towards over-estimating the number
+// of cycles of groups that already have many cycles and under-estimating
+// the number of cycles that already have fewer cycles.
+//
+// Since the final numbers are bounded by the CPU time and Kernel time
+// reported by `getresources`, we accept this bias.
+//
+// ** Big picture
+//
+// Computing the number of cycles is fast and should be accurate
+// enough in practice. Alternatives (such as calling `getresources`
+// all the time or sampling from another thread) are very expensive
+// in system calls and/or battery and not necessarily more accurate.
+void
+JSRuntime::Stopwatch::commit()
+{
+#if !defined(MOZ_HAVE_RDTSC)
+    // The AutoStopwatch is only executed if `MOZ_HAVE_RDTSC`.
+    return;
+#endif // !defined(MOZ_HAVE_RDTSC)
+
+    if (!isMonitoringJank_) {
+        // Either we have not started monitoring or monitoring has
+        // been cancelled during the iteration.
+        return;
+    }
+
+    if (startedAtIteration_ != iteration_) {
+        // No JS code has been monitored during this iteration.
+        return;
+    }
+
+    uint64_t userTimeStop, systemTimeStop;
+    if (!getResources(&userTimeStop, &systemTimeStop))
+        return;
+
+    // `getResources` is not guaranteed to be monotonic, so round up
+    // any negative result to 0 milliseconds.
+    uint64_t userTimeDelta = 0;
+    if (userTimeStop > userTimeStart_)
+        userTimeDelta = userTimeStop - userTimeStart_;
+
+    uint64_t systemTimeDelta = 0;
+    if (systemTimeStop > systemTimeStart_)
+        systemTimeDelta = systemTimeStop - systemTimeStart_;
+
+    RefPtr<js::PerformanceGroup> group = performance.getOwnGroup();
+    const uint64_t totalRecentCycles = group->recentCycles;
+
+    mozilla::Vector<RefPtr<js::PerformanceGroup>> recentGroups;
+    touchedGroups.swap(recentGroups);
+    MOZ_ASSERT(recentGroups.length() > 0);
+
+    // We should only reach this stage if `group` has had some activity.
+    MOZ_ASSERT(group->recentTicks > 0);
+    for (RefPtr<js::PerformanceGroup>* iter = recentGroups.begin(); iter != recentGroups.end(); ++iter) {
+        transferDeltas(userTimeDelta, systemTimeDelta, totalRecentCycles, *iter);
+    }
+
+    // Make sure that `group` was processed along with the other items of `recentGroups`.
+    MOZ_ASSERT(group->recentTicks == 0);
+
+    // Finally, reset immediately, to make sure that we're not hit by the
+    // end of a nested event loop (which would cause `commit` to be called
+    // twice in succession).
+    reset();
+}
+
+void
+JSRuntime::Stopwatch::transferDeltas(uint64_t totalUserTimeDelta, uint64_t totalSystemTimeDelta,
+                                     uint64_t totalCyclesDelta, js::PerformanceGroup* group) {
+
+    const uint64_t ticksDelta = group->recentTicks;
+    const uint64_t cpowTimeDelta = group->recentCPOW;
+    const uint64_t cyclesDelta = group->recentCycles;
+    group->resetRecentData();
+
+    // We have now performed all cleanup and may `return` at any time without fear of leaks.
+
+    if (group->iteration() != iteration_) {
+        // Stale data, don't commit.
+        return;
+    }
+
+    // When we add a group as changed, we immediately set its
+    // `recentTicks` from 0 to 1.  If we have `ticksDelta == 0` at
+    // this stage, we have already called `resetRecentData` but we
+    // haven't removed it from the list.
+    MOZ_ASSERT(ticksDelta != 0);
+    MOZ_ASSERT(cyclesDelta <= totalCyclesDelta);
+    if (cyclesDelta == 0 || totalCyclesDelta == 0) {
+        // Nothing useful, don't commit.
+        return;
+    }
+
+    double proportion = (double)cyclesDelta / (double)totalCyclesDelta;
+    MOZ_ASSERT(proportion <= 1);
+
+    const uint64_t userTimeDelta = proportion * totalUserTimeDelta;
+    const uint64_t systemTimeDelta = proportion * totalSystemTimeDelta;
+
+    group->data.totalUserTime += userTimeDelta;
+    group->data.totalSystemTime += systemTimeDelta;
+    group->data.totalCPOWTime += cpowTimeDelta;
+    group->data.ticks += ticksDelta;
+
+    const uint64_t totalTimeDelta = userTimeDelta + systemTimeDelta;
+
+    size_t i = 0;
+    uint64_t duration = 1000; // 1 ms in µs
+    for (i = 0, duration = 1000;
+         i < mozilla::ArrayLength(group->data.durations) && duration < totalTimeDelta;
+         ++i, duration *= 2) {
+        group->data.durations[i]++;
+    }
+}
+
+// Get the OS-reported time spent in userland/systemland, in
+// microseconds. On most platforms, this data is per-thread,
+// but on some platforms we need to fall back to per-process.
+// Data is not guaranteed to be monotonic.
+bool
+JSRuntime::Stopwatch::getResources(uint64_t* userTime,
+                                   uint64_t* systemTime) const {
+    MOZ_ASSERT(userTime);
+    MOZ_ASSERT(systemTime);
+
+#if defined(XP_DARWIN)
+    // On Mac OS X, to get per-thread data, we need to
+    // reach into the kernel.
+
+    mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
+    thread_basic_info_data_t info;
+    mach_port_t port = mach_thread_self();
+    kern_return_t err =
+        thread_info(/* [in] targeted thread*/ port,
+                    /* [in] nature of information*/ THREAD_BASIC_INFO,
+                    /* [out] thread information */  (thread_info_t)&info,
+                    /* [inout] number of items */   &count);
+
+    // We do not need the ability to communicate with the thread, so
+    // let's release the port.
+    mach_port_deallocate(mach_task_self(), port);
+
+    if (err != KERN_SUCCESS)
+        return false;
+
+    *userTime = info.user_time.microseconds + info.user_time.seconds * 1000000;
+    *systemTime = info.system_time.microseconds + info.system_time.seconds * 1000000;
+
+#elif defined(XP_UNIX)
+    struct rusage rusage;
+#if defined(RUSAGE_THREAD)
+    // Under Linux, we can obtain per-thread statistics
+    int err = getrusage(RUSAGE_THREAD, &rusage);
+#else
+    // Under other Unices, we need to make do with noisier
+    // per-process statistics.
+    int err = getrusage(RUSAGE_SELF, &rusage);
+#endif // defined(RUSAGE_THREAD)
+
+    if (err)
+        return false;
+
+    *userTime = rusage.ru_utime.tv_usec + rusage.ru_utime.tv_sec * 1000000;
+    *systemTime = rusage.ru_stime.tv_usec + rusage.ru_stime.tv_sec * 1000000;
+
+#elif defined(XP_WIN)
+    // Under Windows, we can obtain per-thread statistics,
+    // although experience seems to suggest that they are
+    // not very good under Windows XP.
+    FILETIME creationFileTime; // Ignored
+    FILETIME exitFileTime; // Ignored
+    FILETIME kernelFileTime;
+    FILETIME userFileTime;
+    BOOL success = GetThreadTimes(GetCurrentThread(),
+                                  &creationFileTime, &exitFileTime,
+                                  &kernelFileTime, &userFileTime);
+
+    if (!success)
+        return false;
+
+    ULARGE_INTEGER kernelTimeInt;
+    kernelTimeInt.LowPart = kernelFileTime.dwLowDateTime;
+    kernelTimeInt.HighPart = kernelFileTime.dwHighDateTime;
+    // Convert 100 ns to 1 us.
+    *systemTime = kernelTimeInt.QuadPart / 10;
+
+    ULARGE_INTEGER userTimeInt;
+    userTimeInt.LowPart = userFileTime.dwLowDateTime;
+    userTimeInt.HighPart = userFileTime.dwHighDateTime;
+    // Convert 100 ns to 1 us.
+    *userTime = userTimeInt.QuadPart / 10;
+
+#endif // defined(XP_DARWIN) || defined(XP_UNIX) || defined(XP_WIN)
+
+    return true;
+}
+
+
+bool
+js::SetStopwatchIsMonitoringJank(JSRuntime* rt, bool value)
+{
+    return rt->stopwatch.setIsMonitoringJank(value);
+}
+bool
+js::GetStopwatchIsMonitoringJank(JSRuntime* rt)
+{
+    return rt->stopwatch.isMonitoringJank();
+}
+
+bool
+js::SetStopwatchIsMonitoringCPOW(JSRuntime* rt, bool value)
+{
+    return rt->stopwatch.setIsMonitoringCPOW(value);
+}
+bool
+js::GetStopwatchIsMonitoringCPOW(JSRuntime* rt)
+{
+    return rt->stopwatch.isMonitoringCPOW();
+}
+
+bool
+js::SetStopwatchIsMonitoringPerCompartment(JSRuntime* rt, bool value)
+{
+    return rt->stopwatch.setIsMonitoringPerCompartment(value);
+}
+bool
+js::GetStopwatchIsMonitoringPerCompartment(JSRuntime* rt)
+{
+    return rt->stopwatch.isMonitoringPerCompartment();
+}
+
+void
+js::GetPerfMonitoringTestCpuRescheduling(JSRuntime* rt, uint64_t* stayed, uint64_t* moved)
+{
+    *stayed = rt->stopwatch.testCpuRescheduling.stayed;
+    *moved = rt->stopwatch.testCpuRescheduling.moved;
+}
+
+js::PerformanceGroupHolder::~PerformanceGroupHolder()
+{
+    unlink();
+}
+
+void*
+js::PerformanceGroupHolder::getHashKey(JSContext* cx)
+{
+    if (runtime_->stopwatch.currentPerfGroupCallback) {
+        return (*runtime_->stopwatch.currentPerfGroupCallback)(cx);
+    }
+
+    // As a fallback, put everything in the same PerformanceGroup.
+    return nullptr;
+}
+
+void
+js::PerformanceGroupHolder::unlink()
+{
+    ownGroup_ = nullptr;
+    sharedGroup_ = nullptr;
+}
+
+PerformanceGroup*
+js::PerformanceGroupHolder::getOwnGroup()
+{
+    if (ownGroup_)
+        return ownGroup_;
+
+    return ownGroup_ = runtime_->new_<PerformanceGroup>(runtime_);
+}
+
+PerformanceGroup*
+js::PerformanceGroupHolder::getSharedGroup(JSContext* cx)
+{
+    if (sharedGroup_)
+        return sharedGroup_;
+
+    if (!runtime_->stopwatch.groups().initialized())
+        return nullptr;
+
+    void* key = getHashKey(cx);
+    JSRuntime::Stopwatch::Groups::AddPtr ptr = runtime_->stopwatch.groups().lookupForAdd(key);
+    if (ptr) {
+        sharedGroup_ = ptr->value();
+        MOZ_ASSERT(sharedGroup_);
+    } else {
+        sharedGroup_ = runtime_->new_<PerformanceGroup>(cx, key);
+        if (!sharedGroup_)
+          return nullptr;
+        runtime_->stopwatch.groups().add(ptr, key, sharedGroup_);
+    }
+
+    return sharedGroup_;
+}
+
+void
+js::AddCPOWPerformanceDelta(JSRuntime* rt, uint64_t delta)
+{
+    rt->stopwatch.totalCPOWTime += delta;
+}
+
+js::PerformanceGroup::PerformanceGroup(JSRuntime* rt)
+  : uid(rt->stopwatch.uniqueId()),
+    recentCycles(0),
+    recentTicks(0),
+    recentCPOW(0),
+    runtime_(rt),
+    stopwatch_(nullptr),
+    iteration_(0),
+    key_(nullptr),
+    refCount_(0),
+    isSharedGroup_(false)
+{
+}
+
+js::PerformanceGroup::PerformanceGroup(JSContext* cx, void* key)
+  : uid(cx->runtime()->stopwatch.uniqueId()),
+    recentCycles(0),
+    recentTicks(0),
+    recentCPOW(0),
+    runtime_(cx->runtime()),
+    stopwatch_(nullptr),
+    iteration_(0),
+    key_(key),
+    refCount_(0),
+    isSharedGroup_(true)
+{
+}
+
+void
+js::PerformanceGroup::AddRef()
+{
+    ++refCount_;
+}
+
+void
+js::PerformanceGroup::Release()
+{
+    MOZ_ASSERT(refCount_ > 0);
+    --refCount_;
+    if (refCount_ > 0)
+        return;
+
+    if (isSharedGroup_) {
+        JSRuntime::Stopwatch::Groups::Ptr ptr = runtime_->stopwatch.groups().lookup(key_);
+        MOZ_ASSERT(ptr);
+        runtime_->stopwatch.groups().remove(ptr);
+    }
+
+    js_delete(this);
+}
+
+void
+JS_SetCurrentPerfGroupCallback(JSRuntime *rt, JSCurrentPerfGroupCallback cb)
+{
+    rt->stopwatch.currentPerfGroupCallback = cb;
+}
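
To make the proportional accounting in `transferDeltas` concrete, here is a worked example with purely illustrative numbers. Suppose that for one iteration of the event loop `getResources` reports a user-time delta of 12,000 µs and a system-time delta of 4,000 µs, that the toplevel group accumulated 8,000,000 recent cycles, and that a given group accumulated 2,000,000 of them. The proportion is 2,000,000 / 8,000,000 = 0.25, so the group is charged 3,000 µs of user time and 1,000 µs of system time, in addition to its own CPOW time and tick count. Its total time delta is therefore 4,000 µs, so the duration loop increments durations[0] (the 1 ms bucket) and durations[1] (the 2 ms bucket) and then stops, since the next threshold, 4,000 µs, is not strictly smaller than the delta.
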
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -40,17 +40,16 @@
 #include "js/TraceableVector.h"
 #include "js/Vector.h"
 #include "vm/CodeCoverage.h"
 #include "vm/CommonPropertyNames.h"
 #include "vm/DateTime.h"
 #include "vm/MallocProvider.h"
 #include "vm/SPSProfiler.h"
 #include "vm/Stack.h"
-#include "vm/Stopwatch.h"
 #include "vm/Symbol.h"
 
 #ifdef _MSC_VER
 #pragma warning(push)
 #pragma warning(disable:4100) /* Silence unreferenced formal parameter warnings */
 #endif
 
 namespace js {
@@ -1492,17 +1491,312 @@ struct JSRuntime : public JS::shadow::Ru
      * function to assess the size of malloc'd blocks of memory.
      */
     mozilla::MallocSizeOf debuggerMallocSizeOf;
 
     /* Last time at which an animation was played for this runtime. */
     int64_t lastAnimationTime;
 
   public:
-    js::PerformanceMonitoring performanceMonitoring;
+
+    /* ------------------------------------------
+       Performance measurements
+       ------------------------------------------ */
+    struct Stopwatch {
+        /**
+         * A map used to collapse compartments belonging to the same
+         * add-on (respectively to the same webpage, to the platform)
+         * into a single group.
+         *
+         * Keys: for system compartments, a `JSAddonId*` (which may be
+         * `nullptr`), and for webpages, a `JSPrincipals*` (which may
+         * not). Note that compartments may start as non-system
+         * compartments and become system compartments later during their
+         * lifetime, which requires an invalidation.
+         *
+         * This map is meant to be accessed only by instances of
+         * PerformanceGroupHolder, which handle both reference-counting
+         * of the values and invalidation of the key/value pairs.
+         */
+        typedef js::HashMap<void*, js::PerformanceGroup*,
+                            js::DefaultHasher<void*>,
+                            js::SystemAllocPolicy> Groups;
+
+        Groups& groups() {
+            return groups_;
+        }
+
+        /**
+         * Performance data on the entire runtime.
+         */
+        js::PerformanceGroupHolder performance;
+
+        /**
+         * Callback used to ask the embedding to determine to which
+         * Performance Group the current execution belongs. Typically, this is
+         * used to group the JSCompartments of several iframes of the same
+         * page, or of several compartments of the same add-on, into a single
+         * Performance Group.
+         *
+         * May be `nullptr`, in which case we put all the JSCompartments
+         * in the same PerformanceGroup.
+         */
+        JSCurrentPerfGroupCallback currentPerfGroupCallback;
+
+        /**
+         * The number of the current iteration of the event loop.
+         */
+        uint64_t iteration() {
+            return iteration_;
+        }
+
+        explicit Stopwatch(JSRuntime* runtime)
+          : performance(runtime)
+          , currentPerfGroupCallback(nullptr)
+          , totalCPOWTime(0)
+          , isMonitoringJank_(false)
+          , isMonitoringCPOW_(false)
+          , isMonitoringPerCompartment_(false)
+          , iteration_(0)
+          , startedAtIteration_(0)
+          , idCounter_(0)
+        { }
+
+        /**
+         * Reset the stopwatch.
+         *
+         * This method is meant to be called whenever we start
+         * processing an event, to ensure that we stop any ongoing
+         * measurement that would otherwise provide irrelevant
+         * results.
+         */
+        void reset();
+
+        /**
+         * Start the stopwatch.
+         *
+         * This method is meant to be called once we know that the
+         * current event contains JavaScript code to execute. Calling
+         * this several times during the same iteration is idempotent.
+         */
+        void start();
+
+        /**
+         * Commit the performance data collected since the last call
+         * to `start()`, unless `reset()` has been called since then.
+         */
+        void commit();
+
+        /**
+         * Activate/deactivate stopwatch measurement of jank.
+         *
+         * Noop if `value` is `true` and the stopwatch is already
+         * measuring jank, or if `value` is `false` and the stopwatch
+         * is not measuring jank.
+         *
+         * Otherwise, any pending measurements are dropped, but previous
+         * measurements remain stored.
+         *
+         * May return `false` if the underlying hashtable cannot be allocated.
+         */
+        bool setIsMonitoringJank(bool value) {
+            if (isMonitoringJank_ != value)
+                reset();
+
+            if (value && !groups_.initialized()) {
+                if (!groups_.init(128))
+                    return false;
+            }
+
+            isMonitoringJank_ = value;
+            return true;
+        }
+        bool isMonitoringJank() const {
+            return isMonitoringJank_;
+        }
+
+        /**
+         * Activate/deactivate stopwatch measurement per compartment.
+         *
+         * Noop if `value` is `true` and the stopwatch is already
+         * measuring per compartment, or if `value` is `false` and the
+         * stopwatch is not measuring per compartment.
+         *
+         * Otherwise, any pending measurements are dropped, but previous
+         * measurements remain stored.
+         *
+         * May return `false` if the underlying hashtable cannot be allocated.
+         */
+        bool setIsMonitoringPerCompartment(bool value) {
+            if (isMonitoringPerCompartment_ != value)
+                reset();
+
+            if (value && !groups_.initialized()) {
+                if (!groups_.init(128))
+                    return false;
+            }
+
+            isMonitoringPerCompartment_ = value;
+            return true;
+        }
+        bool isMonitoringPerCompartment() const {
+            return isMonitoringPerCompartment_;
+        }
+
+        /**
+         * Activate/deactivate stopwatch measurement of CPOW.
+         *
+         * Noop if `value` is `true` and the stopwatch is already
+         * measuring CPOW, or if `value` is `false` and the stopwatch
+         * is not measuring CPOW.
+         *
+         * Otherwise, any pending measurements are dropped, but previous
+         * measurements remain stored.
+         *
+         * May return `false` if the underlying hashtable cannot be allocated.
+         */
+        bool setIsMonitoringCPOW(bool value) {
+            if (isMonitoringCPOW_ != value)
+                reset();
+
+            if (value && !groups_.initialized()) {
+                if (!groups_.init(128))
+                    return false;
+            }
+
+            isMonitoringCPOW_ = value;
+            return true;
+        }
+
+        bool isMonitoringCPOW() const {
+            return isMonitoringCPOW_;
+        }
+
+        /**
+         * Return an identifier for a group, unique to the runtime.
+         */
+        uint64_t uniqueId() {
+            return idCounter_++;
+        }
+
+        /**
+         * Mark a group as changed during the current iteration.
+         *
+         * Recent data from this group will be post-processed and
+         * committed at the end of the iteration.
+         */
+        void addChangedGroup(js::PerformanceGroup* group) {
+            MOZ_ASSERT(group->recentTicks == 0);
+            touchedGroups.append(group);
+        }
+
+        // The total amount of time spent waiting on CPOWs since the
+        // start of the process, in microseconds.
+        uint64_t totalCPOWTime;
+
+        // Data extracted by the AutoStopwatch to determine how often
+        // we reschedule the process to a different CPU during the
+        // execution of JS.
+        //
+        // Warning: These values are incremented *only* on platforms
+        // that offer a syscall/libcall to check on which CPU a
+        // process is currently executed.
+        struct TestCpuRescheduling
+        {
+            // Incremented once we have finished executing code
+            // in a group, if the CPU on which we started
+            // execution is the same as the CPU on which
+            // we finished.
+            uint64_t stayed;
+            // Incremented once we have finished executing code
+            // in a group, if the CPU on which we started
+            // execution is different from the CPU on which
+            // we finished.
+            uint64_t moved;
+            TestCpuRescheduling()
+              : stayed(0),
+                moved(0)
+            { }
+        };
+        TestCpuRescheduling testCpuRescheduling;
+
+    private:
+        Stopwatch(const Stopwatch&) = delete;
+        Stopwatch& operator=(const Stopwatch&) = delete;
+
+        // Commit a piece of data to a single group.
+        // `totalUserTimeDelta`, `totalSystemTimeDelta`, `totalCyclesDelta`
+        // represent the outer measures, taken for the entire runtime.
+        void transferDeltas(uint64_t totalUserTimeDelta,
+                            uint64_t totalSystemTimeDelta,
+                            uint64_t totalCyclesDelta,
+                            js::PerformanceGroup* destination);
+
+        // Query the OS for the time spent in CPU/kernel since process
+        // launch.
+        bool getResources(uint64_t* userTime, uint64_t* systemTime) const;
+
+    private:
+        Groups groups_;
+        friend struct js::PerformanceGroupHolder;
+
+        /**
+         * `true` if stopwatch monitoring is active for Jank, `false` otherwise.
+         */
+        bool isMonitoringJank_;
+        /**
+         * `true` if stopwatch monitoring is active for CPOW, `false` otherwise.
+         */
+        bool isMonitoringCPOW_;
+        /**
+         * `true` if the stopwatch should update data per-compartment, in
+         * addition to data per-group.
+         */
+        bool isMonitoringPerCompartment_;
+
+        /**
+         * The number of times we have entered the event loop.
+         * Used to reset counters whenever we enter the loop,
+         * which may be caused either by having completed the
+         * previous run of the event loop, or by entering a
+         * nested loop.
+         *
+         * Always incremented by 1, may safely overflow.
+         */
+        uint64_t iteration_;
+
+        /**
+         * The iteration at which the stopwatch was last started.
+         *
+         * Used both to avoid starting the stopwatch several times
+         * during the same event loop and to avoid committing stale
+         * stopwatch results.
+         */
+        uint64_t startedAtIteration_;
+
+        /**
+         * A counter used to generate unique identifiers for groups.
+         */
+        uint64_t idCounter_;
+
+        /**
+         * The timestamps returned by `getResources()` during the call to
+         * `start()` in the current iteration of the event loop.
+         */
+        uint64_t userTimeStart_;
+        uint64_t systemTimeStart_;
+
+        /**
+         * Performance groups used during the current event.
+         *
+         * They are cleared by `commit()` and `reset()`.
+         */
+        mozilla::Vector<RefPtr<js::PerformanceGroup>> touchedGroups;
+    };
+    Stopwatch stopwatch;
 };
 
 namespace js {
 
 // When entering JIT code, the calling JSContext* is stored into the thread's
 // PerThreadData. This function retrieves the JSContext with the pre-condition
 // that the caller is JIT code or C++ called directly from JIT code. This
 // function should not be called from arbitrary locations since the JSContext
deleted file mode 100644
--- a/js/src/vm/Stopwatch.cpp
+++ /dev/null
@@ -1,619 +0,0 @@
-#include "vm/Stopwatch.h"
-
-#include <mozilla/ArrayUtils.h>
-#include <mozilla/IntegerTypeTraits.h>
-#include <mozilla/unused.h>
-
-namespace js {
-
-bool
-PerformanceMonitoring::addRecentGroup(PerformanceGroup* group)
-{
-    if (group->isUsedInThisIteration())
-        return true;
-
-    group->setIsUsedInThisIteration(true);
-    return recentGroups_.append(group);
-}
-
-void
-PerformanceMonitoring::reset()
-{
-    // All ongoing measures are dependent on the current iteration#.
-    // By incrementing it, we mark all data as stale. Stale data will
-    // be overwritten progressively during the execution.
-    ++iteration_;
-    recentGroups_.clear();
-}
-
-void
-PerformanceMonitoring::start()
-{
-    if (!isMonitoringJank_)
-        return;
-
-    if (iteration_ == startedAtIteration_) {
-        // The stopwatch is already started for this iteration.
-        return;
-    }
-
-    startedAtIteration_ = iteration_;
-    if (stopwatchStartCallback)
-        stopwatchStartCallback(iteration_, stopwatchStartClosure);
-}
-
-// Commit the data that has been collected during the iteration
-// into the actual `PerformanceData`.
-//
-// We use the proportion of cycles-spent-in-group over
-// cycles-spent-in-toplevel-group as an approximation to allocate
-// system (kernel) time and user (CPU) time to each group. Note
-// that cycles are not an exact measure:
-//
-// 1. if the computer has gone to sleep, the clock may be reset to 0;
-// 2. if the process is moved between CPUs/cores, it may end up on a CPU
-//    or core with an unsynchronized clock;
-// 3. the mapping between clock cycles and walltime varies with the current
-//    frequency of the CPU;
-// 4. other threads/processes using the same CPU will also increment
-//    the counter.
-//
-// ** Effect of 1. (computer going to sleep)
-//
-// We assume that this will happen very seldom. Since the final numbers
-// are bounded by the CPU time and Kernel time reported by `getresources`,
-// the effect will be contained to a single iteration of the event loop.
-//
-// ** Effect of 2. (moving between CPUs/cores)
-//
-// On platforms that support it, we only measure the number of cycles
-// if we start and end execution of a group on the same
-// CPU/core. While there is a small window (a few cycles) during which
-// the thread can be migrated without us noticing, we expect that this
-// will happen rarely enough that this won't affect the statistics
-// meaningfully.
-//
-// On other platforms, assuming that the probability of jumping
-// between CPUs/cores during a given (real) cycle is constant, and
-// that the distribution of differences between clocks is even, the
-// probability that the number of cycles reported by a measure is
-// modified by X cycles should be a gaussian distribution, with groups
-// with longer execution having a larger amplitude than groups with
-// shorter execution. Since we discard measures that result in a
-// negative number of cycles, this distribution is actually skewed
-// towards over-estimating the number of cycles of groups that already
-// have many cycles and under-estimating the number of cycles that
-// already have fewer cycles.
-//
-// Since the final numbers are bounded by the CPU time and Kernel time
-// reported by `getresources`, we accept this bias.
-//
-// ** Effect of 3. (mapping between clock cycles and walltime)
-//
-// Assuming that this is evenly distributed, we expect that this will
-// eventually balance out.
-//
-// ** Effect of 4. (cycles increase with system activity)
-//
-// Assuming that, within an iteration of the event loop, this happens
-// uniformly over time, this will skew towards over-estimating the number
-// of cycles of groups that already have many cycles and under-estimating
-// the number of cycles that already have fewer cycles.
-//
-// Since the final numbers are bounded by the CPU time and Kernel time
-// reported by `getresources`, we accept this bias.
-//
-// ** Big picture
-//
-// Computing the number of cycles is fast and should be accurate
-// enough in practice. Alternatives (such as calling `getresources`
-// all the time or sampling from another thread) are very expensive
-// in system calls and/or battery and not necessarily more accurate.
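-//
-// ** Worked example (illustrative numbers only)
-//
-// If an iteration measured 10 ms of user time, 4 ms of kernel time and
-// 10M cycles overall, then a group that accounted for 2M of those cycles
-// is charged 10 * 2/10 = 2 ms of user time and 4 * 2/10 = 0.8 ms of
-// kernel time.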
-bool
-PerformanceMonitoring::commit()
-{
-#if !defined(MOZ_HAVE_RDTSC)
-    // The AutoStopwatch is only executed if `MOZ_HAVE_RDTSC`.
-    return true;
-#endif // !defined(MOZ_HAVE_RDTSC)
-
-    if (!isMonitoringJank_) {
-        // Either we have not started monitoring or monitoring has
-        // been cancelled during the iteration.
-        return true;
-    }
-
-    if (startedAtIteration_ != iteration_) {
-        // No JS code has been monitored during this iteration.
-        return true;
-    }
-
-    GroupVector recentGroups;
-    recentGroups_.swap(recentGroups);
-
-    bool success = true;
-    if (stopwatchCommitCallback)
-        success = stopwatchCommitCallback(iteration_, recentGroups, stopwatchCommitClosure);
-
-    // Reset immediately, to make sure that we're not hit by the end
-    // of a nested event loop (which would cause `commit` to be called
-    // twice in succession).
-    reset();
-    return success;
-}
-
-void
-PerformanceMonitoring::dispose(JSRuntime* rt)
-{
-    reset();
-    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
-        c->performanceMonitoring.unlink();
-    }
-}
-
-PerformanceGroupHolder::~PerformanceGroupHolder()
-{
-    unlink();
-}
-
-void
-PerformanceGroupHolder::unlink()
-{
-    initialized_ = false;
-    groups_.clear();
-}
-
-const GroupVector*
-PerformanceGroupHolder::getGroups(JSContext* cx)
-{
-    if (initialized_)
-        return &groups_;
-
-    if (!runtime_->performanceMonitoring.getGroupsCallback)
-        return nullptr;
-
-    if (!runtime_->performanceMonitoring.getGroupsCallback(cx, groups_, runtime_->performanceMonitoring.getGroupsClosure))
-        return nullptr;
-
-    initialized_ = true;
-    return &groups_;
-}
-
-AutoStopwatch::AutoStopwatch(JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
-  : cx_(cx)
-  , iteration_(0)
-  , isMonitoringJank_(false)
-  , isMonitoringCPOW_(false)
-  , cyclesStart_(0)
-  , CPOWTimeStart_(0)
-{
-    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
-
-    JSCompartment* compartment = cx_->compartment();
-    if (compartment->scheduledForDestruction)
-        return;
-
-    JSRuntime* runtime = cx_->runtime();
-    iteration_ = runtime->performanceMonitoring.iteration();
-
-    const GroupVector* groups = compartment->performanceMonitoring.getGroups(cx);
-    if (!groups) {
-        // Either the embedding has not provided any performance
-        // monitoring logistics or there was an error that prevents
-        // performance monitoring.
-        return;
-    }
-    for (auto group = groups->begin(); group < groups->end(); group++) {
-        auto acquired = acquireGroup(*group);
-        if (acquired)
-            groups_.append(acquired);
-    }
-    if (groups_.length() == 0) {
-        // We are not in charge of monitoring anything.
-        return;
-    }
-
-    // Now that we are sure that JS code is being executed,
-    // initialize the stopwatch for this iteration, lazily.
-    runtime->performanceMonitoring.start();
-    enter();
-}
-
-AutoStopwatch::~AutoStopwatch()
-{
-    if (groups_.length() == 0) {
-        // We are not in charge of monitoring anything.
-        return;
-    }
-
-    JSCompartment* compartment = cx_->compartment();
-    if (compartment->scheduledForDestruction)
-        return;
-
-    JSRuntime* runtime = cx_->runtime();
-    if (iteration_ != runtime->performanceMonitoring.iteration()) {
-        // We have entered a nested event loop at some point.
-        // Any information we may have is obsolete.
-        return;
-    }
-
-    mozilla::unused << exit(); // Sadly, there is nothing we can do about an error at this point.
-
-    for (auto group = groups_.begin(); group < groups_.end(); group++)
-        releaseGroup(*group);
-}
-
-void
-AutoStopwatch::enter()
-{
-    JSRuntime* runtime = cx_->runtime();
-
-    if (runtime->performanceMonitoring.isMonitoringCPOW()) {
-        CPOWTimeStart_ = runtime->performanceMonitoring.totalCPOWTime;
-        isMonitoringCPOW_ = true;
-    }
-
-    if (runtime->performanceMonitoring.isMonitoringJank()) {
-        cyclesStart_ = this->getCycles();
-        cpuStart_ = this->getCPU();
-        isMonitoringJank_ = true;
-    }
-}
-
-bool
-AutoStopwatch::exit()
-{
-    JSRuntime* runtime = cx_->runtime();
-
-    uint64_t cyclesDelta = 0;
-    if (isMonitoringJank_ && runtime->performanceMonitoring.isMonitoringJank()) {
-        // We were monitoring jank when we entered and we still are.
-
-        // If possible, discard results when we don't end on the
-        // same CPU as we started.  Note that we can be
-        // rescheduled to another CPU between `getCycles()` and
-        // `getCPU()`.  We hope that this will happen rarely
-        // enough that the impact on our statistics will remain
-        // limited.
-        const cpuid_t cpuEnd = this->getCPU();
-        if (isSameCPU(cpuStart_, cpuEnd)) {
-            const uint64_t cyclesEnd = getCycles();
-            cyclesDelta = getDelta(cyclesEnd, cyclesStart_);
-        }
-#if WINVER >= 0x600
-        updateTelemetry(cpuStart_, cpuEnd);
-#elif defined(__linux__)
-        updateTelemetry(cpuStart_, cpuEnd);
-#endif // WINVER >= 0x600 || defined(__linux__)
-    }
-
-    uint64_t CPOWTimeDelta = 0;
-    if (isMonitoringCPOW_ && runtime->performanceMonitoring.isMonitoringCPOW()) {
-        // We were monitoring CPOW when we entered and we still are.
-        const uint64_t CPOWTimeEnd = runtime->performanceMonitoring.totalCPOWTime;
-        CPOWTimeDelta = getDelta(CPOWTimeEnd, CPOWTimeStart_);
-    }
-    return addToGroups(cyclesDelta, CPOWTimeDelta);
-}
-
-void
-AutoStopwatch::updateTelemetry(const cpuid_t& cpuStart, const cpuid_t& cpuEnd)
-{
-    JSRuntime* runtime = cx_->runtime();
-
-    if (isSameCPU(cpuStart, cpuEnd))
-        runtime->performanceMonitoring.testCpuRescheduling.stayed += 1;
-    else
-        runtime->performanceMonitoring.testCpuRescheduling.moved += 1;
-}
-
-PerformanceGroup*
-AutoStopwatch::acquireGroup(PerformanceGroup* group)
-{
-    MOZ_ASSERT(group);
-
-    if (group->isAcquired(iteration_))
-        return nullptr;
-
-    if (!group->isActive())
-        return nullptr;
-
-    group->acquire(iteration_, this);
-    return group;
-}
-
-void
-AutoStopwatch::releaseGroup(PerformanceGroup* group)
-{
-    MOZ_ASSERT(group);
-    group->release(iteration_, this);
-}
-
-bool
-AutoStopwatch::addToGroups(uint64_t cyclesDelta, uint64_t CPOWTimeDelta)
-{
-    JSRuntime* runtime = cx_->runtime();
-
-    for (auto group = groups_.begin(); group < groups_.end(); ++group) {
-        if (!addToGroup(runtime, cyclesDelta, CPOWTimeDelta, *group))
-            return false;
-    }
-    return true;
-}
-
-bool
-AutoStopwatch::addToGroup(JSRuntime* runtime, uint64_t cyclesDelta, uint64_t CPOWTimeDelta, PerformanceGroup* group)
-{
-    MOZ_ASSERT(group);
-    MOZ_ASSERT(group->isAcquired(iteration_, this));
-
-    if (!runtime->performanceMonitoring.addRecentGroup(group))
-        return false;
-    group->addRecentTicks(iteration_, 1);
-    group->addRecentCycles(iteration_, cyclesDelta);
-    group->addRecentCPOW(iteration_, CPOWTimeDelta);
-    return true;
-}
-
-uint64_t
-AutoStopwatch::getDelta(const uint64_t end, const uint64_t start) const
-{
-    if (start >= end)
-        return 0;
-    return end - start;
-}
-
-uint64_t
-AutoStopwatch::getCycles() const
-{
-#if defined(MOZ_HAVE_RDTSC)
-    return ReadTimestampCounter();
-#else
-    return 0;
-#endif // defined(MOZ_HAVE_RDTSC)
-}
-
-cpuid_t inline
-AutoStopwatch::getCPU() const
-{
-#if defined(XP_WIN) && WINVER >= _WIN32_WINNT_VISTA
-    PROCESSOR_NUMBER proc;
-    GetCurrentProcessorNumberEx(&proc);
-
-    cpuid_t result(proc.Group, proc.Number);
-    return result;
-#elif defined(XP_LINUX)
-    return sched_getcpu();
-#else
-    return {};
-#endif // defined(XP_WIN) || defined(XP_LINUX)
-}
-
-bool inline
-AutoStopwatch::isSameCPU(const cpuid_t& a, const cpuid_t& b) const
-{
-#if defined(XP_WIN)  && WINVER >= _WIN32_WINNT_VISTA
-    return a.group_ == b.group_ && a.number_ == b.number_;
-#elif defined(XP_LINUX)
-    return a == b;
-#else
-    return true;
-#endif
-}
-
-PerformanceGroup::PerformanceGroup()
-    : recentCycles_(0)
-    , recentTicks_(0)
-    , recentCPOW_(0)
-    , iteration_(0)
-    , isActive_(false)
-    , isUsedInThisIteration_(false)
-    , owner_(nullptr)
-    , refCount_(0)
-{ }
-
-uint64_t
-PerformanceGroup::iteration() const
-{
-    return iteration_;
-}
-
-
-bool
-PerformanceGroup::isAcquired(uint64_t it) const
-{
-    return owner_ != nullptr && iteration_ == it;
-}
-
-bool
-PerformanceGroup::isAcquired(uint64_t it, const AutoStopwatch* owner) const
-{
-    return owner_ == owner && iteration_ == it;
-}
-
-void
-PerformanceGroup::acquire(uint64_t it, const AutoStopwatch* owner)
-{
-    if (iteration_ != it) {
-        // Any data that pretends to be recent is actually bound
-        // to an older iteration and therefore stale.
-        resetRecentData();
-    }
-    iteration_ = it;
-    owner_ = owner;
-}
-
-void
-PerformanceGroup::release(uint64_t it, const AutoStopwatch* owner)
-{
-    if (iteration_ != it)
-        return;
-
-    MOZ_ASSERT(owner == owner_ || owner_ == nullptr);
-    owner_ = nullptr;
-}
-
-void
-PerformanceGroup::resetRecentData()
-{
-    recentCycles_ = 0;
-    recentTicks_ = 0;
-    recentCPOW_ = 0;
-    isUsedInThisIteration_ = false;
-}
-
-
-uint64_t
-PerformanceGroup::recentCycles(uint64_t iteration) const
-{
-    MOZ_ASSERT(iteration == iteration_);
-    return recentCycles_;
-}
-
-void
-PerformanceGroup::addRecentCycles(uint64_t iteration, uint64_t cycles)
-{
-    MOZ_ASSERT(iteration == iteration_);
-    recentCycles_ += cycles;
-}
-
-uint64_t
-PerformanceGroup::recentTicks(uint64_t iteration) const
-{
-    MOZ_ASSERT(iteration == iteration_);
-    return recentTicks_;
-}
-
-void
-PerformanceGroup::addRecentTicks(uint64_t iteration, uint64_t ticks)
-{
-    MOZ_ASSERT(iteration == iteration_);
-    recentTicks_ += ticks;
-}
-
-
-uint64_t
-PerformanceGroup::recentCPOW(uint64_t iteration) const
-{
-    MOZ_ASSERT(iteration == iteration_);
-    return recentCPOW_;
-}
-
-void
-PerformanceGroup::addRecentCPOW(uint64_t iteration, uint64_t CPOW)
-{
-    MOZ_ASSERT(iteration == iteration_);
-    recentCPOW_ += CPOW;
-}
-
-
-bool
-PerformanceGroup::isActive() const
-{
-    return isActive_;
-}
-
-void
-PerformanceGroup::setIsActive(bool value)
-{
-    isActive_ = value;
-}
-
-void
-PerformanceGroup::setIsUsedInThisIteration(bool value)
-{
-    isUsedInThisIteration_ = value;
-}
-
-bool
-PerformanceGroup::isUsedInThisIteration() const
-{
-    return isUsedInThisIteration_;
-}
-
-void
-PerformanceGroup::AddRef()
-{
-    ++refCount_;
-}
-
-void
-PerformanceGroup::Release()
-{
-    MOZ_ASSERT(refCount_ > 0);
-    --refCount_;
-    if (refCount_ > 0)
-        return;
-
-    this->Delete();
-}
-
-JS_PUBLIC_API(bool) SetStopwatchStartCallback(JSRuntime* rt, StopwatchStartCallback cb, void* closure)
-{
-    rt->performanceMonitoring.setStopwatchStartCallback(cb, closure);
-    return true;
-}
-
-JS_PUBLIC_API(bool) SetStopwatchCommitCallback(JSRuntime* rt, StopwatchCommitCallback cb, void* closure)
-{
-    rt->performanceMonitoring.setStopwatchCommitCallback(cb, closure);
-    return true;
-}
-
-JS_PUBLIC_API(bool) SetGetPerformanceGroupsCallback(JSRuntime* rt, GetGroupsCallback cb, void* closure)
-{
-    rt->performanceMonitoring.setGetGroupsCallback(cb, closure);
-    return true;
-}
-
-JS_PUBLIC_API(bool)
-FlushPerformanceMonitoring(JSRuntime* rt)
-{
-    return rt->performanceMonitoring.commit();
-}
-JS_PUBLIC_API(void)
-ResetPerformanceMonitoring(JSRuntime* rt)
-{
-    return rt->performanceMonitoring.reset();
-}
-JS_PUBLIC_API(void)
-DisposePerformanceMonitoring(JSRuntime* rt)
-{
-    return rt->performanceMonitoring.dispose(rt);
-}
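-
-// Typical embedder-driven lifecycle (a sketch; the wrapper names below are
-// hypothetical, only the js:: entry points above are real):
-//
-//     void OnEventLoopEntry(JSRuntime* rt) { js::ResetPerformanceMonitoring(rt); }
-//     void OnEventLoopExit(JSRuntime* rt)  { js::FlushPerformanceMonitoring(rt); }
-//     void OnShutdown(JSRuntime* rt)       { js::DisposePerformanceMonitoring(rt); }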
-
-JS_PUBLIC_API(bool)
-SetStopwatchIsMonitoringJank(JSRuntime* rt, bool value)
-{
-    return rt->performanceMonitoring.setIsMonitoringJank(value);
-}
-JS_PUBLIC_API(bool)
-GetStopwatchIsMonitoringJank(JSRuntime* rt)
-{
-    return rt->performanceMonitoring.isMonitoringJank();
-}
-
-JS_PUBLIC_API(bool)
-SetStopwatchIsMonitoringCPOW(JSRuntime* rt, bool value)
-{
-    return rt->performanceMonitoring.setIsMonitoringCPOW(value);
-}
-JS_PUBLIC_API(bool)
-GetStopwatchIsMonitoringCPOW(JSRuntime* rt)
-{
-    return rt->performanceMonitoring.isMonitoringCPOW();
-}
-
-JS_PUBLIC_API(void)
-GetPerfMonitoringTestCpuRescheduling(JSRuntime* rt, uint64_t* stayed, uint64_t* moved)
-{
-    *stayed = rt->performanceMonitoring.testCpuRescheduling.stayed;
-    *moved = rt->performanceMonitoring.testCpuRescheduling.moved;
-}
-
-JS_PUBLIC_API(void)
-AddCPOWPerformanceDelta(JSRuntime* rt, uint64_t delta)
-{
-    rt->performanceMonitoring.totalCPOWTime += delta;
-}
-
-
-} // namespace js
-
deleted file mode 100644
--- a/js/src/vm/Stopwatch.h
+++ /dev/null
@@ -1,391 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef vm_Stopwatch_h
-#define vm_Stopwatch_h
-
-/*
-  An API for following, in real time, the amount of CPU time spent
-  executing webpages, add-ons, etc.
-*/
-
-namespace js {
-
-typedef mozilla::Vector<RefPtr<js::PerformanceGroup>> GroupVector;
-
-/**
- * A container for performance groups.
- *
- * Performance monitoring deals with the execution duration of code
- * that belongs to components, for a notion of components defined by
- * the embedding.  Typically, in a web browser, a component may be a
- * webpage and/or a frame and/or a module and/or an add-on and/or a
- * sandbox and/or a process etc.
- *
- * A PerformanceGroupHolder is owned by a JSCompartment and maps that
- * compartment to all the components to which it belongs.
- */
-struct PerformanceGroupHolder {
-
-    /**
-     * Get the groups to which this compartment belongs.
-     *
-     * Pre-condition: Execution must have entered the compartment.
-     *
-     * May return `nullptr` if the embedding has not initialized
-     * support for performance groups.
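-     *
-     * Callers must handle `nullptr`, as the `AutoStopwatch` constructor
-     * does, e.g. (a sketch of that existing pattern):
-     *
-     *     const GroupVector* groups = compartment->performanceMonitoring.getGroups(cx);
-     *     if (!groups)
-     *         return; // Monitoring is not set up for this runtime.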
-     */
-    const GroupVector* getGroups(JSContext*);
-
-    explicit PerformanceGroupHolder(JSRuntime* runtime)
-      : runtime_(runtime)
-      , initialized_(false)
-    {  }
-    ~PerformanceGroupHolder();
-    void unlink();
-  private:
-    JSRuntime* runtime_;
-
-    // `true` once a call to `getGroups` has succeeded.
-    bool initialized_;
-
-    // The groups to which this compartment belongs. Filled if and only
-    // if `initialized_` is `true`.
-    GroupVector groups_;
-};
-
-/**
- * Container class for everything related to performance monitoring.
- */
-struct PerformanceMonitoring {
-    /**
-     * The number of the current iteration of the event loop.
-     */
-    uint64_t iteration() {
-        return iteration_;
-    }
-
-    explicit PerformanceMonitoring(JSRuntime* runtime)
-      : totalCPOWTime(0)
-      , stopwatchStartCallback(nullptr)
-      , stopwatchStartClosure(nullptr)
-      , stopwatchCommitCallback(nullptr)
-      , stopwatchCommitClosure(nullptr)
-      , getGroupsCallback(nullptr)
-      , getGroupsClosure(nullptr)
-      , isMonitoringJank_(false)
-      , isMonitoringCPOW_(false)
-      , iteration_(0)
-      , startedAtIteration_(0)
-    { }
-
-    /**
-     * Reset the stopwatch.
-     *
-     * This method is meant to be called whenever we start
-     * processing an event, to ensure that we stop any ongoing
-     * measurement that would otherwise provide irrelevant
-     * results.
-     */
-    void reset();
-
-    /**
-     * Start the stopwatch.
-     *
-     * This method is meant to be called once we know that the
-     * current event contains JavaScript code to execute. Calling
-     * this several times during the same iteration is idempotent.
-     */
-    void start();
-
-    /**
-     * Commit the performance data collected since the last call
-     * to `start()`, unless `reset()` has been called since then.
-     */
-    bool commit();
-
-    /**
-     * Release memory and references.
-     */
-    void dispose(JSRuntime* rt);
-
-    /**
-     * Activate/deactivate stopwatch measurement of jank.
-     *
-     * Noop if `value` is `true` and the stopwatch is already
-     * measuring jank, or if `value` is `false` and the stopwatch
-     * is not measuring jank.
-     *
-     * Otherwise, any pending measurements are dropped, but previous
-     * measurements remain stored.
-     *
-     * May return `false` if the underlying hashtable cannot be allocated.
-     */
-    bool setIsMonitoringJank(bool value) {
-        if (isMonitoringJank_ != value)
-            reset();
-
-        isMonitoringJank_ = value;
-        return true;
-    }
-    bool isMonitoringJank() const {
-        return isMonitoringJank_;
-    }
-
-    /**
-     * Mark that a group has been used in this iteration.
-     */
-    bool addRecentGroup(PerformanceGroup* group);
-
-    /**
-     * Activate/deactivate stopwatch measurement of CPOW.
-     *
-     * Noop if `value` is `true` and the stopwatch is already
-     * measuring CPOW, or if `value` is `false` and the stopwatch
-     * is not measuring CPOW.
-     *
-     * Otherwise, any pending measurements are dropped, but previous
-     * measurements remain stored.
-     *
-     * May return `false` if the underlying hashtable cannot be allocated.
-     */
-    bool setIsMonitoringCPOW(bool value) {
-        if (isMonitoringCPOW_ != value)
-            reset();
-
-        isMonitoringCPOW_ = value;
-        return true;
-    }
-
-    bool isMonitoringCPOW() const {
-        return isMonitoringCPOW_;
-    }
-
-    /**
-     * Callbacks called when we start executing an event/when we have
-     * run to completion (including enqueued microtasks).
-     *
-     * If there are no nested event loops, each call to
-     * `stopwatchStartCallback` is followed by a call to
-     * `stopwatchCommitCallback`. However, embedders should not assume
-     * that this will always be the case, unless they take measures to
-     * prevent nested event loops.
-     *
-     * In the presence of nested event loops, several calls to
-     * `stopwatchStartCallback` may occur before a call to
-     * `stopwatchCommitCallback`. Embedders should assume that a
-     * second call to `stopwatchStartCallback` cancels any measure
-     * started by the previous calls to `stopwatchStartCallback` and
-     * which have not been committed by `stopwatchCommitCallback`.
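-     *
-     * Example registration from the embedding (a sketch; `MyStart` and
-     * `MyCommit` are hypothetical functions matching the
-     * `StopwatchStartCallback`/`StopwatchCommitCallback` signatures):
-     *
-     *     js::SetStopwatchStartCallback(rt, MyStart, closure);
-     *     js::SetStopwatchCommitCallback(rt, MyCommit, closure);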
-     */
-    void setStopwatchStartCallback(js::StopwatchStartCallback cb, void* closure) {
-        stopwatchStartCallback = cb;
-        stopwatchStartClosure = closure;
-    }
-    void setStopwatchCommitCallback(js::StopwatchCommitCallback cb, void* closure) {
-        stopwatchCommitCallback = cb;
-        stopwatchCommitClosure = closure;
-    }
-
-    /**
-     * Callback called to associate a JSCompartment to the set of
-     * `PerformanceGroup`s that represent the components to which
-     * it belongs.
-     */
-    void setGetGroupsCallback(js::GetGroupsCallback cb, void* closure) {
-        getGroupsCallback = cb;
-        getGroupsClosure = closure;
-    }
-
-    /**
-     * The total amount of time spent waiting on CPOWs since the
-     * start of the process, in microseconds.
-     */
-    uint64_t totalCPOWTime;
-
-    /**
-     * Data extracted by the AutoStopwatch to determine how often
-     * we reschedule the process to a different CPU during the
-     * execution of JS.
-     *
-     * Warning: These values are incremented *only* on platforms
-     * that offer a syscall/libcall to check on which CPU a
-     * process is currently executed.
-     */
-    struct TestCpuRescheduling
-    {
-        // Incremented once we have finished executing code
-        // in a group, if the CPU on which we started
-        // execution is the same as the CPU on which
-        // we finished.
-        uint64_t stayed;
-        // Incremented once we have finished executing code
-        // in a group, if the CPU on which we started
-        // execution is different from the CPU on which
-        // we finished.
-        uint64_t moved;
-        TestCpuRescheduling()
-            : stayed(0),
-              moved(0)
-        { }
-    };
-    TestCpuRescheduling testCpuRescheduling;
-  private:
-    PerformanceMonitoring(const PerformanceMonitoring&) = delete;
-    PerformanceMonitoring& operator=(const PerformanceMonitoring&) = delete;
-
-  private:
-    friend struct PerformanceGroupHolder;
-    js::StopwatchStartCallback stopwatchStartCallback;
-    void* stopwatchStartClosure;
-    js::StopwatchCommitCallback stopwatchCommitCallback;
-    void* stopwatchCommitClosure;
-
-    js::GetGroupsCallback getGroupsCallback;
-    void* getGroupsClosure;
-
-    /**
-     * `true` if stopwatch monitoring is active for Jank, `false` otherwise.
-     */
-    bool isMonitoringJank_;
-    /**
-     * `true` if stopwatch monitoring is active for CPOW, `false` otherwise.
-     */
-    bool isMonitoringCPOW_;
-
-    /**
-     * The number of times we have entered the event loop.
-     * Used to reset counters whenever we enter the loop,
-     * which may be caused either by having completed the
-     * previous run of the event loop, or by entering a
-     * nested loop.
-     *
-     * Always incremented by 1, may safely overflow.
-     */
-    uint64_t iteration_;
-
-    /**
-     * The iteration at which the stopwatch was last started.
-     *
-     * Used both to avoid starting the stopwatch several times
-     * during the same event loop and to avoid committing stale
-     * stopwatch results.
-     */
-    uint64_t startedAtIteration_;
-
-    /**
-     * Groups used in the current iteration.
-     */
-    GroupVector recentGroups_;
-};
-
-#if WINVER >= 0x0600
-struct cpuid_t {
-    WORD group_;
-    BYTE number_;
-    cpuid_t(WORD group, BYTE number)
-        : group_(group),
-          number_(number)
-    { }
-    cpuid_t()
-        : group_(0),
-          number_(0)
-    { }
-};
-#elif defined(__linux__)
-    typedef int cpuid_t;
-#else
-    typedef struct {} cpuid_t;
-#endif // WINVER >= 0x0600 || defined(__linux__)
-
-/**
- * RAII class to start/stop measuring performance when
- * entering/leaving a compartment.
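- *
- * Typical use at a JS execution entry point (a sketch; the actual call
- * sites live in the interpreter/JIT glue, outside this header):
- *
- *     {
- *         js::AutoStopwatch stopwatch(cx);  // acquires this compartment's groups
- *         // ... execute JS in the compartment ...
- *     }   // the destructor commits cycle/CPOW deltas for the iteration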
- */
-class AutoStopwatch final {
-    // The context with which this object was initialized.
-    // Non-null.
-    JSContext* const cx_;
-
-    // An indication of the number of times we have entered the event
-    // loop.  Used only for comparison.
-    uint64_t iteration_;
-
-    // `true` if we are monitoring jank, `false` otherwise.
-    bool isMonitoringJank_;
-    // `true` if we are monitoring CPOW, `false` otherwise.
-    bool isMonitoringCPOW_;
-
-    // Timestamps captured while starting the stopwatch.
-    uint64_t cyclesStart_;
-    uint64_t CPOWTimeStart_;
-
-    // The CPU on which we started the measure. Defined only
-    // if `isMonitoringJank_` is `true`.
-    cpuid_t cpuStart_;
-
-    mozilla::Vector<RefPtr<js::PerformanceGroup>> groups_;
-
-  public:
-    // If the stopwatch is active, constructing an instance of
-    // AutoStopwatch causes it to become the current owner of the
-    // stopwatch.
-    //
-    // Previous owner is restored upon destruction.
-    explicit AutoStopwatch(JSContext* cx MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
-    ~AutoStopwatch();
-  private:
-    void inline enter();
-
-    bool inline exit();
-
-    // Attempt to acquire a group
-    // If the group is inactive or if the group already has a stopwatch,
-    // do nothing and return `nullptr`.
-    // Otherwise, bind the group to `this` for the current iteration
-    // and return `group`.
-    PerformanceGroup* acquireGroup(PerformanceGroup* group);
-
-    // Release a group. Noop if `this` is not the stopwatch of
-    // `group` for the current iteration.
-    void releaseGroup(PerformanceGroup* group);
-
-    // Add recent changes to all the groups owned by this stopwatch.
-    // Mark the groups as changed recently.
-    bool addToGroups(uint64_t cyclesDelta, uint64_t CPOWTimeDelta);
-
-    // Add recent changes to a single group. Mark the group as changed recently.
-    bool addToGroup(JSRuntime* runtime, uint64_t cyclesDelta, uint64_t CPOWTimeDelta, PerformanceGroup* group);
-
-    // Update telemetry statistics.
-    void updateTelemetry(const cpuid_t& a, const cpuid_t& b);
-
-    // Perform a subtraction for a quantity that should be monotonic
-    // but is not guaranteed to be so.
-    //
-    // If `start <= end`, return `end - start`.
-    // Otherwise, return `0`.
-    uint64_t inline getDelta(const uint64_t end, const uint64_t start) const;
-
-    // Return the value of the Timestamp Counter, as provided by the CPU.
-    // 0 on platforms for which we do not have access to a Timestamp Counter.
-    uint64_t inline getCycles() const;
-
-
-    // Return the identifier of the current CPU, on platforms for which we have
-    // access to the current CPU.
-    cpuid_t inline getCPU() const;
-
-    // Compare two CPU identifiers.
-    bool inline isSameCPU(const cpuid_t& a, const cpuid_t& b) const;
-  private:
-    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
-};
-
-
-} // namespace js
-
-#endif // vm_Stopwatch_h