Merge mozilla-inbound to mozilla-central. a=merge
Merge mozilla-inbound to mozilla-central. a=merge
--- a/accessible/base/nsAccessibilityService.cpp
+++ b/accessible/base/nsAccessibilityService.cpp
@@ -1337,19 +1337,16 @@ nsAccessibilityService::Init()
#if defined(XP_WIN)
// This information needs to be initialized before the observer fires.
if (XRE_IsParentProcess()) {
Compatibility::Init();
}
#endif // defined(XP_WIN)
- static const char16_t kInitIndicator[] = { '1', 0 };
- observerService->NotifyObservers(nullptr, "a11y-init-or-shutdown", kInitIndicator);
-
// Subscribe to EventListenerService.
nsCOMPtr<nsIEventListenerService> eventListenerService =
do_GetService("@mozilla.org/eventlistenerservice;1");
if (!eventListenerService)
return false;
eventListenerService->AddListenerChangeListener(this);
@@ -1401,16 +1398,19 @@ nsAccessibilityService::Init()
#endif
// Now its safe to start platform accessibility.
if (XRE_IsParentProcess())
PlatformInit();
statistics::A11yInitialized();
+ static const char16_t kInitIndicator[] = { '1', 0 };
+ observerService->NotifyObservers(nullptr, "a11y-init-or-shutdown", kInitIndicator);
+
return true;
}
void
nsAccessibilityService::Shutdown()
{
// Application is going to be closed, shutdown accessibility and mark
// accessibility service as shutdown to prevent calls of its methods.
@@ -1420,19 +1420,16 @@ nsAccessibilityService::Shutdown()
MOZ_ASSERT(gConsumers, "Accessibility was shutdown already");
UnsetConsumers(eXPCOM | eMainProcess | ePlatformAPI);
// Remove observers.
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
if (observerService) {
observerService->RemoveObserver(this, NS_XPCOM_SHUTDOWN_OBSERVER_ID);
-
- static const char16_t kShutdownIndicator[] = { '0', 0 };
- observerService->NotifyObservers(nullptr, "a11y-init-or-shutdown", kShutdownIndicator);
}
// Stop accessible document loader.
DocManager::Shutdown();
SelectionManager::Shutdown();
#ifdef XP_WIN
@@ -1452,16 +1449,21 @@ nsAccessibilityService::Shutdown()
NS_RELEASE(gApplicationAccessible);
gApplicationAccessible = nullptr;
NS_IF_RELEASE(gXPCApplicationAccessible);
gXPCApplicationAccessible = nullptr;
NS_RELEASE(gAccessibilityService);
gAccessibilityService = nullptr;
+
+ if (observerService) {
+ static const char16_t kShutdownIndicator[] = { '0', 0 };
+ observerService->NotifyObservers(nullptr, "a11y-init-or-shutdown", kShutdownIndicator);
+ }
}
already_AddRefed<Accessible>
nsAccessibilityService::CreateAccessibleByType(nsIContent* aContent,
DocAccessible* aDoc)
{
nsAutoString role;
nsCoreUtils::XBLBindingRole(aContent, role);
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -513,20 +513,24 @@ Statistics::formatDetailedTotals() const
char buffer[1024];
SprintfLiteral(buffer, format, t(total), t(longest));
return DuplicateString(buffer);
}
void
Statistics::formatJsonSlice(size_t sliceNum, JSONPrinter& json) const
{
+ /*
+ * We number each of the slice properties to keep the code in
+ * GCTelemetry.jsm in sync. See MAX_SLICE_KEYS.
+ */
json.beginObject();
- formatJsonSliceDescription(sliceNum, slices_[sliceNum], json);
+ formatJsonSliceDescription(sliceNum, slices_[sliceNum], json); // # 1-11
- json.beginObjectProperty("times");
+ json.beginObjectProperty("times"); // # 12
formatJsonPhaseTimes(slices_[sliceNum].phaseTimes, json);
json.endObject();
json.endObject();
}
UniqueChars
Statistics::renderJsonSlice(size_t sliceNum) const
@@ -566,113 +570,126 @@ Statistics::renderJsonMessage(uint64_t t
return DuplicateString("{status:\"aborted\"}"); // May return nullptr
Sprinter printer(nullptr, false);
if (!printer.init())
return UniqueChars(nullptr);
JSONPrinter json(printer);
json.beginObject();
- json.property("status", "completed");
- formatJsonDescription(timestamp, json);
+ json.property("status", "completed"); // JSON Key #1
+ formatJsonDescription(timestamp, json); // #2-22
if (includeSlices) {
- json.beginListProperty("slices_list");
+ json.beginListProperty("slices_list"); // #23
for (unsigned i = 0; i < slices_.length(); i++)
formatJsonSlice(i, json);
json.endList();
}
- json.beginObjectProperty("totals");
+ json.beginObjectProperty("totals"); // #24
formatJsonPhaseTimes(phaseTimes, json);
json.endObject();
json.endObject();
return UniqueChars(printer.release());
}
void
Statistics::formatJsonDescription(uint64_t timestamp, JSONPrinter& json) const
{
// If you change JSON properties here, please update:
- // Telemetry ping code: toolkit/components/telemetry/GCTelemetry.jsm
- // Telemetry documentation: toolkit/components/telemetry/docs/data/main-ping.rst
- // Telemetry tests: toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js
- // Perf.html: https://github.com/devtools-html/perf.html
+ // Telemetry ping code:
+ // toolkit/components/telemetry/GCTelemetry.jsm
+ // Telemetry documentation:
+ // toolkit/components/telemetry/docs/data/main-ping.rst
+ // Telemetry tests:
+ // toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js,
+ // toolkit/components/telemetry/tests/unit/test_TelemetryGC.js
+ // Perf.html:
+ // https://github.com/devtools-html/perf.html
+ //
+    // Please also number each property to help correctly maintain the Telemetry ping code.
- json.property("timestamp", timestamp);
+    json.property("timestamp", timestamp); // #2
TimeDuration total, longest;
gcDuration(&total, &longest);
- json.property("max_pause", longest, JSONPrinter::MILLISECONDS);
- json.property("total_time", total, JSONPrinter::MILLISECONDS);
+ json.property("max_pause", longest, JSONPrinter::MILLISECONDS); // #3
+ json.property("total_time", total, JSONPrinter::MILLISECONDS); // #4
// We might be able to omit reason if perf.html was able to retrive it
// from the first slice. But it doesn't do this yet.
- json.property("reason", ExplainReason(slices_[0].reason));
- json.property("zones_collected", zoneStats.collectedZoneCount);
- json.property("total_zones", zoneStats.zoneCount);
- json.property("total_compartments", zoneStats.compartmentCount);
- json.property("minor_gcs", getCount(STAT_MINOR_GC));
+ json.property("reason", ExplainReason(slices_[0].reason)); // #5
+ json.property("zones_collected", zoneStats.collectedZoneCount); // #6
+ json.property("total_zones", zoneStats.zoneCount); // #7
+ json.property("total_compartments", zoneStats.compartmentCount); // #8
+ json.property("minor_gcs", getCount(STAT_MINOR_GC)); // #9
uint32_t storebufferOverflows = getCount(STAT_STOREBUFFER_OVERFLOW);
if (storebufferOverflows)
- json.property("store_buffer_overflows", storebufferOverflows);
- json.property("slices", slices_.length());
+ json.property("store_buffer_overflows", storebufferOverflows); // #10
+ json.property("slices", slices_.length()); // #11
const double mmu20 = computeMMU(TimeDuration::FromMilliseconds(20));
const double mmu50 = computeMMU(TimeDuration::FromMilliseconds(50));
- json.property("mmu_20ms", int(mmu20 * 100));
- json.property("mmu_50ms", int(mmu50 * 100));
+ json.property("mmu_20ms", int(mmu20 * 100)); // #12
+ json.property("mmu_50ms", int(mmu50 * 100)); // #13
TimeDuration sccTotal, sccLongest;
sccDurations(&sccTotal, &sccLongest);
- json.property("scc_sweep_total", sccTotal, JSONPrinter::MILLISECONDS);
- json.property("scc_sweep_max_pause", sccLongest, JSONPrinter::MILLISECONDS);
-
+ json.property("scc_sweep_total", sccTotal, JSONPrinter::MILLISECONDS); // #14
+ json.property("scc_sweep_max_pause", sccLongest, JSONPrinter::MILLISECONDS); // #15
+
if (nonincrementalReason_ != AbortReason::None)
- json.property("nonincremental_reason", ExplainAbortReason(nonincrementalReason_));
- json.property("allocated_bytes", preBytes);
+ json.property("nonincremental_reason", ExplainAbortReason(nonincrementalReason_)); // #16
+ json.property("allocated_bytes", preBytes); // #17
uint32_t addedChunks = getCount(STAT_NEW_CHUNK);
if (addedChunks)
- json.property("added_chunks", addedChunks);
+ json.property("added_chunks", addedChunks); // #18
uint32_t removedChunks = getCount(STAT_DESTROY_CHUNK);
if (removedChunks)
- json.property("removed_chunks", removedChunks);
- json.property("major_gc_number", startingMajorGCNumber);
- json.property("minor_gc_number", startingMinorGCNumber);
- json.property("slice_number", startingSliceNumber);
+ json.property("removed_chunks", removedChunks); // #19
+ json.property("major_gc_number", startingMajorGCNumber); // #20
+ json.property("minor_gc_number", startingMinorGCNumber); // #21
+ json.property("slice_number", startingSliceNumber); // #22
}
void
Statistics::formatJsonSliceDescription(unsigned i, const SliceData& slice, JSONPrinter& json) const
{
// If you change JSON properties here, please update:
- // Telemetry ping code: toolkit/components/telemetry/GCTelemetry.jsm
- // Telemetry documentation: toolkit/components/telemetry/docs/data/main-ping.rst
- // Telemetry tests: toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js
- // Perf.html: https://github.com/devtools-html/perf.html
+ // Telemetry ping code:
+ // toolkit/components/telemetry/GCTelemetry.jsm
+ // Telemetry documentation:
+ // toolkit/components/telemetry/docs/data/main-ping.rst
+ // Telemetry tests:
+ // toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js,
+ // toolkit/components/telemetry/tests/unit/test_TelemetryGC.js
+ // Perf.html:
+ // https://github.com/devtools-html/perf.html
+ //
char budgetDescription[200];
slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
TimeStamp originTime = TimeStamp::ProcessCreation();
- json.property("slice", i);
- json.property("pause", slice.duration(), JSONPrinter::MILLISECONDS);
- json.property("reason", ExplainReason(slice.reason));
- json.property("initial_state", gc::StateName(slice.initialState));
- json.property("final_state", gc::StateName(slice.finalState));
- json.property("budget", budgetDescription);
- json.property("major_gc_number", startingMajorGCNumber);
+ json.property("slice", i); // JSON Property #1
+ json.property("pause", slice.duration(), JSONPrinter::MILLISECONDS); // #2
+ json.property("reason", ExplainReason(slice.reason)); // #3
+ json.property("initial_state", gc::StateName(slice.initialState)); // #4
+ json.property("final_state", gc::StateName(slice.finalState)); // #5
+ json.property("budget", budgetDescription); // #6
+ json.property("major_gc_number", startingMajorGCNumber); // #7
if (thresholdTriggered) {
- json.floatProperty("trigger_amount", triggerAmount, 0);
- json.floatProperty("trigger_threshold", triggerThreshold, 0);
+ json.floatProperty("trigger_amount", triggerAmount, 0); // #8
+ json.floatProperty("trigger_threshold", triggerThreshold, 0); // #9
}
int64_t numFaults = slice.endFaults - slice.startFaults;
if (numFaults != 0)
- json.property("page_faults", numFaults);
- json.property("start_timestamp", slice.start - originTime, JSONPrinter::SECONDS);
+ json.property("page_faults", numFaults); // #10
+ json.property("start_timestamp", slice.start - originTime, JSONPrinter::SECONDS); // #11
}
void
Statistics::formatJsonPhaseTimes(const PhaseTimeTable& phaseTimes, JSONPrinter& json) const
{
for (auto phase : AllPhases()) {
TimeDuration ownTime = phaseTimes[phase];
if (!ownTime.IsZero())
--- a/toolkit/components/telemetry/GCTelemetry.jsm
+++ b/toolkit/components/telemetry/GCTelemetry.jsm
@@ -18,16 +18,17 @@
*
* GCs from both the main process and all content processes are
* recorded. The data is cleared for each new subsession.
*/
const Cu = Components.utils;
Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/Log.jsm");
this.EXPORTED_SYMBOLS = ["GCTelemetry"];
// Names of processes where we record GCs.
const PROCESS_NAMES = ["main", "content"];
// Should be the time we started up in milliseconds since the epoch.
const BASE_TIME = Date.now() - Services.telemetry.msSinceProcessStart();
@@ -109,28 +110,50 @@ class GCData {
// If you adjust any of the constants here (slice limit, number of keys, etc.)
// make sure to update the JSON schema at:
// https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/telemetry/main.schema.json
// You should also adjust browser_TelemetryGC.js.
const MAX_GC_KEYS = 24;
const MAX_SLICES = 4;
const MAX_SLICE_KEYS = 12;
const MAX_PHASES = 65;
+const LOGGER_NAME = "Toolkit.Telemetry";
-function limitProperties(obj, count) {
- // If there are too many properties, just delete them all. We don't
+function limitProperties(name, obj, count, log) {
+ log.trace("limitProperties");
+
+  // If there are too many properties, delete all/most of them. We don't
// expect this ever to happen.
- if (Object.keys(obj).length > count) {
+ if (Object.keys(obj).length >= count) {
for (let key of Object.keys(obj)) {
+ // If this is the main GC object then save some of the critical
+ // properties.
+ if (name === "data" && (
+ key === "max_pause" ||
+ key === "num_slices" ||
+ key === "slices_list" ||
+ key === "status" ||
+ key === "timestamp" ||
+ key === "total_time" ||
+ key === "totals")) {
+ continue;
+ }
+
delete obj[key];
}
+ log.warn("Number of properties exceeded in the GC telemetry " +
+ name + " ping");
}
}
-function limitSize(data) {
+/*
+ * Reduce the size of the object by limiting the number of slices or times
+ * etc.
+ */
+function limitSize(data, log) {
// Store the number of slices so we know if we lost any at the end.
data.num_slices = data.slices_list.length;
data.slices_list.sort((a, b) => b.pause - a.pause);
if (data.slices_list.length > MAX_SLICES) {
// Make sure we always keep the first slice since it has the
// reason the GC was started.
@@ -139,40 +162,43 @@ function limitSize(data) {
data.slices_list[MAX_SLICES - 1] = data.slices_list[firstSliceIndex];
}
data.slices_list.length = MAX_SLICES;
}
data.slices_list.sort((a, b) => a.slice - b.slice);
- limitProperties(data, MAX_GC_KEYS);
+ limitProperties("data", data, MAX_GC_KEYS, log);
for (let slice of data.slices_list) {
- limitProperties(slice, MAX_SLICE_KEYS);
- limitProperties(slice.times, MAX_PHASES);
+ limitProperties("slice", slice, MAX_SLICE_KEYS, log);
+ limitProperties("slice.times", slice.times, MAX_PHASES, log);
}
- limitProperties(data.totals, MAX_PHASES);
+ limitProperties("data.totals", data.totals, MAX_PHASES, log);
}
let processData = new Map();
for (let name of PROCESS_NAMES) {
processData.set(name, new GCData(name));
}
var GCTelemetry = {
initialized: false,
init() {
if (this.initialized) {
return false;
}
this.initialized = true;
+ this._log = Log.repository.getLoggerWithMessagePrefix(
+ LOGGER_NAME, "GCTelemetry::");
+
Services.obs.addObserver(this, "garbage-collection-statistics");
if (Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_DEFAULT) {
Services.ppmm.addMessageListener("Telemetry:GCStatistics", this);
}
return true;
},
@@ -186,19 +212,23 @@ var GCTelemetry = {
if (Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_DEFAULT) {
Services.ppmm.removeMessageListener("Telemetry:GCStatistics", this);
}
this.initialized = false;
},
observe(subject, topic, arg) {
- let data = JSON.parse(arg);
+ this.observeRaw(JSON.parse(arg));
+ },
- limitSize(data);
+  // We expose this method so unit tests can call it; no need to test JSON
+ // parsing.
+ observeRaw(data) {
+ limitSize(data, this._log);
if (Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_DEFAULT) {
processData.get("main").record(data);
} else {
Services.cpmm.sendAsyncMessage("Telemetry:GCStatistics", data);
}
},
new file mode 100644
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryGC.js
@@ -0,0 +1,144 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+"use strict";
+
+Cu.import("resource://gre/modules/GCTelemetry.jsm", this);
+
+function do_register_cleanup() {
+ GCTelemetry.shutdown();
+}
+
+/*
+ * These tests are very basic, my goal was to add enough testing to support
+ * a change made in Bug 1424760. TODO (Bug 1429635): add more extensive
+ * tests and delete this comment.
+ */
+
+function run_test() {
+ // Test initialisation
+ Assert.ok(GCTelemetry.init(), "Initialize success");
+ Assert.ok(!GCTelemetry.init(), "Wont initialize twice");
+
+ // Test the basic success path.
+
+ // There are currently no entries
+ assert_num_entries(0, false);
+
+ // Add an entry
+ GCTelemetry.observeRaw(make_gc());
+
+ // Get it back.
+ assert_num_entries(1, false);
+ let entries1 = GCTelemetry.entries("main", false);
+ Assert.ok(entries1, "Got entries object");
+ Assert.ok(entries1.random, "Has random property");
+ Assert.ok(entries1.worst, "Has worst property");
+ let entry1 = entries1.worst[0];
+ Assert.ok(entry1, "Got worst entry");
+
+  // "true" will cause the entry to be cleared.
+ assert_num_entries(1, true);
+ // There are currently no entries.
+ assert_num_entries(0, false);
+ Assert.equal(20, Object.keys(entry1).length);
+
+ // Test too many fields
+ let my_gc = make_gc();
+ for (let i = 0; i < 100; i++) {
+ my_gc["new_property_" + i] = "Data";
+ }
+
+ GCTelemetry.observeRaw(my_gc);
+ // Assert that it was recorded but has only 7 fields.
+ assert_num_entries(1, false);
+ let entries2 = GCTelemetry.entries("main", false);
+ Assert.ok(entries2, "Got entries object");
+ let entry2 = entries2.worst[0];
+ Assert.ok(entry2, "Got worst entry");
+ Assert.equal(7, Object.keys(entry2).length);
+}
+
+function assert_num_entries(expect, clear) {
+ let entries = GCTelemetry.entries("main", clear);
+ Assert.equal(expect, entries.worst.length, expect + " worst entries");
+ // Randomly sampled GCs are only recorded for content processes
+ Assert.equal(0, entries.random.length, expect + " random entries");
+}
+
+/*
+ * These events are not exactly well-formed, but good enough. For example
+ * there's no guarantee that all the pause times add up to total time, or
+ * that max_pause is correct.
+ */
+function make_gc() {
+ // Timestamps are in milliseconds since startup. All the times here
+ // are wall-clock times, which may not be monotonically increasing.
+ let timestamp = Math.random() * 1000000;
+
+ let gc = {
+ "status": "completed",
+ "timestamp": timestamp,
+ // All durations are in milliseconds.
+ "max_pause": Math.random() * 95 + 5,
+ "total_time": Math.random() * 500 + 500, // Sum of all slice times.
+ "zones_collected": 9,
+ "total_zones": 9,
+ "total_compartments": 309,
+ "minor_gcs": 44,
+ "store_buffer_overflows": 19,
+ "mmu_20ms": 0,
+ "mmu_50ms": 0,
+ "nonincremental_reason": "GCBytesTrigger",
+ "allocated_bytes": 38853696, // in bytes
+ "added_chunks": 54,
+ "removed_chunks": 12,
+ "slices": 15,
+ "slice_number": 218, // The first slice number for this GC event.
+ "slices_list": [
+ {
+ "slice": 218, // The global index of this slice.
+ "pause": Math.random() * 2 + 28,
+ "reason": "SET_NEW_DOCUMENT",
+ "initial_state": "NotActive",
+ "final_state": "Mark",
+ "budget": "10ms",
+ "page_faults": 1,
+ "start_timestamp": timestamp + Math.random() * 50000,
+ "times": {
+ "wait_background_thread": 0.012,
+ "mark_discard_code": 2.845,
+ "purge": 0.723,
+ "mark": 9.831,
+ "mark_roots": 0.102,
+ "buffer_gray_roots": 3.095,
+ "mark_cross_compartment_wrappers": 0.039,
+ "mark_c_and_js_stacks": 0.005,
+ "mark_runtime_wide_data": 2.313,
+ "mark_embedding": 0.117,
+ "mark_compartments": 2.27,
+ "unmark": 1.063,
+ "minor_gcs_to_evict_nursery": 8.701,
+ }
+ }
+ ],
+ "totals": {
+ "wait_background_thread": 0.012,
+ "mark_discard_code": 2.845,
+ "purge": 0.723,
+ "mark": 9.831,
+ "mark_roots": 0.102,
+ "buffer_gray_roots": 3.095,
+ "mark_cross_compartment_wrappers": 0.039,
+ "mark_c_and_js_stacks": 0.005,
+ "mark_runtime_wide_data": 2.313,
+ "mark_embedding": 0.117,
+ "mark_compartments": 2.27,
+ "unmark": 1.063,
+ "minor_gcs_to_evict_nursery": 8.701,
+ }
+ };
+ return gc;
+}
+
--- a/toolkit/components/telemetry/tests/unit/xpcshell.ini
+++ b/toolkit/components/telemetry/tests/unit/xpcshell.ini
@@ -69,8 +69,9 @@ skip-if = toolkit == 'android'
[test_TelemetryCaptureStack.js]
[test_TelemetryEvents.js]
[test_ChildEvents.js]
skip-if = os == "android" # Disabled due to crashes (see bug 1331366)
[test_TelemetryModules.js]
skip-if = (os == "android" && release_or_beta) # (see bug 1351197)
[test_PingSender.js]
skip-if = (os == "android") || (os == "linux" && bits == 32)
+[test_TelemetryGC.js]