path: root/toolkit/components/telemetry/tests
Diffstat (limited to 'toolkit/components/telemetry/tests')
-rw-r--r--  toolkit/components/telemetry/tests/addons/dictionary/manifest.json  15
-rw-r--r--  toolkit/components/telemetry/tests/addons/long-fields/manifest.json  12
-rw-r--r--  toolkit/components/telemetry/tests/addons/restartless/manifest.json  12
-rw-r--r--  toolkit/components/telemetry/tests/addons/signed-webext/.web-extension-id  3
-rw-r--r--  toolkit/components/telemetry/tests/addons/signed-webext/META-INF/manifest.mf  7
-rw-r--r--  toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.rsa  bin 0 -> 4193 bytes
-rw-r--r--  toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.sf  4
-rw-r--r--  toolkit/components/telemetry/tests/addons/signed-webext/manifest.json  12
-rw-r--r--  toolkit/components/telemetry/tests/addons/system/manifest.json  13
-rw-r--r--  toolkit/components/telemetry/tests/browser/browser.ini  13
-rw-r--r--  toolkit/components/telemetry/tests/browser/browser_DynamicScalars.js  244
-rw-r--r--  toolkit/components/telemetry/tests/browser/browser_UpdatePingSuccess.js  164
-rw-r--r--  toolkit/components/telemetry/tests/browser/browser_media_element_in_page_scalar.js  128
-rw-r--r--  toolkit/components/telemetry/tests/browser/file_iframe.html  9
-rw-r--r--  toolkit/components/telemetry/tests/browser/file_media.html  9
-rw-r--r--  toolkit/components/telemetry/tests/browser/gizmo.mp4  bin 0 -> 455255 bytes
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TelemetryFixture.cpp  28
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TelemetryFixture.h  39
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.cpp  376
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.h  68
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TestCombinedStacks.cpp  158
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TestCounters.cpp  173
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TestEvents.cpp  125
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TestHistograms.cpp  891
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TestOrigins.cpp  290
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TestScalars.cpp  491
-rw-r--r--  toolkit/components/telemetry/tests/gtest/moz.build  30
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/conftest.py  316
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/python.ini  9
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/resources/helloworld/helloworld.html  18
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/resources/helloworld/manifest.json  12
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/test_deletion_request_ping.py  65
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/test_event_ping.py  51
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/test_main_tab_scalars.py  34
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/test_search_counts_across_sessions.py  170
-rw-r--r--  toolkit/components/telemetry/tests/integration/tests/test_subsession_management.py  148
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in  3
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/requirements.txt  2
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/setup.py  48
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/__init__.py  3
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_filters.py  29
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_server.py  77
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_testcase.py  53
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_filters.py  75
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_server.py  65
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/helloworld.html  18
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/manifest.json  12
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py  56
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py  14
-rw-r--r--  toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py  242
-rw-r--r--  toolkit/components/telemetry/tests/marionette/mach_commands.py  101
-rw-r--r--  toolkit/components/telemetry/tests/marionette/moz.build  11
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini  10
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/test_deletion_request_ping.py  64
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/test_event_ping.py  68
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/test_fog_custom_ping.py  24
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/test_fog_deletion_request_ping.py  65
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/test_main_tab_scalars.py  39
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/test_search_counts_across_sessions.py  212
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/client/test_subsession_management.py  147
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/manifest.ini  2
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/unit/manifest.ini  4
-rw-r--r--  toolkit/components/telemetry/tests/marionette/tests/unit/test_ping_server_received_ping.py  46
-rw-r--r--  toolkit/components/telemetry/tests/modules-test.cpp  27
-rw-r--r--  toolkit/components/telemetry/tests/moz.build  25
-rw-r--r--  toolkit/components/telemetry/tests/python/python.ini  11
-rw-r--r--  toolkit/components/telemetry/tests/python/test_gen_event_data_json.py  102
-rw-r--r--  toolkit/components/telemetry/tests/python/test_gen_scalar_data_json.py  100
-rw-r--r--  toolkit/components/telemetry/tests/python/test_histogramtools_non_strict.py  145
-rw-r--r--  toolkit/components/telemetry/tests/python/test_histogramtools_strict.py  565
-rw-r--r--  toolkit/components/telemetry/tests/python/test_parse_events.py  165
-rw-r--r--  toolkit/components/telemetry/tests/python/test_parse_scalars.py  266
-rw-r--r--  toolkit/components/telemetry/tests/python/test_usecounters.py  69
-rw-r--r--  toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm  81
-rw-r--r--  toolkit/components/telemetry/tests/unit/data/search-extensions/engines.json  12
-rw-r--r--  toolkit/components/telemetry/tests/unit/data/search-extensions/telemetrySearchIdentifier/manifest.json  29
-rw-r--r--  toolkit/components/telemetry/tests/unit/engine.xml  7
-rw-r--r--  toolkit/components/telemetry/tests/unit/head.js  589
-rw-r--r--  toolkit/components/telemetry/tests/unit/testNoPDB32.dll  bin 0 -> 8704 bytes
-rw-r--r--  toolkit/components/telemetry/tests/unit/testNoPDB64.dll  bin 0 -> 10240 bytes
-rwxr-xr-x  toolkit/components/telemetry/tests/unit/testNoPDBAArch64.dll  bin 0 -> 1536 bytes
-rw-r--r--  toolkit/components/telemetry/tests/unit/testUnicodePDB32.dll  bin 0 -> 8704 bytes
-rw-r--r--  toolkit/components/telemetry/tests/unit/testUnicodePDB64.dll  bin 0 -> 10752 bytes
-rwxr-xr-x  toolkit/components/telemetry/tests/unit/testUnicodePDBAArch64.dll  bin 0 -> 7168 bytes
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_ChildEvents.js  226
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_ChildHistograms.js  333
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_ChildScalars.js  242
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_CoveragePing.js  115
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_EcosystemTelemetry.js  430
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_EventPing.js  290
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_HealthPing.js  403
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_MigratePendingPings.js  151
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_ModulesPing.js  297
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_PingAPI.js  711
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_PingSender.js  229
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_PrioPing.js  140
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_SocketScalars.js  55
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js  282
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_SyncPingIntegration.js  76
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryAndroidEnvironment.js  62
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryChildEvents_buildFaster.js  132
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryClientID_reset.js  180
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryController.js  1271
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js  69
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js  75
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js  76
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js  2711
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js  1111
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryEvents_buildFaster.js  468
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js  29
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryHistograms.js  2067
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js  145
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js  58
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js  348
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js  1090
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryScalars_buildFaster.js  236
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryScalars_impressionId.js  49
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryScalars_multistore.js  415
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetrySend.js  1095
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js  626
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetrySession.js  2395
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetrySession_abortedSessionQueued.js  187
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetrySession_activeTicks.js  119
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js  196
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js  78
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryUtils.js  34
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_ThirdPartyModulesPing.js  269
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_UninstallPing.js  127
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_UserInteraction.js  134
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_UserInteraction_annotations.js  481
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_bug1555798.js  50
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_client_id.js  372
-rw-r--r--  toolkit/components/telemetry/tests/unit/xpcshell.ini  100
-rw-r--r--  toolkit/components/telemetry/tests/utils/TelemetryTestUtils.jsm  368
134 files changed, 29023 insertions, 0 deletions
diff --git a/toolkit/components/telemetry/tests/addons/dictionary/manifest.json b/toolkit/components/telemetry/tests/addons/dictionary/manifest.json
new file mode 100644
index 0000000000..b6539a9d81
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/dictionary/manifest.json
@@ -0,0 +1,15 @@
+{
+ "manifest_version": 2,
+
+ "name": "Telemetry test dictionary",
+ "description": "A nice dictionary to prevent all typos for Telemetry.",
+ "version": "1.0",
+
+ "dictionaries": {},
+
+ "applications": {
+ "gecko": {
+ "id": "telemetry-dictionary@tests.mozilla.org"
+ }
+ }
+}
diff --git a/toolkit/components/telemetry/tests/addons/long-fields/manifest.json b/toolkit/components/telemetry/tests/addons/long-fields/manifest.json
new file mode 100644
index 0000000000..dda713dc24
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/long-fields/manifest.json
@@ -0,0 +1,12 @@
+{
+ "manifest_version": 2,
+ "name": "This is a really long addon name, that will get limited to 100 characters. We're much longer, we're at about 219. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus nullam sodales. Yeah, Latin placeholder.",
+ "description": "This is a really long addon description, that will get limited to 100 characters. We're much longer, we're at about 200. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus nullam sodales.",
+ "version": "1.0",
+
+ "applications": {
+ "gecko": {
+ "id": "tel-longfields-webext@tests.mozilla.org"
+ }
+ }
+}
diff --git a/toolkit/components/telemetry/tests/addons/restartless/manifest.json b/toolkit/components/telemetry/tests/addons/restartless/manifest.json
new file mode 100644
index 0000000000..ad991cc149
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/restartless/manifest.json
@@ -0,0 +1,12 @@
+{
+ "manifest_version": 2,
+ "name": "XPI Telemetry Restartless Test",
+ "description": "A restartless addon which gets enabled without a reboot.",
+ "version": "1.0",
+
+ "applications": {
+ "gecko": {
+ "id": "tel-restartless-webext@tests.mozilla.org"
+ }
+ }
+}
diff --git a/toolkit/components/telemetry/tests/addons/signed-webext/.web-extension-id b/toolkit/components/telemetry/tests/addons/signed-webext/.web-extension-id
new file mode 100644
index 0000000000..e78cecf6d6
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed-webext/.web-extension-id
@@ -0,0 +1,3 @@
+# This file was created by https://github.com/mozilla/web-ext
+# Your auto-generated extension ID for addons.mozilla.org is:
+tel-signed-webext@tests.mozilla.org
\ No newline at end of file
diff --git a/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/manifest.mf b/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/manifest.mf
new file mode 100644
index 0000000000..6be3a6e32f
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/manifest.mf
@@ -0,0 +1,7 @@
+Manifest-Version: 1.0
+
+Name: manifest.json
+Digest-Algorithms: MD5 SHA1
+MD5-Digest: vh0VF5quc9YIhMhIsZgKcg==
+SHA1-Digest: DNXAbrHJ4ncET5W+qtJl4+45D6s=
+
diff --git a/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.rsa b/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.rsa
new file mode 100644
index 0000000000..b202c515a8
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.rsa
Binary files differ
diff --git a/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.sf b/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.sf
new file mode 100644
index 0000000000..72af14d816
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed-webext/META-INF/mozilla.sf
@@ -0,0 +1,4 @@
+Signature-Version: 1.0
+MD5-Digest-Manifest: tbeA48G5pe86kvUbd4rsXA==
+SHA1-Digest-Manifest: jyvWt2v9XUnYHrvrlSi4BdyZV/0=
+
diff --git a/toolkit/components/telemetry/tests/addons/signed-webext/manifest.json b/toolkit/components/telemetry/tests/addons/signed-webext/manifest.json
new file mode 100644
index 0000000000..437b415fef
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed-webext/manifest.json
@@ -0,0 +1,12 @@
+{
+ "manifest_version": 2,
+ "name": "XPI Telemetry Signed Test",
+ "description": "A signed webextension",
+ "version": "1.0",
+
+ "applications": {
+ "gecko": {
+ "id": "tel-signed-webext@tests.mozilla.org"
+ }
+ }
+}
diff --git a/toolkit/components/telemetry/tests/addons/system/manifest.json b/toolkit/components/telemetry/tests/addons/system/manifest.json
new file mode 100644
index 0000000000..d3fb6514c3
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/system/manifest.json
@@ -0,0 +1,13 @@
+{
+ "manifest_version": 2,
+
+ "name": "XPI Telemetry System Add-on Test",
+ "description": "A system addon which is shipped with Firefox.",
+ "version": "1.0",
+
+ "applications": {
+ "gecko": {
+ "id": "tel-system-xpi@tests.mozilla.org"
+ }
+ }
+}
diff --git a/toolkit/components/telemetry/tests/browser/browser.ini b/toolkit/components/telemetry/tests/browser/browser.ini
new file mode 100644
index 0000000000..79606f3578
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/browser.ini
@@ -0,0 +1,13 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+[DEFAULT]
+support-files =
+ file_iframe.html
+ file_media.html
+ gizmo.mp4
+
+[browser_UpdatePingSuccess.js]
+[browser_DynamicScalars.js]
+skip-if = !e10s || verify # e10s specific test for definition broadcasting across processes.
+[browser_media_element_in_page_scalar.js]
diff --git a/toolkit/components/telemetry/tests/browser/browser_DynamicScalars.js b/toolkit/components/telemetry/tests/browser/browser_DynamicScalars.js
new file mode 100644
index 0000000000..024d4a5d7b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/browser_DynamicScalars.js
@@ -0,0 +1,244 @@
+"use strict";
+
+const { ContentTaskUtils } = ChromeUtils.import(
+ "resource://testing-common/ContentTaskUtils.jsm"
+);
+const { TelemetryController } = ChromeUtils.import(
+ "resource://gre/modules/TelemetryController.jsm"
+);
+const { TelemetryUtils } = ChromeUtils.import(
+ "resource://gre/modules/TelemetryUtils.jsm"
+);
+
+const CONTENT_CREATED = "ipc:content-created";
+
+async function waitForProcessesScalars(
+ aProcesses,
+ aKeyed,
+ aAdditionalCondition = data => true
+) {
+ await ContentTaskUtils.waitForCondition(() => {
+ const scalars = aKeyed
+ ? Services.telemetry.getSnapshotForKeyedScalars("main", false)
+ : Services.telemetry.getSnapshotForScalars("main", false);
+ return (
+ aProcesses.every(p => Object.keys(scalars).includes(p)) &&
+ aAdditionalCondition(scalars)
+ );
+ });
+}
+
+add_task(async function test_setup() {
+ // Make sure the newly spawned content processes will have extended Telemetry enabled.
+ // Since Telemetry reads the prefs only at process startup, flush all cached
+ // and preallocated processes so they pick up the setting.
+ await SpecialPowers.pushPrefEnv({
+ set: [
+ [TelemetryUtils.Preferences.OverridePreRelease, true],
+ ["dom.ipc.processPrelaunch.enabled", false],
+ ],
+ });
+ Services.ppmm.releaseCachedProcesses();
+ await SpecialPowers.pushPrefEnv({
+ set: [["dom.ipc.processPrelaunch.enabled", true]],
+ });
+
+ // And take care of the already initialized one as well.
+ let canRecordExtended = Services.telemetry.canRecordExtended;
+ Services.telemetry.canRecordExtended = true;
+ registerCleanupFunction(
+ () => (Services.telemetry.canRecordExtended = canRecordExtended)
+ );
+});
+
+add_task(async function test_recording() {
+ let currentPid = gBrowser.selectedBrowser.frameLoader.remoteTab.osPid;
+
+ // Register test scalars before spawning the content process: the scalar
+ // definitions will propagate to it.
+ Services.telemetry.registerScalars("telemetry.test.dynamic", {
+ pre_content_spawn: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ record_on_release: true,
+ },
+ pre_content_spawn_expiration: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ record_on_release: true,
+ },
+ });
+
+ Services.telemetry.scalarSet(
+ "telemetry.test.dynamic.pre_content_spawn_expiration",
+ 3
+ );
+
+ let processCreated = TestUtils.topicObserved(CONTENT_CREATED);
+ await BrowserTestUtils.withNewTab(
+ { gBrowser, url: "about:blank", forceNewProcess: true },
+ async function(browser) {
+ // Make sure our new browser is in its own process. The processCreated
+ // promise should have already resolved by this point.
+ await processCreated;
+ let newPid = browser.frameLoader.remoteTab.osPid;
+ ok(currentPid != newPid, "The new tab must spawn its own process");
+
+ // Register test scalars after spawning the content process: the scalar
+ // definitions will propagate to it.
+ // Also re-register "pre_content_spawn_expiration", this time marking it
+ // as expired.
+ Services.telemetry.registerScalars("telemetry.test.dynamic", {
+ post_content_spawn: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_BOOLEAN,
+ keyed: false,
+ record_on_release: false,
+ },
+ post_content_spawn_keyed: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: true,
+ record_on_release: true,
+ },
+ pre_content_spawn_expiration: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ record_on_release: true,
+ expired: true,
+ },
+ });
+
+ // Accumulate from the content process into the dynamic scalars.
+ await SpecialPowers.spawn(browser, [], async function() {
+ Services.telemetry.scalarAdd(
+ "telemetry.test.dynamic.pre_content_spawn_expiration",
+ 1
+ );
+ Services.telemetry.scalarSet(
+ "telemetry.test.dynamic.pre_content_spawn",
+ 3
+ );
+ Services.telemetry.scalarSet(
+ "telemetry.test.dynamic.post_content_spawn",
+ true
+ );
+ Services.telemetry.keyedScalarSet(
+ "telemetry.test.dynamic.post_content_spawn_keyed",
+ "testKey",
+ 3
+ );
+ });
+ }
+ );
+
+ // Wait for the dynamic scalars to appear in the snapshots.
+ await waitForProcessesScalars(["dynamic"], true, scalars => {
+ // Wait for the scalars set in the content process to be available.
+ return "telemetry.test.dynamic.post_content_spawn_keyed" in scalars.dynamic;
+ });
+
+ // Verify the content of the snapshots.
+ const scalars = Services.telemetry.getSnapshotForScalars("main", false);
+ ok(
+ "dynamic" in scalars,
+ "The scalars must contain the 'dynamic' process section"
+ );
+ ok(
+ "telemetry.test.dynamic.pre_content_spawn" in scalars.dynamic,
+ "Dynamic scalars registered before a process spawns must be present."
+ );
+ is(
+ scalars.dynamic["telemetry.test.dynamic.pre_content_spawn"],
+ 3,
+ "The dynamic scalar must contain the expected value."
+ );
+ is(
+ scalars.dynamic["telemetry.test.dynamic.pre_content_spawn_expiration"],
+ 3,
+ "The dynamic scalar must not be updated after being expired."
+ );
+ ok(
+ "telemetry.test.dynamic.post_content_spawn" in scalars.dynamic,
+ "Dynamic scalars registered after a process spawns must be present."
+ );
+ is(
+ scalars.dynamic["telemetry.test.dynamic.post_content_spawn"],
+ true,
+ "The dynamic scalar must contain the expected value."
+ );
+
+ // Wait for the dynamic scalars to appear in the keyed snapshots.
+ await waitForProcessesScalars(["dynamic"], true);
+
+ const keyedScalars = Services.telemetry.getSnapshotForKeyedScalars(
+ "main",
+ false
+ );
+ ok(
+ "dynamic" in keyedScalars,
+ "The keyed scalars must contain the 'dynamic' process section"
+ );
+ ok(
+ "telemetry.test.dynamic.post_content_spawn_keyed" in keyedScalars.dynamic,
+ "Dynamic keyed scalars registered after a process spawns must be present."
+ );
+ is(
+ keyedScalars.dynamic["telemetry.test.dynamic.post_content_spawn_keyed"]
+ .testKey,
+ 3,
+ "The dynamic keyed scalar must contain the expected value."
+ );
+});
+
+add_task(async function test_aggregation() {
+ Services.telemetry.clearScalars();
+
+ // Register test scalars before spawning the content process: the scalar
+ // definitions will propagate to it. Also trick TelemetrySession into putting
+ // the test scalar in the payload by using "cheattest" instead of "test" in
+ // the scalar category name.
+ Services.telemetry.registerScalars("telemetry.cheattest.dynamic", {
+ test_aggregation: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ record_on_release: true,
+ },
+ });
+
+ const SCALAR_FULL_NAME = "telemetry.cheattest.dynamic.test_aggregation";
+ Services.telemetry.scalarAdd(SCALAR_FULL_NAME, 1);
+
+ await BrowserTestUtils.withNewTab(
+ { gBrowser, url: "about:blank", forceNewProcess: true },
+ async function(browser) {
+ // Accumulate from the content process into the dynamic scalar.
+ await SpecialPowers.spawn(browser, [SCALAR_FULL_NAME], async function(
+ aName
+ ) {
+ Services.telemetry.scalarAdd(aName, 3);
+ });
+ }
+ );
+
+ // Wait for the dynamic scalars to appear. Since we're testing that children
+ // and parent data get aggregated, we might need to wait a bit more:
+ // TelemetryIPCAccumulator.cpp sends batches to the parent process every 2 seconds.
+ await waitForProcessesScalars(["dynamic"], false, scalarData => {
+ return (
+ "dynamic" in scalarData &&
+ SCALAR_FULL_NAME in scalarData.dynamic &&
+ scalarData.dynamic[SCALAR_FULL_NAME] == 4
+ );
+ });
+
+ // Check that the definitions made it to the ping payload.
+ const pingData = TelemetryController.getCurrentPingData(true);
+ ok(
+ "dynamic" in pingData.payload.processes,
+ "The ping payload must contain the 'dynamic' process section"
+ );
+ is(
+ pingData.payload.processes.dynamic.scalars[SCALAR_FULL_NAME],
+ 4,
+ "The dynamic scalar must contain the aggregated parent and children data."
+ );
+});
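Note: the test above depends on two behaviours that are easy to miss: dynamic scalar definitions registered with Services.telemetry.registerScalars() are broadcast to content processes, and child accumulations are batched back to the parent by TelemetryIPCAccumulator. The following is a minimal, parent-process-only sketch of the same register/accumulate/snapshot cycle using the chrome-only APIs exercised above; the "example_count" scalar name is illustrative and not part of this patch.

  // Register a dynamic scalar in the "telemetry.test.dynamic" category.
  Services.telemetry.registerScalars("telemetry.test.dynamic", {
    example_count: {
      kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
      keyed: false,
      record_on_release: true,
    },
  });

  // Accumulate into it, then read it back from the "dynamic" process section
  // of the "main" store snapshot (false = do not clear on read).
  Services.telemetry.scalarAdd("telemetry.test.dynamic.example_count", 2);
  const snapshot = Services.telemetry.getSnapshotForScalars("main", false);
  // snapshot.dynamic["telemetry.test.dynamic.example_count"] === 2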
diff --git a/toolkit/components/telemetry/tests/browser/browser_UpdatePingSuccess.js b/toolkit/components/telemetry/tests/browser/browser_UpdatePingSuccess.js
new file mode 100644
index 0000000000..9a4e6acf49
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/browser_UpdatePingSuccess.js
@@ -0,0 +1,164 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import(
+ "resource://testing-common/TelemetryArchiveTesting.jsm",
+ this
+);
+
+add_task(async function test_updatePing() {
+ const TEST_VERSION = "37.85";
+ const TEST_BUILDID = "20150711123724";
+ const XML_UPDATE = `<?xml version="1.0"?>
+ <updates xmlns="http://www.mozilla.org/2005/app-update">
+ <update appVersion="${Services.appinfo.version}" buildID="20080811053724"
+ channel="nightly" displayVersion="Version 1.0"
+ installDate="1238441400314" isCompleteUpdate="true" type="minor"
+ name="Update Test 1.0" detailsURL="http://example.com/"
+ previousAppVersion="${TEST_VERSION}"
+ serviceURL="https://example.com/" foregroundDownload="true"
+ statusText="The Update was successfully installed">
+ <patch type="complete" URL="http://example.com/" size="775"
+ selected="true" state="succeeded"/>
+ </update>
+ </updates>`;
+
+ // Set the preferences needed for the test: they will be cleared up
+ // after it runs.
+ await SpecialPowers.pushPrefEnv({
+ set: [
+ [TelemetryUtils.Preferences.UpdatePing, true],
+ ["browser.startup.homepage_override.mstone", TEST_VERSION],
+ ["browser.startup.homepage_override.buildID", TEST_BUILDID],
+ ["toolkit.telemetry.log.level", "Trace"],
+ ],
+ });
+
+ registerCleanupFunction(async () => {
+ let activeUpdateFile = getActiveUpdateFile();
+ activeUpdateFile.remove(false);
+ reloadUpdateManagerData(true);
+ });
+ writeUpdatesToXMLFile(XML_UPDATE);
+ reloadUpdateManagerData(false);
+
+ // Start monitoring the ping archive.
+ let archiveChecker = new TelemetryArchiveTesting.Checker();
+ await archiveChecker.promiseInit();
+
+ // Manually call the BrowserContentHandler: this is what normally runs when
+ // the browser starts after an update was applied successfully, in order to
+ // display the "update" info page.
+ Cc["@mozilla.org/browser/clh;1"].getService(Ci.nsIBrowserHandler).defaultArgs;
+
+ // We cannot control when the ping will be generated/archived after we trigger
+ // an update, so let's make sure to have one before moving on with validation.
+ let updatePing;
+ await BrowserTestUtils.waitForCondition(
+ async function() {
+ // Check that the ping made it into the Telemetry archive.
+ // The test data is defined in ../data/sharedUpdateXML.js
+ updatePing = await archiveChecker.promiseFindPing("update", [
+ [["payload", "reason"], "success"],
+ [["payload", "previousBuildId"], TEST_BUILDID],
+ [["payload", "previousVersion"], TEST_VERSION],
+ ]);
+ return !!updatePing;
+ },
+ "Make sure the ping is generated before trying to validate it.",
+ 500,
+ 100
+ );
+
+ ok(updatePing, "The 'update' ping must be correctly sent.");
+
+ // We have no easy way to simulate a previously applied update from toolkit/telemetry.
+ // Instead of moving this test to mozapps/update as well, just test that the
+ // "previousChannel" field is present and either a string or null.
+ ok(
+ "previousChannel" in updatePing.payload,
+ "The payload must contain the 'previousChannel' field"
+ );
+ const channelField = updatePing.payload.previousChannel;
+ if (channelField != null) {
+ ok(
+ typeof channelField == "string",
+ "'previousChannel' must be a string, if available."
+ );
+ }
+
+ // Also make sure that the ping contains both a client id and an
+ // environment section.
+ ok("clientId" in updatePing, "The update ping must report a client id.");
+ ok(
+ "environment" in updatePing,
+ "The update ping must report the environment."
+ );
+});
+
+/**
+ * Removes the updates.xml file and returns the nsIFile for the
+ * active-update.xml file.
+ *
+ * @return The nsIFile for the active-update.xml file.
+ */
+function getActiveUpdateFile() {
+ let updateRootDir = Services.dirsvc.get("UpdRootD", Ci.nsIFile);
+ let updatesFile = updateRootDir.clone();
+ updatesFile.append("updates.xml");
+ if (updatesFile.exists()) {
+ // The following is non-fatal.
+ try {
+ updatesFile.remove(false);
+ } catch (e) {}
+ }
+ let activeUpdateFile = updateRootDir.clone();
+ activeUpdateFile.append("active-update.xml");
+ return activeUpdateFile;
+}
+
+/**
+ * Reloads the update xml files.
+ *
+ * @param skipFiles (optional)
+ * If true, the update xml files will not be read and the metadata will
+ * be reset. If false (the default), the update xml files will be read
+ * to populate the update metadata.
+ */
+function reloadUpdateManagerData(skipFiles = false) {
+ Cc["@mozilla.org/updates/update-manager;1"]
+ .getService(Ci.nsIUpdateManager)
+ .QueryInterface(Ci.nsIObserver)
+ .observe(null, "um-reload-update-data", skipFiles ? "skip-files" : "");
+}
+
+/**
+ * Writes the updates specified to the active-update.xml file.
+ *
+ * @param aText
+ * The updates represented as a string to write to the active-update.xml
+ * file.
+ */
+function writeUpdatesToXMLFile(aText) {
+ const PERMS_FILE = 0o644;
+
+ const MODE_WRONLY = 0x02;
+ const MODE_CREATE = 0x08;
+ const MODE_TRUNCATE = 0x20;
+
+ let activeUpdateFile = getActiveUpdateFile();
+ if (!activeUpdateFile.exists()) {
+ activeUpdateFile.create(Ci.nsIFile.NORMAL_FILE_TYPE, PERMS_FILE);
+ }
+ let fos = Cc["@mozilla.org/network/file-output-stream;1"].createInstance(
+ Ci.nsIFileOutputStream
+ );
+ let flags = MODE_WRONLY | MODE_CREATE | MODE_TRUNCATE;
+ fos.init(activeUpdateFile, flags, PERMS_FILE, 0);
+ fos.write(aText, aText.length);
+ fos.close();
+}
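Note: the flow above (trigger an action, then poll until a matching ping shows up in the archive) is driven by TelemetryArchiveTesting.Checker. A short sketch of that checker pattern follows; the BrowserTestUtils.waitForCondition polling used in the test is omitted here for brevity, and the conditions shown are the test's own.

  // Snapshot the archive state before the action under test.
  const archiveChecker = new TelemetryArchiveTesting.Checker();
  await archiveChecker.promiseInit();

  // ... trigger whatever should generate the ping here ...

  // Look for a newly archived ping by type and by [payload path, value] pairs.
  const ping = await archiveChecker.promiseFindPing("update", [
    [["payload", "reason"], "success"],
  ]);
  ok(ping, "An archived 'update' ping with reason 'success' should exist.");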
diff --git a/toolkit/components/telemetry/tests/browser/browser_media_element_in_page_scalar.js b/toolkit/components/telemetry/tests/browser/browser_media_element_in_page_scalar.js
new file mode 100644
index 0000000000..e0c4437e80
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/browser_media_element_in_page_scalar.js
@@ -0,0 +1,128 @@
+"use strict";
+
+const { TelemetryTestUtils } = ChromeUtils.import(
+ "resource://testing-common/TelemetryTestUtils.jsm"
+);
+const LOCATION =
+ "https://example.com/browser/toolkit/components/telemetry/tests/browser/";
+const CORS_LOCATION =
+ "https://example.org/browser/toolkit/components/telemetry/tests/browser/";
+const MEDIA_SCALAR_NAME = "media.element_in_page_count";
+
+/**
+ * 'media.element_in_page_count' is a permanent scalar. This test checks that
+ * the scalar is accumulated correctly in different situations.
+ */
+add_task(async function start_tests() {
+ // Clear all scalars first to avoid interference from earlier tests.
+ TelemetryTestUtils.getProcessScalars("parent", false, true /* clear */);
+
+ await testMediaInPageScalar({
+ description: "load a page with one media element",
+ url: "file_media.html",
+ expectedScalarCount: 1,
+ });
+ await testMediaInPageScalar({
+ description: "load a page with multiple media elements",
+ url: "file_media.html",
+ options: {
+ createSecondMedia: true,
+ },
+ expectedScalarCount: 1,
+ });
+ await testMediaInPageScalar({
+ description: "load a page with media element created from iframe",
+ url: "file_iframe.html",
+ options: {
+ iframeUrl: "file_media.html",
+ },
+ expectedScalarCount: 1,
+ });
+ await testMediaInPageScalar({
+ description: "load a page with media element created from CORS iframe",
+ url: "file_iframe.html",
+ options: {
+ iframeUrl: "file_media.html",
+ CORSIframe: true,
+ },
+ expectedScalarCount: 1,
+ });
+ await testMediaInPageScalar({
+ description: "run multiple tabs, all loading media page",
+ url: "file_media.html",
+ options: {
+ tabNums: 2,
+ },
+ expectedScalarCount: 2,
+ });
+});
+
+async function testMediaInPageScalar({
+ description,
+ url,
+ options,
+ expectedScalarCount,
+} = {}) {
+ info(`media scalar should be undefined at the start`);
+ let scalars = TelemetryTestUtils.getProcessScalars("parent");
+ is(scalars[MEDIA_SCALAR_NAME], undefined, "has not created media scalar yet");
+
+ info(`run test '${description}'`);
+ url = LOCATION + url;
+ await runMediaPage(url, options);
+
+ info(`media scalar should be increased to ${expectedScalarCount}`);
+ scalars = TelemetryTestUtils.getProcessScalars(
+ "parent",
+ false,
+ true /* clear */
+ );
+ is(
+ scalars[MEDIA_SCALAR_NAME],
+ expectedScalarCount,
+ "media scalar count is correct"
+ );
+ info("============= Next Testcase =============");
+}
+
+/**
+ * The following are helper functions.
+ */
+async function runMediaPage(url, options = {}) {
+ const tabNums = options.tabNums ? options.tabNums : 1;
+ for (let idx = 0; idx < tabNums; idx++) {
+ info(`open a tab loading media page`);
+ const tab = await BrowserTestUtils.openNewForegroundTab(gBrowser, url);
+ if (options.iframeUrl) {
+ let iframeURL = options.CORSIframe ? CORS_LOCATION : LOCATION;
+ iframeURL += options.iframeUrl;
+ await loadPageForIframe(tab, iframeURL);
+ }
+
+ if (options.createSecondMedia) {
+ info(`create second media in the page`);
+ await createMedia(tab);
+ }
+
+ info(`remove tab`);
+ await BrowserTestUtils.removeTab(tab);
+ await BrowserUtils.promiseObserved("window-global-destroyed");
+ }
+}
+
+function createMedia(tab) {
+ return SpecialPowers.spawn(tab.linkedBrowser, [], _ => {
+ const video = content.document.createElement("VIDEO");
+ video.src = "gizmo.mp4";
+ video.loop = true;
+ content.document.body.appendChild(video);
+ });
+}
+
+function loadPageForIframe(tab, url) {
+ return SpecialPowers.spawn(tab.linkedBrowser, [url], async url => {
+ const iframe = content.document.getElementById("iframe");
+ iframe.src = url;
+ await new Promise(r => (iframe.onload = r));
+ });
+}
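Note: the helper above relies on TelemetryTestUtils.getProcessScalars() both to read the parent-process snapshot and to clear it between cases. A small sketch of that read-and-clear pattern, under the same assumptions as the test:

  // Read the parent-process scalars; passing true as the third argument also
  // clears them so the next test case starts from a clean state.
  const scalars = TelemetryTestUtils.getProcessScalars(
    "parent",
    false /* keyed */,
    true /* clear */
  );
  // The scalar is undefined until a page containing a media element has been
  // loaded and its window global destroyed; afterwards it holds the page count.
  const count = scalars["media.element_in_page_count"];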
diff --git a/toolkit/components/telemetry/tests/browser/file_iframe.html b/toolkit/components/telemetry/tests/browser/file_iframe.html
new file mode 100644
index 0000000000..271c179eb2
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/file_iframe.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>Media loaded in iframe</title>
+</head>
+<body>
+<iframe id="iframe"></iframe>
+</body>
+</html>
diff --git a/toolkit/components/telemetry/tests/browser/file_media.html b/toolkit/components/telemetry/tests/browser/file_media.html
new file mode 100644
index 0000000000..e2109d18f5
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/file_media.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>media page</title>
+</head>
+<body>
+<video id="video" src="gizmo.mp4" loop></video>
+</body>
+</html>
diff --git a/toolkit/components/telemetry/tests/browser/gizmo.mp4 b/toolkit/components/telemetry/tests/browser/gizmo.mp4
new file mode 100644
index 0000000000..87efad5ade
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/gizmo.mp4
Binary files differ
diff --git a/toolkit/components/telemetry/tests/gtest/TelemetryFixture.cpp b/toolkit/components/telemetry/tests/gtest/TelemetryFixture.cpp
new file mode 100644
index 0000000000..35742654f6
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TelemetryFixture.cpp
@@ -0,0 +1,28 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "TelemetryFixture.h"
+#include "mozilla/dom/SimpleGlobalObject.h"
+
+using namespace mozilla;
+
+void TelemetryTestFixture::SetUp() {
+ mTelemetry = do_GetService("@mozilla.org/base/telemetry;1");
+
+ mCleanGlobal = dom::SimpleGlobalObject::Create(
+ dom::SimpleGlobalObject::GlobalType::BindingDetail);
+
+ // The test must fail if we failed getting the global.
+ ASSERT_NE(mCleanGlobal, nullptr)
+ << "SimpleGlobalObject must return a valid global object.";
+}
+
+AutoJSContextWithGlobal::AutoJSContextWithGlobal(JSObject* aGlobalObject)
+ : mCx(nullptr) {
+ // The JS API must initialize correctly.
+ JS::Rooted<JSObject*> globalObject(dom::RootingCx(), aGlobalObject);
+ MOZ_ALWAYS_TRUE(mJsAPI.Init(globalObject));
+}
+
+JSContext* AutoJSContextWithGlobal::GetJSContext() const { return mJsAPI.cx(); }
diff --git a/toolkit/components/telemetry/tests/gtest/TelemetryFixture.h b/toolkit/components/telemetry/tests/gtest/TelemetryFixture.h
new file mode 100644
index 0000000000..f89aecf6a8
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TelemetryFixture.h
@@ -0,0 +1,39 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#ifndef TelemetryFixture_h_
+#define TelemetryFixture_h_
+
+#include "gtest/gtest.h"
+#include "mozilla/CycleCollectedJSContext.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "nsITelemetry.h"
+
+class TelemetryTestFixture : public ::testing::Test {
+ protected:
+ TelemetryTestFixture() : mCleanGlobal(nullptr) {}
+ virtual void SetUp();
+
+ JSObject* mCleanGlobal;
+
+ nsCOMPtr<nsITelemetry> mTelemetry;
+};
+
+// AutoJSAPI is annotated with MOZ_STACK_CLASS and thus cannot be
+// used as a member of TelemetryTestFixture, since gtest instantiates
+// that on the heap. To work around the problem, use the following class
+// at the beginning of each Telemetry test.
+// Note: this is very similar to AutoJSContext, but it allows passing a
+// global JS object in.
+class MOZ_RAII AutoJSContextWithGlobal {
+ public:
+ explicit AutoJSContextWithGlobal(JSObject* aGlobalObject);
+ JSContext* GetJSContext() const;
+
+ protected:
+ mozilla::dom::AutoJSAPI mJsAPI;
+ JSContext* mCx;
+};
+
+#endif // TelemetryFixture_h_
diff --git a/toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.cpp b/toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.cpp
new file mode 100644
index 0000000000..ff219ef386
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.cpp
@@ -0,0 +1,376 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "TelemetryTestHelpers.h"
+
+#include "core/TelemetryCommon.h"
+#include "core/TelemetryOrigin.h"
+#include "gtest/gtest.h"
+#include "js/Array.h" // JS::GetArrayLength, JS::IsArrayObject
+#include "mozilla/CycleCollectedJSContext.h"
+#include "mozilla/Unused.h"
+#include "nsPrintfCString.h"
+
+using namespace mozilla;
+
+// Helper methods provided to simplify writing tests and meant to be used in C++
+// Gtests.
+namespace TelemetryTestHelpers {
+
+void CheckUintScalar(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot, uint32_t expectedValue) {
+ // Validate the value of the test scalar.
+ JS::RootedValue value(aCx);
+ JS::RootedObject scalarObj(aCx, &aSnapshot.toObject());
+ ASSERT_TRUE(JS_GetProperty(aCx, scalarObj, aName, &value))
+ << "The test scalar must be reported.";
+
+ ASSERT_TRUE(value.isInt32())
+ << "The scalar value must be of the correct type.";
+ ASSERT_TRUE(value.toInt32() >= 0)
+ << "The uint scalar type must contain a value >= 0.";
+ ASSERT_EQ(static_cast<uint32_t>(value.toInt32()), expectedValue)
+ << "The scalar value must match the expected value.";
+}
+
+void CheckBoolScalar(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot, bool expectedValue) {
+ // Validate the value of the test scalar.
+ JS::RootedValue value(aCx);
+ JS::RootedObject scalarObj(aCx, &aSnapshot.toObject());
+ ASSERT_TRUE(JS_GetProperty(aCx, scalarObj, aName, &value))
+ << "The test scalar must be reported.";
+ ASSERT_TRUE(value.isBoolean())
+ << "The scalar value must be of the correct type.";
+ ASSERT_EQ(static_cast<bool>(value.toBoolean()), expectedValue)
+ << "The scalar value must match the expected value.";
+}
+
+void CheckStringScalar(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot, const char* expectedValue) {
+ // Validate the value of the test scalar.
+ JS::RootedValue value(aCx);
+ JS::RootedObject scalarObj(aCx, &aSnapshot.toObject());
+ ASSERT_TRUE(JS_GetProperty(aCx, scalarObj, aName, &value))
+ << "The test scalar must be reported.";
+ ASSERT_TRUE(value.isString())
+ << "The scalar value must be of the correct type.";
+
+ bool sameString;
+ ASSERT_TRUE(
+ JS_StringEqualsAscii(aCx, value.toString(), expectedValue, &sameString))
+ << "JS String comparison failed";
+ ASSERT_TRUE(sameString)
+ << "The scalar value must match the expected string";
+}
+
+void CheckKeyedUintScalar(const char* aName, const char* aKey, JSContext* aCx,
+ JS::HandleValue aSnapshot, uint32_t expectedValue) {
+ JS::RootedValue keyedScalar(aCx);
+ JS::RootedObject scalarObj(aCx, &aSnapshot.toObject());
+ // Get the aName keyed scalar object from the scalars snapshot.
+ ASSERT_TRUE(JS_GetProperty(aCx, scalarObj, aName, &keyedScalar))
+ << "The keyed scalar must be reported.";
+
+ CheckUintScalar(aKey, aCx, keyedScalar, expectedValue);
+}
+
+void CheckKeyedBoolScalar(const char* aName, const char* aKey, JSContext* aCx,
+ JS::HandleValue aSnapshot, bool expectedValue) {
+ JS::RootedValue keyedScalar(aCx);
+ JS::RootedObject scalarObj(aCx, &aSnapshot.toObject());
+ // Get the aName keyed scalar object from the scalars snapshot.
+ ASSERT_TRUE(JS_GetProperty(aCx, scalarObj, aName, &keyedScalar))
+ << "The keyed scalar must be reported.";
+
+ CheckBoolScalar(aKey, aCx, keyedScalar, expectedValue);
+}
+
+void CheckNumberOfProperties(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot,
+ uint32_t expectedNumProperties) {
+ JS::RootedValue keyedScalar(aCx);
+ JS::RootedObject scalarObj(aCx, &aSnapshot.toObject());
+ // Get the aName keyed scalar object from the scalars snapshot.
+ ASSERT_TRUE(JS_GetProperty(aCx, scalarObj, aName, &keyedScalar))
+ << "The keyed scalar must be reported.";
+
+ JS::RootedObject keyedScalarObj(aCx, &keyedScalar.toObject());
+ JS::Rooted<JS::IdVector> ids(aCx, JS::IdVector(aCx));
+ ASSERT_TRUE(JS_Enumerate(aCx, keyedScalarObj, &ids))
+ << "We must be able to get keyed scalar members.";
+
+ ASSERT_EQ(expectedNumProperties, ids.length())
+ << "The scalar must report the expected number of properties.";
+}
+
+bool EventPresent(JSContext* aCx, const JS::RootedValue& aSnapshot,
+ const nsACString& aCategory, const nsACString& aMethod,
+ const nsACString& aObject) {
+ EXPECT_FALSE(aSnapshot.isNullOrUndefined())
+ << "Event snapshot must not be null/undefined.";
+ bool isArray = false;
+ EXPECT_TRUE(JS::IsArrayObject(aCx, aSnapshot, &isArray) && isArray)
+ << "The snapshot must be an array.";
+ JS::RootedObject arrayObj(aCx, &aSnapshot.toObject());
+ uint32_t arrayLength = 0;
+ EXPECT_TRUE(JS::GetArrayLength(aCx, arrayObj, &arrayLength))
+ << "Array must have a length.";
+ EXPECT_TRUE(arrayLength > 0) << "Array must have at least one element.";
+
+ for (uint32_t arrayIdx = 0; arrayIdx < arrayLength; ++arrayIdx) {
+ JS::Rooted<JS::Value> element(aCx);
+ EXPECT_TRUE(JS_GetElement(aCx, arrayObj, arrayIdx, &element))
+ << "Must be able to get element.";
+ EXPECT_TRUE(JS::IsArrayObject(aCx, element, &isArray) && isArray)
+ << "Element must be an array.";
+ JS::RootedObject eventArray(aCx, &element.toObject());
+ uint32_t eventLength;
+ EXPECT_TRUE(JS::GetArrayLength(aCx, eventArray, &eventLength))
+ << "Event array must have a length.";
+ EXPECT_TRUE(eventLength >= 4)
+ << "Event array must have at least 4 elements (timestamp, category, "
+ "method, object).";
+
+ JS::Rooted<JS::Value> str(aCx);
+ nsAutoJSString jsStr;
+ EXPECT_TRUE(JS_GetElement(aCx, eventArray, 1, &str))
+ << "Must be able to get category.";
+ EXPECT_TRUE(str.isString()) << "Category must be a string.";
+ EXPECT_TRUE(jsStr.init(aCx, str))
+ << "Category must be able to be init'd to a jsstring.";
+ if (NS_ConvertUTF16toUTF8(jsStr) != aCategory) {
+ continue;
+ }
+
+ EXPECT_TRUE(JS_GetElement(aCx, eventArray, 2, &str))
+ << "Must be able to get method.";
+ EXPECT_TRUE(str.isString()) << "Method must be a string.";
+ EXPECT_TRUE(jsStr.init(aCx, str))
+ << "Method must be able to be init'd to a jsstring.";
+ if (NS_ConvertUTF16toUTF8(jsStr) != aMethod) {
+ continue;
+ }
+
+ EXPECT_TRUE(JS_GetElement(aCx, eventArray, 3, &str))
+ << "Must be able to get object.";
+ EXPECT_TRUE(str.isString()) << "Object must be a string.";
+ EXPECT_TRUE(jsStr.init(aCx, str))
+ << "Object must be able to be init'd to a jsstring.";
+ if (NS_ConvertUTF16toUTF8(jsStr) != aObject) {
+ continue;
+ }
+
+ // We found it!
+ return true;
+ }
+
+ // We didn't find it!
+ return false;
+}
+
+void GetOriginSnapshot(JSContext* aCx, JS::MutableHandle<JS::Value> aResult,
+ bool aClear) {
+ nsCOMPtr<nsITelemetry> telemetry =
+ do_GetService("@mozilla.org/base/telemetry;1");
+
+ JS::RootedValue originSnapshot(aCx);
+ nsresult rv;
+ rv = telemetry->GetOriginSnapshot(aClear, aCx, &originSnapshot);
+ ASSERT_EQ(rv, NS_OK) << "Snapshotting origin data must not fail.";
+ ASSERT_TRUE(originSnapshot.isObject())
+ << "The snapshot must be an object.";
+
+ aResult.set(originSnapshot);
+}
+
+/*
+ * Extracts the `a` and `b` strings from the prioData snapshot object
+ * of any length, which looks like:
+ *
+ * [{
+ * encoding: encodingName,
+ * prio: {
+ * a: <string>,
+ * b: <string>,
+ * },
+ * }, ...]
+ */
+void GetEncodedOriginStrings(
+ JSContext* aCx, const nsCString& aEncoding,
+ nsTArray<Tuple<nsCString, nsCString>>& aPrioStrings) {
+ JS::RootedValue snapshot(aCx);
+ nsresult rv;
+ rv = TelemetryOrigin::GetEncodedOriginSnapshot(false /* clear */, aCx,
+ &snapshot);
+
+ ASSERT_FALSE(NS_FAILED(rv));
+ ASSERT_FALSE(snapshot.isNullOrUndefined())
+ << "Encoded snapshot must not be null/undefined.";
+
+ JS::RootedObject prioDataObj(aCx, &snapshot.toObject());
+ bool isArray = false;
+ ASSERT_TRUE(JS::IsArrayObject(aCx, prioDataObj, &isArray) && isArray)
+ << "The metric's origins must be in an array.";
+
+ uint32_t length = 0;
+ ASSERT_TRUE(JS::GetArrayLength(aCx, prioDataObj, &length));
+ ASSERT_TRUE(length > 0)
+ << "Length of returned array must greater than 0";
+
+ for (auto i = 0u; i < length; ++i) {
+ JS::RootedValue arrayItem(aCx);
+ ASSERT_TRUE(JS_GetElement(aCx, prioDataObj, i, &arrayItem));
+ ASSERT_TRUE(arrayItem.isObject());
+ ASSERT_FALSE(arrayItem.isNullOrUndefined());
+
+ JS::RootedObject arrayItemObj(aCx, &arrayItem.toObject());
+
+ JS::RootedValue encodingVal(aCx);
+ ASSERT_TRUE(JS_GetProperty(aCx, arrayItemObj, "encoding", &encodingVal));
+ ASSERT_TRUE(encodingVal.isString());
+ nsAutoJSString jsStr;
+ ASSERT_TRUE(jsStr.init(aCx, encodingVal));
+
+ nsPrintfCString encoding(aEncoding.get(),
+ i % TelemetryOrigin::SizeOfPrioDatasPerMetric());
+ ASSERT_TRUE(NS_ConvertUTF16toUTF8(jsStr) == encoding)
+ << "Actual 'encoding' (" << NS_ConvertUTF16toUTF8(jsStr).get()
+ << ") must match expected (" << encoding << ")";
+
+ JS::RootedValue prioVal(aCx);
+ ASSERT_TRUE(JS_GetProperty(aCx, arrayItemObj, "prio", &prioVal));
+ ASSERT_TRUE(prioVal.isObject());
+ ASSERT_FALSE(prioVal.isNullOrUndefined());
+
+ JS::RootedObject prioObj(aCx, &prioVal.toObject());
+
+ JS::RootedValue aVal(aCx);
+ nsAutoJSString aStr;
+ ASSERT_TRUE(JS_GetProperty(aCx, prioObj, "a", &aVal));
+ ASSERT_TRUE(aVal.isString());
+ ASSERT_TRUE(aStr.init(aCx, aVal));
+
+ JS::RootedValue bVal(aCx);
+ nsAutoJSString bStr;
+ ASSERT_TRUE(JS_GetProperty(aCx, prioObj, "b", &bVal));
+ ASSERT_TRUE(bVal.isString());
+ ASSERT_TRUE(bStr.init(aCx, bVal));
+
+ aPrioStrings.AppendElement(Tuple<nsCString, nsCString>(
+ NS_ConvertUTF16toUTF8(aStr), NS_ConvertUTF16toUTF8(bStr)));
+ }
+}
+
+void GetEventSnapshot(JSContext* aCx, JS::MutableHandle<JS::Value> aResult,
+ ProcessID aProcessType) {
+ nsCOMPtr<nsITelemetry> telemetry =
+ do_GetService("@mozilla.org/base/telemetry;1");
+
+ JS::RootedValue eventSnapshot(aCx);
+ nsresult rv;
+ rv = telemetry->SnapshotEvents(1 /* PRERELEASE_CHANNELS */, false /* clear */,
+ 0 /* eventLimit */, aCx, 1 /* argc */,
+ &eventSnapshot);
+ ASSERT_EQ(rv, NS_OK) << "Snapshotting events must not fail.";
+ ASSERT_TRUE(eventSnapshot.isObject())
+ << "The snapshot must be an object.";
+
+ JS::RootedValue processEvents(aCx);
+ JS::RootedObject eventObj(aCx, &eventSnapshot.toObject());
+ Unused << JS_GetProperty(aCx, eventObj,
+ Telemetry::Common::GetNameForProcessID(aProcessType),
+ &processEvents);
+
+ aResult.set(processEvents);
+}
+
+void GetScalarsSnapshot(bool aKeyed, JSContext* aCx,
+ JS::MutableHandle<JS::Value> aResult,
+ ProcessID aProcessType) {
+ nsCOMPtr<nsITelemetry> telemetry =
+ do_GetService("@mozilla.org/base/telemetry;1");
+
+ // Get a snapshot of the scalars.
+ JS::RootedValue scalarsSnapshot(aCx);
+ nsresult rv;
+
+ if (aKeyed) {
+ rv = telemetry->GetSnapshotForKeyedScalars(
+ "main"_ns, false, false /* filter */, aCx, &scalarsSnapshot);
+ } else {
+ rv = telemetry->GetSnapshotForScalars("main"_ns, false, false /* filter */,
+ aCx, &scalarsSnapshot);
+ }
+
+ // Validate the snapshot.
+ ASSERT_EQ(rv, NS_OK) << "Creating a snapshot of the data must not fail.";
+ ASSERT_TRUE(scalarsSnapshot.isObject())
+ << "The snapshot must be an object.";
+
+ JS::RootedValue processScalars(aCx);
+ JS::RootedObject scalarObj(aCx, &scalarsSnapshot.toObject());
+ // Don't complain if no scalars for the process can be found. Just
+ // return an empty object.
+ Unused << JS_GetProperty(aCx, scalarObj,
+ Telemetry::Common::GetNameForProcessID(aProcessType),
+ &processScalars);
+
+ aResult.set(processScalars);
+}
+
+void GetAndClearHistogram(JSContext* cx, nsCOMPtr<nsITelemetry> mTelemetry,
+ const nsACString& name, bool is_keyed) {
+ JS::RootedValue testHistogram(cx);
+ nsresult rv =
+ is_keyed ? mTelemetry->GetKeyedHistogramById(name, cx, &testHistogram)
+ : mTelemetry->GetHistogramById(name, cx, &testHistogram);
+
+ ASSERT_EQ(rv, NS_OK) << "Cannot fetch histogram";
+
+ // Clear the stored value
+ JS::RootedObject testHistogramObj(cx, &testHistogram.toObject());
+ JS::RootedValue rval(cx);
+ ASSERT_TRUE(JS_CallFunctionName(cx, testHistogramObj, "clear",
+ JS::HandleValueArray::empty(), &rval))
+ << "Cannot clear histogram";
+}
+
+void GetProperty(JSContext* cx, const char* name, JS::HandleValue valueIn,
+ JS::MutableHandleValue valueOut) {
+ JS::RootedValue property(cx);
+ JS::RootedObject valueInObj(cx, &valueIn.toObject());
+ ASSERT_TRUE(JS_GetProperty(cx, valueInObj, name, &property))
+ << "Cannot get property '" << name << "'";
+ valueOut.set(property);
+}
+
+void GetElement(JSContext* cx, uint32_t index, JS::HandleValue valueIn,
+ JS::MutableHandleValue valueOut) {
+ JS::RootedValue element(cx);
+ JS::RootedObject valueInObj(cx, &valueIn.toObject());
+ ASSERT_TRUE(JS_GetElement(cx, valueInObj, index, &element))
+ << "Cannot get element at index '" << index << "'";
+ valueOut.set(element);
+}
+
+void GetSnapshots(JSContext* cx, nsCOMPtr<nsITelemetry> mTelemetry,
+ const char* name, JS::MutableHandleValue valueOut,
+ bool is_keyed) {
+ JS::RootedValue snapshots(cx);
+ nsresult rv = is_keyed
+ ? mTelemetry->GetSnapshotForKeyedHistograms(
+ "main"_ns, false, false /* filter */, cx, &snapshots)
+ : mTelemetry->GetSnapshotForHistograms(
+ "main"_ns, false, false /* filter */, cx, &snapshots);
+
+ JS::RootedValue snapshot(cx);
+ GetProperty(cx, "parent", snapshots, &snapshot);
+
+ ASSERT_EQ(rv, NS_OK) << "Cannot call histogram snapshots";
+ valueOut.set(snapshot);
+}
+
+} // namespace TelemetryTestHelpers
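Note: the helpers above (GetScalarsSnapshot(), CheckUintScalar(), and friends) walk the same snapshot objects that chrome JavaScript receives: an object keyed by process name ("parent", "content", "dynamic", ...) whose values map full scalar names to values. A rough JS sketch of the shape being consumed; the scalar name shown is illustrative only.

  const snapshot = Services.telemetry.getSnapshotForScalars("main", false /* clear */);
  // Shape: { parent: { "some.scalar.name": 42, ... }, content: { ... }, ... }
  const parentScalars = snapshot.parent || {};
  for (const [name, value] of Object.entries(parentScalars)) {
    dump(`${name} = ${value}\n`);
  }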
diff --git a/toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.h b/toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.h
new file mode 100644
index 0000000000..b5fdf98e1e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TelemetryTestHelpers.h
@@ -0,0 +1,68 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#ifndef TelemetryTestHelpers_h_
+#define TelemetryTestHelpers_h_
+
+#include "js/TypeDecls.h"
+#include "mozilla/TelemetryProcessEnums.h"
+#include "nsITelemetry.h"
+
+using mozilla::Telemetry::ProcessID;
+
+namespace TelemetryTestHelpers {
+
+void CheckUintScalar(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot, uint32_t expectedValue);
+
+void CheckBoolScalar(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot, bool expectedValue);
+
+void CheckStringScalar(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot, const char* expectedValue);
+
+void CheckKeyedUintScalar(const char* aName, const char* aKey, JSContext* aCx,
+ JS::HandleValue aSnapshot, uint32_t expectedValue);
+
+void CheckKeyedBoolScalar(const char* aName, const char* aKey, JSContext* aCx,
+ JS::HandleValue aSnapshot, bool expectedValue);
+
+void CheckNumberOfProperties(const char* aName, JSContext* aCx,
+ JS::HandleValue aSnapshot,
+ uint32_t expectedNumProperties);
+
+bool EventPresent(JSContext* aCx, const JS::RootedValue& aSnapshot,
+ const nsACString& aCategory, const nsACString& aMethod,
+ const nsACString& aObject);
+
+void GetEventSnapshot(JSContext* aCx, JS::MutableHandle<JS::Value> aResult,
+ ProcessID aProcessType = ProcessID::Parent);
+
+void GetScalarsSnapshot(bool aKeyed, JSContext* aCx,
+ JS::MutableHandle<JS::Value> aResult,
+ ProcessID aProcessType = ProcessID::Parent);
+
+void GetAndClearHistogram(JSContext* cx, nsCOMPtr<nsITelemetry> mTelemetry,
+ const nsACString& name, bool is_keyed);
+
+void GetProperty(JSContext* cx, const char* name, JS::HandleValue valueIn,
+ JS::MutableHandleValue valueOut);
+
+void GetElement(JSContext* cx, uint32_t index, JS::HandleValue valueIn,
+ JS::MutableHandleValue valueOut);
+
+void GetSnapshots(JSContext* cx, nsCOMPtr<nsITelemetry> mTelemetry,
+ const char* name, JS::MutableHandleValue valueOut,
+ bool is_keyed);
+
+void GetOriginSnapshot(JSContext* aCx, JS::MutableHandle<JS::Value> aResult,
+ bool aClear = false);
+
+void GetEncodedOriginStrings(
+ JSContext* aCx, const nsCString& aEncoding,
+ nsTArray<mozilla::Tuple<nsCString, nsCString>>& aPrioStrings);
+
+} // namespace TelemetryTestHelpers
+
+#endif
diff --git a/toolkit/components/telemetry/tests/gtest/TestCombinedStacks.cpp b/toolkit/components/telemetry/tests/gtest/TestCombinedStacks.cpp
new file mode 100644
index 0000000000..3e21c7378c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TestCombinedStacks.cpp
@@ -0,0 +1,158 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "TelemetryFixture.h"
+#include "TelemetryTestHelpers.h"
+#include "other/CombinedStacks.h"
+#include "other/ProcessedStack.h"
+#include "nsPrintfCString.h"
+
+using namespace mozilla::Telemetry;
+using namespace TelemetryTestHelpers;
+
+TEST_F(TelemetryTestFixture, CombinedStacks) {
+ const size_t kMaxStacksKept = 10;
+ CombinedStacks stacks(kMaxStacksKept);
+
+ size_t iterations = kMaxStacksKept * 2;
+ for (size_t i = 0; i < iterations; ++i) {
+ ProcessedStack stack;
+ ProcessedStack::Frame frame = {static_cast<uint16_t>(i)};
+ const nsAutoString& name =
+ NS_ConvertUTF8toUTF16(nsPrintfCString("test%zu", i));
+ ProcessedStack::Module module = {name};
+
+ stack.AddFrame(frame);
+ stack.AddModule(module);
+ stacks.AddStack(stack);
+ }
+
+ ASSERT_EQ(stacks.GetStackCount(), kMaxStacksKept) << "Wrong number of stacks";
+ ASSERT_EQ(stacks.GetModuleCount(), kMaxStacksKept * 2)
+ << "Wrong number of modules";
+
+ for (size_t i = 0; i < kMaxStacksKept; ++i) {
+ ProcessedStack::Frame frame = stacks.GetStack(i)[0];
+ ASSERT_EQ(frame.mOffset, kMaxStacksKept + i)
+ << "Frame is not returning expected value";
+
+ ProcessedStack::Module module = stacks.GetModule(frame.mModIndex);
+ nsPrintfCString moduleName("test%hu", frame.mModIndex);
+ ASSERT_TRUE(module.mName.Equals(NS_ConvertUTF8toUTF16(moduleName)))
+ << "Module should have expected name";
+ }
+
+ for (size_t i = 0; i < kMaxStacksKept; ++i) {
+ stacks.RemoveStack(kMaxStacksKept - i - 1);
+ ASSERT_EQ(stacks.GetStackCount(), kMaxStacksKept - i - 1)
+ << "Stack should be removed";
+ }
+}
+
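+// Helper for the test below: builds a ProcessedStack from parallel arrays of
+// module names and frame offsets. An empty module name yields a frame with
+// no module (mModIndex set to the uint16_t maximum).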
+template <int N>
+ProcessedStack MakeStack(const nsLiteralString (&aModules)[N],
+ const uintptr_t (&aOffsets)[N]) {
+ ProcessedStack stack;
+ for (int i = 0; i < N; ++i) {
+ ProcessedStack::Frame frame = {aOffsets[i]};
+ if (aModules[i].IsEmpty()) {
+ frame.mModIndex = std::numeric_limits<uint16_t>::max();
+ } else {
+ frame.mModIndex = stack.GetNumModules();
+ stack.AddModule(ProcessedStack::Module{aModules[i]});
+ }
+ stack.AddFrame(frame);
+ }
+ return stack;
+}
+
+TEST(CombinedStacks, Combine)
+{
+ const nsLiteralString moduleSet1[] = {u"mod1"_ns, u"mod2"_ns, u"base"_ns};
+ const nsLiteralString moduleSet2[] = {u"modX"_ns, u""_ns, u"modZ"_ns,
+ u"base"_ns};
+ // [0] 00 mod1+100
+ // 01 mod2+200
+ // 02 base+300
+ // [1] 00 mod1+1000
+ // 01 mod2+2000
+ // 02 base+3000
+ // [2] 00 modX+100
+ // 01 <no module>+200
+ // 02 modZ+300
+ // 03 base+400
+ // [3] 00 modX+1000
+ // 01 <no module>+3000
+ // 02 modZ+2000
+ // 03 base+4000
+ const ProcessedStack testStacks[] = {
+ MakeStack(moduleSet1, {100ul, 200ul, 300ul}),
+ MakeStack(moduleSet1, {1000ul, 2000ul, 3000ul}),
+ MakeStack(moduleSet2, {100ul, 200ul, 300ul, 400ul}),
+ MakeStack(moduleSet2, {1000ul, 2000ul, 3000ul, 4000ul}),
+ };
+
+ // combined1 <-- testStacks[0] + testStacks[1]
+ // combined2 <-- testStacks[2] + testStacks[3]
+ CombinedStacks combined1, combined2;
+ combined1.AddStack(testStacks[0]);
+ combined1.AddStack(testStacks[1]);
+ combined2.AddStack(testStacks[2]);
+ combined2.AddStack(testStacks[3]);
+
+ EXPECT_EQ(combined1.GetModuleCount(), mozilla::ArrayLength(moduleSet1));
+ EXPECT_EQ(combined1.GetStackCount(), 2u);
+ EXPECT_EQ(combined2.GetModuleCount(), mozilla::ArrayLength(moduleSet2) - 1);
+ EXPECT_EQ(combined2.GetStackCount(), 2u);
+
+ // combined1 <-- combined1 + combined2
+ combined1.AddStacks(combined2);
+
+ EXPECT_EQ(combined1.GetModuleCount(), 5u); // {mod1, mod2, modX, modZ, base}
+ EXPECT_EQ(combined1.GetStackCount(), mozilla::ArrayLength(testStacks));
+
+ for (size_t i = 0; i < combined1.GetStackCount(); ++i) {
+ const auto& expectedStack = testStacks[i];
+ const auto& actualStack = combined1.GetStack(i);
+ EXPECT_EQ(actualStack.size(), expectedStack.GetStackSize());
+ if (actualStack.size() != expectedStack.GetStackSize()) {
+ continue;
+ }
+
+ for (size_t j = 0; j < actualStack.size(); ++j) {
+ const auto& expectedFrame = expectedStack.GetFrame(j);
+ const auto& actualFrame = actualStack[j];
+
+ EXPECT_EQ(actualFrame.mOffset, expectedFrame.mOffset);
+
+ if (expectedFrame.mModIndex == std::numeric_limits<uint16_t>::max()) {
+ EXPECT_EQ(actualFrame.mModIndex, std::numeric_limits<uint16_t>::max());
+ } else {
+ EXPECT_EQ(combined1.GetModule(actualFrame.mModIndex),
+ expectedStack.GetModule(expectedFrame.mModIndex));
+ }
+ }
+ }
+
+ // Only testStacks[3] will be stored into oneStack
+ CombinedStacks oneStack(1);
+ oneStack.AddStacks(combined1);
+
+ EXPECT_EQ(oneStack.GetStackCount(), 1u);
+ EXPECT_EQ(oneStack.GetStack(0).size(), testStacks[3].GetStackSize());
+
+ for (size_t i = 0; i < oneStack.GetStack(0).size(); ++i) {
+ const auto& expectedFrame = testStacks[3].GetFrame(i);
+ const auto& actualFrame = oneStack.GetStack(0)[i];
+
+ EXPECT_EQ(actualFrame.mOffset, expectedFrame.mOffset);
+
+ if (expectedFrame.mModIndex == std::numeric_limits<uint16_t>::max()) {
+ EXPECT_EQ(actualFrame.mModIndex, std::numeric_limits<uint16_t>::max());
+ } else {
+ EXPECT_EQ(oneStack.GetModule(actualFrame.mModIndex),
+ testStacks[3].GetModule(expectedFrame.mModIndex));
+ }
+ }
+}
diff --git a/toolkit/components/telemetry/tests/gtest/TestCounters.cpp b/toolkit/components/telemetry/tests/gtest/TestCounters.cpp
new file mode 100644
index 0000000000..2950500c3f
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TestCounters.cpp
@@ -0,0 +1,173 @@
+/* vim:set ts=2 sw=2 sts=0 et: */
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "gtest/gtest.h"
+#include "js/Conversions.h"
+#include "mozilla/Telemetry.h"
+#include "TelemetryFixture.h"
+#include "TelemetryTestHelpers.h"
+
+using namespace mozilla;
+using namespace TelemetryTestHelpers;
+
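+// Telemetry::AutoCounter accumulates into its histogram when the counter
+// goes out of scope, so only counters destroyed before the snapshot is taken
+// contribute to the sums checked below.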
+TEST_F(TelemetryTestFixture, AutoCounter) {
+ const uint32_t kExpectedValue = 100;
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ const char* telemetryTestCountName =
+ Telemetry::GetHistogramName(Telemetry::TELEMETRY_TEST_COUNT);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT"_ns,
+ false);
+
+ // Accumulate in the histogram
+ {
+ Telemetry::AutoCounter<Telemetry::TELEMETRY_TEST_COUNT> autoCounter;
+ autoCounter += kExpectedValue / 2;
+ }
+  // This counter should not have accumulated yet, since it has not gone out
+  // of scope by the time the snapshot below is taken
+ Telemetry::AutoCounter<Telemetry::TELEMETRY_TEST_COUNT> autoCounter;
+ autoCounter += kExpectedValue;
+ // Accumulate a second time in the histogram
+ {
+ Telemetry::AutoCounter<Telemetry::TELEMETRY_TEST_COUNT> autoCounter;
+ autoCounter += kExpectedValue / 2;
+ }
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, telemetryTestCountName, &snapshot,
+ false);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), telemetryTestCountName, snapshot, &histogram);
+
+ // Get "sum" property from histogram
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", histogram, &sum);
+
+ // Check that the "sum" stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture, AutoCounterUnderflow) {
+ const uint32_t kExpectedValue = 0;
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ const char* telemetryTestCountName =
+ Telemetry::GetHistogramName(Telemetry::TELEMETRY_TEST_COUNT);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT"_ns,
+ false);
+
+ // Accumulate in the histogram
+ {
+ Telemetry::AutoCounter<Telemetry::TELEMETRY_TEST_COUNT> autoCounter;
+ autoCounter += -1;
+ }
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, telemetryTestCountName, &snapshot,
+ false);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), telemetryTestCountName, snapshot, &histogram);
+
+ // Get "sum" property from histogram
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", histogram, &sum);
+
+ // Check that the "sum" stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 42;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is supposed to return 0 when an underflow occurs.";
+}
+
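+// RuntimeAutoCounter behaves like AutoCounter, but takes the histogram ID as
+// a runtime constructor argument instead of a template parameter.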
+TEST_F(TelemetryTestFixture, RuntimeAutoCounter) {
+ const uint32_t kExpectedValue = 100;
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ const char* telemetryTestCountName =
+ Telemetry::GetHistogramName(Telemetry::TELEMETRY_TEST_COUNT);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT"_ns,
+ false);
+
+ // Accumulate in the histogram
+ {
+ Telemetry::RuntimeAutoCounter autoCounter(Telemetry::TELEMETRY_TEST_COUNT);
+ autoCounter += kExpectedValue / 2;
+ }
+  // This counter should not have accumulated yet, since it has not gone out
+  // of scope by the time the snapshot below is taken
+ Telemetry::RuntimeAutoCounter autoCounter(Telemetry::TELEMETRY_TEST_COUNT);
+ autoCounter += kExpectedValue;
+ // Accumulate a second time in the histogram
+ {
+ Telemetry::RuntimeAutoCounter autoCounter(Telemetry::TELEMETRY_TEST_COUNT);
+ autoCounter += kExpectedValue / 2;
+ }
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, telemetryTestCountName, &snapshot,
+ false);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), telemetryTestCountName, snapshot, &histogram);
+
+ // Get "sum" property from histogram
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", histogram, &sum);
+
+ // Check that the "sum" stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture, RuntimeAutoCounterUnderflow) {
+ const uint32_t kExpectedValue = 0;
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ const char* telemetryTestCountName =
+ Telemetry::GetHistogramName(Telemetry::TELEMETRY_TEST_COUNT);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT"_ns,
+ false);
+
+ // Accumulate in the histogram
+ {
+ Telemetry::RuntimeAutoCounter autoCounter(Telemetry::TELEMETRY_TEST_COUNT,
+ kExpectedValue);
+ autoCounter += -1;
+ }
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, telemetryTestCountName, &snapshot,
+ false);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), telemetryTestCountName, snapshot, &histogram);
+
+ // Get "sum" property from histogram
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", histogram, &sum);
+
+ // Check that the "sum" stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 42;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is supposed to return 0 when an underflow occurs.";
+}
diff --git a/toolkit/components/telemetry/tests/gtest/TestEvents.cpp b/toolkit/components/telemetry/tests/gtest/TestEvents.cpp
new file mode 100644
index 0000000000..1298586c59
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TestEvents.cpp
@@ -0,0 +1,125 @@
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "core/TelemetryEvent.h"
+#include "gtest/gtest.h"
+#include "js/Array.h" // JS::GetArrayLength
+#include "mozilla/Maybe.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/Unused.h"
+#include "TelemetryFixture.h"
+#include "TelemetryTestHelpers.h"
+
+using namespace mozilla;
+using namespace TelemetryTestHelpers;
+
+namespace mozilla {
+namespace Telemetry {
+
+struct EventExtraEntry {
+ nsCString key;
+ nsCString value;
+};
+
+} // namespace Telemetry
+} // namespace mozilla
+
+// Test that we can properly record events using the C++ API.
+TEST_F(TelemetryTestFixture, RecordEventNative) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get events from other tests.
+ Unused << mTelemetry->ClearEvents();
+
+ const nsLiteralCString category("telemetry.test");
+ const nsLiteralCString method("test1");
+ const nsLiteralCString method2("test2");
+ const nsLiteralCString object("object1");
+ const nsLiteralCString object2("object2");
+ const nsLiteralCString value("value");
+ const nsLiteralCString valueLong(
+ "this value is much too long and must be truncated to fit in the limit "
+ "which at time of writing was 80 bytes.");
+ const nsLiteralCString extraKey("key1");
+ const nsLiteralCString extraValue("extra value");
+ const nsLiteralCString extraValueLong(
+ "this extra value is much too long and must be truncated to fit in the "
+ "limit which at time of writing was 80 bytes.");
+
+  // Try recording before the category is enabled.
+ Telemetry::RecordEvent(Telemetry::EventID::TelemetryTest_Test1_Object1,
+ Nothing(), Nothing());
+
+ // Ensure "telemetry.test" is enabled
+ Telemetry::SetEventRecordingEnabled(category, true);
+
+ // Try recording after it's enabled.
+ Telemetry::RecordEvent(Telemetry::EventID::TelemetryTest_Test2_Object1,
+ Nothing(), Nothing());
+
+ // Try recording with normal value, extra
+ CopyableTArray<EventExtraEntry> extra(
+ {EventExtraEntry{extraKey, extraValue}});
+ Telemetry::RecordEvent(Telemetry::EventID::TelemetryTest_Test1_Object2,
+ mozilla::Some(value), mozilla::Some(extra));
+
+ // Try recording with too-long value, extra
+ CopyableTArray<EventExtraEntry> longish(
+ {EventExtraEntry{extraKey, extraValueLong}});
+ Telemetry::RecordEvent(Telemetry::EventID::TelemetryTest_Test2_Object2,
+ mozilla::Some(valueLong), mozilla::Some(longish));
+
+ JS::RootedValue eventsSnapshot(cx.GetJSContext());
+ GetEventSnapshot(cx.GetJSContext(), &eventsSnapshot);
+
+ ASSERT_TRUE(!EventPresent(cx.GetJSContext(), eventsSnapshot, category, method,
+ object))
+ << "Test event must not be present when recorded before enabled.";
+ ASSERT_TRUE(EventPresent(cx.GetJSContext(), eventsSnapshot, category, method2,
+ object))
+ << "Test event must be present.";
+ ASSERT_TRUE(EventPresent(cx.GetJSContext(), eventsSnapshot, category, method,
+ object2))
+ << "Test event with value and extra must be present.";
+ ASSERT_TRUE(EventPresent(cx.GetJSContext(), eventsSnapshot, category, method2,
+ object2))
+ << "Test event with truncated value and extra must be present.";
+
+ // Ensure that the truncations happened appropriately.
+ JSContext* aCx = cx.GetJSContext();
+ JS::RootedObject arrayObj(aCx, &eventsSnapshot.toObject());
+ JS::Rooted<JS::Value> eventRecord(aCx);
+ ASSERT_TRUE(JS_GetElement(aCx, arrayObj, 2, &eventRecord))
+ << "Must be able to get record.";
+ JS::RootedObject recordArray(aCx, &eventRecord.toObject());
+ uint32_t recordLength;
+ ASSERT_TRUE(JS::GetArrayLength(aCx, recordArray, &recordLength))
+ << "Event record array must have length.";
+ ASSERT_TRUE(recordLength == 6)
+ << "Event record must have 6 elements.";
+
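+  // Each event record in the snapshot is an array of the form
+  // [timestamp, category, method, object, value, extra], so the (truncated)
+  // value and extra live at indices 4 and 5.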
+ JS::Rooted<JS::Value> str(aCx);
+ nsAutoJSString jsStr;
+ // The value string is at index 4
+ ASSERT_TRUE(JS_GetElement(aCx, recordArray, 4, &str))
+ << "Must be able to get value.";
+ ASSERT_TRUE(jsStr.init(aCx, str))
+ << "Value must be able to be init'd to a jsstring.";
+ ASSERT_EQ(NS_ConvertUTF16toUTF8(jsStr).Length(), (uint32_t)80)
+ << "Value must have been truncated to 80 bytes.";
+
+ // Extra is at index 5
+ JS::Rooted<JS::Value> obj(aCx);
+ ASSERT_TRUE(JS_GetElement(aCx, recordArray, 5, &obj))
+ << "Must be able to get extra.";
+ JS::RootedObject extraObj(aCx, &obj.toObject());
+ JS::Rooted<JS::Value> extraVal(aCx);
+ ASSERT_TRUE(JS_GetProperty(aCx, extraObj, extraKey.get(), &extraVal))
+ << "Must be able to get the extra key's value.";
+ ASSERT_TRUE(jsStr.init(aCx, extraVal))
+ << "Extra must be able to be init'd to a jsstring.";
+ ASSERT_EQ(NS_ConvertUTF16toUTF8(jsStr).Length(), (uint32_t)80)
+ << "Extra must have been truncated to 80 bytes.";
+}
diff --git a/toolkit/components/telemetry/tests/gtest/TestHistograms.cpp b/toolkit/components/telemetry/tests/gtest/TestHistograms.cpp
new file mode 100644
index 0000000000..5bb22dc3b5
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TestHistograms.cpp
@@ -0,0 +1,891 @@
+/* vim:set ts=2 sw=2 sts=0 et: */
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "gtest/gtest.h"
+#include "js/Conversions.h"
+#include "mozilla/Telemetry.h"
+#include "TelemetryFixture.h"
+#include "TelemetryTestHelpers.h"
+
+using namespace mozilla;
+using namespace TelemetryTestHelpers;
+
+TEST_F(TelemetryTestFixture, AccumulateCountHistogram) {
+ const uint32_t kExpectedValue = 200;
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ const char* telemetryTestCountName =
+ Telemetry::GetHistogramName(Telemetry::TELEMETRY_TEST_COUNT);
+ ASSERT_STREQ(telemetryTestCountName, "TELEMETRY_TEST_COUNT")
+ << "The histogram name is wrong";
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT"_ns,
+ false);
+
+ // Accumulate in the histogram
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_COUNT, kExpectedValue / 2);
+ Telemetry::Accumulate("TELEMETRY_TEST_COUNT", kExpectedValue / 2);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, telemetryTestCountName, &snapshot,
+ false);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), telemetryTestCountName, snapshot, &histogram);
+
+ // Get "sum" property from histogram
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", histogram, &sum);
+
+ // Check that the "sum" stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture, AccumulateKeyedCountHistogram) {
+ const uint32_t kExpectedValue = 100;
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_COUNT"_ns, true);
+
+ // Accumulate data in the provided key within the histogram
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_KEYED_COUNT, "sample"_ns,
+ kExpectedValue);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_KEYED_COUNT",
+ &snapshot, true);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_COUNT", snapshot,
+ &histogram);
+
+ // Get "sample" property from histogram
+ JS::RootedValue expectedKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sample", histogram, &expectedKeyData);
+
+ // Get "sum" property from keyed data
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", expectedKeyData, &sum);
+
+ // Check that the sum stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is not returning expected sum";
+}
+
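+// TELEMETRY_TEST_KEYED_KEYS only accepts a fixed set of allowed keys (the
+// tests use "testkey" and "CommonKey"); accumulations against any other key
+// are dropped and counted in the keyed scalar
+// 'telemetry.accumulate_unknown_histogram_keys' instead.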
+TEST_F(TelemetryTestFixture, TestKeyedKeysHistogram) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ JS::RootedValue testHistogram(cx.GetJSContext());
+ JS::RootedValue rval(cx.GetJSContext());
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_KEYS"_ns, true);
+
+ // Test the accumulation on both the allowed and unallowed keys, using
+ // the API that accepts histogram IDs.
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_KEYED_KEYS, "not-allowed"_ns,
+ 1);
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_KEYED_KEYS, "testkey"_ns, 0);
+ // Do the same, using the API that accepts the histogram name as a string.
+ Telemetry::Accumulate("TELEMETRY_TEST_KEYED_KEYS", "not-allowed"_ns, 1);
+ Telemetry::Accumulate("TELEMETRY_TEST_KEYED_KEYS", "CommonKey"_ns, 1);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_KEYED_KEYS",
+ &snapshot, true);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_KEYS", snapshot,
+ &histogram);
+
+ // Get "testkey" property from histogram and check that it stores the correct
+ // data.
+ JS::RootedValue expectedKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "testkey", histogram, &expectedKeyData);
+ ASSERT_TRUE(!expectedKeyData.isUndefined())
+ << "Cannot find the expected key in the histogram data";
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", expectedKeyData, &sum);
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, 0U)
+ << "The histogram is not returning expected sum for 'testkey'";
+
+ // Do the same for the "CommonKey" property.
+ GetProperty(cx.GetJSContext(), "CommonKey", histogram, &expectedKeyData);
+ ASSERT_TRUE(!expectedKeyData.isUndefined())
+ << "Cannot find the expected key in the histogram data";
+ GetProperty(cx.GetJSContext(), "sum", expectedKeyData, &sum);
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, 1U)
+ << "The histogram is not returning expected sum for 'CommonKey'";
+
+ GetProperty(cx.GetJSContext(), "not-allowed", histogram, &expectedKeyData);
+ ASSERT_TRUE(expectedKeyData.isUndefined())
+ << "Unallowed keys must not be recorded in the histogram data";
+
+  // The 'not-allowed' key accumulation for 'TELEMETRY_TEST_KEYED_KEYS' was
+  // attempted twice, so we expect the count of
+  // 'telemetry.accumulate_unknown_histogram_keys' to be 2.
+ const uint32_t expectedAccumulateUnknownCount = 2;
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckKeyedUintScalar("telemetry.accumulate_unknown_histogram_keys",
+ "TELEMETRY_TEST_KEYED_KEYS", cx.GetJSContext(),
+ scalarsSnapshot, expectedAccumulateUnknownCount);
+}
+
+TEST_F(TelemetryTestFixture, AccumulateCategoricalHistogram) {
+ const uint32_t kExpectedValue = 2;
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_CATEGORICAL"_ns, false);
+
+ // Accumulate one unit into the categorical histogram with label
+ // Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel
+ Telemetry::AccumulateCategorical(
+ Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel);
+
+ // Accumulate another unit into the same categorical histogram using a string
+ // label
+ Telemetry::AccumulateCategorical(Telemetry::TELEMETRY_TEST_CATEGORICAL,
+ "CommonLabel"_ns);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_CATEGORICAL",
+ &snapshot, false);
+
+ // Get our histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_CATEGORICAL", snapshot,
+ &histogram);
+
+ // Get values object from histogram. Each entry in the object maps to a label
+ // in the histogram.
+ JS::RootedValue values(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", histogram, &values);
+
+ // Get the value for the label we care about
+ JS::RootedValue value(cx.GetJSContext());
+ GetElement(cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel),
+ values, &value);
+
+ // Check that the value stored in the histogram matches with |kExpectedValue|
+ uint32_t uValue = 0;
+ JS::ToUint32(cx.GetJSContext(), value, &uValue);
+ ASSERT_EQ(uValue, kExpectedValue)
+ << "The histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture, AccumulateKeyedCategoricalHistogram) {
+ const uint32_t kSampleExpectedValue = 2;
+ const uint32_t kOtherSampleExpectedValue = 1;
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_CATEGORICAL"_ns, true);
+
+ // Accumulate one unit into the categorical histogram with label
+ // Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel
+ Telemetry::AccumulateCategoricalKeyed(
+ "sample"_ns,
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel);
+ // Accumulate another unit into the same categorical histogram
+ Telemetry::AccumulateCategoricalKeyed(
+ "sample"_ns,
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel);
+  // Accumulate another unit under a different key of the same keyed
+  // histogram
+ Telemetry::AccumulateCategoricalKeyed(
+ "other-sample"_ns,
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_CATEGORICAL", &snapshot, true);
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_CATEGORICAL", snapshot,
+ &histogram);
+
+ // Check that the sample histogram contains the values we expect
+ JS::RootedValue sample(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sample", histogram, &sample);
+ // Get values object from sample. Each entry in the object maps to a label in
+ // the histogram.
+ JS::RootedValue sampleValues(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", sample, &sampleValues);
+ // Get the value for the label we care about
+ JS::RootedValue sampleValue(cx.GetJSContext());
+ GetElement(
+ cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel),
+ sampleValues, &sampleValue);
+ // Check that the value stored in the histogram matches with
+ // |kSampleExpectedValue|
+ uint32_t uSampleValue = 0;
+ JS::ToUint32(cx.GetJSContext(), sampleValue, &uSampleValue);
+ ASSERT_EQ(uSampleValue, kSampleExpectedValue)
+ << "The sample histogram is not returning expected value";
+
+ // Check that the other-sample histogram contains the values we expect
+ JS::RootedValue otherSample(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "other-sample", histogram, &otherSample);
+ // Get values object from the other-sample. Each entry in the object maps to a
+ // label in the histogram.
+ JS::RootedValue otherValues(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", otherSample, &otherValues);
+ // Get the value for the label we care about
+ JS::RootedValue otherValue(cx.GetJSContext());
+ GetElement(
+ cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel),
+ otherValues, &otherValue);
+ // Check that the value stored in the histogram matches with
+ // |kOtherSampleExpectedValue|
+ uint32_t uOtherValue = 0;
+ JS::ToUint32(cx.GetJSContext(), otherValue, &uOtherValue);
+ ASSERT_EQ(uOtherValue, kOtherSampleExpectedValue)
+ << "The other-sample histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture, AccumulateCountHistogram_MultipleSamples) {
+ nsTArray<uint32_t> samples({4, 4, 4});
+ const uint32_t kExpectedSum = 12;
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT"_ns,
+ false);
+
+ // Accumulate in histogram
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_COUNT, samples);
+
+ // Get a snapshot of all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT", &snapshot,
+ false);
+
+ // Get histogram from snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_COUNT", snapshot, &histogram);
+
+ // Get "sum" from histogram
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", histogram, &sum);
+
+  // Check that the sum matches |kExpectedSum|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedSum)
+ << "This histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture, AccumulateLinearHistogram_MultipleSamples) {
+ nsTArray<uint32_t> samples({4, 4, 4});
+ const uint32_t kExpectedCount = 3;
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_LINEAR"_ns, false);
+
+ // Accumulate in the histogram
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_LINEAR, samples);
+
+ // Get a snapshot of all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_LINEAR",
+ &snapshot, false);
+
+ // Get histogram from snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_LINEAR", snapshot, &histogram);
+
+ // Get "values" object from histogram
+ JS::RootedValue values(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", histogram, &values);
+
+ // Index 0 is only for values less than 'low'. Values within range start at
+ // index 1
+ JS::RootedValue count(cx.GetJSContext());
+ const uint32_t index = 1;
+ GetElement(cx.GetJSContext(), index, values, &count);
+
+  // Check that this count matches |kExpectedCount|
+ uint32_t uCount = 0;
+ JS::ToUint32(cx.GetJSContext(), count, &uCount);
+ ASSERT_EQ(uCount, kExpectedCount)
+ << "The histogram did not accumulate the correct number of values";
+}
+
+TEST_F(TelemetryTestFixture, AccumulateLinearHistogram_DifferentSamples) {
+ nsTArray<uint32_t> samples(
+ {4, 8, 2147483646, uint32_t(INT_MAX) + 1, UINT32_MAX});
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ mTelemetry->ClearScalars();
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_LINEAR"_ns, false);
+
+ // Accumulate in histogram
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_LINEAR, samples);
+
+ // Get a snapshot of all histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_LINEAR",
+ &snapshot, false);
+
+ // Get histogram from snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_LINEAR", snapshot, &histogram);
+
+ // Get values object from histogram
+ JS::RootedValue values(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", histogram, &values);
+
+ // Get values in first and last buckets
+ JS::RootedValue countFirst(cx.GetJSContext());
+ JS::RootedValue countLast(cx.GetJSContext());
+ const uint32_t firstIndex = 1;
+ // Buckets are indexed by their start value
+ const uint32_t lastIndex = INT32_MAX - 1;
+ GetElement(cx.GetJSContext(), firstIndex, values, &countFirst);
+ GetElement(cx.GetJSContext(), lastIndex, values, &countLast);
+
+ // Check that the values match
+ uint32_t uCountFirst = 0;
+ uint32_t uCountLast = 0;
+ JS::ToUint32(cx.GetJSContext(), countFirst, &uCountFirst);
+ JS::ToUint32(cx.GetJSContext(), countLast, &uCountLast);
+
+ const uint32_t kExpectedCountFirst = 2;
+  // We expect 2147483646 to be in the last bucket, as well as the two samples
+  // above 2^31 (prior to bug 1438335, values between INT_MAX and UINT32_MAX
+  // would end up as 0s).
+ const uint32_t kExpectedCountLast = 3;
+ ASSERT_EQ(uCountFirst, kExpectedCountFirst)
+ << "The first bucket did not accumulate the correct number of values";
+ ASSERT_EQ(uCountLast, kExpectedCountLast)
+ << "The last bucket did not accumulate the correct number of values";
+
+ // We accumulated two values that had to be clamped. We expect the count in
+ // 'telemetry.accumulate_clamped_values' to be 2 (only one storage).
+ const uint32_t expectedAccumulateClampedCount = 2;
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckKeyedUintScalar("telemetry.accumulate_clamped_values",
+ "TELEMETRY_TEST_LINEAR", cx.GetJSContext(),
+ scalarsSnapshot, expectedAccumulateClampedCount);
+}
+
+TEST_F(TelemetryTestFixture, AccumulateKeyedCountHistogram_MultipleSamples) {
+ const nsTArray<uint32_t> samples({5, 10, 15});
+ const uint32_t kExpectedSum = 5 + 10 + 15;
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_COUNT"_ns, true);
+
+ // Accumulate data in the provided key within the histogram
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_KEYED_COUNT, "sample"_ns,
+ samples);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_KEYED_COUNT",
+ &snapshot, true);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_COUNT", snapshot,
+ &histogram);
+
+ // Get "sample" property from histogram
+ JS::RootedValue expectedKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sample", histogram, &expectedKeyData);
+
+ // Get "sum" property from keyed data
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", expectedKeyData, &sum);
+
+ // Check that the sum stored in the histogram matches with |kExpectedSum|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedSum)
+ << "The histogram is not returning expected sum";
+}
+
+TEST_F(TelemetryTestFixture, TestKeyedLinearHistogram_MultipleSamples) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ mTelemetry->ClearScalars();
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_LINEAR"_ns, true);
+
+ const nsTArray<uint32_t> samples({1, 5, 250000, UINT_MAX});
+ // Test the accumulation on the key 'testkey', using
+ // the API that accepts histogram IDs.
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_KEYED_LINEAR, "testkey"_ns,
+ samples);
+
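+  // Samples 1 and 5 should land in the first in-range bucket, while 250000
+  // and the clamped UINT_MAX sample should both land in the last bucket.
+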
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_KEYED_LINEAR",
+ &snapshot, true);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_LINEAR", snapshot,
+ &histogram);
+
+ // Get "testkey" property from histogram.
+ JS::RootedValue expectedKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "testkey", histogram, &expectedKeyData);
+ ASSERT_TRUE(!expectedKeyData.isUndefined())
+ << "Cannot find the expected key in the histogram data";
+
+ // Get values object from 'testkey' histogram.
+ JS::RootedValue values(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", expectedKeyData, &values);
+
+ // Get values in first and last buckets.
+ JS::RootedValue countFirst(cx.GetJSContext());
+ JS::RootedValue countLast(cx.GetJSContext());
+ const uint32_t firstIndex = 1;
+ // Buckets are indexed by their start value
+ const uint32_t lastIndex = 250000;
+ GetElement(cx.GetJSContext(), firstIndex, values, &countFirst);
+ GetElement(cx.GetJSContext(), lastIndex, values, &countLast);
+
+ // Check that the values match.
+ uint32_t uCountFirst = 0;
+ uint32_t uCountLast = 0;
+ JS::ToUint32(cx.GetJSContext(), countFirst, &uCountFirst);
+ JS::ToUint32(cx.GetJSContext(), countLast, &uCountLast);
+
+ const uint32_t kExpectedCountFirst = 2;
+ const uint32_t kExpectedCountLast = 2;
+ ASSERT_EQ(uCountFirst, kExpectedCountFirst)
+ << "The first bucket did not accumulate the correct number of values for "
+ "key 'testkey'";
+ ASSERT_EQ(uCountLast, kExpectedCountLast)
+ << "The last bucket did not accumulate the correct number of values for "
+ "key 'testkey'";
+
+  // We accumulated one keyed value that had to be clamped. We expect the
+  // count in 'telemetry.accumulate_clamped_values' to be 1.
+ const uint32_t expectedAccumulateClampedCount = 1;
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckKeyedUintScalar("telemetry.accumulate_clamped_values",
+ "TELEMETRY_TEST_KEYED_LINEAR", cx.GetJSContext(),
+ scalarsSnapshot, expectedAccumulateClampedCount);
+}
+
+TEST_F(TelemetryTestFixture, TestKeyedKeysHistogram_MultipleSamples) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+ mTelemetry->ClearScalars();
+ const nsTArray<uint32_t> samples({false, false, true, 32, true});
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_KEYS"_ns, true);
+
+ // Test the accumulation on both the allowed and unallowed keys, using
+ // the API that accepts histogram IDs.
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_KEYED_KEYS, "not-allowed"_ns,
+ samples);
+ Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_KEYED_KEYS, "testkey"_ns,
+ samples);
+
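+  // The samples are recorded as booleans: the two zero samples should count
+  // as 'false' and the three non-zero samples (including 32) as 'true'.
+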
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_KEYED_KEYS",
+ &snapshot, true);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_KEYS", snapshot,
+ &histogram);
+
+ // Get "testkey" property from histogram and check that it stores the correct
+ // data.
+ JS::RootedValue testKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "testkey", histogram, &testKeyData);
+ ASSERT_TRUE(!testKeyData.isUndefined())
+ << "Cannot find the key 'testkey' in the histogram data";
+
+ JS::RootedValue values(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", testKeyData, &values);
+
+ // Get values in buckets 0,1,2
+ const uint32_t falseIndex = 0;
+ const uint32_t trueIndex = 1;
+ const uint32_t otherIndex = 2;
+
+ JS::RootedValue countFalse(cx.GetJSContext());
+ JS::RootedValue countTrue(cx.GetJSContext());
+ JS::RootedValue countOther(cx.GetJSContext());
+
+ GetElement(cx.GetJSContext(), falseIndex, values, &countFalse);
+ GetElement(cx.GetJSContext(), trueIndex, values, &countTrue);
+ GetElement(cx.GetJSContext(), otherIndex, values, &countOther);
+
+ uint32_t uCountFalse = 0;
+ uint32_t uCountTrue = 0;
+ uint32_t uCountOther = 0;
+ JS::ToUint32(cx.GetJSContext(), countFalse, &uCountFalse);
+ JS::ToUint32(cx.GetJSContext(), countTrue, &uCountTrue);
+ JS::ToUint32(cx.GetJSContext(), countOther, &uCountOther);
+
+ const uint32_t kExpectedCountFalse = 2;
+ const uint32_t kExpectedCountTrue = 3;
+ const uint32_t kExpectedCountOther = 0;
+
+ ASSERT_EQ(uCountFalse, kExpectedCountFalse)
+ << "The histogram did not accumulate the correct number of 'false' "
+ "booleans for key 'testkey'";
+ ASSERT_EQ(uCountTrue, kExpectedCountTrue)
+ << "The histogram did not accumulate the correct number of 'true' "
+ "booleans for key 'testkey'";
+ ASSERT_EQ(uCountOther, kExpectedCountOther)
+ << "The histogram did not accumulate the correct number of undefined "
+ "values for key 'testkey'";
+
+ // Here we check that we are not accumulating to a different (but still
+ // 'allowed') key. Get "CommonKey" property from histogram and check that it
+ // has no data. Since we accumulated no data to it, commonKeyData should be
+ // undefined.
+ JS::RootedValue commonKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "CommonKey", histogram, &commonKeyData);
+ ASSERT_TRUE(commonKeyData.isUndefined())
+ << "Found data in key 'CommonKey' even though we accumulated no data to "
+ "it";
+
+ // Here we check that our function does not allow accumulation into unallowed
+ // keys. Get 'not-allowed' property from histogram and check that this also
+ // has no data. This should contain no data because this key is not allowed.
+ JS::RootedValue notAllowedKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "not-allowed", histogram, &notAllowedKeyData);
+  ASSERT_TRUE(notAllowedKeyData.isUndefined())
+  << "Found data in key 'not-allowed' even though accumulating data to it is "
+  "not allowed";
+
+  // The 'not-allowed' key accumulation for 'TELEMETRY_TEST_KEYED_KEYS' was
+  // attempted once, so we expect the count of
+  // 'telemetry.accumulate_unknown_histogram_keys' to be 1.
+ const uint32_t expectedAccumulateUnknownCount = 1;
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckKeyedUintScalar("telemetry.accumulate_unknown_histogram_keys",
+ "TELEMETRY_TEST_KEYED_KEYS", cx.GetJSContext(),
+ scalarsSnapshot, expectedAccumulateUnknownCount);
+}
+
+TEST_F(TelemetryTestFixture,
+ AccumulateCategoricalHistogram_MultipleStringLabels) {
+ const uint32_t kExpectedValue = 2;
+ const nsTArray<nsCString> labels({"CommonLabel"_ns, "CommonLabel"_ns});
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_CATEGORICAL"_ns, false);
+
+ // Accumulate the units into a categorical histogram using a string label
+ Telemetry::AccumulateCategorical(Telemetry::TELEMETRY_TEST_CATEGORICAL,
+ labels);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_CATEGORICAL",
+ &snapshot, false);
+
+ // Get our histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_CATEGORICAL", snapshot,
+ &histogram);
+
+ // Get values object from histogram. Each entry in the object maps to a label
+ // in the histogram.
+ JS::RootedValue values(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", histogram, &values);
+
+ // Get the value for the label we care about
+ JS::RootedValue value(cx.GetJSContext());
+ GetElement(cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel),
+ values, &value);
+
+ // Check that the value stored in the histogram matches with |kExpectedValue|
+ uint32_t uValue = 0;
+ JS::ToUint32(cx.GetJSContext(), value, &uValue);
+ ASSERT_EQ(uValue, kExpectedValue)
+ << "The histogram is not returning expected value";
+
+ // Now we check for no accumulation when a bad label is present in the array.
+ //
+  // The 'values' property is not initialized unless data is accumulated, so
+  // keeping a separate test for this case alone would be wasteful: we would
+  // have to accumulate some data anyway.
+
+ const nsTArray<nsCString> badLabelArray({"CommonLabel"_ns, "BadLabel"_ns});
+
+ // Try to accumulate the array into the histogram.
+ Telemetry::AccumulateCategorical(Telemetry::TELEMETRY_TEST_CATEGORICAL,
+ badLabelArray);
+
+ // Get snapshot of all the histograms
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_CATEGORICAL",
+ &snapshot, false);
+
+ // Get our histogram from the snapshot
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_CATEGORICAL", snapshot,
+ &histogram);
+
+ // Get values array from histogram
+ GetProperty(cx.GetJSContext(), "values", histogram, &values);
+
+ // Get the value for the label we care about
+ GetElement(cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel),
+ values, &value);
+
+ // Check that the value stored in the histogram matches with |kExpectedValue|
+ uValue = 0;
+ JS::ToUint32(cx.GetJSContext(), value, &uValue);
+ ASSERT_EQ(uValue, kExpectedValue)
+ << "The histogram accumulated data when it should not have";
+}
+
+TEST_F(TelemetryTestFixture,
+ AccumulateCategoricalHistogram_MultipleEnumValues) {
+ const uint32_t kExpectedValue = 2;
+ const nsTArray<Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL> enumLabels(
+ {Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel,
+ Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel});
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_CATEGORICAL"_ns, false);
+
+ // Accumulate the units into a categorical histogram using the enumLabels
+ // array
+ Telemetry::AccumulateCategorical<
+ Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL>(enumLabels);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_CATEGORICAL",
+ &snapshot, false);
+
+ // Get our histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_CATEGORICAL", snapshot,
+ &histogram);
+
+ // Get values object from histogram. Each entry in the object maps to a label
+ // in the histogram.
+ JS::RootedValue values(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", histogram, &values);
+
+ // Get the value for the label we care about
+ JS::RootedValue value(cx.GetJSContext());
+ GetElement(cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL::CommonLabel),
+ values, &value);
+
+ // Check that the value stored in the histogram matches with |kExpectedValue|
+ uint32_t uValue = 0;
+ JS::ToUint32(cx.GetJSContext(), value, &uValue);
+ ASSERT_EQ(uValue, kExpectedValue)
+ << "The histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture,
+ AccumulateKeyedCategoricalHistogram_MultipleEnumValues) {
+ const uint32_t kExpectedCommonLabel = 2;
+ const uint32_t kExpectedLabel2 = 1;
+ const nsTArray<Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL> enumLabels(
+ {Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel,
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel,
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::Label2});
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_CATEGORICAL"_ns, true);
+
+ // Accumulate the array into the categorical keyed histogram
+ Telemetry::AccumulateCategoricalKeyed("sampleKey"_ns, enumLabels);
+
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_CATEGORICAL", &snapshot, true);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_CATEGORICAL", snapshot,
+ &histogram);
+
+ // Check that the sampleKey histogram contains correct number of CommonLabel
+ // samples
+ JS::RootedValue sample(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sampleKey", histogram, &sample);
+
+ // Get values object from the sample. Each entry in the object maps to a label
+ // in the histogram.
+ JS::RootedValue sampleKeyValues(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "values", sample, &sampleKeyValues);
+
+ // Get the count of CommonLabel
+ JS::RootedValue commonLabelValue(cx.GetJSContext());
+ GetElement(
+ cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::CommonLabel),
+ sampleKeyValues, &commonLabelValue);
+
+ // Check that the value stored in the histogram matches with
+ // |kExpectedCommonLabel|
+ uint32_t uCommonLabelValue = 0;
+ JS::ToUint32(cx.GetJSContext(), commonLabelValue, &uCommonLabelValue);
+ ASSERT_EQ(uCommonLabelValue, kExpectedCommonLabel)
+ << "The sampleKey histogram did not accumulate the correct number of "
+ "CommonLabel samples";
+
+  // Check that the sampleKey histogram contains the correct number of Label2
+  // samples. Get the count of Label2.
+ JS::RootedValue label2Value(cx.GetJSContext());
+ GetElement(cx.GetJSContext(),
+ static_cast<uint32_t>(
+ Telemetry::LABELS_TELEMETRY_TEST_KEYED_CATEGORICAL::Label2),
+ sampleKeyValues, &label2Value);
+
+ // Check that the value stored in the histogram matches with |kExpectedLabel2|
+ uint32_t uLabel2Value = 0;
+ JS::ToUint32(cx.GetJSContext(), label2Value, &uLabel2Value);
+ ASSERT_EQ(uLabel2Value, kExpectedLabel2)
+ << "The sampleKey histogram did not accumulate the correct number of "
+ "Label2 samples";
+}
+
+TEST_F(TelemetryTestFixture, AccumulateTimeDelta) {
+ const uint32_t kExpectedValue = 100;
+ const TimeStamp start = TimeStamp::Now();
+ const TimeDuration delta = TimeDuration::FromMilliseconds(50);
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT"_ns,
+ false);
+
+ // Accumulate in the histogram
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_COUNT, start - delta,
+ start);
+
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_COUNT, start - delta,
+ start);
+
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_COUNT, start, start);
+
+  // A start timestamp later than the end timestamp gives a zero contribution
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_COUNT, start + delta,
+ start);
+
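+  // Taken together, the two 50 ms deltas and the two zero contributions
+  // should add up to the expected sum of 100.
+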
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_COUNT", &snapshot,
+ false);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_COUNT", snapshot, &histogram);
+
+ // Get "sum" property from histogram
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", histogram, &sum);
+
+ // Check that the "sum" stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is not returning expected value";
+}
+
+TEST_F(TelemetryTestFixture, AccumulateKeyedTimeDelta) {
+ const uint32_t kExpectedValue = 100;
+ const TimeStamp start = TimeStamp::Now();
+ const TimeDuration delta = TimeDuration::FromMilliseconds(50);
+
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ GetAndClearHistogram(cx.GetJSContext(), mTelemetry,
+ "TELEMETRY_TEST_KEYED_COUNT"_ns, true);
+
+ // Accumulate time delta in the provided key within the histogram
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_KEYED_COUNT,
+ "sample"_ns, start - delta, start);
+
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_KEYED_COUNT,
+ "sample"_ns, start - delta, start);
+
+  // A start timestamp later than the end timestamp gives a zero contribution
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_KEYED_COUNT,
+ "sample"_ns, start + delta, start);
+
+ Telemetry::AccumulateTimeDelta(Telemetry::TELEMETRY_TEST_KEYED_COUNT,
+ "sample"_ns, start, start);
+
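+  // As above, the two 50 ms deltas and the two zero contributions should add
+  // up to the expected sum of 100 for the 'sample' key.
+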
+ // Get a snapshot for all the histograms
+ JS::RootedValue snapshot(cx.GetJSContext());
+ GetSnapshots(cx.GetJSContext(), mTelemetry, "TELEMETRY_TEST_KEYED_COUNT",
+ &snapshot, true);
+
+ // Get the histogram from the snapshot
+ JS::RootedValue histogram(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "TELEMETRY_TEST_KEYED_COUNT", snapshot,
+ &histogram);
+
+ // Get "sample" property from histogram
+ JS::RootedValue expectedKeyData(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sample", histogram, &expectedKeyData);
+
+ // Get "sum" property from keyed data
+ JS::RootedValue sum(cx.GetJSContext());
+ GetProperty(cx.GetJSContext(), "sum", expectedKeyData, &sum);
+
+ // Check that the sum stored in the histogram matches with |kExpectedValue|
+ uint32_t uSum = 0;
+ JS::ToUint32(cx.GetJSContext(), sum, &uSum);
+ ASSERT_EQ(uSum, kExpectedValue)
+ << "The histogram is not returning expected sum";
+}
diff --git a/toolkit/components/telemetry/tests/gtest/TestOrigins.cpp b/toolkit/components/telemetry/tests/gtest/TestOrigins.cpp
new file mode 100644
index 0000000000..b52391e869
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TestOrigins.cpp
@@ -0,0 +1,290 @@
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "core/TelemetryOrigin.h"
+#include "gtest/gtest.h"
+#include "gmock/gmock.h"
+#include "mozilla/ContentBlockingLog.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/Services.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/Unused.h"
+#include "nsIObserverService.h"
+#include "TelemetryFixture.h"
+#include "TelemetryTestHelpers.h"
+
+using namespace mozilla;
+using namespace TelemetryTestHelpers;
+using mozilla::Telemetry::OriginMetricID;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::StrEq;
+
+constexpr auto kTelemetryTest1Metric = "telemetry.test_test1"_ns;
+
+constexpr auto kDoubleclickOrigin = "doubleclick.net"_ns;
+constexpr auto kDoubleclickOriginHash =
+ "uXNT1PzjAVau8b402OMAIGDejKbiXfQX5iXvPASfO/s="_ns;
+constexpr auto kFacebookOrigin = "fb.com"_ns;
+constexpr auto kUnknownOrigin1 =
+ "this origin isn't known to Origin Telemetry"_ns;
+constexpr auto kUnknownOrigin2 = "neither is this one"_ns;
+
+// Properly prepare the prio prefs
+// (Sourced from PrioEncoder.cpp from when it was being prototyped)
+constexpr auto prioKeyA =
+ "35AC1C7576C7C6EDD7FED6BCFC337B34D48CB4EE45C86BEEFB40BD8875707733"_ns;
+constexpr auto prioKeyB =
+ "26E6674E65425B823F1F1D5F96E3BB3EF9E406EC7FBA7DEF8B08A35DD135AF50"_ns;
+
+// Test that we can properly record origin stuff using the C++ API.
+TEST_F(TelemetryTestFixture, RecordOrigin) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+ JSContext* aCx = cx.GetJSContext();
+
+ Unused << mTelemetry->ClearOrigins();
+
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ mozilla::ContentBlockingLog::kDummyOriginHash);
+
+ JS::RootedValue originSnapshot(aCx);
+ GetOriginSnapshot(aCx, &originSnapshot);
+
+ ASSERT_FALSE(originSnapshot.isNullOrUndefined())
+ << "Origin snapshot must not be null/undefined.";
+
+ JS::RootedValue origins(aCx);
+ JS::RootedObject snapshotObj(aCx, &originSnapshot.toObject());
+ ASSERT_TRUE(
+ JS_GetProperty(aCx, snapshotObj, kTelemetryTest1Metric.get(), &origins))
+ << "telemetry.test_test1 must be in the snapshot.";
+
+ JS::RootedObject originsObj(aCx, &origins.toObject());
+ JS::RootedValue count(aCx);
+ ASSERT_TRUE(JS_GetProperty(
+ aCx, originsObj, mozilla::ContentBlockingLog::kDummyOriginHash.get(),
+ &count));
+ ASSERT_TRUE(count.isInt32() && count.toInt32() == 1)
+ << "Must have recorded the origin exactly once.";
+
+ // Now test that the snapshot didn't clear things out.
+ GetOriginSnapshot(aCx, &originSnapshot);
+ ASSERT_FALSE(originSnapshot.isNullOrUndefined());
+ JS::RootedObject unemptySnapshotObj(aCx, &originSnapshot.toObject());
+ JS::Rooted<JS::IdVector> ids(aCx, JS::IdVector(aCx));
+ ASSERT_TRUE(JS_Enumerate(aCx, unemptySnapshotObj, &ids));
+  ASSERT_GT(ids.length(), (unsigned)0) << "Returned object must not be empty.";
+}
+
+TEST_F(TelemetryTestFixture, RecordOriginTwiceAndClear) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+ JSContext* aCx = cx.GetJSContext();
+
+ Unused << mTelemetry->ClearOrigins();
+
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ kDoubleclickOrigin);
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ kDoubleclickOrigin);
+
+ JS::RootedValue originSnapshot(aCx);
+ GetOriginSnapshot(aCx, &originSnapshot, true /* aClear */);
+
+ ASSERT_FALSE(originSnapshot.isNullOrUndefined())
+ << "Origin snapshot must not be null/undefined.";
+
+ JS::RootedValue origins(aCx);
+ JS::RootedObject snapshotObj(aCx, &originSnapshot.toObject());
+ ASSERT_TRUE(
+ JS_GetProperty(aCx, snapshotObj, kTelemetryTest1Metric.get(), &origins))
+ << "telemetry.test_test1 must be in the snapshot.";
+
+ JS::RootedObject originsObj(aCx, &origins.toObject());
+ JS::RootedValue count(aCx);
+ ASSERT_TRUE(
+ JS_GetProperty(aCx, originsObj, kDoubleclickOrigin.get(), &count));
+ ASSERT_TRUE(count.isInt32() && count.toInt32() == 2)
+ << "Must have recorded the origin exactly twice.";
+
+ // Now check that snapshotting with clear actually cleared it.
+ GetOriginSnapshot(aCx, &originSnapshot);
+ ASSERT_FALSE(originSnapshot.isNullOrUndefined());
+ JS::RootedObject emptySnapshotObj(aCx, &originSnapshot.toObject());
+ JS::Rooted<JS::IdVector> ids(aCx, JS::IdVector(aCx));
+ ASSERT_TRUE(JS_Enumerate(aCx, emptySnapshotObj, &ids));
+ ASSERT_EQ(ids.length(), (unsigned)0) << "Returned object must be empty.";
+}
+
+TEST_F(TelemetryTestFixture, RecordOriginTwiceMixed) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+ JSContext* aCx = cx.GetJSContext();
+
+ Unused << mTelemetry->ClearOrigins();
+
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ kDoubleclickOrigin);
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ kDoubleclickOriginHash);
+
+ Preferences::SetCString("prio.publicKeyA", prioKeyA);
+ Preferences::SetCString("prio.publicKeyB", prioKeyB);
+
+ nsTArray<Tuple<nsCString, nsCString>> encodedStrings;
+ GetEncodedOriginStrings(aCx, kTelemetryTest1Metric + "-%u"_ns,
+ encodedStrings);
+ ASSERT_EQ(2 * TelemetryOrigin::SizeOfPrioDatasPerMetric(),
+ encodedStrings.Length());
+
+ JS::RootedValue originSnapshot(aCx);
+ GetOriginSnapshot(aCx, &originSnapshot, true /* aClear */);
+
+ ASSERT_FALSE(originSnapshot.isNullOrUndefined())
+ << "Origin snapshot must not be null/undefined.";
+
+ JS::RootedValue origins(aCx);
+ JS::RootedObject snapshotObj(aCx, &originSnapshot.toObject());
+ ASSERT_TRUE(
+ JS_GetProperty(aCx, snapshotObj, kTelemetryTest1Metric.get(), &origins))
+ << "telemetry.test_test1 must be in the snapshot.";
+
+ JS::RootedObject originsObj(aCx, &origins.toObject());
+ JS::RootedValue count(aCx);
+ ASSERT_TRUE(
+ JS_GetProperty(aCx, originsObj, kDoubleclickOrigin.get(), &count));
+ ASSERT_TRUE(count.isInt32() && count.toInt32() == 2)
+ << "Must have recorded the origin exactly twice.";
+}
+
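+// Origins not in the known-origins list are recorded under the "__UNKNOWN__"
+// key; the test below also checks that recording a second, different unknown
+// origin does not increase that count.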
+TEST_F(TelemetryTestFixture, RecordUnknownOrigin) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+ JSContext* aCx = cx.GetJSContext();
+
+ Unused << mTelemetry->ClearOrigins();
+
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1, kUnknownOrigin1);
+
+ JS::RootedValue originSnapshot(aCx);
+ GetOriginSnapshot(aCx, &originSnapshot);
+
+ ASSERT_FALSE(originSnapshot.isNullOrUndefined())
+ << "Origin snapshot must not be null/undefined.";
+
+ JS::RootedValue origins(aCx);
+ JS::RootedObject snapshotObj(aCx, &originSnapshot.toObject());
+ ASSERT_TRUE(
+ JS_GetProperty(aCx, snapshotObj, kTelemetryTest1Metric.get(), &origins))
+ << "telemetry.test_test1 must be in the snapshot.";
+
+ JS::RootedObject originsObj(aCx, &origins.toObject());
+ JS::RootedValue count(aCx);
+ ASSERT_TRUE(JS_GetProperty(aCx, originsObj, "__UNKNOWN__", &count));
+ ASSERT_TRUE(count.isInt32() && count.toInt32() == 1)
+ << "Must have recorded the unknown origin exactly once.";
+
+ // Record a second, different unknown origin and ensure only one is stored.
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1, kUnknownOrigin2);
+
+ GetOriginSnapshot(aCx, &originSnapshot);
+
+ ASSERT_FALSE(originSnapshot.isNullOrUndefined())
+ << "Origin snapshot must not be null/undefined.";
+
+ JS::RootedObject snapshotObj2(aCx, &originSnapshot.toObject());
+ ASSERT_TRUE(
+ JS_GetProperty(aCx, snapshotObj2, kTelemetryTest1Metric.get(), &origins))
+ << "telemetry.test_test1 must be in the snapshot.";
+
+ JS::RootedObject originsObj2(aCx, &origins.toObject());
+ JS::RootedValue count2(aCx);
+ ASSERT_TRUE(JS_GetProperty(aCx, originsObj2, "__UNKNOWN__", &count2));
+ ASSERT_TRUE(count2.isInt32() && count2.toInt32() == 1)
+ << "Must have recorded the unknown origin exactly once.";
+}
+
+TEST_F(TelemetryTestFixture, EncodedSnapshot) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+ JSContext* aCx = cx.GetJSContext();
+
+ Unused << mTelemetry->ClearOrigins();
+
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ kDoubleclickOrigin);
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1, kUnknownOrigin1);
+
+ Preferences::SetCString("prio.publicKeyA", prioKeyA);
+ Preferences::SetCString("prio.publicKeyB", prioKeyB);
+
+ nsTArray<Tuple<nsCString, nsCString>> firstStrings;
+ GetEncodedOriginStrings(aCx, kTelemetryTest1Metric + "-%u"_ns, firstStrings);
+
+ // Now snapshot a second time and ensure the encoded payloads change.
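+ // (The Prio encoding is randomized, so two snapshots of identical data are
+ // expected to produce different ciphertexts.)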
+ nsTArray<Tuple<nsCString, nsCString>> secondStrings;
+ GetEncodedOriginStrings(aCx, kTelemetryTest1Metric + "-%u"_ns, secondStrings);
+
+ const auto sizeOfPrioDatasPerMetric =
+ TelemetryOrigin::SizeOfPrioDatasPerMetric();
+ ASSERT_EQ(sizeOfPrioDatasPerMetric, firstStrings.Length());
+ ASSERT_EQ(sizeOfPrioDatasPerMetric, secondStrings.Length());
+
+ for (size_t i = 0; i < sizeOfPrioDatasPerMetric; ++i) {
+ auto& aStr = Get<0>(firstStrings[i]);
+ auto& bStr = Get<1>(firstStrings[i]);
+ auto& secondAStr = Get<0>(secondStrings[i]);
+ auto& secondBStr = Get<1>(secondStrings[i]);
+
+ ASSERT_TRUE(aStr != secondAStr)
+ << "aStr (" << aStr.get() << ") must not equal secondAStr ("
+ << secondAStr.get() << ")";
+ ASSERT_TRUE(bStr != secondBStr)
+ << "bStr (" << bStr.get() << ") must not equal secondBStr ("
+ << secondBStr.get() << ")";
+ }
+}
+
+class MockObserver final : public nsIObserver {
+ public:
+ NS_DECL_ISUPPORTS
+
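+ // gmock expectations are set on Mobserve(); the XPCOM Observe() override
+ // below simply forwards the observed topic to it.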
+ MOCK_METHOD1(Mobserve, void(const char* aTopic));
+ NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aData) override {
+ Mobserve(aTopic);
+ return NS_OK;
+ };
+
+ MockObserver() = default;
+
+ private:
+ ~MockObserver() = default;
+};
+
+NS_IMPL_ISUPPORTS(MockObserver, nsIObserver);
+
+TEST_F(TelemetryTestFixture, OriginTelemetryNotifiesTopic) {
+ Unused << mTelemetry->ClearOrigins();
+
+ const char* kTopic = "origin-telemetry-storage-limit-reached";
+
+ MockObserver* mo = new MockObserver();
+ nsCOMPtr<nsIObserver> nsMo(mo);
+ EXPECT_CALL(*mo, Mobserve(StrEq(kTopic))).Times(1);
+
+ nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
+ ASSERT_TRUE(os);
+ os->AddObserver(nsMo, kTopic, false);
+
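+ // Record enough origins to reach the origin-telemetry storage limit; the
+ // observer registered above should be notified exactly once.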
+ const size_t size = ceil(10.0 / TelemetryOrigin::SizeOfPrioDatasPerMetric());
+ for (size_t i = 0; i < size; ++i) {
+ if (i < size - 1) {
+ // Let's ensure we only notify once.
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ kFacebookOrigin);
+ }
+ Telemetry::RecordOrigin(OriginMetricID::TelemetryTest_Test1,
+ kDoubleclickOrigin);
+ }
+
+ os->RemoveObserver(nsMo, kTopic);
+}
diff --git a/toolkit/components/telemetry/tests/gtest/TestScalars.cpp b/toolkit/components/telemetry/tests/gtest/TestScalars.cpp
new file mode 100644
index 0000000000..c860369eac
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/TestScalars.cpp
@@ -0,0 +1,491 @@
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+#include "core/TelemetryScalar.h"
+#include "gtest/gtest.h"
+#include "js/Conversions.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/TelemetryProcessEnums.h"
+#include "mozilla/Unused.h"
+#include "nsJSUtils.h" // nsAutoJSString
+#include "nsThreadUtils.h"
+#include "TelemetryFixture.h"
+#include "TelemetryTestHelpers.h"
+
+using namespace mozilla;
+using namespace TelemetryTestHelpers;
+using mozilla::Telemetry::ProcessID;
+
+#define EXPECTED_STRING "Nice, expected and creative string."
+
+// Test that we can properly write unsigned scalars using the C++ API.
+TEST_F(TelemetryTestFixture, ScalarUnsigned) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+
+ // Set the test scalar to a known value.
+ const uint32_t kInitialValue = 1172015;
+ const uint32_t kExpectedUint = 1172017;
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
+ kInitialValue);
+ Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
+ kExpectedUint - kInitialValue);
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+ CheckUintScalar("telemetry.test.unsigned_int_kind", cx.GetJSContext(),
+ scalarsSnapshot, kExpectedUint);
+
+ // Try to use SetMaximum.
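+ // ScalarSetMaximum only overwrites the stored value if the new value is
+ // larger, so the scalar should end up at kExpectedUintMaximum.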
+ const uint32_t kExpectedUintMaximum = kExpectedUint * 2;
+ Telemetry::ScalarSetMaximum(
+ Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
+ kExpectedUintMaximum);
+
+// Make sure that calls of the unsupported type don't corrupt the stored value.
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
+ false);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
+ u"test"_ns);
+#endif
+
+ // Check the recorded value.
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+ CheckUintScalar("telemetry.test.unsigned_int_kind", cx.GetJSContext(),
+ scalarsSnapshot, kExpectedUintMaximum);
+}
+
+// Test that the AutoScalarTimer records a proper uint32_t value to a
+// scalar once it goes out of scope.
+TEST_F(TelemetryTestFixture, AutoScalarTimer) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+ {
+ Telemetry::AutoScalarTimer<
+ Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND>
+ timer;
+ }
+
+ const char* kScalarName = "telemetry.test.unsigned_int_kind";
+
+ // Check that a value was recorded. Since this is a timer, we won't check
+ // the non-deterministic value - just that it exists and is >= 0.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+
+ // Validate the value of the test scalar.
+ JS::RootedValue value(cx.GetJSContext());
+ JS::RootedObject scalarObj(cx.GetJSContext(), &scalarsSnapshot.toObject());
+ ASSERT_TRUE(JS_GetProperty(cx.GetJSContext(), scalarObj, kScalarName, &value))
+ << "The test scalar must be reported.";
+
+ ASSERT_TRUE(value.isInt32())
+ << "The scalar value must be of the correct type.";
+ ASSERT_TRUE(value.toInt32() >= 0)
+ << "The uint scalar type must contain a value >= 0.";
+}
+
+// Test that we can properly write boolean scalars using the C++ API.
+TEST_F(TelemetryTestFixture, ScalarBoolean) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ Unused << mTelemetry->ClearScalars();
+
+ // Set the test scalar to a known value.
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, true);
+
+// Make sure that calls of the unsupported type don't corrupt the stored value.
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND,
+ static_cast<uint32_t>(12));
+ Telemetry::ScalarSetMaximum(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND,
+ 20);
+ Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, 2);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND,
+ u"test"_ns);
+#endif
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+ CheckBoolScalar("telemetry.test.boolean_kind", cx.GetJSContext(),
+ scalarsSnapshot, true);
+}
+
+// Test that we can properly write string scalars using the C++ API.
+TEST_F(TelemetryTestFixture, ScalarString) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ Unused << mTelemetry->ClearScalars();
+
+ // Set the test scalar to a known value.
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND,
+ NS_LITERAL_STRING_FROM_CSTRING(EXPECTED_STRING));
+
+// Make sure that calls of the unsupported type don't corrupt the stored value.
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND,
+ static_cast<uint32_t>(12));
+ Telemetry::ScalarSetMaximum(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND,
+ 20);
+ Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND, 2);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND, true);
+#endif
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+ CheckStringScalar("telemetry.test.string_kind", cx.GetJSContext(),
+ scalarsSnapshot, EXPECTED_STRING);
+}
+
+// Test that we can properly write keyed unsigned scalars using the C++ API.
+TEST_F(TelemetryTestFixture, KeyedScalarUnsigned) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ Unused << mTelemetry->ClearScalars();
+
+ // Set the test scalar to a known value.
+ const char* kScalarName = "telemetry.test.keyed_unsigned_int";
+ const uint32_t kKey1Value = 1172015;
+ const uint32_t kKey2Value = 1172017;
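+ // key2 is set to kKey1Value and then incremented by 2, so it is expected to
+ // end up at kKey2Value.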
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
+ u"key1"_ns, kKey1Value);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
+ u"key2"_ns, kKey1Value);
+ Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
+ u"key2"_ns, 2);
+
+// Make sure that calls of the unsupported type don't corrupt the stored value.
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
+ u"key1"_ns, false);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
+ u"test"_ns);
+#endif
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+
+ // Check the keyed scalar we're interested in.
+ CheckKeyedUintScalar(kScalarName, "key1", cx.GetJSContext(), scalarsSnapshot,
+ kKey1Value);
+ CheckKeyedUintScalar(kScalarName, "key2", cx.GetJSContext(), scalarsSnapshot,
+ kKey2Value);
+ CheckNumberOfProperties(kScalarName, cx.GetJSContext(), scalarsSnapshot, 2);
+
+ // Try to use SetMaximum.
+ const uint32_t kExpectedUintMaximum = kKey1Value * 2;
+ Telemetry::ScalarSetMaximum(
+ Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT, u"key1"_ns,
+ kExpectedUintMaximum);
+
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ // The first key should be different and the second is expected to be the same.
+ CheckKeyedUintScalar(kScalarName, "key1", cx.GetJSContext(), scalarsSnapshot,
+ kExpectedUintMaximum);
+ CheckKeyedUintScalar(kScalarName, "key2", cx.GetJSContext(), scalarsSnapshot,
+ kKey2Value);
+ CheckNumberOfProperties(kScalarName, cx.GetJSContext(), scalarsSnapshot, 2);
+}
+
+TEST_F(TelemetryTestFixture, KeyedScalarBoolean) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ Unused << mTelemetry->ClearScalars();
+
+ // Set the test scalar to a known value.
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
+ u"key1"_ns, false);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
+ u"key2"_ns, true);
+
+// Make sure that calls of the unsupported type don't corrupt the stored value.
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
+ u"key1"_ns, static_cast<uint32_t>(12));
+ Telemetry::ScalarSetMaximum(
+ Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND, u"key1"_ns, 20);
+ Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
+ u"key1"_ns, 2);
+#endif
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+
+ // Make sure that the keys contain the expected values.
+ const char* kScalarName = "telemetry.test.keyed_boolean_kind";
+ CheckKeyedBoolScalar(kScalarName, "key1", cx.GetJSContext(), scalarsSnapshot,
+ false);
+ CheckKeyedBoolScalar(kScalarName, "key2", cx.GetJSContext(), scalarsSnapshot,
+ true);
+ CheckNumberOfProperties(kScalarName, cx.GetJSContext(), scalarsSnapshot, 2);
+}
+
+TEST_F(TelemetryTestFixture, NonMainThreadAdd) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ Unused << mTelemetry->ClearScalars();
+
+ // Define the function that will be called on the testing thread.
+ nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
+ "TelemetryTestFixture_NonMainThreadAdd_Test::TestBody", []() -> void {
+ Telemetry::ScalarAdd(
+ Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND, 37);
+ });
+
+ // Spawn the testing thread and run the function.
+ nsCOMPtr<nsIThread> testingThread;
+ nsresult rv =
+ NS_NewNamedThread("Test thread", getter_AddRefs(testingThread), runnable);
+ ASSERT_EQ(rv, NS_OK);
+
+ // Shutdown the thread. This also waits for the runnable to complete.
+ testingThread->Shutdown();
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+ CheckUintScalar("telemetry.test.unsigned_int_kind", cx.GetJSContext(),
+ scalarsSnapshot, 37);
+}
+
+TEST_F(TelemetryTestFixture, ScalarUnknownID) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ const uint32_t kTestFakeIds[] = {
+ static_cast<uint32_t>(Telemetry::ScalarID::ScalarCount),
+ static_cast<uint32_t>(Telemetry::ScalarID::ScalarCount) + 378537,
+ std::numeric_limits<uint32_t>::max()};
+
+ for (auto id : kTestFakeIds) {
+ Telemetry::ScalarID scalarId = static_cast<Telemetry::ScalarID>(id);
+ Telemetry::ScalarSet(scalarId, static_cast<uint32_t>(1));
+ Telemetry::ScalarSet(scalarId, true);
+ Telemetry::ScalarSet(scalarId, u"test"_ns);
+ Telemetry::ScalarAdd(scalarId, 1);
+ Telemetry::ScalarSetMaximum(scalarId, 1);
+
+ // Make sure that nothing was recorded in the plain scalars.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+ ASSERT_TRUE(scalarsSnapshot.isUndefined())
+ << "No scalar must be recorded";
+
+ // Same for the keyed scalars.
+ Telemetry::ScalarSet(scalarId, u"key1"_ns, static_cast<uint32_t>(1));
+ Telemetry::ScalarSet(scalarId, u"key1"_ns, true);
+ Telemetry::ScalarAdd(scalarId, u"key1"_ns, 1);
+ Telemetry::ScalarSetMaximum(scalarId, u"key1"_ns, 1);
+
+ // Make sure that nothing was recorded in the keyed scalars.
+ JS::RootedValue keyedSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &keyedSnapshot);
+ ASSERT_TRUE(keyedSnapshot.isUndefined())
+ << "No keyed scalar must be recorded";
+ }
+#endif
+}
+
+TEST_F(TelemetryTestFixture, ScalarEventSummary) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+
+ const char* kScalarName = "telemetry.event_counts";
+
+ const char* kLongestEvent =
+ "oohwowlookthiscategoryissolong#thismethodislongtooo#"
+ "thisobjectisnoslouch";
+ TelemetryScalar::SummarizeEvent(nsCString(kLongestEvent), ProcessID::Parent,
+ false /* aDynamic */);
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+
+ CheckKeyedUintScalar(kScalarName, kLongestEvent, cx.GetJSContext(),
+ scalarsSnapshot, 1);
+
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ const char* kTooLongEvent =
+ "oohwowlookthiscategoryissolong#thismethodislongtooo#"
+ "thisobjectisnoslouch2";
+ TelemetryScalar::SummarizeEvent(nsCString(kTooLongEvent), ProcessID::Parent,
+ false /* aDynamic */);
+
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckNumberOfProperties(kScalarName, cx.GetJSContext(), scalarsSnapshot, 1);
+#endif // #ifndef DEBUG
+
+ // Test we can fill the next 499 keys up to our 500 maximum
+ for (int i = 1; i < 500; i++) {
+ std::ostringstream eventName;
+ eventName << "category#method#object" << i;
+ TelemetryScalar::SummarizeEvent(nsCString(eventName.str().c_str()),
+ ProcessID::Parent, false /* aDynamic */);
+ }
+
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckNumberOfProperties(kScalarName, cx.GetJSContext(), scalarsSnapshot, 500);
+
+// Don't run this part in debug builds as that intentionally asserts.
+#ifndef DEBUG
+ TelemetryScalar::SummarizeEvent(nsCString("whoops#too#many"),
+ ProcessID::Parent, false /* aDynamic */);
+
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckNumberOfProperties(kScalarName, cx.GetJSContext(), scalarsSnapshot, 500);
+#endif // #ifndef DEBUG
+}
+
+TEST_F(TelemetryTestFixture, ScalarEventSummary_Dynamic) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+
+ const char* kScalarName = "telemetry.dynamic_event_counts";
+ const char* kLongestEvent =
+ "oohwowlookthiscategoryissolong#thismethodislongtooo#"
+ "thisobjectisnoslouch";
+ TelemetryScalar::SummarizeEvent(nsCString(kLongestEvent), ProcessID::Parent,
+ true /* aDynamic */);
+ TelemetryScalar::SummarizeEvent(nsCString(kLongestEvent), ProcessID::Content,
+ true /* aDynamic */);
+
+ // Check the recorded value.
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot,
+ ProcessID::Dynamic);
+
+ // Recording in parent or content doesn't matter for dynamic scalars
+ // which all end up in the same place.
+ CheckKeyedUintScalar(kScalarName, kLongestEvent, cx.GetJSContext(),
+ scalarsSnapshot, 2);
+}
+
+TEST_F(TelemetryTestFixture, WrongScalarOperator) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+
+ const uint32_t expectedValue = 1172015;
+
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
+ expectedValue);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND,
+ NS_LITERAL_STRING_FROM_CSTRING(EXPECTED_STRING));
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, true);
+
+ TelemetryScalar::DeserializationStarted();
+
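+ // While deserialization is in progress, scalar operations are queued rather
+ // than applied immediately. Operations of the wrong type are expected to be
+ // dropped when the pending operations are applied, leaving the values above
+ // untouched.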
+ Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND, 1447);
+ Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, 1447);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
+ true);
+ TelemetryScalar::ApplyPendingOperations();
+
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
+ CheckStringScalar("telemetry.test.string_kind", cx.GetJSContext(),
+ scalarsSnapshot, EXPECTED_STRING);
+ CheckBoolScalar("telemetry.test.boolean_kind", cx.GetJSContext(),
+ scalarsSnapshot, true);
+ CheckUintScalar("telemetry.test.unsigned_int_kind", cx.GetJSContext(),
+ scalarsSnapshot, expectedValue);
+}
+
+TEST_F(TelemetryTestFixture, WrongKeyedScalarOperator) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+
+ const uint32_t kExpectedUint = 1172017;
+
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
+ u"key1"_ns, kExpectedUint);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
+ u"key2"_ns, true);
+
+ TelemetryScalar::DeserializationStarted();
+
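+ // As above: wrongly-typed keyed operations issued during deserialization are
+ // expected to be discarded when the pending operations are applied.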
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
+ u"key1"_ns, false);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
+ u"key2"_ns, static_cast<uint32_t>(13));
+
+ TelemetryScalar::ApplyPendingOperations();
+
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckKeyedUintScalar("telemetry.test.keyed_unsigned_int", "key1",
+ cx.GetJSContext(), scalarsSnapshot, kExpectedUint);
+ CheckKeyedBoolScalar("telemetry.test.keyed_boolean_kind", "key2",
+ cx.GetJSContext(), scalarsSnapshot, true);
+}
+
+TEST_F(TelemetryTestFixture, TestKeyedScalarAllowedKeys) {
+ AutoJSContextWithGlobal cx(mCleanGlobal);
+ // Make sure we don't get scalars from other tests.
+ Unused << mTelemetry->ClearScalars();
+
+ const uint32_t kExpectedUint = 1172017;
+
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_WITH_KEYS,
+ u"only"_ns, kExpectedUint);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_WITH_KEYS,
+ u"meant"_ns, kExpectedUint);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_WITH_KEYS,
+ u"for"_ns, kExpectedUint);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_WITH_KEYS,
+ u"testing"_ns, kExpectedUint);
+
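+ // These keys are not in the scalar's allowed key list; they should be
+ // rejected and counted in telemetry.keyed_scalars_unknown_keys instead.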
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_WITH_KEYS,
+ u"invalid"_ns, kExpectedUint);
+ Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_WITH_KEYS,
+ u"not-valid"_ns, kExpectedUint);
+
+ JS::RootedValue scalarsSnapshot(cx.GetJSContext());
+ GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
+ CheckKeyedUintScalar("telemetry.test.keyed_with_keys", "only",
+ cx.GetJSContext(), scalarsSnapshot, kExpectedUint);
+ CheckKeyedUintScalar("telemetry.test.keyed_with_keys", "meant",
+ cx.GetJSContext(), scalarsSnapshot, kExpectedUint);
+ CheckKeyedUintScalar("telemetry.test.keyed_with_keys", "for",
+ cx.GetJSContext(), scalarsSnapshot, kExpectedUint);
+ CheckKeyedUintScalar("telemetry.test.keyed_with_keys", "testing",
+ cx.GetJSContext(), scalarsSnapshot, kExpectedUint);
+ CheckNumberOfProperties("telemetry.test.keyed_with_keys", cx.GetJSContext(),
+ scalarsSnapshot, 4);
+
+ CheckKeyedUintScalar("telemetry.keyed_scalars_unknown_keys",
+ "telemetry.test.keyed_with_keys", cx.GetJSContext(),
+ scalarsSnapshot, 2);
+}
diff --git a/toolkit/components/telemetry/tests/gtest/moz.build b/toolkit/components/telemetry/tests/gtest/moz.build
new file mode 100644
index 0000000000..1a20a75d99
--- /dev/null
+++ b/toolkit/components/telemetry/tests/gtest/moz.build
@@ -0,0 +1,30 @@
+# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, you can obtain one at http://mozilla.org/MPL/2.0/.
+
+Library("telemetrytest")
+
+LOCAL_INCLUDES += [
+ "../..",
+]
+
+UNIFIED_SOURCES = [
+ "TelemetryFixture.cpp",
+ "TelemetryTestHelpers.cpp",
+]
+
+if CONFIG["OS_TARGET"] != "Android":
+ UNIFIED_SOURCES += [
+ "TestCombinedStacks.cpp",
+ "TestCounters.cpp",
+ "TestEvents.cpp",
+ "TestHistograms.cpp",
+ "TestOrigins.cpp",
+ "TestScalars.cpp",
+ ]
+
+FINAL_LIBRARY = "xul-gtest"
+
+include("/ipc/chromium/chromium-config.mozbuild")
diff --git a/toolkit/components/telemetry/tests/integration/tests/conftest.py b/toolkit/components/telemetry/tests/integration/tests/conftest.py
new file mode 100644
index 0000000000..7fcfee3450
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/conftest.py
@@ -0,0 +1,316 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import contextlib
+import mozinstall
+import os
+import pytest
+import re
+import sys
+import textwrap
+import time
+
+from marionette_driver import By, keys
+from marionette_driver.addons import Addons
+from marionette_driver.errors import MarionetteException
+from marionette_driver.marionette import Marionette
+from marionette_driver.wait import Wait
+from six import reraise
+from telemetry_harness.ping_server import PingServer
+
+CANARY_CLIENT_ID = "c0ffeec0-ffee-c0ff-eec0-ffeec0ffeec0"
+SERVER_ROOT = "toolkit/components/telemetry/tests/marionette/harness/www"
+UUID_PATTERN = re.compile(
+ r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+)
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+"""Get a build object we need to find a Firefox binary"""
+try:
+ from mozbuild.base import MozbuildObject
+
+ build = MozbuildObject.from_environment(cwd=here)
+except ImportError:
+ build = None
+
+
+@pytest.fixture(name="binary")
+def fixture_binary():
+ """Return a Firefox binary"""
+ try:
+ return build.get_binary_path()
+ except Exception as e:
+ print(str(e))
+
+ app = "firefox"
+ bindir = os.path.join(os.environ["PYTHON_TEST_TMP"], app)
+ if os.path.isdir(bindir):
+ try:
+ return mozinstall.get_binary(bindir, app_name=app)
+ except Exception as e:
+ print(str(e))
+
+ if "GECKO_BINARY_PATH" in os.environ:
+ return os.environ["GECKO_BINARY_PATH"]
+
+
+@pytest.fixture(name="marionette")
+def fixture_marionette(binary, ping_server):
+ """Start a marionette session with specific browser prefs"""
+ server_url = "{url}pings".format(url=ping_server.get_url("/"))
+ prefs = {
+ # Clear the region detection url to
+ # * avoid net access in tests
+ # * stabilize browser.search.region to avoid an extra subsession (bug 1579840#c40)
+ "browser.region.network.url": "",
+ # Disable smart sizing because it changes prefs at startup. (bug 1547750)
+ "browser.cache.disk.smart_size.enabled": False,
+ "toolkit.telemetry.server": server_url,
+ "telemetry.fog.test.localhost_port": -1,
+ "toolkit.telemetry.initDelay": 1,
+ "toolkit.telemetry.minSubsessionLength": 0,
+ "datareporting.healthreport.uploadEnabled": True,
+ "datareporting.policy.dataSubmissionEnabled": True,
+ "datareporting.policy.dataSubmissionPolicyBypassNotification": True,
+ "toolkit.telemetry.log.level": "Trace",
+ "toolkit.telemetry.log.dump": True,
+ "toolkit.telemetry.send.overrideOfficialCheck": True,
+ "toolkit.telemetry.testing.disableFuzzingDelay": True,
+ }
+ yield Marionette(host="localhost", port=0, bin=binary, prefs=prefs)
+
+
+@pytest.fixture(name="ping_server")
+def fixture_ping_server():
+ """Run a ping server on localhost on a free port assigned by the OS"""
+ server = PingServer(SERVER_ROOT, "http://localhost:0")
+ server.start()
+ yield server
+ server.stop()
+
+
+class Browser(object):
+ def __init__(self, marionette, ping_server):
+ self.marionette = marionette
+ self.ping_server = ping_server
+ self.addon_ids = []
+
+ def disable_telemetry(self):
+ self.marionette.instance.profile.set_persistent_preferences(
+ {"datareporting.healthreport.uploadEnabled": False}
+ )
+ self.marionette.set_pref("datareporting.healthreport.uploadEnabled", False)
+
+ def enable_search_events(self):
+ """
+ Event Telemetry categories are disabled by default.
+ Search events are in the "navigation" category and are not enabled by
+ default in builds of Firefox, so we enable them here.
+ """
+
+ script = """\
+ let {Services} = ChromeUtils.import("resource://gre/modules/Services.jsm");
+ Services.telemetry.setEventRecordingEnabled("navigation", true);
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script(textwrap.dedent(script))
+
+ def enable_telemetry(self):
+ self.marionette.instance.profile.set_persistent_preferences(
+ {"datareporting.healthreport.uploadEnabled": True}
+ )
+ self.marionette.set_pref("datareporting.healthreport.uploadEnabled", True)
+
+ def get_client_id(self):
+ """Return the ID of the current client."""
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ return self.marionette.execute_script(
+ 'Cu.import("resource://gre/modules/ClientID.jsm");'
+ "return ClientID.getCachedClientID();"
+ )
+
+ def get_default_search_engine(self):
+ """Retrieve the identifier of the default search engine.
+
+ We found that it's required to initialize the search service before
+ attempting to retrieve the default search engine. Not calling init
+ would result in a JavaScript error (see bug 1543960 for more
+ information).
+ """
+
+ script = """\
+ let [resolve] = arguments;
+ let searchService = Components.classes[
+ "@mozilla.org/browser/search-service;1"]
+ .getService(Components.interfaces.nsISearchService);
+ return searchService.init().then(function () {
+ resolve(searchService.defaultEngine.identifier);
+ });
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ return self.marionette.execute_async_script(textwrap.dedent(script))
+
+ def install_addon(self):
+ resources_dir = os.path.join(os.path.dirname(__file__), "resources")
+ addon_path = os.path.abspath(os.path.join(resources_dir, "helloworld"))
+
+ try:
+ # Ensure the Environment has init'd so the installed addon
+ # triggers an "environment-change" ping.
+ script = """\
+ let [resolve] = arguments;
+ Cu.import("resource://gre/modules/TelemetryEnvironment.jsm");
+ TelemetryEnvironment.onInitialized().then(resolve);
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_async_script(textwrap.dedent(script))
+
+ addons = Addons(self.marionette)
+ addon_id = addons.install(addon_path, temp=True)
+ except MarionetteException as e:
+ pytest.fail("{} - Error installing addon: {} - ".format(e.cause, e.message))
+ else:
+ self.addon_ids.append(addon_id)
+
+ @contextlib.contextmanager
+ def new_tab(self):
+ """Perform operations in a new tab and then close the new tab."""
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ start_tab = self.marionette.current_window_handle
+ new_tab = self.open_tab(focus=True)
+ self.marionette.switch_to_window(new_tab)
+
+ yield
+
+ self.marionette.close()
+ self.marionette.switch_to_window(start_tab)
+
+ def open_tab(self, focus=False):
+ current_tabs = self.marionette.window_handles
+
+ try:
+ result = self.marionette.open(type="tab", focus=focus)
+ if result["type"] != "tab":
+ raise Exception(
+ "Newly opened browsing context is of type {} and not tab.".format(
+ result["type"]
+ )
+ )
+ except Exception:
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ reraise(
+ exc_type,
+ exc_type("Failed to trigger opening a new tab: {}".format(exc_value)),
+ exc_traceback,
+ )
+ else:
+ Wait(self.marionette).until(
+ lambda mn: len(mn.window_handles) == len(current_tabs) + 1,
+ message="No new tab has been opened",
+ )
+
+ [new_tab] = list(set(self.marionette.window_handles) - set(current_tabs))
+
+ return new_tab
+
+ def quit(self, in_app=False):
+ self.marionette.quit(in_app=in_app)
+
+ def restart(self):
+ self.marionette.restart(clean=False, in_app=True)
+
+ def search(self, text):
+ """Perform a search via the browser's URL bar."""
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script("gURLBar.select();")
+ urlbar = self.marionette.find_element(By.ID, "urlbar-input")
+ urlbar.send_keys(keys.Keys.DELETE)
+ urlbar.send_keys(text + keys.Keys.ENTER)
+
+ # Wait for 0.1 seconds before proceeding to decrease the chance
+ # of Firefox being shut down before Telemetry is recorded
+ time.sleep(0.1)
+
+ def search_in_new_tab(self, text):
+ """Open a new tab and perform a search via the browser's URL bar,
+ then close the new tab."""
+ with self.new_tab():
+ self.search(text)
+
+ def start_session(self):
+ self.marionette.start_session()
+
+ def wait_for_search_service_init(self):
+ script = """\
+ let [resolve] = arguments;
+ let searchService = Components.classes["@mozilla.org/browser/search-service;1"]
+ .getService(Components.interfaces.nsISearchService);
+ searchService.init().then(resolve);
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_async_script(textwrap.dedent(script))
+
+
+@pytest.fixture(name="browser")
+def fixture_browser(marionette, ping_server):
+ """Return an instance of our Browser object"""
+ browser = Browser(marionette, ping_server)
+ browser.start_session()
+ yield browser
+ browser.quit()
+
+
+class Helpers(object):
+ def __init__(self, ping_server, marionette):
+ self.ping_server = ping_server
+ self.marionette = marionette
+
+ def assert_is_valid_uuid(self, value):
+ """Custom assertion for UUID's"""
+ assert value is not None
+ assert value != ""
+ assert value != CANARY_CLIENT_ID
+ assert re.match(UUID_PATTERN, value) is not None
+
+ def wait_for_ping(self, action_func, ping_filter):
+ [ping] = self.wait_for_pings(action_func, ping_filter, 1)
+ return ping
+
+ def wait_for_pings(self, action_func, ping_filter, count):
+ """Call the given action and wait for pings to come in and return
+ the `count` number of pings, that match the given filter."""
+ # Keep track of the current number of pings
+ current_num_pings = len(self.ping_server.pings)
+
+ # New list to store new pings that satisfy the filter
+ filtered_pings = []
+
+ def wait_func(*args, **kwargs):
+ # Ignore existing pings in self.ping_server.pings
+ new_pings = self.ping_server.pings[current_num_pings:]
+
+ # Filter pings to make sure we wait for the correct ping type
+ filtered_pings[:] = [p for p in new_pings if ping_filter(p)]
+
+ return len(filtered_pings) >= count
+
+ action_func()
+
+ try:
+ Wait(self.marionette, 60).until(wait_func)
+ except Exception as e:
+ pytest.fail("Error waiting for ping: {}".format(e))
+
+ return filtered_pings[:count]
+
+
+@pytest.fixture(name="helpers")
+def fixture_helpers(ping_server, marionette):
+ """Return an instace of our helpers object"""
+ return Helpers(ping_server, marionette)
diff --git a/toolkit/components/telemetry/tests/integration/tests/python.ini b/toolkit/components/telemetry/tests/integration/tests/python.ini
new file mode 100644
index 0000000000..3a610a19cf
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/python.ini
@@ -0,0 +1,9 @@
+[DEFAULT]
+subsuite = telemetry-integration-tests
+skip-if = python == 3
+
+[test_deletion_request_ping.py]
+[test_event_ping.py]
+[test_main_tab_scalars.py]
+[test_search_counts_across_sessions.py]
+[test_subsession_management.py]
diff --git a/toolkit/components/telemetry/tests/integration/tests/resources/helloworld/helloworld.html b/toolkit/components/telemetry/tests/integration/tests/resources/helloworld/helloworld.html
new file mode 100644
index 0000000000..146ad025d9
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/resources/helloworld/helloworld.html
@@ -0,0 +1,18 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8">
+ <style>
+ body {
+ background-color: lightgrey;
+ }
+ p {
+ font-size: 25px;
+ padding: 25px 50px;
+ }
+ </style>
+ </head>
+ <body>
+ <p>Hello World!</p>
+ </body>
+</html>
diff --git a/toolkit/components/telemetry/tests/integration/tests/resources/helloworld/manifest.json b/toolkit/components/telemetry/tests/integration/tests/resources/helloworld/manifest.json
new file mode 100644
index 0000000000..14ab99fa1c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/resources/helloworld/manifest.json
@@ -0,0 +1,12 @@
+{
+ "description": "Extension to be installed in Telemetry client integration tests.",
+ "manifest_version": 2,
+ "name": "helloworld",
+ "version": "1.0",
+ "homepage_url": "https://hg.mozilla.org/mozilla-central/",
+ "browser_action": {
+ "browser_style": true,
+ "default_title": "Hello World",
+ "default_popup": "helloworld.html"
+ }
+}
diff --git a/toolkit/components/telemetry/tests/integration/tests/test_deletion_request_ping.py b/toolkit/components/telemetry/tests/integration/tests/test_deletion_request_ping.py
new file mode 100644
index 0000000000..1667d1e214
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/test_deletion_request_ping.py
@@ -0,0 +1,65 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import mozunit
+
+from telemetry_harness.ping_filters import (
+ ANY_PING,
+ DELETION_REQUEST_PING,
+ MAIN_SHUTDOWN_PING,
+)
+
+
+def test_deletion_request_ping(browser, helpers):
+ """Test the "deletion-request" ping behaviour across sessions"""
+ # Get the client_id after installing an addon
+ client_id = helpers.wait_for_ping(browser.install_addon, ANY_PING)["clientId"]
+
+ # Make sure it's a valid UUID
+ helpers.assert_is_valid_uuid(client_id)
+
+ # Trigger a "deletion-request" ping.
+ ping = helpers.wait_for_ping(browser.disable_telemetry, DELETION_REQUEST_PING)
+
+ assert "clientId" in ping
+ assert "payload" in ping
+ assert "environment" not in ping["payload"]
+
+ # Close Firefox cleanly.
+ browser.quit(in_app=True)
+
+ # Start Firefox.
+ browser.start_session()
+
+ # Trigger an environment change, which isn't allowed to send a ping.
+ browser.install_addon()
+
+ # Ensure we've sent no pings since "optout".
+ assert browser.ping_server.pings[-1] == ping
+
+ # Turn Telemetry back on.
+ browser.enable_telemetry()
+
+ # Close Firefox cleanly, collecting its "main"/"shutdown" ping.
+ main_ping = helpers.wait_for_ping(browser.restart, MAIN_SHUTDOWN_PING)
+
+ # Ensure the "main" ping has changed its client id.
+ assert "clientId" in main_ping
+ new_client_id = main_ping["clientId"]
+ helpers.assert_is_valid_uuid(new_client_id)
+ assert new_client_id != client_id
+
+ # Ensure we note in the ping that the user opted in.
+ parent_scalars = main_ping["payload"]["processes"]["parent"]["scalars"]
+
+ assert "telemetry.data_upload_optin" in parent_scalars
+ assert parent_scalars["telemetry.data_upload_optin"] is True
+
+ # Ensure all pings sent during this test don't have the c0ffee client id.
+ for ping in browser.ping_server.pings:
+ if "clientId" in ping:
+ helpers.assert_is_valid_uuid(ping["clientId"])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/integration/tests/test_event_ping.py b/toolkit/components/telemetry/tests/integration/tests/test_event_ping.py
new file mode 100644
index 0000000000..d1e6ee8e34
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/test_event_ping.py
@@ -0,0 +1,51 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import mozunit
+
+from telemetry_harness.ping_filters import EVENT_PING
+
+
+def test_event_ping(browser, helpers):
+ """
+ Barebones test for "event" ping:
+ Search, close Firefox, check "event" ping for search events.
+ """
+ browser.enable_search_events()
+ browser.wait_for_search_service_init()
+ browser.search("mozilla firefox")
+
+ payload = helpers.wait_for_ping(browser.restart, EVENT_PING)["payload"]
+
+ assert "shutdown" == payload["reason"]
+ assert 0 == payload["lostEventsCount"]
+ assert "events" in payload
+ assert "parent" in payload["events"]
+ assert find_event(payload["events"]["parent"])
+
+
+def find_event(events):
+ """Return the first event that has the expected timestamp, category method and object"""
+
+ for event in events:
+ # The event may optionally contain additional fields
+ [timestamp, category, method, object_id] = event[:4]
+
+ assert timestamp > 0
+
+ if category != "navigation":
+ continue
+
+ if method != "search":
+ continue
+
+ if object_id != "urlbar":
+ continue
+
+ return True
+
+ return False
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/integration/tests/test_main_tab_scalars.py b/toolkit/components/telemetry/tests/integration/tests/test_main_tab_scalars.py
new file mode 100644
index 0000000000..61eea0e71e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/test_main_tab_scalars.py
@@ -0,0 +1,34 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import mozunit
+
+from telemetry_harness.ping_filters import MAIN_SHUTDOWN_PING
+
+
+def test_main_tab_scalars(browser, helpers):
+ with browser.marionette.using_context(browser.marionette.CONTEXT_CHROME):
+ start_tab = browser.marionette.current_window_handle
+ tab2 = browser.open_tab(focus=True)
+ browser.marionette.switch_to_window(tab2)
+ tab3 = browser.open_tab(focus=True)
+ browser.marionette.switch_to_window(tab3)
+ browser.marionette.close()
+ browser.marionette.switch_to_window(tab2)
+ browser.marionette.close()
+ browser.marionette.switch_to_window(start_tab)
+
+ ping = helpers.wait_for_ping(browser.restart, MAIN_SHUTDOWN_PING)
+
+ assert "main" == ping["type"]
+ assert browser.get_client_id() == ping["clientId"]
+
+ scalars = ping["payload"]["processes"]["parent"]["scalars"]
+
+ assert 3 == scalars["browser.engagement.max_concurrent_tab_count"]
+ assert 2 == scalars["browser.engagement.tab_open_event_count"]
+ assert 1 == scalars["browser.engagement.max_concurrent_window_count"]
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/integration/tests/test_search_counts_across_sessions.py b/toolkit/components/telemetry/tests/integration/tests/test_search_counts_across_sessions.py
new file mode 100644
index 0000000000..bccaf7276c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/test_search_counts_across_sessions.py
@@ -0,0 +1,170 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import mozunit
+
+from telemetry_harness.ping_filters import (
+ MAIN_ENVIRONMENT_CHANGE_PING,
+ MAIN_SHUTDOWN_PING,
+)
+
+
+def test_search_counts(browser, helpers):
+ """Test for SEARCH_COUNTS across sessions."""
+
+ # Session S1, subsession 1:
+ # - Open browser
+ # - Open new tab
+ # - Perform search (awesome bar or search bar)
+ # - Restart browser in new session
+ search_engine = browser.get_default_search_engine()
+ browser.search_in_new_tab("mozilla firefox")
+ ping1 = helpers.wait_for_ping(browser.restart, MAIN_SHUTDOWN_PING)
+
+ # Session S2, subsession 1:
+ # - Outcome 1
+ # - Received a main ping P1 for previous session
+ # - Ping base contents:
+ # - clientId should be set
+ # - sessionId should be set
+ # - subsessionId should be set
+ # - previousSessionId should not be set
+ # - previousSubsessionId should not be set
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 1
+ # - reason should be "shutdown"
+ # - Other ping contents:
+ # - SEARCH_COUNTS values should match performed search action
+
+ client_id = ping1["clientId"]
+ helpers.assert_is_valid_uuid(client_id)
+
+ ping1_info = ping1["payload"]["info"]
+ assert "shutdown" == ping1_info["reason"]
+
+ s1_session_id = ping1_info["sessionId"]
+ assert s1_session_id != ""
+
+ s1_s1_subsession_id = ping1_info["subsessionId"]
+ assert s1_s1_subsession_id != ""
+
+ assert ping1_info["previousSessionId"] is None
+ assert ping1_info["previousSubsessionId"] is None
+ assert ping1_info["subsessionCounter"] == 1
+ assert ping1_info["profileSubsessionCounter"] == 1
+
+ scalars1 = ping1["payload"]["processes"]["parent"]["scalars"]
+ assert "browser.engagement.window_open_event_count" not in scalars1
+ assert scalars1["browser.engagement.tab_open_event_count"] == 1
+
+ keyed_histograms1 = ping1["payload"]["keyedHistograms"]
+ search_counts1 = keyed_histograms1["SEARCH_COUNTS"][
+ "{}.urlbar".format(search_engine)
+ ]
+
+ assert search_counts1 == {
+ u"range": [1, 2],
+ u"bucket_count": 3,
+ u"histogram_type": 4,
+ u"values": {u"1": 0, u"0": 1},
+ u"sum": 1,
+ }
+
+ # - Install addon
+ # Session S2, subsession 2:
+ # - Outcome 2
+ # - Received a main ping P2 for previous subsession
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to a new value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P1's subsessionId value
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 2
+ # - reason should be "environment-change"
+ # - Other ping contents:
+ # - SEARCH_COUNTS values should not be in P2
+ # - Verify that there should be no listing for tab scalar as we started a new
+ # session
+
+ ping2 = helpers.wait_for_ping(browser.install_addon, MAIN_ENVIRONMENT_CHANGE_PING)
+
+ assert client_id == ping2["clientId"]
+
+ ping2_info = ping2["payload"]["info"]
+ assert ping2_info["reason"] == "environment-change"
+
+ s2_session_id = ping2_info["sessionId"]
+ assert s2_session_id != s1_session_id
+
+ s2_s1_subsession_id = ping2_info["subsessionId"]
+ assert s2_s1_subsession_id != s1_s1_subsession_id
+
+ assert ping2_info["previousSessionId"] == s1_session_id
+ assert ping2_info["previousSubsessionId"] == s1_s1_subsession_id
+ assert ping2_info["subsessionCounter"] == 1
+ assert ping2_info["profileSubsessionCounter"] == 2
+
+ scalars2 = ping2["payload"]["processes"]["parent"]["scalars"]
+ assert "browser.engagement.window_open_event_count" not in scalars2
+ assert "browser.engagement.tab_open_event_count" not in scalars2
+
+ keyed_histograms2 = ping2["payload"]["keyedHistograms"]
+ assert "SEARCH_COUNTS" not in keyed_histograms2
+
+ # - Perform Search
+ # - Restart Browser
+
+ browser.search("mozilla telemetry")
+ browser.search("python unittest")
+ browser.search("python pytest")
+
+ ping3 = helpers.wait_for_ping(browser.restart, MAIN_SHUTDOWN_PING)
+
+ # Session S3, subsession 1:
+ # - Outcome 3
+ # - Received a main ping P3 for session 2, subsession 2
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to P2's sessionId value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P2's subsessionId value
+ # - subSessionCounter should be 2
+ # - profileSubSessionCounter should be 3
+ # - reason should be "shutdown"
+ # - Other ping contents:
+ # - SEARCH_COUNTS values should be set per above search
+
+ assert client_id == ping3["clientId"]
+
+ ping3_info = ping3["payload"]["info"]
+ assert ping3_info["reason"] == "shutdown"
+ assert ping3_info["sessionId"] == s2_session_id
+
+ s2_s2_subsession_id = ping3_info["subsessionId"]
+ assert s2_s2_subsession_id != s1_s1_subsession_id
+ assert ping3_info["previousSessionId"] == s1_session_id
+ assert ping3_info["previousSubsessionId"] == s2_s1_subsession_id
+ assert ping3_info["subsessionCounter"] == 2
+ assert ping3_info["profileSubsessionCounter"] == 3
+
+ scalars3 = ping3["payload"]["processes"]["parent"]["scalars"]
+ assert "browser.engagement.window_open_event_count" not in scalars3
+
+ keyed_histograms3 = ping3["payload"]["keyedHistograms"]
+ search_counts3 = keyed_histograms3["SEARCH_COUNTS"][
+ "{}.urlbar".format(search_engine)
+ ]
+ assert search_counts3 == {
+ u"range": [1, 2],
+ u"bucket_count": 3,
+ u"histogram_type": 4,
+ u"values": {u"1": 0, u"0": 3},
+ u"sum": 3,
+ }
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/integration/tests/test_subsession_management.py b/toolkit/components/telemetry/tests/integration/tests/test_subsession_management.py
new file mode 100644
index 0000000000..41765358e5
--- /dev/null
+++ b/toolkit/components/telemetry/tests/integration/tests/test_subsession_management.py
@@ -0,0 +1,148 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+import mozunit
+
+from telemetry_harness.ping_filters import (
+ MAIN_ENVIRONMENT_CHANGE_PING,
+ MAIN_SHUTDOWN_PING,
+)
+
+
+def test_subsession_management(browser, helpers):
+ """Test for Firefox Telemetry subsession management."""
+
+ # Session S1, subsession 1
+ # Actions:
+ # 1. Open browser
+ # 2. Open a new tab
+ # 3. Restart browser in new session
+
+ with browser.new_tab():
+ # If Firefox Telemetry is working correctly, this will
+ # be sufficient to record a tab open event.
+ pass
+
+ ping1 = helpers.wait_for_ping(browser.restart, MAIN_SHUTDOWN_PING)
+
+ # Session S2, subsession 1
+ # Outcome 1:
+ # Received a main ping P1 for previous session
+ # - Ping base contents:
+ # - clientId should be a valid UUID
+ # - reason should be "shutdown"
+ # - sessionId should be set
+ # - subsessionId should be set
+ # - previousSessionId should not be set
+ # - previousSubsessionId should not be set
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 1
+ # - Other ping contents:
+ # - tab_open_event_count in scalars
+
+ client_id = ping1["clientId"]
+ helpers.assert_is_valid_uuid(client_id)
+
+ ping1_info = ping1["payload"]["info"]
+ assert ping1_info["reason"] == "shutdown"
+
+ s1_session_id = ping1_info["sessionId"]
+ assert s1_session_id != ""
+
+ s1_s1_subsession_id = ping1_info["subsessionId"]
+ assert s1_s1_subsession_id != ""
+ assert ping1_info["previousSessionId"] is None
+ assert ping1_info["previousSubsessionId"] is None
+ assert ping1_info["subsessionCounter"] == 1
+ assert ping1_info["profileSubsessionCounter"] == 1
+
+ scalars1 = ping1["payload"]["processes"]["parent"]["scalars"]
+ assert "browser.engagement.window_open_event_count" not in scalars1
+ assert scalars1["browser.engagement.tab_open_event_count"] == 1
+
+ # Actions:
+ # 1. Install addon
+
+ ping2 = helpers.wait_for_ping(browser.install_addon, MAIN_ENVIRONMENT_CHANGE_PING)
+
+ [addon_id] = browser.addon_ids # Store the addon ID for verifying ping3 later
+
+ # Session S2, subsession 2
+ # Outcome 2:
+ # Received a main ping P2 for previous subsession
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to a new value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P1's subsessionId value
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 2
+ # - reason should be "environment-change"
+ # - Other ping contents:
+ # - tab_open_event_count not in scalars
+
+ assert ping2["clientId"] == client_id
+
+ ping2_info = ping2["payload"]["info"]
+ assert ping2_info["reason"] == "environment-change"
+
+ s2_session_id = ping2_info["sessionId"]
+ assert s2_session_id != s1_session_id
+
+ s2_s1_subsession_id = ping2_info["subsessionId"]
+ assert s2_s1_subsession_id != s1_s1_subsession_id
+ assert ping2_info["previousSessionId"] == s1_session_id
+ assert ping2_info["previousSubsessionId"] == s1_s1_subsession_id
+ assert ping2_info["subsessionCounter"] == 1
+ assert ping2_info["profileSubsessionCounter"] == 2
+
+ scalars2 = ping2["payload"]["processes"]["parent"]["scalars"]
+ assert "browser.engagement.window_open_event_count" not in scalars2
+ assert "browser.engagement.tab_open_event_count" not in scalars2
+
+ # Actions
+ # 1. Restart browser in new session
+
+ ping3 = helpers.wait_for_ping(browser.restart, MAIN_SHUTDOWN_PING)
+
+ # Session S3, subsession 1
+ # Outcome 3:
+ # Received a main ping P3 for session 2, subsession 2
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to P2's sessionId value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P2's subsessionId value
+ # - subSessionCounter should be 2
+ # - profileSubSessionCounter should be 3
+ # - reason should be "shutdown"
+ # - Other ping contents:
+ # - addon ID in activeAddons in environment
+
+ assert ping3["clientId"] == client_id
+
+ ping3_info = ping3["payload"]["info"]
+ assert ping3_info["reason"] == "shutdown"
+
+ assert ping3_info["sessionId"] == s2_session_id
+
+ s2_s2_subsession_id = ping3_info["subsessionId"]
+ assert s2_s2_subsession_id != s1_s1_subsession_id
+ assert s2_s2_subsession_id != s2_s1_subsession_id
+ assert ping3_info["previousSessionId"] == s1_session_id
+ assert ping3_info["previousSubsessionId"] == s2_s1_subsession_id
+ assert ping3_info["subsessionCounter"] == 2
+ assert ping3_info["profileSubsessionCounter"] == 3
+
+ scalars3 = ping3["payload"]["processes"]["parent"]["scalars"]
+ assert "browser.engagement.window_open_event_count" not in scalars3
+ assert "browser.engagement.tab_open_event_count" not in scalars3
+
+ active_addons = ping3["environment"]["addons"]["activeAddons"]
+ assert addon_id in active_addons
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in b/toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in
new file mode 100644
index 0000000000..e24a6b1ba6
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/MANIFEST.in
@@ -0,0 +1,3 @@
+exclude MANIFEST.in
+include requirements.txt
+recursive-include telemetry_harness/resources * \ No newline at end of file
diff --git a/toolkit/components/telemetry/tests/marionette/harness/requirements.txt b/toolkit/components/telemetry/tests/marionette/harness/requirements.txt
new file mode 100644
index 0000000000..a95e794fe0
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/requirements.txt
@@ -0,0 +1,2 @@
+marionette-harness >= 4.0.0
+requests==2.11.1 \ No newline at end of file
diff --git a/toolkit/components/telemetry/tests/marionette/harness/setup.py b/toolkit/components/telemetry/tests/marionette/harness/setup.py
new file mode 100644
index 0000000000..f650eb18ed
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/setup.py
@@ -0,0 +1,48 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+from setuptools import setup, find_packages
+
+PACKAGE_VERSION = "0.1"
+
+THIS_DIR = os.path.dirname(os.path.realpath(__file__))
+
+
+def read(*parts):
+ with open(os.path.join(THIS_DIR, *parts)) as f:
+ return f.read()
+
+
+setup(
+ name="telemetry-harness",
+ version=PACKAGE_VERSION,
+ description=(
+ "Custom Marionette runner classes and entry scripts for "
+ "Telemetry specific Marionette tests."
+ ),
+ classifiers=[
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
+ "Natural Language :: English",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ keywords="mozilla",
+ author="Firefox Test Engineering Team",
+ author_email="firefox-test-engineering@mozilla.org",
+ url="https://developer.mozilla.org/en-US/docs/Mozilla/QA/telemetry_harness",
+ license="MPL 2.0",
+ packages=find_packages(),
+ include_package_data=True,
+ zip_safe=False,
+ install_requires=read("requirements.txt").splitlines(),
+ entry_points="""
+ [console_scripts]
+ telemetry-harness = telemetry_harness.runtests:cli
+ """,
+)
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/__init__.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/__init__.py
new file mode 100644
index 0000000000..6fbe8159b2
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_filters.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_filters.py
new file mode 100644
index 0000000000..9db0f5a72f
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_filters.py
@@ -0,0 +1,29 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+class FOGPingFilter(object):
+ """Ping filter that accepts any FOG pings."""
+
+ def __call__(self, ping):
+ return True
+
+
+class FOGDocTypePingFilter(FOGPingFilter):
+ """Ping filter that accepts FOG pings that match the doc-type."""
+
+ def __init__(self, doc_type):
+ super(FOGDocTypePingFilter, self).__init__()
+ self.doc_type = doc_type
+
+ def __call__(self, ping):
+ if not super(FOGDocTypePingFilter, self).__call__(ping):
+ return False
+
+ # Verify that the given ping was submitted to the URL for the doc_type
+ return ping["request_url"]["doc_type"] == self.doc_type
+
+
+FOG_DELETION_REQUEST_PING = FOGDocTypePingFilter("deletion-request")
+FOG_ONE_PING_ONLY_PING = FOGDocTypePingFilter("one-ping-only")
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_server.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_server.py
new file mode 100644
index 0000000000..7d84b40d77
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_ping_server.py
@@ -0,0 +1,77 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import zlib
+
+from marionette_harness.runner import httpd
+from mozlog import get_default_logger
+from six.moves.urllib import parse as urlparse
+
+
+class FOGPingServer(object):
+ """HTTP server for receiving Firefox on Glean pings."""
+
+ def __init__(self, server_root, url):
+ self._logger = get_default_logger(component="fog_ping_server")
+ self.pings = []
+
+ @httpd.handlers.handler
+ def pings_handler(request, response):
+ """Handler for HTTP requests to the ping server."""
+ request_data = request.body
+
+ if request.headers.get("Content-Encoding") == "gzip":
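+ # wbits of MAX_WBITS | 16 tells zlib to expect gzip framing (header and checksum)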
+ request_data = zlib.decompress(request_data, zlib.MAX_WBITS | 16)
+
+ request_url = request.route_match.copy()
+
+ self.pings.append(
+ {
+ "request_url": request_url,
+ "payload": json.loads(request_data),
+ "debug_tag": request.headers.get("X-Debug-ID"),
+ }
+ )
+
+ self._logger.info(
+ "pings_handler received '{}' ping".format(request_url["doc_type"])
+ )
+
+ status_code = 200
+ content = "OK"
+ headers = [
+ ("Content-Type", "text/plain"),
+ ("Content-Length", len(content)),
+ ]
+
+ return (status_code, headers, content)
+
+ self._httpd = httpd.FixtureServer(server_root, url=url)
+
+ # See https://mozilla.github.io/glean/book/user/pings/index.html#ping-submission
+ self._httpd.router.register(
+ "POST",
+ "/submit/{application_id}/{doc_type}/{glean_schema_version}/{document_id}",
+ pings_handler,
+ )
+
+ @property
+ def url(self):
+ """Return the URL for the running HTTP FixtureServer."""
+ return self._httpd.get_url("/")
+
+ @property
+ def port(self):
+ """Return the port for the running HTTP FixtureServer."""
+ parse_result = urlparse.urlparse(self.url)
+ return parse_result.port
+
+ def start(self):
+ """Start the HTTP FixtureServer."""
+ return self._httpd.start()
+
+ def stop(self):
+ """Stop the HTTP FixtureServer."""
+ return self._httpd.stop()
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_testcase.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_testcase.py
new file mode 100644
index 0000000000..fad2781a80
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/fog_testcase.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozlog
+from telemetry_harness.fog_ping_server import FOGPingServer
+from telemetry_harness.testcase import TelemetryTestCase
+
+
+class FOGTestCase(TelemetryTestCase):
+ """Base testcase class for project FOG."""
+
+ def __init__(self, *args, **kwargs):
+ """Initialize the test case and create a ping server."""
+ super(FOGTestCase, self).__init__(*args, **kwargs)
+ self._logger = mozlog.get_default_logger(component="FOGTestCase")
+
+ def setUp(self, *args, **kwargs):
+ """Set up the test case and create a FOG ping server.
+
+ This test is skipped if the build doesn't support FOG.
+ """
+ super(FOGTestCase, self).setUp(*args, **kwargs)
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ fog_present = self.marionette.execute_script(
+ "return AppConstants.MOZ_GLEAN;"
+ )
+
+ if not fog_present:
+ # Before we skip this test, we need to quit marionette and the ping
+ # server created in TelemetryTestCase by running tearDown
+ super(FOGTestCase, self).tearDown(*args, **kwargs)
+ self.skipTest("FOG is only present in AppConstants.MOZ_GLEAN builds.")
+
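+ # Port 0 asks the OS for a free ephemeral port; the chosen port is read back below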
+ self.fog_ping_server = FOGPingServer(
+ self.testvars["server_root"], "http://localhost:0"
+ )
+ self.fog_ping_server.start()
+
+ self._logger.info(
+ "Submitting to FOG ping server at {}".format(self.fog_ping_server.url)
+ )
+
+ self.marionette.enforce_gecko_prefs(
+ {
+ "telemetry.fog.test.localhost_port": self.fog_ping_server.port,
+ }
+ )
+
+ def tearDown(self, *args, **kwargs):
+ super(FOGTestCase, self).tearDown(*args, **kwargs)
+ self.fog_ping_server.stop()
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_filters.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_filters.py
new file mode 100644
index 0000000000..6e003b25d5
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_filters.py
@@ -0,0 +1,75 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+class PingFilter(object):
+ """Ping filter that accepts any pings."""
+
+ def __call__(self, ping):
+ return True
+
+
+class DeletionRequestPingFilter(PingFilter):
+ """Ping filter that accepts deletion-request pings."""
+
+ def __call__(self, ping):
+ if not super(DeletionRequestPingFilter, self).__call__(ping):
+ return False
+
+ return ping["type"] == "deletion-request"
+
+
+class EventPingFilter(PingFilter):
+ """Ping filter that accepts event pings."""
+
+ def __call__(self, ping):
+ if not super(EventPingFilter, self).__call__(ping):
+ return False
+
+ return ping["type"] == "event"
+
+
+class FirstShutdownPingFilter(PingFilter):
+ """Ping filter that accepts first-shutdown pings."""
+
+ def __call__(self, ping):
+ if not super(FirstShutdownPingFilter, self).__call__(ping):
+ return False
+
+ return ping["type"] == "first-shutdown"
+
+
+class MainPingFilter(PingFilter):
+ """Ping filter that accepts main pings."""
+
+ def __call__(self, ping):
+ if not super(MainPingFilter, self).__call__(ping):
+ return False
+
+ return ping["type"] == "main"
+
+
+class MainPingReasonFilter(MainPingFilter):
+ """Ping filter that accepts main pings that match the
+ specified reason.
+ """
+
+ def __init__(self, reason):
+ super(MainPingReasonFilter, self).__init__()
+ self.reason = reason
+
+ def __call__(self, ping):
+ if not super(MainPingReasonFilter, self).__call__(ping):
+ return False
+
+ return ping["payload"]["info"]["reason"] == self.reason
+
+
+ANY_PING = PingFilter()
+DELETION_REQUEST_PING = DeletionRequestPingFilter()
+EVENT_PING = EventPingFilter()
+FIRST_SHUTDOWN_PING = FirstShutdownPingFilter()
+MAIN_PING = MainPingFilter()
+MAIN_SHUTDOWN_PING = MainPingReasonFilter("shutdown")
+MAIN_ENVIRONMENT_CHANGE_PING = MainPingReasonFilter("environment-change")
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_server.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_server.py
new file mode 100644
index 0000000000..d05c265504
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/ping_server.py
@@ -0,0 +1,65 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import zlib
+
+import mozlog
+
+from marionette_harness.runner import httpd
+
+
+class PingServer(object):
+ """HTTP server for receiving Firefox Client Telemetry pings."""
+
+ def __init__(self, server_root, url):
+ self._logger = mozlog.get_default_logger(component="pingserver")
+ self.pings = []
+
+ @httpd.handlers.handler
+ def pings_handler(request, response):
+ """Handler for HTTP requests to the ping server."""
+ request_data = request.body
+
+ if request.headers.get("Content-Encoding") == "gzip":
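+ # wbits of MAX_WBITS | 16 tells zlib to expect gzip framing (header and checksum)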
+ request_data = zlib.decompress(request_data, zlib.MAX_WBITS | 16)
+
+ ping_data = json.loads(request_data)
+
+ # Store JSON data to self.pings to be used by wait_for_pings()
+ self.pings.append(ping_data)
+
+ ping_type = ping_data["type"]
+
+ log_message = "pings_handler received '{}' ping".format(ping_type)
+
+ if ping_type == "main":
+ ping_reason = ping_data["payload"]["info"]["reason"]
+ log_message = "{} with reason '{}'".format(log_message, ping_reason)
+
+ self._logger.info(log_message)
+
+ status_code = 200
+ content = "OK"
+ headers = [
+ ("Content-Type", "text/plain"),
+ ("Content-Length", len(content)),
+ ]
+
+ return (status_code, headers, content)
+
+ self._httpd = httpd.FixtureServer(server_root, url=url)
+ self._httpd.router.register("POST", "/pings*", pings_handler)
+
+ def get_url(self, *args, **kwargs):
+ """Return a URL from the HTTP server."""
+ return self._httpd.get_url(*args, **kwargs)
+
+ def start(self):
+ """Start the HTTP server."""
+ return self._httpd.start()
+
+ def stop(self):
+ """Stop the HTTP server."""
+ return self._httpd.stop()
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/helloworld.html b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/helloworld.html
new file mode 100644
index 0000000000..146ad025d9
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/helloworld.html
@@ -0,0 +1,18 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8">
+ <style>
+ body {
+ background-color: lightgrey;
+ }
+ p {
+ font-size: 25px;
+ padding: 25px 50px;
+ }
+ </style>
+ </head>
+ <body>
+ <p>Hello World!</p>
+ </body>
+</html>
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/manifest.json b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/manifest.json
new file mode 100644
index 0000000000..0e35d8a2e3
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/resources/helloworld/manifest.json
@@ -0,0 +1,12 @@
+{
+ "description": "Extension to be installed in telemetry-tests-client tests.",
+ "manifest_version": 2,
+ "name": "helloworld",
+ "version": "1.0",
+ "homepage_url": "https://hg.mozilla.org/mozilla-central/",
+ "browser_action": {
+ "browser_style": true,
+ "default_title": "Hello World",
+ "default_popup": "helloworld.html"
+ }
+}
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py
new file mode 100644
index 0000000000..d95b17c03c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runner.py
@@ -0,0 +1,56 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from marionette_harness import BaseMarionetteTestRunner
+from testcase import TelemetryTestCase
+
+SERVER_URL = "http://localhost:8000"
+
+
+class TelemetryTestRunner(BaseMarionetteTestRunner):
+ """TestRunner for the telemetry-tests-client suite."""
+
+ def __init__(self, **kwargs):
+ """Set test variables and preferences specific to Firefox client
+ telemetry.
+ """
+
+ # Select the appropriate GeckoInstance
+ kwargs["app"] = "fxdesktop"
+
+ prefs = kwargs.pop("prefs", {})
+
+ # Set Firefox Client Telemetry specific preferences
+ prefs.update(
+ {
+ # Clear the region detection url to
+ # * avoid net access in tests
+ # * stabilize browser.search.region to avoid extra subsessions (bug 1579840#c40)
+ "browser.region.network.url": "",
+ # Disable smart sizing because it changes prefs at startup. (bug 1547750)
+ "browser.cache.disk.smart_size.enabled": False,
+ "toolkit.telemetry.server": "{}/pings".format(SERVER_URL),
+ "telemetry.fog.test.localhost_port": -1,
+ "toolkit.telemetry.initDelay": 1,
+ "toolkit.telemetry.minSubsessionLength": 0,
+ "datareporting.healthreport.uploadEnabled": True,
+ "datareporting.policy.dataSubmissionEnabled": True,
+ "datareporting.policy.dataSubmissionPolicyBypassNotification": True,
+ "toolkit.telemetry.log.level": "Trace",
+ "toolkit.telemetry.log.dump": True,
+ "toolkit.telemetry.send.overrideOfficialCheck": True,
+ "toolkit.telemetry.testing.disableFuzzingDelay": True,
+ # Disable Normandy to avoid extra subsessions due to Experiment
+ # activation in tests (bug 1641571)
+ "app.normandy.enabled": False,
+ }
+ )
+
+ super(TelemetryTestRunner, self).__init__(prefs=prefs, **kwargs)
+
+ self.testvars["server_root"] = kwargs["server_root"]
+ self.testvars["server_url"] = SERVER_URL
+
+ self.test_handlers = [TelemetryTestCase]
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py
new file mode 100644
index 0000000000..9b4b8872c9
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/runtests.py
@@ -0,0 +1,14 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from marionette_harness.runtests import cli as mn_cli
+from runner import TelemetryTestRunner
+
+
+def cli(args=None):
+ mn_cli(runner_class=TelemetryTestRunner, args=args)
+
+
+if __name__ == "__main__":
+ cli()
diff --git a/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py
new file mode 100644
index 0000000000..b4646e6565
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/harness/telemetry_harness/testcase.py
@@ -0,0 +1,242 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import contextlib
+import os
+import re
+import textwrap
+
+from marionette_driver.addons import Addons
+from marionette_driver.errors import MarionetteException
+from marionette_driver.wait import Wait
+from marionette_driver import By, keys
+from marionette_harness import MarionetteTestCase
+from marionette_harness.runner.mixins.window_manager import WindowManagerMixin
+
+from ping_server import PingServer
+
+
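+# Known client ID reported while Telemetry upload is disabled; see assertIsValidUUID() below.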
+CANARY_CLIENT_ID = "c0ffeec0-ffee-c0ff-eec0-ffeec0ffeec0"
+UUID_PATTERN = re.compile(
+ r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+)
+
+
+class TelemetryTestCase(WindowManagerMixin, MarionetteTestCase):
+ def __init__(self, *args, **kwargs):
+ """Initialize the test case and create a ping server."""
+ super(TelemetryTestCase, self).__init__(*args, **kwargs)
+
+ self.ping_server = PingServer(
+ self.testvars["server_root"], self.testvars["server_url"]
+ )
+
+ def setUp(self, *args, **kwargs):
+ """Set up the test case and start the ping server."""
+ super(TelemetryTestCase, self).setUp(*args, **kwargs)
+
+ # Store IDs of addons installed via self.install_addon()
+ self.addon_ids = []
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
+ self.marionette.navigate("about:about")
+
+ self.ping_server.start()
+
+ def disable_telemetry(self):
+ """Disable the Firefox Data Collection and Use in the current browser."""
+ self.marionette.instance.profile.set_persistent_preferences(
+ {"datareporting.healthreport.uploadEnabled": False}
+ )
+ self.marionette.set_pref("datareporting.healthreport.uploadEnabled", False)
+
+ def enable_telemetry(self):
+ """Enable the Firefox Data Collection and Use in the current browser."""
+ self.marionette.instance.profile.set_persistent_preferences(
+ {"datareporting.healthreport.uploadEnabled": True}
+ )
+ self.marionette.set_pref("datareporting.healthreport.uploadEnabled", True)
+
+ @contextlib.contextmanager
+ def new_tab(self):
+ """Perform operations in a new tab and then close the new tab."""
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ start_tab = self.marionette.current_window_handle
+ new_tab = self.open_tab(focus=True)
+ self.marionette.switch_to_window(new_tab)
+
+ yield
+
+ self.marionette.close()
+ self.marionette.switch_to_window(start_tab)
+
+ def search(self, text):
+ """Perform a search via the browser's URL bar."""
+
+ # Reload about:newtab so that the urlbar reliably accepts the typed input
+ with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
+ self.marionette.navigate("about:newtab")
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script("gURLBar.select();")
+ urlbar = self.marionette.find_element(By.ID, "urlbar-input")
+ urlbar.send_keys(keys.Keys.DELETE)
+ urlbar.send_keys(text + keys.Keys.ENTER)
+ # This script checks that the search terms used for searching
+ # appear in the URL when the page loads.
+ script = """\
+ let location = document.location.toString()
+ function validate(term){
+ return location.includes(term)
+ }
+ return arguments[0].every(validate)
+ """
+ # Wait for search page to load
+ with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
+ Wait(self.marionette, 30, 0.5).until(
+ lambda driver: driver.execute_script(
+ script, script_args=[text.split()]
+ ),
+ message="Search terms not found, maybe the page didn't load?",
+ )
+
+ def search_in_new_tab(self, text):
+ """Open a new tab and perform a search via the browser's URL bar,
+ then close the new tab."""
+
+ with self.new_tab():
+ self.search(text)
+
+ def assertIsValidUUID(self, value):
+ """Check if the given UUID is valid."""
+
+ self.assertIsNotNone(value)
+ self.assertNotEqual(value, "")
+
+ # Check for client ID that is used when Telemetry upload is disabled
+ self.assertNotEqual(value, CANARY_CLIENT_ID, msg="UUID is CANARY CLIENT ID")
+
+ self.assertIsNotNone(
+ re.match(UUID_PATTERN, value),
+ msg="UUID does not match regular expression",
+ )
+
+ def wait_for_pings(self, action_func, ping_filter, count, ping_server=None):
+ """Call the given action and wait for pings to come in and return
+ the `count` number of pings, that match the given filter.
+ """
+
+ if ping_server is None:
+ ping_server = self.ping_server
+
+ # Keep track of the current number of pings
+ current_num_pings = len(ping_server.pings)
+
+ # New list to store new pings that satisfy the filter
+ filtered_pings = []
+
+ def wait_func(*args, **kwargs):
+ # Ignore existing pings in ping_server.pings
+ new_pings = ping_server.pings[current_num_pings:]
+
+ # Filter pings to make sure we wait for the correct ping type
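+ # Slice assignment mutates filtered_pings in place, so the results are visible outside wait_func()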
+ filtered_pings[:] = [p for p in new_pings if ping_filter(p)]
+
+ return len(filtered_pings) >= count
+
+ self.logger.info(
+ "wait_for_pings running action '{action}'.".format(
+ action=action_func.__name__
+ )
+ )
+
+ # Call given action and wait for a ping
+ action_func()
+
+ try:
+ Wait(self.marionette, 60).until(wait_func)
+ except Exception as e:
+ self.fail("Error waiting for ping: {}".format(e.message))
+
+ return filtered_pings[:count]
+
+ def wait_for_ping(self, action_func, ping_filter, ping_server=None):
+ """Call wait_for_pings() with the given action_func and ping_filter and
+ return the first result.
+ """
+ [ping] = self.wait_for_pings(
+ action_func, ping_filter, 1, ping_server=ping_server
+ )
+ return ping
+
+ def restart_browser(self):
+ """Restarts browser while maintaining the same profile."""
+ return self.marionette.restart(clean=False, in_app=True)
+
+ def start_browser(self):
+ """Start the browser."""
+ return self.marionette.start_session()
+
+ def quit_browser(self):
+ """Quit the browser."""
+ return self.marionette.quit(in_app=True)
+
+ def install_addon(self):
+ """Install a minimal addon and add its ID to self.addon_ids."""
+
+ resources_dir = os.path.join(os.path.dirname(__file__), "resources")
+ addon_path = os.path.abspath(os.path.join(resources_dir, "helloworld"))
+
+ try:
+ # Ensure the Environment has init'd so the installed addon
+ # triggers an "environment-change" ping.
+ script = """\
+ let [resolve] = arguments;
+ Cu.import("resource://gre/modules/TelemetryEnvironment.jsm");
+ TelemetryEnvironment.onInitialized().then(resolve);
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_async_script(textwrap.dedent(script))
+
+ addons = Addons(self.marionette)
+ addon_id = addons.install(addon_path, temp=True)
+ except MarionetteException as e:
+ self.fail("{} - Error installing addon: {} - ".format(e.cause, e.message))
+ else:
+ self.addon_ids.append(addon_id)
+
+ def set_persistent_profile_preferences(self, preferences):
+ """Wrapper for setting persistent preferences on a user profile"""
+ return self.marionette.instance.profile.set_persistent_preferences(preferences)
+
+ def set_preferences(self, preferences):
+ """Wrapper for setting persistent preferences on a user profile"""
+ return self.marionette.set_prefs(preferences)
+
+ @property
+ def client_id(self):
+ """Return the ID of the current client."""
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ return self.marionette.execute_script(
+ 'Cu.import("resource://gre/modules/ClientID.jsm");'
+ "return ClientID.getCachedClientID();"
+ )
+
+ @property
+ def subsession_id(self):
+ """Return the ID of the current subsession."""
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ ping_data = self.marionette.execute_script(
+ 'Cu.import("resource://gre/modules/TelemetryController.jsm");'
+ "return TelemetryController.getCurrentPingData(true);"
+ )
+ return ping_data[u"payload"][u"info"][u"subsessionId"]
+
+ def tearDown(self, *args, **kwargs):
+ """Stop the ping server and tear down the testcase."""
+ super(TelemetryTestCase, self).tearDown()
+ self.ping_server.stop()
+ self.marionette.quit(clean=True)
diff --git a/toolkit/components/telemetry/tests/marionette/mach_commands.py b/toolkit/components/telemetry/tests/marionette/mach_commands.py
new file mode 100644
index 0000000000..783563baa3
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/mach_commands.py
@@ -0,0 +1,101 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import logging
+import os
+import sys
+
+from mach.decorators import CommandProvider, Command
+
+from mozbuild.base import (
+ MachCommandBase,
+ MachCommandConditions as conditions,
+ BinaryNotFoundException,
+)
+
+
+def create_parser_tests():
+ from marionette_harness.runtests import MarionetteArguments
+ from mozlog.structured import commandline
+
+ parser = MarionetteArguments()
+ commandline.add_logging_group(parser)
+ return parser
+
+
+def run_telemetry(tests, binary=None, topsrcdir=None, **kwargs):
+ from mozlog.structured import commandline
+
+ from telemetry_harness.runtests import TelemetryTestRunner
+
+ from marionette_harness.runtests import MarionetteHarness
+
+ parser = create_parser_tests()
+
+ if not tests:
+ tests = [
+ os.path.join(
+ topsrcdir,
+ "toolkit/components/telemetry/tests/marionette/tests/manifest.ini",
+ )
+ ]
+
+ args = argparse.Namespace(tests=tests)
+
+ args.binary = binary
+ args.logger = kwargs.pop("log", None)
+
+ for k, v in kwargs.items():
+ setattr(args, k, v)
+
+ parser.verify_usage(args)
+
+ os.environ["MOZ_IGNORE_NSS_SHUTDOWN_LEAKS"] = "1"
+
+ if not args.logger:
+ args.logger = commandline.setup_logging(
+ "Telemetry Client Tests", args, {"mach": sys.stdout}
+ )
+ failed = MarionetteHarness(TelemetryTestRunner, args=vars(args)).run()
+ if failed > 0:
+ return 1
+ return 0
+
+
+@CommandProvider
+class TelemetryTest(MachCommandBase):
+ @Command(
+ "telemetry-tests-client",
+ category="testing",
+ description="Run tests specifically for the Telemetry client",
+ conditions=[conditions.is_firefox_or_android],
+ parser=create_parser_tests,
+ )
+ def telemetry_test(self, tests, **kwargs):
+ if "test_objects" in kwargs:
+ tests = []
+ for obj in kwargs["test_objects"]:
+ tests.append(obj["file_relpath"])
+ del kwargs["test_objects"]
+ if not kwargs.get("binary") and conditions.is_firefox(self):
+ try:
+ kwargs["binary"] = self.get_binary_path("app")
+ except BinaryNotFoundException as e:
+ self.log(
+ logging.ERROR,
+ "telemetry-tests-client",
+ {"error": str(e)},
+ "ERROR: {error}",
+ )
+ self.log(
+ logging.INFO, "telemetry-tests-client", {"help": e.help()}, "{help}"
+ )
+ return 1
+ if not kwargs.get("server_root"):
+ kwargs[
+ "server_root"
+ ] = "toolkit/components/telemetry/tests/marionette/harness/www"
+ return run_telemetry(tests, topsrcdir=self.topsrcdir, **kwargs)
diff --git a/toolkit/components/telemetry/tests/marionette/moz.build b/toolkit/components/telemetry/tests/marionette/moz.build
new file mode 100644
index 0000000000..7a6aa0bc15
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/moz.build
@@ -0,0 +1,11 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+TELEMETRY_TESTS_CLIENT_MANIFESTS += ["tests/manifest.ini"]
+
+with Files("**"):
+ BUG_COMPONENT = ("Toolkit", "Telemetry")
+ SCHEDULES.exclusive = ["telemetry-tests-client"]
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini b/toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini
new file mode 100644
index 0000000000..ec5d061640
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/manifest.ini
@@ -0,0 +1,10 @@
+[DEFAULT]
+tags = client
+
+[test_deletion_request_ping.py]
+[test_event_ping.py]
+[test_main_tab_scalars.py]
+[test_search_counts_across_sessions.py]
+[test_subsession_management.py]
+[test_fog_deletion_request_ping.py]
+[test_fog_custom_ping.py]
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/test_deletion_request_ping.py b/toolkit/components/telemetry/tests/marionette/tests/client/test_deletion_request_ping.py
new file mode 100644
index 0000000000..cb07c6b2db
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/test_deletion_request_ping.py
@@ -0,0 +1,64 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from telemetry_harness.testcase import TelemetryTestCase
+from telemetry_harness.ping_filters import (
+ ANY_PING,
+ DELETION_REQUEST_PING,
+ MAIN_SHUTDOWN_PING,
+)
+
+
+class TestDeletionRequestPing(TelemetryTestCase):
+ """Tests for "deletion-request" ping."""
+
+ def test_deletion_request_ping_across_sessions(self):
+ """Test the "deletion-request" ping behaviour across sessions."""
+
+ # Get the client_id.
+ client_id = self.wait_for_ping(self.install_addon, ANY_PING)["clientId"]
+ self.assertIsValidUUID(client_id)
+
+ # Trigger an "deletion-request" ping.
+ ping = self.wait_for_ping(self.disable_telemetry, DELETION_REQUEST_PING)
+
+ self.assertIn("clientId", ping)
+ self.assertIn("payload", ping)
+ self.assertNotIn("environment", ping["payload"])
+
+ # Close Firefox cleanly.
+ self.quit_browser()
+
+ # TODO: Check pending pings aren't persisted
+
+ # Start Firefox.
+ self.start_browser()
+
+ # Trigger an environment change, which isn't allowed to send a ping.
+ self.install_addon()
+
+ # Ensure we've sent no pings since "disabling telemetry".
+ self.assertEqual(self.ping_server.pings[-1], ping)
+
+ # Turn Telemetry back on.
+ self.enable_telemetry()
+
+ # Close Firefox cleanly, collecting its "main"/"shutdown" ping.
+ main_ping = self.wait_for_ping(self.restart_browser, MAIN_SHUTDOWN_PING)
+
+ # Ensure the "main" ping has changed its client id.
+ self.assertIn("clientId", main_ping)
+ self.assertIsValidUUID(main_ping["clientId"])
+ self.assertNotEqual(main_ping["clientId"], client_id)
+
+ # Ensure we note in the ping that the user opted in.
+ parent_scalars = main_ping["payload"]["processes"]["parent"]["scalars"]
+
+ self.assertIn("telemetry.data_upload_optin", parent_scalars)
+ self.assertIs(parent_scalars["telemetry.data_upload_optin"], True)
+
+ # Ensure all pings sent during this test don't have the c0ffee client id.
+ for ping in self.ping_server.pings:
+ if "clientId" in ping:
+ self.assertIsValidUUID(ping["clientId"])
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/test_event_ping.py b/toolkit/components/telemetry/tests/marionette/tests/client/test_event_ping.py
new file mode 100644
index 0000000000..97062642c0
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/test_event_ping.py
@@ -0,0 +1,68 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import textwrap
+
+from telemetry_harness.testcase import TelemetryTestCase
+from telemetry_harness.ping_filters import EVENT_PING
+
+
+class TestEventPing(TelemetryTestCase):
+ """Tests for "event" ping."""
+
+ def enable_search_events(self):
+ """
+ Event Telemetry categories are disabled by default.
+ Search events are in the "navigation" category and are not enabled by
+ default in builds of Firefox, so we enable them here.
+ """
+
+ script = """\
+ let {Services} = ChromeUtils.import("resource://gre/modules/Services.jsm");
+ Services.telemetry.setEventRecordingEnabled("navigation", true);
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script(textwrap.dedent(script))
+
+ def wait_for_search_service_init(self):
+ script = """\
+ let [resolve] = arguments;
+ let searchService = Components.classes["@mozilla.org/browser/search-service;1"]
+ .getService(Components.interfaces.nsISearchService);
+ searchService.init().then(resolve);
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_async_script(textwrap.dedent(script))
+
+ def test_event_ping(self):
+ """
+ Barebones test for "event" ping:
+ Search, close Firefox, check "event" ping for search events.
+ """
+
+ self.enable_search_events()
+ self.wait_for_search_service_init()
+
+ self.search("mozilla firefox")
+
+ payload = self.wait_for_ping(self.restart_browser, EVENT_PING)["payload"]
+
+ self.assertEqual(payload["reason"], "shutdown")
+ self.assertEqual(payload["lostEventsCount"], 0)
+
+ self.assertIn("events", payload)
+ self.assertIn("parent", payload["events"])
+ found_it = False
+
+ for event in payload["events"]["parent"]:
+ # The event may optionally contain additional fields
+ [timestamp, category, method, obj] = event[:4]
+
+ self.assertTrue(timestamp > 0)
+ if category == "navigation" and method == "search" and obj == "urlbar":
+ found_it = True
+
+ self.assertTrue(found_it)
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/test_fog_custom_ping.py b/toolkit/components/telemetry/tests/marionette/tests/client/test_fog_custom_ping.py
new file mode 100644
index 0000000000..11bf07472c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/test_fog_custom_ping.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from telemetry_harness.fog_ping_filters import FOG_ONE_PING_ONLY_PING
+from telemetry_harness.fog_testcase import FOGTestCase
+
+
+class TestDeletionRequestPing(FOGTestCase):
+ """Tests for the "one-ping-only" FOG custom ping."""
+
+ def test_one_ping_only_ping(self):
+ def send_opo_ping(marionette):
+ ping_sending_script = "GleanPings.onePingOnly.submit();"
+ with marionette.using_context(marionette.CONTEXT_CHROME):
+ marionette.execute_script(ping_sending_script)
+
+ ping1 = self.wait_for_ping(
+ lambda: send_opo_ping(self.marionette),
+ FOG_ONE_PING_ONLY_PING,
+ ping_server=self.fog_ping_server,
+ )
+
+ self.assertNotIn("client_id", ping1["payload"]["client_info"])
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/test_fog_deletion_request_ping.py b/toolkit/components/telemetry/tests/marionette/tests/client/test_fog_deletion_request_ping.py
new file mode 100644
index 0000000000..c273bc52be
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/test_fog_deletion_request_ping.py
@@ -0,0 +1,65 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import textwrap
+
+from telemetry_harness.fog_ping_filters import FOG_DELETION_REQUEST_PING
+from telemetry_harness.fog_testcase import FOGTestCase
+
+
+class TestDeletionRequestPing(FOGTestCase):
+ """Tests for FOG deletion-request ping."""
+
+ def test_deletion_request_ping_across_sessions(self):
+ """Test the "deletion-request" ping behaviour across sessions."""
+
+ self.search_in_new_tab("mozilla firefox")
+
+ ping1 = self.wait_for_ping(
+ self.disable_telemetry,
+ FOG_DELETION_REQUEST_PING,
+ ping_server=self.fog_ping_server,
+ )
+
+ self.assertIn("ping_info", ping1["payload"])
+ self.assertIn("client_info", ping1["payload"])
+
+ self.assertIn("client_id", ping1["payload"]["client_info"])
+ client_id1 = ping1["payload"]["client_info"]["client_id"]
+ self.assertIsValidUUID(client_id1)
+
+ self.restart_browser()
+
+ self.assertEqual(self.fog_ping_server.pings[-1], ping1)
+
+ self.enable_telemetry()
+ self.restart_browser()
+
+ debug_tag = "my-test-tag"
+ tagging_script = """\
+ let FOG = Components.classes["@mozilla.org/toolkit/glean;1"]
+ .createInstance(Components.interfaces.nsIFOG);
+ FOG.setTagPings("{}");
+ """.format(
+ debug_tag
+ )
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script(textwrap.dedent(tagging_script))
+ self.search_in_new_tab("python unittest")
+
+ ping2 = self.wait_for_ping(
+ self.disable_telemetry,
+ FOG_DELETION_REQUEST_PING,
+ ping_server=self.fog_ping_server,
+ )
+
+ self.assertEqual(ping2["debug_tag"], debug_tag)
+
+ self.assertIn("client_id", ping2["payload"]["client_info"])
+ client_id2 = ping2["payload"]["client_info"]["client_id"]
+ self.assertIsValidUUID(client_id2)
+
+ # Verify that FOG creates a new client ID when a user
+ # opts out of sending technical and interaction data.
+ self.assertNotEqual(client_id2, client_id1)
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/test_main_tab_scalars.py b/toolkit/components/telemetry/tests/marionette/tests/client/test_main_tab_scalars.py
new file mode 100644
index 0000000000..8c9102b932
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/test_main_tab_scalars.py
@@ -0,0 +1,39 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from telemetry_harness.testcase import TelemetryTestCase
+from telemetry_harness.ping_filters import MAIN_SHUTDOWN_PING
+
+
+class TestMainTabScalars(TelemetryTestCase):
+ """Tests for Telemetry Scalars."""
+
+ def test_main_tab_scalars(self):
+ """Test for Telemetry Scalars."""
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ start_tab = self.marionette.current_window_handle
+
+ tab2 = self.open_tab(focus=True)
+ self.marionette.switch_to_window(tab2)
+
+ tab3 = self.open_tab(focus=True)
+ self.marionette.switch_to_window(tab3)
+
+ self.marionette.close()
+ self.marionette.switch_to_window(tab2)
+
+ self.marionette.close()
+ self.marionette.switch_to_window(start_tab)
+
+ ping = self.wait_for_ping(self.restart_browser, MAIN_SHUTDOWN_PING)
+
+ self.assertEqual(ping["type"], "main")
+ self.assertEqual(ping["clientId"], self.client_id)
+
+ scalars = ping["payload"]["processes"]["parent"]["scalars"]
+
+ self.assertEqual(scalars["browser.engagement.max_concurrent_tab_count"], 3)
+ self.assertEqual(scalars["browser.engagement.tab_open_event_count"], 2)
+ self.assertEqual(scalars["browser.engagement.max_concurrent_window_count"], 1)
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/test_search_counts_across_sessions.py b/toolkit/components/telemetry/tests/marionette/tests/client/test_search_counts_across_sessions.py
new file mode 100644
index 0000000000..1ae7378891
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/test_search_counts_across_sessions.py
@@ -0,0 +1,212 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import textwrap
+
+from telemetry_harness.testcase import TelemetryTestCase
+from telemetry_harness.ping_filters import (
+ MAIN_ENVIRONMENT_CHANGE_PING,
+ MAIN_SHUTDOWN_PING,
+)
+
+
+class TestSearchCounts(TelemetryTestCase):
+ """Test for SEARCH_COUNTS across sessions."""
+
+ def get_default_search_engine(self):
+ """Retrieve the identifier of the default search engine.
+
+ We found that it's required to initialize the search service before
+ attempting to retrieve the default search engine. Not calling init
+ would result in a JavaScript error (see bug 1543960 for more
+ information).
+ """
+
+ script = """\
+ let [resolve] = arguments;
+ let searchService = Components.classes[
+ "@mozilla.org/browser/search-service;1"]
+ .getService(Components.interfaces.nsISearchService);
+ return searchService.init().then(function () {
+ resolve(searchService.defaultEngine.identifier);
+ });
+ """
+
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ return self.marionette.execute_async_script(textwrap.dedent(script))
+
+ def setUp(self):
+ """Set up the test case and store the identifier of the default
+ search engine, which is required for reading SEARCH_COUNTS from
+ keyed histograms in pings.
+ """
+ super(TestSearchCounts, self).setUp()
+ self.search_engine = self.get_default_search_engine()
+
+ def test_search_counts(self):
+ """Test for SEARCH_COUNTS across sessions."""
+
+ # Session S1, subsession 1:
+ # - Open browser
+ # - Open new tab
+ # - Perform search (awesome bar or search bar)
+ # - Restart browser in new session
+
+ self.search_in_new_tab("mozilla firefox")
+
+ ping1 = self.wait_for_ping(self.restart_browser, MAIN_SHUTDOWN_PING)
+
+ # Session S2, subsession 1:
+ # - Outcome 1
+ # - Received a main ping P1 for previous session
+ # - Ping base contents:
+ # - clientId should be set
+ # - sessionId should be set
+ # - subsessionId should be set
+ # - previousSessionId should not be set
+ # - previousSubsessionId should not be set
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 1
+ # - reason should be "shutdown"
+ # - Other ping contents:
+ # - SEARCH_COUNTS values should match performed search action
+
+ client_id = ping1["clientId"]
+ self.assertIsValidUUID(client_id)
+
+ ping1_info = ping1["payload"]["info"]
+ self.assertEqual(ping1_info["reason"], "shutdown")
+
+ s1_session_id = ping1_info["sessionId"]
+ self.assertNotEqual(s1_session_id, "")
+
+ s1_s1_subsession_id = ping1_info["subsessionId"]
+ self.assertNotEqual(s1_s1_subsession_id, "")
+
+ self.assertIsNone(ping1_info["previousSessionId"])
+ self.assertIsNone(ping1_info["previousSubsessionId"])
+ self.assertEqual(ping1_info["subsessionCounter"], 1)
+ self.assertEqual(ping1_info["profileSubsessionCounter"], 1)
+
+ scalars1 = ping1["payload"]["processes"]["parent"]["scalars"]
+ self.assertNotIn("browser.engagement.window_open_event_count", scalars1)
+ self.assertEqual(scalars1["browser.engagement.tab_open_event_count"], 1)
+
+ keyed_histograms1 = ping1["payload"]["keyedHistograms"]
+ search_counts1 = keyed_histograms1["SEARCH_COUNTS"][
+ "{}.urlbar".format(self.search_engine)
+ ]
+ self.assertEqual(
+ search_counts1,
+ {
+ u"range": [1, 2],
+ u"bucket_count": 3,
+ u"histogram_type": 4,
+ u"values": {u"1": 0, u"0": 1},
+ u"sum": 1,
+ },
+ )
+
+ # - Install addon
+ # Session S2, subsession 2:
+ # - Outcome 2
+ # - Received a main ping P2 for previous subsession
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to a new value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P1's subsessionId value
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 2
+ # - reason should be "environment-change"
+ # - Other ping contents:
+ # - SEARCH_COUNTS values should not be in P2
+ # - There should be no listing for the tab open scalar, as we started a
+ # new session
+
+ ping2 = self.wait_for_ping(self.install_addon, MAIN_ENVIRONMENT_CHANGE_PING)
+
+ self.assertEqual(ping2["clientId"], client_id)
+
+ ping2_info = ping2["payload"]["info"]
+ self.assertEqual(ping2_info["reason"], "environment-change")
+
+ s2_session_id = ping2_info["sessionId"]
+ self.assertNotEqual(s2_session_id, s1_session_id)
+
+ s2_s1_subsession_id = ping2_info["subsessionId"]
+ self.assertNotEqual(s2_s1_subsession_id, s1_s1_subsession_id)
+
+ self.assertEqual(ping2_info["previousSessionId"], s1_session_id)
+ self.assertEqual(ping2_info["previousSubsessionId"], s1_s1_subsession_id)
+ self.assertEqual(ping2_info["subsessionCounter"], 1)
+ self.assertEqual(ping2_info["profileSubsessionCounter"], 2)
+
+ scalars2 = ping2["payload"]["processes"]["parent"]["scalars"]
+ self.assertNotIn("browser.engagement.window_open_event_count", scalars2)
+ self.assertNotIn("browser.engagement.tab_open_event_count", scalars2)
+
+ keyed_histograms2 = ping2["payload"]["keyedHistograms"]
+ self.assertNotIn("SEARCH_COUNTS", keyed_histograms2)
+
+ # - Perform Search
+ # - Restart Browser
+
+ self.search("mozilla telemetry")
+ self.search("python unittest")
+ self.search("python pytest")
+
+ ping3 = self.wait_for_ping(self.restart_browser, MAIN_SHUTDOWN_PING)
+
+ # Session S3, subsession 1:
+ # - Outcome 3
+ # - Received a main ping P3 for session 2, subsession 2
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to P2's sessionId value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P2's subsessionId value
+ # - subSessionCounter should be 2
+ # - profileSubSessionCounter should be 3
+ # - reason should be "shutdown"
+ # - Other ping contents:
+ # - SEARCH_COUNTS values should be set per above search
+
+ self.assertEqual(ping3["clientId"], client_id)
+
+ ping3_info = ping3["payload"]["info"]
+
+ self.assertEqual(ping3_info["reason"], "shutdown")
+
+ self.assertEqual(ping3_info["sessionId"], s2_session_id)
+
+ s2_s2_subsession_id = ping3_info["subsessionId"]
+ self.assertNotEqual(s2_s2_subsession_id, s1_s1_subsession_id)
+ self.assertNotEqual(s2_s2_subsession_id, s2_s1_subsession_id)
+
+ self.assertEqual(ping3_info["previousSessionId"], s1_session_id)
+ self.assertEqual(ping3_info["previousSubsessionId"], s2_s1_subsession_id)
+ self.assertEqual(ping3_info["subsessionCounter"], 2)
+ self.assertEqual(ping3_info["profileSubsessionCounter"], 3)
+
+ scalars3 = ping3["payload"]["processes"]["parent"]["scalars"]
+ self.assertNotIn("browser.engagement.window_open_event_count", scalars3)
+ self.assertNotIn("browser.engagement.tab_open_event_count", scalars3)
+
+ keyed_histograms3 = ping3["payload"]["keyedHistograms"]
+ search_counts3 = keyed_histograms3["SEARCH_COUNTS"][
+ "{}.urlbar".format(self.search_engine)
+ ]
+ self.assertEqual(
+ search_counts3,
+ {
+ u"range": [1, 2],
+ u"bucket_count": 3,
+ u"histogram_type": 4,
+ u"values": {u"1": 0, u"0": 3},
+ u"sum": 3,
+ },
+ )
diff --git a/toolkit/components/telemetry/tests/marionette/tests/client/test_subsession_management.py b/toolkit/components/telemetry/tests/marionette/tests/client/test_subsession_management.py
new file mode 100644
index 0000000000..79b7835e5c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/client/test_subsession_management.py
@@ -0,0 +1,147 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from telemetry_harness.testcase import TelemetryTestCase
+from telemetry_harness.ping_filters import (
+ MAIN_ENVIRONMENT_CHANGE_PING,
+ MAIN_SHUTDOWN_PING,
+)
+
+
+class TestSubsessionManagement(TelemetryTestCase):
+ """Tests for Firefox Telemetry subsession management."""
+
+ def test_subsession_management(self):
+ """Test for Firefox Telemetry subsession management."""
+
+ # Session S1, subsession 1
+ # Actions:
+ # 1. Open browser
+ # 2. Open a new tab
+ # 3. Restart browser in new session
+
+ with self.new_tab():
+ # If Firefox Telemetry is working correctly, this will
+ # be sufficient to record a tab open event.
+ pass
+
+ ping1 = self.wait_for_ping(self.restart_browser, MAIN_SHUTDOWN_PING)
+
+ # Session S2, subsession 1
+ # Outcome 1:
+ # Received a main ping P1 for previous session
+ # - Ping base contents:
+ # - clientId should be a valid UUID
+ # - reason should be "shutdown"
+ # - sessionId should be set
+ # - subsessionId should be set
+ # - previousSessionId should not be set
+ # - previousSubsessionId should not be set
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 1
+ # - Other ping contents:
+ # - tab_open_event_count in scalars
+
+ client_id = ping1["clientId"]
+ self.assertIsValidUUID(client_id)
+
+ ping1_info = ping1["payload"]["info"]
+ self.assertEqual(ping1_info["reason"], "shutdown")
+
+ s1_session_id = ping1_info["sessionId"]
+ self.assertNotEqual(s1_session_id, "")
+
+ s1_s1_subsession_id = ping1_info["subsessionId"]
+ self.assertNotEqual(s1_s1_subsession_id, "")
+ self.assertIsNone(ping1_info["previousSessionId"])
+ self.assertIsNone(ping1_info["previousSubsessionId"])
+ self.assertEqual(ping1_info["subsessionCounter"], 1)
+ self.assertEqual(ping1_info["profileSubsessionCounter"], 1)
+
+ scalars1 = ping1["payload"]["processes"]["parent"]["scalars"]
+ self.assertNotIn("browser.engagement.window_open_event_count", scalars1)
+ self.assertEqual(scalars1["browser.engagement.tab_open_event_count"], 1)
+
+ # Actions:
+ # 1. Install addon
+
+ ping2 = self.wait_for_ping(self.install_addon, MAIN_ENVIRONMENT_CHANGE_PING)
+
+ [addon_id] = self.addon_ids # Store the addon ID for verifying ping3 later
+
+ # Session S2, subsession 2
+ # Outcome 2:
+ # Received a main ping P2 for previous subsession
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to a new value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P1's subsessionId value
+ # - subSessionCounter should be 1
+ # - profileSubSessionCounter should be 2
+ # - reason should be "environment-change"
+ # - Other ping contents:
+ # - tab_open_event_count not in scalars
+
+ self.assertEqual(ping2["clientId"], client_id)
+
+ ping2_info = ping2["payload"]["info"]
+ self.assertEqual(ping2_info["reason"], "environment-change")
+
+ s2_session_id = ping2_info["sessionId"]
+ self.assertNotEqual(s2_session_id, s1_session_id)
+
+ s2_s1_subsession_id = ping2_info["subsessionId"]
+ self.assertNotEqual(s2_s1_subsession_id, s1_s1_subsession_id)
+ self.assertEqual(ping2_info["previousSessionId"], s1_session_id)
+ self.assertEqual(ping2_info["previousSubsessionId"], s1_s1_subsession_id)
+ self.assertEqual(ping2_info["subsessionCounter"], 1)
+ self.assertEqual(ping2_info["profileSubsessionCounter"], 2)
+
+ scalars2 = ping2["payload"]["processes"]["parent"]["scalars"]
+ self.assertNotIn("browser.engagement.window_open_event_count", scalars2)
+ self.assertNotIn("browser.engagement.tab_open_event_count", scalars2)
+
+ # Actions
+ # 1. Restart browser in new session
+
+ ping3 = self.wait_for_ping(self.restart_browser, MAIN_SHUTDOWN_PING)
+
+ # Session S3, subsession 1
+ # Outcome 3:
+ # Received a main ping P3 for session 2, subsession 2
+ # - Ping base contents:
+ # - clientId should be set to the same value
+ # - sessionId should be set to P2's sessionId value
+ # - subsessionId should be set to a new value
+ # - previousSessionId should be set to P1's sessionId value
+ # - previousSubsessionId should be set to P2's subsessionId value
+ # - subSessionCounter should be 2
+ # - profileSubSessionCounter should be 3
+ # - reason should be "shutdown"
+ # - Other ping contents:
+ # - addon ID in activeAddons in environment
+
+ self.assertEqual(ping3["clientId"], client_id)
+
+ ping3_info = ping3["payload"]["info"]
+ self.assertEqual(ping3_info["reason"], "shutdown")
+
+ self.assertEqual(ping3_info["sessionId"], s2_session_id)
+
+ s2_s2_subsession_id = ping3_info["subsessionId"]
+ self.assertNotEqual(s2_s2_subsession_id, s1_s1_subsession_id)
+ self.assertNotEqual(s2_s2_subsession_id, s2_s1_subsession_id)
+ self.assertEqual(ping3_info["previousSessionId"], s1_session_id)
+ self.assertEqual(ping3_info["previousSubsessionId"], s2_s1_subsession_id)
+ self.assertEqual(ping3_info["subsessionCounter"], 2)
+ self.assertEqual(ping3_info["profileSubsessionCounter"], 3)
+
+ scalars3 = ping3["payload"]["processes"]["parent"]["scalars"]
+ self.assertNotIn("browser.engagement.window_open_event_count", scalars3)
+ self.assertNotIn("browser.engagement.tab_open_event_count", scalars3)
+
+ active_addons = ping3["environment"]["addons"]["activeAddons"]
+ self.assertIn(addon_id, active_addons)
diff --git a/toolkit/components/telemetry/tests/marionette/tests/manifest.ini b/toolkit/components/telemetry/tests/marionette/tests/manifest.ini
new file mode 100644
index 0000000000..b5e6f442e9
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/manifest.ini
@@ -0,0 +1,2 @@
+[include:unit/manifest.ini]
+[include:client/manifest.ini]
diff --git a/toolkit/components/telemetry/tests/marionette/tests/unit/manifest.ini b/toolkit/components/telemetry/tests/marionette/tests/unit/manifest.ini
new file mode 100644
index 0000000000..9bb2de707a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/unit/manifest.ini
@@ -0,0 +1,4 @@
+[DEFAULT]
+tags = unit
+
+[test_ping_server_received_ping.py]
\ No newline at end of file
diff --git a/toolkit/components/telemetry/tests/marionette/tests/unit/test_ping_server_received_ping.py b/toolkit/components/telemetry/tests/marionette/tests/unit/test_ping_server_received_ping.py
new file mode 100644
index 0000000000..71da6bef8f
--- /dev/null
+++ b/toolkit/components/telemetry/tests/marionette/tests/unit/test_ping_server_received_ping.py
@@ -0,0 +1,46 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/
+
+import requests
+
+from telemetry_harness.testcase import TelemetryTestCase
+
+
+class TestPingServer(TelemetryTestCase):
+ def setUp(self, *args, **kwargs):
+ """Set up the test case retrieve the pings URL."""
+ super(TestPingServer, self).setUp(*args, **kwargs)
+ self.pings_url = self.ping_server.get_url("/pings")
+
+ def test_ping_server_received_ping(self):
+ ping_type = "server-test-ping"
+ ping_reason = "unit-test"
+
+ def send_ping_request():
+ """Perform a POST request to the ping server."""
+ data = {"type": ping_type, "reason": ping_reason}
+ headers = {
+ "Content-type": "application/json",
+ "Accept": "text/plain",
+ }
+
+ response = requests.post(self.pings_url, json=data, headers=headers)
+
+ self.assertEqual(
+ response.status_code,
+ 200,
+ msg="Error sending POST request to ping server: {response.text}".format(
+ response=response
+ ),
+ )
+ return response
+
+ def ping_filter_func(ping):
+ return ping["type"] == ping_type
+
+ [ping] = self.wait_for_pings(send_ping_request, ping_filter_func, 1)
+
+ self.assertIsNotNone(ping)
+ self.assertEqual(ping["type"], ping_type)
+ self.assertEqual(ping["reason"], ping_reason)
diff --git a/toolkit/components/telemetry/tests/modules-test.cpp b/toolkit/components/telemetry/tests/modules-test.cpp
new file mode 100644
index 0000000000..c82d3f497d
--- /dev/null
+++ b/toolkit/components/telemetry/tests/modules-test.cpp
@@ -0,0 +1,27 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/*
+ * This source file is used to build different shared libraries:
+ *
+ * - libmodules-test; it is automatically built by our build system (see the
+ * moz.build in the same directory as this file);
+ *
+ * - testUnicodePDB32.dll and testUnicodePDB64.dll; they can be built by
+ * compiling this source file using MSVC and setting the target name to be
+ * "libmodμles", then renaming the resulting file:
+ * cl /Os /Zi modules-test.cpp /LINK /DLL /OUT:libmodμles.dll \
+ * /nodefaultlib /entry:nothing /opt:ref
+ * copy libmodμles.dll testUnicodePDB*ARCH*.dll
+ *
+ * - testNoPDB32.dll and testNoPDB64.dll; they can be built by compiling this
+ * file using MSVC, without enabling generation of a PDB:
+ * cl /Os modules-test.cpp /LINK /DLL /OUT:testNoPDB*ARCH*.dll \
+ * /nodefaultlib /entry:nothing
+ *
+ * Clearly, for testUnicodePDB and testNoPDB both a 32-bit and a 64-bit version
+ * have to be compiled, using the 32-bit and 64-bit MSVC toolchains.
+ *
+ */
+
+void nothing() {}
diff --git a/toolkit/components/telemetry/tests/moz.build b/toolkit/components/telemetry/tests/moz.build
new file mode 100644
index 0000000000..c25b364321
--- /dev/null
+++ b/toolkit/components/telemetry/tests/moz.build
@@ -0,0 +1,25 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DIST_INSTALL = False
+
+SOURCES += [
+ "modules-test.cpp",
+]
+
+SharedLibrary("modules-test")
+
+NO_PGO = True
+
+TESTING_JS_MODULES += [
+ "utils/TelemetryTestUtils.jsm",
+]
+
+if CONFIG["COMPILE_ENVIRONMENT"]:
+ shared_library = "!%smodules-test%s" % (CONFIG["DLL_PREFIX"], CONFIG["DLL_SUFFIX"])
+ TEST_HARNESS_FILES.xpcshell.toolkit.components.telemetry.tests.unit += [
+ shared_library
+ ]
diff --git a/toolkit/components/telemetry/tests/python/python.ini b/toolkit/components/telemetry/tests/python/python.ini
new file mode 100644
index 0000000000..2830282744
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/python.ini
@@ -0,0 +1,11 @@
+[DEFAULT]
+skip-if = python == 3
+subsuite = telemetry-python
+
+[test_gen_event_data_json.py]
+[test_gen_scalar_data_json.py]
+[test_histogramtools_non_strict.py]
+[test_histogramtools_strict.py]
+[test_parse_events.py]
+[test_parse_scalars.py]
+[test_usecounters.py]
diff --git a/toolkit/components/telemetry/tests/python/test_gen_event_data_json.py b/toolkit/components/telemetry/tests/python/test_gen_event_data_json.py
new file mode 100644
index 0000000000..57fe5579c7
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/test_gen_event_data_json.py
@@ -0,0 +1,102 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import mozunit
+import os
+import sys
+import tempfile
+import unittest
+from StringIO import StringIO
+from os import path
+
+TELEMETRY_ROOT_PATH = path.abspath(
+ path.join(path.dirname(__file__), path.pardir, path.pardir)
+)
+sys.path.append(TELEMETRY_ROOT_PATH)
+# The generators live in "build_scripts", account for that.
+# NOTE: if the generators are moved, this logic will need to be updated.
+sys.path.append(path.join(TELEMETRY_ROOT_PATH, "build_scripts"))
+import gen_event_data # noqa: E402
+
+
+class TestEventDataJson(unittest.TestCase):
+
+ maxDiff = None
+
+ def test_JSON_definitions_generation(self):
+ EVENTS_YAML = """
+with.optout:
+ testme1:
+ objects: ["test1"]
+ bug_numbers: [1456415]
+ notification_emails: ["telemetry-client-dev@mozilla.org"]
+ record_in_processes: ["main"]
+ description: opt-out event
+ release_channel_collection: opt-out
+ expiry_version: never
+ products:
+ - firefox
+ extra_keys:
+ message: a message 1
+with.optin:
+ testme2:
+ objects: ["test2"]
+ bug_numbers: [1456415]
+ notification_emails: ["telemetry-client-dev@mozilla.org"]
+ record_in_processes: ["main"]
+ description: opt-in event
+ release_channel_collection: opt-in
+ expiry_version: never
+ products: ['firefox', 'fennec']
+ extra_keys:
+ message: a message 2
+ """
+
+ EXPECTED_JSON = {
+ "with.optout": {
+ "testme1": {
+ "objects": ["test1"],
+ "expired": False,
+ "expires": "never",
+ "methods": ["testme1"],
+ "extra_keys": ["message"],
+ "record_on_release": True,
+ "products": ["firefox"],
+ }
+ },
+ "with.optin": {
+ "testme2": {
+ "objects": ["test2"],
+ "expired": False,
+ "expires": "never",
+ "methods": ["testme2"],
+ "extra_keys": ["message"],
+ "record_on_release": False,
+ "products": ["firefox", "fennec"],
+ }
+ },
+ }
+
+ io = StringIO()
+ try:
+ tmpfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
+ # Write the event definition to the temporary file
+ tmpfile.write(EVENTS_YAML)
+ tmpfile.close()
+
+ # Let the parser generate the artifact definitions
+ gen_event_data.generate_JSON_definitions(io, tmpfile.name)
+ finally:
+ if tmpfile:
+ os.unlink(tmpfile.name)
+
+ event_definitions = json.loads(io.getvalue())
+
+ # Check that it generated the correct data
+ self.assertEqual(EXPECTED_JSON, event_definitions)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/python/test_gen_scalar_data_json.py b/toolkit/components/telemetry/tests/python/test_gen_scalar_data_json.py
new file mode 100644
index 0000000000..aecf8e4b9d
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/test_gen_scalar_data_json.py
@@ -0,0 +1,100 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import mozunit
+import os
+import sys
+import tempfile
+import unittest
+from StringIO import StringIO
+from os import path
+
+TELEMETRY_ROOT_PATH = path.abspath(
+ path.join(path.dirname(__file__), path.pardir, path.pardir)
+)
+sys.path.append(TELEMETRY_ROOT_PATH)
+# The generators live in "build_scripts", account for that.
+sys.path.append(path.join(TELEMETRY_ROOT_PATH, "build_scripts"))
+import gen_scalar_data # noqa: E402
+
+
+class TestScalarDataJson(unittest.TestCase):
+
+ maxDiff = None
+
+ def test_JSON_definitions_generation(self):
+ SCALARS_YAML = """
+newscalar:
+ withoptin:
+ bug_numbers:
+ - 1456415
+ description: opt-in scalar
+ expires: never
+ kind: uint
+ notification_emails: ["telemetry-client-dev@mozilla.org"]
+ record_in_processes: ["main"]
+ release_channel_collection: opt-in
+ products:
+ - firefox
+ keyed: false
+ withoptout:
+ bug_numbers:
+ - 1456415
+ description: opt-out scalar
+ expires: never
+ kind: string
+ notification_emails: ["telemetry-client-dev@mozilla.org"]
+ record_in_processes: ["main"]
+ release_channel_collection: opt-out
+ products: ["firefox", "fennec"]
+ keyed: false
+ """
+
+ EXPECTED_JSON = {
+ "newscalar": {
+ "withoptout": {
+ "kind": "nsITelemetry::SCALAR_TYPE_STRING",
+ "expired": False,
+ "expires": "never",
+ "record_on_release": True,
+ "keyed": False,
+ "keys": [],
+ "stores": ["main"],
+ "products": ["firefox", "fennec"],
+ },
+ "withoptin": {
+ "kind": "nsITelemetry::SCALAR_TYPE_COUNT",
+ "expired": False,
+ "expires": "never",
+ "record_on_release": False,
+ "keyed": False,
+ "keys": [],
+ "stores": ["main"],
+ "products": ["firefox"],
+ },
+ }
+ }
+
+ io = StringIO()
+ try:
+ tmpfile = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
+ # Write the scalar definition to the temporary file
+ tmpfile.write(SCALARS_YAML)
+ tmpfile.close()
+
+ # Let the parser generate the artifact definitions
+ gen_scalar_data.generate_JSON_definitions(io, tmpfile.name)
+ finally:
+ if tmpfile:
+ os.unlink(tmpfile.name)
+
+ scalar_definitions = json.loads(io.getvalue())
+
+ # Check that it generated the correct data
+ self.assertEqual(EXPECTED_JSON, scalar_definitions)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/python/test_histogramtools_non_strict.py b/toolkit/components/telemetry/tests/python/test_histogramtools_non_strict.py
new file mode 100644
index 0000000000..056cb8a0e2
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/test_histogramtools_non_strict.py
@@ -0,0 +1,145 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import mozunit
+import sys
+import unittest
+from os import path
+
+TELEMETRY_ROOT_PATH = path.abspath(
+ path.join(path.dirname(__file__), path.pardir, path.pardir)
+)
+sys.path.append(TELEMETRY_ROOT_PATH)
+# The parsers live in a subdirectory of "build_scripts", account for that.
+# NOTE: if the parsers are moved, this logic will need to be updated.
+sys.path.append(path.join(TELEMETRY_ROOT_PATH, "build_scripts"))
+from mozparsers import parse_histograms # noqa: E402
+
+
+def load_histogram(histograms):
+ """Parse the passed Histogram and return a dictionary mapping histogram
+ names to histogram parameters.
+
+ :param histograms: histograms as a Python dictionary
+ :returns: Parsed Histogram dictionary mapping histogram names to histogram parameters
+ """
+
+ def hook(ps):
+ return parse_histograms.load_histograms_into_dict(ps, strict_type_checks=False)
+
+ return json.loads(json.dumps(histograms), object_pairs_hook=hook)
+
+
+class TestParser(unittest.TestCase):
+ def test_unknown_field(self):
+ SAMPLE_HISTOGRAM = {
+ "A11Y_INSTANTIATED_FLAG": {
+ "record_in_processes": ["main", "content"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "has accessibility support been instantiated",
+ "new_field": "Its a new field",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+
+ hist = parse_histograms.Histogram(
+ "A11Y_INSTANTIATED_FLAG",
+ histograms["A11Y_INSTANTIATED_FLAG"],
+ strict_type_checks=False,
+ )
+ self.assertEqual(hist.expiration(), "never")
+ self.assertEqual(hist.kind(), "flag")
+ self.assertEqual(hist.record_in_processes(), ["main", "content"])
+
+ def test_non_numeric_expressions(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_NON_NUMERIC_HISTOGRAM": {
+ "kind": "linear",
+ "description": "sample",
+ "n_buckets": "JS::GCReason::NUM_TELEMETRY_REASONS",
+ "high": "mozilla::StartupTimeline::MAX_EVENT_ID",
+ }
+ }
+
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ hist = parse_histograms.Histogram(
+ "TEST_NON_NUMERIC_HISTOGRAM",
+ histograms["TEST_NON_NUMERIC_HISTOGRAM"],
+ strict_type_checks=False,
+ )
+
+ # Expected values come from parse_histograms.py.
+ self.assertEqual(hist.n_buckets(), 101)
+ self.assertEqual(hist.high(), 12)
+
+ def test_devtools_database_parsing(self):
+ db = path.join(
+ TELEMETRY_ROOT_PATH,
+ path.pardir,
+ path.pardir,
+ path.pardir,
+ "devtools",
+ "shared",
+ "css",
+ "generated",
+ "properties-db.js",
+ )
+
+ histograms = list(parse_histograms.from_files([db], strict_type_checks=False))
+ histograms = [h.name() for h in histograms]
+
+ # Test a shorthand (animation)
+ self.assertTrue("USE_COUNTER2_CSS_PROPERTY_Animation_DOCUMENT" in histograms)
+
+ # Test a shorthand alias (-moz-animation).
+ self.assertTrue("USE_COUNTER2_CSS_PROPERTY_MozAnimation_DOCUMENT" in histograms)
+
+ # Test a longhand (animation-name)
+ self.assertTrue(
+ "USE_COUNTER2_CSS_PROPERTY_AnimationName_DOCUMENT" in histograms
+ )
+
+ # Test a longhand alias (-moz-animation-name)
+ self.assertTrue(
+ "USE_COUNTER2_CSS_PROPERTY_MozAnimationName_DOCUMENT" in histograms
+ )
+
+ def test_current_histogram(self):
+ HISTOGRAMS_PATH = path.join(TELEMETRY_ROOT_PATH, "Histograms.json")
+ all_histograms = list(
+ parse_histograms.from_files([HISTOGRAMS_PATH], strict_type_checks=False)
+ )
+ test_histogram = [
+ i for i in all_histograms if i.name() == "TELEMETRY_TEST_FLAG"
+ ][0]
+
+ self.assertEqual(test_histogram.expiration(), "never")
+ self.assertEqual(test_histogram.kind(), "flag")
+ self.assertEqual(test_histogram.record_in_processes(), ["main", "content"])
+ self.assertEqual(test_histogram.keyed(), False)
+
+ def test_no_products(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_EMPTY_PRODUCTS": {
+ "kind": "flag",
+ "description": "sample",
+ }
+ }
+
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ hist = parse_histograms.Histogram(
+ "TEST_EMPTY_PRODUCTS",
+ histograms["TEST_EMPTY_PRODUCTS"],
+ strict_type_checks=False,
+ )
+
+ self.assertEqual(hist.kind(), "flag")
+ # bug 1486072: an absent `products` key becomes None instead of ["all"]
+ self.assertEqual(hist.products(), None)
+
+
+if __name__ == "__main__":
+ mozunit.main()
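
One detail in this file that is easy to miss: load_histogram() round-trips the sample dictionary through json.dumps/json.loads with an object_pairs_hook, so parse_histograms.load_histograms_into_dict receives each JSON object as an ordered list of (key, value) pairs, much as the real parser does when it reads definitions off disk. A tiny, telemetry-free illustration of the hook:

    import json

    def keep_pairs(pairs):
        # json calls this hook with the object's (key, value) tuples in
        # document order; whatever it returns is used as the parsed object.
        print(pairs)
        return dict(pairs)

    doc = {"first": 1, "second": {"nested": True}}
    round_tripped = json.loads(json.dumps(doc), object_pairs_hook=keep_pairs)
    # Prints [('nested', True)] for the inner object, then
    # [('first', 1), ('second', {'nested': True})] for the outer one.
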
diff --git a/toolkit/components/telemetry/tests/python/test_histogramtools_strict.py b/toolkit/components/telemetry/tests/python/test_histogramtools_strict.py
new file mode 100644
index 0000000000..d4efe250d7
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/test_histogramtools_strict.py
@@ -0,0 +1,565 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import sys
+import unittest
+import os
+from os import path
+from test_histogramtools_non_strict import load_histogram
+
+TELEMETRY_ROOT_PATH = path.abspath(
+ path.join(path.dirname(__file__), path.pardir, path.pardir)
+)
+sys.path.append(TELEMETRY_ROOT_PATH)
+# The parsers live in a subdirectory of "build_scripts", account for that.
+# NOTE: if the parsers are moved, this logic will need to be updated.
+sys.path.append(path.join(TELEMETRY_ROOT_PATH, "build_scripts"))
+from mozparsers.shared_telemetry_utils import ParserError
+from mozparsers import parse_histograms
+
+
+class TestParser(unittest.TestCase):
+ def setUp(self):
+ def mockexit(x):
+ raise SystemExit(x)
+
+ self.oldexit = os._exit
+ os._exit = mockexit
+
+ def tearDown(self):
+ os._exit = self.oldexit
+
+ def test_valid_histogram(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_VALID_HISTOGRAM": {
+ "record_in_processes": ["main", "content", "socket"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ hist = parse_histograms.Histogram(
+ "TEST_VALID_HISTOGRAM",
+ histograms["TEST_VALID_HISTOGRAM"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertTrue(hist.expiration(), "never")
+ self.assertTrue(hist.kind(), "boolean")
+ self.assertTrue(hist.record_in_processes, ["main", "content"])
+ self.assertTrue(hist.record_into_store, ["main"])
+
+ def test_missing_bug_numbers(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALLOWLIST_BUG_NUMBERS": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_BUG_NUMBERS",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_BUG_NUMBERS"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ # Set global allowlists for parse_histograms.
+ parse_histograms.allowlists = {
+ "alert_emails": [],
+ "bug_numbers": ["TEST_HISTOGRAM_ALLOWLIST_BUG_NUMBERS"],
+ "n_buckets": [],
+ "expiry_default": [],
+ "kind": [],
+ }
+
+ hist = parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_BUG_NUMBERS",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_BUG_NUMBERS"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEqual(hist.expiration(), "never")
+ self.assertEqual(hist.kind(), "boolean")
+ self.assertEqual(hist.record_in_processes(), ["main", "content"])
+ self.assertEqual(hist.keyed(), False)
+
+ parse_histograms.allowlists = None
+
+ def test_missing_alert_emails(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALLOWLIST_ALERT_EMAILS": {
+ "record_in_processes": ["main", "content"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_ALERT_EMAILS",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_ALERT_EMAILS"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ # Set global allowlists for parse_histograms.
+ parse_histograms.allowlists = {
+ "alert_emails": ["TEST_HISTOGRAM_ALLOWLIST_ALERT_EMAILS"],
+ "bug_numbers": [],
+ "n_buckets": [],
+ "expiry_default": [],
+ "kind": [],
+ }
+
+ hist = parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_ALERT_EMAILS",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_ALERT_EMAILS"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEqual(hist.expiration(), "never")
+ self.assertEqual(hist.kind(), "boolean")
+ self.assertEqual(hist.record_in_processes(), ["main", "content"])
+ self.assertEqual(hist.keyed(), False)
+
+ parse_histograms.allowlists = None
+
+ def test_high_value(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 2 ** 64,
+ "n_buckets": 100,
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_high_n_buckets(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 16777216,
+ "n_buckets": 200,
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ # Set global allowlists for parse_histograms.
+ parse_histograms.allowlists = {
+ "alert_emails": [],
+ "bug_numbers": [],
+ "n_buckets": ["TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS"],
+ "expiry_default": [],
+ "kind": [],
+ }
+
+ hist = parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_N_BUCKETS"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEqual(hist.expiration(), "never")
+ self.assertEqual(hist.kind(), "exponential")
+ self.assertEqual(hist.record_in_processes(), ["main", "content"])
+ self.assertEqual(hist.keyed(), False)
+ self.assertEqual(hist.low(), 1024)
+ self.assertEqual(hist.high(), 16777216)
+ self.assertEqual(hist.n_buckets(), 200)
+
+ parse_histograms.allowlists = None
+
+ def test_expiry_default(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALLOWLIST_EXPIRY_DEFAULT": {
+ "record_in_processes": ["main", "content"],
+ "expires_in_version": "default",
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "kind": "boolean",
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_EXPIRY_DEFAULT",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_EXPIRY_DEFAULT"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ # Set global allowlists for parse_histograms.
+ parse_histograms.allowlists = {
+ "alert_emails": [],
+ "bug_numbers": [],
+ "n_buckets": [],
+ "expiry_default": ["TEST_HISTOGRAM_ALLOWLIST_EXPIRY_DEFAULT"],
+ "kind": [],
+ }
+
+ hist = parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_EXPIRY_DEFAULT",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_EXPIRY_DEFAULT"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEqual(hist.expiration(), "default")
+ self.assertEqual(hist.kind(), "boolean")
+ self.assertEqual(hist.record_in_processes(), ["main", "content"])
+ self.assertEqual(hist.keyed(), False)
+
+ parse_histograms.allowlists = None
+
+ def test_unsupported_kind_count(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALLOWLIST_KIND": {
+ "record_in_processes": ["main", "content"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ self.assertRaises(
+ SystemExit,
+ parse_histograms.Histogram,
+ "TEST_HISTOGRAM_ALLOWLIST_KIND",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_KIND"],
+ strict_type_checks=True,
+ )
+
+ # Set global allowlists for parse_histograms.
+ parse_histograms.allowlists = {
+ "alert_emails": [],
+ "bug_numbers": [],
+ "n_buckets": [],
+ "expiry_default": [],
+ "kind": ["TEST_HISTOGRAM_ALLOWLIST_KIND"],
+ }
+
+ hist = parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_KIND",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_KIND"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEqual(hist.expiration(), "never")
+ self.assertEqual(hist.kind(), "count")
+ self.assertEqual(hist.record_in_processes(), ["main", "content"])
+ self.assertEqual(hist.keyed(), False)
+
+ parse_histograms.allowlists = None
+
+ def test_unsupported_kind_flag(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALLOWLIST_KIND": {
+ "record_in_processes": ["main", "content"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ self.assertRaises(
+ SystemExit,
+ parse_histograms.Histogram,
+ "TEST_HISTOGRAM_ALLOWLIST_KIND",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_KIND"],
+ strict_type_checks=True,
+ )
+
+ # Set global allowlists for parse_histograms.
+ parse_histograms.allowlists = {
+ "alert_emails": [],
+ "bug_numbers": [],
+ "n_buckets": [],
+ "expiry_default": [],
+ "kind": ["TEST_HISTOGRAM_ALLOWLIST_KIND"],
+ }
+
+ hist = parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALLOWLIST_KIND",
+ histograms["TEST_HISTOGRAM_ALLOWLIST_KIND"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEqual(hist.expiration(), "never")
+ self.assertEqual(hist.kind(), "flag")
+ self.assertEqual(hist.record_in_processes(), ["main", "content"])
+ self.assertEqual(hist.keyed(), False)
+
+ parse_histograms.allowlists = None
+
+ def test_multistore(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_VALID_HISTOGRAM": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Test histogram",
+ "products": ["firefox"],
+ "record_into_store": ["main", "sync"],
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ hist = parse_histograms.Histogram(
+ "TEST_VALID_HISTOGRAM",
+ histograms["TEST_VALID_HISTOGRAM"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertTrue(hist.expiration(), "never")
+ self.assertTrue(hist.kind(), "boolean")
+ self.assertTrue(hist.record_into_store, ["main", "sync"])
+
+ def test_multistore_empty(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_EMPTY_MULTISTORE": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Test histogram",
+ "products": ["firefox"],
+ "record_into_store": [],
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_EMPTY_MULTISTORE",
+ histograms["TEST_HISTOGRAM_EMPTY_MULTISTORE"],
+ strict_type_checks=True,
+ )
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_products_absent(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_NO_PRODUCTS": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ def test_parse():
+ return parse_histograms.Histogram(
+ "TEST_NO_PRODUCTS",
+ histograms["TEST_NO_PRODUCTS"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, test_parse)
+
+ def test_products_empty(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_EMPTY_PRODUCTS": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Test histogram",
+ "products": [],
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ def test_parse():
+ return parse_histograms.Histogram(
+ "TEST_EMPTY_PRODUCTS",
+ histograms["TEST_EMPTY_PRODUCTS"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, test_parse)
+
+ def test_products_all(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_ALL_PRODUCTS": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Test histogram",
+ "products": ["all"],
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_ALL_PRODUCTS",
+ histograms["TEST_HISTOGRAM_ALL_PRODUCTS"],
+ strict_type_checks=True,
+ )
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_gv_streaming_unsupported_kind(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_GV_STREAMING": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Test histogram",
+ "products": ["geckoview_streaming"],
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_GV_STREAMING",
+ histograms["TEST_HISTOGRAM_GV_STREAMING"],
+ strict_type_checks=True,
+ )
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_gv_streaming_keyed(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_HISTOGRAM_GV_STREAMING": {
+ "record_in_processes": ["main", "content"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 2 ** 64,
+ "n_buckets": 100,
+ "keyed": "true",
+ "description": "Test histogram",
+ "products": ["geckoview_streaming"],
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+ parse_histograms.Histogram(
+ "TEST_HISTOGRAM_GV_STREAMING",
+ histograms["TEST_HISTOGRAM_GV_STREAMING"],
+ strict_type_checks=True,
+ )
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_enumerated_histogram_with_100_buckets(self):
+ SAMPLE_HISTOGRAM = {
+ "TEST_100_BUCKETS_HISTOGRAM": {
+ "record_in_processes": ["main", "content", "socket"],
+ "alert_emails": ["team@mozilla.xyz"],
+ "bug_numbers": [1383793],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "products": ["firefox"],
+ "description": "Test histogram",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ hist = parse_histograms.Histogram(
+ "TEST_100_BUCKETS_HISTOGRAM",
+ histograms["TEST_100_BUCKETS_HISTOGRAM"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertTrue(hist.expiration(), "never")
+ self.assertTrue(hist.kind(), "enumerated")
+ self.assertTrue(hist.n_buckets(), 101)
+ self.assertTrue(hist.record_in_processes, ["main", "content"])
+ self.assertTrue(hist.record_into_store, ["main"])
+
+
+if __name__ == "__main__":
+ mozunit.main()
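
Most of the allowlist tests above repeat the same scaffolding: parse once and confirm that strict checking flags a ParserError, install a one-entry parse_histograms.allowlists dictionary, parse again successfully, and finally reset the allowlists. If that boilerplate were ever factored out, a context manager along these lines would capture the pattern (a sketch only, not part of this patch; it assumes parse_histograms is imported as in the tests above):

    from contextlib import contextmanager

    @contextmanager
    def allowlisted(histogram_name, section):
        """Temporarily allowlist `histogram_name` for one check ("bug_numbers",
        "alert_emails", "n_buckets", "expiry_default" or "kind"), restoring the
        module-global allowlists afterwards."""
        saved = parse_histograms.allowlists
        allowlists = {
            "alert_emails": [],
            "bug_numbers": [],
            "n_buckets": [],
            "expiry_default": [],
            "kind": [],
        }
        allowlists[section] = [histogram_name]
        parse_histograms.allowlists = allowlists
        try:
            yield
        finally:
            parse_histograms.allowlists = saved

    # Hypothetical usage inside a test:
    #     with allowlisted("TEST_HISTOGRAM_ALLOWLIST_KIND", "kind"):
    #         hist = parse_histograms.Histogram(name, definition,
    #                                           strict_type_checks=True)
    #         ParserError.exit_func()
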
diff --git a/toolkit/components/telemetry/tests/python/test_parse_events.py b/toolkit/components/telemetry/tests/python/test_parse_events.py
new file mode 100644
index 0000000000..23111630f1
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/test_parse_events.py
@@ -0,0 +1,165 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import yaml
+import mozunit
+import sys
+import unittest
+import os
+from os import path
+
+TELEMETRY_ROOT_PATH = path.abspath(
+ path.join(path.dirname(__file__), path.pardir, path.pardir)
+)
+sys.path.append(TELEMETRY_ROOT_PATH)
+# The parsers live in a subdirectory of "build_scripts", account for that.
+# NOTE: if the parsers are moved, this logic will need to be updated.
+sys.path.append(path.join(TELEMETRY_ROOT_PATH, "build_scripts"))
+from mozparsers.shared_telemetry_utils import ParserError
+from mozparsers import parse_events
+
+
+def load_event(event):
+ """Parse the passed event and return a dictionary
+
+ :param event: Event as YAML string
+ :returns: Parsed Event dictionary
+ """
+ return yaml.safe_load(event)
+
+
+class TestParser(unittest.TestCase):
+ def setUp(self):
+ def mockexit(x):
+ raise SystemExit(x)
+
+ self.oldexit = os._exit
+ os._exit = mockexit
+
+ def tearDown(self):
+ os._exit = self.oldexit
+
+ def test_valid_event_defaults(self):
+ SAMPLE_EVENT = """
+objects: ["object1", "object2"]
+bug_numbers: [12345]
+notification_emails: ["test01@mozilla.com", "test02@mozilla.com"]
+record_in_processes: ["main"]
+description: This is a test entry for Telemetry.
+products: ["firefox"]
+expiry_version: never
+"""
+ name = "test_event"
+ event = load_event(SAMPLE_EVENT)
+ evt = parse_events.EventData("CATEGORY", name, event, strict_type_checks=True)
+ ParserError.exit_func()
+
+ self.assertEqual(evt.methods, [name])
+ self.assertEqual(evt.record_in_processes, ["main"])
+ self.assertEqual(evt.objects, ["object1", "object2"])
+ self.assertEqual(evt.products, ["firefox"])
+ self.assertEqual(evt.operating_systems, ["all"])
+ self.assertEqual(evt.extra_keys, [])
+
+ def test_wrong_collection(self):
+ SAMPLE_EVENT = """
+objects: ["object1", "object2"]
+bug_numbers: [12345]
+notification_emails: ["test01@mozilla.com", "test02@mozilla.com"]
+record_in_processes: ["main"]
+description: This is a test entry for Telemetry.
+expiry_version: never
+products: ["firefox"]
+release_channel_collection: none
+"""
+ event = load_event(SAMPLE_EVENT)
+ parse_events.EventData("CATEGORY", "test_event", event, strict_type_checks=True)
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_valid_event_custom(self):
+ SAMPLE_EVENT = """
+methods: ["method1", "method2"]
+objects: ["object1", "object2"]
+bug_numbers: [12345]
+notification_emails: ["test01@mozilla.com", "test02@mozilla.com"]
+record_in_processes: ["content"]
+description: This is a test entry for Telemetry.
+expiry_version: never
+extra_keys:
+ key1: test1
+ key2: test2
+products:
+ - fennec
+operating_systems:
+ - windows
+"""
+ name = "test_event"
+ event = load_event(SAMPLE_EVENT)
+ evt = parse_events.EventData("CATEGORY", name, event, strict_type_checks=True)
+ ParserError.exit_func()
+
+ self.assertEqual(evt.methods, ["method1", "method2"])
+ self.assertEqual(evt.objects, ["object1", "object2"])
+ self.assertEqual(evt.record_in_processes, ["content"])
+ self.assertEqual(evt.products, ["fennec"])
+ self.assertEqual(evt.operating_systems, ["windows"])
+ self.assertEqual(sorted(evt.extra_keys), ["key1", "key2"])
+
+ def test_absent_products(self):
+ SAMPLE_EVENT = """
+methods: ["method1", "method2"]
+objects: ["object1", "object2"]
+bug_numbers: [12345]
+notification_emails: ["test01@mozilla.com", "test02@mozilla.com"]
+record_in_processes: ["content"]
+description: This is a test entry for Telemetry.
+expiry_version: never
+"""
+ event = load_event(SAMPLE_EVENT)
+ self.assertRaises(
+ SystemExit,
+ lambda: parse_events.EventData(
+ "CATEGORY", "test_event", event, strict_type_checks=True
+ ),
+ )
+
+ def test_empty_products(self):
+ SAMPLE_EVENT = """
+methods: ["method1", "method2"]
+objects: ["object1", "object2"]
+bug_numbers: [12345]
+notification_emails: ["test01@mozilla.com", "test02@mozilla.com"]
+record_in_processes: ["content"]
+description: This is a test entry for Telemetry.
+products: []
+expiry_version: never
+"""
+ event = load_event(SAMPLE_EVENT)
+ self.assertRaises(
+ SystemExit,
+ lambda: parse_events.EventData(
+ "CATEGORY", "test_event", event, strict_type_checks=True
+ ),
+ )
+
+ def test_geckoview_streaming_product(self):
+ SAMPLE_EVENT = """
+methods: ["method1", "method2"]
+objects: ["object1", "object2"]
+bug_numbers: [12345]
+notification_emails: ["test01@mozilla.com", "test02@mozilla.com"]
+record_in_processes: ["content"]
+description: This is a test entry for Telemetry.
+products: ["geckoview_streaming"]
+expiry_version: never
+"""
+ event = load_event(SAMPLE_EVENT)
+ parse_events.EventData("CATEGORY", "test_event", event, strict_type_checks=True)
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/python/test_parse_scalars.py b/toolkit/components/telemetry/tests/python/test_parse_scalars.py
new file mode 100644
index 0000000000..dbf61ab66a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/test_parse_scalars.py
@@ -0,0 +1,266 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import yaml
+import mozunit
+import sys
+import unittest
+import os
+from os import path
+
+TELEMETRY_ROOT_PATH = path.abspath(
+ path.join(path.dirname(__file__), path.pardir, path.pardir)
+)
+sys.path.append(TELEMETRY_ROOT_PATH)
+# The parsers live in a subdirectory of "build_scripts", account for that.
+# NOTE: if the parsers are moved, this logic will need to be updated.
+sys.path.append(path.join(TELEMETRY_ROOT_PATH, "build_scripts"))
+from mozparsers.shared_telemetry_utils import ParserError
+from mozparsers import parse_scalars
+
+
+def load_scalar(scalar):
+ """Parse the passed Scalar and return a dictionary
+
+ :param scalar: Scalar as YAML string
+ :returns: Parsed Scalar dictionary
+ """
+ return yaml.safe_load(scalar)
+
+
+class TestParser(unittest.TestCase):
+ def setUp(self):
+ def mockexit(x):
+ raise SystemExit(x)
+
+ self.oldexit = os._exit
+ os._exit = mockexit
+
+ def tearDown(self):
+ os._exit = self.oldexit
+
+ def test_valid_email_address(self):
+ SAMPLE_SCALAR_VALID_ADDRESSES = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+ - test02@mozilla.com
+products: ["firefox"]
+bug_numbers:
+ - 12345
+"""
+ scalar = load_scalar(SAMPLE_SCALAR_VALID_ADDRESSES)
+ sclr = parse_scalars.ScalarType(
+ "CATEGORY", "PROVE", scalar, strict_type_checks=True
+ )
+ ParserError.exit_func()
+
+ self.assertEqual(
+ sclr.notification_emails, ["test01@mozilla.com", "test02@mozilla.com"]
+ )
+
+ def test_invalid_email_address(self):
+ SAMPLE_SCALAR_INVALID_ADDRESSES = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com, test02@mozilla.com
+products: ["firefox"]
+bug_numbers:
+ - 12345
+"""
+ scalar = load_scalar(SAMPLE_SCALAR_INVALID_ADDRESSES)
+ parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
+
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_multistore_default(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+products: ["firefox"]
+bug_numbers:
+ - 12345
+"""
+ scalar = load_scalar(SAMPLE_SCALAR)
+ sclr = parse_scalars.ScalarType(
+ "CATEGORY", "PROVE", scalar, strict_type_checks=True
+ )
+ ParserError.exit_func()
+
+ self.assertEqual(sclr.record_into_store, ["main"])
+
+ def test_multistore_extended(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+bug_numbers:
+ - 12345
+products: ["firefox"]
+record_into_store:
+ - main
+ - sync
+"""
+ scalar = load_scalar(SAMPLE_SCALAR)
+ sclr = parse_scalars.ScalarType(
+ "CATEGORY", "PROVE", scalar, strict_type_checks=True
+ )
+ ParserError.exit_func()
+
+ self.assertEqual(sclr.record_into_store, ["main", "sync"])
+
+ def test_multistore_empty(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+bug_numbers:
+ - 12345
+products: ["firefox"]
+record_into_store: []
+"""
+ scalar = load_scalar(SAMPLE_SCALAR)
+ parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_operating_systems_default(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+products: ["firefox"]
+bug_numbers:
+ - 12345
+"""
+ scalar = load_scalar(SAMPLE_SCALAR)
+ sclr = parse_scalars.ScalarType(
+ "CATEGORY", "PROVE", scalar, strict_type_checks=True
+ )
+ ParserError.exit_func()
+
+ self.assertEqual(sclr.operating_systems, ["all"])
+
+ def test_operating_systems_custom(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+bug_numbers:
+ - 12345
+products: ["firefox"]
+operating_systems:
+ - windows
+"""
+ scalar = load_scalar(SAMPLE_SCALAR)
+ sclr = parse_scalars.ScalarType(
+ "CATEGORY", "PROVE", scalar, strict_type_checks=True
+ )
+ ParserError.exit_func()
+
+ self.assertEqual(sclr.operating_systems, ["windows"])
+
+ def test_operating_systems_empty(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+bug_numbers:
+ - 12345
+products: ["firefox"]
+operating_systems: []
+"""
+ scalar = load_scalar(SAMPLE_SCALAR)
+ parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_products_absent(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+bug_numbers:
+ - 12345
+"""
+
+ scalar = load_scalar(SAMPLE_SCALAR)
+ parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_products_empty(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+products: []
+bug_numbers:
+ - 12345
+"""
+
+ scalar = load_scalar(SAMPLE_SCALAR)
+ parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+ def test_gv_streaming_keyed(self):
+ SAMPLE_SCALAR = """
+description: A nice one-line description.
+expires: never
+record_in_processes:
+ - 'main'
+kind: uint
+notification_emails:
+ - test01@mozilla.com
+products: ['geckoview_streaming']
+keyed: true
+bug_numbers:
+ - 12345
+"""
+
+ scalar = load_scalar(SAMPLE_SCALAR)
+ parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
+ self.assertRaises(SystemExit, ParserError.exit_func)
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/python/test_usecounters.py b/toolkit/components/telemetry/tests/python/test_usecounters.py
new file mode 100644
index 0000000000..005296d8b7
--- /dev/null
+++ b/toolkit/components/telemetry/tests/python/test_usecounters.py
@@ -0,0 +1,69 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import mozunit
+import sys
+import unittest
+from os import path
+from test_histogramtools_non_strict import load_histogram
+
+TELEMETRY_ROOT_PATH = path.abspath(
+ path.join(path.dirname(__file__), path.pardir, path.pardir)
+)
+sys.path.append(TELEMETRY_ROOT_PATH)
+# The parsers live in a subdirectory of "build_scripts", account for that.
+# NOTE: if the parsers are moved, this logic will need to be updated.
+sys.path.append(path.join(TELEMETRY_ROOT_PATH, "build_scripts"))
+from mozparsers.shared_telemetry_utils import ParserError
+from mozparsers import parse_histograms
+
+
+class TestParser(unittest.TestCase):
+ def test_usecounter_collection_enabled(self):
+ SAMPLE_HISTOGRAM = {
+ "USE_COUNTER2_TEST_HISTOGRAM": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a foo used bar",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ hist = parse_histograms.Histogram(
+ "USE_COUNTER2_TEST_HISTOGRAM",
+ histograms["USE_COUNTER2_TEST_HISTOGRAM"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEquals(hist.dataset(), "nsITelemetry::DATASET_ALL_CHANNELS")
+ self.assertEquals(hist.products(), ["firefox", "fennec"])
+
+ def test_usecounter_histogram(self):
+ SAMPLE_HISTOGRAM = {
+ "USE_COUNTER2_TEST_HISTOGRAM": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a foo used bar",
+ }
+ }
+ histograms = load_histogram(SAMPLE_HISTOGRAM)
+ parse_histograms.load_allowlist()
+
+ hist = parse_histograms.Histogram(
+ "USE_COUNTER2_TEST_HISTOGRAM",
+ histograms["USE_COUNTER2_TEST_HISTOGRAM"],
+ strict_type_checks=True,
+ )
+
+ ParserError.exit_func()
+ self.assertEquals(hist.expiration(), "never")
+ self.assertEquals(hist.kind(), "boolean")
+ self.assertEquals(hist.description(), "Whether a foo used bar")
+ self.assertEquals(hist.products(), ["firefox", "fennec"])
+
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm b/toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm
new file mode 100644
index 0000000000..ac06fdfe3a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm
@@ -0,0 +1,81 @@
+const { TelemetryArchive } = ChromeUtils.import(
+ "resource://gre/modules/TelemetryArchive.jsm"
+);
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+
+var EXPORTED_SYMBOLS = ["TelemetryArchiveTesting"];
+
+function checkForProperties(ping, expected) {
+ for (let [props, val] of expected) {
+ let test = ping;
+ for (let prop of props) {
+ test = test[prop];
+ if (test === undefined) {
+ return false;
+ }
+ }
+ if (test !== val) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * A helper object that allows test code to check whether a telemetry ping
+ * was properly saved. To use, first call promiseInit() to collect the pings
+ * already in the archive, then check for new ping data with promiseFindPing().
+ */
+function Checker() {}
+Checker.prototype = {
+ promiseInit() {
+ this._pingMap = new Map();
+ return TelemetryArchive.promiseArchivedPingList().then(plist => {
+ for (let ping of plist) {
+ this._pingMap.set(ping.id, ping);
+ }
+ });
+ },
+
+ /**
+ * Find and return a new ping with certain properties.
+ *
+ * @param expected: an array of [['prop'...], 'value'] to check
+ * For example:
+ * [
+ * [['environment', 'build', 'applicationId'], '20150101010101'],
+ * [['version'], 1],
+ * [['metadata', 'OOMAllocationSize'], 123456789],
+ * ]
+ * @returns a matching ping if found, or null
+ */
+ async promiseFindPing(type, expected) {
+ let candidates = [];
+ let plist = await TelemetryArchive.promiseArchivedPingList();
+ for (let ping of plist) {
+ if (this._pingMap.has(ping.id)) {
+ continue;
+ }
+ if (ping.type == type) {
+ candidates.push(ping);
+ }
+ }
+
+ for (let candidate of candidates) {
+ let ping = await TelemetryArchive.promiseArchivedPingById(candidate.id);
+ if (checkForProperties(ping, expected)) {
+ return ping;
+ }
+ }
+ return null;
+ },
+};
+
+const TelemetryArchiveTesting = {
+ setup() {
+ Services.prefs.setCharPref("toolkit.telemetry.log.level", "Trace");
+ Services.prefs.setBoolPref("toolkit.telemetry.archive.enabled", true);
+ },
+
+ Checker,
+};
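
The expected-properties format documented on promiseFindPing is essentially a list of (property path, leaf value) pairs that checkForProperties walks into the ping. Transcribed into Python for illustration only (the authoritative logic is the checkForProperties function above):

    def check_for_properties(ping, expected):
        # `expected` is a list of (path, value) pairs, e.g.
        # (["environment", "build", "applicationId"], "20150101010101").
        for path, value in expected:
            node = ping
            for prop in path:
                node = node.get(prop) if isinstance(node, dict) else None
                if node is None:
                    return False
            if node != value:
                return False
        return True

    # check_for_properties(ping, [(["version"], 1)]) -> True for a v1 ping.
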
diff --git a/toolkit/components/telemetry/tests/unit/data/search-extensions/engines.json b/toolkit/components/telemetry/tests/unit/data/search-extensions/engines.json
new file mode 100644
index 0000000000..2437805455
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/data/search-extensions/engines.json
@@ -0,0 +1,12 @@
+{
+ "data": [
+ {
+ "webExtension": {
+ "id":"telemetrySearchIdentifier@search.mozilla.org"
+ },
+ "appliesTo": [{
+ "included": { "everywhere": true }
+ }]
+ }
+ ]
+}
diff --git a/toolkit/components/telemetry/tests/unit/data/search-extensions/telemetrySearchIdentifier/manifest.json b/toolkit/components/telemetry/tests/unit/data/search-extensions/telemetrySearchIdentifier/manifest.json
new file mode 100644
index 0000000000..b9ec570c9f
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/data/search-extensions/telemetrySearchIdentifier/manifest.json
@@ -0,0 +1,29 @@
+{
+ "name": "telemetrySearchIdentifier",
+ "manifest_version": 2,
+ "version": "1.0",
+ "description": "telemetrySearchIdentifier",
+ "applications": {
+ "gecko": {
+ "id": "telemetrySearchIdentifier@search.mozilla.org"
+ }
+ },
+ "hidden": true,
+ "chrome_settings_overrides": {
+ "search_provider": {
+ "name": "telemetrySearchIdentifier",
+ "search_url": "https://ar.wikipedia.org/wiki/%D8%AE%D8%A7%D8%B5:%D8%A8%D8%AD%D8%AB",
+ "params": [
+ {
+ "name": "search",
+ "value": "{searchTerms}"
+ },
+ {
+ "name": "sourceId",
+ "value": "Mozilla-search"
+ }
+ ],
+ "suggest_url": "https://ar.wikipedia.org/w/api.php?action=opensearch&search={searchTerms}"
+ }
+ }
+}
diff --git a/toolkit/components/telemetry/tests/unit/engine.xml b/toolkit/components/telemetry/tests/unit/engine.xml
new file mode 100644
index 0000000000..2304fcdd7b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/engine.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<SearchPlugin xmlns="http://www.mozilla.org/2006/browser/search/">
+<ShortName>engine-telemetry</ShortName>
+<Url type="text/html" method="GET" template="http://www.example.com/search">
+ <Param name="q" value="{searchTerms}"/>
+</Url>
+</SearchPlugin>
diff --git a/toolkit/components/telemetry/tests/unit/head.js b/toolkit/components/telemetry/tests/unit/head.js
new file mode 100644
index 0000000000..bbd0d67de0
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/head.js
@@ -0,0 +1,589 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/PromiseUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/FileUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import("resource://testing-common/httpd.js", this);
+var { AppConstants } = ChromeUtils.import(
+ "resource://gre/modules/AppConstants.jsm"
+);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "AddonTestUtils",
+ "resource://testing-common/AddonTestUtils.jsm"
+);
+ChromeUtils.defineModuleGetter(this, "OS", "resource://gre/modules/osfile.jsm");
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetrySend",
+ "resource://gre/modules/TelemetrySend.jsm"
+);
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetryStorage",
+ "resource://gre/modules/TelemetryStorage.jsm"
+);
+ChromeUtils.defineModuleGetter(this, "Log", "resource://gre/modules/Log.jsm");
+ChromeUtils.defineModuleGetter(
+ this,
+ "NetUtil",
+ "resource://gre/modules/NetUtil.jsm"
+);
+
+const gIsWindows = AppConstants.platform == "win";
+const gIsMac = AppConstants.platform == "macosx";
+const gIsAndroid = AppConstants.platform == "android";
+const gIsLinux = AppConstants.platform == "linux";
+
+// Desktop Firefox, i.e. not mobile Firefox or Thunderbird.
+const gIsFirefox = AppConstants.MOZ_APP_NAME == "firefox";
+
+const Telemetry = Services.telemetry;
+
+const MILLISECONDS_PER_MINUTE = 60 * 1000;
+const MILLISECONDS_PER_HOUR = 60 * MILLISECONDS_PER_MINUTE;
+const MILLISECONDS_PER_DAY = 24 * MILLISECONDS_PER_HOUR;
+
+const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
+
+var gGlobalScope = this;
+
+const PingServer = {
+ _httpServer: null,
+ _started: false,
+ _defers: [PromiseUtils.defer()],
+ _currentDeferred: 0,
+ _logger: null,
+
+ get port() {
+ return this._httpServer.identity.primaryPort;
+ },
+
+ get started() {
+ return this._started;
+ },
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(
+ "Toolkit.Telemetry",
+ "PingServer::"
+ );
+ }
+
+ return this._logger;
+ },
+
+ registerPingHandler(handler) {
+ const wrapped = wrapWithExceptionHandler(handler);
+ this._httpServer.registerPrefixHandler("/submit/telemetry/", wrapped);
+ },
+
+ resetPingHandler() {
+ this.registerPingHandler((request, response) => {
+ let r = request;
+ this._log.trace(
+ `defaultPingHandler() - ${r.method} ${r.scheme}://${r.host}:${r.port}${r.path}`
+ );
+ let deferred = this._defers[this._defers.length - 1];
+ this._defers.push(PromiseUtils.defer());
+ deferred.resolve(request);
+ });
+ },
+
+ start() {
+ this._httpServer = new HttpServer();
+ this._httpServer.start(-1);
+ this._started = true;
+ this.clearRequests();
+ this.resetPingHandler();
+ },
+
+ stop() {
+ return new Promise(resolve => {
+ this._httpServer.stop(resolve);
+ this._started = false;
+ });
+ },
+
+ clearRequests() {
+ this._defers = [PromiseUtils.defer()];
+ this._currentDeferred = 0;
+ },
+
+ promiseNextRequest() {
+ const deferred = this._defers[this._currentDeferred++];
+ // Send the ping to the consumer on the next tick, so that the completion gets
+ // signaled to Telemetry.
+ return new Promise(r =>
+ Services.tm.dispatchToMainThread(() => r(deferred.promise))
+ );
+ },
+
+ promiseNextPing() {
+ return this.promiseNextRequest().then(request =>
+ decodeRequestPayload(request)
+ );
+ },
+
+ async promiseNextRequests(count) {
+ let results = [];
+ for (let i = 0; i < count; ++i) {
+ results.push(await this.promiseNextRequest());
+ }
+
+ return results;
+ },
+
+ promiseNextPings(count) {
+ return this.promiseNextRequests(count).then(requests => {
+ return Array.from(requests, decodeRequestPayload);
+ });
+ },
+};
+
+/**
+ * Decode the payload of an HTTP request into a ping.
+ * @param {Object} request The data representing an HTTP request (nsIHttpRequest).
+ * @return {Object} The decoded ping payload.
+ */
+function decodeRequestPayload(request) {
+ let s = request.bodyInputStream;
+ let payload = null;
+
+ if (
+ request.hasHeader("content-encoding") &&
+ request.getHeader("content-encoding") == "gzip"
+ ) {
+ let observer = {
+ buffer: "",
+ onStreamComplete(loader, context, status, length, result) {
+ // String.fromCharCode can only deal with 500,000 characters
+ // at a time, so chunk the result into parts of that size.
+ const chunkSize = 500000;
+ for (let offset = 0; offset < result.length; offset += chunkSize) {
+ this.buffer += String.fromCharCode.apply(
+ String,
+ result.slice(offset, offset + chunkSize)
+ );
+ }
+ },
+ };
+
+ let scs = Cc["@mozilla.org/streamConverters;1"].getService(
+ Ci.nsIStreamConverterService
+ );
+ let listener = Cc["@mozilla.org/network/stream-loader;1"].createInstance(
+ Ci.nsIStreamLoader
+ );
+ listener.init(observer);
+ let converter = scs.asyncConvertData(
+ "gzip",
+ "uncompressed",
+ listener,
+ null
+ );
+ converter.onStartRequest(null, null);
+ converter.onDataAvailable(null, s, 0, s.available());
+ converter.onStopRequest(null, null, null);
+ let unicodeConverter = Cc[
+ "@mozilla.org/intl/scriptableunicodeconverter"
+ ].createInstance(Ci.nsIScriptableUnicodeConverter);
+ unicodeConverter.charset = "UTF-8";
+ let utf8string = unicodeConverter.ConvertToUnicode(observer.buffer);
+ utf8string += unicodeConverter.Finish();
+ payload = JSON.parse(utf8string);
+ } else {
+ let bytes = NetUtil.readInputStream(s, s.available());
+ payload = JSON.parse(new TextDecoder().decode(bytes));
+ }
+
+ if (payload && "clientId" in payload) {
+ // Check for canary value
+ Assert.notEqual(
+ TelemetryUtils.knownClientID,
+ payload.clientId,
+ `Known clientId shouldn't appear in a "${payload.type}" ping on the server.`
+ );
+ }
+
+ return payload;
+}
+
+function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
+ const APP_VERSION = "1";
+ const APP_NAME = "XPCShell";
+ const PING_FORMAT_VERSION = 4;
+ const PLATFORM_VERSION = "1.9.2";
+ const MANDATORY_PING_FIELDS = [
+ "type",
+ "id",
+ "creationDate",
+ "version",
+ "application",
+ "payload",
+ ];
+
+ const APPLICATION_TEST_DATA = {
+ buildId: gAppInfo.appBuildID,
+ name: APP_NAME,
+ version: APP_VERSION,
+ displayVersion: AppConstants.MOZ_APP_VERSION_DISPLAY,
+ vendor: "Mozilla",
+ platformVersion: PLATFORM_VERSION,
+ xpcomAbi: "noarch-spidermonkey",
+ };
+
+ // Check that the ping contains all the mandatory fields.
+ for (let f of MANDATORY_PING_FIELDS) {
+ Assert.ok(f in aPing, f + " must be available.");
+ }
+
+ Assert.equal(aPing.type, aType, "The ping must have the correct type.");
+ Assert.equal(
+ aPing.version,
+ PING_FORMAT_VERSION,
+ "The ping must have the correct version."
+ );
+
+ // Test the application section.
+ for (let f in APPLICATION_TEST_DATA) {
+ Assert.equal(
+ aPing.application[f],
+ APPLICATION_TEST_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ // We can't check the values for channel and architecture. Just make
+ // sure they are present.
+ Assert.ok(
+ "architecture" in aPing.application,
+ "The application section must have an architecture field."
+ );
+ Assert.ok(
+ "channel" in aPing.application,
+ "The application section must have a channel field."
+ );
+
+ // Check the clientId and environment fields, as needed.
+ Assert.equal("clientId" in aPing, aHasClientId);
+ Assert.equal("environment" in aPing, aHasEnvironment);
+}
+
+function wrapWithExceptionHandler(f) {
+ function wrapper(...args) {
+ try {
+ f(...args);
+ } catch (ex) {
+ if (typeof ex != "object") {
+ throw ex;
+ }
+ dump("Caught exception: " + ex.message + "\n");
+ dump(ex.stack);
+ do_test_finished();
+ }
+ }
+ return wrapper;
+}
+
+function loadAddonManager(...args) {
+ AddonTestUtils.init(gGlobalScope);
+ AddonTestUtils.overrideCertDB();
+ createAppInfo(...args);
+
+ // As we're not running in a full application, we need to set up the features directory
+ // used by system add-ons.
+ const distroDir = FileUtils.getDir("ProfD", ["sysfeatures", "app0"], true);
+ AddonTestUtils.registerDirectory("XREAppFeat", distroDir);
+ AddonTestUtils.awaitPromise(
+ AddonTestUtils.overrideBuiltIns({
+ system: ["tel-system-xpi@tests.mozilla.org"],
+ })
+ );
+ return AddonTestUtils.promiseStartupManager();
+}
+
+function finishAddonManagerStartup() {
+ Services.obs.notifyObservers(null, "test-load-xpi-database");
+}
+
+var gAppInfo = null;
+
+function createAppInfo(
+ ID = "xpcshell@tests.mozilla.org",
+ name = "XPCShell",
+ version = "1.0",
+ platformVersion = "1.0"
+) {
+ AddonTestUtils.createAppInfo(ID, name, version, platformVersion);
+ gAppInfo = AddonTestUtils.appInfo;
+}
+
+// Fake the timeout functions for the TelemetryScheduler.
+function fakeSchedulerTimer(set, clear) {
+ let scheduler = ChromeUtils.import(
+ "resource://gre/modules/TelemetryScheduler.jsm",
+ null
+ );
+ scheduler.Policy.setSchedulerTickTimeout = set;
+ scheduler.Policy.clearSchedulerTickTimeout = clear;
+}
+
+/* global TelemetrySession:false, TelemetryEnvironment:false, TelemetryController:false,
+ TelemetryStorage:false, TelemetrySend:false, TelemetryReportingPolicy:false
+ */
+
+/**
+ * Fake the current date.
+ * This passes all received arguments to a new Date constructor and
+ * uses the resulting date to fake the time in Telemetry modules.
+ *
+ * @return Date The new faked date.
+ */
+function fakeNow(...args) {
+ const date = new Date(...args);
+ const modules = [
+ ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", null),
+ ChromeUtils.import("resource://gre/modules/TelemetryEnvironment.jsm", null),
+ ChromeUtils.import(
+ "resource://gre/modules/TelemetryControllerParent.jsm",
+ null
+ ),
+ ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", null),
+ ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", null),
+ ChromeUtils.import(
+ "resource://gre/modules/TelemetryReportingPolicy.jsm",
+ null
+ ),
+ ChromeUtils.import("resource://gre/modules/TelemetryScheduler.jsm", null),
+ ];
+
+ for (let m of modules) {
+ m.Policy.now = () => date;
+ }
+
+ return new Date(date);
+}
+
+function fakeMonotonicNow(ms) {
+ const m = ChromeUtils.import(
+ "resource://gre/modules/TelemetrySession.jsm",
+ null
+ );
+ m.Policy.monotonicNow = () => ms;
+ return ms;
+}
+
+// Fake the timeout functions for TelemetryController sending.
+function fakePingSendTimer(set, clear) {
+ let module = ChromeUtils.import(
+ "resource://gre/modules/TelemetrySend.jsm",
+ null
+ );
+ let obj = Cu.cloneInto({ set, clear }, module, { cloneFunctions: true });
+ module.Policy.setSchedulerTickTimeout = obj.set;
+ module.Policy.clearSchedulerTickTimeout = obj.clear;
+}
+
+function fakeMidnightPingFuzzingDelay(delayMs) {
+ let module = ChromeUtils.import(
+ "resource://gre/modules/TelemetrySend.jsm",
+ null
+ );
+ module.Policy.midnightPingFuzzingDelay = () => delayMs;
+}
+
+function fakeGeneratePingId(func) {
+ let module = ChromeUtils.import(
+ "resource://gre/modules/TelemetryControllerParent.jsm",
+ null
+ );
+ module.Policy.generatePingId = func;
+}
+
+function fakeCachedClientId(uuid) {
+ let module = ChromeUtils.import(
+ "resource://gre/modules/TelemetryControllerParent.jsm",
+ null
+ );
+ module.Policy.getCachedClientID = () => uuid;
+}
+
+// Fake the gzip compression for the next ping to be sent out
+// and immediately reset to the original function.
+function fakeGzipCompressStringForNextPing(length) {
+ let send = ChromeUtils.import(
+ "resource://gre/modules/TelemetrySend.jsm",
+ null
+ );
+ let largePayload = generateString(length);
+ send.Policy.gzipCompressString = data => {
+ send.Policy.gzipCompressString = send.gzipCompressString;
+ return largePayload;
+ };
+}
+
+function fakeIntlReady() {
+ const m = ChromeUtils.import(
+ "resource://gre/modules/TelemetryEnvironment.jsm",
+ null
+ );
+ m.Policy._intlLoaded = true;
+ // Dispatch the observer event in case the promise has been registered already.
+ Services.obs.notifyObservers(null, "browser-delayed-startup-finished");
+}
+
+// Override the uninstall ping file names
+function fakeUninstallPingPath(aPathFcn) {
+ const m = ChromeUtils.import(
+ "resource://gre/modules/TelemetryStorage.jsm",
+ null
+ );
+ m.Policy.getUninstallPingPath =
+ aPathFcn ||
+ (id => ({
+ directory: new FileUtils.File(OS.Constants.Path.profileDir),
+ file: `uninstall_ping_0123456789ABCDEF_${id}.json`,
+ }));
+}
+
+// Return a date that is |offset| ms in the future from |date|.
+function futureDate(date, offset) {
+ return new Date(date.getTime() + offset);
+}
+
+function truncateToDays(aMsec) {
+ return Math.floor(aMsec / MILLISECONDS_PER_DAY);
+}
+
+// Returns a promise that resolves to true when the passed promise rejects,
+// false otherwise.
+function promiseRejects(promise) {
+ return promise.then(
+ () => false,
+ () => true
+ );
+}
+
+// Generates a random string of exactly the given length.
+function generateRandomString(length) {
+ let string = "";
+
+ while (string.length < length) {
+ string += Math.random().toString(36);
+ }
+
+ return string.substring(0, length);
+}
+
+function generateString(length) {
+ return new Array(length + 1).join("a");
+}
+
+// Short-hand for retrieving the histogram with that id.
+function getHistogram(histogramId) {
+ return Telemetry.getHistogramById(histogramId);
+}
+
+// Short-hand for retrieving the snapshot of the Histogram with that id.
+function getSnapshot(histogramId) {
+ return Telemetry.getHistogramById(histogramId).snapshot();
+}
+
+// Helper for setting an empty list of Environment preferences to watch.
+function setEmptyPrefWatchlist() {
+ const { TelemetryEnvironment } = ChromeUtils.import(
+ "resource://gre/modules/TelemetryEnvironment.jsm"
+ );
+ return TelemetryEnvironment.onInitialized().then(() =>
+ TelemetryEnvironment.testWatchPreferences(new Map())
+ );
+}
+
+if (runningInParent) {
+ // Set logging preferences for all the tests.
+ Services.prefs.setCharPref("toolkit.telemetry.log.level", "Trace");
+ // Telemetry archiving should be on.
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.ArchiveEnabled, true);
+ // Telemetry xpcshell tests cannot show the infobar.
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.BypassNotification,
+ true
+ );
+ // FHR uploads should be enabled.
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+  // Many tests expect the shutdown and new-profile pings not to be sent on
+  // shutdown and will fail if they receive an unexpected ping. Globally disable
+  // these features: the relevant tests will enable these prefs when needed.
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.ShutdownPingSender,
+ false
+ );
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.ShutdownPingSenderFirstSession,
+ false
+ );
+ Services.prefs.setBoolPref("toolkit.telemetry.newProfilePing.enabled", false);
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.FirstShutdownPingEnabled,
+ false
+ );
+ // Turn off Health Ping submission.
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.HealthPingEnabled,
+ false
+ );
+
+ // Speed up child process accumulations
+ Services.prefs.setIntPref(TelemetryUtils.Preferences.IPCBatchTimeout, 10);
+
+  // Make sure ecosystem telemetry is disabled, no matter which build.
+  // Individual tests will enable it when appropriate.
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.EcosystemTelemetryEnabled,
+ false
+ );
+
+ // Non-unified Telemetry (e.g. Fennec on Android) needs the preference to be set
+ // in order to enable Telemetry.
+ if (Services.prefs.getBoolPref(TelemetryUtils.Preferences.Unified, false)) {
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.OverridePreRelease,
+ true
+ );
+ } else {
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.TelemetryEnabled,
+ true
+ );
+ }
+
+ fakePingSendTimer(
+ (callback, timeout) => {
+ Services.tm.dispatchToMainThread(() => callback());
+ },
+ () => {}
+ );
+
+ // This gets imported via fakeNow();
+ registerCleanupFunction(() => TelemetrySend.shutdown());
+}
+
+TelemetryController.testInitLogging();
+
+// Avoid timers interrupting test behavior.
+fakeSchedulerTimer(
+ () => {},
+ () => {}
+);
+// Make ping sending predictable.
+fakeMidnightPingFuzzingDelay(0);
+
+// Avoid using the directory service, which is not registered in some tests.
+fakeUninstallPingPath();
diff --git a/toolkit/components/telemetry/tests/unit/testNoPDB32.dll b/toolkit/components/telemetry/tests/unit/testNoPDB32.dll
new file mode 100644
index 0000000000..e7f9febc4b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/testNoPDB32.dll
Binary files differ
diff --git a/toolkit/components/telemetry/tests/unit/testNoPDB64.dll b/toolkit/components/telemetry/tests/unit/testNoPDB64.dll
new file mode 100644
index 0000000000..19f95c98ed
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/testNoPDB64.dll
Binary files differ
diff --git a/toolkit/components/telemetry/tests/unit/testNoPDBAArch64.dll b/toolkit/components/telemetry/tests/unit/testNoPDBAArch64.dll
new file mode 100755
index 0000000000..ecfff07036
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/testNoPDBAArch64.dll
Binary files differ
diff --git a/toolkit/components/telemetry/tests/unit/testUnicodePDB32.dll b/toolkit/components/telemetry/tests/unit/testUnicodePDB32.dll
new file mode 100644
index 0000000000..d3eec65ea5
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/testUnicodePDB32.dll
Binary files differ
diff --git a/toolkit/components/telemetry/tests/unit/testUnicodePDB64.dll b/toolkit/components/telemetry/tests/unit/testUnicodePDB64.dll
new file mode 100644
index 0000000000..c11f8453de
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/testUnicodePDB64.dll
Binary files differ
diff --git a/toolkit/components/telemetry/tests/unit/testUnicodePDBAArch64.dll b/toolkit/components/telemetry/tests/unit/testUnicodePDBAArch64.dll
new file mode 100755
index 0000000000..a892a84315
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/testUnicodePDBAArch64.dll
Binary files differ
diff --git a/toolkit/components/telemetry/tests/unit/test_ChildEvents.js b/toolkit/components/telemetry/tests/unit/test_ChildEvents.js
new file mode 100644
index 0000000000..ff3da1954c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_ChildEvents.js
@@ -0,0 +1,226 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+ChromeUtils.import("resource://gre/modules/PromiseUtils.jsm", this);
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
+
+const MESSAGE_CHILD_TEST_DONE = "ChildTest:Done";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+const RECORDED_CONTENT_EVENTS = [
+ ["telemetry.test", "content_only", "object1"],
+ ["telemetry.test", "main_and_content", "object1"],
+ ["telemetry.test", "content_only", "object1", "some value"],
+ ["telemetry.test", "content_only", "object1", null, { foo: "x", bar: "y" }],
+ [
+ "telemetry.test",
+ "content_only",
+ "object1",
+ "some value",
+ { foo: "x", bar: "y" },
+ ],
+];
+
+const UNRECORDED_CONTENT_EVENTS = [["telemetry.test", "main_only", "object1"]];
+
+const RECORDED_PARENT_EVENTS = [
+ ["telemetry.test", "main_and_content", "object1"],
+ ["telemetry.test", "main_only", "object1"],
+];
+
+const UNRECORDED_PARENT_EVENTS = [
+ ["telemetry.test", "content_only", "object1"],
+];
+
+const RECORDED_DYNAMIC_EVENTS = [
+ ["telemetry.test.dynamic", "test1", "object1"],
+ ["telemetry.test.dynamic", "test2", "object1"],
+];
+
+function run_child_test() {
+ // Record some events in the "content" process.
+ RECORDED_CONTENT_EVENTS.forEach(e => Telemetry.recordEvent(...e));
+ // These events should not be recorded for the content process.
+ UNRECORDED_CONTENT_EVENTS.forEach(e => Telemetry.recordEvent(...e));
+ // Record some dynamic events from the content process.
+ RECORDED_DYNAMIC_EVENTS.forEach(e => Telemetry.recordEvent(...e));
+}
+
+/**
+ * This function waits until content events are reported into the
+ * events snapshot.
+ */
+async function waitForContentEvents() {
+ await ContentTaskUtils.waitForCondition(() => {
+ const snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ return (
+ Object.keys(snapshot).includes("content") &&
+ Object.keys(snapshot).includes("dynamic")
+ );
+ });
+}
+
+add_task(async function() {
+ if (!runningInParent) {
+ TelemetryController.testSetupContent();
+ run_child_test();
+ do_send_remote_message(MESSAGE_CHILD_TEST_DONE);
+ return;
+ }
+
+ // Setup.
+ do_get_profile(true);
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ await TelemetryController.testSetup();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+ // Enable recording for the test event category.
+ Telemetry.setEventRecordingEnabled("telemetry.test", true);
+
+ // Register dynamic test events.
+ Telemetry.registerEvents("telemetry.test.dynamic", {
+ // Event with only required fields.
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ // Event with extra_keys.
+ test2: {
+ methods: ["test2", "test2b"],
+ objects: ["object1"],
+ extra_keys: ["key1", "key2"],
+ },
+ });
+
+ // Run test in child, don't wait for it to finish: just wait for the
+ // MESSAGE_CHILD_TEST_DONE.
+ const timestampBeforeChildEvents = Telemetry.msSinceProcessStart();
+ run_test_in_child("test_ChildEvents.js");
+ await do_await_remote_message(MESSAGE_CHILD_TEST_DONE);
+
+ // Once events are set by the content process, they don't immediately get
+ // sent to the parent process. Wait for the Telemetry IPC Timer to trigger
+ // and batch send the data back to the parent process.
+ await waitForContentEvents();
+ const timestampAfterChildEvents = Telemetry.msSinceProcessStart();
+
+ // Also record some events in the parent.
+ RECORDED_PARENT_EVENTS.forEach(e => Telemetry.recordEvent(...e));
+ UNRECORDED_PARENT_EVENTS.forEach(e => Telemetry.recordEvent(...e));
+
+ let snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+
+ Assert.ok("parent" in snapshot, "Should have main process section");
+ Assert.ok(
+ !!snapshot.parent.length,
+ "Main process section should have events."
+ );
+ Assert.ok("content" in snapshot, "Should have child process section");
+ Assert.ok(
+ !!snapshot.content.length,
+ "Child process section should have events."
+ );
+ Assert.ok("dynamic" in snapshot, "Should have dynamic process section");
+ Assert.ok(
+ !!snapshot.dynamic.length,
+ "Dynamic process section should have events."
+ );
+
+ // Check that the expected events are present from the content process.
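+  // Each snapshot entry is [timestamp, category, method, object, value, extra];
+  // drop the timestamp so entries can be compared to the recorded events.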
+ let contentEvents = snapshot.content.map(e => e.slice(1));
+ Assert.equal(
+ contentEvents.length,
+ RECORDED_CONTENT_EVENTS.length,
+ "Should match expected event count."
+ );
+ for (let i = 0; i < RECORDED_CONTENT_EVENTS.length; ++i) {
+ Assert.deepEqual(
+ contentEvents[i],
+ RECORDED_CONTENT_EVENTS[i],
+ "Should have recorded expected event."
+ );
+ }
+
+ // Check that the expected events are present from the parent process.
+ let parentEvents = snapshot.parent.map(e => e.slice(1));
+ Assert.equal(
+ parentEvents.length,
+ RECORDED_PARENT_EVENTS.length,
+ "Should match expected event count."
+ );
+ for (let i = 0; i < RECORDED_PARENT_EVENTS.length; ++i) {
+ Assert.deepEqual(
+ parentEvents[i],
+ RECORDED_PARENT_EVENTS[i],
+ "Should have recorded expected event."
+ );
+ }
+
+ // Check that the expected dynamic events are present.
+ let dynamicEvents = snapshot.dynamic.map(e => e.slice(1));
+ Assert.equal(
+ dynamicEvents.length,
+ RECORDED_DYNAMIC_EVENTS.length,
+ "Should match expected event count."
+ );
+ for (let i = 0; i < RECORDED_DYNAMIC_EVENTS.length; ++i) {
+ Assert.deepEqual(
+ dynamicEvents[i],
+ RECORDED_DYNAMIC_EVENTS[i],
+ "Should have recorded expected event."
+ );
+ }
+
+ // Check that the event timestamps are in the expected ranges.
+ let contentTimestamps = snapshot.content.map(e => e[0]);
+ let parentTimestamps = snapshot.parent.map(e => e[0]);
+
+ Assert.ok(
+ contentTimestamps.every(
+ ts =>
+ ts > Math.floor(timestampBeforeChildEvents) &&
+ ts < timestampAfterChildEvents
+ ),
+ "All content event timestamps should be in the expected time range."
+ );
+ Assert.ok(
+ parentTimestamps.every(ts => ts >= Math.floor(timestampAfterChildEvents)),
+ "All parent event timestamps should be in the expected time range."
+ );
+
+ // Make sure all events are cleared from storage properly.
+ snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ true
+ );
+ Assert.greaterOrEqual(
+ Object.keys(snapshot).length,
+ 2,
+ "Should have events from at least two processes."
+ );
+ snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ true
+ );
+ Assert.equal(
+ Object.keys(snapshot).length,
+ 0,
+ "Should have cleared all events from storage."
+ );
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_ChildHistograms.js b/toolkit/components/telemetry/tests/unit/test_ChildHistograms.js
new file mode 100644
index 0000000000..f077c8107c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_ChildHistograms.js
@@ -0,0 +1,333 @@
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+ChromeUtils.import("resource://gre/modules/PromiseUtils.jsm", this);
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
+
+const MESSAGE_CHILD_TEST_DONE = "ChildTest:Done";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+function run_child_test() {
+ // Setup histograms with some fixed values.
+ let flagHist = Telemetry.getHistogramById("TELEMETRY_TEST_FLAG");
+ flagHist.add(1);
+ let countHist = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", false);
+ countHist.add();
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", true);
+ countHist.add();
+ countHist.add();
+ let categHist = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL");
+ categHist.add("Label2");
+ categHist.add("Label3");
+
+ let flagKeyed = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_FLAG");
+ flagKeyed.add("a", 1);
+ flagKeyed.add("b", 1);
+ let countKeyed = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_COUNT"
+ );
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_COUNT", false);
+ countKeyed.add("a");
+ countKeyed.add("b");
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_COUNT", true);
+ countKeyed.add("a");
+ countKeyed.add("b");
+ countKeyed.add("b");
+
+ // Test record_in_processes
+ let contentLinear = Telemetry.getHistogramById(
+ "TELEMETRY_TEST_CONTENT_PROCESS"
+ );
+ contentLinear.add(10);
+ let contentKeyed = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_CONTENT_PROCESS"
+ );
+ contentKeyed.add("content", 1);
+ let contentFlag = Telemetry.getHistogramById(
+ "TELEMETRY_TEST_FLAG_CONTENT_PROCESS"
+ );
+ contentFlag.add(true);
+ let mainFlag = Telemetry.getHistogramById("TELEMETRY_TEST_FLAG_MAIN_PROCESS");
+ mainFlag.add(true);
+ let allLinear = Telemetry.getHistogramById("TELEMETRY_TEST_ALL_PROCESSES");
+ allLinear.add(10);
+ let allChildLinear = Telemetry.getHistogramById(
+ "TELEMETRY_TEST_ALL_CHILD_PROCESSES"
+ );
+ allChildLinear.add(10);
+
+ // Test snapshot APIs.
+ // Should be forbidden in content processes.
+ Assert.throws(
+ () => Telemetry.getHistogramById("TELEMETRY_TEST_COUNT").snapshot(),
+ /Histograms can only be snapshotted in the parent process/,
+ "Snapshotting should be forbidden in the content process"
+ );
+
+ Assert.throws(
+ () =>
+ Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT").snapshot(),
+ /Keyed histograms can only be snapshotted in the parent process/,
+ "Snapshotting should be forbidden in the content process"
+ );
+
+ Assert.throws(
+ () => Telemetry.getHistogramById("TELEMETRY_TEST_COUNT").clear(),
+ /Histograms can only be cleared in the parent process/,
+ "Clearing should be forbidden in the content process"
+ );
+
+ Assert.throws(
+ () => Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT").clear(),
+ /Keyed histograms can only be cleared in the parent process/,
+ "Clearing should be forbidden in the content process"
+ );
+
+ Assert.throws(
+ () => Telemetry.getSnapshotForHistograms(),
+ /NS_ERROR_FAILURE/,
+ "Snapshotting should be forbidden in the content process"
+ );
+
+ Assert.throws(
+ () => Telemetry.getSnapshotForKeyedHistograms(),
+ /NS_ERROR_FAILURE/,
+ "Snapshotting should be forbidden in the content process"
+ );
+}
+
+function check_histogram_values(payload) {
+ const hs = payload.histograms;
+ Assert.ok("TELEMETRY_TEST_COUNT" in hs, "Should have count test histogram.");
+ Assert.ok("TELEMETRY_TEST_FLAG" in hs, "Should have flag test histogram.");
+ Assert.ok(
+ "TELEMETRY_TEST_CATEGORICAL" in hs,
+ "Should have categorical test histogram."
+ );
+ Assert.equal(
+ hs.TELEMETRY_TEST_COUNT.sum,
+ 2,
+ "Count test histogram should have the right value."
+ );
+ Assert.equal(
+ hs.TELEMETRY_TEST_FLAG.sum,
+ 1,
+ "Flag test histogram should have the right value."
+ );
+ Assert.equal(
+ hs.TELEMETRY_TEST_CATEGORICAL.sum,
+ 3,
+ "Categorical test histogram should have the right sum."
+ );
+
+ const kh = payload.keyedHistograms;
+ Assert.ok(
+ "TELEMETRY_TEST_KEYED_COUNT" in kh,
+ "Should have keyed count test histogram."
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_KEYED_FLAG" in kh,
+ "Should have keyed flag test histogram."
+ );
+ Assert.equal(
+ kh.TELEMETRY_TEST_KEYED_COUNT.a.sum,
+ 1,
+ "Keyed count test histogram should have the right value."
+ );
+ Assert.equal(
+ kh.TELEMETRY_TEST_KEYED_COUNT.b.sum,
+ 2,
+ "Keyed count test histogram should have the right value."
+ );
+ Assert.equal(
+ kh.TELEMETRY_TEST_KEYED_FLAG.a.sum,
+ 1,
+ "Keyed flag test histogram should have the right value."
+ );
+ Assert.equal(
+ kh.TELEMETRY_TEST_KEYED_FLAG.b.sum,
+ 1,
+ "Keyed flag test histogram should have the right value."
+ );
+}
+
+add_task(async function() {
+ if (!runningInParent) {
+ TelemetryController.testSetupContent();
+ run_child_test();
+ dump("... done with child test\n");
+ do_send_remote_message(MESSAGE_CHILD_TEST_DONE);
+ return;
+ }
+
+ // Setup.
+ do_get_profile(true);
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ await TelemetryController.testSetup();
+ if (runningInParent) {
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+ }
+
+ // Run test in child, don't wait for it to finish.
+ run_test_in_child("test_ChildHistograms.js");
+ await do_await_remote_message(MESSAGE_CHILD_TEST_DONE);
+
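+  // Child histogram data is batched over IPC; wait until the content process
+  // histograms show up in the parent's payload before inspecting them.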
+ await ContentTaskUtils.waitForCondition(() => {
+ let payload = TelemetrySession.getPayload("test-ping");
+ return (
+ payload &&
+ "processes" in payload &&
+ "content" in payload.processes &&
+ "histograms" in payload.processes.content &&
+ "TELEMETRY_TEST_COUNT" in payload.processes.content.histograms
+ );
+ });
+
+ // Test record_in_processes in main process, too
+ let contentLinear = Telemetry.getHistogramById(
+ "TELEMETRY_TEST_CONTENT_PROCESS"
+ );
+ contentLinear.add(20);
+ let contentKeyed = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_CONTENT_PROCESS"
+ );
+ contentKeyed.add("parent", 1);
+ let contentFlag = Telemetry.getHistogramById(
+ "TELEMETRY_TEST_FLAG_CONTENT_PROCESS"
+ );
+ contentFlag.add(true);
+ let mainFlag = Telemetry.getHistogramById("TELEMETRY_TEST_FLAG_MAIN_PROCESS");
+ mainFlag.add(true);
+ let allLinear = Telemetry.getHistogramById("TELEMETRY_TEST_ALL_PROCESSES");
+ allLinear.add(20);
+ let allChildLinear = Telemetry.getHistogramById(
+ "TELEMETRY_TEST_ALL_CHILD_PROCESSES"
+ );
+ allChildLinear.add(20);
+ let countKeyed = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_COUNT"
+ );
+ countKeyed.add("a");
+
+ const payload = TelemetrySession.getPayload("test-ping");
+ Assert.ok("processes" in payload, "Should have processes section");
+ Assert.ok(
+ "content" in payload.processes,
+ "Should have child process section"
+ );
+ Assert.ok(
+ "histograms" in payload.processes.content,
+ "Child process section should have histograms."
+ );
+ Assert.ok(
+ "keyedHistograms" in payload.processes.content,
+ "Child process section should have keyed histograms."
+ );
+ check_histogram_values(payload.processes.content);
+
+ // Check record_in_processes
+ // Content Process
+ let hs = payload.processes.content.histograms;
+ let khs = payload.processes.content.keyedHistograms;
+ Assert.ok(
+ "TELEMETRY_TEST_CONTENT_PROCESS" in hs,
+ "Should have content process histogram"
+ );
+ Assert.equal(
+ hs.TELEMETRY_TEST_CONTENT_PROCESS.sum,
+ 10,
+ "Should have correct value"
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_KEYED_CONTENT_PROCESS" in khs,
+ "Should have keyed content process histogram"
+ );
+ Assert.equal(
+ khs.TELEMETRY_TEST_KEYED_CONTENT_PROCESS.content.sum,
+ 1,
+ "Should have correct value"
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_FLAG_CONTENT_PROCESS" in hs,
+ "Should have content process histogram"
+ );
+ Assert.equal(
+ hs.TELEMETRY_TEST_FLAG_CONTENT_PROCESS.sum,
+ 1,
+ "Should have correct value"
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_ALL_PROCESSES" in hs,
+ "Should have content process histogram"
+ );
+ Assert.equal(
+ hs.TELEMETRY_TEST_ALL_PROCESSES.sum,
+ 10,
+ "Should have correct value"
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_ALL_CHILD_PROCESSES" in hs,
+ "Should have content process histogram"
+ );
+ Assert.equal(
+ hs.TELEMETRY_TEST_ALL_CHILD_PROCESSES.sum,
+ 10,
+ "Should have correct value"
+ );
+ Assert.ok(
+ !("TELEMETRY_TEST_FLAG_MAIN_PROCESS" in hs),
+ "Should not have main process histogram in child process payload"
+ );
+
+ // Main Process
+ let mainHs = payload.histograms;
+ let mainKhs = payload.keyedHistograms;
+ Assert.ok(
+ !("TELEMETRY_TEST_CONTENT_PROCESS" in mainHs),
+ "Should not have content process histogram in main process payload"
+ );
+ Assert.ok(
+ !("TELEMETRY_TEST_KEYED_CONTENT_PROCESS" in mainKhs),
+ "Should not have keyed content process histogram in main process payload"
+ );
+ Assert.ok(
+ !("TELEMETRY_TEST_FLAG_CONTENT_PROCESS" in mainHs),
+ "Should not have content process histogram in main process payload"
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_ALL_PROCESSES" in mainHs,
+ "Should have all-process histogram in main process payload"
+ );
+ Assert.equal(
+ mainHs.TELEMETRY_TEST_ALL_PROCESSES.sum,
+ 20,
+ "Should have correct value"
+ );
+ Assert.ok(
+ !("TELEMETRY_TEST_ALL_CHILD_PROCESSES" in mainHs),
+ "Should not have all-child process histogram in main process payload"
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_FLAG_MAIN_PROCESS" in mainHs,
+ "Should have main process histogram in main process payload"
+ );
+ Assert.equal(
+ mainHs.TELEMETRY_TEST_FLAG_MAIN_PROCESS.sum,
+ 1,
+ "Should have correct value"
+ );
+ Assert.equal(
+ mainKhs.TELEMETRY_TEST_KEYED_COUNT.a.sum,
+ 1,
+ "Should have correct value in parent"
+ );
+
+ do_test_finished();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_ChildScalars.js b/toolkit/components/telemetry/tests/unit/test_ChildScalars.js
new file mode 100644
index 0000000000..c6c1660aec
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_ChildScalars.js
@@ -0,0 +1,242 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+ChromeUtils.import("resource://gre/modules/PromiseUtils.jsm", this);
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
+
+const MESSAGE_CHILD_TEST_DONE = "ChildTest:Done";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+const UINT_SCALAR = "telemetry.test.unsigned_int_kind";
+const KEYED_UINT_SCALAR = "telemetry.test.keyed_unsigned_int";
+const KEYED_BOOL_SCALAR = "telemetry.test.keyed_boolean_kind";
+const CONTENT_ONLY_UINT_SCALAR = "telemetry.test.content_only_uint";
+const ALL_PROCESSES_UINT_SCALAR = "telemetry.test.all_processes_uint";
+const ALL_CHILD_PROCESSES_STRING_SCALAR =
+ "telemetry.test.all_child_processes_string";
+
+function run_child_test() {
+ // Attempt to set some scalar values from the "content" process.
+ // The next scalars are not allowed to be recorded in the content process.
+ Telemetry.scalarSet(UINT_SCALAR, 1);
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "should-not-be-recorded", 1);
+
+  // The next scalars should only be recorded in the content process.
+ Telemetry.scalarSet(CONTENT_ONLY_UINT_SCALAR, 37);
+ Telemetry.scalarSet(ALL_CHILD_PROCESSES_STRING_SCALAR, "all-child-processes");
+
+ // The next scalar will be recorded in the parent and content processes.
+ Telemetry.keyedScalarSet(KEYED_BOOL_SCALAR, "content-key", true);
+ Telemetry.keyedScalarSet(KEYED_BOOL_SCALAR, "content-key2", false);
+ Telemetry.scalarSet(ALL_PROCESSES_UINT_SCALAR, 37);
+}
+
+function setParentScalars() {
+ // The following scalars are not allowed to be recorded in the parent process.
+ Telemetry.scalarSet(CONTENT_ONLY_UINT_SCALAR, 15);
+ Telemetry.scalarSet(ALL_CHILD_PROCESSES_STRING_SCALAR, "all-child-processes");
+
+ // The next ones will be recorded only in the parent.
+ Telemetry.scalarSet(UINT_SCALAR, 15);
+
+ // This last batch will be available both in the parent and child processes.
+ Telemetry.keyedScalarSet(KEYED_BOOL_SCALAR, "parent-key", false);
+ Telemetry.scalarSet(ALL_PROCESSES_UINT_SCALAR, 37);
+}
+
+function checkParentScalars(processData) {
+ const scalars = processData.scalars;
+ const keyedScalars = processData.keyedScalars;
+
+ // Check the plain scalars, make sure we're only recording what we expect.
+ Assert.ok(
+ !(CONTENT_ONLY_UINT_SCALAR in scalars),
+ "Scalars must not be recorded in other processes unless allowed."
+ );
+ Assert.ok(
+ !(ALL_CHILD_PROCESSES_STRING_SCALAR in scalars),
+ "Scalars must not be recorded in other processes unless allowed."
+ );
+ Assert.ok(
+ UINT_SCALAR in scalars,
+ `${UINT_SCALAR} must be recorded in the parent process.`
+ );
+ Assert.equal(
+ scalars[UINT_SCALAR],
+ 15,
+ `${UINT_SCALAR} must have the correct value (parent process).`
+ );
+ Assert.ok(
+ ALL_PROCESSES_UINT_SCALAR in scalars,
+ `${ALL_PROCESSES_UINT_SCALAR} must be recorded in the parent process.`
+ );
+ Assert.equal(
+ scalars[ALL_PROCESSES_UINT_SCALAR],
+ 37,
+ `${ALL_PROCESSES_UINT_SCALAR} must have the correct value (parent process).`
+ );
+
+ // Now check the keyed scalars.
+ Assert.ok(
+ KEYED_BOOL_SCALAR in keyedScalars,
+ `${KEYED_BOOL_SCALAR} must be recorded in the parent process.`
+ );
+ Assert.ok(
+ "parent-key" in keyedScalars[KEYED_BOOL_SCALAR],
+ `${KEYED_BOOL_SCALAR} must be recorded in the parent process.`
+ );
+ Assert.equal(
+ Object.keys(keyedScalars[KEYED_BOOL_SCALAR]).length,
+ 1,
+ `${KEYED_BOOL_SCALAR} must only contain the expected key in parent process.`
+ );
+ Assert.equal(
+ keyedScalars[KEYED_BOOL_SCALAR]["parent-key"],
+ false,
+ `${KEYED_BOOL_SCALAR} must have the correct value (parent process).`
+ );
+}
+
+function checkContentScalars(processData) {
+ const scalars = processData.scalars;
+ const keyedScalars = processData.keyedScalars;
+
+ // Check the plain scalars for the content process.
+ Assert.ok(
+ !(UINT_SCALAR in scalars),
+ "Scalars must not be recorded in other processes unless allowed."
+ );
+ Assert.ok(
+ !(KEYED_UINT_SCALAR in keyedScalars),
+ "Keyed scalars must not be recorded in other processes unless allowed."
+ );
+ Assert.ok(
+ CONTENT_ONLY_UINT_SCALAR in scalars,
+ `${CONTENT_ONLY_UINT_SCALAR} must be recorded in the content process.`
+ );
+ Assert.equal(
+ scalars[CONTENT_ONLY_UINT_SCALAR],
+ 37,
+ `${CONTENT_ONLY_UINT_SCALAR} must have the correct value (content process).`
+ );
+ Assert.ok(
+ ALL_CHILD_PROCESSES_STRING_SCALAR in scalars,
+ `${ALL_CHILD_PROCESSES_STRING_SCALAR} must be recorded in the content process.`
+ );
+ Assert.equal(
+ scalars[ALL_CHILD_PROCESSES_STRING_SCALAR],
+ "all-child-processes",
+ `${ALL_CHILD_PROCESSES_STRING_SCALAR} must have the correct value (content process).`
+ );
+ Assert.ok(
+ ALL_PROCESSES_UINT_SCALAR in scalars,
+ `${ALL_PROCESSES_UINT_SCALAR} must be recorded in the content process.`
+ );
+ Assert.equal(
+ scalars[ALL_PROCESSES_UINT_SCALAR],
+ 37,
+ `${ALL_PROCESSES_UINT_SCALAR} must have the correct value (content process).`
+ );
+
+ // Check the keyed scalars.
+ Assert.ok(
+ KEYED_BOOL_SCALAR in keyedScalars,
+ `${KEYED_BOOL_SCALAR} must be recorded in the content process.`
+ );
+ Assert.ok(
+ "content-key" in keyedScalars[KEYED_BOOL_SCALAR],
+ `${KEYED_BOOL_SCALAR} must be recorded in the content process.`
+ );
+ Assert.ok(
+ "content-key2" in keyedScalars[KEYED_BOOL_SCALAR],
+ `${KEYED_BOOL_SCALAR} must be recorded in the content process.`
+ );
+ Assert.equal(
+ keyedScalars[KEYED_BOOL_SCALAR]["content-key"],
+ true,
+ `${KEYED_BOOL_SCALAR} must have the correct value (content process).`
+ );
+ Assert.equal(
+ keyedScalars[KEYED_BOOL_SCALAR]["content-key2"],
+ false,
+ `${KEYED_BOOL_SCALAR} must have the correct value (content process).`
+ );
+ Assert.equal(
+ Object.keys(keyedScalars[KEYED_BOOL_SCALAR]).length,
+ 2,
+ `${KEYED_BOOL_SCALAR} must contain the expected keys in content process.`
+ );
+}
+
+/**
+ * This function waits until content scalars are reported into the
+ * scalar snapshot.
+ */
+async function waitForContentScalars() {
+ await ContentTaskUtils.waitForCondition(() => {
+ const scalars = Telemetry.getSnapshotForScalars("main", false);
+ return Object.keys(scalars).includes("content");
+ });
+}
+
+add_task(async function() {
+ if (!runningInParent) {
+ TelemetryController.testSetupContent();
+ run_child_test();
+ do_send_remote_message(MESSAGE_CHILD_TEST_DONE);
+ return;
+ }
+
+ // Setup.
+ do_get_profile(true);
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ await TelemetryController.testSetup();
+ if (runningInParent) {
+ setParentScalars();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+ }
+
+ // Run test in child, don't wait for it to finish: just wait for the
+ // MESSAGE_CHILD_TEST_DONE.
+ run_test_in_child("test_ChildScalars.js");
+ await do_await_remote_message(MESSAGE_CHILD_TEST_DONE);
+
+ // Once scalars are set by the content process, they don't immediately get
+ // sent to the parent process. Wait for the Telemetry IPC Timer to trigger
+ // and batch send the data back to the parent process.
+ await waitForContentScalars();
+
+ // Get an "environment-changed" ping rather than a "test-ping", as
+ // scalar measurements are only supported in subsession pings.
+ const payload = TelemetrySession.getPayload("environment-change");
+
+ // Validate the scalar data.
+ Assert.ok("processes" in payload, "Should have processes section");
+ Assert.ok(
+ "content" in payload.processes,
+ "Should have child process section"
+ );
+ Assert.ok(
+ "scalars" in payload.processes.content,
+ "Child process section should have scalars."
+ );
+ Assert.ok(
+ "keyedScalars" in payload.processes.content,
+ "Child process section should have keyed scalars."
+ );
+ checkParentScalars(payload.processes.parent);
+ checkContentScalars(payload.processes.content);
+
+ do_test_finished();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_CoveragePing.js b/toolkit/components/telemetry/tests/unit/test_CoveragePing.js
new file mode 100644
index 0000000000..2533850255
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_CoveragePing.js
@@ -0,0 +1,115 @@
+/* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim: set sts=2 sw=2 et tw=80: */
+"use strict";
+
+const { NetUtil } = ChromeUtils.import("resource://gre/modules/NetUtil.jsm");
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+
+const { HttpServer } = ChromeUtils.import("resource://testing-common/httpd.js");
+
+const COVERAGE_VERSION = "2";
+
+const COVERAGE_ENABLED_PREF = "toolkit.coverage.enabled";
+const OPT_OUT_PREF = "toolkit.coverage.opt-out";
+const ALREADY_RUN_PREF = `toolkit.coverage.already-run.v${COVERAGE_VERSION}`;
+const COVERAGE_UUID_PREF = `toolkit.coverage.uuid.v${COVERAGE_VERSION}`;
+const TELEMETRY_ENABLED_PREF = "datareporting.healthreport.uploadEnabled";
+const REPORTING_ENDPOINT_BASE_PREF = "toolkit.coverage.endpoint.base";
+const REPORTING_ENDPOINT = "submit/coverage/coverage";
+
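+// Lower the coverage module's log level to 20 (Debug) to ease test debugging.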
+Services.prefs.setIntPref("toolkit.coverage.log-level", 20);
+
+add_task(async function setup() {
+ let uuid = "test123";
+ Services.prefs.setCharPref(COVERAGE_UUID_PREF, uuid);
+
+ const server = new HttpServer();
+ server.start(-1);
+ const serverPort = server.identity.primaryPort;
+
+ Services.prefs.setCharPref(
+ REPORTING_ENDPOINT_BASE_PREF,
+ `http://localhost:${serverPort}`
+ );
+
+ server.registerPathHandler(
+ `/${REPORTING_ENDPOINT}/${COVERAGE_VERSION}/${uuid}`,
+ (request, response) => {
+ equal(request.method, "PUT");
+ let telemetryEnabled = Services.prefs.getBoolPref(
+ TELEMETRY_ENABLED_PREF,
+ false
+ );
+
+ let requestBody = NetUtil.readInputStreamToString(
+ request.bodyInputStream,
+ request.bodyInputStream.available()
+ );
+
+ let resultObj = JSON.parse(requestBody);
+
+ deepEqual(Object.keys(resultObj), [
+ "appUpdateChannel",
+ "osName",
+ "osVersion",
+ "telemetryEnabled",
+ ]);
+
+ if (telemetryEnabled) {
+ ok(resultObj.telemetryEnabled);
+ } else {
+ ok(!resultObj.telemetryEnabled);
+ }
+
+ const response_body = "OK";
+ response.bodyOutputStream.write(response_body, response_body.length);
+ server.stop();
+ }
+ );
+
+ // Trigger a proper telemetry init.
+ do_get_profile(true);
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ await TelemetryController.testSetup();
+});
+
+add_task(async function test_prefs() {
+  // The Telemetry reporting setting does not control this ping, but it
+  // is reported by this ping.
+ Services.prefs.setBoolPref(TELEMETRY_ENABLED_PREF, false);
+
+ // should not run if enabled pref is false
+ Services.prefs.setBoolPref(COVERAGE_ENABLED_PREF, false);
+ Services.prefs.setBoolPref(ALREADY_RUN_PREF, false);
+ Services.prefs.setBoolPref(OPT_OUT_PREF, false);
+
+ await TelemetryController.testReset();
+
+ let alreadyRun = Services.prefs.getBoolPref(ALREADY_RUN_PREF, false);
+ ok(!alreadyRun, "should not have run with enabled pref false");
+
+ // should not run if opt-out pref is true
+ Services.prefs.setBoolPref(COVERAGE_ENABLED_PREF, true);
+ Services.prefs.setBoolPref(ALREADY_RUN_PREF, false);
+ Services.prefs.setBoolPref(OPT_OUT_PREF, true);
+
+ await TelemetryController.testReset();
+
+ // should run if opt-out pref is false and coverage is enabled
+ Services.prefs.setBoolPref(COVERAGE_ENABLED_PREF, true);
+ Services.prefs.setBoolPref(ALREADY_RUN_PREF, false);
+ Services.prefs.setBoolPref(OPT_OUT_PREF, false);
+
+ await TelemetryController.testReset();
+
+ // the telemetry setting should be set correctly
+ Services.prefs.setBoolPref(TELEMETRY_ENABLED_PREF, true);
+
+ await TelemetryController.testReset();
+
+ alreadyRun = Services.prefs.getBoolPref(ALREADY_RUN_PREF, false);
+
+ ok(alreadyRun, "should run if no opt-out and enabled");
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_EcosystemTelemetry.js b/toolkit/components/telemetry/tests/unit/test_EcosystemTelemetry.js
new file mode 100644
index 0000000000..44741b9cd2
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_EcosystemTelemetry.js
@@ -0,0 +1,430 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+
+XPCOMUtils.defineLazyModuleGetters(this, {
+ ONLOGIN_NOTIFICATION: "resource://gre/modules/FxAccountsCommon.js",
+ ONLOGOUT_NOTIFICATION: "resource://gre/modules/FxAccountsCommon.js",
+ ONVERIFIED_NOTIFICATION: "resource://gre/modules/FxAccountsCommon.js",
+});
+ChromeUtils.defineModuleGetter(
+ this,
+ "EcosystemTelemetry",
+ "resource://gre/modules/EcosystemTelemetry.jsm"
+);
+
+const TEST_PING_TYPE = "test-ping-type";
+
+const RE_VALID_GUID = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/;
+
+function fakeIdleNotification(topic) {
+ let scheduler = ChromeUtils.import(
+ "resource://gre/modules/TelemetryScheduler.jsm",
+ null
+ );
+ return scheduler.TelemetryScheduler.observe(null, topic, null);
+}
+
+async function promiseNoPing() {
+ // We check there's not one of our pings pending by sending a test ping, then
+ // immediately fetching a pending ping and checking it's that test one.
+ TelemetryController.submitExternalPing(TEST_PING_TYPE, {}, {});
+ let ping = await PingServer.promiseNextPing();
+ Assert.equal(ping.type, TEST_PING_TYPE, "Should be a test ping.");
+}
+
+function checkPingStructure(ping, reason) {
+ Assert.equal(
+ ping.type,
+ EcosystemTelemetry.PING_TYPE,
+ "Should be an ecosystem ping."
+ );
+
+ Assert.ok(!("clientId" in ping), "Ping must not contain a client ID.");
+ Assert.ok("environment" in ping, "Ping must contain an environment.");
+ let environment = ping.environment;
+
+ // Check that the environment is indeed minimal
+ const ALLOWED_ENVIRONMENT_KEYS = ["settings", "system", "profile"];
+ Assert.deepEqual(
+ ALLOWED_ENVIRONMENT_KEYS,
+ Object.keys(environment),
+ "Environment should only contain a limited set of keys."
+ );
+
+ // Check that fields of the environment are indeed minimal
+ Assert.deepEqual(
+ ["locale"],
+ Object.keys(environment.settings),
+ "Settings environment should only contain locale"
+ );
+ Assert.deepEqual(
+ ["cpu", "memoryMB", "os"],
+ Object.keys(environment.system).sort(),
+ "System environment should contain a limited set of keys"
+ );
+ Assert.deepEqual(
+ ["locale", "name", "version"],
+ Object.keys(environment.system.os).sort(),
+ "system.environment.os should contain a limited set of keys"
+ );
+
+ // Check the payload for required fields.
+ let payload = ping.payload;
+ Assert.equal(payload.reason, reason, "Ping reason must match.");
+ Assert.ok(
+ payload.duration >= 0,
+ "Payload must have a duration greater or equal to 0"
+ );
+ Assert.ok("ecosystemAnonId" in payload, "payload must have ecosystemAnonId");
+ Assert.ok(
+ RE_VALID_GUID.test(payload.ecosystemClientId),
+ "ecosystemClientId must be a valid GUID"
+ );
+
+ Assert.ok("scalars" in payload, "Payload must contain scalars");
+ Assert.ok("keyedScalars" in payload, "Payload must contain keyed scalars");
+ Assert.ok("histograms" in payload, "Payload must contain histograms");
+ Assert.ok(
+ "keyedHistograms" in payload,
+ "Payload must contain keyed histograms"
+ );
+}
+
+function fakeAnonId(fn) {
+ const m = ChromeUtils.import(
+ "resource://gre/modules/EcosystemTelemetry.jsm",
+ null
+ );
+ let oldFn = m.Policy.getEcosystemAnonId;
+ m.Policy.getEcosystemAnonId = fn;
+ return oldFn;
+}
+
+registerCleanupFunction(function() {
+ PingServer.stop();
+});
+
+add_task(async function setup() {
+ // Trigger a proper telemetry init.
+ do_get_profile(true);
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ // Start the local ping server and setup Telemetry to use it during the tests.
+ PingServer.start();
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+
+ await TelemetryController.testSetup();
+});
+
+// Make absolutely sure the Ecosystem ping is never triggered on Fennec/non-unified Telemetry.
+add_task(
+ {
+ skip_if: () => !gIsAndroid,
+ },
+ async function test_no_ecosystem_ping_on_fennec() {
+    // Force the preference to true; the additional Android/Unified Telemetry
+    // check should still prevent the ping.
+ Preferences.set(TelemetryUtils.Preferences.EcosystemTelemetryEnabled, true);
+ EcosystemTelemetry.testReset();
+
+ // This is invoked in regular intervals by the timer.
+ // Would trigger ping sending.
+ EcosystemTelemetry.periodicPing();
+ await promiseNoPing();
+ }
+);
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_disabled_non_fxa_production() {
+ Preferences.set(TelemetryUtils.Preferences.EcosystemTelemetryEnabled, true);
+ Assert.ok(EcosystemTelemetry.enabled(), "enabled by default");
+ Preferences.set("identity.fxaccounts.autoconfig.uri", "http://");
+ Assert.ok(!EcosystemTelemetry.enabled(), "disabled if non-prod");
+ Preferences.set(
+ TelemetryUtils.Preferences.EcosystemTelemetryAllowForNonProductionFxA,
+ true
+ );
+ Assert.ok(
+ EcosystemTelemetry.enabled(),
+ "enabled for non-prod but preference override"
+ );
+ Preferences.reset("identity.fxaccounts.autoconfig.uri");
+ Preferences.reset(
+ TelemetryUtils.Preferences.EcosystemTelemetryAllowForNonProductionFxA
+ );
+ }
+);
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_nosending_if_disabled() {
+ Preferences.set(
+ TelemetryUtils.Preferences.EcosystemTelemetryEnabled,
+ false
+ );
+ EcosystemTelemetry.testReset();
+
+ // This is invoked in regular intervals by the timer.
+ // Would trigger ping sending.
+ EcosystemTelemetry.periodicPing();
+ await promiseNoPing();
+ }
+);
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_no_default_send() {
+ // No user's logged in, nothing is mocked, so nothing is sent.
+ Preferences.set(TelemetryUtils.Preferences.EcosystemTelemetryEnabled, true);
+ EcosystemTelemetry.testReset();
+
+ // This is invoked in regular intervals by the timer.
+ EcosystemTelemetry.periodicPing();
+
+ await promiseNoPing();
+ }
+);
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_login_workflow() {
+ // Fake the whole login/logout workflow by triggering the events directly.
+
+ Preferences.set(TelemetryUtils.Preferences.EcosystemTelemetryEnabled, true);
+ EcosystemTelemetry.testReset();
+
+ let originalAnonId = fakeAnonId(() => null);
+ let ping;
+
+ // 1. No user, timer invoked
+ EcosystemTelemetry.periodicPing();
+ await promiseNoPing();
+
+ // 2. User logs in, but we fail to obtain a valid uid.
+ // No ping will be generated.
+ fakeAnonId(() => null);
+ EcosystemTelemetry.observe(null, ONLOGIN_NOTIFICATION, null);
+
+ EcosystemTelemetry.periodicPing();
+ await promiseNoPing();
+
+ // Once we've failed to get the ID, we don't try again until next startup
+ // or another login-related event - so...
+ // 3. uid becomes available after verification.
+ fakeAnonId(() => "test_login_workflow:my.anon.id");
+ EcosystemTelemetry.observe(null, ONVERIFIED_NOTIFICATION, null);
+ print("triggering ping now that we have an anon-id");
+ EcosystemTelemetry.periodicPing();
+ ping = await PingServer.promiseNextPing();
+ checkPingStructure(ping, "periodic");
+ Assert.equal(
+ ping.payload.ecosystemAnonId,
+ "test_login_workflow:my.anon.id"
+ );
+ const origClientId = ping.payload.ecosystemClientId;
+
+ // 4. User disconnects account, should get an immediate ping.
+ print("user disconnects");
+ // We need to arrange for the new empty anonid before the notification.
+ fakeAnonId(() => null);
+ await EcosystemTelemetry.observe(null, ONLOGOUT_NOTIFICATION, null);
+ ping = await PingServer.promiseNextPing();
+ checkPingStructure(ping, "logout");
+ Assert.equal(
+ ping.payload.ecosystemAnonId,
+ "test_login_workflow:my.anon.id",
+ "should have been submitted with the old anonid"
+ );
+ Assert.equal(
+ ping.payload.ecosystemClientId,
+ origClientId,
+ "should have been submitted with the old clientid"
+ );
+ Assert.equal(
+ await EcosystemTelemetry.promiseEcosystemAnonId,
+ null,
+ "should resolve to null immediately after logout"
+ );
+
+ // 5. No user, timer invoked
+ print("timer fires after disconnection");
+ EcosystemTelemetry.periodicPing();
+ await promiseNoPing();
+
+ // 6. Transition back to logged in, pings should again be sent.
+ fakeAnonId(() => "test_login_workflow:my.anon.id.2");
+ EcosystemTelemetry.observe(null, ONVERIFIED_NOTIFICATION, null);
+ print("triggering ping now the user has logged back in");
+ EcosystemTelemetry.periodicPing();
+ ping = await PingServer.promiseNextPing();
+ checkPingStructure(ping, "periodic");
+ Assert.equal(
+ ping.payload.ecosystemAnonId,
+ "test_login_workflow:my.anon.id.2"
+ );
+ Assert.notEqual(
+ ping.payload.ecosystemClientId,
+ origClientId,
+ "should have a different clientid after signing out then back in"
+ );
+
+ // Reset policy.
+ fakeAnonId(originalAnonId);
+ }
+);
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_shutdown_logged_in() {
+ // Check shutdown when a user's logged in does the right thing.
+ Preferences.set(TelemetryUtils.Preferences.EcosystemTelemetryEnabled, true);
+ EcosystemTelemetry.testReset();
+
+ let originalAnonId = fakeAnonId(() =>
+ Promise.resolve("test_shutdown_logged_in:my.anon.id")
+ );
+
+ EcosystemTelemetry.observe(null, ONLOGIN_NOTIFICATION, null);
+
+ // No ping expected yet.
+ await promiseNoPing();
+
+ // Shutdown
+ EcosystemTelemetry.shutdown();
+ let ping = await PingServer.promiseNextPing();
+ checkPingStructure(ping, "shutdown");
+ Assert.equal(
+ ping.payload.ecosystemAnonId,
+ "test_shutdown_logged_in:my.anon.id",
+ "our anon ID is in the ping"
+ );
+ fakeAnonId(originalAnonId);
+ }
+);
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_shutdown_not_logged_in() {
+ // Check shutdown when no user is logged in does the right thing.
+ Preferences.set(TelemetryUtils.Preferences.EcosystemTelemetryEnabled, true);
+ EcosystemTelemetry.testReset();
+
+ let originalAnonId = fakeAnonId(() => Promise.resolve(null));
+
+ // No ping expected yet.
+ await promiseNoPing();
+
+ // Shutdown
+ EcosystemTelemetry.shutdown();
+
+ // Still no ping.
+ await promiseNoPing();
+ fakeAnonId(originalAnonId);
+ }
+);
+
+// Test that a periodic ping is triggered by the scheduler at midnight
+//
+// Based on `test_TelemetrySession#test_DailyDueAndIdle`.
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_periodic_ping() {
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+ let receivedPing = null;
+ // Register a ping handler that will assert when receiving multiple ecosystem pings.
+ // We can ignore other pings, such as the periodic ping.
+ PingServer.registerPingHandler(req => {
+ const ping = decodeRequestPayload(req);
+ if (ping.type == EcosystemTelemetry.PING_TYPE) {
+ Assert.ok(
+ !receivedPing,
+ "Telemetry must only send one periodic ecosystem ping."
+ );
+ receivedPing = ping;
+ }
+ });
+
+ // Faking scheduler timer has to happen before resetting TelemetryController
+ // to be effective.
+ let schedulerTickCallback = null;
+ let now = new Date(2040, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control periodic collection flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ Preferences.set(TelemetryUtils.Preferences.EcosystemTelemetryEnabled, true);
+ EcosystemTelemetry.testReset();
+
+ // Have to arrange for an anon-id to be configured.
+ let originalAnonId = fakeAnonId(() => "test_periodic_ping:my.anon.id");
+ EcosystemTelemetry.observe(null, ONLOGIN_NOTIFICATION, null);
+
+ // As a sanity check we trigger a keyedHistogram and scalar declared as
+ // being in our ping, just to help ensure that the payload was assembled
+ // in the correct shape.
+ let h = Services.telemetry.getKeyedHistogramById("SEARCH_COUNTS");
+ h.add("test-key");
+ Telemetry.scalarSet("browser.engagement.total_uri_count", 2);
+
+ // Trigger the periodic ecosystem ping.
+ let firstPeriodicDue = new Date(2040, 1, 2, 0, 0, 0);
+ fakeNow(firstPeriodicDue);
+
+ // Run a scheduler tick: it should trigger the periodic ping.
+ Assert.ok(!!schedulerTickCallback);
+ let tickPromise = schedulerTickCallback();
+
+ // Send an idle and then an active user notification.
+ fakeIdleNotification("idle");
+ fakeIdleNotification("active");
+
+ // Wait on the tick promise.
+ await tickPromise;
+
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+ // Decode the ping contained in the request and check that's a periodic ping.
+ Assert.ok(receivedPing, "Telemetry must send one ecosystem periodic ping.");
+ checkPingStructure(receivedPing, "periodic");
+ // And check the content we expect is there.
+ Assert.ok(receivedPing.payload.keyedHistograms.parent.SEARCH_COUNTS);
+ Assert.equal(
+ receivedPing.payload.scalars.parent["browser.engagement.total_uri_count"],
+ 2
+ );
+
+ fakeAnonId(originalAnonId);
+ }
+);
diff --git a/toolkit/components/telemetry/tests/unit/test_EventPing.js b/toolkit/components/telemetry/tests/unit/test_EventPing.js
new file mode 100644
index 0000000000..63d616d732
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_EventPing.js
@@ -0,0 +1,290 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import(
+ "resource://testing-common/TelemetryArchiveTesting.jsm",
+ this
+);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetryEventPing",
+ "resource://gre/modules/EventPing.jsm"
+);
+
+function checkPingStructure(type, payload, options) {
+ Assert.equal(
+ type,
+ TelemetryEventPing.EVENT_PING_TYPE,
+ "Should be an event ping."
+ );
+ // Check the payload for required fields.
+ Assert.ok("reason" in payload, "Payload must have reason.");
+ Assert.ok(
+ "processStartTimestamp" in payload,
+ "Payload must have processStartTimestamp."
+ );
+ Assert.ok("sessionId" in payload, "Payload must have sessionId.");
+ Assert.ok("subsessionId" in payload, "Payload must have subsessionId.");
+ Assert.ok("lostEventsCount" in payload, "Payload must have lostEventsCount.");
+ Assert.ok("events" in payload, "Payload must have events.");
+}
+
+function fakePolicy(set, clear, send) {
+ let mod = ChromeUtils.import("resource://gre/modules/EventPing.jsm", null);
+ mod.Policy.setTimeout = set;
+ mod.Policy.clearTimeout = clear;
+ mod.Policy.sendPing = send;
+}
+
+function pass() {
+ /* intentionally empty */
+}
+function fail() {
+ Assert.ok(false, "Not allowed");
+}
+
+function recordEvents(howMany) {
+ for (let i = 0; i < howMany; i++) {
+ Telemetry.recordEvent("telemetry.test", "test1", "object1");
+ }
+}
+
+add_task(async function setup() {
+ // Trigger a proper telemetry init.
+ do_get_profile(true);
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ await TelemetryController.testSetup();
+ TelemetryEventPing.testReset();
+ Telemetry.setEventRecordingEnabled("telemetry.test", true);
+});
+
+// Tests often take the form of faking policy within faked policy.
+// This is to allow us to record events in addition to any that were
+// recorded to trigger the submit in the first place.
+// This works because we start the timer at the top of _submitPing, giving us
+// this opportunity.
+// The result looks like this:
+/*
+fakePolicy((callback, delay) => {
+ // Code that runs at the top of _submitPing
+ fakePolicy(pass, pass, (type, payload, options) => {
+ // Code that runs at the bottom of _submitPing
+ });
+}, pass, fail);
+// Code that triggers _submitPing to run
+*/
+
+add_task(async function test_eventLimitReached() {
+ Telemetry.clearEvents();
+ TelemetryEventPing.testReset();
+
+ let pingCount = 0;
+
+ fakePolicy(pass, pass, fail);
+ recordEvents(999);
+ fakePolicy(
+ (callback, delay) => {
+ Telemetry.recordEvent("telemetry.test", "test2", "object1");
+ fakePolicy(pass, pass, (type, payload, options) => {
+ checkPingStructure(type, payload, options);
+ Assert.ok(options.addClientId, "Adds the client id.");
+ Assert.ok(options.addEnvironment, "Adds the environment.");
+ Assert.ok(!options.usePingSender, "Doesn't require pingsender.");
+ Assert.equal(
+ payload.reason,
+ TelemetryEventPing.Reason.MAX,
+ "Sending because we hit max"
+ );
+ Assert.equal(
+ payload.events.parent.length,
+ 1000,
+ "Has one thousand events"
+ );
+ Assert.equal(payload.lostEventsCount, 0, "Lost no events");
+ Assert.ok(
+ !payload.events.parent.some(ev => ev[1] === "test2"),
+ "Should not have included the final event (yet)."
+ );
+ pingCount++;
+ });
+ },
+ pass,
+ fail
+ );
+ // Now trigger the submit.
+ Telemetry.recordEvent("telemetry.test", "test1", "object1");
+ Assert.equal(pingCount, 1, "Should have sent a ping");
+
+ // With a recent MAX ping sent, record another max amount of events (and then two extras).
+ fakePolicy(fail, fail, fail);
+ recordEvents(998);
+ fakePolicy(
+ (callback, delay) => {
+ Telemetry.recordEvent("telemetry.test", "test2", "object2");
+ Telemetry.recordEvent("telemetry.test", "test2", "object2");
+ fakePolicy(pass, pass, (type, payload, options) => {
+ checkPingStructure(type, payload, options);
+ Assert.ok(options.addClientId, "Adds the client id.");
+ Assert.ok(options.addEnvironment, "Adds the environment.");
+ Assert.ok(!options.usePingSender, "Doesn't require pingsender.");
+ Assert.equal(
+ payload.reason,
+ TelemetryEventPing.Reason.MAX,
+ "Sending because we hit max"
+ );
+ Assert.equal(
+ payload.events.parent.length,
+ 1000,
+ "Has one thousand events"
+ );
+ Assert.equal(payload.lostEventsCount, 2, "Lost two events");
+ Assert.equal(
+ payload.events.parent[0][2],
+ "test2",
+ "The first event of the second bunch should be the leftover event of the first bunch."
+ );
+ Assert.ok(
+ !payload.events.parent.some(ev => ev[3] === "object2"),
+ "Should not have included any of the lost two events."
+ );
+ pingCount++;
+ });
+ callback(); // Trigger the send immediately.
+ },
+ pass,
+ fail
+ );
+ recordEvents(1);
+ Assert.equal(pingCount, 2, "Should have sent a second ping");
+
+ // Ensure we send a subsequent MAX ping exactly on 1000 events, and without
+ // the two events we lost.
+ fakePolicy(fail, fail, fail);
+ recordEvents(999);
+ fakePolicy((callback, delay) => {
+ fakePolicy(pass, pass, (type, payload, options) => {
+ checkPingStructure(type, payload, options);
+ Assert.ok(options.addClientId, "Adds the client id.");
+ Assert.ok(options.addEnvironment, "Adds the environment.");
+ Assert.ok(!options.usePingSender, "Doesn't require pingsender.");
+ Assert.equal(
+ payload.reason,
+ TelemetryEventPing.Reason.MAX,
+ "Sending because we hit max"
+ );
+ Assert.equal(
+ payload.events.parent.length,
+ 1000,
+ "Has one thousand events"
+ );
+ Assert.equal(payload.lostEventsCount, 0, "Lost no events");
+ Assert.ok(
+ !payload.events.parent.some(ev => ev[3] === "object2"),
+ "Should not have included any of the lost two events from the previous bunch."
+ );
+ pingCount++;
+ });
+ callback(); // Trigger the send immediately
+ });
+ recordEvents(1);
+ Assert.equal(pingCount, 3, "Should have sent a third ping");
+});
+
+add_task(async function test_timers() {
+ Telemetry.clearEvents();
+ TelemetryEventPing.testReset();
+
+ // Immediately after submitting a MAX ping, we should set the timer for the
+ // next interval.
+ recordEvents(999);
+ fakePolicy(
+ (callback, delay) => {
+ Assert.equal(
+ delay,
+ TelemetryEventPing.minFrequency,
+ "Timer should be started with the min frequency"
+ );
+ },
+ pass,
+ pass
+ );
+ recordEvents(1);
+
+ fakePolicy(
+ (callback, delay) => {
+ Assert.ok(
+ delay <= TelemetryEventPing.maxFrequency,
+ "Timer should be at most the max frequency for a subsequent MAX ping."
+ );
+ },
+ pass,
+ pass
+ );
+ recordEvents(1000);
+});
+
+add_task(async function test_periodic() {
+ Telemetry.clearEvents();
+ TelemetryEventPing.testReset();
+
+ fakePolicy(
+ (callback, delay) => {
+ Assert.equal(
+ delay,
+ TelemetryEventPing.minFrequency,
+ "Timer should default to the min frequency"
+ );
+ fakePolicy(pass, pass, (type, payload, options) => {
+ checkPingStructure(type, payload, options);
+ Assert.ok(options.addClientId, "Adds the client id.");
+ Assert.ok(options.addEnvironment, "Adds the environment.");
+ Assert.ok(!options.usePingSender, "Doesn't require pingsender.");
+ Assert.equal(
+ payload.reason,
+ TelemetryEventPing.Reason.PERIODIC,
+ "Sending because we hit a timer"
+ );
+ Assert.equal(payload.events.parent.length, 1, "Has one event");
+ Assert.equal(payload.lostEventsCount, 0, "Lost no events");
+ });
+ callback();
+ },
+ pass,
+ fail
+ );
+
+ recordEvents(1);
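+  // Starting the timer calls the faked setTimeout above, which immediately runs callback() and submits the PERIODIC ping.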
+ TelemetryEventPing._startTimer();
+});
+
+// Ensure this is the final test in the suite, as it shuts things down.
+add_task(async function test_shutdown() {
+ Telemetry.clearEvents();
+ TelemetryEventPing.testReset();
+
+ recordEvents(999);
+ fakePolicy(pass, pass, (type, payload, options) => {
+ Assert.ok(options.addClientId, "Adds the client id.");
+ Assert.ok(options.addEnvironment, "Adds the environment.");
+ Assert.ok(options.usePingSender, "Asks for pingsender.");
+ Assert.equal(
+ payload.reason,
+ TelemetryEventPing.Reason.SHUTDOWN,
+ "Sending because we are shutting down"
+ );
+ Assert.equal(payload.events.parent.length, 999, "Has 999 events");
+ Assert.equal(payload.lostEventsCount, 0, "No lost events");
+ });
+ TelemetryEventPing.shutdown();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_HealthPing.js b/toolkit/components/telemetry/tests/unit/test_HealthPing.js
new file mode 100644
index 0000000000..f29016a2af
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_HealthPing.js
@@ -0,0 +1,403 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+// This tests the public Telemetry API for submitting Health pings.
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import(
+ "resource://testing-common/TelemetryArchiveTesting.jsm",
+ this
+);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetryHealthPing",
+ "resource://gre/modules/HealthPing.jsm"
+);
+
+function checkHealthPingStructure(ping, expectedFailuresDict) {
+ let payload = ping.payload;
+ Assert.equal(
+ ping.type,
+ TelemetryHealthPing.HEALTH_PING_TYPE,
+ "Should have recorded a health ping."
+ );
+
+ for (let [key, value] of Object.entries(expectedFailuresDict)) {
+ Assert.deepEqual(
+ payload[key],
+ value,
+ "Should have recorded correct entry with key: " + key
+ );
+ }
+}
+
+function fakeHealthSchedulerTimer(set, clear) {
+ let telemetryHealthPing = ChromeUtils.import(
+ "resource://gre/modules/HealthPing.jsm",
+ null
+ );
+ telemetryHealthPing.Policy.setSchedulerTickTimeout = set;
+ telemetryHealthPing.Policy.clearSchedulerTickTimeout = clear;
+}
+
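+/**
+ * Polls |promiseFn| every 100ms, up to |tryCount| times, until it resolves
+ * without throwing. Rejects with |timeoutMsg| if it never succeeds.
+ */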
+async function waitForConditionWithPromise(
+ promiseFn,
+ timeoutMsg,
+ tryCount = 30
+) {
+ const SINGLE_TRY_TIMEOUT = 100;
+ let tries = 0;
+ do {
+ try {
+ return await promiseFn();
+ } catch (ex) {}
+ await new Promise(resolve => do_timeout(SINGLE_TRY_TIMEOUT, resolve));
+ } while (++tries <= tryCount);
+ throw new Error(timeoutMsg);
+}
+
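+/**
+ * Overrides the TelemetrySend ping submission timeout policy so that uploads
+ * can be forced to time out quickly in tests.
+ */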
+function fakeSendSubmissionTimeout(timeOut) {
+  let telemetrySend = ChromeUtils.import(
+    "resource://gre/modules/TelemetrySend.jsm",
+    null
+  );
+  telemetrySend.Policy.pingSubmissionTimeout = () => timeOut;
+}
+
+add_task(async function setup() {
+ // Trigger a proper telemetry init.
+ do_get_profile(true);
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+ Preferences.set(TelemetryUtils.Preferences.HealthPingEnabled, true);
+
+ await TelemetryController.testSetup();
+ PingServer.start();
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+});
+
+add_task(async function test_sendImmediately() {
+ PingServer.clearRequests();
+ TelemetryHealthPing.testReset();
+
+ await TelemetryHealthPing.recordSendFailure("testProblem");
+ let ping = await PingServer.promiseNextPing();
+ checkHealthPingStructure(ping, {
+ [TelemetryHealthPing.FailureType.SEND_FAILURE]: {
+ testProblem: 1,
+ },
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.IMMEDIATE,
+ });
+});
+
+add_task(async function test_sendOnDelay() {
+ PingServer.clearRequests();
+ TelemetryHealthPing.testReset();
+
+ // This first failure should immediately trigger a ping. After this, subsequent failures should be throttled.
+ await TelemetryHealthPing.recordSendFailure("testFailure");
+ let testPing = await PingServer.promiseNextPing();
+ Assert.equal(
+ testPing.type,
+ TelemetryHealthPing.HEALTH_PING_TYPE,
+ "Should have recorded a health ping."
+ );
+
+  // Retrieve the delayed callback.
+ let pingSubmissionCallBack = null;
+ fakeHealthSchedulerTimer(
+ callBack => (pingSubmissionCallBack = callBack),
+ () => {}
+ );
+
+  // Record two failures; the health ping must not be sent yet.
+ await TelemetryHealthPing.recordSendFailure("testFailure");
+ await TelemetryHealthPing.recordSendFailure("testFailure");
+
+  // Wait for the delayed health ping to be sent.
+ await pingSubmissionCallBack();
+
+ let ping = await PingServer.promiseNextPing();
+ checkHealthPingStructure(ping, {
+ [TelemetryHealthPing.FailureType.SEND_FAILURE]: {
+ testFailure: 2,
+ },
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.DELAYED,
+ });
+});
+
+add_task(async function test_sendOverSizedPing() {
+ TelemetryHealthPing.testReset();
+ PingServer.clearRequests();
+ let OVER_SIZED_PING_TYPE = "over-sized-ping";
+ let overSizedData = generateRandomString(2 * 1024 * 1024);
+
+ await TelemetryController.submitExternalPing(OVER_SIZED_PING_TYPE, {
+ data: overSizedData,
+ });
+ let ping = await PingServer.promiseNextPing();
+
+ checkHealthPingStructure(ping, {
+ [TelemetryHealthPing.FailureType.DISCARDED_FOR_SIZE]: {
+ [OVER_SIZED_PING_TYPE]: 1,
+ },
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.IMMEDIATE,
+ });
+});
+
+add_task(async function test_healthPingOnTop() {
+ PingServer.clearRequests();
+ TelemetryHealthPing.testReset();
+
+ let PING_TYPE = "priority-ping";
+
+ // Fake now to be in throttled state.
+ let now = fakeNow(2050, 1, 2, 0, 0, 0);
+ fakeMidnightPingFuzzingDelay(60 * 1000);
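+  // While throttled, submitted pings queue up locally instead of being uploaded immediately.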
+
+ for (let value of [PING_TYPE, PING_TYPE, "health", PING_TYPE]) {
+ TelemetryController.submitExternalPing(value, {});
+ }
+
+ // Now trigger sending pings again.
+ fakeNow(futureDate(now, 5 * 60 * 1000));
+ await TelemetrySend.notifyCanUpload();
+ let scheduler = ChromeUtils.import(
+ "resource://gre/modules/TelemetrySend.jsm",
+ null
+ );
+ scheduler.SendScheduler.triggerSendingPings(true);
+
+ let pings = await PingServer.promiseNextPings(4);
+ Assert.equal(
+ pings[0].type,
+ "health",
+ "Should have received the health ping first."
+ );
+});
+
+add_task(async function test_sendOnTimeout() {
+ TelemetryHealthPing.testReset();
+ await TelemetrySend.reset();
+ PingServer.clearRequests();
+ let PING_TYPE = "ping-on-timeout";
+
+ // Disable send retry to make this test more deterministic.
+ fakePingSendTimer(
+ () => {},
+ () => {}
+ );
+
+ // Set up small ping submission timeout to always have timeout error.
+ fakeSendSubmissionTimeout(2);
+
+ await TelemetryController.submitExternalPing(PING_TYPE, {});
+
+ let response;
+ PingServer.registerPingHandler((req, res) => {
+ PingServer.resetPingHandler();
+ // We don't finish the response yet to make sure to trigger a timeout.
+ res.processAsync();
+ response = res;
+ });
+
+ // Wait for health ping.
+ let ac = new TelemetryArchiveTesting.Checker();
+ await ac.promiseInit();
+  await waitForConditionWithPromise(async () => {
+    // Throw while the ping is not yet in the archive so the helper keeps polling.
+    if (!(await ac.promiseFindPing("health", []))) {
+      throw new Error("Health ping not archived yet");
+    }
+  }, "Failed to find health ping");
+
+ if (response) {
+ response.finish();
+ }
+
+  let telemetrySend = ChromeUtils.import(
+    "resource://gre/modules/TelemetrySend.jsm",
+    null
+  );
+  fakeSendSubmissionTimeout(telemetrySend.PING_SUBMIT_TIMEOUT_MS);
+ PingServer.resetPingHandler();
+ TelemetrySend.notifyCanUpload();
+
+ let pings = await PingServer.promiseNextPings(2);
+ let healthPing = pings.find(ping => ping.type === "health");
+ checkHealthPingStructure(healthPing, {
+ [TelemetryHealthPing.FailureType.SEND_FAILURE]: {
+ timeout: 1,
+ },
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.IMMEDIATE,
+ });
+ await TelemetryStorage.testClearPendingPings();
+});
+
+add_task(async function test_sendOnlyTopTenDiscardedPings() {
+ TelemetryHealthPing.testReset();
+ await TelemetrySend.reset();
+ PingServer.clearRequests();
+ let PING_TYPE = "sort-discarded";
+
+ // This first failure should immediately trigger a ping. After this, subsequent failures should be throttled.
+ await TelemetryHealthPing.recordSendFailure("testFailure");
+ let testPing = await PingServer.promiseNextPing();
+ Assert.equal(
+ testPing.type,
+ TelemetryHealthPing.HEALTH_PING_TYPE,
+ "Should have recorded a health ping."
+ );
+
+  // Retrieve the delayed callback.
+ let pingSubmissionCallBack = null;
+ fakeHealthSchedulerTimer(
+ callBack => (pingSubmissionCallBack = callBack),
+ () => {}
+ );
+
+ // Add failures
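+  // PING_TYPE + i is discarded (i - 1) times, giving counts 1 through 10 for types 2 through 11.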
+ for (let i = 1; i < 12; i++) {
+ for (let j = 1; j < i; j++) {
+ TelemetryHealthPing.recordDiscardedPing(PING_TYPE + i);
+ }
+ }
+
+ await TelemetrySend.reset();
+ await pingSubmissionCallBack();
+ let ping = await PingServer.promiseNextPing();
+
+ checkHealthPingStructure(ping, {
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.DELAYED,
+ [TelemetryHealthPing.FailureType.DISCARDED_FOR_SIZE]: {
+ [PING_TYPE + 11]: 10,
+ [PING_TYPE + 10]: 9,
+ [PING_TYPE + 9]: 8,
+ [PING_TYPE + 8]: 7,
+ [PING_TYPE + 7]: 6,
+ [PING_TYPE + 6]: 5,
+ [PING_TYPE + 5]: 4,
+ [PING_TYPE + 4]: 3,
+ [PING_TYPE + 3]: 2,
+ [PING_TYPE + 2]: 1,
+ },
+ });
+});
+
+add_task(async function test_discardedForSizePending() {
+ TelemetryHealthPing.testReset();
+ PingServer.clearRequests();
+
+ const PING_TYPE = "discarded-for-size-pending";
+
+ const OVERSIZED_PING_ID = "9b21ec8f-f762-4d28-a2c1-44e1c4694f24";
+ // Create a pending oversized ping.
+ let overSizedPayload = generateRandomString(2 * 1024 * 1024);
+ const OVERSIZED_PING = {
+ id: OVERSIZED_PING_ID,
+ type: PING_TYPE,
+ creationDate: new Date().toISOString(),
+ // Generate a 2MB string to use as the ping payload.
+ payload: overSizedPayload,
+ };
+
+ // Test loadPendingPing.
+ await TelemetryStorage.savePendingPing(OVERSIZED_PING);
+ // Try to manually load the oversized ping.
+ await Assert.rejects(
+ TelemetryStorage.loadPendingPing(OVERSIZED_PING_ID),
+ /loadPendingPing - exceeded the maximum ping size/,
+ "The oversized ping should have been pruned."
+ );
+
+ let ping = await PingServer.promiseNextPing();
+ checkHealthPingStructure(ping, {
+ [TelemetryHealthPing.FailureType.DISCARDED_FOR_SIZE]: {
+ "<unknown>": 1,
+ },
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.IMMEDIATE,
+ });
+
+ // Test _scanPendingPings.
+ TelemetryHealthPing.testReset();
+ await TelemetryStorage.savePendingPing(OVERSIZED_PING);
+ await TelemetryStorage.loadPendingPingList();
+
+ ping = await PingServer.promiseNextPing();
+ checkHealthPingStructure(ping, {
+ [TelemetryHealthPing.FailureType.DISCARDED_FOR_SIZE]: {
+ "<unknown>": 1,
+ },
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.IMMEDIATE,
+ });
+});
+
+add_task(async function test_usePingSenderOnShutdown() {
+ if (
+ gIsAndroid ||
+ (AppConstants.platform == "linux" && OS.Constants.Sys.bits == 32)
+ ) {
+ // We don't support the pingsender on Android, yet, see bug 1335917.
+ // We also don't support the pingsender testing on Treeherder for
+ // Linux 32 bit (due to missing libraries). So skip it there too.
+ // See bug 1310703 comment 78.
+ return;
+ }
+
+ TelemetryHealthPing.testReset();
+ await TelemetrySend.reset();
+ PingServer.clearRequests();
+
+ // This first failure should immediately trigger a ping.
+ // After this, subsequent failures should be throttled.
+ await TelemetryHealthPing.recordSendFailure("testFailure");
+ await PingServer.promiseNextPing();
+
+ TelemetryHealthPing.recordSendFailure("testFailure");
+ let nextRequest = PingServer.promiseNextRequest();
+
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ let request = await nextRequest;
+ let ping = decodeRequestPayload(request);
+
+ checkHealthPingStructure(ping, {
+ [TelemetryHealthPing.FailureType.SEND_FAILURE]: {
+ testFailure: 1,
+ },
+ os: TelemetryHealthPing.OsInfo,
+ reason: TelemetryHealthPing.Reason.SHUT_DOWN,
+ });
+
+ // Check that the health ping is sent at shutdown using the pingsender.
+ Assert.equal(
+ request.getHeader("User-Agent"),
+ "pingsender/1.0",
+ "Should have received the correct user agent string."
+ );
+ Assert.equal(
+ request.getHeader("X-PingSender-Version"),
+ "1.0",
+ "Should have received the correct PingSender version string."
+ );
+});
+
+add_task(async function cleanup() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_MigratePendingPings.js b/toolkit/components/telemetry/tests/unit/test_MigratePendingPings.js
new file mode 100644
index 0000000000..28b93de1fd
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_MigratePendingPings.js
@@ -0,0 +1,151 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/osfile.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://testing-common/AppData.jsm", this);
+
+// The name of the pending pings directory outside of the user profile,
+// in the user app data directory.
+const PENDING_PING_DIR_NAME = "Pending Pings";
+
+async function createFakeAppDir() {
+ // Create a directory inside the profile and register it as UAppData, so
+ // we can stick fake crash pings inside there. We put it inside the profile
+ // just because we know that will get cleaned up after the test runs.
+ let profileDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
+
+ // Create "<profile>/UAppData/Pending Pings".
+ const pendingPingsPath = OS.Path.join(
+ profileDir.path,
+ "UAppData",
+ PENDING_PING_DIR_NAME
+ );
+ await OS.File.makeDir(pendingPingsPath, {
+ ignoreExisting: true,
+ from: OS.Constants.Path.profileDir,
+ });
+
+ await makeFakeAppDir();
+}
+
+add_task(async function setup() {
+ // Init the profile.
+ do_get_profile();
+ await createFakeAppDir();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+});
+
+add_task(async function test_migrateUnsentPings() {
+ const PINGS = [
+ {
+ type: "crash",
+ id: TelemetryUtils.generateUUID(),
+ payload: { foo: "bar" },
+ dateCreated: new Date(2010, 1, 1, 10, 0, 0),
+ },
+ {
+ type: "other",
+ id: TelemetryUtils.generateUUID(),
+ payload: { moo: "meh" },
+ dateCreated: new Date(2010, 2, 1, 10, 2, 0),
+ },
+ ];
+ const APP_DATA_DIR = Services.dirsvc.get("UAppData", Ci.nsIFile).path;
+ const APPDATA_PINGS_DIR = OS.Path.join(APP_DATA_DIR, PENDING_PING_DIR_NAME);
+
+ // Create some pending pings outside of the user profile.
+ for (let ping of PINGS) {
+ const pingPath = OS.Path.join(APPDATA_PINGS_DIR, ping.id + ".json");
+ await TelemetryStorage.savePingToFile(ping, pingPath, true);
+ }
+
+ // Make sure the pending ping list is empty.
+ await TelemetryStorage.testClearPendingPings();
+
+ // Start the migration from TelemetryStorage.
+ let pendingPings = await TelemetryStorage.loadPendingPingList();
+ Assert.equal(
+ pendingPings.length,
+ 2,
+ "TelemetryStorage must have migrated 2 pings."
+ );
+
+ for (let ping of PINGS) {
+ // Verify that the pings were migrated and are among the pending pings.
+ Assert.ok(
+ pendingPings.find(p => p.id == ping.id),
+ "The ping must have been migrated."
+ );
+
+ // Try to load the migrated ping from the user profile.
+ let migratedPing = await TelemetryStorage.loadPendingPing(ping.id);
+ Assert.equal(
+ ping.id,
+ migratedPing.id,
+ "Should have loaded the correct ping id."
+ );
+ Assert.equal(
+ ping.type,
+ migratedPing.type,
+ "Should have loaded the correct ping type."
+ );
+ Assert.deepEqual(
+ ping.payload,
+ migratedPing.payload,
+ "Should have loaded the correct payload."
+ );
+
+ // Verify that the pings are no longer outside of the user profile.
+ const pingPath = OS.Path.join(APPDATA_PINGS_DIR, ping.id + ".json");
+ Assert.ok(
+ !(await OS.File.exists(pingPath)),
+ "The ping should not be in the Pending Pings directory anymore."
+ );
+ }
+});
+
+add_task(async function test_migrateIncompatiblePing() {
+ const APP_DATA_DIR = Services.dirsvc.get("UAppData", Ci.nsIFile).path;
+ const APPDATA_PINGS_DIR = OS.Path.join(APP_DATA_DIR, PENDING_PING_DIR_NAME);
+
+ // Create a ping incompatible with migration outside of the user profile.
+ const pingPath = OS.Path.join(APPDATA_PINGS_DIR, "incompatible.json");
+ await TelemetryStorage.savePingToFile({ incom: "patible" }, pingPath, true);
+
+ // Ensure the pending ping list is empty.
+ await TelemetryStorage.testClearPendingPings();
+ TelemetryStorage.reset();
+
+ // Start the migration from TelemetryStorage.
+ let pendingPings = await TelemetryStorage.loadPendingPingList();
+ Assert.equal(
+ pendingPings.length,
+ 0,
+    "TelemetryStorage must have migrated no pings: " +
+ JSON.stringify(pendingPings)
+ );
+
+ Assert.ok(
+ !(await OS.File.exists(pingPath)),
+ "The incompatible ping must have been deleted by the migration"
+ );
+});
+
+add_task(async function teardown() {
+ // Delete the UAppData directory and make sure nothing breaks.
+ const APP_DATA_DIR = Services.dirsvc.get("UAppData", Ci.nsIFile).path;
+ await OS.File.removeDir(APP_DATA_DIR, { ignorePermissions: true });
+ Assert.ok(
+ !(await OS.File.exists(APP_DATA_DIR)),
+ "The UAppData directory must not exist anymore."
+ );
+ TelemetryStorage.reset();
+ await TelemetryStorage.loadPendingPingList();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_ModulesPing.js b/toolkit/components/telemetry/tests/unit/test_ModulesPing.js
new file mode 100644
index 0000000000..1fd2510014
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_ModulesPing.js
@@ -0,0 +1,297 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { Preferences } = ChromeUtils.import(
+ "resource://gre/modules/Preferences.jsm"
+);
+const { ctypes } = ChromeUtils.import("resource://gre/modules/ctypes.jsm");
+const { OS } = ChromeUtils.import("resource://gre/modules/osfile.jsm");
+
+const MAX_NAME_LENGTH = 64;
+
+// The following libraries (except libxul) are all built from the
+// toolkit/components/telemetry/tests/modules-test.cpp file, which contains
+// instructions on how to build them.
+const libModules = ctypes.libraryName("modules-test");
+const libUnicode = ctypes.libraryName("modμles-test");
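+// This name exceeds MAX_NAME_LENGTH, so the modules ping is expected to report it truncated with a trailing "…".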
+const libLongName =
+ "lorem_ipsum_dolor_sit_amet_consectetur_adipiscing_elit_Fusce_sit_amet_tellus_non_magna_euismod_vestibulum_Vivamus_turpis_duis.dll";
+
+function chooseDLL(x86, x64, aarch64) {
+ let xpcomabi = Services.appinfo.XPCOMABI;
+ let cpu = xpcomabi.split("-")[0];
+ switch (cpu) {
+ case "aarch64":
+ return aarch64;
+ case "x86_64":
+ return x64;
+ case "x86":
+ return x86;
+ // This case only happens on Android, which gets skipped below. The previous
+ // code was returning the x86 version when testing for arm.
+ case "arm":
+ return x86;
+ default:
+ Assert.ok(false, "unexpected CPU type: " + cpu);
+ return x86;
+ }
+}
+
+const libUnicodePDB = chooseDLL(
+ "testUnicodePDB32.dll",
+ "testUnicodePDB64.dll",
+ "testUnicodePDBAArch64.dll"
+);
+const libNoPDB = chooseDLL(
+ "testNoPDB32.dll",
+ "testNoPDB64.dll",
+ "testNoPDBAArch64.dll"
+);
+const libxul = OS.Path.basename(OS.Constants.Path.libxul);
+
+const libModulesFile = do_get_file(libModules).path;
+const libUnicodeFile = OS.Path.join(
+ OS.Path.dirname(libModulesFile),
+ libUnicode
+);
+const libLongNameFile = OS.Path.join(
+ OS.Path.dirname(libModulesFile),
+ libLongName
+);
+const libUnicodePDBFile = do_get_file(libUnicodePDB).path;
+const libNoPDBFile = do_get_file(libNoPDB).path;
+
+let libModulesHandle,
+ libUnicodeHandle,
+ libLongNameHandle,
+ libUnicodePDBHandle,
+ libNoPDBHandle;
+
+let expectedLibs;
+if (AppConstants.platform === "win") {
+ const version = AppConstants.MOZ_APP_VERSION.substring(
+ 0,
+ AppConstants.MOZ_APP_VERSION.indexOf(".") + 2
+ );
+
+ expectedLibs = [
+ {
+ name: libxul,
+ debugName: libxul.replace(".dll", ".pdb"),
+ version,
+ },
+ {
+ name: libModules,
+ debugName: libModules.replace(".dll", ".pdb"),
+ version,
+ },
+ {
+ name: libUnicode,
+ debugName: libModules.replace(".dll", ".pdb"),
+ version,
+ },
+ {
+ name: libLongName.substring(0, MAX_NAME_LENGTH - 1) + "…",
+ debugName: libModules.replace(".dll", ".pdb"),
+ version,
+ },
+ {
+ name: libUnicodePDB,
+ debugName: "libmodμles.pdb",
+ version: null,
+ },
+ {
+ name: libNoPDB,
+ debugName: null,
+ version: null,
+ },
+ {
+ // We choose this DLL because it's guaranteed to exist in our process and
+ // be signed on all Windows versions that we support.
+ name: "ntdll.dll",
+ // debugName changes depending on OS version and is irrelevant to this test
+ // version changes depending on OS version and is irrelevant to this test
+ certSubject: "Microsoft Windows",
+ },
+ ];
+} else if (AppConstants.platform === "android") {
+ // Listing shared libraries doesn't work in Android xpcshell tests.
+ // https://hg.mozilla.org/mozilla-central/file/0eef1d5a39366059677c6d7944cfe8a97265a011/tools/profiler/core/shared-libraries-linux.cc#l95
+ expectedLibs = [];
+} else {
+ expectedLibs = [
+ {
+ name: libxul,
+ debugName: libxul,
+ version: null,
+ },
+ {
+ name: libModules,
+ debugName: libModules,
+ version: null,
+ },
+ {
+ name: libUnicode,
+ debugName: libUnicode,
+ version: null,
+ },
+ {
+ name: libLongName.substring(0, MAX_NAME_LENGTH - 1) + "…",
+ debugName: libLongName.substring(0, MAX_NAME_LENGTH - 1) + "…",
+ version: null,
+ },
+ ];
+}
+
+add_task(async function setup() {
+ do_get_profile();
+
+ await OS.File.copy(libModulesFile, libUnicodeFile);
+  await OS.File.copy(libModulesFile, libLongNameFile);
+
+ if (AppConstants.platform !== "android") {
+ libModulesHandle = ctypes.open(libModulesFile);
+ libUnicodeHandle = ctypes.open(libUnicodeFile);
+ libLongNameHandle = ctypes.open(libLongNameFile);
+ if (AppConstants.platform === "win") {
+ libUnicodePDBHandle = ctypes.open(libUnicodePDBFile);
+ libNoPDBHandle = ctypes.open(libNoPDBFile);
+ }
+ }
+
+ // Force the timer to fire (using a small interval).
+ Cc["@mozilla.org/updates/timer-manager;1"]
+ .getService(Ci.nsIObserver)
+ .observe(null, "utm-test-init", "");
+ Preferences.set("toolkit.telemetry.modulesPing.interval", 0);
+ Preferences.set("app.update.url", "http://localhost");
+
+ // Start the local ping server and setup Telemetry to use it during the tests.
+ PingServer.start();
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+});
+
+registerCleanupFunction(function() {
+ if (libModulesHandle) {
+ libModulesHandle.close();
+ }
+ if (libUnicodeHandle) {
+ libUnicodeHandle.close();
+ }
+ if (libLongNameHandle) {
+ libLongNameHandle.close();
+ }
+ if (libUnicodePDBHandle) {
+ libUnicodePDBHandle.close();
+ }
+ if (libNoPDBHandle) {
+ libNoPDBHandle.close();
+ }
+
+ return OS.File.remove(libUnicodeFile)
+ .then(() => OS.File.remove(libLongNameFile))
+ .then(() => PingServer.stop());
+});
+
+add_task(
+ {
+ skip_if: () => !AppConstants.MOZ_GECKO_PROFILER,
+ },
+ async function test_send_ping() {
+ await TelemetryController.testSetup();
+
+ let found = await PingServer.promiseNextPing();
+ Assert.ok(!!found, "Telemetry ping submitted.");
+ Assert.strictEqual(found.type, "modules", "Ping type is 'modules'");
+ Assert.ok(found.environment, "'modules' ping has an environment.");
+ Assert.ok(!!found.clientId, "'modules' ping has a client ID.");
+ Assert.ok(
+ !!found.payload.modules,
+ "Telemetry ping payload contains the 'modules' array."
+ );
+
+ let nameComparator;
+ if (AppConstants.platform === "win") {
+ // Do case-insensitive checking of file/module names on Windows
+ nameComparator = function(a, b) {
+ if (typeof a === "string" && typeof b === "string") {
+ return a.toLowerCase() === b.toLowerCase();
+ }
+
+ return a === b;
+ };
+ } else {
+ nameComparator = function(a, b) {
+ return a === b;
+ };
+ }
+
+ for (let lib of expectedLibs) {
+ let test_lib = found.payload.modules.find(module =>
+ nameComparator(module.name, lib.name)
+ );
+
+ Assert.ok(!!test_lib, "There is a '" + lib.name + "' module.");
+
+ if ("version" in lib) {
+ if (lib.version !== null) {
+ Assert.ok(
+ test_lib.version.startsWith(lib.version),
+ "The version of the " +
+ lib.name +
+ " module (" +
+ test_lib.version +
+ ") is correct (it starts with '" +
+ lib.version +
+ "')."
+ );
+ } else {
+ Assert.strictEqual(
+ test_lib.version,
+ null,
+ "The version of the " + lib.name + " module is null."
+ );
+ }
+ }
+
+ if ("debugName" in lib) {
+ Assert.ok(
+ nameComparator(test_lib.debugName, lib.debugName),
+ "The " + lib.name + " module has the correct debug name."
+ );
+ }
+
+ if (lib.debugName === null) {
+ Assert.strictEqual(
+ test_lib.debugID,
+ null,
+ "The " + lib.name + " module doesn't have a debug ID."
+ );
+ } else {
+ Assert.greater(
+ test_lib.debugID.length,
+ 0,
+ "The " + lib.name + " module has a debug ID."
+ );
+ }
+
+ if ("certSubject" in lib) {
+ Assert.strictEqual(
+ test_lib.certSubject,
+ lib.certSubject,
+ "The " + lib.name + " module has the expected cert subject."
+ );
+ }
+ }
+
+ let test_lib = found.payload.modules.find(
+ module => module.name === libLongName
+ );
+ Assert.ok(!test_lib, "There isn't a '" + libLongName + "' module.");
+ }
+);
diff --git a/toolkit/components/telemetry/tests/unit/test_PingAPI.js b/toolkit/components/telemetry/tests/unit/test_PingAPI.js
new file mode 100644
index 0000000000..0b92e19fba
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_PingAPI.js
@@ -0,0 +1,711 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+// This tests the public Telemetry API for submitting pings.
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/ClientID.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryArchive.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/osfile.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+
+XPCOMUtils.defineLazyGetter(this, "gPingsArchivePath", function() {
+ return OS.Path.join(
+ OS.Constants.Path.profileDir,
+ "datareporting",
+ "archived"
+ );
+});
+
+/**
+ * Fakes the archive storage quota.
+ * @param {Integer} aArchiveQuota The new quota, in bytes.
+ */
+function fakeStorageQuota(aArchiveQuota) {
+ let storage = ChromeUtils.import(
+ "resource://gre/modules/TelemetryStorage.jsm",
+ null
+ );
+ storage.Policy.getArchiveQuota = () => aArchiveQuota;
+}
+
+/**
+ * Lists all the valid archived pings and their metadata, sorted by creation date.
+ *
+ * @return {Object[]} A list of objects with the extracted data in the form:
+ * { timestamp: <number>,
+ * id: <string>,
+ * type: <string>,
+ * size: <integer> }
+ */
+var getArchivedPingsInfo = async function() {
+ let dirIterator = new OS.File.DirectoryIterator(gPingsArchivePath);
+ let subdirs = (await dirIterator.nextBatch()).filter(e => e.isDir);
+ let archivedPings = [];
+
+ // Iterate through the subdirs of |gPingsArchivePath|.
+ for (let dir of subdirs) {
+ let fileIterator = new OS.File.DirectoryIterator(dir.path);
+ let files = (await fileIterator.nextBatch()).filter(e => !e.isDir);
+
+ // Then get a list of the files for the current subdir.
+ for (let f of files) {
+ let pingInfo = TelemetryStorage._testGetArchivedPingDataFromFileName(
+ f.name
+ );
+ if (!pingInfo) {
+ // This is not a valid archived ping, skip it.
+ continue;
+ }
+ // Find the size of the ping and then add the info to the array.
+ pingInfo.size = (await OS.File.stat(f.path)).size;
+ archivedPings.push(pingInfo);
+ }
+ }
+
+ // Sort the list by creation date and then return it.
+ archivedPings.sort((a, b) => b.timestamp - a.timestamp);
+ return archivedPings;
+};
+
+add_task(async function test_setup() {
+ do_get_profile(true);
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+});
+
+add_task(async function test_archivedPings() {
+ // TelemetryController should not be fully initialized at this point.
+ // Submitting pings should still work fine.
+
+ const PINGS = [
+ {
+ type: "test-ping-api-1",
+ payload: { foo: "bar" },
+ dateCreated: new Date(2010, 1, 1, 10, 0, 0),
+ },
+ {
+ type: "test-ping-api-2",
+ payload: { moo: "meh" },
+ dateCreated: new Date(2010, 2, 1, 10, 0, 0),
+ },
+ ];
+
+ // Submit pings and check the ping list.
+ let expectedPingList = [];
+
+ for (let data of PINGS) {
+ fakeNow(data.dateCreated);
+ data.id = await TelemetryController.submitExternalPing(
+ data.type,
+ data.payload
+ );
+ let list = await TelemetryArchive.promiseArchivedPingList();
+
+ expectedPingList.push({
+ id: data.id,
+ type: data.type,
+ timestampCreated: data.dateCreated.getTime(),
+ });
+ Assert.deepEqual(
+ list,
+ expectedPingList,
+ "Archived ping list should contain submitted pings"
+ );
+ }
+
+ // Check loading the archived pings.
+ let checkLoadingPings = async function() {
+ for (let data of PINGS) {
+ let ping = await TelemetryArchive.promiseArchivedPingById(data.id);
+ Assert.equal(ping.id, data.id, "Archived ping should have matching id");
+ Assert.equal(
+ ping.type,
+ data.type,
+ "Archived ping should have matching type"
+ );
+ Assert.equal(
+ ping.creationDate,
+ data.dateCreated.toISOString(),
+ "Archived ping should have matching creation date"
+ );
+ }
+ };
+
+ await checkLoadingPings();
+
+ // Check that we find the archived pings again by scanning after a restart.
+ await TelemetryController.testReset();
+
+ let pingList = await TelemetryArchive.promiseArchivedPingList();
+ Assert.deepEqual(
+ expectedPingList,
+ pingList,
+ "Should have submitted pings in archive list after restart"
+ );
+ await checkLoadingPings();
+
+ // Write invalid pings into the archive with both valid and invalid names.
+ let writeToArchivedDir = async function(
+ dirname,
+ filename,
+ content,
+ compressed
+ ) {
+ const dirPath = OS.Path.join(gPingsArchivePath, dirname);
+ await OS.File.makeDir(dirPath, { ignoreExisting: true });
+ const filePath = OS.Path.join(dirPath, filename);
+ const options = { tmpPath: filePath + ".tmp", noOverwrite: false };
+ if (compressed) {
+ options.compression = "lz4";
+ }
+ await OS.File.writeAtomic(filePath, content, options);
+ };
+
+ const FAKE_ID1 = "10000000-0123-0123-0123-0123456789a1";
+ const FAKE_ID2 = "20000000-0123-0123-0123-0123456789a2";
+ const FAKE_ID3 = "20000000-0123-0123-0123-0123456789a3";
+ const FAKE_TYPE = "foo";
+
+ // These should get rejected.
+ await writeToArchivedDir("xx", "foo.json", "{}");
+ await writeToArchivedDir("2010-02", "xx.xx.xx.json", "{}");
+ // This one should get picked up...
+ await writeToArchivedDir(
+ "2010-02",
+ "1." + FAKE_ID1 + "." + FAKE_TYPE + ".json",
+ "{}"
+ );
+ // ... but get overwritten by this one.
+ await writeToArchivedDir(
+ "2010-02",
+ "2." + FAKE_ID1 + "." + FAKE_TYPE + ".json",
+ ""
+ );
+ // This should get picked up fine.
+ await writeToArchivedDir(
+ "2010-02",
+ "3." + FAKE_ID2 + "." + FAKE_TYPE + ".json",
+ ""
+ );
+ // This compressed ping should get picked up fine as well.
+ await writeToArchivedDir(
+ "2010-02",
+ "4." + FAKE_ID3 + "." + FAKE_TYPE + ".jsonlz4",
+ ""
+ );
+
+ expectedPingList.push({
+ id: FAKE_ID1,
+ type: "foo",
+ timestampCreated: 2,
+ });
+ expectedPingList.push({
+ id: FAKE_ID2,
+ type: "foo",
+ timestampCreated: 3,
+ });
+ expectedPingList.push({
+ id: FAKE_ID3,
+ type: "foo",
+ timestampCreated: 4,
+ });
+ expectedPingList.sort((a, b) => a.timestampCreated - b.timestampCreated);
+
+ // Reset the TelemetryArchive so we scan the archived dir again.
+ await TelemetryController.testReset();
+
+ // Check that we are still picking up the valid archived pings on disk,
+ // plus the valid ones above.
+ pingList = await TelemetryArchive.promiseArchivedPingList();
+ Assert.deepEqual(
+ expectedPingList,
+ pingList,
+ "Should have picked up valid archived pings"
+ );
+ await checkLoadingPings();
+
+ // Now check that we fail to load the two invalid pings from above.
+ Assert.ok(
+ await promiseRejects(TelemetryArchive.promiseArchivedPingById(FAKE_ID1)),
+ "Should have rejected invalid ping"
+ );
+ Assert.ok(
+ await promiseRejects(TelemetryArchive.promiseArchivedPingById(FAKE_ID2)),
+ "Should have rejected invalid ping"
+ );
+});
+
+add_task(async function test_archiveCleanup() {
+ const PING_TYPE = "foo";
+
+ // Empty the archive.
+ await OS.File.removeDir(gPingsArchivePath);
+
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SCAN_PING_COUNT").clear();
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_DIRECTORIES_COUNT").clear();
+ // Also reset these histograms to make sure normal sized pings don't get counted.
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED").clear();
+ Telemetry.getHistogramById(
+ "TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB"
+ ).clear();
+
+ // Build the cache. Nothing should be evicted as there's no ping directory.
+ await TelemetryController.testReset();
+ await TelemetryStorage.testCleanupTaskPromise();
+ await TelemetryArchive.promiseArchivedPingList();
+
+ let h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_SCAN_PING_COUNT"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must report 0 pings scanned if no archive dir exists."
+ );
+  // No archive directories should have been evicted either, since no archive dir exists.
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must report 0 evicted dirs if no archive dir exists."
+ );
+
+ let expectedPrunedInfo = [];
+ let expectedNotPrunedInfo = [];
+
+ let checkArchive = async function() {
+ // Check that the pruned pings are not on disk anymore.
+ for (let prunedInfo of expectedPrunedInfo) {
+ await Assert.rejects(
+ TelemetryArchive.promiseArchivedPingById(prunedInfo.id),
+ /TelemetryStorage.loadArchivedPing - no ping with id/,
+ "Ping " + prunedInfo.id + " should have been pruned."
+ );
+ const pingPath = TelemetryStorage._testGetArchivedPingPath(
+ prunedInfo.id,
+ prunedInfo.creationDate,
+ PING_TYPE
+ );
+ Assert.ok(
+ !(await OS.File.exists(pingPath)),
+ "The ping should not be on the disk anymore."
+ );
+ }
+
+ // Check that the expected pings are there.
+ for (let expectedInfo of expectedNotPrunedInfo) {
+ Assert.ok(
+ await TelemetryArchive.promiseArchivedPingById(expectedInfo.id),
+ "Ping" + expectedInfo.id + " should be in the archive."
+ );
+ }
+ };
+
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SESSION_PING_COUNT").clear();
+
+ // Create a ping which should be pruned because it is past the retention period.
+ let date = fakeNow(2010, 1, 1, 1, 0, 0);
+ let firstDate = date;
+ let pingId = await TelemetryController.submitExternalPing(PING_TYPE, {}, {});
+ expectedPrunedInfo.push({ id: pingId, creationDate: date });
+
+ // Create a ping which should be kept because it is within the retention period.
+ const oldestDirectoryDate = fakeNow(2010, 2, 1, 1, 0, 0);
+ pingId = await TelemetryController.submitExternalPing(PING_TYPE, {}, {});
+ expectedNotPrunedInfo.push({ id: pingId, creationDate: oldestDirectoryDate });
+
+ // Create 20 other pings which are within the retention period, but would be affected
+ // by the disk quota.
+ for (let month of [3, 4]) {
+ for (let minute = 0; minute < 10; minute++) {
+ date = fakeNow(2010, month, 1, 1, minute, 0);
+ pingId = await TelemetryController.submitExternalPing(PING_TYPE, {}, {});
+ expectedNotPrunedInfo.push({ id: pingId, creationDate: date });
+ }
+ }
+
+ // We expect all the pings we archived to be in this histogram.
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SESSION_PING_COUNT");
+ Assert.equal(
+ h.snapshot().sum,
+ 22,
+ "All the pings must be live-accumulated in the histogram."
+ );
+ // Reset the histogram that will be populated by the archive scan.
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS").clear();
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_OLDEST_DIRECTORY_AGE").clear();
+
+ // Move the current date 60 days ahead of the first ping.
+ fakeNow(futureDate(firstDate, 60 * MILLISECONDS_PER_DAY));
+ // Reset TelemetryArchive and TelemetryController to start the startup cleanup.
+ await TelemetryController.testReset();
+ // Wait for the cleanup to finish.
+ await TelemetryStorage.testCleanupTaskPromise();
+ // Then scan the archived dir.
+ await TelemetryArchive.promiseArchivedPingList();
+
+ // Check that the archive is in the correct state.
+ await checkArchive();
+
+ // Make sure the ping count is correct after the scan (one ping was removed).
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_SCAN_PING_COUNT"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 21,
+ "The histogram must count all the pings in the archive."
+ );
+ // One directory out of four was removed as well.
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 1,
+ "Telemetry must correctly report removed archive directories."
+ );
+ // Check that the remaining directories are correctly counted.
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_DIRECTORIES_COUNT"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 3,
+ "Telemetry must correctly report the remaining archive directories."
+ );
+  // Check that the age of the oldest directory is correctly reported.
+ const oldestAgeInMonths = 1;
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_OLDEST_DIRECTORY_AGE"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ oldestAgeInMonths,
+ "Telemetry must correctly report age of the oldest directory in the archive."
+ );
+
+ // We need to test the archive size before we hit the quota, otherwise a special
+ // value is recorded.
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SIZE_MB").clear();
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA").clear();
+ Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_EVICTING_OVER_QUOTA_MS"
+ ).clear();
+
+ // Move the current date 60 days ahead of the second ping.
+ fakeNow(futureDate(oldestDirectoryDate, 60 * MILLISECONDS_PER_DAY));
+ // Reset TelemetryController and TelemetryArchive.
+ await TelemetryController.testReset();
+ // Wait for the cleanup to finish.
+ await TelemetryStorage.testCleanupTaskPromise();
+ // Then scan the archived dir again.
+ await TelemetryArchive.promiseArchivedPingList();
+
+  // Move the oldest ping to the expected-pruned list.
+ expectedPrunedInfo.push(expectedNotPrunedInfo.shift());
+ // Check that the archive is in the correct state.
+ await checkArchive();
+
+ // Find how much disk space the archive takes.
+ const archivedPingsInfo = await getArchivedPingsInfo();
+ let archiveSizeInBytes = archivedPingsInfo.reduce(
+ (lastResult, element) => lastResult + element.size,
+ 0
+ );
+
+ // Check that the correct values for quota probes are reported when no quota is hit.
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SIZE_MB").snapshot();
+ Assert.equal(
+ h.sum,
+ Math.round(archiveSizeInBytes / 1024 / 1024),
+ "Telemetry must report the correct archive size."
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must report 0 evictions if quota is not hit."
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_EVICTING_OVER_QUOTA_MS"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must report a null elapsed time if quota is not hit."
+ );
+
+ // Set the quota to 80% of the space.
+ const testQuotaInBytes = archiveSizeInBytes * 0.8;
+ fakeStorageQuota(testQuotaInBytes);
+
+ // The storage prunes archived pings until we reach 90% of the requested storage quota.
+ // Based on that, find how many pings should be kept.
+ const safeQuotaSize = testQuotaInBytes * 0.9;
+ let sizeInBytes = 0;
+ let pingsWithinQuota = [];
+ let pingsOutsideQuota = [];
+
+ for (let pingInfo of archivedPingsInfo) {
+ sizeInBytes += pingInfo.size;
+ if (sizeInBytes >= safeQuotaSize) {
+ pingsOutsideQuota.push({
+ id: pingInfo.id,
+ creationDate: new Date(pingInfo.timestamp),
+ });
+ continue;
+ }
+ pingsWithinQuota.push({
+ id: pingInfo.id,
+ creationDate: new Date(pingInfo.timestamp),
+ });
+ }
+
+ expectedNotPrunedInfo = pingsWithinQuota;
+ expectedPrunedInfo = expectedPrunedInfo.concat(pingsOutsideQuota);
+
+ // Reset TelemetryArchive and TelemetryController to start the startup cleanup.
+ await TelemetryController.testReset();
+ await TelemetryStorage.testCleanupTaskPromise();
+ await TelemetryArchive.promiseArchivedPingList();
+ // Check that the archive is in the correct state.
+ await checkArchive();
+
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ pingsOutsideQuota.length,
+ "Telemetry must correctly report the over quota pings evicted from the archive."
+ );
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SIZE_MB").snapshot();
+ Assert.equal(
+ h.sum,
+ 300,
+ "Archive quota was hit, a special size must be reported."
+ );
+
+ // Trigger a cleanup again and make sure we're not removing anything.
+ await TelemetryController.testReset();
+ await TelemetryStorage.testCleanupTaskPromise();
+ await TelemetryArchive.promiseArchivedPingList();
+ await checkArchive();
+
+ const OVERSIZED_PING_ID = "9b21ec8f-f762-4d28-a2c1-44e1c4694f24";
+ // Create and archive an oversized, uncompressed, ping.
+ const OVERSIZED_PING = {
+ id: OVERSIZED_PING_ID,
+ type: PING_TYPE,
+ creationDate: new Date().toISOString(),
+ // Generate a ~2MB string to use as the payload.
+ payload: generateRandomString(2 * 1024 * 1024),
+ };
+ await TelemetryArchive.promiseArchivePing(OVERSIZED_PING);
+
+ // Get the size of the archived ping.
+ const oversizedPingPath =
+ TelemetryStorage._testGetArchivedPingPath(
+ OVERSIZED_PING.id,
+ new Date(OVERSIZED_PING.creationDate),
+ PING_TYPE
+ ) + "lz4";
+ const archivedPingSizeMB = Math.floor(
+ (await OS.File.stat(oversizedPingPath)).size / 1024 / 1024
+ );
+
+ // We expect the oversized ping to be pruned when scanning the archive.
+ expectedPrunedInfo.push({
+ id: OVERSIZED_PING_ID,
+ creationDate: new Date(OVERSIZED_PING.creationDate),
+ });
+
+ // Scan the archive.
+ await TelemetryController.testReset();
+ await TelemetryStorage.testCleanupTaskPromise();
+ await TelemetryArchive.promiseArchivedPingList();
+ // The following also checks that non oversized pings are not removed.
+ await checkArchive();
+
+ // Make sure we're correctly updating the related histograms.
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 1,
+ "Telemetry must report 1 oversized ping in the archive."
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB"
+ ).snapshot();
+ Assert.equal(
+ h.values[archivedPingSizeMB],
+ 1,
+ "Telemetry must report the correct size for the oversized ping."
+ );
+});
+
+add_task(async function test_clientId() {
+ // Check that a ping submitted after the delayed telemetry initialization completed
+ // should get a valid client id.
+ await TelemetryController.testReset();
+ const clientId = await ClientID.getClientID();
+
+ let id = await TelemetryController.submitExternalPing(
+ "test-type",
+ {},
+ { addClientId: true }
+ );
+ let ping = await TelemetryArchive.promiseArchivedPingById(id);
+
+ Assert.ok(!!ping, "Should have loaded the ping.");
+ Assert.ok("clientId" in ping, "Ping should have a client id.");
+ Assert.ok(UUID_REGEX.test(ping.clientId), "Client id is in UUID format.");
+ Assert.equal(
+ ping.clientId,
+ clientId,
+ "Ping client id should match the global client id."
+ );
+
+ // We should have cached the client id now. Lets confirm that by
+ // checking the client id on a ping submitted before the async
+ // controller setup is finished.
+ let promiseSetup = TelemetryController.testReset();
+ id = await TelemetryController.submitExternalPing(
+ "test-type",
+ {},
+ { addClientId: true }
+ );
+ ping = await TelemetryArchive.promiseArchivedPingById(id);
+ Assert.equal(ping.clientId, clientId);
+
+ // Finish setup.
+ await promiseSetup;
+});
+
+add_task(async function test_InvalidPingType() {
+ const TYPES = [
+ "a",
+ "-",
+ "¿€€€?",
+ "-foo-",
+ "-moo",
+ "zoo-",
+ ".bar",
+ "asfd.asdf",
+ ];
+
+ for (let type of TYPES) {
+ let histogram = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_INVALID_PING_TYPE_SUBMITTED"
+ );
+ Assert.ok(
+ !(type in histogram.snapshot()),
+ "Should not have counted this invalid ping yet: " + type
+ );
+ Assert.ok(
+      await promiseRejects(TelemetryController.submitExternalPing(type, {})),
+ "Ping type should have been rejected."
+ );
+ Assert.equal(
+ histogram.snapshot()[type].sum,
+ 1,
+ "Should have counted this as an invalid ping type."
+ );
+ }
+});
+
+add_task(async function test_InvalidPayloadType() {
+ const PAYLOAD_TYPES = [19, "string", [1, 2, 3, 4], null, undefined];
+
+ let histogram = Telemetry.getHistogramById(
+ "TELEMETRY_INVALID_PAYLOAD_SUBMITTED"
+ );
+ for (let i = 0; i < PAYLOAD_TYPES.length; i++) {
+ histogram.clear();
+ Assert.equal(
+ histogram.snapshot().sum,
+ 0,
+ "Should not have counted this invalid payload yet: " +
+ JSON.stringify(PAYLOAD_TYPES[i])
+ );
+ Assert.ok(
+ await promiseRejects(
+ TelemetryController.submitExternalPing("payload-test", PAYLOAD_TYPES[i])
+ ),
+ "Payload type should have been rejected."
+ );
+ Assert.equal(
+ histogram.snapshot().sum,
+ 1,
+ "Should have counted this as an invalid payload type."
+ );
+ }
+});
+
+add_task(async function test_currentPingData() {
+ await TelemetryController.testSetup();
+
+ // Setup test data.
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT");
+ h.clear();
+ h.add(1);
+ let k = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT"
+ );
+ k.clear();
+ k.add("a", 1);
+
+ // Get current ping data objects and check that their data is sane.
+ for (let subsession of [true, false]) {
+ let ping = TelemetryController.getCurrentPingData(subsession);
+
+ Assert.ok(!!ping, "Should have gotten a ping.");
+ Assert.equal(ping.type, "main", "Ping should have correct type.");
+ const expectedReason = subsession
+ ? "gather-subsession-payload"
+ : "gather-payload";
+ Assert.equal(
+ ping.payload.info.reason,
+ expectedReason,
+ "Ping should have the correct reason."
+ );
+
+ let id = "TELEMETRY_TEST_RELEASE_OPTOUT";
+ Assert.ok(
+ id in ping.payload.histograms,
+ "Payload should have test count histogram."
+ );
+ Assert.equal(
+ ping.payload.histograms[id].sum,
+ 1,
+ "Test count value should match."
+ );
+ id = "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT";
+ Assert.ok(
+ id in ping.payload.keyedHistograms,
+ "Payload should have keyed test histogram."
+ );
+ Assert.equal(
+ ping.payload.keyedHistograms[id].a.sum,
+ 1,
+ "Keyed test value should match."
+ );
+ }
+});
+
+add_task(async function test_shutdown() {
+ await TelemetryController.testShutdown();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_PingSender.js b/toolkit/components/telemetry/tests/unit/test_PingSender.js
new file mode 100644
index 0000000000..8cfba28274
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_PingSender.js
@@ -0,0 +1,229 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* eslint-disable mozilla/no-arbitrary-setTimeout */
+
+// This tests submitting a ping using the stand-alone pingsender program.
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/osfile.jsm", this);
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+ChromeUtils.import("resource://gre/modules/PromiseUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Timer.jsm", this);
+
+function generateTestPingData() {
+ return {
+ type: "test-pingsender-type",
+ id: TelemetryUtils.generateUUID(),
+ creationDate: new Date().toISOString(),
+ version: 4,
+ payload: {
+ dummy: "stuff",
+ },
+ };
+}
+
+function testSendingPings(pingPaths) {
+ const url = "http://localhost:" + PingServer.port + "/submit/telemetry/";
+ const pings = pingPaths.map(path => {
+ return {
+ url,
+ path,
+ };
+ });
+ TelemetrySend.testRunPingSender(pings, (_, topic, __) => {
+ switch (topic) {
+ case "process-finished": // finished indicates an exit code of 0
+ Assert.ok(true, "Pingsender should be able to post to localhost");
+ break;
+ case "process-failed": // failed indicates an exit code != 0
+ Assert.ok(false, "Pingsender should be able to post to localhost");
+ break;
+ }
+ });
+}
+
+/**
+ * Wait for a ping file to be deleted from the pending pings directory.
+ */
+function waitForPingDeletion(pingId) {
+ const path = OS.Path.join(TelemetryStorage.pingDirectoryPath, pingId);
+
+ let checkFn = (resolve, reject) =>
+ setTimeout(() => {
+ OS.File.exists(path).then(exists => {
+ if (!exists) {
+ Assert.ok(true, pingId + " was deleted");
+ resolve();
+ } else {
+ checkFn(resolve, reject);
+ }
+ }, reject);
+ }, 250);
+
+ return new Promise((resolve, reject) => checkFn(resolve, reject));
+}
+
+add_task(async function setup() {
+ // Init the profile.
+ do_get_profile(true);
+
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ // Start the ping server and let Telemetry know about it.
+ PingServer.start();
+});
+
+add_task(async function test_pingSender() {
+ // Generate a new ping and save it among the pending pings.
+ const data = generateTestPingData();
+ await TelemetryStorage.savePing(data, true);
+
+ // Get the local path of the saved ping.
+ const pingPath = OS.Path.join(TelemetryStorage.pingDirectoryPath, data.id);
+
+ // Spawn an HTTP server that returns an error. We will be running the
+ // PingSender twice, trying to send the ping to this server. After the
+ // second time, we will resolve |deferred404Hit|.
+ let failingServer = new HttpServer();
+ let deferred404Hit = PromiseUtils.defer();
+ let hitCount = 0;
+ failingServer.registerPathHandler("/lookup_fail", (metadata, response) => {
+ response.setStatusLine("1.1", 404, "Not Found");
+ hitCount++;
+
+ if (hitCount >= 2) {
+ // Resolve the promise on the next tick.
+ Services.tm.dispatchToMainThread(() => deferred404Hit.resolve());
+ }
+ });
+ failingServer.start(-1);
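+  // Passing -1 lets HttpServer pick a free port; it is read back below via identity.primaryPort.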
+
+ // Try to send the ping twice using the pingsender (we expect 404 both times).
+ const errorUrl =
+ "http://localhost:" + failingServer.identity.primaryPort + "/lookup_fail";
+ TelemetrySend.testRunPingSender([{ url: errorUrl, path: pingPath }]);
+ TelemetrySend.testRunPingSender([{ url: errorUrl, path: pingPath }]);
+
+ // Wait until we hit the 404 server twice. After that, make sure that the ping
+ // still exists locally.
+ await deferred404Hit.promise;
+ Assert.ok(
+ await OS.File.exists(pingPath),
+ "The pending ping must not be deleted if we fail to send using the PingSender"
+ );
+
+ // Try to send it using the pingsender.
+ testSendingPings([pingPath]);
+
+ let req = await PingServer.promiseNextRequest();
+ let ping = decodeRequestPayload(req);
+
+ Assert.equal(
+ req.getHeader("User-Agent"),
+ "pingsender/1.0",
+ "Should have received the correct user agent string."
+ );
+ Assert.equal(
+ req.getHeader("X-PingSender-Version"),
+ "1.0",
+ "Should have received the correct PingSender version string."
+ );
+ Assert.equal(
+ req.getHeader("Content-Encoding"),
+ "gzip",
+ "Should have a gzip encoded ping."
+ );
+ Assert.ok(req.getHeader("Date"), "Should have received a Date header.");
+ Assert.equal(ping.id, data.id, "Should have received the correct ping id.");
+ Assert.equal(
+ ping.type,
+ data.type,
+ "Should have received the correct ping type."
+ );
+ Assert.deepEqual(
+ ping.payload,
+ data.payload,
+ "Should have received the correct payload."
+ );
+
+ // Check that the PingSender removed the pending ping.
+ await waitForPingDeletion(data.id);
+
+ // Confirm we can't send a ping to another destination url
+ let bannedUris = [
+ "https://example.com",
+ "http://localhost.com",
+ "http://localHOST.com",
+ "http://localhost@example.com",
+ "http://localhost:bob@example.com",
+ "http://localhost:localhost@localhost.example.com",
+ ];
+  for (let uri of bannedUris) {
+    TelemetrySend.testRunPingSender(
+      [{ url: uri, path: pingPath }],
+      (_, topic, __) => {
+        switch (topic) {
+          case "process-finished": // finished indicates an exit code of 0
+            Assert.ok(
+              false,
+              "Pingsender should not be able to post to banned url: " + uri
+            );
+            break;
+          case "process-failed": // failed indicates an exit code != 0
+            Assert.ok(
+              true,
+              "Pingsender refused to post to banned url: " + uri
+            );
+            break;
+        }
+      }
+    );
+  }
+
+ // Shut down the failing server. We do this now, and not right after using it,
+ // to make sure we're not interfering with the test.
+ await new Promise(r => failingServer.stop(r));
+});
+
+add_task(async function test_pingSender_multiple_pings() {
+ // Generate two new pings and save them among the pending pings.
+ const data = [generateTestPingData(), generateTestPingData()];
+
+ for (const d of data) {
+ await TelemetryStorage.savePing(d, true);
+ }
+
+ // Get the local path of the saved pings.
+ const pingPaths = data.map(d =>
+ OS.Path.join(TelemetryStorage.pingDirectoryPath, d.id)
+ );
+
+ // Try to send them using the pingsender.
+ testSendingPings(pingPaths);
+
+ // Check the pings
+ for (const d of data) {
+ let req = await PingServer.promiseNextRequest();
+ let ping = decodeRequestPayload(req);
+ Assert.equal(ping.id, d.id, "Should have received the correct ping id.");
+ }
+
+ // Check that the PingSender removed the pending pings.
+ for (const d of data) {
+ await waitForPingDeletion(d.id);
+ }
+});
+
+add_task(async function cleanup() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_PrioPing.js b/toolkit/components/telemetry/tests/unit/test_PrioPing.js
new file mode 100644
index 0000000000..29930ca259
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_PrioPing.js
@@ -0,0 +1,140 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetryPrioPing",
+ "resource://gre/modules/PrioPing.jsm"
+);
+
+function checkPingStructure(type, payload, options) {
+ Assert.equal(
+ type,
+ TelemetryPrioPing.PRIO_PING_TYPE,
+ "Should be a prio ping."
+ );
+ // Check the payload for required fields.
+ Assert.ok("version" in payload, "Payload must have version.");
+ Assert.ok("reason" in payload, "Payload must have reason.");
+ Assert.ok(
+ Object.values(TelemetryPrioPing.Reason).some(
+ reason => payload.reason === reason
+ ),
+ "Should be a known reason."
+ );
+ Assert.ok(
+ Array.isArray(payload.prioData),
+ "Payload prioData must be present and an array."
+ );
+ payload.prioData.forEach(prioData => {
+ Assert.ok("encoding" in prioData, "All prioData must have encodings.");
+ Assert.ok("prio" in prioData, "All prioData must have prio blocks.");
+ });
+ // Ensure we forbid client id and environment
+ Assert.equal(options.addClientId, false, "Must forbid client Id.");
+ Assert.equal(options.addEnvironment, false, "Must forbid Environment.");
+}
+
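+/**
+ * Replace the Policy hooks of PrioPing.jsm (timers, ping submission and the
+ * origin snapshot) so each test can intercept the ping it triggers.
+ */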
+function fakePolicy(set, clear, send, snapshot) {
+ let mod = ChromeUtils.import("resource://gre/modules/PrioPing.jsm", null);
+ mod.Policy.setTimeout = set;
+ mod.Policy.clearTimeout = clear;
+ mod.Policy.sendPing = send;
+ mod.Policy.getEncodedOriginSnapshot = snapshot;
+}
+
+function pass() {
+ /* intentionally empty */
+}
+function fail() {
+ Assert.ok(false, "Not allowed");
+}
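+// A minimal fake origin snapshot: two prioData entries with the required
+// "encoding" and "prio" fields.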
+function fakeSnapshot() {
+ return [
+ {
+ encoding: "telemetry.test-1-1",
+ prio: {},
+ },
+ {
+ encoding: "telemetry.test-1-1",
+ prio: {},
+ },
+ ];
+}
+
+add_task(async function setup() {
+ // Trigger a proper telemetry init.
+ do_get_profile(true);
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ await TelemetryController.testSetup();
+ TelemetryPrioPing.testReset();
+});
+
+// Similarly to test_EventPing, tests in this file often follow the form:
+// 1: Fake out timeout, ping submission, and snapshotting
+// 2: Trigger a "prio" ping to happen
+// 3: Inside the fake ping submission, ensure the ping is correctly formed.
+// In sinon this would be replaced with spies and .wasCalledWith().
+
+add_task(async function test_limit_reached() {
+ // Ensure that on being notified of the limit we immediately trigger a ping
+ // with reason "max"
+
+ fakePolicy(
+ pass,
+ pass,
+ (type, payload, options) => {
+ checkPingStructure(type, payload, options);
+ Assert.equal(
+ payload.reason,
+ TelemetryPrioPing.Reason.MAX,
+ "Sent using max reason."
+ );
+ },
+ fakeSnapshot
+ );
+ Services.obs.notifyObservers(null, "origin-telemetry-storage-limit-reached");
+});
+
+add_task(async function test_periodic() {
+ fakePolicy(
+ pass,
+ pass,
+ (type, payload, options) => {
+ checkPingStructure(type, payload, options);
+ Assert.equal(
+ payload.reason,
+ TelemetryPrioPing.Reason.PERIODIC,
+ "Sent with periodic reason."
+ );
+ },
+ fakeSnapshot
+ );
+
+ // This is normally triggered by the scheduler once a day
+ TelemetryPrioPing.periodicPing();
+});
+
+add_task(async function test_shutdown() {
+ fakePolicy(
+ fail,
+ pass,
+ (type, payload, options) => {
+ checkPingStructure(type, payload, options);
+ Assert.equal(
+ payload.reason,
+ TelemetryPrioPing.Reason.SHUTDOWN,
+ "Sent with shutdown reason."
+ );
+ },
+ fakeSnapshot
+ );
+ await TelemetryPrioPing.shutdown();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_SocketScalars.js b/toolkit/components/telemetry/tests/unit/test_SocketScalars.js
new file mode 100644
index 0000000000..1d7c0cebfd
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_SocketScalars.js
@@ -0,0 +1,55 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+const { TelemetryController } = ChromeUtils.import(
+ "resource://gre/modules/TelemetryController.jsm"
+);
+const { ContentTaskUtils } = ChromeUtils.import(
+ "resource://testing-common/ContentTaskUtils.jsm"
+);
+
+const SOCKET_ONLY_UINT_SCALAR = "telemetry.test.socket_only_uint";
+
+/**
+ * This function waits until socket scalars are reported into the
+ * scalar snapshot.
+ */
+async function waitForSocketScalars() {
+ await ContentTaskUtils.waitForCondition(() => {
+ const scalars = Telemetry.getSnapshotForScalars("main", false);
+ return Object.keys(scalars).includes("socket");
+ });
+}
+
+add_task(async function() {
+ if (!Services.prefs.getBoolPref("network.process.enabled")) {
+ Assert.ok(
+ true,
+ "Test finished: no point to test telemetry from socket process with lanuching the process"
+ );
+ return;
+ }
+
+ do_test_pending();
+
+ do_get_profile(true);
+ await TelemetryController.testSetup();
+
+ Services.io.socketProcessTelemetryPing();
+
+ // Once scalars are set by the socket process, they don't immediately get
+ // sent to the parent process. Wait for the Telemetry IPC Timer to trigger
+ // and batch send the data back to the parent process.
+ await waitForSocketScalars();
+
+ Assert.equal(
+ Telemetry.getSnapshotForScalars("main", false).socket[
+ SOCKET_ONLY_UINT_SCALAR
+ ],
+ 42,
+ `${SOCKET_ONLY_UINT_SCALAR} must have the correct value (socket process).`
+ );
+ do_test_finished();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js b/toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js
new file mode 100644
index 0000000000..71f90dd64e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js
@@ -0,0 +1,282 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryArchive.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryEnvironment.jsm", this);
+ChromeUtils.import("resource://gre/modules/osfile.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+
+const MS_IN_ONE_HOUR = 60 * 60 * 1000;
+const MS_IN_ONE_DAY = 24 * MS_IN_ONE_HOUR;
+
+const PREF_BRANCH = "toolkit.telemetry.";
+
+const REASON_ABORTED_SESSION = "aborted-session";
+const REASON_DAILY = "daily";
+const REASON_ENVIRONMENT_CHANGE = "environment-change";
+const REASON_SHUTDOWN = "shutdown";
+
+XPCOMUtils.defineLazyGetter(this, "DATAREPORTING_PATH", function() {
+ return OS.Path.join(OS.Constants.Path.profileDir, "datareporting");
+});
+
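+/**
+ * Check the archived "main" pings: their reasons must match aExpectedReasons
+ * in order, and session/subsession ids and counters must chain correctly
+ * from one ping to the next.
+ */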
+var promiseValidateArchivedPings = async function(aExpectedReasons) {
+ // The list of ping reasons which mark the session end (and must reset the subsession
+ // count).
+ const SESSION_END_PING_REASONS = new Set([
+ REASON_ABORTED_SESSION,
+ REASON_SHUTDOWN,
+ ]);
+
+ let list = await TelemetryArchive.promiseArchivedPingList();
+
+ // We're just interested in the "main" pings.
+ list = list.filter(p => p.type == "main");
+
+ Assert.equal(
+ aExpectedReasons.length,
+ list.length,
+ "All the expected pings must be received."
+ );
+
+ let previousPing = await TelemetryArchive.promiseArchivedPingById(list[0].id);
+ Assert.equal(
+ aExpectedReasons.shift(),
+ previousPing.payload.info.reason,
+ "Telemetry should only get pings with expected reasons."
+ );
+ Assert.equal(
+ previousPing.payload.info.previousSessionId,
+ null,
+ "The first session must report a null previous session id."
+ );
+ Assert.equal(
+ previousPing.payload.info.previousSubsessionId,
+ null,
+ "The first subsession must report a null previous subsession id."
+ );
+ Assert.equal(
+ previousPing.payload.info.profileSubsessionCounter,
+ 1,
+ "profileSubsessionCounter must be 1 the first time."
+ );
+ Assert.equal(
+ previousPing.payload.info.subsessionCounter,
+ 1,
+ "subsessionCounter must be 1 the first time."
+ );
+
+ let expectedSubsessionCounter = 1;
+ let expectedPreviousSessionId = previousPing.payload.info.sessionId;
+
+ for (let i = 1; i < list.length; i++) {
+ let currentPing = await TelemetryArchive.promiseArchivedPingById(
+ list[i].id
+ );
+ let currentInfo = currentPing.payload.info;
+ let previousInfo = previousPing.payload.info;
+ info(
+ "Archive entry " +
+ i +
+ " - id: " +
+ currentPing.id +
+ ", reason: " +
+ currentInfo.reason
+ );
+
+ Assert.equal(
+ aExpectedReasons.shift(),
+ currentInfo.reason,
+ "Telemetry should only get pings with expected reasons."
+ );
+ Assert.equal(
+ currentInfo.previousSessionId,
+ expectedPreviousSessionId,
+ "Telemetry must correctly chain session identifiers."
+ );
+ Assert.equal(
+ currentInfo.previousSubsessionId,
+ previousInfo.subsessionId,
+ "Telemetry must correctly chain subsession identifiers."
+ );
+ Assert.equal(
+ currentInfo.profileSubsessionCounter,
+ previousInfo.profileSubsessionCounter + 1,
+ "Telemetry must correctly track the profile subsessions count."
+ );
+ Assert.equal(
+ currentInfo.subsessionCounter,
+ expectedSubsessionCounter,
+ "The subsession counter should be monotonically increasing."
+ );
+
+ // Store the current ping as previous.
+ previousPing = currentPing;
+    // If this ping ends a session, reset the expected subsession counter and update the
+    // expected previous session id. Otherwise increment the expected subsession counter.
+ if (SESSION_END_PING_REASONS.has(currentInfo.reason)) {
+ expectedSubsessionCounter = 1;
+ expectedPreviousSessionId = currentInfo.sessionId;
+ } else {
+ expectedSubsessionCounter++;
+ }
+ }
+};
+
+add_task(async function test_setup() {
+ do_test_pending();
+
+ // Addon manager needs a profile directory
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+});
+
+add_task(async function test_subsessionsChaining() {
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android, so skip the next checks.
+ return;
+ }
+
+ const PREF_TEST = PREF_BRANCH + "test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_VALUE }],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+ // Fake the clock data to manually trigger an aborted-session ping and a daily ping.
+ // This is also helpful to make sure we get the archived pings in an expected order.
+ let now = fakeNow(2009, 9, 18, 0, 0, 0);
+ let monotonicNow = fakeMonotonicNow(1000);
+
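+  // Advance both the fake wall clock and the fake monotonic clock by the
+  // given number of minutes.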
+ let moveClockForward = minutes => {
+ let ms = minutes * MILLISECONDS_PER_MINUTE;
+ now = fakeNow(futureDate(now, ms));
+ monotonicNow = fakeMonotonicNow(monotonicNow + ms);
+ };
+
+ // Keep track of the ping reasons we're expecting in this test.
+ let expectedReasons = [];
+
+ // Start and shut down Telemetry. We expect a shutdown ping with profileSubsessionCounter: 1,
+ // subsessionCounter: 1, subsessionId: A, and previousSubsessionId: null to be archived.
+ await TelemetryController.testSetup();
+ await TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry but don't wait for it to initialise before shutting down. We expect a
+ // shutdown ping with profileSubsessionCounter: 2, subsessionCounter: 1, subsessionId: B
+ // and previousSubsessionId: A to be archived.
+ moveClockForward(30);
+ TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry and simulate an aborted-session ping. We expect an aborted-session ping
+ // with profileSubsessionCounter: 3, subsessionCounter: 1, subsessionId: C and
+ // previousSubsessionId: B to be archived.
+ let schedulerTickCallback = null;
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+ moveClockForward(6);
+  // Trigger an aborted-session ping save. When testing, we do not save the aborted-session
+  // ping as soon as Telemetry starts; otherwise we would end up with unexpected pings being
+  // sent when calling |TelemetryController.testReset()|, breaking some tests.
+ Assert.ok(!!schedulerTickCallback);
+ await schedulerTickCallback();
+ expectedReasons.push(REASON_ABORTED_SESSION);
+
+ // Start Telemetry and trigger an environment change through a pref modification. We expect
+ // an environment-change ping with profileSubsessionCounter: 4, subsessionCounter: 1,
+ // subsessionId: D and previousSubsessionId: C to be archived.
+ moveClockForward(30);
+ await TelemetryController.testReset();
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 1);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // Shut down Telemetry. We expect a shutdown ping with profileSubsessionCounter: 5,
+ // subsessionCounter: 2, subsessionId: E and previousSubsessionId: D to be archived.
+ moveClockForward(30);
+ await TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry and trigger a daily ping. We expect a daily ping with
+ // profileSubsessionCounter: 6, subsessionCounter: 1, subsessionId: F and
+ // previousSubsessionId: E to be archived.
+ moveClockForward(30);
+ await TelemetryController.testReset();
+
+  // Advance the clock by one day so the scheduler callback fires around midnight.
+ now = fakeNow(futureDate(now, MS_IN_ONE_DAY));
+ // Trigger the daily ping.
+ await schedulerTickCallback();
+ expectedReasons.push(REASON_DAILY);
+
+ // Trigger an environment change ping. We expect an environment-changed ping with
+ // profileSubsessionCounter: 7, subsessionCounter: 2, subsessionId: G and
+ // previousSubsessionId: F to be archived.
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 0);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // Shut down Telemetry and trigger a shutdown ping.
+ moveClockForward(30);
+ await TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry and trigger an environment change.
+ await TelemetryController.testReset();
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 1);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // Don't shut down, instead trigger an aborted-session ping.
+ moveClockForward(6);
+  // Trigger an aborted-session ping save.
+ await schedulerTickCallback();
+ expectedReasons.push(REASON_ABORTED_SESSION);
+
+ // Start Telemetry and trigger a daily ping.
+ moveClockForward(30);
+ await TelemetryController.testReset();
+  // Advance the clock by one day so the scheduler callback fires around midnight.
+ now = futureDate(now, MS_IN_ONE_DAY);
+ fakeNow(now);
+ // Trigger the daily ping.
+ await schedulerTickCallback();
+ expectedReasons.push(REASON_DAILY);
+
+ // Trigger an environment change.
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 0);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // And an aborted-session ping again.
+ moveClockForward(6);
+  // Trigger an aborted-session ping save.
+ await schedulerTickCallback();
+ expectedReasons.push(REASON_ABORTED_SESSION);
+
+ // Make sure the aborted-session ping gets archived.
+ await TelemetryController.testReset();
+
+ await promiseValidateArchivedPings(expectedReasons);
+});
+
+add_task(async function() {
+ await TelemetryController.testShutdown();
+ do_test_finished();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_SyncPingIntegration.js b/toolkit/components/telemetry/tests/unit/test_SyncPingIntegration.js
new file mode 100644
index 0000000000..58b9a6b78c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_SyncPingIntegration.js
@@ -0,0 +1,76 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+// Enable the collection (during test) for all products so even products
+// that don't collect the data will be able to run the test without failure.
+Services.prefs.setBoolPref(
+ "toolkit.telemetry.testing.overrideProductsCheck",
+ true
+);
+
+add_task(async function test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile();
+});
+
+add_task(async function test_register_twice_fails() {
+ TelemetryController.registerSyncPingShutdown(() => {});
+ Assert.throws(
+ () => TelemetryController.registerSyncPingShutdown(() => {}),
+ /The sync ping shutdown handler is already registered./
+ );
+ await TelemetryController.testReset();
+});
+
+add_task(async function test_reset_clears_handler() {
+ await TelemetryController.testSetup();
+ TelemetryController.registerSyncPingShutdown(() => {});
+ await TelemetryController.testReset();
+ // If this works the reset must have cleared it.
+ TelemetryController.registerSyncPingShutdown(() => {});
+ await TelemetryController.testReset();
+});
+
+add_task(async function test_shutdown_handler_submits() {
+ let handlerCalled = false;
+ await TelemetryController.testSetup();
+ TelemetryController.registerSyncPingShutdown(() => {
+ handlerCalled = true;
+ // and submit a ping.
+ let ping = {
+ why: "shutdown",
+ };
+ TelemetryController.submitExternalPing("sync", ping);
+ });
+
+ await TelemetryController.testShutdown();
+ Assert.ok(handlerCalled);
+ // and check we recorded telemetry about it.
+ let snapshot = Telemetry.getSnapshotForScalars("main", true).parent || {};
+ Assert.equal(
+ snapshot["telemetry.sync_shutdown_ping_sent"],
+ true,
+ "recorded that we sent a ping."
+ );
+ await TelemetryController.testReset();
+});
+
+add_task(async function test_shutdown_handler_no_submit() {
+ let handlerCalled = false;
+ await TelemetryController.testSetup();
+ TelemetryController.registerSyncPingShutdown(() => {
+ handlerCalled = true;
+ // but don't submit a ping.
+ });
+
+ await TelemetryController.testShutdown();
+ Assert.ok(handlerCalled);
+ // and check we didn't record our scalar.
+ let snapshot = Telemetry.getSnapshotForScalars("main", true).parent || {};
+ Assert.ok(
+ !("telemetry.sync_shutdown_ping_sent" in snapshot),
+ "should not have recorded we sent a ping"
+ );
+ await TelemetryController.testReset();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryAndroidEnvironment.js b/toolkit/components/telemetry/tests/unit/test_TelemetryAndroidEnvironment.js
new file mode 100644
index 0000000000..7c2e9d8447
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryAndroidEnvironment.js
@@ -0,0 +1,62 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+/* Android-only TelemetryEnvironment xpcshell test that ensures that the device data is stored in the Environment.
+ */
+
+ChromeUtils.import("resource://gre/modules/TelemetryEnvironment.jsm", this);
+
+/**
+ * Check that a value is a string and not empty.
+ *
+ * @param aValue The variable to check.
+ * @return True if |aValue| has type "string" and is not empty, False otherwise.
+ */
+function checkString(aValue) {
+ return typeof aValue == "string" && aValue != "";
+}
+
+/**
+ * If value is non-null, check if it's a valid string.
+ *
+ * @param aValue The variable to check.
+ * @return True if it's null or a valid string, false if it's non-null and an invalid
+ * string.
+ */
+function checkNullOrString(aValue) {
+ if (aValue) {
+ return checkString(aValue);
+ } else if (aValue === null) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * If value is non-null, check if it's a boolean.
+ *
+ * @param aValue The variable to check.
+ * @return True if it's null or a valid boolean, false if it's non-null and an invalid
+ * boolean.
+ */
+function checkNullOrBool(aValue) {
+ return aValue === null || typeof aValue == "boolean";
+}
+
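+/**
+ * Check the system section of the environment: it must exist, and on Android
+ * the device data (model, manufacturer, hardware, isTablet) must be valid.
+ */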
+function checkSystemSection(data) {
+ Assert.ok("system" in data, "There must be a system section in Environment.");
+ // Device data is only available on Android.
+ if (gIsAndroid) {
+ let deviceData = data.system.device;
+ Assert.ok(checkNullOrString(deviceData.model));
+ Assert.ok(checkNullOrString(deviceData.manufacturer));
+ Assert.ok(checkNullOrString(deviceData.hardware));
+ Assert.ok(checkNullOrBool(deviceData.isTablet));
+ }
+}
+
+add_task(async function test_systemEnvironment() {
+ let environmentData = TelemetryEnvironment.currentEnvironment;
+ checkSystemSection(environmentData);
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryChildEvents_buildFaster.js b/toolkit/components/telemetry/tests/unit/test_TelemetryChildEvents_buildFaster.js
new file mode 100644
index 0000000000..2f6d1fb16b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryChildEvents_buildFaster.js
@@ -0,0 +1,132 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
+
+const MESSAGE_CHILD_TEST_DONE = "ChildTest:Done";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+const TEST_STATIC_EVENT_NAME = "telemetry.test";
+const TEST_EVENT_NAME = "telemetry.test.child";
+
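+// Runs in the content process; the events recorded here should later show up
+// under "content" in the parent process snapshot.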
+function run_child_test() {
+ Telemetry.recordEvent(TEST_EVENT_NAME, "child", "builtin");
+ Telemetry.recordEvent(TEST_STATIC_EVENT_NAME, "main_and_content", "object1");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "child", "anotherone");
+}
+
+/**
+ * This function waits until content events are reported into the
+ * events snapshot.
+ */
+async function waitForContentEvents() {
+ await ContentTaskUtils.waitForCondition(() => {
+ const snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ return Object.keys(snapshot).includes("content");
+ });
+}
+
+add_task(async function test_setup() {
+ if (!runningInParent) {
+ TelemetryController.testSetupContent();
+ run_child_test();
+ do_send_remote_message(MESSAGE_CHILD_TEST_DONE);
+ return;
+ }
+
+ // Setup.
+ do_get_profile(true);
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ await TelemetryController.testSetup();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+ // Enable recording for the test event category.
+
+ // Register some dynamic builtin test events.
+ Telemetry.registerBuiltinEvents(TEST_EVENT_NAME, {
+ dynamic: {
+ methods: ["dynamic", "child"],
+ objects: ["builtin", "anotherone"],
+ },
+ dynamic_expired: {
+ methods: ["check"],
+ objects: ["expiry"],
+ expired: true,
+ },
+ });
+ Telemetry.setEventRecordingEnabled(TEST_STATIC_EVENT_NAME, true);
+ Telemetry.setEventRecordingEnabled(TEST_EVENT_NAME, true);
+
+ Telemetry.recordEvent(TEST_EVENT_NAME, "dynamic", "builtin");
+ Telemetry.recordEvent(TEST_STATIC_EVENT_NAME, "main_and_content", "object1");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "dynamic", "anotherone");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "check", "expiry");
+
+ // Run test in child, don't wait for it to finish: just wait for the
+ // MESSAGE_CHILD_TEST_DONE.
+ run_test_in_child("test_TelemetryChildEvents_buildFaster.js");
+ await do_await_remote_message(MESSAGE_CHILD_TEST_DONE);
+
+ // Once events are set by the content process, they don't immediately get
+ // sent to the parent process. Wait for the Telemetry IPC Timer to trigger
+ // and batch send the data back to the parent process.
+ await waitForContentEvents();
+
+ let snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok("parent" in snapshot, "Should have parent events in the snapshot.");
+ Assert.ok(
+ "content" in snapshot,
+ "Should have content events in the snapshot."
+ );
+
+ // All events should now be recorded in the right order
+ let expectedParent = [
+ [TEST_EVENT_NAME, "dynamic", "builtin"],
+ [TEST_STATIC_EVENT_NAME, "main_and_content", "object1"],
+ [TEST_EVENT_NAME, "dynamic", "anotherone"],
+ ];
+ let expectedContent = [
+ [TEST_EVENT_NAME, "child", "builtin"],
+ [TEST_STATIC_EVENT_NAME, "main_and_content", "object1"],
+ [TEST_EVENT_NAME, "child", "anotherone"],
+ ];
+
+ Assert.equal(
+ snapshot.parent.length,
+ expectedParent.length,
+ "Should have recorded the right amount of events in parent."
+ );
+ for (let i = 0; i < expectedParent.length; ++i) {
+ Assert.deepEqual(
+ snapshot.parent[i].slice(1),
+ expectedParent[i],
+ "Should have recorded the expected event data in parent."
+ );
+ }
+
+ Assert.equal(
+ snapshot.content.length,
+ expectedContent.length,
+ "Should have recorded the right amount of events in content."
+ );
+ for (let i = 0; i < expectedContent.length; ++i) {
+ Assert.deepEqual(
+ snapshot.content[i].slice(1),
+ expectedContent[i],
+ "Should have recorded the expected event data in content."
+ );
+ }
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryClientID_reset.js b/toolkit/components/telemetry/tests/unit/test_TelemetryClientID_reset.js
new file mode 100644
index 0000000000..4a0268a0cc
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryClientID_reset.js
@@ -0,0 +1,180 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+const { ClientID } = ChromeUtils.import("resource://gre/modules/ClientID.jsm");
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+const { Preferences } = ChromeUtils.import(
+ "resource://gre/modules/Preferences.jsm"
+);
+
+const PING_FORMAT_VERSION = 4;
+const DELETION_REQUEST_PING_TYPE = "deletion-request";
+const TEST_PING_TYPE = "test-ping-type";
+
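+// Submit a test ping that always carries the client ID; attaching the
+// environment is optional.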
+function sendPing(addEnvironment = false) {
+ let options = {
+ addClientId: true,
+ addEnvironment,
+ };
+ return TelemetryController.submitExternalPing(TEST_PING_TYPE, {}, options);
+}
+
+add_task(async function test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ await new Promise(resolve =>
+ Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(resolve))
+ );
+
+ PingServer.start();
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+ await TelemetryController.testSetup();
+});
+
+/**
+ * Testing the following scenario:
+ *
+ * 1. Telemetry upload gets disabled
+ * 2. Canary client ID is set
+ * 3. Instance is shut down
+ * 4. Telemetry upload flag is toggled
+ * 5. Instance is started again
+ * 6. Detect that upload is enabled and reset client ID
+ *
+ * This scenario happens, e.g., when switching between channels
+ * with and without the deletion-request ping reset included.
+ */
+add_task(async function test_clientid_reset_after_reenabling() {
+ await sendPing();
+ let ping = await PingServer.promiseNextPing();
+ Assert.equal(ping.type, TEST_PING_TYPE, "The ping must be a test ping");
+ Assert.ok("clientId" in ping);
+
+ let firstClientId = ping.clientId;
+ Assert.notEqual(
+ TelemetryUtils.knownClientID,
+ firstClientId,
+ "Client ID should be valid and random"
+ );
+
+ // Disable FHR upload: this should trigger a deletion-request ping.
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, false);
+
+ ping = await PingServer.promiseNextPing();
+ Assert.equal(
+ ping.type,
+ DELETION_REQUEST_PING_TYPE,
+ "The ping must be a deletion-request ping"
+ );
+ Assert.equal(ping.clientId, firstClientId);
+ let clientId = await ClientID.getClientID();
+ Assert.equal(TelemetryUtils.knownClientID, clientId);
+
+ // Now shutdown the instance
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+
+ // Flip the pref again
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ // Start the instance
+ await TelemetryController.testReset();
+
+ let newClientId = await ClientID.getClientID();
+ Assert.notEqual(
+ TelemetryUtils.knownClientID,
+ newClientId,
+ "Client ID should be valid and random"
+ );
+ Assert.notEqual(
+ firstClientId,
+ newClientId,
+ "Client ID should be newly generated"
+ );
+});
+
+/**
+ * Testing the following scenario:
+ * (Reverse of the first test)
+ *
+ * 1. Telemetry upload gets disabled, canary client ID is set
+ * 2. Telemetry upload is enabled
+ * 3. New client ID is generated
+ * 4. Instance is shut down
+ * 5. Telemetry upload flag is toggled
+ * 6. Instance is started again
+ * 7. Detect that upload is disabled and set the canary client ID
+ *
+ * This scenario happens, e.g., when switching between channels
+ * with and without the deletion-request ping reset included.
+ */
+add_task(async function test_clientid_canary_after_disabling() {
+ await sendPing();
+ let ping = await PingServer.promiseNextPing();
+ Assert.equal(ping.type, TEST_PING_TYPE, "The ping must be a test ping");
+ Assert.ok("clientId" in ping);
+
+ let firstClientId = ping.clientId;
+ Assert.notEqual(
+ TelemetryUtils.knownClientID,
+ firstClientId,
+ "Client ID should be valid and random"
+ );
+
+ // Disable FHR upload: this should trigger a deletion-request ping.
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, false);
+
+ ping = await PingServer.promiseNextPing();
+ Assert.equal(
+ ping.type,
+ DELETION_REQUEST_PING_TYPE,
+ "The ping must be a deletion-request ping"
+ );
+ Assert.equal(ping.clientId, firstClientId);
+ let clientId = await ClientID.getClientID();
+ Assert.equal(TelemetryUtils.knownClientID, clientId);
+
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+ await sendPing();
+ ping = await PingServer.promiseNextPing();
+ Assert.equal(ping.type, TEST_PING_TYPE, "The ping must be a test ping");
+ Assert.notEqual(
+ firstClientId,
+ ping.clientId,
+ "Client ID should be newly generated"
+ );
+
+ // Now shutdown the instance
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+
+ // Flip the pref again
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, false);
+
+ // Start the instance
+ await TelemetryController.testReset();
+
+ let newClientId = await ClientID.getClientID();
+ Assert.equal(
+ TelemetryUtils.knownClientID,
+ newClientId,
+ "Client ID should be a canary when upload disabled"
+ );
+});
+
+add_task(async function stopServer() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryController.js b/toolkit/components/telemetry/tests/unit/test_TelemetryController.js
new file mode 100644
index 0000000000..3b52a12f4e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryController.js
@@ -0,0 +1,1271 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* This testcase triggers two telemetry pings.
+ *
+ * Telemetry code keeps histograms of past telemetry pings. The first
+ * ping populates these histograms. One of those histograms is then
+ * checked in the second request.
+ */
+
+const { CommonUtils } = ChromeUtils.import(
+ "resource://services-common/utils.js"
+);
+const { ClientID } = ChromeUtils.import("resource://gre/modules/ClientID.jsm");
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryArchive.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+const { Preferences } = ChromeUtils.import(
+ "resource://gre/modules/Preferences.jsm"
+);
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
+const { TestUtils } = ChromeUtils.import(
+ "resource://testing-common/TestUtils.jsm"
+);
+ChromeUtils.import(
+ "resource://testing-common/TelemetryArchiveTesting.jsm",
+ this
+);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "jwcrypto",
+ "resource://services-crypto/jwcrypto.jsm"
+);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "JsonSchemaValidator",
+ "resource://gre/modules/components-utils/JsonSchemaValidator.jsm"
+);
+
+const PING_FORMAT_VERSION = 4;
+const DELETION_REQUEST_PING_TYPE = "deletion-request";
+const TEST_PING_TYPE = "test-ping-type";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_NAME = "XPCShell";
+
+var gClientID = null;
+
+XPCOMUtils.defineLazyGetter(this, "DATAREPORTING_PATH", async function() {
+ let profileDir = await PathUtils.getProfileDir();
+ return PathUtils.join(profileDir, "datareporting");
+});
+
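+// Point TelemetrySend at the local ping server (or at a non-existing server
+// if the PingServer is not running) and submit a test ping.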
+function sendPing(aSendClientId, aSendEnvironment) {
+ if (PingServer.started) {
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ } else {
+ TelemetrySend.setServer("http://doesnotexist");
+ }
+
+ let options = {
+ addClientId: aSendClientId,
+ addEnvironment: aSendEnvironment,
+ };
+ return TelemetryController.submitExternalPing(TEST_PING_TYPE, {}, options);
+}
+
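+/**
+ * Verify the common ping format: mandatory top-level fields, type, format
+ * version, the application section, and the optional clientId/environment
+ * fields.
+ */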
+function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
+ const MANDATORY_PING_FIELDS = [
+ "type",
+ "id",
+ "creationDate",
+ "version",
+ "application",
+ "payload",
+ ];
+
+ const APPLICATION_TEST_DATA = {
+ buildId: gAppInfo.appBuildID,
+ name: APP_NAME,
+ version: APP_VERSION,
+ displayVersion: AppConstants.MOZ_APP_VERSION_DISPLAY,
+ vendor: "Mozilla",
+ platformVersion: PLATFORM_VERSION,
+ xpcomAbi: "noarch-spidermonkey",
+ };
+
+ // Check that the ping contains all the mandatory fields.
+ for (let f of MANDATORY_PING_FIELDS) {
+ Assert.ok(f in aPing, f + " must be available.");
+ }
+
+ Assert.equal(aPing.type, aType, "The ping must have the correct type.");
+ Assert.equal(
+ aPing.version,
+ PING_FORMAT_VERSION,
+ "The ping must have the correct version."
+ );
+
+ // Test the application section.
+ for (let f in APPLICATION_TEST_DATA) {
+ Assert.equal(
+ aPing.application[f],
+ APPLICATION_TEST_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ // We can't check the values for channel and architecture. Just make
+  // sure they are present.
+ Assert.ok(
+ "architecture" in aPing.application,
+ "The application section must have an architecture field."
+ );
+ Assert.ok(
+ "channel" in aPing.application,
+ "The application section must have a channel field."
+ );
+
+ // Check the clientId and environment fields, as needed.
+ Assert.equal("clientId" in aPing, aHasClientId);
+ Assert.equal("environment" in aPing, aHasEnvironment);
+}
+
+add_task(async function test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ await new Promise(resolve =>
+ Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(resolve))
+ );
+});
+
+add_task(async function asyncSetup() {
+ await TelemetryController.testSetup();
+});
+
+// Ensure that not overwriting an existing file fails silently
+add_task(async function test_overwritePing() {
+ let ping = { id: "foo" };
+ await TelemetryStorage.savePing(ping, true);
+ await TelemetryStorage.savePing(ping, false);
+ await TelemetryStorage.cleanupPingFile(ping);
+});
+
+// Checks that a sent ping is correctly received by a dummy http server.
+add_task(async function test_simplePing() {
+ PingServer.start();
+ // Update the Telemetry Server preference with the address of the local server.
+ // Otherwise we might end up sending stuff to a non-existing server after
+ // |TelemetryController.testReset| is called.
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+
+ await sendPing(false, false);
+ let request = await PingServer.promiseNextRequest();
+
+ let ping = decodeRequestPayload(request);
+ checkPingFormat(ping, TEST_PING_TYPE, false, false);
+});
+
+add_task(async function test_disableDataUpload() {
+ const OPTIN_PROBE = "telemetry.data_upload_optin";
+ const isUnified = Preferences.get(TelemetryUtils.Preferences.Unified, false);
+ if (!isUnified) {
+ // Skipping the test if unified telemetry is off, as no deletion-request ping will be generated.
+ return;
+ }
+
+ // Check that the optin probe is not set.
+ // (If there are no recorded scalars, "parent" will be undefined).
+ let snapshot = Telemetry.getSnapshotForScalars("main", false).parent || {};
+ Assert.ok(
+ !(OPTIN_PROBE in snapshot),
+ "Data optin scalar should not be set at start"
+ );
+
+ // Send a first ping to get the current used client id
+ await sendPing(true, false);
+ let ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+ let firstClientId = ping.clientId;
+
+ Assert.ok(firstClientId, "Test ping needs a client ID");
+ Assert.notEqual(
+ TelemetryUtils.knownClientID,
+ firstClientId,
+ "Client ID should be valid and random"
+ );
+
+ // The next step should trigger an event, watch for it.
+ let disableObserved = TestUtils.topicObserved(
+ TelemetryUtils.TELEMETRY_UPLOAD_DISABLED_TOPIC
+ );
+
+ // Disable FHR upload: this should trigger a deletion-request ping.
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, false);
+
+ // Wait for the disable event
+ await disableObserved;
+
+ ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, DELETION_REQUEST_PING_TYPE, true, false);
+ // Wait on ping activity to settle.
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+ snapshot = Telemetry.getSnapshotForScalars("main", false).parent || {};
+ Assert.ok(
+ !(OPTIN_PROBE in snapshot),
+ "Data optin scalar should not be set after opt out"
+ );
+
+ // Restore FHR Upload.
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ // We need to wait until the scalar is set
+ await ContentTaskUtils.waitForCondition(() => {
+ const scalarSnapshot = Telemetry.getSnapshotForScalars("main", false);
+ return (
+ Object.keys(scalarSnapshot).includes("parent") &&
+ OPTIN_PROBE in scalarSnapshot.parent
+ );
+ });
+
+ snapshot = Telemetry.getSnapshotForScalars("main", false).parent || {};
+ Assert.ok(
+ snapshot[OPTIN_PROBE],
+ "Enabling data upload should set optin probe"
+ );
+
+ // The clientId should've been reset when we restored FHR Upload.
+ let secondClientId = TelemetryController.getCurrentPingData().clientId;
+ Assert.notEqual(
+ firstClientId,
+ secondClientId,
+ "The client id must have changed"
+ );
+ let secondEcosystemClientId = await ClientID.getEcosystemClientID();
+
+ // Simulate a failure in sending the deletion-request ping by disabling the HTTP server.
+ await PingServer.stop();
+
+ // Try to send a ping. It will be saved as pending and get deleted when disabling upload.
+ TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+ // Disable FHR upload to send a deletion-request ping again.
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, false);
+ // Wait for the deletion-request ping to be submitted.
+ await TelemetryController.testPromiseDeletionRequestPingSubmitted();
+
+ // Wait on sending activity to settle, as |TelemetryController.testReset()| doesn't do that.
+ await TelemetrySend.testWaitOnOutgoingPings();
+ // Wait for the pending pings to be deleted. Resetting TelemetryController doesn't
+ // trigger the shutdown, so we need to call it ourselves.
+ await TelemetryStorage.shutdown();
+ // Simulate a restart, and spin the send task.
+ await TelemetryController.testReset();
+
+ // Disabling Telemetry upload must clear out all the pending pings.
+ let pendingPings = await TelemetryStorage.loadPendingPingList();
+ Assert.equal(
+ pendingPings.length,
+ 1,
+ "All the pending pings should have been deleted, except the deletion-request ping"
+ );
+
+ // Enable the ping server again.
+ PingServer.start();
+ // We set the new server using the pref, otherwise it would get reset with
+ // |TelemetryController.testReset|.
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+
+ // Stop the sending task and then start it again.
+ await TelemetrySend.shutdown();
+ // Reset the controller to spin the ping sending task.
+ await TelemetryController.testReset();
+
+ // Re-enable Telemetry
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ // Send a test ping
+ await sendPing(true, false);
+
+ // We should have received the test ping first.
+ ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+
+ // The data in the test ping should be different than before
+ Assert.notEqual(
+ TelemetryUtils.knownClientID,
+ ping.clientId,
+ "Client ID should be reset to a random value"
+ );
+ Assert.notEqual(
+ firstClientId,
+ ping.clientId,
+ "Client ID should be different from the previous value"
+ );
+
+ // The "deletion-request" ping should come next, as it was pending.
+ ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, DELETION_REQUEST_PING_TYPE, true, false);
+ Assert.equal(
+ secondClientId,
+ ping.clientId,
+ "Deletion must be requested for correct client id"
+ );
+ if (AppConstants.MOZ_APP_NAME != "thunderbird") {
+ // We don't record the old ecosystem client ID on Thunderbird,
+ // since the FxA and telemetry infrastructure is different there.
+ Assert.equal(
+ secondEcosystemClientId,
+ ping.payload.scalars.parent["deletion.request.ecosystem_client_id"],
+ "Deletion must be requested for correct ecosystem client ID"
+ );
+ }
+
+ // Wait on ping activity to settle before moving on to the next test. If we were
+ // to shut down telemetry, even though the PingServer caught the expected pings,
+ // TelemetrySend could still be processing them (clearing pings would happen in
+ // a couple of ticks). Shutting down would cancel the request and save them as
+ // pending pings.
+ await TelemetrySend.testWaitOnOutgoingPings();
+});
+
+add_task(async function test_pingHasClientId() {
+ // Make sure we have no cached client ID for this test: we'll try to send
+ // a ping with it while Telemetry is being initialized.
+ Preferences.reset(TelemetryUtils.Preferences.CachedClientId);
+ await TelemetryController.testShutdown();
+ await ClientID._reset();
+ await TelemetryStorage.testClearPendingPings();
+ // And also clear the counter histogram since we're here.
+ let h = Telemetry.getHistogramById(
+ "TELEMETRY_PING_SUBMISSION_WAITING_CLIENTID"
+ );
+ h.clear();
+
+ // Init telemetry and try to send a ping with a client ID.
+ let promisePingSetup = TelemetryController.testReset();
+ await sendPing(true, false);
+ Assert.equal(
+ h.snapshot().sum,
+ 1,
+ "We must have a ping waiting for the clientId early during startup."
+ );
+ // Wait until we are fully initialized. Pings will be assembled but won't get
+ // sent before then.
+ await promisePingSetup;
+
+ let ping = await PingServer.promiseNextPing();
+  // Fetch the client ID after initializing and fetching the ping, so we
+ // don't unintentionally trigger its loading. We'll still need the client ID
+ // to see if the ping looks sane.
+ gClientID = await ClientID.getClientID();
+
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+ Assert.equal(
+ ping.clientId,
+ gClientID,
+ "The correct clientId must be reported."
+ );
+
+ // Shutdown Telemetry so we can safely restart it.
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+
+ // We should have cached the client ID now. Lets confirm that by checking it before
+ // the async ping setup is finished.
+ h.clear();
+ promisePingSetup = TelemetryController.testReset();
+ await sendPing(true, false);
+ await promisePingSetup;
+
+ // Check that we received the cached client id.
+ Assert.equal(h.snapshot().sum, 0, "We must have used the cached clientId.");
+ ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+ Assert.equal(
+ ping.clientId,
+ gClientID,
+ "Telemetry should report the correct cached clientId."
+ );
+
+ // Check that sending a ping without relying on the cache, after the
+ // initialization, still works.
+ Preferences.reset(TelemetryUtils.Preferences.CachedClientId);
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+ await TelemetryController.testReset();
+ await sendPing(true, false);
+ ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+ Assert.equal(
+ ping.clientId,
+ gClientID,
+ "The correct clientId must be reported."
+ );
+ Assert.equal(
+ h.snapshot().sum,
+ 0,
+ "No ping should have been waiting for a clientId."
+ );
+});
+
+add_task(async function test_pingHasEnvironment() {
+ // Send a ping with the environment data.
+ await sendPing(false, true);
+ let ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, false, true);
+
+ // Test a field in the environment build section.
+ Assert.equal(ping.application.buildId, ping.environment.build.buildId);
+});
+
+add_task(async function test_pingHasEnvironmentAndClientId() {
+ // Send a ping with the environment data and client id.
+ await sendPing(true, true);
+ let ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+
+ // Test a field in the environment build section.
+ Assert.equal(ping.application.buildId, ping.environment.build.buildId);
+ // Test that we have the correct clientId.
+ Assert.equal(
+ ping.clientId,
+ gClientID,
+ "The correct clientId must be reported."
+ );
+});
+
+add_task(async function test_archivePings() {
+ let now = new Date(2009, 10, 18, 12, 0, 0);
+ fakeNow(now);
+
+ // Disable ping upload so that pings don't get sent.
+ // With unified telemetry the FHR upload pref controls this,
+ // with non-unified telemetry the Telemetry enabled pref.
+ const isUnified = Preferences.get(TelemetryUtils.Preferences.Unified, false);
+ const uploadPref = isUnified
+ ? TelemetryUtils.Preferences.FhrUploadEnabled
+ : TelemetryUtils.Preferences.TelemetryEnabled;
+ Preferences.set(uploadPref, false);
+
+ // If we're using unified telemetry, disabling ping upload will generate a "deletion-request" ping. Catch it.
+ if (isUnified) {
+ let ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, DELETION_REQUEST_PING_TYPE, true, false);
+ }
+
+ // Register a new Ping Handler that asserts if a ping is received, then send a ping.
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Telemetry must not send pings if not allowed to.")
+ );
+ let pingId = await sendPing(true, true);
+
+ // Check that the ping was archived, even with upload disabled.
+ let ping = await TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.equal(
+ ping.id,
+ pingId,
+ "TelemetryController should still archive pings."
+ );
+
+ // Check that pings don't get archived if not allowed to.
+ now = new Date(2010, 10, 18, 12, 0, 0);
+ fakeNow(now);
+ Preferences.set(TelemetryUtils.Preferences.ArchiveEnabled, false);
+ pingId = await sendPing(true, true);
+ let promise = TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.ok(
+ await promiseRejects(promise),
+ "TelemetryController should not archive pings if the archive pref is disabled."
+ );
+
+ // Enable archiving and the upload so that pings get sent and archived again.
+ Preferences.set(uploadPref, true);
+ Preferences.set(TelemetryUtils.Preferences.ArchiveEnabled, true);
+
+ now = new Date(2014, 6, 18, 22, 0, 0);
+ fakeNow(now);
+ // Restore the non asserting ping handler.
+ PingServer.resetPingHandler();
+ pingId = await sendPing(true, true);
+
+ // Check that we archive pings when successfully sending them.
+ await PingServer.promiseNextPing();
+ ping = await TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.equal(
+ ping.id,
+ pingId,
+ "TelemetryController should still archive pings if ping upload is enabled."
+ );
+});
+
+// Test that we fuzz the submission time around midnight properly
+// to avoid overloading the telemetry servers.
+add_task(async function test_midnightPingSendFuzzing() {
+ const fuzzingDelay = 60 * 60 * 1000;
+ fakeMidnightPingFuzzingDelay(fuzzingDelay);
+ let now = new Date(2030, 5, 1, 11, 0, 0);
+ fakeNow(now);
+
+ let waitForTimer = () =>
+ new Promise(resolve => {
+ fakePingSendTimer(
+ (callback, timeout) => {
+ resolve([callback, timeout]);
+ },
+ () => {}
+ );
+ });
+
+ PingServer.clearRequests();
+ await TelemetryController.testReset();
+
+ // A ping after midnight within the fuzzing delay should not get sent.
+ now = new Date(2030, 5, 2, 0, 40, 0);
+ fakeNow(now);
+ PingServer.registerPingHandler((req, res) => {
+ Assert.ok(false, "No ping should be received yet.");
+ });
+ let timerPromise = waitForTimer();
+ await sendPing(true, true);
+ let [timerCallback, timerTimeout] = await timerPromise;
+ Assert.ok(!!timerCallback);
+ Assert.deepEqual(
+ futureDate(now, timerTimeout),
+ new Date(2030, 5, 2, 1, 0, 0)
+ );
+
+ // A ping just before the end of the fuzzing delay should not get sent.
+ now = new Date(2030, 5, 2, 0, 59, 59);
+ fakeNow(now);
+ timerPromise = waitForTimer();
+ await sendPing(true, true);
+ [timerCallback, timerTimeout] = await timerPromise;
+ Assert.deepEqual(timerTimeout, 1 * 1000);
+
+ // Restore the previous ping handler.
+ PingServer.resetPingHandler();
+
+ // Setting the clock to after the fuzzing delay, we should trigger the two ping sends
+ // with the timer callback.
+ now = futureDate(now, timerTimeout);
+ fakeNow(now);
+ await timerCallback();
+ const pings = await PingServer.promiseNextPings(2);
+ for (let ping of pings) {
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+ }
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+ // Moving the clock further we should still send pings immediately.
+ now = futureDate(now, 5 * 60 * 1000);
+ await sendPing(true, true);
+ let ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+ // Check that pings shortly before midnight are immediately sent.
+ now = fakeNow(2030, 5, 3, 23, 59, 0);
+ await sendPing(true, true);
+ ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+ // Clean-up.
+ fakeMidnightPingFuzzingDelay(0);
+ fakePingSendTimer(
+ () => {},
+ () => {}
+ );
+});
+
+add_task(async function test_changePingAfterSubmission() {
+ // Submit a ping with a custom payload.
+ let payload = { canary: "test" };
+ let pingPromise = TelemetryController.submitExternalPing(
+ TEST_PING_TYPE,
+ payload
+ );
+
+ // Change the payload with a predefined value.
+ payload.canary = "changed";
+
+ // Wait for the ping to be archived.
+ const pingId = await pingPromise;
+
+ // Make sure our changes didn't affect the submitted payload.
+ let archivedCopy = await TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.equal(
+ archivedCopy.payload.canary,
+ "test",
+ "The payload must not be changed after being submitted."
+ );
+});
+
+add_task(
+ {
+ skip_if: () =>
+ Services.prefs.getBoolPref(TelemetryUtils.Preferences.Unified, false),
+ },
+ async function test_telemetryEnabledUnexpectedValue() {
+ // Remove the default value for toolkit.telemetry.enabled from the default prefs.
+ // Otherwise, we wouldn't be able to set the pref to a string.
+ let defaultPrefBranch = Services.prefs.getDefaultBranch(null);
+ defaultPrefBranch.deleteBranch(TelemetryUtils.Preferences.TelemetryEnabled);
+
+ // Set the preferences controlling the Telemetry status to a string.
+ Preferences.set(TelemetryUtils.Preferences.TelemetryEnabled, "false");
+ // Check that Telemetry is not enabled.
+ await TelemetryController.testReset();
+ Assert.equal(
+ Telemetry.canRecordExtended,
+ false,
+ "Invalid values must not enable Telemetry recording."
+ );
+
+ // Delete the pref again.
+ defaultPrefBranch.deleteBranch(TelemetryUtils.Preferences.TelemetryEnabled);
+
+ // Make sure that flipping it to true works.
+ Preferences.set(TelemetryUtils.Preferences.TelemetryEnabled, true);
+ await TelemetryController.testReset();
+ Assert.equal(
+ Telemetry.canRecordExtended,
+ true,
+ "True must enable Telemetry recording."
+ );
+
+    // Also check that setting it to false works.
+ Preferences.set(TelemetryUtils.Preferences.TelemetryEnabled, false);
+ await TelemetryController.testReset();
+ Assert.equal(
+ Telemetry.canRecordExtended,
+ false,
+ "False must disable Telemetry recording."
+ );
+
+ // Restore the state of the pref.
+ Preferences.set(TelemetryUtils.Preferences.TelemetryEnabled, true);
+ }
+);
+
+add_task(async function test_telemetryCleanFHRDatabase() {
+ const FHR_DBNAME_PREF = "datareporting.healthreport.dbName";
+ const CUSTOM_DB_NAME = "unlikely.to.be.used.sqlite";
+ const DEFAULT_DB_NAME = "healthreport.sqlite";
+
+ // Check that we're able to remove a FHR DB with a custom name.
+ const profileDir = await PathUtils.getProfileDir();
+ const CUSTOM_DB_PATHS = [
+ PathUtils.join(profileDir, CUSTOM_DB_NAME),
+ PathUtils.join(profileDir, CUSTOM_DB_NAME + "-wal"),
+ PathUtils.join(profileDir, CUSTOM_DB_NAME + "-shm"),
+ ];
+ Preferences.set(FHR_DBNAME_PREF, CUSTOM_DB_NAME);
+
+ // Write fake DB files to the profile directory.
+ for (let dbFilePath of CUSTOM_DB_PATHS) {
+ await IOUtils.writeUTF8(dbFilePath, "some data");
+ }
+
+ // Trigger the cleanup and check that the files were removed.
+ await TelemetryStorage.removeFHRDatabase();
+ for (let dbFilePath of CUSTOM_DB_PATHS) {
+ try {
+ await IOUtils.read(dbFilePath);
+ } catch (e) {
+ Assert.ok(e instanceof DOMException);
+ Assert.equal(
+ e.name,
+ "NotFoundError",
+ "The DB must not be on the disk anymore: " + dbFilePath
+ );
+ }
+ }
+
+ // We should not break anything if there's no DB file.
+ await TelemetryStorage.removeFHRDatabase();
+
+ // Check that we're able to remove a FHR DB with the default name.
+ Preferences.reset(FHR_DBNAME_PREF);
+
+ const DEFAULT_DB_PATHS = [
+ PathUtils.join(profileDir, DEFAULT_DB_NAME),
+ PathUtils.join(profileDir, DEFAULT_DB_NAME + "-wal"),
+ PathUtils.join(profileDir, DEFAULT_DB_NAME + "-shm"),
+ ];
+
+ // Write fake DB files to the profile directory.
+ for (let dbFilePath of DEFAULT_DB_PATHS) {
+ await IOUtils.writeUTF8(dbFilePath, "some data");
+ }
+
+ // Trigger the cleanup and check that the files were removed.
+ await TelemetryStorage.removeFHRDatabase();
+ for (let dbFilePath of DEFAULT_DB_PATHS) {
+ try {
+ await IOUtils.read(dbFilePath);
+ } catch (e) {
+ Assert.ok(e instanceof DOMException);
+ Assert.equal(
+ e.name,
+ "NotFoundError",
+ "The DB must not be on the disk anymore: " + dbFilePath
+ );
+ }
+ }
+});
+
+add_task(async function test_sendNewProfile() {
+ if (
+ gIsAndroid ||
+ (AppConstants.platform == "linux" && OS.Constants.Sys.bits == 32)
+ ) {
+ // We don't support the pingsender on Android, yet, see bug 1335917.
+    // We also don't support pingsender testing on Treeherder for
+    // Linux 32 bit (due to missing libraries), so skip it there too.
+ // See bug 1310703 comment 78.
+ return;
+ }
+
+ const NEWPROFILE_PING_TYPE = "new-profile";
+ const PREF_NEWPROFILE_ENABLED = "toolkit.telemetry.newProfilePing.enabled";
+ const PREF_NEWPROFILE_DELAY = "toolkit.telemetry.newProfilePing.delay";
+
+ // Make sure Telemetry is shut down before beginning and that we have
+ // no pending pings.
+ let resetTest = async function() {
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ };
+ await resetTest();
+
+ // Make sure to reset all the new-profile ping prefs.
+ const stateFilePath = PathUtils.join(
+ await DATAREPORTING_PATH,
+ "session-state.json"
+ );
+ await IOUtils.remove(stateFilePath);
+ Preferences.set(PREF_NEWPROFILE_DELAY, 1);
+ Preferences.set(PREF_NEWPROFILE_ENABLED, true);
+
+ // Check that a new-profile ping is sent on the first session.
+ let nextReq = PingServer.promiseNextRequest();
+ await TelemetryController.testReset();
+ let req = await nextReq;
+ let ping = decodeRequestPayload(req);
+ checkPingFormat(ping, NEWPROFILE_PING_TYPE, true, true);
+ Assert.equal(
+ ping.payload.reason,
+ "startup",
+ "The new-profile ping generated after startup must have the correct reason"
+ );
+ Assert.ok(
+ "parent" in ping.payload.processes,
+ "The new-profile ping generated after startup must have processes.parent data"
+ );
+
+  // Check that it is not sent with the pingsender during startup.
+ Assert.throws(
+ () => req.getHeader("X-PingSender-Version"),
+ /NS_ERROR_NOT_AVAILABLE/,
+ "Should not have used the pingsender."
+ );
+
+ // Make sure that the new-profile ping is sent at shutdown if it wasn't sent before.
+ await resetTest();
+ await IOUtils.remove(stateFilePath);
+ Preferences.reset(PREF_NEWPROFILE_DELAY);
+
+ nextReq = PingServer.promiseNextRequest();
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ req = await nextReq;
+ ping = decodeRequestPayload(req);
+ checkPingFormat(ping, NEWPROFILE_PING_TYPE, true, true);
+ Assert.equal(
+ ping.payload.reason,
+ "shutdown",
+ "The new-profile ping generated at shutdown must have the correct reason"
+ );
+ Assert.ok(
+ "parent" in ping.payload.processes,
+ "The new-profile ping generated at shutdown must have processes.parent data"
+ );
+
+ // Check that the new-profile ping is sent at shutdown using the pingsender.
+ Assert.equal(
+ req.getHeader("User-Agent"),
+ "pingsender/1.0",
+ "Should have received the correct user agent string."
+ );
+ Assert.equal(
+ req.getHeader("X-PingSender-Version"),
+ "1.0",
+ "Should have received the correct PingSender version string."
+ );
+
+ // Check that no new-profile ping is sent in subsequent sessions, neither at
+ // startup nor at shutdown.
+ await resetTest();
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "The new-profile ping must be sent only on new profiles.")
+ );
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+
+ // Check that we don't send the new-profile ping if the profile already contains
+ // a state file (but no "newProfilePingSent" property).
+ await resetTest();
+ await IOUtils.remove(stateFilePath);
+ const sessionState = {
+ sessionId: null,
+ subsessionId: null,
+ profileSubsessionCounter: 3785,
+ };
+ await CommonUtils.writeJSON(sessionState, stateFilePath);
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+
+ // Reset the pref and restart Telemetry.
+ Preferences.reset(PREF_NEWPROFILE_ENABLED);
+ PingServer.resetPingHandler();
+});
+
+add_task(async function test_encryptedPing() {
+ if (gIsAndroid) {
+ // The underlying jwcrypto module being used here is not currently available on Android.
+ return;
+ }
+ Cu.importGlobalProperties(["crypto"]);
+
+ const ECDH_PARAMS = {
+ name: "ECDH",
+ namedCurve: "P-256",
+ };
+
+ const privateKey = {
+ crv: "P-256",
+ d: "rcs093UlGDG6piwHenmSDoAxbzMIXT43JkQbkt3xEmI",
+ ext: true,
+ key_ops: ["deriveKey"],
+ kty: "EC",
+ x: "h12feyTYBZ__wO_AnM1a5-KTDlko3-YyQ_en19jyrs0",
+ y: "6GSfzo14ehDyH5E-xCOedJDAYlN0AGPMCtIgFbheLko",
+ };
+
+ const publicKey = {
+ crv: "P-256",
+ ext: true,
+ kty: "EC",
+ x: "h12feyTYBZ__wO_AnM1a5-KTDlko3-YyQ_en19jyrs0",
+ y: "6GSfzo14ehDyH5E-xCOedJDAYlN0AGPMCtIgFbheLko",
+ };
+
+ const pioneerId = "12345";
+ const schemaName = "abc";
+ const schemaNamespace = "def";
+ const schemaVersion = 2;
+
+ Services.prefs.setStringPref("toolkit.telemetry.pioneerId", pioneerId);
+
+ // Stop the sending task and then start it again.
+ await TelemetrySend.shutdown();
+ // Reset the controller to spin the ping sending task.
+ await TelemetryController.testReset();
+
+ // Submit a ping with a custom payload, which will be encrypted.
+ let payload = { canary: "test" };
+ let pingPromise = TelemetryController.submitExternalPing(
+ "pioneer-study",
+ payload,
+ {
+ studyName: "pioneer-dev-1@allizom.org",
+ addPioneerId: true,
+ useEncryption: true,
+ encryptionKeyId: "pioneer-dev-20200423",
+ publicKey,
+ schemaName,
+ schemaNamespace,
+ schemaVersion,
+ }
+ );
+
+ // Wait for the ping to be archived.
+ const pingId = await pingPromise;
+
+ let archivedCopy = await TelemetryArchive.promiseArchivedPingById(pingId);
+
+ Assert.notEqual(
+ archivedCopy.payload.encryptedData,
+ payload,
+ "The encrypted payload must not match the plaintext."
+ );
+
+ Assert.equal(
+ archivedCopy.payload.pioneerId,
+ pioneerId,
+ "Pioneer ID in ping must match the pref."
+ );
+
+ // Validate ping against schema.
+ const schema = {
+ $schema: "http://json-schema.org/draft-04/schema#",
+ properties: {
+ application: {
+ additionalProperties: false,
+ properties: {
+ architecture: {
+ type: "string",
+ },
+ buildId: {
+ pattern: "^[0-9]{10}",
+ type: "string",
+ },
+ channel: {
+ type: "string",
+ },
+ displayVersion: {
+ pattern: "^[0-9]{2,3}\\.",
+ type: "string",
+ },
+ name: {
+ type: "string",
+ },
+ platformVersion: {
+ pattern: "^[0-9]{2,3}\\.",
+ type: "string",
+ },
+ vendor: {
+ type: "string",
+ },
+ version: {
+ pattern: "^[0-9]{2,3}\\.",
+ type: "string",
+ },
+ xpcomAbi: {
+ type: "string",
+ },
+ },
+ required: [
+ "architecture",
+ "buildId",
+ "channel",
+ "name",
+ "platformVersion",
+ "version",
+ "vendor",
+ "xpcomAbi",
+ ],
+ type: "object",
+ },
+ creationDate: {
+ pattern:
+ "^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\\.[0-9]{3}Z$",
+ type: "string",
+ },
+ id: {
+ pattern:
+ "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$",
+ type: "string",
+ },
+ payload: {
+ description: "",
+ properties: {
+ encryptedData: {
+ description: "JOSE/JWE encrypted payload.",
+ type: "string",
+ },
+ encryptionKeyId: {
+ description: "JOSE/JWK key id, e.g. pioneer-20170520.",
+ type: "string",
+ },
+ pioneerId: {
+ description: "Custom pioneer id, must not be Telemetry clientId",
+ pattern:
+ "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$",
+ type: "string",
+ },
+ schemaName: {
+ description:
+ "Name of a schema used for validation of the encryptedData",
+ maxLength: 100,
+ minLength: 1,
+ pattern: "^\\S+$",
+ type: "string",
+ },
+ schemaNamespace: {
+ description:
+ "The namespace of the schema used for validation and routing to a dataset.",
+ maxLength: 100,
+ minLength: 1,
+ pattern: "^\\S+$",
+ type: "string",
+ },
+ schemaVersion: {
+ description: "Integer version number of the schema",
+ minimum: 1,
+ type: "integer",
+ },
+ studyName: {
+ description: "Name of a particular study. Usually the addon_id.",
+ maxLength: 100,
+ minLength: 1,
+ pattern: "^\\S+$",
+ type: "string",
+ },
+ },
+ required: [
+ "encryptedData",
+ "encryptionKeyId",
+ "pioneerId",
+ "studyName",
+ "schemaName",
+ "schemaNamespace",
+ "schemaVersion",
+ ],
+ title: "pioneer-study",
+ type: "object",
+ },
+ type: {
+ description: "doc_type, restated",
+ enum: ["pioneer-study"],
+ type: "string",
+ },
+ version: {
+ maximum: 4,
+ minimum: 4,
+ type: "integer",
+ },
+ },
+ required: [
+ "application",
+ "creationDate",
+ "id",
+ "payload",
+ "type",
+ "version",
+ ],
+ title: "pioneer-study",
+ type: "object",
+ };
+
+ const result = JsonSchemaValidator.validate(archivedCopy, schema);
+
+ Assert.ok(
+ result.valid,
+ `Archived ping should validate against schema: ${result.error}`
+ );
+
+ // check that payload can be decrypted.
+ const privateJWK = await crypto.subtle.importKey(
+ "jwk",
+ privateKey,
+ ECDH_PARAMS,
+ false,
+ ["deriveKey"]
+ );
+
+ const decryptedJWE = await jwcrypto.decryptJWE(
+ archivedCopy.payload.encryptedData,
+ privateJWK
+ );
+
+ Assert.deepEqual(
+ JSON.parse(new TextDecoder("utf-8").decode(decryptedJWE)),
+ payload,
+ "decrypted payload should match"
+ );
+});
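+
+// For reference: JWK pairs like the hard-coded dev keys used above can be
+// generated with WebCrypto. The helper below is only an illustrative sketch
+// (hypothetical, never called by these tests); it assumes the "crypto" global
+// imported in the tasks above.
+async function sketchGenerateDevKeyPair() {
+  const pair = await crypto.subtle.generateKey(
+    { name: "ECDH", namedCurve: "P-256" },
+    true, // extractable, so both keys can be exported as JWK
+    ["deriveKey"]
+  );
+  return {
+    publicKey: await crypto.subtle.exportKey("jwk", pair.publicKey),
+    privateKey: await crypto.subtle.exportKey("jwk", pair.privateKey),
+  };
+}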
+
+add_task(async function test_encryptedPing_overrideId() {
+ if (gIsAndroid) {
+ // The underlying jwcrypto module being used here is not currently available on Android.
+ return;
+ }
+ Cu.importGlobalProperties(["crypto"]);
+
+ const publicKey = {
+ crv: "P-256",
+ ext: true,
+ kty: "EC",
+ x: "h12feyTYBZ__wO_AnM1a5-KTDlko3-YyQ_en19jyrs0",
+ y: "6GSfzo14ehDyH5E-xCOedJDAYlN0AGPMCtIgFbheLko",
+ };
+
+ const prefPioneerId = "12345";
+ const overriddenPioneerId = "c0ffeeaa-bbbb-abab-baba-eeff0ceeff0c";
+ const schemaName = "abc";
+ const schemaNamespace = "def";
+ const schemaVersion = 2;
+
+ Services.prefs.setStringPref("toolkit.telemetry.pioneerId", prefPioneerId);
+
+ let archiveTester = new TelemetryArchiveTesting.Checker();
+ await archiveTester.promiseInit();
+
+ // Submit a ping with a custom payload, which will be encrypted.
+ let payload = { canary: "test" };
+ let pingPromise = TelemetryController.submitExternalPing(
+ "test-pioneer-study-override",
+ payload,
+ {
+ studyName: "pioneer-dev-1@allizom.org",
+ addPioneerId: true,
+ overridePioneerId: overriddenPioneerId,
+ useEncryption: true,
+ encryptionKeyId: "pioneer-dev-20200423",
+ publicKey,
+ schemaName,
+ schemaNamespace,
+ schemaVersion,
+ }
+ );
+
+ // Wait for the ping to be submitted, to have the ping id to scan the
+ // archive for.
+ const pingId = await pingPromise;
+
+ // And then wait for the ping to be available in the archive.
+ await TestUtils.waitForCondition(
+ () => archiveTester.promiseFindPing("test-pioneer-study-override", []),
+ "Failed to find the pioneer ping"
+ );
+
+ let archivedCopy = await TelemetryArchive.promiseArchivedPingById(pingId);
+
+ Assert.notEqual(
+ archivedCopy.payload.encryptedData,
+ payload,
+ "The encrypted payload must not match the plaintext."
+ );
+
+ Assert.equal(
+ archivedCopy.payload.pioneerId,
+ overriddenPioneerId,
+ "Pioneer ID in ping must match the provided override."
+ );
+});
+
+// Testing shutdown and checking that pings sent afterwards are rejected.
+add_task(async function test_pingRejection() {
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ await sendPing(false, false).then(
+ () => Assert.ok(false, "Pings submitted after shutdown must be rejected."),
+ () => Assert.ok(true, "Ping submitted after shutdown correctly rejected.")
+ );
+});
+
+add_task(async function test_newCanRecordsMatchTheOld() {
+ Assert.equal(
+ Telemetry.canRecordBase,
+ Telemetry.canRecordReleaseData,
+ "Release Data is the new way to say Base Collection"
+ );
+ Assert.equal(
+ Telemetry.canRecordExtended,
+ Telemetry.canRecordPrereleaseData,
+ "Prerelease Data is the new way to say Extended Collection"
+ );
+});
+
+add_task(function test_histogram_filtering() {
+ const COUNT_ID = "TELEMETRY_TEST_COUNT";
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
+ const count = Telemetry.getHistogramById(COUNT_ID);
+ const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
+
+ count.add(1);
+ keyed.add("a", 1);
+
+ let snapshot = Telemetry.getSnapshotForHistograms(
+ "main",
+ false,
+ /* filter */ false
+ ).parent;
+ let keyedSnapshot = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false,
+ /* filter */ false
+ ).parent;
+ Assert.ok(COUNT_ID in snapshot, "test histogram should be snapshotted");
+ Assert.ok(
+ KEYED_ID in keyedSnapshot,
+ "test keyed histogram should be snapshotted"
+ );
+
+ snapshot = Telemetry.getSnapshotForHistograms(
+ "main",
+ false,
+ /* filter */ true
+ ).parent;
+ keyedSnapshot = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false,
+ /* filter */ true
+ ).parent;
+ Assert.ok(
+ !(COUNT_ID in snapshot),
+ "test histogram should not be snapshotted"
+ );
+ Assert.ok(
+ !(KEYED_ID in keyedSnapshot),
+ "test keyed histogram should not be snapshotted"
+ );
+});
+
+add_task(function test_scalar_filtering() {
+ const COUNT_ID = "telemetry.test.unsigned_int_kind";
+ const KEYED_ID = "telemetry.test.keyed_unsigned_int";
+
+ Telemetry.scalarSet(COUNT_ID, 2);
+ Telemetry.keyedScalarSet(KEYED_ID, "a", 2);
+
+ let snapshot = Telemetry.getSnapshotForScalars(
+ "main",
+ false,
+ /* filter */ false
+ ).parent;
+ let keyedSnapshot = Telemetry.getSnapshotForKeyedScalars(
+ "main",
+ false,
+ /* filter */ false
+ ).parent;
+ Assert.ok(COUNT_ID in snapshot, "test scalars should be snapshotted");
+ Assert.ok(
+ KEYED_ID in keyedSnapshot,
+ "test keyed scalars should be snapshotted"
+ );
+
+ snapshot = Telemetry.getSnapshotForScalars("main", false, /* filter */ true)
+ .parent;
+ keyedSnapshot = Telemetry.getSnapshotForKeyedScalars(
+ "main",
+ false,
+ /* filter */ true
+ ).parent;
+ Assert.ok(!(COUNT_ID in snapshot), "test scalars should not be snapshotted");
+ Assert.ok(
+ !(KEYED_ID in keyedSnapshot),
+ "test keyed scalars should not be snapshotted"
+ );
+});
+
+add_task(async function stopServer() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js
new file mode 100644
index 0000000000..0255bce2e7
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js
@@ -0,0 +1,69 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* Test inclusion of previous build ID in telemetry pings when build ID changes.
+ * bug 841028
+ *
+ * Cases to cover:
+ * 1) Run with no "previousBuildID" stored in prefs:
+ * -> no previousBuildID in telemetry system info, new value set in prefs.
+ * 2) previousBuildID in prefs, equal to current build ID:
+ * -> no previousBuildID in telemetry, prefs not updated.
+ * 3) previousBuildID in prefs, not equal to current build ID:
+ * -> previousBuildID in telemetry, new value set in prefs.
+ */
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+
+// Set up our dummy AppInfo object so we can control the appBuildID.
+ChromeUtils.import("resource://testing-common/AppInfo.jsm", this);
+updateAppInfo();
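+
+// The three cases listed in the comment at the top of this file reduce to a
+// compare-and-update of a preference against the running build ID. The helper
+// below is only an illustrative sketch of that logic (hypothetical, never
+// called by these tests, and not the real TelemetrySession implementation).
+function sketchPreviousBuildID(currentBuildID) {
+  const PREF = TelemetryUtils.Preferences.PreviousBuildID;
+  // An empty string doubles as "nothing stored yet".
+  let previous = Services.prefs.getCharPref(PREF, "");
+  if (!previous) {
+    // Case 1: first run; remember the current build ID, report nothing.
+    Services.prefs.setCharPref(PREF, currentBuildID);
+    return null;
+  }
+  if (previous == currentBuildID) {
+    // Case 2: same build as last time; report nothing, leave the pref alone.
+    return null;
+  }
+  // Case 3: the build changed; update the pref and report the old build ID.
+  Services.prefs.setCharPref(PREF, currentBuildID);
+  return previous;
+}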
+
+// Check that when run with no previous build ID stored, we update the pref but do not
+// put anything into the metadata.
+add_task(async function test_firstRun() {
+ await TelemetryController.testReset();
+ let metadata = TelemetrySession.getMetadata();
+ Assert.equal(false, "previousBuildID" in metadata);
+ let appBuildID = getAppInfo().appBuildID;
+ let buildIDPref = Services.prefs.getCharPref(
+ TelemetryUtils.Preferences.PreviousBuildID
+ );
+ Assert.equal(appBuildID, buildIDPref);
+});
+
+// Check that a subsequent run with the same build ID does not put prev build ID in
+// metadata. Assumes testFirstRun() has already been called to set the previousBuildID pref.
+add_task(async function test_secondRun() {
+ await TelemetryController.testReset();
+ let metadata = TelemetrySession.getMetadata();
+ Assert.equal(false, "previousBuildID" in metadata);
+});
+
+// Set up telemetry with a different app build ID and check that the old build ID
+// is returned in the metadata and the pref is updated to the new build ID.
+// Assumes testFirstRun() has been called to set the previousBuildID pref.
+const NEW_BUILD_ID = "20130314";
+add_task(async function test_newBuild() {
+ let info = getAppInfo();
+ let oldBuildID = info.appBuildID;
+ info.appBuildID = NEW_BUILD_ID;
+ await TelemetryController.testReset();
+ let metadata = TelemetrySession.getMetadata();
+ Assert.equal(metadata.previousBuildId, oldBuildID);
+ let buildIDPref = Services.prefs.getCharPref(
+ TelemetryUtils.Preferences.PreviousBuildID
+ );
+ Assert.equal(NEW_BUILD_ID, buildIDPref);
+});
+
+function run_test() {
+ // Make sure we have a profile directory.
+ do_get_profile();
+
+ run_next_test();
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js
new file mode 100644
index 0000000000..4dc8ee54e1
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js
@@ -0,0 +1,75 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Test that TelemetryController send operations that happen close to shutdown
+// don't lead to AsyncShutdown timeouts.
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/Timer.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/AsyncShutdown.jsm", this);
+ChromeUtils.import("resource://testing-common/httpd.js", this);
+
+function contentHandler(metadata, response) {
+ dump("contentHandler called for path: " + metadata._path + "\n");
+ // We intentionally don't finish writing the response here to let the
+ // client time out.
+ response.processAsync();
+ response.setHeader("Content-Type", "text/plain");
+}
+
+add_task(async function test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+});
+
+/**
+ * Ensures that TelemetryController does not hang processing shutdown
+ * phases. Assumes that Telemetry shutdown routines do not take longer than
+ * CRASH_TIMEOUT_MS to complete.
+ */
+add_task(async function test_sendTelemetryShutsDownWithinReasonableTimeout() {
+ const CRASH_TIMEOUT_MS = 10 * 1000;
+ // Enable testing mode for AsyncShutdown, otherwise some testing-only functionality
+ // is not available.
+ Services.prefs.setBoolPref("toolkit.asyncshutdown.testing", true);
+ // Reducing the max delay for waiting on phases to complete from 1 minute
+ // (standard) to 10 seconds to avoid blocking the tests in case of misbehavior.
+ Services.prefs.setIntPref(
+ "toolkit.asyncshutdown.crash_timeout",
+ CRASH_TIMEOUT_MS
+ );
+
+ let httpServer = new HttpServer();
+ httpServer.registerPrefixHandler("/", contentHandler);
+ httpServer.start(-1);
+
+ await TelemetryController.testSetup();
+ TelemetrySend.setServer(
+ "http://localhost:" + httpServer.identity.primaryPort
+ );
+ let submissionPromise = TelemetryController.submitExternalPing(
+ "test-ping-type",
+ {}
+ );
+
+ // Trigger the AsyncShutdown phase TelemetryController hangs off.
+ AsyncShutdown.profileBeforeChange._trigger();
+ AsyncShutdown.sendTelemetry._trigger();
+ // Now wait for the ping submission.
+ await submissionPromise;
+
+ // If we get here, we didn't time out in the shutdown routines.
+ Assert.ok(true, "Didn't time out on shutdown.");
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js b/toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js
new file mode 100644
index 0000000000..137f62a524
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js
@@ -0,0 +1,76 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Check that TelemetrySession notifies correctly on idle-daily.
+
+ChromeUtils.import("resource://testing-common/httpd.js", this);
+ChromeUtils.import("resource://gre/modules/PromiseUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+
+var gHttpServer = null;
+
+add_task(async function test_setup() {
+ do_get_profile();
+
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ // Start the webserver to check if the pending ping correctly arrives.
+ gHttpServer = new HttpServer();
+ gHttpServer.start(-1);
+});
+
+add_task(async function testSendPendingOnIdleDaily() {
+ // Create a valid pending ping.
+ const PENDING_PING = {
+ id: "2133234d-4ea1-44f4-909e-ce8c6c41e0fc",
+ type: "test-ping",
+ version: 4,
+ application: {},
+ payload: {},
+ };
+ await TelemetryStorage.savePing(PENDING_PING, true);
+
+ // Telemetry will not send this ping at startup, because it's not overdue.
+ await TelemetryController.testSetup();
+ TelemetrySend.setServer(
+ "http://localhost:" + gHttpServer.identity.primaryPort
+ );
+
+ let pendingPromise = new Promise(resolve =>
+ gHttpServer.registerPrefixHandler("/submit/telemetry/", request =>
+ resolve(request)
+ )
+ );
+
+ let gatherPromise = PromiseUtils.defer();
+ Services.obs.addObserver(gatherPromise.resolve, "gather-telemetry");
+
+ // Check that we are correctly receiving the gather-telemetry notification.
+ TelemetrySession.observe(null, "idle-daily", null);
+ await gatherPromise.promise;
+ Assert.ok(true, "Received gather-telemetry notification.");
+
+ Services.obs.removeObserver(gatherPromise.resolve, "gather-telemetry");
+
+ // Check that the pending ping is correctly received.
+ let module = ChromeUtils.import(
+ "resource://gre/modules/TelemetrySend.jsm",
+ null
+ );
+ module.TelemetrySendImpl.observe(null, "idle-daily", null);
+ let request = await pendingPromise;
+ let ping = decodeRequestPayload(request);
+
+ // Validate the ping data.
+ Assert.equal(ping.id, PENDING_PING.id);
+ Assert.equal(ping.type, PENDING_PING.type);
+
+ await new Promise(resolve => gHttpServer.stop(resolve));
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js b/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js
new file mode 100644
index 0000000000..504692e6c4
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js
@@ -0,0 +1,2711 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { AddonManager, AddonManagerPrivate } = ChromeUtils.import(
+ "resource://gre/modules/AddonManager.jsm"
+);
+ChromeUtils.import("resource://gre/modules/TelemetryEnvironment.jsm", this);
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+ChromeUtils.import("resource://gre/modules/PromiseUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Timer.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
+const { HttpServer } = ChromeUtils.import("resource://testing-common/httpd.js");
+ChromeUtils.import("resource://testing-common/MockRegistrar.jsm", this);
+const { FileUtils } = ChromeUtils.import(
+ "resource://gre/modules/FileUtils.jsm"
+);
+const { CommonUtils } = ChromeUtils.import(
+ "resource://services-common/utils.js"
+);
+const { OS } = ChromeUtils.import("resource://gre/modules/osfile.jsm");
+const { SearchTestUtils } = ChromeUtils.import(
+ "resource://testing-common/SearchTestUtils.jsm"
+);
+if (AppConstants.MOZ_GLEAN) {
+ Cu.importGlobalProperties(["Glean"]);
+}
+
+// AttributionCode is only needed for Firefox
+ChromeUtils.defineModuleGetter(
+ this,
+ "AttributionCode",
+ "resource:///modules/AttributionCode.jsm"
+);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "ExtensionTestUtils",
+ "resource://testing-common/ExtensionXPCShellUtils.jsm"
+);
+
+SearchTestUtils.init(this);
+
+async function installXPIFromURL(url) {
+ let install = await AddonManager.getInstallForURL(url);
+ return install.install();
+}
+
+function promiseNextTick() {
+ return new Promise(resolve => executeSoon(resolve));
+}
+
+// The webserver hosting the addons.
+var gHttpServer = null;
+// The URL of the webserver root.
+var gHttpRoot = null;
+// The URL of the data directory, on the webserver.
+var gDataRoot = null;
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+const DISTRIBUTION_ID = "distributor-id";
+const DISTRIBUTION_VERSION = "4.5.6b";
+const DISTRIBUTOR_NAME = "Some Distributor";
+const DISTRIBUTOR_CHANNEL = "A Channel";
+const PARTNER_NAME = "test";
+const PARTNER_ID = "NicePartner-ID-3785";
+const DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC =
+ "distribution-customization-complete";
+
+const GFX_VENDOR_ID = "0xabcd";
+const GFX_DEVICE_ID = "0x1234";
+
+// The profile reset date, in milliseconds (Today)
+const PROFILE_RESET_DATE_MS = Date.now();
+// The profile creation date, in milliseconds (Yesterday).
+const PROFILE_FIRST_USE_MS = PROFILE_RESET_DATE_MS - MILLISECONDS_PER_DAY;
+const PROFILE_CREATION_DATE_MS = PROFILE_FIRST_USE_MS - MILLISECONDS_PER_DAY;
+
+const FLASH_PLUGIN_NAME = "Shockwave Flash";
+const FLASH_PLUGIN_DESC = "A mock flash plugin";
+const FLASH_PLUGIN_VERSION = "\u201c1.1.1.1\u201d";
+const PLUGIN_MIME_TYPE1 = "application/x-shockwave-flash";
+const PLUGIN_MIME_TYPE2 = "text/plain";
+
+const PLUGIN2_NAME = "Quicktime";
+const PLUGIN2_DESC = "A mock Quicktime plugin";
+const PLUGIN2_VERSION = "2.3";
+
+const PLUGIN_UPDATED_TOPIC = "plugins-list-updated";
+
+// System add-ons are enabled at startup, so record the date when the test starts.
+const SYSTEM_ADDON_INSTALL_DATE = Date.now();
+
+const EXPECTED_HDD_FIELDS = ["profile", "binary", "system"];
+
+// Valid attribution code to write so that settings.attribution can be tested.
+const ATTRIBUTION_CODE = "source%3Dgoogle.com";
+
+const pluginHost = Cc["@mozilla.org/plugin/host;1"].getService(
+ Ci.nsIPluginHost
+);
+
+/**
+ * Used to mock plugin tags in our fake plugin host.
+ */
+function PluginTag(aName, aDescription, aVersion, aEnabled) {
+ this.pluginTag = pluginHost.createFakePlugin({
+ handlerURI: `resource://fake-plugin/${Math.random()}.xhtml`,
+ mimeEntries: this.mimeTypes.map(type => ({ type })),
+ name: aName,
+ description: aDescription,
+ fileName: `${aName}.so`,
+ version: aVersion,
+ });
+ this.name = aName;
+ this.description = aDescription;
+ this.version = aVersion;
+ this.disabled = !aEnabled;
+}
+
+PluginTag.prototype = {
+ name: null,
+ description: null,
+ version: null,
+ filename: null,
+ fullpath: null,
+ blocklisted: false,
+ clicktoplay: true,
+
+ get disabled() {
+ return this.pluginTag.enabledState == Ci.nsIPluginTag.STATE_DISABLED;
+ },
+ set disabled(val) {
+ this.pluginTag.enabledState =
+ Ci.nsIPluginTag[val ? "STATE_DISABLED" : "STATE_CLICKTOPLAY"];
+ },
+
+ mimeTypes: [PLUGIN_MIME_TYPE1, PLUGIN_MIME_TYPE2],
+
+ getMimeTypes() {
+ return this.mimeTypes;
+ },
+};
+
+// A container for the plugins handled by the fake plugin host.
+var gInstalledPlugins = [
+ new PluginTag("Java", "A mock Java plugin", "1.0", false /* Disabled */),
+ new PluginTag(
+ FLASH_PLUGIN_NAME,
+ FLASH_PLUGIN_DESC,
+ FLASH_PLUGIN_VERSION,
+ true
+ ),
+];
+
+// A fake plugin host for testing plugin telemetry environment.
+var PluginHost = {
+ getPluginTags() {
+ return gInstalledPlugins.map(plugin => plugin.pluginTag);
+ },
+
+ QueryInterface: ChromeUtils.generateQI(["nsIPluginHost"]),
+};
+
+function registerFakePluginHost() {
+ MockRegistrar.register("@mozilla.org/plugin/host;1", PluginHost);
+}
+
+var SysInfo = {
+ overrides: {},
+
+ getProperty(name) {
+ // Assert.ok(false, "Mock SysInfo: " + name + ", " + JSON.stringify(this.overrides));
+ if (name in this.overrides) {
+ return this.overrides[name];
+ }
+
+ return this._genuine.QueryInterface(Ci.nsIPropertyBag).getProperty(name);
+ },
+
+ getPropertyAsACString(name) {
+ return this.get(name);
+ },
+
+ getPropertyAsUint32(name) {
+ return this.get(name);
+ },
+
+ get(name) {
+ return this._genuine.QueryInterface(Ci.nsIPropertyBag2).get(name);
+ },
+
+ get diskInfo() {
+ return this._genuine.QueryInterface(Ci.nsISystemInfo).diskInfo;
+ },
+
+ get osInfo() {
+ return this._genuine.QueryInterface(Ci.nsISystemInfo).osInfo;
+ },
+
+ get processInfo() {
+ return this._genuine.QueryInterface(Ci.nsISystemInfo).processInfo;
+ },
+
+ QueryInterface: ChromeUtils.generateQI(["nsIPropertyBag2", "nsISystemInfo"]),
+};
+
+function registerFakeSysInfo() {
+ MockRegistrar.register("@mozilla.org/system-info;1", SysInfo);
+}
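+
+// Hypothetical, unused helper (a sketch only): individual system properties
+// can be spoofed by populating SysInfo.overrides before the mock above is
+// registered; getProperty() then serves the override instead of the real
+// value.
+function sketchSpoofSysInfoProperty(name, value) {
+  SysInfo.overrides[name] = value;
+  registerFakeSysInfo();
+}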
+
+function MockAddonWrapper(aAddon) {
+ this.addon = aAddon;
+}
+MockAddonWrapper.prototype = {
+ get id() {
+ return this.addon.id;
+ },
+
+ get type() {
+ return "service";
+ },
+
+ get appDisabled() {
+ return false;
+ },
+
+ get isCompatible() {
+ return true;
+ },
+
+ get isPlatformCompatible() {
+ return true;
+ },
+
+ get scope() {
+ return AddonManager.SCOPE_PROFILE;
+ },
+
+ get foreignInstall() {
+ return false;
+ },
+
+ get providesUpdatesSecurely() {
+ return true;
+ },
+
+ get blocklistState() {
+ return 0; // Not blocked.
+ },
+
+ get pendingOperations() {
+ return AddonManager.PENDING_NONE;
+ },
+
+ get permissions() {
+ return AddonManager.PERM_CAN_UNINSTALL | AddonManager.PERM_CAN_DISABLE;
+ },
+
+ get isActive() {
+ return true;
+ },
+
+ get name() {
+ return this.addon.name;
+ },
+
+ get version() {
+ return this.addon.version;
+ },
+
+ get creator() {
+ return new AddonManagerPrivate.AddonAuthor(this.addon.author);
+ },
+
+ get userDisabled() {
+ return this.appDisabled;
+ },
+};
+
+function createMockAddonProvider(aName) {
+ let mockProvider = {
+ _addons: [],
+
+ get name() {
+ return aName;
+ },
+
+ addAddon(aAddon) {
+ this._addons.push(aAddon);
+ AddonManagerPrivate.callAddonListeners(
+ "onInstalled",
+ new MockAddonWrapper(aAddon)
+ );
+ },
+
+ async getAddonsByTypes(aTypes) {
+ return this._addons
+ .filter(a => !aTypes || aTypes.includes(a.type))
+ .map(a => new MockAddonWrapper(a));
+ },
+
+ shutdown() {
+ return Promise.resolve();
+ },
+ };
+
+ return mockProvider;
+}
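+
+// Hypothetical, unused helper (a sketch only): a provider built with
+// createMockAddonProvider() would normally be hooked up through
+// AddonManagerPrivate.registerProvider, after which addAddon() makes the
+// mock add-on visible to the environment code under test.
+function sketchRegisterMockProvider() {
+  let provider = createMockAddonProvider("Fake provider");
+  AddonManagerPrivate.registerProvider(provider);
+  provider.addAddon({
+    id: "fake@tests.mozilla.org",
+    name: "Fake add-on",
+    version: "1.0",
+    author: "Nobody",
+  });
+  return provider;
+}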
+
+function spoofGfxAdapter() {
+ try {
+ let gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfoDebug);
+ gfxInfo.fireTestProcess();
+ gfxInfo.spoofVendorID(GFX_VENDOR_ID);
+ gfxInfo.spoofDeviceID(GFX_DEVICE_ID);
+ } catch (x) {
+ // If we can't test gfxInfo, that's fine, we'll note it later.
+ }
+}
+
+function spoofProfileReset() {
+ return CommonUtils.writeJSON(
+ {
+ created: PROFILE_CREATION_DATE_MS,
+ reset: PROFILE_RESET_DATE_MS,
+ firstUse: PROFILE_FIRST_USE_MS,
+ },
+ OS.Path.join(OS.Constants.Path.profileDir, "times.json")
+ );
+}
+
+function spoofPartnerInfo() {
+ let prefsToSpoof = {};
+ prefsToSpoof["distribution.id"] = DISTRIBUTION_ID;
+ prefsToSpoof["distribution.version"] = DISTRIBUTION_VERSION;
+ prefsToSpoof["app.distributor"] = DISTRIBUTOR_NAME;
+ prefsToSpoof["app.distributor.channel"] = DISTRIBUTOR_CHANNEL;
+ prefsToSpoof["app.partner.test"] = PARTNER_NAME;
+ prefsToSpoof["mozilla.partner.id"] = PARTNER_ID;
+
+ // Spoof the preferences.
+ for (let pref in prefsToSpoof) {
+ Preferences.set(pref, prefsToSpoof[pref]);
+ }
+}
+
+async function spoofAttributionData() {
+ if (gIsWindows || gIsMac) {
+ AttributionCode._clearCache();
+ await AttributionCode.writeAttributionFile(ATTRIBUTION_CODE);
+ }
+}
+
+function cleanupAttributionData() {
+ if (gIsWindows || gIsMac) {
+ AttributionCode.attributionFile.remove(false);
+ AttributionCode._clearCache();
+ }
+}
+
+/**
+ * Check that a value is a string and not empty.
+ *
+ * @param aValue The variable to check.
+ * @return True if |aValue| has type "string" and is not empty, False otherwise.
+ */
+function checkString(aValue) {
+ return typeof aValue == "string" && aValue != "";
+}
+
+/**
+ * If value is non-null, check if it's a valid string.
+ *
+ * @param aValue The variable to check.
+ * @return True if it's null or a valid string, false if it's non-null and an invalid
+ * string.
+ */
+function checkNullOrString(aValue) {
+ if (aValue) {
+ return checkString(aValue);
+ } else if (aValue === null) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * If value is non-null, check if it's a boolean.
+ *
+ * @param aValue The variable to check.
+ * @return True if it's null or a valid boolean, false if it's non-null and an invalid
+ * boolean.
+ */
+function checkNullOrBool(aValue) {
+ return aValue === null || typeof aValue == "boolean";
+}
+
+function checkBuildSection(data) {
+ const expectedInfo = {
+ applicationId: APP_ID,
+ applicationName: APP_NAME,
+ buildId: gAppInfo.appBuildID,
+ version: APP_VERSION,
+ vendor: "Mozilla",
+ platformVersion: PLATFORM_VERSION,
+ xpcomAbi: "noarch-spidermonkey",
+ };
+
+ Assert.ok("build" in data, "There must be a build section in Environment.");
+
+ for (let f in expectedInfo) {
+ Assert.ok(checkString(data.build[f]), f + " must be a valid string.");
+ Assert.equal(
+ data.build[f],
+ expectedInfo[f],
+ f + " must have the correct value."
+ );
+ }
+
+ // Make sure architecture is in the environment.
+ Assert.ok(checkString(data.build.architecture));
+
+ Assert.equal(
+ data.build.updaterAvailable,
+ AppConstants.MOZ_UPDATER,
+ "build.updaterAvailable must equal AppConstants.MOZ_UPDATER"
+ );
+}
+
+function checkSettingsSection(data) {
+ const EXPECTED_FIELDS_TYPES = {
+ blocklistEnabled: "boolean",
+ e10sEnabled: "boolean",
+ e10sMultiProcesses: "number",
+ fissionEnabled: "boolean",
+ intl: "object",
+ locale: "string",
+ telemetryEnabled: "boolean",
+ update: "object",
+ userPrefs: "object",
+ };
+
+ Assert.ok(
+ "settings" in data,
+ "There must be a settings section in Environment."
+ );
+
+ for (let f in EXPECTED_FIELDS_TYPES) {
+ Assert.equal(
+ typeof data.settings[f],
+ EXPECTED_FIELDS_TYPES[f],
+ f + " must have the correct type."
+ );
+ }
+
+ // This property is not always present, but when it is, it must be a number.
+ if ("launcherProcessState" in data.settings) {
+ Assert.equal(typeof data.settings.launcherProcessState, "number");
+ }
+
+ // Check "addonCompatibilityCheckEnabled" separately.
+ Assert.equal(
+ data.settings.addonCompatibilityCheckEnabled,
+ AddonManager.checkCompatibility
+ );
+
+ // Check "isDefaultBrowser" separately, as it is not available on Android an can either be
+ // null or boolean on other platforms.
+ if (gIsAndroid) {
+ Assert.ok(
+ !("isDefaultBrowser" in data.settings),
+ "Must not be available on Android."
+ );
+ } else if ("isDefaultBrowser" in data.settings) {
+ // isDefaultBrowser might not be available in the payload, since it's
+ // gathered after the session was restored.
+ Assert.ok(checkNullOrBool(data.settings.isDefaultBrowser));
+ }
+
+ // Check "channel" separately, as it can either be null or string.
+ let update = data.settings.update;
+ Assert.ok(checkNullOrString(update.channel));
+ Assert.equal(typeof update.enabled, "boolean");
+ Assert.equal(typeof update.autoDownload, "boolean");
+
+ // Check "defaultSearchEngine" separately, as it can either be undefined or string.
+ if ("defaultSearchEngine" in data.settings) {
+ checkString(data.settings.defaultSearchEngine);
+ Assert.equal(typeof data.settings.defaultSearchEngineData, "object");
+ }
+
+ if ("defaultPrivateSearchEngineData" in data.settings) {
+ Assert.equal(typeof data.settings.defaultPrivateSearchEngineData, "object");
+ }
+
+ if ((gIsWindows || gIsMac) && AppConstants.MOZ_BUILD_APP == "browser") {
+ Assert.equal(typeof data.settings.attribution, "object");
+ Assert.equal(data.settings.attribution.source, "google.com");
+ }
+
+ checkIntlSettings(data.settings);
+}
+
+function checkIntlSettings({ intl }) {
+ let fields = [
+ "requestedLocales",
+ "availableLocales",
+ "appLocales",
+ "acceptLanguages",
+ ];
+
+ for (let field of fields) {
+ Assert.ok(Array.isArray(intl[field]), `${field} is an array`);
+ }
+
+ // These fields may be null if they aren't ready yet. This is mostly to deal
+ // with test failures on Android, but they aren't guaranteed to exist.
+ let optionalFields = ["systemLocales", "regionalPrefsLocales"];
+
+ for (let field of optionalFields) {
+ let isArray = Array.isArray(intl[field]);
+ let isNull = intl[field] === null;
+ Assert.ok(isArray || isNull, `${field} is an array or null`);
+ }
+}
+
+function checkProfileSection(data) {
+ Assert.ok(
+ "profile" in data,
+ "There must be a profile section in Environment."
+ );
+ Assert.equal(
+ data.profile.creationDate,
+ truncateToDays(PROFILE_CREATION_DATE_MS)
+ );
+ Assert.equal(data.profile.resetDate, truncateToDays(PROFILE_RESET_DATE_MS));
+ Assert.equal(data.profile.firstUseDate, truncateToDays(PROFILE_FIRST_USE_MS));
+}
+
+function checkPartnerSection(data, isInitial) {
+ const EXPECTED_FIELDS = {
+ distributionId: DISTRIBUTION_ID,
+ distributionVersion: DISTRIBUTION_VERSION,
+ partnerId: PARTNER_ID,
+ distributor: DISTRIBUTOR_NAME,
+ distributorChannel: DISTRIBUTOR_CHANNEL,
+ };
+
+ Assert.ok(
+ "partner" in data,
+ "There must be a partner section in Environment."
+ );
+
+ for (let f in EXPECTED_FIELDS) {
+ let expected = isInitial ? null : EXPECTED_FIELDS[f];
+ Assert.strictEqual(
+ data.partner[f],
+ expected,
+ f + " must have the correct value."
+ );
+ }
+
+ // Check that "partnerNames" exists and contains the correct element.
+ Assert.ok(Array.isArray(data.partner.partnerNames));
+ if (isInitial) {
+ Assert.equal(data.partner.partnerNames.length, 0);
+ } else {
+ Assert.ok(data.partner.partnerNames.includes(PARTNER_NAME));
+ }
+}
+
+function checkGfxAdapter(data) {
+ const EXPECTED_ADAPTER_FIELDS_TYPES = {
+ description: "string",
+ vendorID: "string",
+ deviceID: "string",
+ subsysID: "string",
+ RAM: "number",
+ driver: "string",
+ driverVendor: "string",
+ driverVersion: "string",
+ driverDate: "string",
+ GPUActive: "boolean",
+ };
+
+ for (let f in EXPECTED_ADAPTER_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+
+ if (data[f]) {
+ // Since we have a non-null value, check if it has the correct type.
+ Assert.equal(
+ typeof data[f],
+ EXPECTED_ADAPTER_FIELDS_TYPES[f],
+ f + " must have the correct type."
+ );
+ }
+ }
+}
+
+function checkSystemSection(data, assertProcessData) {
+ const EXPECTED_FIELDS = [
+ "memoryMB",
+ "cpu",
+ "os",
+ "hdd",
+ "gfx",
+ "appleModelId",
+ ];
+
+ Assert.ok("system" in data, "There must be a system section in Environment.");
+
+ // Make sure we have all the top level sections and fields.
+ for (let f of EXPECTED_FIELDS) {
+ Assert.ok(f in data.system, f + " must be available.");
+ }
+
+ Assert.ok(
+ Number.isFinite(data.system.memoryMB),
+ "MemoryMB must be a number."
+ );
+
+ if (assertProcessData) {
+ if (gIsWindows || gIsMac || gIsLinux) {
+ let EXTRA_CPU_FIELDS = [
+ "cores",
+ "model",
+ "family",
+ "stepping",
+ "l2cacheKB",
+ "l3cacheKB",
+ "speedMHz",
+ "vendor",
+ ];
+
+ for (let f of EXTRA_CPU_FIELDS) {
+ // Note this is testing TelemetryEnvironment.js only, not that the
+ // values are valid - null is the fallback.
+ Assert.ok(f in data.system.cpu, f + " must be available under cpu.");
+ }
+
+ if (gIsWindows) {
+ Assert.equal(
+ typeof data.system.isWow64,
+ "boolean",
+ "isWow64 must be available on Windows and have the correct type."
+ );
+ Assert.equal(
+ typeof data.system.isWowARM64,
+ "boolean",
+ "isWowARM64 must be available on Windows and have the correct type."
+ );
+ Assert.ok(
+ "virtualMaxMB" in data.system,
+ "virtualMaxMB must be available."
+ );
+ Assert.ok(
+ Number.isFinite(data.system.virtualMaxMB),
+ "virtualMaxMB must be a number."
+ );
+
+ for (let f of [
+ "count",
+ "model",
+ "family",
+ "stepping",
+ "l2cacheKB",
+ "l3cacheKB",
+ "speedMHz",
+ ]) {
+ Assert.ok(
+ Number.isFinite(data.system.cpu[f]),
+ f + " must be a number if non null."
+ );
+ }
+ }
+
+ // These should be numbers if they are not null
+ for (let f of [
+ "count",
+ "model",
+ "family",
+ "stepping",
+ "l2cacheKB",
+ "l3cacheKB",
+ "speedMHz",
+ ]) {
+ Assert.ok(
+ !(f in data.system.cpu) ||
+ data.system.cpu[f] === null ||
+ Number.isFinite(data.system.cpu[f]),
+ f + " must be a number if non null."
+ );
+ }
+
+ // We insist these are available
+ for (let f of ["cores"]) {
+ Assert.ok(
+ !(f in data.system.cpu) || Number.isFinite(data.system.cpu[f]),
+ f + " must be a number if non null."
+ );
+ }
+ }
+ }
+
+ let cpuData = data.system.cpu;
+
+ Assert.ok(
+ Array.isArray(cpuData.extensions),
+ "CPU extensions must be available."
+ );
+
+ let osData = data.system.os;
+ Assert.ok(checkNullOrString(osData.name));
+ Assert.ok(checkNullOrString(osData.version));
+ Assert.ok(checkNullOrString(osData.locale));
+
+ // Service pack is only available on Windows.
+ if (gIsWindows) {
+ Assert.ok(
+ Number.isFinite(osData.servicePackMajor),
+ "ServicePackMajor must be a number."
+ );
+ Assert.ok(
+ Number.isFinite(osData.servicePackMinor),
+ "ServicePackMinor must be a number."
+ );
+ if ("windowsBuildNumber" in osData) {
+ // This might not be available on all Windows platforms.
+ Assert.ok(
+ Number.isFinite(osData.windowsBuildNumber),
+ "windowsBuildNumber must be a number."
+ );
+ }
+ if ("windowsUBR" in osData) {
+ // This might not be available on all Windows platforms.
+ Assert.ok(
+ osData.windowsUBR === null || Number.isFinite(osData.windowsUBR),
+ "windowsUBR must be null or a number."
+ );
+ }
+ } else if (gIsAndroid) {
+ Assert.ok(checkNullOrString(osData.kernelVersion));
+ }
+
+ for (let disk of EXPECTED_HDD_FIELDS) {
+ Assert.ok(checkNullOrString(data.system.hdd[disk].model));
+ Assert.ok(checkNullOrString(data.system.hdd[disk].revision));
+ Assert.ok(checkNullOrString(data.system.hdd[disk].type));
+ }
+
+ let gfxData = data.system.gfx;
+ Assert.ok("D2DEnabled" in gfxData);
+ Assert.ok("DWriteEnabled" in gfxData);
+ Assert.ok("Headless" in gfxData);
+ Assert.ok("EmbeddedInFirefoxReality" in gfxData);
+ // DWriteVersion is disabled due to main thread jank and will be enabled
+ // again as part of bug 1154500.
+ // Assert.ok("DWriteVersion" in gfxData);
+ if (gIsWindows) {
+ Assert.equal(typeof gfxData.D2DEnabled, "boolean");
+ Assert.equal(typeof gfxData.DWriteEnabled, "boolean");
+ Assert.equal(typeof gfxData.EmbeddedInFirefoxReality, "boolean");
+ // As above, will be enabled again as part of bug 1154500.
+ // Assert.ok(checkString(gfxData.DWriteVersion));
+ }
+
+ Assert.ok("adapters" in gfxData);
+ Assert.ok(
+ !!gfxData.adapters.length,
+ "There must be at least one GFX adapter."
+ );
+ for (let adapter of gfxData.adapters) {
+ checkGfxAdapter(adapter);
+ }
+ Assert.equal(typeof gfxData.adapters[0].GPUActive, "boolean");
+ Assert.ok(
+ gfxData.adapters[0].GPUActive,
+ "The first GFX adapter must be active."
+ );
+
+ Assert.ok(Array.isArray(gfxData.monitors));
+ if (gIsWindows || gIsMac || gIsLinux) {
+ Assert.ok(gfxData.monitors.length >= 1, "There is at least one monitor.");
+ Assert.equal(typeof gfxData.monitors[0].screenWidth, "number");
+ Assert.equal(typeof gfxData.monitors[0].screenHeight, "number");
+ if (gIsWindows) {
+ Assert.equal(typeof gfxData.monitors[0].refreshRate, "number");
+ Assert.equal(typeof gfxData.monitors[0].pseudoDisplay, "boolean");
+ }
+ if (gIsMac) {
+ Assert.equal(typeof gfxData.monitors[0].scale, "number");
+ }
+ }
+
+ Assert.equal(typeof gfxData.features, "object");
+ Assert.equal(typeof gfxData.features.compositor, "string");
+
+ Assert.equal(typeof gfxData.features.gpuProcess, "object");
+ Assert.equal(typeof gfxData.features.gpuProcess.status, "string");
+
+ try {
+ // If we've not got nsIGfxInfoDebug, then this will throw and stop us doing
+ // this test.
+ let gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfoDebug);
+
+ if (gIsWindows || gIsMac) {
+ Assert.equal(GFX_VENDOR_ID, gfxData.adapters[0].vendorID);
+ Assert.equal(GFX_DEVICE_ID, gfxData.adapters[0].deviceID);
+ }
+
+ let features = gfxInfo.getFeatures();
+ Assert.equal(features.compositor, gfxData.features.compositor);
+ Assert.equal(
+ features.gpuProcess.status,
+ gfxData.features.gpuProcess.status
+ );
+ Assert.equal(features.opengl, gfxData.features.opengl);
+ Assert.equal(features.webgl, gfxData.features.webgl);
+ } catch (e) {}
+
+ if (gIsMac) {
+ Assert.ok(checkString(data.system.appleModelId));
+ } else {
+ Assert.ok(checkNullOrString(data.system.appleModelId));
+ }
+
+ // This feature is only available on Windows 8+
+ if (AppConstants.isPlatformAndVersionAtLeast("win", "6.2")) {
+ Assert.ok("sec" in data.system, "sec must be available under data.system");
+
+ let SEC_FIELDS = ["antivirus", "antispyware", "firewall"];
+ for (let f of SEC_FIELDS) {
+ Assert.ok(
+ f in data.system.sec,
+ f + " must be available under data.system.sec"
+ );
+
+ let value = data.system.sec[f];
+ // value is null on Windows Server
+ Assert.ok(
+ value === null || Array.isArray(value),
+ f + " must be either null or an array"
+ );
+ if (Array.isArray(value)) {
+ for (let product of value) {
+ Assert.equal(
+ typeof product,
+ "string",
+ "Each element of " + f + " must be a string"
+ );
+ }
+ }
+ }
+ }
+}
+
+function checkActiveAddon(data, partialRecord) {
+ let signedState = "number";
+ // system add-ons have an undefined signedState
+ if (data.isSystem) {
+ signedState = "undefined";
+ }
+
+ const EXPECTED_ADDON_FIELDS_TYPES = {
+ version: "string",
+ scope: "number",
+ type: "string",
+ updateDay: "number",
+ isSystem: "boolean",
+ isWebExtension: "boolean",
+ multiprocessCompatible: "boolean",
+ };
+
+ const FULL_ADDON_FIELD_TYPES = {
+ blocklisted: "boolean",
+ name: "string",
+ userDisabled: "boolean",
+ appDisabled: "boolean",
+ foreignInstall: "boolean",
+ hasBinaryComponents: "boolean",
+ installDay: "number",
+ signedState,
+ };
+
+ let fields = EXPECTED_ADDON_FIELDS_TYPES;
+ if (!partialRecord) {
+ fields = Object.assign({}, fields, FULL_ADDON_FIELD_TYPES);
+ }
+
+ for (let [name, type] of Object.entries(fields)) {
+ Assert.ok(name in data, name + " must be available.");
+ Assert.equal(
+ typeof data[name],
+ type,
+ name + " must have the correct type."
+ );
+ }
+
+ if (!partialRecord) {
+ // We check "description" separately, as it can be null.
+ Assert.ok(checkNullOrString(data.description));
+ }
+}
+
+function checkPlugin(data) {
+ const EXPECTED_PLUGIN_FIELDS_TYPES = {
+ name: "string",
+ version: "string",
+ description: "string",
+ blocklisted: "boolean",
+ disabled: "boolean",
+ clicktoplay: "boolean",
+ updateDay: "number",
+ };
+
+ for (let f in EXPECTED_PLUGIN_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+ Assert.equal(
+ typeof data[f],
+ EXPECTED_PLUGIN_FIELDS_TYPES[f],
+ f + " must have the correct type."
+ );
+ }
+
+ Assert.ok(Array.isArray(data.mimeTypes));
+ for (let type of data.mimeTypes) {
+ Assert.ok(checkString(type));
+ }
+}
+
+function checkTheme(data) {
+ const EXPECTED_THEME_FIELDS_TYPES = {
+ id: "string",
+ blocklisted: "boolean",
+ name: "string",
+ userDisabled: "boolean",
+ appDisabled: "boolean",
+ version: "string",
+ scope: "number",
+ foreignInstall: "boolean",
+ installDay: "number",
+ updateDay: "number",
+ };
+
+ for (let f in EXPECTED_THEME_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+ Assert.equal(
+ typeof data[f],
+ EXPECTED_THEME_FIELDS_TYPES[f],
+ f + " must have the correct type."
+ );
+ }
+
+ // We check "description" separately, as it can be null.
+ Assert.ok(checkNullOrString(data.description));
+}
+
+function checkActiveGMPlugin(data) {
+ // GMP plugin version defaults to null until GMPDownloader runs to update it.
+ if (data.version) {
+ Assert.equal(typeof data.version, "string");
+ }
+ Assert.equal(typeof data.userDisabled, "boolean");
+ Assert.equal(typeof data.applyBackgroundUpdates, "number");
+}
+
+function checkAddonsSection(data, expectBrokenAddons, partialAddonsRecords) {
+ const EXPECTED_FIELDS = [
+ "activeAddons",
+ "theme",
+ "activePlugins",
+ "activeGMPlugins",
+ ];
+
+ Assert.ok(
+ "addons" in data,
+ "There must be an addons section in Environment."
+ );
+ for (let f of EXPECTED_FIELDS) {
+ Assert.ok(f in data.addons, f + " must be available.");
+ }
+
+ // Check the active addons, if available.
+ if (!expectBrokenAddons) {
+ let activeAddons = data.addons.activeAddons;
+ for (let addon in activeAddons) {
+ checkActiveAddon(activeAddons[addon], partialAddonsRecords);
+ }
+ }
+
+ // Check "theme" structure.
+ if (Object.keys(data.addons.theme).length !== 0) {
+ checkTheme(data.addons.theme);
+ }
+
+ // Check the active plugins.
+ Assert.ok(Array.isArray(data.addons.activePlugins));
+ for (let plugin of data.addons.activePlugins) {
+ checkPlugin(plugin);
+ }
+
+ // Check active GMPlugins
+ let activeGMPlugins = data.addons.activeGMPlugins;
+ for (let gmPlugin in activeGMPlugins) {
+ checkActiveGMPlugin(activeGMPlugins[gmPlugin]);
+ }
+}
+
+function checkExperimentsSection(data) {
+ // We don't expect the experiments section to be always available.
+ let experiments = data.experiments || {};
+ if (!Object.keys(experiments).length) {
+ return;
+ }
+
+ for (let id in experiments) {
+ Assert.ok(checkString(id), id + " must be a valid string.");
+
+ // Check that we have valid experiment info.
+ let experimentData = experiments[id];
+ Assert.ok(
+ "branch" in experimentData,
+ "The experiment must have branch data."
+ );
+ Assert.ok(
+ checkString(experimentData.branch),
+ "The experiment data must be valid."
+ );
+ if ("type" in experimentData) {
+ Assert.ok(checkString(experimentData.type));
+ }
+ }
+}
+
+function checkEnvironmentData(data, options = {}) {
+ const {
+ isInitial = false,
+ expectBrokenAddons = false,
+ assertProcessData = false,
+ } = options;
+
+ checkBuildSection(data);
+ checkSettingsSection(data);
+ checkProfileSection(data);
+ checkPartnerSection(data, isInitial);
+ checkSystemSection(data, assertProcessData);
+ checkAddonsSection(data, expectBrokenAddons);
+}
+
+add_task(async function setup() {
+ registerFakeSysInfo();
+ spoofGfxAdapter();
+ do_get_profile();
+
+ if (AppConstants.MOZ_GLEAN) {
+ // We need to ensure FOG is initialized, otherwise we will panic trying to get test values.
+ let FOG = Cc["@mozilla.org/toolkit/glean;1"].createInstance(Ci.nsIFOG);
+ FOG.initializeFOG();
+ }
+
+ // The system add-on must be installed before AddonManager is started.
+ const distroDir = FileUtils.getDir("ProfD", ["sysfeatures", "app0"], true);
+ do_get_file("system.xpi").copyTo(
+ distroDir,
+ "tel-system-xpi@tests.mozilla.org.xpi"
+ );
+ let system_addon = FileUtils.File(distroDir.path);
+ system_addon.append("tel-system-xpi@tests.mozilla.org.xpi");
+ system_addon.lastModifiedTime = SYSTEM_ADDON_INSTALL_DATE;
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+
+ // The test runs in a fresh profile so starting the AddonManager causes
+ // the addons database to be created (as does setting a new theme).
+ // For test_addonsStartup below, we want to test a "warm" startup where
+ // there is already a database on disk. Simulate that here by just
+ // restarting the AddonManager.
+ await AddonTestUtils.promiseShutdownManager();
+ await AddonTestUtils.overrideBuiltIns({ system: [] });
+ AddonTestUtils.addonStartup.remove(true);
+ await AddonTestUtils.promiseStartupManager();
+ // Override ExtensionXPCShellUtils.jsm's overriding of the pref as the
+ // search service needs it.
+ Services.prefs.clearUserPref("services.settings.default_bucket");
+
+ // Register a fake plugin host for consistent flash version data.
+ registerFakePluginHost();
+
+ // Setup a webserver to serve Addons, Plugins, etc.
+ gHttpServer = new HttpServer();
+ gHttpServer.start(-1);
+ let port = gHttpServer.identity.primaryPort;
+ gHttpRoot = "http://localhost:" + port + "/";
+ gDataRoot = gHttpRoot + "data/";
+ gHttpServer.registerDirectory("/data/", do_get_cwd());
+ registerCleanupFunction(() => gHttpServer.stop(() => {}));
+
+ // Create the attribution data file, so that settings.attribution will exist.
+ // The attribution functionality only exists in Firefox.
+ if (AppConstants.MOZ_BUILD_APP == "browser") {
+ spoofAttributionData();
+ registerCleanupFunction(cleanupAttributionData);
+ }
+
+ await spoofProfileReset();
+ await TelemetryEnvironment.delayedInit();
+ await SearchTestUtils.useTestEngines("data", "search-extensions");
+});
+
+add_task(async function test_checkEnvironment() {
+ // During startup we have partial addon records.
+ // First make sure we haven't yet read the addons DB, then test that
+ // we have some partial addons data.
+ Assert.equal(
+ AddonManagerPrivate.isDBLoaded(),
+ false,
+ "addons database is not loaded"
+ );
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkAddonsSection(data, false, true);
+
+ // Check that settings.intl is lazily loaded.
+ Assert.equal(
+ typeof data.settings.intl,
+ "object",
+ "intl is initially an object"
+ );
+ Assert.equal(
+ Object.keys(data.settings.intl).length,
+ 0,
+ "intl is initially empty"
+ );
+
+ // Now continue with startup.
+ let initPromise = TelemetryEnvironment.onInitialized();
+ finishAddonManagerStartup();
+
+ // Fake the delayed startup event for intl data to load.
+ fakeIntlReady();
+
+ let environmentData = await initPromise;
+ checkEnvironmentData(environmentData, { isInitial: true });
+
+ spoofPartnerInfo();
+ Services.obs.notifyObservers(null, DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC);
+
+ environmentData = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(environmentData, { assertProcessData: true });
+});
+
+add_task(async function test_prefWatchPolicies() {
+ const PREF_TEST_1 = "toolkit.telemetry.test.pref_new";
+ const PREF_TEST_2 = "toolkit.telemetry.test.pref1";
+ const PREF_TEST_3 = "toolkit.telemetry.test.pref2";
+ const PREF_TEST_4 = "toolkit.telemetry.test.pref_old";
+ const PREF_TEST_5 = "toolkit.telemetry.test.requiresRestart";
+
+ const expectedValue = "some-test-value";
+ const unexpectedValue = "unexpected-test-value";
+
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST_1, { what: TelemetryEnvironment.RECORD_PREF_VALUE }],
+ [PREF_TEST_2, { what: TelemetryEnvironment.RECORD_PREF_STATE }],
+ [PREF_TEST_3, { what: TelemetryEnvironment.RECORD_PREF_STATE }],
+ [PREF_TEST_4, { what: TelemetryEnvironment.RECORD_PREF_VALUE }],
+ [
+ PREF_TEST_5,
+ { what: TelemetryEnvironment.RECORD_PREF_VALUE, requiresRestart: true },
+ ],
+ ]);
+
+ Preferences.set(PREF_TEST_4, expectedValue);
+ Preferences.set(PREF_TEST_5, expectedValue);
+
+ // Set the Environment preferences to watch.
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ let deferred = PromiseUtils.defer();
+
+ // Check that the pref values are missing or present as expected
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_1],
+ undefined
+ );
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_4],
+ expectedValue
+ );
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_5],
+ expectedValue
+ );
+
+ TelemetryEnvironment.registerChangeListener(
+ "testWatchPrefs",
+ (reason, data) => deferred.resolve(data)
+ );
+ let oldEnvironmentData = TelemetryEnvironment.currentEnvironment;
+
+ // Trigger a change in the watched preferences.
+ Preferences.set(PREF_TEST_1, expectedValue);
+ Preferences.set(PREF_TEST_2, false);
+ Preferences.set(PREF_TEST_5, unexpectedValue);
+ let eventEnvironmentData = await deferred.promise;
+
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("testWatchPrefs");
+
+ // Check environment contains the correct data.
+ Assert.deepEqual(oldEnvironmentData, eventEnvironmentData);
+ let userPrefs = TelemetryEnvironment.currentEnvironment.settings.userPrefs;
+
+ Assert.equal(
+ userPrefs[PREF_TEST_1],
+ expectedValue,
+ "Environment contains the correct preference value."
+ );
+ Assert.equal(
+ userPrefs[PREF_TEST_2],
+ "<user-set>",
+ "Report that the pref was user set but the value is not shown."
+ );
+ Assert.ok(
+ !(PREF_TEST_3 in userPrefs),
+ "Do not report if preference not user set."
+ );
+ Assert.equal(
+ userPrefs[PREF_TEST_5],
+ expectedValue,
+ "The pref value in the environment data should still be the same"
+ );
+});
+
+add_task(async function test_prefWatch_prefReset() {
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_STATE }],
+ ]);
+
+ // Set the preference to a non-default value.
+ Preferences.set(PREF_TEST, false);
+
+ // Set the Environment preferences to watch.
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "testWatchPrefs_reset",
+ deferred.resolve
+ );
+
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST],
+ "<user-set>"
+ );
+
+ // Trigger a change in the watched preferences.
+ Preferences.reset(PREF_TEST);
+ await deferred.promise;
+
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST],
+ undefined
+ );
+
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("testWatchPrefs_reset");
+});
+
+add_task(async function test_prefDefault() {
+ const PREF_TEST = "toolkit.telemetry.test.defaultpref1";
+ const expectedValue = "some-test-value";
+
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_DEFAULTPREF_VALUE }],
+ ]);
+
+ // Set the preference to a default value.
+ Services.prefs.getDefaultBranch(null).setCharPref(PREF_TEST, expectedValue);
+
+ // Set the Environment preferences to watch.
+ // We're not watching, but this function does the setup we need.
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST],
+ expectedValue
+ );
+});
+
+add_task(async function test_prefDefaultState() {
+ const PREF_TEST = "toolkit.telemetry.test.defaultpref2";
+ const expectedValue = "some-test-value";
+
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_DEFAULTPREF_STATE }],
+ ]);
+
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+
+ Assert.equal(
+ PREF_TEST in TelemetryEnvironment.currentEnvironment.settings.userPrefs,
+ false
+ );
+
+ // Set the preference to a default value.
+ Services.prefs.getDefaultBranch(null).setCharPref(PREF_TEST, expectedValue);
+
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST],
+ "<set>"
+ );
+});
+
+add_task(async function test_prefInvalid() {
+ const PREF_TEST_1 = "toolkit.telemetry.test.invalid1";
+ const PREF_TEST_2 = "toolkit.telemetry.test.invalid2";
+
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST_1, { what: TelemetryEnvironment.RECORD_DEFAULTPREF_VALUE }],
+ [PREF_TEST_2, { what: TelemetryEnvironment.RECORD_DEFAULTPREF_STATE }],
+ ]);
+
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_1],
+ undefined
+ );
+ Assert.strictEqual(
+ TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_2],
+ undefined
+ );
+});
+
+add_task(async function test_addonsWatch_InterestingChange() {
+ const ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+ const ADDON_ID = "tel-restartless-webext@tests.mozilla.org";
+ // We only expect a single notification for each install, uninstall, enable, disable.
+ const EXPECTED_NOTIFICATIONS = 4;
+
+ let receivedNotifications = 0;
+
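+  // Helper: register a change listener that resolves once a single
+  // "addons-changed" notification is received.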
+ let registerCheckpointPromise = aExpected => {
+ return new Promise(resolve =>
+ TelemetryEnvironment.registerChangeListener(
+ "testWatchAddons_Changes" + aExpected,
+ (reason, data) => {
+ Assert.equal(reason, "addons-changed");
+ receivedNotifications++;
+ resolve();
+ }
+ )
+ );
+ };
+
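+  // Helper: check how many notifications were received so far and unregister
+  // the matching listener.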
+ let assertCheckpoint = aExpected => {
+ Assert.equal(receivedNotifications, aExpected);
+ TelemetryEnvironment.unregisterChangeListener(
+ "testWatchAddons_Changes" + aExpected
+ );
+ };
+
+ // Test for receiving one notification after each change.
+ let checkpointPromise = registerCheckpointPromise(1);
+ await installXPIFromURL(ADDON_INSTALL_URL);
+ await checkpointPromise;
+ assertCheckpoint(1);
+ Assert.ok(
+ ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons
+ );
+
+ checkpointPromise = registerCheckpointPromise(2);
+ let addon = await AddonManager.getAddonByID(ADDON_ID);
+ await addon.disable();
+ await checkpointPromise;
+ assertCheckpoint(2);
+ Assert.ok(
+ !(ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons)
+ );
+
+ checkpointPromise = registerCheckpointPromise(3);
+ let startupPromise = AddonTestUtils.promiseWebExtensionStartup(ADDON_ID);
+ await addon.enable();
+ await checkpointPromise;
+ assertCheckpoint(3);
+ Assert.ok(
+ ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons
+ );
+ await startupPromise;
+
+ checkpointPromise = registerCheckpointPromise(4);
+ (await AddonManager.getAddonByID(ADDON_ID)).uninstall();
+ await checkpointPromise;
+ assertCheckpoint(4);
+ Assert.ok(
+ !(ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons)
+ );
+
+ Assert.equal(
+ receivedNotifications,
+ EXPECTED_NOTIFICATIONS,
+ "We must only receive the notifications we expect."
+ );
+});
+
+add_task(async function test_pluginsWatch_Add() {
+ if (!gIsFirefox) {
+ Assert.ok(true, "Skipping: there is no Plugin Manager on Android.");
+ return;
+ }
+
+ Assert.equal(
+ TelemetryEnvironment.currentEnvironment.addons.activePlugins.length,
+ 1
+ );
+
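+  // Add a fake plugin to the mocked plugin host to simulate a plugin install.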
+ let newPlugin = new PluginTag(
+ PLUGIN2_NAME,
+ PLUGIN2_DESC,
+ PLUGIN2_VERSION,
+ true
+ );
+ gInstalledPlugins.push(newPlugin);
+
+ let receivedNotifications = 0;
+ let callback = (reason, data) => {
+ receivedNotifications++;
+ };
+ TelemetryEnvironment.registerChangeListener("testWatchPlugins_Add", callback);
+
+ Services.obs.notifyObservers(null, PLUGIN_UPDATED_TOPIC);
+
+ await ContentTaskUtils.waitForCondition(() => {
+ return (
+ TelemetryEnvironment.currentEnvironment.addons.activePlugins.length == 2
+ );
+ });
+
+ TelemetryEnvironment.unregisterChangeListener("testWatchPlugins_Add");
+
+ Assert.equal(
+ receivedNotifications,
+ 0,
+ "We must not receive any notifications."
+ );
+});
+
+add_task(async function test_pluginsWatch_Remove() {
+ if (!gIsFirefox) {
+ Assert.ok(true, "Skipping: there is no Plugin Manager on Android.");
+ return;
+ }
+
+ Assert.equal(
+ TelemetryEnvironment.currentEnvironment.addons.activePlugins.length,
+ 2
+ );
+
+ // Find the test plugin.
+ let plugin = gInstalledPlugins.find(p => p.name == PLUGIN2_NAME);
+ Assert.ok(plugin, "The test plugin must exist.");
+
+ // Remove it from the PluginHost.
+ gInstalledPlugins = gInstalledPlugins.filter(p => p != plugin);
+
+ let receivedNotifications = 0;
+ let callback = () => {
+ receivedNotifications++;
+ };
+ TelemetryEnvironment.registerChangeListener(
+ "testWatchPlugins_Remove",
+ callback
+ );
+
+ Services.obs.notifyObservers(null, PLUGIN_UPDATED_TOPIC);
+
+ await ContentTaskUtils.waitForCondition(() => {
+ return (
+ TelemetryEnvironment.currentEnvironment.addons.activePlugins.length == 1
+ );
+ });
+
+ TelemetryEnvironment.unregisterChangeListener("testWatchPlugins_Remove");
+
+ Assert.equal(
+ receivedNotifications,
+ 0,
+ "We must not receive any notifications."
+ );
+});
+
+add_task(async function test_addonsWatch_NotInterestingChange() {
+  // We are not interested in dictionary add-on changes.
+ const DICTIONARY_ADDON_INSTALL_URL = gDataRoot + "dictionary.xpi";
+ const INTERESTING_ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+
+ let receivedNotification = false;
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("testNotInteresting", () => {
+ Assert.ok(
+ !receivedNotification,
+ "Should not receive multiple notifications"
+ );
+ receivedNotification = true;
+ deferred.resolve();
+ });
+
+ let dictionaryAddon = await installXPIFromURL(DICTIONARY_ADDON_INSTALL_URL);
+ let interestingAddon = await installXPIFromURL(INTERESTING_ADDON_INSTALL_URL);
+
+ await deferred.promise;
+ Assert.ok(
+ !(
+ "telemetry-dictionary@tests.mozilla.org" in
+ TelemetryEnvironment.currentEnvironment.addons.activeAddons
+ ),
+ "Dictionaries should not appear in active addons."
+ );
+
+ TelemetryEnvironment.unregisterChangeListener("testNotInteresting");
+
+ dictionaryAddon.uninstall();
+ await interestingAddon.startupPromise;
+ interestingAddon.uninstall();
+});
+
+add_task(async function test_addonsAndPlugins() {
+ const ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+ const ADDON_ID = "tel-restartless-webext@tests.mozilla.org";
+ const ADDON_INSTALL_DATE = truncateToDays(Date.now());
+ const EXPECTED_ADDON_DATA = {
+ blocklisted: false,
+ description: "A restartless addon which gets enabled without a reboot.",
+ name: "XPI Telemetry Restartless Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: ADDON_INSTALL_DATE,
+ updateDay: ADDON_INSTALL_DATE,
+ signedState: AddonManager.SIGNEDSTATE_PRIVILEGED,
+ isSystem: false,
+ isWebExtension: true,
+ multiprocessCompatible: true,
+ };
+ const SYSTEM_ADDON_ID = "tel-system-xpi@tests.mozilla.org";
+ const EXPECTED_SYSTEM_ADDON_DATA = {
+ blocklisted: false,
+ description: "A system addon which is shipped with Firefox.",
+ name: "XPI Telemetry System Add-on Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: truncateToDays(SYSTEM_ADDON_INSTALL_DATE),
+ updateDay: truncateToDays(SYSTEM_ADDON_INSTALL_DATE),
+ signedState: undefined,
+ isSystem: true,
+ isWebExtension: true,
+ multiprocessCompatible: true,
+ };
+
+ const WEBEXTENSION_ADDON_ID = "tel-webextension-xpi@tests.mozilla.org";
+ const WEBEXTENSION_ADDON_INSTALL_DATE = truncateToDays(Date.now());
+ const EXPECTED_WEBEXTENSION_ADDON_DATA = {
+ blocklisted: false,
+ description: "A webextension addon.",
+ name: "XPI Telemetry WebExtension Add-on Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: WEBEXTENSION_ADDON_INSTALL_DATE,
+ updateDay: WEBEXTENSION_ADDON_INSTALL_DATE,
+ signedState: AddonManager.SIGNEDSTATE_PRIVILEGED,
+ isSystem: false,
+ isWebExtension: true,
+ multiprocessCompatible: true,
+ };
+
+ const EXPECTED_PLUGIN_DATA = {
+ name: FLASH_PLUGIN_NAME,
+ version: FLASH_PLUGIN_VERSION,
+ description: FLASH_PLUGIN_DESC,
+ blocklisted: false,
+ disabled: false,
+ clicktoplay: true,
+ };
+
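+  // Register a listener so we can wait for the environment to pick up the
+  // add-on changes before inspecting it.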
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "test_WebExtension",
+ (reason, data) => {
+ Assert.equal(reason, "addons-changed");
+ deferred.resolve();
+ }
+ );
+
+ // Install an add-on so we have some data.
+ let addon = await installXPIFromURL(ADDON_INSTALL_URL);
+
+ // Install a webextension as well.
+ ExtensionTestUtils.init(this);
+
+ let webextension = ExtensionTestUtils.loadExtension({
+ useAddonManager: "permanent",
+ manifest: {
+ name: "XPI Telemetry WebExtension Add-on Test",
+ description: "A webextension addon.",
+ version: "1.0",
+ applications: {
+ gecko: {
+ id: WEBEXTENSION_ADDON_ID,
+ },
+ },
+ },
+ });
+
+ await webextension.startup();
+ await deferred.promise;
+ TelemetryEnvironment.unregisterChangeListener("test_WebExtension");
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // Check addon data.
+ Assert.ok(
+ ADDON_ID in data.addons.activeAddons,
+ "We must have one active addon."
+ );
+ let targetAddon = data.addons.activeAddons[ADDON_ID];
+ for (let f in EXPECTED_ADDON_DATA) {
+ Assert.equal(
+ targetAddon[f],
+ EXPECTED_ADDON_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ // Check system add-on data.
+ Assert.ok(
+ SYSTEM_ADDON_ID in data.addons.activeAddons,
+ "We must have one active system addon."
+ );
+ let targetSystemAddon = data.addons.activeAddons[SYSTEM_ADDON_ID];
+ for (let f in EXPECTED_SYSTEM_ADDON_DATA) {
+ Assert.equal(
+ targetSystemAddon[f],
+ EXPECTED_SYSTEM_ADDON_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ // Check webextension add-on data.
+ Assert.ok(
+ WEBEXTENSION_ADDON_ID in data.addons.activeAddons,
+ "We must have one active webextension addon."
+ );
+ let targetWebExtensionAddon = data.addons.activeAddons[WEBEXTENSION_ADDON_ID];
+ for (let f in EXPECTED_WEBEXTENSION_ADDON_DATA) {
+ Assert.equal(
+ targetWebExtensionAddon[f],
+ EXPECTED_WEBEXTENSION_ADDON_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ await webextension.unload();
+
+ // Check plugin data.
+ Assert.equal(
+ data.addons.activePlugins.length,
+ 1,
+ "We must have only one active plugin."
+ );
+ let targetPlugin = data.addons.activePlugins[0];
+ for (let f in EXPECTED_PLUGIN_DATA) {
+ Assert.equal(
+ targetPlugin[f],
+ EXPECTED_PLUGIN_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ // Check plugin mime types.
+ Assert.ok(targetPlugin.mimeTypes.find(m => m == PLUGIN_MIME_TYPE1));
+ Assert.ok(targetPlugin.mimeTypes.find(m => m == PLUGIN_MIME_TYPE2));
+ Assert.ok(!targetPlugin.mimeTypes.find(m => m == "Not There."));
+
+ // Uninstall the addon.
+ await addon.startupPromise;
+ await addon.uninstall();
+});
+
+add_task(async function test_signedAddon() {
+ AddonTestUtils.useRealCertChecks = true;
+
+ const ADDON_INSTALL_URL = gDataRoot + "signed-webext.xpi";
+ const ADDON_ID = "tel-signed-webext@tests.mozilla.org";
+ const ADDON_INSTALL_DATE = truncateToDays(Date.now());
+ const EXPECTED_ADDON_DATA = {
+ blocklisted: false,
+ description: "A signed webextension",
+ name: "XPI Telemetry Signed Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: ADDON_INSTALL_DATE,
+ updateDay: ADDON_INSTALL_DATE,
+ signedState: AddonManager.SIGNEDSTATE_SIGNED,
+ };
+
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "test_signedAddon",
+ deferred.resolve
+ );
+
+ // Install the addon.
+ let addon = await installXPIFromURL(ADDON_INSTALL_URL);
+
+ await deferred.promise;
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("test_signedAddon");
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // Check addon data.
+ Assert.ok(
+ ADDON_ID in data.addons.activeAddons,
+ "Add-on should be in the environment."
+ );
+ let targetAddon = data.addons.activeAddons[ADDON_ID];
+ for (let f in EXPECTED_ADDON_DATA) {
+ Assert.equal(
+ targetAddon[f],
+ EXPECTED_ADDON_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ AddonTestUtils.useRealCertChecks = false;
+ await addon.startupPromise;
+ await addon.uninstall();
+});
+
+add_task(async function test_addonsFieldsLimit() {
+ const ADDON_INSTALL_URL = gDataRoot + "long-fields.xpi";
+ const ADDON_ID = "tel-longfields-webext@tests.mozilla.org";
+
+ // Install the addon and wait for the TelemetryEnvironment to pick it up.
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "test_longFieldsAddon",
+ deferred.resolve
+ );
+ let addon = await installXPIFromURL(ADDON_INSTALL_URL);
+ await deferred.promise;
+ TelemetryEnvironment.unregisterChangeListener("test_longFieldsAddon");
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // Check that the addon is available and that the string fields are limited.
+ Assert.ok(
+ ADDON_ID in data.addons.activeAddons,
+ "Add-on should be in the environment."
+ );
+ let targetAddon = data.addons.activeAddons[ADDON_ID];
+
+ // TelemetryEnvironment limits the length of string fields for activeAddons to 100 chars,
+ // to mitigate misbehaving addons.
+ Assert.lessOrEqual(
+ targetAddon.version.length,
+ 100,
+ "The version string must have been limited"
+ );
+ Assert.lessOrEqual(
+ targetAddon.name.length,
+ 100,
+ "The name string must have been limited"
+ );
+ Assert.lessOrEqual(
+ targetAddon.description.length,
+ 100,
+ "The description string must have been limited"
+ );
+
+ await addon.startupPromise;
+ await addon.uninstall();
+});
+
+add_task(async function test_collectionWithbrokenAddonData() {
+ const BROKEN_ADDON_ID = "telemetry-test2.example.com@services.mozilla.org";
+ const BROKEN_MANIFEST = {
+ id: "telemetry-test2.example.com@services.mozilla.org",
+ name: "telemetry broken addon",
+ origin: "https://telemetry-test2.example.com",
+ version: 1, // This is intentionally not a string.
+ signedState: AddonManager.SIGNEDSTATE_SIGNED,
+ type: "extension",
+ };
+
+ const ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+ const ADDON_ID = "tel-restartless-webext@tests.mozilla.org";
+ const ADDON_INSTALL_DATE = truncateToDays(Date.now());
+ const EXPECTED_ADDON_DATA = {
+ blocklisted: false,
+ description: "A restartless addon which gets enabled without a reboot.",
+ name: "XPI Telemetry Restartless Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: ADDON_INSTALL_DATE,
+ updateDay: ADDON_INSTALL_DATE,
+ signedState: AddonManager.SIGNEDSTATE_MISSING,
+ };
+
+ let receivedNotifications = 0;
+
+ let registerCheckpointPromise = aExpected => {
+ return new Promise(resolve =>
+ TelemetryEnvironment.registerChangeListener(
+ "testBrokenAddon_collection" + aExpected,
+ (reason, data) => {
+ Assert.equal(reason, "addons-changed");
+ receivedNotifications++;
+ resolve();
+ }
+ )
+ );
+ };
+
+ let assertCheckpoint = aExpected => {
+ Assert.equal(receivedNotifications, aExpected);
+ TelemetryEnvironment.unregisterChangeListener(
+ "testBrokenAddon_collection" + aExpected
+ );
+ };
+
+ // Register the broken provider and install the broken addon.
+ let checkpointPromise = registerCheckpointPromise(1);
+ let brokenAddonProvider = createMockAddonProvider(
+ "Broken Extensions Provider"
+ );
+ AddonManagerPrivate.registerProvider(brokenAddonProvider);
+ brokenAddonProvider.addAddon(BROKEN_MANIFEST);
+ await checkpointPromise;
+ assertCheckpoint(1);
+
+ // Now install an addon which returns the correct information.
+ checkpointPromise = registerCheckpointPromise(2);
+ let addon = await installXPIFromURL(ADDON_INSTALL_URL);
+ await checkpointPromise;
+ assertCheckpoint(2);
+
+  // Check that the new environment contains the add-on installed with the broken
+  // manifest as well as the rest of the data.
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data, { expectBrokenAddons: true });
+
+ let activeAddons = data.addons.activeAddons;
+ Assert.ok(
+ BROKEN_ADDON_ID in activeAddons,
+ "The addon with the broken manifest must be reported."
+ );
+ Assert.equal(
+ activeAddons[BROKEN_ADDON_ID].version,
+ null,
+ "null should be reported for invalid data."
+ );
+ Assert.ok(ADDON_ID in activeAddons, "The valid addon must be reported.");
+ Assert.equal(
+ activeAddons[ADDON_ID].description,
+ EXPECTED_ADDON_DATA.description,
+ "The description for the valid addon should be correct."
+ );
+
+ // Unregister the broken provider so we don't mess with other tests.
+ AddonManagerPrivate.unregisterProvider(brokenAddonProvider);
+
+ // Uninstall the valid addon.
+ await addon.startupPromise;
+ await addon.uninstall();
+});
+
+async function checkDefaultSearch(privateOn, reInitSearchService) {
+  // Configure whether a separate default engine is used for private browsing.
+ Preferences.set(
+ "browser.search.separatePrivateDefault.ui.enabled",
+ privateOn
+ );
+ Preferences.set("browser.search.separatePrivateDefault", privateOn);
+
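+  // Start from a cleanly restarted environment: the search engine defaults
+  // should not be populated yet.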
+ let data = await TelemetryEnvironment.testCleanRestart().onInitialized();
+ checkEnvironmentData(data);
+ Assert.ok(!("defaultSearchEngine" in data.settings));
+ Assert.ok(!("defaultSearchEngineData" in data.settings));
+ Assert.ok(!("defaultPrivateSearchEngine" in data.settings));
+ Assert.ok(!("defaultPrivateSearchEngineData" in data.settings));
+
+  // Load the engine definitions from the xpcshell test data: that's needed so
+  // that the search provider reports an engine identifier.
+
+ // Initialize the search service.
+ if (reInitSearchService) {
+ Services.search.wrappedJSObject.reset();
+ }
+ await Services.search.init();
+ await promiseNextTick();
+
+ // Our default engine from the JAR file has an identifier. Check if it is correctly
+ // reported.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.equal(data.settings.defaultSearchEngine, "telemetrySearchIdentifier");
+ let expectedSearchEngineData = {
+ name: "telemetrySearchIdentifier",
+ loadPath:
+ "[other]addEngineWithDetails:telemetrySearchIdentifier@search.mozilla.org",
+ origin: "default",
+ submissionURL:
+ "https://ar.wikipedia.org/wiki/%D8%AE%D8%A7%D8%B5:%D8%A8%D8%AD%D8%AB?search=&sourceId=Mozilla-search",
+ };
+ Assert.deepEqual(
+ data.settings.defaultSearchEngineData,
+ expectedSearchEngineData
+ );
+ if (privateOn) {
+ Assert.equal(
+ data.settings.defaultPrivateSearchEngine,
+ "telemetrySearchIdentifier"
+ );
+ Assert.deepEqual(
+ data.settings.defaultPrivateSearchEngineData,
+ expectedSearchEngineData,
+ "Should have the correct data for the private search engine"
+ );
+ } else {
+    Assert.ok(
+      !("defaultPrivateSearchEngine" in data.settings),
+      "Should not have the private engine name recorded, as the separate-private-default pref is off"
+    );
+    Assert.ok(
+      !("defaultPrivateSearchEngineData" in data.settings),
+      "Should not have the private engine data recorded, as the separate-private-default pref is off"
+    );
+ }
+
+ // Add a new search engine (this will have no engine identifier).
+ const SEARCH_ENGINE_ID = "telemetry_default";
+ const SEARCH_ENGINE_URL = `http://www.example.org/${
+ privateOn ? "private" : ""
+ }?search={searchTerms}`;
+ await Services.search.addEngineWithDetails(SEARCH_ENGINE_ID, {
+ method: "get",
+ template: SEARCH_ENGINE_URL,
+ });
+
+ // Register a new change listener and then wait for the search engine change to be notified.
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "testWatch_SearchDefault",
+ deferred.resolve
+ );
+ if (privateOn) {
+    // As we had no default and no other search engines, the normal-mode default
+    // would otherwise become the newly added engine. To ensure the telemetry
+    // differs between the two, enforce a different normal-mode default here.
+ const engine = await Services.search.getEngineByName(
+ "telemetrySearchIdentifier"
+ );
+ engine.hidden = false;
+ await Services.search.setDefault(engine);
+ await Services.search.setDefaultPrivate(
+ Services.search.getEngineByName(SEARCH_ENGINE_ID)
+ );
+ } else {
+ await Services.search.setDefault(
+ Services.search.getEngineByName(SEARCH_ENGINE_ID)
+ );
+ }
+ await deferred.promise;
+
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ const EXPECTED_SEARCH_ENGINE = "other-" + SEARCH_ENGINE_ID;
+ const EXPECTED_SEARCH_ENGINE_DATA = {
+ name: "telemetry_default",
+ loadPath: "[other]addEngineWithDetails:telemetry_default@test.engine",
+ origin: "verified",
+ };
+ if (privateOn) {
+ Assert.equal(
+ data.settings.defaultSearchEngine,
+ "telemetrySearchIdentifier"
+ );
+ Assert.deepEqual(
+ data.settings.defaultSearchEngineData,
+ expectedSearchEngineData
+ );
+ Assert.equal(
+ data.settings.defaultPrivateSearchEngine,
+ EXPECTED_SEARCH_ENGINE
+ );
+ Assert.deepEqual(
+ data.settings.defaultPrivateSearchEngineData,
+ EXPECTED_SEARCH_ENGINE_DATA
+ );
+ } else {
+ Assert.equal(data.settings.defaultSearchEngine, EXPECTED_SEARCH_ENGINE);
+ Assert.deepEqual(
+ data.settings.defaultSearchEngineData,
+ EXPECTED_SEARCH_ENGINE_DATA
+ );
+ }
+ TelemetryEnvironment.unregisterChangeListener("testWatch_SearchDefault");
+}
+
+add_task(async function test_defaultSearchEngine() {
+ await checkDefaultSearch(false);
+
+ // Cleanly install an engine from an xml file, and check if origin is
+ // recorded as "verified".
+ let promise = new Promise(resolve => {
+ TelemetryEnvironment.registerChangeListener(
+ "testWatch_SearchDefault",
+ resolve
+ );
+ });
+ let engine = await new Promise((resolve, reject) => {
+ Services.obs.addObserver(function obs(obsSubject, obsTopic, obsData) {
+ try {
+ let searchEngine = obsSubject.QueryInterface(Ci.nsISearchEngine);
+ info("Observed " + obsData + " for " + searchEngine.name);
+ if (
+ obsData != "engine-added" ||
+ searchEngine.name != "engine-telemetry"
+ ) {
+ return;
+ }
+
+ Services.obs.removeObserver(obs, "browser-search-engine-modified");
+ resolve(searchEngine);
+ } catch (ex) {
+ reject(ex);
+ }
+ }, "browser-search-engine-modified");
+ Services.search.addOpenSearchEngine(
+ "file://" + do_get_cwd().path + "/engine.xml",
+ null
+ );
+ });
+ await Services.search.setDefault(engine);
+ await promise;
+ TelemetryEnvironment.unregisterChangeListener("testWatch_SearchDefault");
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.deepEqual(data.settings.defaultSearchEngineData, {
+ name: "engine-telemetry",
+ loadPath: "[other]/engine.xml",
+ origin: "verified",
+ });
+
+ // Now break this engine's load path hash.
+ promise = new Promise(resolve => {
+ TelemetryEnvironment.registerChangeListener(
+ "testWatch_SearchDefault",
+ resolve
+ );
+ });
+ engine.wrappedJSObject.setAttr("loadPathHash", "broken");
+ Services.obs.notifyObservers(
+ null,
+ "browser-search-engine-modified",
+ "engine-default"
+ );
+ await promise;
+ TelemetryEnvironment.unregisterChangeListener("testWatch_SearchDefault");
+ data = TelemetryEnvironment.currentEnvironment;
+ Assert.equal(data.settings.defaultSearchEngineData.origin, "invalid");
+ await Services.search.removeEngine(engine);
+
+ const SEARCH_ENGINE_ID = "telemetry_default";
+ const EXPECTED_SEARCH_ENGINE = "other-" + SEARCH_ENGINE_ID;
+ // Work around bug 1165341: Intentionally set the default engine.
+ await Services.search.setDefault(
+ Services.search.getEngineByName(SEARCH_ENGINE_ID)
+ );
+
+ // Double-check the default for the next part of the test.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.equal(data.settings.defaultSearchEngine, EXPECTED_SEARCH_ENGINE);
+
+ // Define and reset the test preference.
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_STATE }],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+ // Watch the test preference.
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "testSearchEngine_pref",
+ deferred.resolve
+ );
+ // Trigger an environment change.
+ Preferences.set(PREF_TEST, 1);
+ await deferred.promise;
+ TelemetryEnvironment.unregisterChangeListener("testSearchEngine_pref");
+
+ // Check that the search engine information is correctly retained when prefs change.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.equal(data.settings.defaultSearchEngine, EXPECTED_SEARCH_ENGINE);
+});
+
+add_task(async function test_defaultPrivateSearchEngine() {
+ await checkDefaultSearch(true, true);
+});
+
+add_task(async function test_defaultSearchEngine_paramsChanged() {
+ let extension = await SearchTestUtils.installSearchExtension({
+ name: "TestEngine",
+ search_url: "https://www.google.com/fake1",
+ });
+
+ let promise = new Promise(resolve => {
+ TelemetryEnvironment.registerChangeListener(
+ "testWatch_SearchDefault",
+ resolve
+ );
+ });
+ let engine = Services.search.getEngineByName("TestEngine");
+ await Services.search.setDefault(engine);
+ await promise;
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.deepEqual(data.settings.defaultSearchEngineData, {
+ name: "TestEngine",
+ loadPath: "[other]addEngineWithDetails:example@tests.mozilla.org",
+ origin: "verified",
+ submissionURL: "https://www.google.com/fake1?q=",
+ });
+
+ promise = new Promise(resolve => {
+ TelemetryEnvironment.registerChangeListener(
+ "testWatch_SearchDefault",
+ resolve
+ );
+ });
+
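+  // Simulate an engine update: the manifest below changes the search URL.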
+ engine.wrappedJSObject._updateFromManifest(
+ extension.id,
+ extension.baseURI,
+ SearchTestUtils.createEngineManifest({
+ name: "TestEngine",
+ version: "1.2",
+ search_url: "https://www.google.com/fake2",
+ })
+ );
+
+ await promise;
+
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.deepEqual(data.settings.defaultSearchEngineData, {
+ name: "TestEngine",
+ loadPath: "[other]addEngineWithDetails:example@tests.mozilla.org",
+ origin: "verified",
+ submissionURL: "https://www.google.com/fake2?q=",
+ });
+
+ await extension.unload();
+});
+
+add_task(
+ { skip_if: () => AppConstants.MOZ_APP_NAME == "thunderbird" },
+ async function test_delayed_defaultBrowser() {
+ // Skip this test on Thunderbird since it is not a browser, so it cannot
+ // be the default browser.
+
+ // Make sure we don't have anything already cached for this test.
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+
+ let environmentData = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(environmentData);
+ Assert.equal(
+ environmentData.settings.isDefaultBrowser,
+ null,
+ "isDefaultBrowser must be null before the session is restored."
+ );
+
+ Services.obs.notifyObservers(null, "sessionstore-windows-restored");
+
+ environmentData = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(environmentData);
+ Assert.ok(
+ "isDefaultBrowser" in environmentData.settings,
+ "isDefaultBrowser must be available after the session is restored."
+ );
+ Assert.equal(
+ typeof environmentData.settings.isDefaultBrowser,
+ "boolean",
+ "isDefaultBrowser must be of the right type."
+ );
+
+ // Make sure pref-flipping doesn't overwrite the browser default state.
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_STATE }],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+ // Watch the test preference.
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "testDefaultBrowser_pref",
+ deferred.resolve
+ );
+ // Trigger an environment change.
+ Preferences.set(PREF_TEST, 1);
+ await deferred.promise;
+ TelemetryEnvironment.unregisterChangeListener("testDefaultBrowser_pref");
+
+ // Check that the data is still available.
+ environmentData = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(environmentData);
+ Assert.ok(
+ "isDefaultBrowser" in environmentData.settings,
+ "isDefaultBrowser must still be available after a pref is flipped."
+ );
+ }
+);
+
+add_task(async function test_osstrings() {
+ // First test that numbers in sysinfo properties are converted to string fields
+ // in system.os.
+ SysInfo.overrides = {
+ version: 1,
+ name: 2,
+ kernel_version: 3,
+ };
+
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ Assert.equal(data.system.os.version, "1");
+ Assert.equal(data.system.os.name, "2");
+ if (AppConstants.platform == "android") {
+ Assert.equal(data.system.os.kernelVersion, "3");
+ }
+
+ // Check that null values are also handled.
+ SysInfo.overrides = {
+ version: null,
+ name: null,
+ kernel_version: null,
+ };
+
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ Assert.equal(data.system.os.version, null);
+ Assert.equal(data.system.os.name, null);
+ if (AppConstants.platform == "android") {
+ Assert.equal(data.system.os.kernelVersion, null);
+ }
+
+ // Clean up.
+ SysInfo.overrides = {};
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+});
+
+add_task(async function test_experimentsAPI() {
+ const EXPERIMENT1 = "experiment-1";
+ const EXPERIMENT1_BRANCH = "nice-branch";
+ const EXPERIMENT2 = "experiment-2";
+ const EXPERIMENT2_BRANCH = "other-branch";
+
+  let checkExperiment = (environmentData, id, branch, type = null) => {
+    Assert.ok(
+      "experiments" in environmentData,
+      "The current environment must report the experiment annotations."
+    );
+    Assert.ok(
+      id in environmentData.experiments,
+      "The experiments section must contain the expected experiment id."
+    );
+    Assert.equal(
+      environmentData.experiments[id].branch,
+      branch,
+      "The experiment branch must be correct."
+    );
+    if (type) {
+      Assert.equal(
+        environmentData.experiments[id].type,
+        type,
+        "The experiment type must be correct."
+      );
+    }
+  };
+
+ // Clean the environment and check that it's reporting the correct info.
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // We don't expect the experiments section to be there if no annotation
+ // happened.
+ Assert.ok(
+ !("experiments" in data),
+ "No experiments section must be reported if nothing was annotated."
+ );
+
+ // Add a change listener and add an experiment annotation.
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "test_experimentsAPI",
+ (reason, env) => {
+ deferred.resolve(env);
+ }
+ );
+ TelemetryEnvironment.setExperimentActive(EXPERIMENT1, EXPERIMENT1_BRANCH);
+ let eventEnvironmentData = await deferred.promise;
+
+ // Check that the old environment does not contain the experiments.
+ checkEnvironmentData(eventEnvironmentData);
+ Assert.ok(
+ !("experiments" in eventEnvironmentData),
+ "No experiments section must be reported in the old environment."
+ );
+
+ // Check that the current environment contains the right experiment.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ checkExperiment(data, EXPERIMENT1, EXPERIMENT1_BRANCH);
+
+ TelemetryEnvironment.unregisterChangeListener("test_experimentsAPI");
+
+ // Add a second annotation and check that both experiments are there.
+ deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "test_experimentsAPI2",
+ (reason, env) => {
+ deferred.resolve(env);
+ }
+ );
+ TelemetryEnvironment.setExperimentActive(EXPERIMENT2, EXPERIMENT2_BRANCH);
+ eventEnvironmentData = await deferred.promise;
+
+  // Check that the current environment contains both experiments.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ checkExperiment(data, EXPERIMENT1, EXPERIMENT1_BRANCH);
+ checkExperiment(data, EXPERIMENT2, EXPERIMENT2_BRANCH);
+
+ // The previous environment should only contain the first experiment.
+ checkExperiment(eventEnvironmentData, EXPERIMENT1, EXPERIMENT1_BRANCH);
+  Assert.ok(
+    !(EXPERIMENT2 in eventEnvironmentData.experiments),
+    "The old environment must not contain the new experiment annotation."
+  );
+
+ TelemetryEnvironment.unregisterChangeListener("test_experimentsAPI2");
+
+ // Check that removing an unknown experiment annotation does not trigger
+ // a notification.
+ TelemetryEnvironment.registerChangeListener("test_experimentsAPI3", () => {
+ Assert.ok(
+ false,
+ "Removing an unknown experiment annotation must not trigger a change."
+ );
+ });
+ TelemetryEnvironment.setExperimentInactive("unknown-experiment-id");
+  // Also make sure that passing non-string arguments doesn't throw or
+  // trigger a notification.
+ TelemetryEnvironment.setExperimentActive({}, "some-branch");
+ TelemetryEnvironment.setExperimentActive("some-id", {});
+ TelemetryEnvironment.unregisterChangeListener("test_experimentsAPI3");
+
+ // Check that removing a known experiment leaves the other in place and triggers
+ // a change.
+ deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener(
+ "test_experimentsAPI4",
+ (reason, env) => {
+ deferred.resolve(env);
+ }
+ );
+ TelemetryEnvironment.setExperimentInactive(EXPERIMENT1);
+ eventEnvironmentData = await deferred.promise;
+
+ // Check that the current environment contains just the second experiment.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+  Assert.ok(
+    !(EXPERIMENT1 in data.experiments),
+    "The current environment must not contain the removed experiment annotation."
+  );
+ checkExperiment(data, EXPERIMENT2, EXPERIMENT2_BRANCH);
+
+ // The previous environment should contain both annotations.
+ checkExperiment(eventEnvironmentData, EXPERIMENT1, EXPERIMENT1_BRANCH);
+ checkExperiment(eventEnvironmentData, EXPERIMENT2, EXPERIMENT2_BRANCH);
+
+ // Set an experiment with a type and check that it correctly shows up.
+ TelemetryEnvironment.setExperimentActive(
+ "typed-experiment",
+ "random-branch",
+ { type: "ab-test" }
+ );
+ data = TelemetryEnvironment.currentEnvironment;
+ checkExperiment(data, "typed-experiment", "random-branch", "ab-test");
+});
+
+add_task(async function test_experimentsAPI_limits() {
+ const EXPERIMENT =
+ "experiment-2-experiment-2-experiment-2-experiment-2-experiment-2" +
+ "-experiment-2-experiment-2-experiment-2-experiment-2";
+ const EXPERIMENT_BRANCH =
+ "other-branch-other-branch-other-branch-other-branch-other" +
+ "-branch-other-branch-other-branch-other-branch-other-branch";
+ const EXPERIMENT_TRUNCATED = EXPERIMENT.substring(0, 100);
+ const EXPERIMENT_BRANCH_TRUNCATED = EXPERIMENT_BRANCH.substring(0, 100);
+
+ // Clean the environment and check that it's reporting the correct info.
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // We don't expect the experiments section to be there if no annotation
+ // happened.
+ Assert.ok(
+ !("experiments" in data),
+ "No experiments section must be reported if nothing was annotated."
+ );
+
+ // Add a change listener and wait for the annotation to happen.
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("test_experimentsAPI", () =>
+ deferred.resolve()
+ );
+ TelemetryEnvironment.setExperimentActive(EXPERIMENT, EXPERIMENT_BRANCH);
+ await deferred.promise;
+
+ // Check that the current environment contains the truncated values
+ // for the experiment data.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.ok(
+ "experiments" in data,
+ "The environment must contain an experiments section."
+ );
+ Assert.ok(
+ EXPERIMENT_TRUNCATED in data.experiments,
+ "The experiments must be reporting the truncated id."
+ );
+ Assert.ok(
+ !(EXPERIMENT in data.experiments),
+ "The experiments must not be reporting the full id."
+ );
+ Assert.equal(
+ EXPERIMENT_BRANCH_TRUNCATED,
+ data.experiments[EXPERIMENT_TRUNCATED].branch,
+ "The experiments must be reporting the truncated branch."
+ );
+
+ TelemetryEnvironment.unregisterChangeListener("test_experimentsAPI");
+
+ // Check that an overly long type is truncated.
+ const longType = "a0123456678901234567890123456789";
+ TelemetryEnvironment.setExperimentActive("exp", "some-branch", {
+ type: longType,
+ });
+ data = TelemetryEnvironment.currentEnvironment;
+ Assert.equal(data.experiments.exp.type, longType.substring(0, 20));
+});
+
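+// The following tasks cover Windows-only system information (HDD, CPU/Wow64,
+// OS install year) that is only available after TelemetryEnvironment.delayedInit().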
+if (gIsWindows) {
+ add_task(async function test_environmentHDDInfo() {
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+ let data = TelemetryEnvironment.currentEnvironment;
+ let empty = { model: null, revision: null, type: null };
+ Assert.deepEqual(
+ data.system.hdd,
+ { binary: empty, profile: empty, system: empty },
+ "Should have no data yet."
+ );
+ await TelemetryEnvironment.delayedInit();
+ data = TelemetryEnvironment.currentEnvironment;
+ for (let k of EXPECTED_HDD_FIELDS) {
+ checkString(data.system.hdd[k].model);
+ checkString(data.system.hdd[k].revision);
+ checkString(data.system.hdd[k].type);
+ }
+ if (AppConstants.MOZ_GLEAN) {
+ if (data.system.hdd.profile.type == "SSD") {
+ Assert.equal(
+ true,
+ Glean.fogValidation.profileDiskIsSsd.testGetValue(),
+ "SSDness should be recorded in Glean"
+ );
+ } else {
+ Assert.equal(
+ false,
+ Glean.fogValidation.profileDiskIsSsd.testGetValue(),
+ "nonSSDness should be recorded in Glean"
+ );
+ }
+ }
+ });
+
+ add_task(async function test_environmentProcessInfo() {
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+ let data = TelemetryEnvironment.currentEnvironment;
+ Assert.deepEqual(data.system.isWow64, null, "Should have no data yet.");
+ await TelemetryEnvironment.delayedInit();
+ data = TelemetryEnvironment.currentEnvironment;
+ Assert.equal(
+ typeof data.system.isWow64,
+ "boolean",
+ "isWow64 must be a boolean."
+ );
+ Assert.equal(
+ typeof data.system.isWowARM64,
+ "boolean",
+ "isWowARM64 must be a boolean."
+ );
+ // These should be numbers if they are not null
+ for (let f of [
+ "count",
+ "model",
+ "family",
+ "stepping",
+ "l2cacheKB",
+ "l3cacheKB",
+ "speedMHz",
+ "cores",
+ ]) {
+ Assert.ok(
+ !(f in data.system.cpu) ||
+ data.system.cpu[f] === null ||
+ Number.isFinite(data.system.cpu[f]),
+ f + " must be a number if non null."
+ );
+ }
+ Assert.ok(
+ checkString(data.system.cpu.vendor),
+ "vendor must be a valid string."
+ );
+ });
+
+ add_task(async function test_environmentOSInfo() {
+ await TelemetryEnvironment.testCleanRestart().onInitialized();
+ let data = TelemetryEnvironment.currentEnvironment;
+ Assert.deepEqual(
+ data.system.os.installYear,
+ null,
+ "Should have no data yet."
+ );
+ await TelemetryEnvironment.delayedInit();
+ data = TelemetryEnvironment.currentEnvironment;
+ Assert.ok(
+ Number.isFinite(data.system.os.installYear),
+ "Install year must be a number."
+ );
+ });
+}
+
+add_task(
+ { skip_if: () => AppConstants.MOZ_APP_NAME == "thunderbird" },
+ async function test_environmentServicesInfo() {
+ let cache = TelemetryEnvironment.testCleanRestart();
+ await cache.onInitialized();
+ let oldGetFxaSignedInUser = cache._getFxaSignedInUser;
+ try {
+ // Test the 'yes to both' case.
+
+ // This makes the weave service return that the usere is definitely a sync user
+ Preferences.set("services.sync.username", "c00lperson123@example.com");
+ let calledFxa = false;
+ cache._getFxaSignedInUser = () => {
+ calledFxa = true;
+ return null;
+ };
+
+ await cache._updateServicesInfo();
+ ok(
+ !calledFxa,
+ "Shouldn't need to ask FxA if they're definitely signed in"
+ );
+ deepEqual(cache.currentEnvironment.services, {
+ accountEnabled: true,
+ syncEnabled: true,
+ });
+
+ // Test the fxa-but-not-sync case.
+ Preferences.reset("services.sync.username");
+      // We don't actually inspect the returned object, just that one is returned.
+ cache._getFxaSignedInUser = async () => {
+ return {};
+ };
+ await cache._updateServicesInfo();
+ deepEqual(cache.currentEnvironment.services, {
+ accountEnabled: true,
+ syncEnabled: false,
+ });
+ // Test the "no to both" case.
+ cache._getFxaSignedInUser = async () => {
+ return null;
+ };
+ await cache._updateServicesInfo();
+ deepEqual(cache.currentEnvironment.services, {
+ accountEnabled: false,
+ syncEnabled: false,
+ });
+ // And finally, the 'fxa is in an error state' case.
+ cache._getFxaSignedInUser = () => {
+ throw new Error("You'll never know");
+ };
+ await cache._updateServicesInfo();
+ equal(cache.currentEnvironment.services, null);
+ } finally {
+ cache._getFxaSignedInUser = oldGetFxaSignedInUser;
+ Preferences.reset("services.sync.username");
+ }
+ }
+);
+
+add_task(async function test_normandyTestPrefsGoneAfter91() {
+ const testPrefBool = "app.normandy.test-prefs.bool";
+ const testPrefInteger = "app.normandy.test-prefs.integer";
+ const testPrefString = "app.normandy.test-prefs.string";
+
+ Services.prefs.setBoolPref(testPrefBool, true);
+ Services.prefs.setIntPref(testPrefInteger, 10);
+ Services.prefs.setCharPref(testPrefString, "test-string");
+
+ const data = TelemetryEnvironment.currentEnvironment;
+
+ if (Services.vc.compare(data.build.version, "91") > 0) {
+ Assert.equal(
+ data.settings.userPrefs["app.normandy.test-prefs.bool"],
+ null,
+ "This probe should expire in FX91. bug 1686105 "
+ );
+ Assert.equal(
+ data.settings.userPrefs["app.normandy.test-prefs.integer"],
+ null,
+ "This probe should expire in FX91. bug 1686105 "
+ );
+ Assert.equal(
+ data.settings.userPrefs["app.normandy.test-prefs.string"],
+ null,
+ "This probe should expire in FX91. bug 1686105 "
+ );
+ }
+});
+
+add_task(async function test_environmentShutdown() {
+ // Define and reset the test preference.
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_STATE }],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+  // Set up the preferences and listener, then trigger the shutdown.
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ TelemetryEnvironment.registerChangeListener(
+ "test_environmentShutdownChange",
+ () => {
+      // Fail if any change is propagated after shutdown.
+ Assert.ok(false, "No change should be propagated after shutdown.");
+ }
+ );
+ TelemetryEnvironment.shutdown();
+
+ // Flipping the test preference after shutdown should not trigger the listener
+ Preferences.set(PREF_TEST, 1);
+
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener(
+ "test_environmentShutdownChange"
+ );
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js b/toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js
new file mode 100644
index 0000000000..a80b1c7001
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js
@@ -0,0 +1,1111 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "TestUtils",
+ "resource://testing-common/TestUtils.jsm"
+);
+const { TelemetryTestUtils } = ChromeUtils.import(
+ "resource://testing-common/TelemetryTestUtils.jsm"
+);
+
+const PRERELEASE_CHANNELS = Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS;
+const ALL_CHANNELS = Ci.nsITelemetry.DATASET_ALL_CHANNELS;
+
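+// Verify the serialized event shape: [timestamp, category, method, object, value?, extra?].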
+function checkEventFormat(events) {
+ Assert.ok(Array.isArray(events), "Events should be serialized to an array.");
+ for (let e of events) {
+ Assert.ok(Array.isArray(e), "Event should be an array.");
+ Assert.greaterOrEqual(
+ e.length,
+ 4,
+ "Event should have at least 4 elements."
+ );
+ Assert.lessOrEqual(e.length, 6, "Event should have at most 6 elements.");
+
+ Assert.equal(typeof e[0], "number", "Element 0 should be a number.");
+ Assert.equal(typeof e[1], "string", "Element 1 should be a string.");
+ Assert.equal(typeof e[2], "string", "Element 2 should be a string.");
+ Assert.equal(typeof e[3], "string", "Element 3 should be a string.");
+
+ if (e.length > 4) {
+ Assert.ok(
+ e[4] === null || typeof e[4] == "string",
+ "Event element 4 should be null or a string."
+ );
+ }
+ if (e.length > 5) {
+ Assert.ok(
+ e[5] === null || typeof e[5] == "object",
+ "Event element 5 should be null or an object."
+ );
+ }
+
+ let extra = e[5];
+ if (extra) {
+ Assert.ok(
+ Object.keys(extra).every(k => typeof k == "string"),
+ "All extra keys should be strings."
+ );
+ Assert.ok(
+ Object.values(extra).every(v => typeof v == "string"),
+ "All extra values should be strings."
+ );
+ }
+ }
+}
+
+/**
+ * @param summaries is of the form
+ *        [[process, [event category, event method, event object], count], ...]
+ * @param clearScalars - true if you want to clear the scalars
+ */
+function checkEventSummary(summaries, clearScalars) {
+ let scalars = Telemetry.getSnapshotForKeyedScalars("main", clearScalars);
+
+  for (let [process, [category, method, object], count] of summaries) {
+    let uniqueEventName = `${category}#${method}#${object}`;
+ let summaryCount;
+ if (process === "dynamic") {
+ summaryCount =
+ scalars.dynamic["telemetry.dynamic_event_counts"][uniqueEventName];
+ } else {
+ summaryCount =
+ scalars[process]["telemetry.event_counts"][uniqueEventName];
+ }
+ Assert.equal(
+ summaryCount,
+ count,
+ `${uniqueEventName} had wrong summary count`
+ );
+ }
+}
+
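+// Assert that exactly one event registration failure of the given type was
+// recorded in the TELEMETRY_EVENT_REGISTRATION_ERROR histogram.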
+function checkRegistrationFailure(failureType) {
+ let snapshot = Telemetry.getSnapshotForHistograms("main", true);
+ Assert.ok(
+ "parent" in snapshot,
+ "There should be at least one parent histogram when checking for registration failures."
+ );
+ Assert.ok(
+ "TELEMETRY_EVENT_REGISTRATION_ERROR" in snapshot.parent,
+ "TELEMETRY_EVENT_REGISTRATION_ERROR should exist when checking for registration failures."
+ );
+ let values = snapshot.parent.TELEMETRY_EVENT_REGISTRATION_ERROR.values;
+ Assert.ok(
+ !!values,
+ "TELEMETRY_EVENT_REGISTRATION_ERROR's values should exist when checking for registration failures."
+ );
+ Assert.equal(
+ values[failureType],
+ 1,
+ `Event registration ought to have failed due to type ${failureType}`
+ );
+}
+
+function checkRecordingFailure(failureType) {
+ let snapshot = Telemetry.getSnapshotForHistograms("main", true);
+ Assert.ok(
+ "parent" in snapshot,
+ "There should be at least one parent histogram when checking for recording failures."
+ );
+ Assert.ok(
+ "TELEMETRY_EVENT_RECORDING_ERROR" in snapshot.parent,
+ "TELEMETRY_EVENT_RECORDING_ERROR should exist when checking for recording failures."
+ );
+ let values = snapshot.parent.TELEMETRY_EVENT_RECORDING_ERROR.values;
+ Assert.ok(
+ !!values,
+ "TELEMETRY_EVENT_RECORDING_ERROR's values should exist when checking for recording failures."
+ );
+ Assert.equal(
+ values[failureType],
+ 1,
+ `Event recording ought to have failed due to type ${failureType}`
+ );
+}
+
+add_task(async function test_event_summary_limit() {
+ Telemetry.clearEvents();
+ Telemetry.clearScalars();
+
+ const limit = 500; // matches kMaxEventSummaryKeys in TelemetryScalar.cpp.
+ let objects = [];
+ for (let i = 0; i < limit + 1; i++) {
+ objects.push("object" + i);
+ }
+  // Use "telemetry.test.dynamic" here: registering "telemetry.test" would
+  // enable the whole "telemetry.test" category.
+ Telemetry.registerEvents("telemetry.test.dynamic", {
+ test_method: {
+ methods: ["testMethod"],
+ objects,
+ record_on_release: true,
+ },
+ });
+ for (let object of objects) {
+ Telemetry.recordEvent("telemetry.test.dynamic", "testMethod", object);
+ }
+
+ TelemetryTestUtils.assertNumberOfEvents(
+ limit + 1,
+ {},
+ { process: "dynamic" }
+ );
+ let scalarSnapshot = Telemetry.getSnapshotForKeyedScalars("main", true);
+ Assert.equal(
+ Object.keys(scalarSnapshot.dynamic["telemetry.dynamic_event_counts"])
+ .length,
+ limit,
+ "Should not have recorded more than `limit` events"
+ );
+});
+
+add_task(async function test_recording_state() {
+ Telemetry.clearEvents();
+ Telemetry.clearScalars();
+
+ const events = [
+ ["telemetry.test", "test1", "object1"],
+ ["telemetry.test.second", "test", "object1"],
+ ];
+
+ // Both test categories should be off by default.
+ events.forEach(e => Telemetry.recordEvent(...e));
+ TelemetryTestUtils.assertEvents([]);
+ checkEventSummary(
+ events.map(e => ["parent", e, 1]),
+ true
+ );
+
+ // Enable one test category and see that we record correctly.
+ Telemetry.setEventRecordingEnabled("telemetry.test", true);
+ events.forEach(e => Telemetry.recordEvent(...e));
+ TelemetryTestUtils.assertEvents([events[0]]);
+ checkEventSummary(
+ events.map(e => ["parent", e, 1]),
+ true
+ );
+
+ // Also enable the other test category and see that we record correctly.
+ Telemetry.setEventRecordingEnabled("telemetry.test.second", true);
+ events.forEach(e => Telemetry.recordEvent(...e));
+ TelemetryTestUtils.assertEvents(events);
+ checkEventSummary(
+ events.map(e => ["parent", e, 1]),
+ true
+ );
+
+  // Now turn off one category again and check that this works as expected.
+ Telemetry.setEventRecordingEnabled("telemetry.test", false);
+ events.forEach(e => Telemetry.recordEvent(...e));
+ TelemetryTestUtils.assertEvents([events[1]]);
+ checkEventSummary(
+ events.map(e => ["parent", e, 1]),
+ true
+ );
+});
+
+add_task(async function recording_setup() {
+ // Make sure both test categories are enabled for the remaining tests.
+ // Otherwise their event recording won't work.
+ Telemetry.setEventRecordingEnabled("telemetry.test", true);
+ Telemetry.setEventRecordingEnabled("telemetry.test.second", true);
+});
+
+add_task(async function test_recording() {
+ Telemetry.clearScalars();
+ Telemetry.clearEvents();
+
+ // Record some events.
+ let expected = [
+ { optout: false, event: ["telemetry.test", "test1", "object1"] },
+ { optout: false, event: ["telemetry.test", "test2", "object2"] },
+
+ { optout: false, event: ["telemetry.test", "test1", "object1", "value"] },
+ {
+ optout: false,
+ event: ["telemetry.test", "test1", "object1", "value", null],
+ },
+ {
+ optout: false,
+ event: ["telemetry.test", "test1", "object1", null, { key1: "value1" }],
+ },
+ {
+ optout: false,
+ event: [
+ "telemetry.test",
+ "test1",
+ "object1",
+ "value",
+ { key1: "value1", key2: "value2" },
+ ],
+ },
+
+ { optout: true, event: ["telemetry.test", "optout", "object1"] },
+ { optout: false, event: ["telemetry.test.second", "test", "object1"] },
+ {
+ optout: false,
+ event: [
+ "telemetry.test.second",
+ "test",
+ "object1",
+ null,
+ { key1: "value1" },
+ ],
+ },
+ ];
+
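+  // Record each expected event, noting timestamps before and after so the
+  // recorded event timestamp can be range-checked later.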
+ for (let entry of expected) {
+ entry.tsBefore = Math.floor(Telemetry.msSinceProcessStart());
+ try {
+ Telemetry.recordEvent(...entry.event);
+ } catch (ex) {
+ Assert.ok(
+ false,
+ `Failed to record event ${JSON.stringify(entry.event)}: ${ex}`
+ );
+ }
+ entry.tsAfter = Math.floor(Telemetry.msSinceProcessStart());
+ }
+
+ // Strip off trailing null values to match the serialized events.
+ for (let entry of expected) {
+ let e = entry.event;
+ while (e.length >= 3 && e[e.length - 1] === null) {
+ e.pop();
+ }
+ }
+
+ // Check that the events were summarized properly.
+ let summaries = {};
+ expected.forEach(({ optout, event }) => {
+    let [category, method, object] = event;
+    let uniqueEventName = `${category}#${method}#${object}`;
+ if (!(uniqueEventName in summaries)) {
+ summaries[uniqueEventName] = ["parent", event, 1];
+ } else {
+ summaries[uniqueEventName][2]++;
+ }
+ });
+ checkEventSummary(Object.values(summaries), true);
+
+ // The following should not result in any recorded events.
+ Telemetry.recordEvent("unknown.category", "test1", "object1");
+ checkRecordingFailure(0 /* UnknownEvent */);
+ Telemetry.recordEvent("telemetry.test", "unknown", "object1");
+ checkRecordingFailure(0 /* UnknownEvent */);
+ Telemetry.recordEvent("telemetry.test", "test1", "unknown");
+ checkRecordingFailure(0 /* UnknownEvent */);
+
+ let checkEvents = (events, expectedEvents) => {
+ checkEventFormat(events);
+ Assert.equal(
+ events.length,
+ expectedEvents.length,
+ "Snapshot should have the right number of events."
+ );
+
+ for (let i = 0; i < events.length; ++i) {
+ let { tsBefore, tsAfter } = expectedEvents[i];
+ let ts = events[i][0];
+ Assert.greaterOrEqual(
+ ts,
+ tsBefore,
+ "The recorded timestamp should be greater than the one before recording."
+ );
+ Assert.lessOrEqual(
+ ts,
+ tsAfter,
+ "The recorded timestamp should be less than the one after recording."
+ );
+
+ let recordedData = events[i].slice(1);
+ let expectedData = expectedEvents[i].event.slice();
+ Assert.deepEqual(
+ recordedData,
+ expectedData,
+ "The recorded event data should match."
+ );
+ }
+ };
+
+ // Check that the expected events were recorded.
+ let snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, false);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ checkEvents(snapshot.parent, expected);
+
+ // Check serializing only opt-out events.
+ snapshot = Telemetry.snapshotEvents(ALL_CHANNELS, false);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ let filtered = expected.filter(e => !!e.optout);
+ checkEvents(snapshot.parent, filtered);
+});
+
+add_task(async function test_clear() {
+ Telemetry.clearEvents();
+
+ const COUNT = 10;
+ for (let i = 0; i < COUNT; ++i) {
+ Telemetry.recordEvent("telemetry.test", "test1", "object1");
+ Telemetry.recordEvent("telemetry.test.second", "test", "object1");
+ }
+
+ // Check that events were recorded.
+ // The events are cleared by passing the respective flag.
+ let snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ Assert.equal(
+ snapshot.parent.length,
+ 2 * COUNT,
+ `Should have recorded ${2 * COUNT} events.`
+ );
+
+ // Now the events should be cleared.
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, false);
+ Assert.equal(
+ Object.keys(snapshot).length,
+ 0,
+ `Should have cleared the events.`
+ );
+
+ for (let i = 0; i < COUNT; ++i) {
+ Telemetry.recordEvent("telemetry.test", "test1", "object1");
+ Telemetry.recordEvent("telemetry.test.second", "test", "object1");
+ }
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true, 5);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ Assert.equal(snapshot.parent.length, 5, "Should have returned 5 events");
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, false);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ Assert.equal(
+ snapshot.parent.length,
+ 2 * COUNT - 5,
+ `Should have returned ${2 * COUNT - 5} events`
+ );
+
+ Telemetry.recordEvent("telemetry.test", "test1", "object1");
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, false, 5);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ Assert.equal(snapshot.parent.length, 5, "Should have returned 5 events");
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ Assert.equal(
+ snapshot.parent.length,
+ 2 * COUNT - 5 + 1,
+ `Should have returned ${2 * COUNT - 5 + 1} events`
+ );
+});
+
+add_task(async function test_expiry() {
+ Telemetry.clearEvents();
+
+ // Recording call with event that is expired by version.
+ Telemetry.recordEvent("telemetry.test", "expired_version", "object1");
+ checkRecordingFailure(1 /* Expired */);
+ let snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ Object.keys(snapshot).length,
+ 0,
+ "Should not record event with expired version."
+ );
+
+ // Recording call with event that has expiry_version set into the future.
+ Telemetry.recordEvent("telemetry.test", "not_expired_optout", "object1");
+ TelemetryTestUtils.assertNumberOfEvents(1);
+});
+
+add_task(async function test_invalidParams() {
+ Telemetry.clearEvents();
+
+ // Recording call with wrong type for value argument.
+ Telemetry.recordEvent("telemetry.test", "test1", "object1", 1);
+ let snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ Object.keys(snapshot).length,
+ 0,
+ "Should not record event when value argument with invalid type is passed."
+ );
+ checkRecordingFailure(3 /* Value */);
+
+ // Recording call with wrong type for extra argument.
+ Telemetry.recordEvent("telemetry.test", "test1", "object1", null, "invalid");
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ Object.keys(snapshot).length,
+ 0,
+ "Should not record event when extra argument with invalid type is passed."
+ );
+ checkRecordingFailure(4 /* Extra */);
+
+ // Recording call with unknown extra key.
+ Telemetry.recordEvent("telemetry.test", "test1", "object1", null, {
+ key3: "x",
+ });
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ Object.keys(snapshot).length,
+ 0,
+ "Should not record event when extra argument with invalid key is passed."
+ );
+ checkRecordingFailure(2 /* ExtraKey */);
+
+  // Recording call with an invalid extra value type.
+ Telemetry.recordEvent("telemetry.test", "test1", "object1", null, {
+ key3: 1,
+ });
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ Object.keys(snapshot).length,
+ 0,
+ "Should not record event when extra argument with invalid value type is passed."
+ );
+ checkRecordingFailure(4 /* Extra */);
+});
+
+add_task(async function test_storageLimit() {
+ Telemetry.clearEvents();
+
+ let limitReached = TestUtils.topicObserved(
+ "event-telemetry-storage-limit-reached"
+ );
+ // Record more events than the storage limit allows.
+ let LIMIT = 1000;
+ let COUNT = LIMIT + 10;
+ for (let i = 0; i < COUNT; ++i) {
+ Telemetry.recordEvent("telemetry.test", "test1", "object1", String(i));
+ }
+
+ await limitReached;
+ Assert.ok(true, "Topic was notified when event limit was reached");
+
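+  // Reaching the limit only triggers the notification; events recorded past the
+  // limit are still stored, as asserted below.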
+ // Check that the right events were recorded.
+ let snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.ok("parent" in snapshot, "Should have entry for main process.");
+ let events = snapshot.parent;
+ Assert.equal(
+ events.length,
+ COUNT,
+    `Should have recorded all ${COUNT} events`
+ );
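+  // Serialized events are [timestamp, category, method, object, value, extra],
+  // so index 4 holds the recorded value string.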
+ Assert.ok(
+ events.every((e, idx) => e[4] === String(idx)),
+ "Should have recorded all events."
+ );
+});
+
+add_task(async function test_valueLimits() {
+ Telemetry.clearEvents();
+
+ // Record values that are at or over the limits for string lengths.
+ let LIMIT = 80;
+ let expected = [
+ ["telemetry.test", "test1", "object1", "a".repeat(LIMIT - 10), null],
+ ["telemetry.test", "test1", "object1", "a".repeat(LIMIT), null],
+ ["telemetry.test", "test1", "object1", "a".repeat(LIMIT + 1), null],
+ ["telemetry.test", "test1", "object1", "a".repeat(LIMIT + 10), null],
+
+ [
+ "telemetry.test",
+ "test1",
+ "object1",
+ null,
+ { key1: "a".repeat(LIMIT - 10) },
+ ],
+ ["telemetry.test", "test1", "object1", null, { key1: "a".repeat(LIMIT) }],
+ [
+ "telemetry.test",
+ "test1",
+ "object1",
+ null,
+ { key1: "a".repeat(LIMIT + 1) },
+ ],
+ [
+ "telemetry.test",
+ "test1",
+ "object1",
+ null,
+ { key1: "a".repeat(LIMIT + 10) },
+ ],
+ ];
+
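+  // recordEvent is expected to truncate over-long value and extra strings to
+  // LIMIT characters, so trim the expectations to match.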
+ for (let event of expected) {
+ Telemetry.recordEvent(...event);
+ if (event[3]) {
+ event[3] = event[3].substr(0, LIMIT);
+ } else {
+ event[3] = undefined;
+ }
+ if (event[4]) {
+ event[4].key1 = event[4].key1.substr(0, LIMIT);
+ }
+ }
+
+ // Strip off trailing null values to match the serialized events.
+ for (let e of expected) {
+ while (e.length >= 3 && e[e.length - 1] === null) {
+ e.pop();
+ }
+ }
+
+ // Check that the right events were recorded.
+ TelemetryTestUtils.assertEvents(expected);
+});
+
+add_task(async function test_unicodeValues() {
+ Telemetry.clearEvents();
+
+ // Record string values containing unicode characters.
+ let value = "漢語";
+ Telemetry.recordEvent("telemetry.test", "test1", "object1", value);
+ Telemetry.recordEvent("telemetry.test", "test1", "object1", null, {
+ key1: value,
+ });
+
+ // Check that the values were correctly recorded.
+ TelemetryTestUtils.assertEvents([{ value }, { extra: { key1: value } }]);
+});
+
+add_task(async function test_dynamicEvents() {
+ Telemetry.clearEvents();
+ Telemetry.clearScalars();
+ Telemetry.canRecordExtended = true;
+
+ // Register some test events.
+ Telemetry.registerEvents("telemetry.test.dynamic", {
+ // Event with only required fields.
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ // Event with extra_keys.
+ test2: {
+ methods: ["test2", "test2b"],
+ objects: ["object1"],
+ extra_keys: ["key1", "key2"],
+ },
+ // Expired event.
+ test3: {
+ methods: ["test3"],
+ objects: ["object1"],
+ expired: true,
+ },
+ // A release-channel recording event.
+ test4: {
+ methods: ["test4"],
+ objects: ["object1"],
+ record_on_release: true,
+ },
+ });
+
+ // Record some valid events.
+ Telemetry.recordEvent("telemetry.test.dynamic", "test1", "object1");
+ Telemetry.recordEvent("telemetry.test.dynamic", "test2", "object1", null, {
+ key1: "foo",
+ key2: "bar",
+ });
+ Telemetry.recordEvent("telemetry.test.dynamic", "test2b", "object1", null, {
+ key1: "foo",
+ key2: "bar",
+ });
+ Telemetry.recordEvent(
+ "telemetry.test.dynamic",
+ "test3",
+ "object1",
+ "some value"
+ );
+ Telemetry.recordEvent("telemetry.test.dynamic", "test4", "object1", null);
+
+ // Test recording an unknown event.
+ Telemetry.recordEvent("telemetry.test.dynamic", "unknown", "unknown");
+ checkRecordingFailure(0 /* UnknownEvent */);
+
+ // Now check that the snapshot contains the expected data.
+ let snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, false);
+ Assert.ok(
+ "dynamic" in snapshot,
+ "Should have dynamic events in the snapshot."
+ );
+
+ let expected = [
+ ["telemetry.test.dynamic", "test1", "object1"],
+ [
+ "telemetry.test.dynamic",
+ "test2",
+ "object1",
+ null,
+ { key1: "foo", key2: "bar" },
+ ],
+ [
+ "telemetry.test.dynamic",
+ "test2b",
+ "object1",
+ null,
+ { key1: "foo", key2: "bar" },
+ ],
+ // "test3" is epxired, so it should not be recorded.
+ ["telemetry.test.dynamic", "test4", "object1"],
+ ];
+ let events = snapshot.dynamic;
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+
+ // Check that we've summarized the recorded events
+ checkEventSummary(
+ expected.map(ev => ["dynamic", ev, 1]),
+ true
+ );
+
+ // Check that the opt-out snapshot contains only the one expected event.
+ snapshot = Telemetry.snapshotEvents(ALL_CHANNELS, false);
+ Assert.ok(
+ "dynamic" in snapshot,
+ "Should have dynamic events in the snapshot."
+ );
+ Assert.equal(
+ snapshot.dynamic.length,
+ 1,
+ "Should have one opt-out event in the snapshot."
+ );
+ expected = ["telemetry.test.dynamic", "test4", "object1"];
+ Assert.deepEqual(snapshot.dynamic[0].slice(1), expected);
+
+ // Recording with unknown extra keys should be ignored and print an error.
+ Telemetry.clearEvents();
+ Telemetry.recordEvent("telemetry.test.dynamic", "test1", "object1", null, {
+ key1: "foo",
+ });
+ Telemetry.recordEvent("telemetry.test.dynamic", "test2", "object1", null, {
+ key1: "foo",
+ unknown: "bar",
+ });
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.ok(
+ !("dynamic" in snapshot),
+ "Should have not recorded dynamic events with unknown extra keys."
+ );
+
+ // Other built-in events should not show up in the "dynamic" bucket of the snapshot.
+ Telemetry.recordEvent("telemetry.test", "test1", "object1");
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.ok(
+ !("dynamic" in snapshot),
+ "Should have not recorded built-in event into dynamic bucket."
+ );
+
+ // Test that recording opt-in and opt-out events works as expected.
+ Telemetry.clearEvents();
+ Telemetry.canRecordExtended = false;
+
+ Telemetry.recordEvent("telemetry.test.dynamic", "test1", "object1");
+ Telemetry.recordEvent("telemetry.test.dynamic", "test4", "object1");
+
+ expected = [
+ // Only "test4" should have been recorded.
+ ["telemetry.test.dynamic", "test4", "object1"],
+ ];
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ snapshot.dynamic.length,
+ 1,
+ "Should have one opt-out event in the snapshot."
+ );
+ Assert.deepEqual(
+ snapshot.dynamic.map(e => e.slice(1)),
+ expected
+ );
+});
+
+add_task(async function test_dynamicEventRegistrationValidation() {
+ Telemetry.canRecordExtended = true;
+ Telemetry.clearEvents();
+
+ // Test registration of invalid categories.
+ Telemetry.getSnapshotForHistograms("main", true); // Clear histograms before we begin.
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry+test+dynamic", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ }),
+ /Category parameter should match the identifier pattern\./,
+ "Should throw when registering category names with invalid characters."
+ );
+ checkRegistrationFailure(2 /* Category */);
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents(
+ "telemetry.test.test.test.test.test.test.test.test",
+ {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ }
+ ),
+ /Category parameter should match the identifier pattern\./,
+ "Should throw when registering overly long category names."
+ );
+ checkRegistrationFailure(2 /* Category */);
+
+ // Test registration of invalid event names.
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic1", {
+ "test?1": {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ }),
+ /Event names should match the identifier pattern\./,
+ "Should throw when registering event names with invalid characters."
+ );
+ checkRegistrationFailure(1 /* Name */);
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic2", {
+ test1test1test1test1test1test1test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ }),
+ /Event names should match the identifier pattern\./,
+ "Should throw when registering overly long event names."
+ );
+ checkRegistrationFailure(1 /* Name */);
+
+ // Test registration of invalid method names.
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic3", {
+ test1: {
+ methods: ["test?1"],
+ objects: ["object1"],
+ },
+ }),
+ /Method names should match the identifier pattern\./,
+ "Should throw when registering method names with invalid characters."
+ );
+ checkRegistrationFailure(3 /* Method */);
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic", {
+ test1: {
+ methods: ["test1test1test1test1test1test1test1"],
+ objects: ["object1"],
+ },
+ }),
+ /Method names should match the identifier pattern\./,
+ "Should throw when registering overly long method names."
+ );
+ checkRegistrationFailure(3 /* Method */);
+
+ // Test registration of invalid object names.
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic4", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object?1"],
+ },
+ }),
+ /Object names should match the identifier pattern\./,
+ "Should throw when registering object names with invalid characters."
+ );
+ checkRegistrationFailure(4 /* Object */);
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic5", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1object1object1object1object1object1"],
+ },
+ }),
+ /Object names should match the identifier pattern\./,
+ "Should throw when registering overly long object names."
+ );
+ checkRegistrationFailure(4 /* Object */);
+
+ // Test validation of invalid key names.
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic6", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ extra_keys: ["a?1"],
+ },
+ }),
+ /Extra key names should match the identifier pattern\./,
+ "Should throw when registering extra key names with invalid characters."
+ );
+ checkRegistrationFailure(5 /* ExtraKeys */);
+
+ // Test validation of key names that are too long - we allow a maximum of 15 characters.
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic7", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ extra_keys: ["a012345678901234"],
+ },
+ }),
+ /Extra key names should match the identifier pattern\./,
+ "Should throw when registering extra key names which are too long."
+ );
+ checkRegistrationFailure(5 /* ExtraKeys */);
+ Telemetry.registerEvents("telemetry.test.dynamic8", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ extra_keys: ["a01234567890123"],
+ },
+ });
+
+ // Test validation of extra key count - we only allow 10.
+ Assert.throws(
+ () =>
+ Telemetry.registerEvents("telemetry.test.dynamic9", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ extra_keys: [
+ "a1",
+ "a2",
+ "a3",
+ "a4",
+ "a5",
+ "a6",
+ "a7",
+ "a8",
+ "a9",
+ "a10",
+ "a11",
+ ],
+ },
+ }),
+ /No more than 10 extra keys can be registered\./,
+ "Should throw when registering too many extra keys."
+ );
+ checkRegistrationFailure(5 /* ExtraKeys */);
+ Telemetry.registerEvents("telemetry.test.dynamic10", {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ extra_keys: ["a1", "a2", "a3", "a4", "a5", "a6", "a7", "a8", "a9", "a10"],
+ },
+ });
+});
+
+// When add-ons update, they may re-register some of the dynamic events.
+// Test through some possible scenarios.
+add_task(async function test_dynamicEventRegisterAgain() {
+ Telemetry.canRecordExtended = true;
+ Telemetry.clearEvents();
+
+ const category = "telemetry.test.register.again";
+ let events = {
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ };
+
+ // First register the initial event and make sure it can be recorded.
+ Telemetry.registerEvents(category, events);
+ let expected = [[category, "test1", "object1"]];
+ expected.forEach(e => Telemetry.recordEvent(...e));
+
+ let snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ snapshot.dynamic.length,
+ expected.length,
+ "Should have right number of events in the snapshot."
+ );
+ Assert.deepEqual(
+ snapshot.dynamic.map(e => e.slice(1)),
+ expected
+ );
+
+ // Register the same event again and make sure it can still be recorded.
+ Telemetry.registerEvents(category, events);
+ Telemetry.recordEvent(category, "test1", "object1");
+
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ snapshot.dynamic.length,
+ expected.length,
+ "Should have right number of events in the snapshot."
+ );
+ Assert.deepEqual(
+ snapshot.dynamic.map(e => e.slice(1)),
+ expected
+ );
+
+ // Now register another event in the same category and make sure both events can be recorded.
+ events.test2 = {
+ methods: ["test2"],
+ objects: ["object2"],
+ };
+ Telemetry.registerEvents(category, events);
+
+ expected = [
+ [category, "test1", "object1"],
+ [category, "test2", "object2"],
+ ];
+ expected.forEach(e => Telemetry.recordEvent(...e));
+
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ snapshot.dynamic.length,
+ expected.length,
+ "Should have right number of events in the snapshot."
+ );
+ Assert.deepEqual(
+ snapshot.dynamic.map(e => e.slice(1)),
+ expected
+ );
+
+  // Check that adding new methods and objects to existing event entries works.
+ events.test1.methods = ["test1a"];
+ events.test2.objects = ["object2", "object2a"];
+ Telemetry.registerEvents(category, events);
+
+ expected = [
+ [category, "test1", "object1"],
+ [category, "test2", "object2"],
+ [category, "test1a", "object1"],
+ [category, "test2", "object2a"],
+ ];
+ expected.forEach(e => Telemetry.recordEvent(...e));
+
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ snapshot.dynamic.length,
+ expected.length,
+ "Should have right number of events in the snapshot."
+ );
+ Assert.deepEqual(
+ snapshot.dynamic.map(e => e.slice(1)),
+ expected
+ );
+
+ // Make sure that we can expire events that are already registered.
+ events.test2.expired = true;
+ Telemetry.registerEvents(category, events);
+
+ expected = [[category, "test1", "object1"]];
+ expected.forEach(e => Telemetry.recordEvent(...e));
+
+ snapshot = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true);
+ Assert.equal(
+ snapshot.dynamic.length,
+ expected.length,
+ "Should have right number of events in the snapshot."
+ );
+ Assert.deepEqual(
+ snapshot.dynamic.map(e => e.slice(1)),
+ expected
+ );
+});
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_productSpecificEvents() {
+ const EVENT_CATEGORY = "telemetry.test";
+ const DEFAULT_PRODUCTS_EVENT = "default_products";
+ const DESKTOP_ONLY_EVENT = "desktop_only";
+ const MULTIPRODUCT_EVENT = "multiproduct";
+ const MOBILE_ONLY_EVENT = "mobile_only";
+
+ Telemetry.clearEvents();
+
+ // Try to record the desktop and multiproduct event
+ Telemetry.recordEvent(EVENT_CATEGORY, DEFAULT_PRODUCTS_EVENT, "object1");
+ Telemetry.recordEvent(EVENT_CATEGORY, DESKTOP_ONLY_EVENT, "object1");
+ Telemetry.recordEvent(EVENT_CATEGORY, MULTIPRODUCT_EVENT, "object1");
+
+ // Try to record the mobile-only event
+ Telemetry.recordEvent(EVENT_CATEGORY, MOBILE_ONLY_EVENT, "object1");
+
+ let events = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true).parent;
+
+ let expected = [
+ [EVENT_CATEGORY, DEFAULT_PRODUCTS_EVENT, "object1"],
+ [EVENT_CATEGORY, DESKTOP_ONLY_EVENT, "object1"],
+ [EVENT_CATEGORY, MULTIPRODUCT_EVENT, "object1"],
+ ];
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+ }
+);
+
+add_task(
+ {
+ skip_if: () => !gIsAndroid,
+ },
+ async function test_mobileSpecificEvents() {
+ const EVENT_CATEGORY = "telemetry.test";
+ const DEFAULT_PRODUCTS_EVENT = "default_products";
+ const DESKTOP_ONLY_EVENT = "desktop_only";
+ const MULTIPRODUCT_EVENT = "multiproduct";
+ const MOBILE_ONLY_EVENT = "mobile_only";
+
+ Telemetry.clearEvents();
+
+ // Try to record the mobile-only and multiproduct event
+ Telemetry.recordEvent(EVENT_CATEGORY, DEFAULT_PRODUCTS_EVENT, "object1");
+ Telemetry.recordEvent(EVENT_CATEGORY, MOBILE_ONLY_EVENT, "object1");
+ Telemetry.recordEvent(EVENT_CATEGORY, MULTIPRODUCT_EVENT, "object1");
+
+ // Try to record the mobile-only event
+ Telemetry.recordEvent(EVENT_CATEGORY, DESKTOP_ONLY_EVENT, "object1");
+
+ let events = Telemetry.snapshotEvents(PRERELEASE_CHANNELS, true).parent;
+
+ let expected = [
+ [EVENT_CATEGORY, DEFAULT_PRODUCTS_EVENT, "object1"],
+ [EVENT_CATEGORY, MOBILE_ONLY_EVENT, "object1"],
+ [EVENT_CATEGORY, MULTIPRODUCT_EVENT, "object1"],
+ ];
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+ }
+);
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryEvents_buildFaster.js b/toolkit/components/telemetry/tests/unit/test_TelemetryEvents_buildFaster.js
new file mode 100644
index 0000000000..24554403e4
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryEvents_buildFaster.js
@@ -0,0 +1,468 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+const { CommonUtils } = ChromeUtils.import(
+ "resource://services-common/utils.js"
+);
+
+/**
+ * Return the path to the definitions file for the events.
+ */
+function getDefinitionsPath() {
+  // The artifact definitions file lives next to the binary, in the GreD directory.
+  let definitionFile = Services.dirsvc.get("GreD", Ci.nsIFile);
+ definitionFile.append("EventArtifactDefinitions.json");
+ return definitionFile.path;
+}
+
+add_task(async function test_setup() {
+ do_get_profile();
+});
+
+add_task(
+ {
+ // The test needs to write a file, and that fails in tests on Android.
+ // We don't really need the Android coverage, so skip on Android.
+ skip_if: () => AppConstants.platform == "android",
+ },
+ async function test_invalidJSON() {
+ const INVALID_JSON = "{ invalid,JSON { {1}";
+ const FILE_PATH = getDefinitionsPath();
+
+ // Write a corrupted JSON file.
+ await OS.File.writeAtomic(FILE_PATH, INVALID_JSON, {
+ encoding: "utf-8",
+ noOverwrite: false,
+ });
+
+ // Simulate Firefox startup. This should not throw!
+ await TelemetryController.testSetup();
+ await TelemetryController.testPromiseJsProbeRegistration();
+
+ // Cleanup.
+ await TelemetryController.testShutdown();
+ await OS.File.remove(FILE_PATH);
+ }
+);
+
+add_task(
+ {
+ // The test needs to write a file, and that fails in tests on Android.
+ // We don't really need the Android coverage, so skip on Android.
+ skip_if: () => AppConstants.platform == "android",
+ },
+ async function test_dynamicBuiltin() {
+ const DYNAMIC_EVENT_SPEC = {
+ "telemetry.test.builtin": {
+ test: {
+ objects: ["object1", "object2"],
+ expires: "never",
+ methods: ["test1", "test2"],
+ extra_keys: ["key2", "key1"],
+ record_on_release: false,
+ },
+ },
+ // Test a new, expired event
+ "telemetry.test.expired": {
+ expired: {
+ objects: ["object1"],
+ methods: ["method1"],
+ expires: AppConstants.MOZ_APP_VERSION,
+ record_on_release: false,
+ },
+ },
+ // Test overwriting static expiries
+ "telemetry.test": {
+ expired_version: {
+ objects: ["object1"],
+ methods: ["expired_version"],
+ expires: "never",
+ record_on_release: false,
+ },
+ not_expired_optout: {
+ objects: ["object1"],
+ methods: ["not_expired_optout"],
+ expires: AppConstants.MOZ_APP_VERSION,
+ record_on_release: true,
+ },
+ },
+ };
+
+ Telemetry.clearEvents();
+
+ // Let's write to the definition file to also cover the file
+ // loading part.
+ const FILE_PATH = getDefinitionsPath();
+ await CommonUtils.writeJSON(DYNAMIC_EVENT_SPEC, FILE_PATH);
+
+ // Start TelemetryController to trigger loading the specs.
+ await TelemetryController.testReset();
+ await TelemetryController.testPromiseJsProbeRegistration();
+
+ // Record the events
+ const TEST_EVENT_NAME = "telemetry.test.builtin";
+ const DYNAMIC_EVENT_CATEGORY = "telemetry.test.expired";
+ const STATIC_EVENT_CATEGORY = "telemetry.test";
+ Telemetry.setEventRecordingEnabled(TEST_EVENT_NAME, true);
+ Telemetry.setEventRecordingEnabled(DYNAMIC_EVENT_CATEGORY, true);
+ Telemetry.setEventRecordingEnabled(STATIC_EVENT_CATEGORY, true);
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test1", "object1");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object1", null, {
+ key1: "foo",
+ key2: "bar",
+ });
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object2", null, {
+ key2: "bar",
+ });
+ Telemetry.recordEvent(DYNAMIC_EVENT_CATEGORY, "method1", "object1");
+ Telemetry.recordEvent(STATIC_EVENT_CATEGORY, "expired_version", "object1");
+ Telemetry.recordEvent(
+ STATIC_EVENT_CATEGORY,
+ "not_expired_optout",
+ "object1"
+ );
+
+ // Check the values we tried to store.
+ const snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok(
+ "parent" in snapshot,
+ "Should have parent events in the snapshot."
+ );
+
+ let expected = [
+ [TEST_EVENT_NAME, "test1", "object1"],
+ [TEST_EVENT_NAME, "test2", "object1", null, { key1: "foo", key2: "bar" }],
+ [TEST_EVENT_NAME, "test2", "object2", null, { key2: "bar" }],
+ [STATIC_EVENT_CATEGORY, "expired_version", "object1"],
+ ];
+ let events = snapshot.parent;
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+
+ // Clean up.
+ await TelemetryController.testShutdown();
+ await OS.File.remove(FILE_PATH);
+ }
+);
+
+add_task(async function test_dynamicBuiltinEvents() {
+ Telemetry.clearEvents();
+ Telemetry.clearScalars();
+ Telemetry.canRecordExtended = true;
+
+ const TEST_EVENT_NAME = "telemetry.test.dynamicbuiltin";
+
+ // Register some dynamic builtin test events.
+ Telemetry.registerBuiltinEvents(TEST_EVENT_NAME, {
+ // Event with only required fields.
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ // Event with extra_keys.
+ test2: {
+ methods: ["test2", "test2b"],
+ objects: ["object1", "object2"],
+ extra_keys: ["key1", "key2"],
+ },
+ });
+
+ // Record some events.
+ Telemetry.setEventRecordingEnabled(TEST_EVENT_NAME, true);
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test1", "object1");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object1", null, {
+ key1: "foo",
+ key2: "bar",
+ });
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2b", "object2", null, {
+ key2: "bar",
+ });
+ // Now check that the snapshot contains the expected data.
+ let snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok("parent" in snapshot, "Should have parent events in the snapshot.");
+
+ // For checking event summaries
+ const scalars = Telemetry.getSnapshotForKeyedScalars("main", true);
+ Assert.ok(
+ "parent" in scalars,
+ "Should have parent scalars in the main snapshot."
+ );
+
+ let expected = [
+ [TEST_EVENT_NAME, "test1", "object1"],
+ [TEST_EVENT_NAME, "test2", "object1", null, { key1: "foo", key2: "bar" }],
+ [TEST_EVENT_NAME, "test2b", "object2", null, { key2: "bar" }],
+ ];
+ let events = snapshot.parent;
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+
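+    // Event summaries are accumulated in the "telemetry.event_counts" keyed
+    // scalar, keyed by "category#method#object".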
+ const uniqueEventName = `${expected[i][0]}#${expected[i][1]}#${expected[i][2]}`;
+ const summaryCount =
+ scalars.parent["telemetry.event_counts"][uniqueEventName];
+ Assert.equal(1, summaryCount, `${uniqueEventName} had wrong summary count`);
+ }
+});
+
+add_task(async function test_dynamicBuiltinEventsDisabledByDefault() {
+ Telemetry.clearEvents();
+ Telemetry.canRecordExtended = true;
+
+ const TEST_EVENT_NAME = "telemetry.test.offbydefault";
+
+ // Register some dynamic builtin test events.
+ Telemetry.registerBuiltinEvents(TEST_EVENT_NAME, {
+ // Event with only required fields.
+ test1: {
+ methods: ["test1"],
+ objects: ["object1"],
+ },
+ });
+
+ // Record some events.
+  // Explicitly _don't_ enable the category.
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test1", "object1");
+
+ // Now check that the snapshot contains the expected data.
+ let snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok(
+ !("parent" in snapshot),
+ "Should not have parent events in the snapshot."
+ );
+
+ // Now enable the category and record again
+ Telemetry.setEventRecordingEnabled(TEST_EVENT_NAME, true);
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test1", "object1");
+
+ snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok("parent" in snapshot, "Should have parent events in the snapshot.");
+
+ let expected = [[TEST_EVENT_NAME, "test1", "object1"]];
+ let events = snapshot.parent;
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+});
+
+add_task(async function test_dynamicBuiltinDontOverwriteStaticData() {
+ Telemetry.clearEvents();
+ Telemetry.canRecordExtended = true;
+
+ const TEST_STATIC_EVENT_NAME = "telemetry.test";
+ const TEST_EVENT_NAME = "telemetry.test.nooverwrite";
+
+ // Register some dynamic builtin test events.
+ Telemetry.registerBuiltinEvents(TEST_EVENT_NAME, {
+ dynamic: {
+ methods: ["dynamic"],
+ objects: ["builtin", "anotherone"],
+ },
+ });
+
+ // First enable the categories we're using
+ Telemetry.setEventRecordingEnabled(TEST_STATIC_EVENT_NAME, true);
+ Telemetry.setEventRecordingEnabled(TEST_EVENT_NAME, true);
+
+ // Now record some dynamic-builtin and static events
+ Telemetry.recordEvent(TEST_EVENT_NAME, "dynamic", "builtin");
+ Telemetry.recordEvent(TEST_STATIC_EVENT_NAME, "test1", "object1");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "dynamic", "anotherone");
+
+ let snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok("parent" in snapshot, "Should have parent events in the snapshot.");
+
+ // All events should now be recorded in the right order
+ let expected = [
+ [TEST_EVENT_NAME, "dynamic", "builtin"],
+ [TEST_STATIC_EVENT_NAME, "test1", "object1"],
+ [TEST_EVENT_NAME, "dynamic", "anotherone"],
+ ];
+ let events = snapshot.parent;
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+});
+
+add_task(async function test_dynamicBuiltinEventsOverridingStatic() {
+ Telemetry.clearEvents();
+ Telemetry.canRecordExtended = true;
+
+ const TEST_EVENT_NAME = "telemetry.test";
+
+ // Register dynamic builtin test events, overwriting existing one.
+ Telemetry.registerBuiltinEvents(TEST_EVENT_NAME, {
+ // Event with only required fields.
+ test1: {
+ methods: ["test1"],
+ objects: ["object1", "object2"],
+ },
+ // Event with extra_keys.
+ test2: {
+ methods: ["test2"],
+ objects: ["object1", "object2", "object3"],
+ extra_keys: ["key1", "key2", "newdynamickey"],
+ },
+ });
+
+  // Record some events whose methods and objects are already part of the static event definition.
+ Telemetry.setEventRecordingEnabled(TEST_EVENT_NAME, true);
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test1", "object1");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object1", null, {
+ key1: "foo",
+ key2: "bar",
+ });
+ // Record events with newly added objects and keys.
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object2", null, {
+ newdynamickey: "foo",
+ });
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object3", null, {
+ key1: "foo",
+ });
+ // Now check that the snapshot contains the expected data.
+ let snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok("parent" in snapshot, "Should have parent events in the snapshot.");
+
+ let expected = [
+ [TEST_EVENT_NAME, "test1", "object1"],
+ [TEST_EVENT_NAME, "test2", "object1", null, { key1: "foo", key2: "bar" }],
+ [TEST_EVENT_NAME, "test2", "object2", null, { newdynamickey: "foo" }],
+ [TEST_EVENT_NAME, "test2", "object3", null, { key1: "foo" }],
+ ];
+ let events = snapshot.parent;
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+});
+
+add_task(async function test_realDynamicDontOverwrite() {
+ // Real dynamic events follow similar code paths internally.
+ // Let's ensure they trigger the right code path and don't overwrite.
+
+ Telemetry.clearEvents();
+ Telemetry.canRecordExtended = true;
+
+ const TEST_EVENT_NAME = "telemetry.test";
+
+ // Register dynamic test events, this should not overwrite existing ones.
+ Telemetry.registerEvents(TEST_EVENT_NAME, {
+ // Event with only required fields.
+ test1: {
+ methods: ["test1"],
+ objects: ["object1", "object2"],
+ },
+ // Event with extra_keys.
+ test2: {
+ methods: ["test2"],
+ objects: ["object1", "object2", "object3"],
+ extra_keys: ["key1", "key2", "realdynamic"],
+ },
+ });
+
+  // Record some events whose methods and objects are already part of the static event definition.
+ Telemetry.setEventRecordingEnabled(TEST_EVENT_NAME, true);
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test1", "object1");
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object1", null, {
+ key1: "foo",
+ key2: "bar",
+ });
+ // Record events with newly added objects and keys.
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object2", null, {
+ realdynamic: "foo",
+ });
+ Telemetry.recordEvent(TEST_EVENT_NAME, "test2", "object3", null, {
+ key1: "foo",
+ });
+ // Now check that the snapshot contains the expected data.
+ let snapshot = Telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ false
+ );
+ Assert.ok("parent" in snapshot, "Should have parent events in the snapshot.");
+
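+  // Real dynamic registration must not extend the static "telemetry.test"
+  // definition, so the recording that relies on the newly added "realdynamic"
+  // extra key is not expected in the snapshot.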
+ let expected = [
+ [TEST_EVENT_NAME, "test1", "object1"],
+ [TEST_EVENT_NAME, "test2", "object1", null, { key1: "foo", key2: "bar" }],
+ [TEST_EVENT_NAME, "test2", "object3", null, { key1: "foo" }],
+ ];
+ let events = snapshot.parent;
+ Assert.equal(
+ events.length,
+ expected.length,
+ "Should have recorded the right amount of events."
+ );
+ for (let i = 0; i < expected.length; ++i) {
+ Assert.deepEqual(
+ events[i].slice(1),
+ expected[i],
+ "Should have recorded the expected event data."
+ );
+ }
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js b/toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js
new file mode 100644
index 0000000000..29ea4c0a1e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js
@@ -0,0 +1,29 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ let testFlag = Services.telemetry.getHistogramById("TELEMETRY_TEST_FLAG");
+ deepEqual(
+ testFlag.snapshot().values,
+ { 0: 1, 1: 0 },
+ "Original value is correct"
+ );
+ testFlag.add(1);
+ deepEqual(
+ testFlag.snapshot().values,
+ { 0: 0, 1: 1, 2: 0 },
+ "Value is correct after ping"
+ );
+ testFlag.clear();
+ deepEqual(
+ testFlag.snapshot().values,
+ { 0: 1, 1: 0 },
+ "Value is correct after calling clear()"
+ );
+ testFlag.add(1);
+ deepEqual(
+ testFlag.snapshot().values,
+ { 0: 0, 1: 1, 2: 0 },
+ "Value is correct after ping"
+ );
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryHistograms.js b/toolkit/components/telemetry/tests/unit/test_TelemetryHistograms.js
new file mode 100644
index 0000000000..c71ac54d2d
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryHistograms.js
@@ -0,0 +1,2067 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const INT_MAX = 0x7fffffff;
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+
+// Return an array of numbers from lower up to, excluding, upper
+function numberRange(lower, upper) {
+ let a = [];
+ for (let i = lower; i < upper; ++i) {
+ a.push(i);
+ }
+ return a;
+}
+
+function expect_fail(f) {
+ let failed = false;
+ try {
+ f();
+ failed = false;
+ } catch (e) {
+ failed = true;
+ }
+ Assert.ok(failed);
+}
+
+function expect_success(f) {
+ let succeeded = false;
+ try {
+ f();
+ succeeded = true;
+ } catch (e) {
+ succeeded = false;
+ }
+ Assert.ok(succeeded);
+}
+
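+// Sanity-check a plain histogram: verify its type and range in the main snapshot,
+// check that boolean values are coerced into the 0/1 buckets, and that clear()
+// empties the recorded values.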
+function check_histogram(histogram_type, name, min, max, bucket_count) {
+ var h = Telemetry.getHistogramById(name);
+ h.add(0);
+ var s = h.snapshot();
+ Assert.equal(0, s.sum);
+
+ var hgrams = Telemetry.getSnapshotForHistograms("main", false).parent;
+ let gh = hgrams[name];
+ Assert.equal(gh.histogram_type, histogram_type);
+
+ Assert.deepEqual(gh.range, [min, max]);
+
+ // Check that booleans work with nonboolean histograms
+ h.add(false);
+ h.add(true);
+ s = Object.values(h.snapshot().values);
+ Assert.deepEqual(s, [2, 1, 0]);
+
+ // Check that clearing works.
+ h.clear();
+ s = h.snapshot();
+ Assert.deepEqual(s.values, {});
+ Assert.equal(s.sum, 0);
+
+ h.add(0);
+ h.add(1);
+ var c = Object.values(h.snapshot().values);
+ Assert.deepEqual(c, [1, 1, 0]);
+}
+
+// This MUST be the very first test of this file.
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ function test_instantiate() {
+ const ID = "TELEMETRY_TEST_COUNT";
+ let h = Telemetry.getHistogramById(ID);
+
+ // Instantiate the subsession histogram through |add| and make sure they match.
+ // This MUST be the first use of "TELEMETRY_TEST_COUNT" in this file, otherwise
+ // |add| will not instantiate the histogram.
+ h.add(1);
+ let snapshot = h.snapshot();
+ let subsession = Telemetry.getSnapshotForHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+ Assert.ok(ID in subsession);
+ Assert.equal(
+ snapshot.sum,
+ subsession[ID].sum,
+ "Histogram and subsession histogram sum must match."
+ );
+ // Clear the histogram, so we don't void the assumptions from the other tests.
+ h.clear();
+ }
+);
+
+add_task(async function test_parameterChecks() {
+ let kinds = [Telemetry.HISTOGRAM_EXPONENTIAL, Telemetry.HISTOGRAM_LINEAR];
+ let testNames = ["TELEMETRY_TEST_EXPONENTIAL", "TELEMETRY_TEST_LINEAR"];
+ for (let i = 0; i < kinds.length; i++) {
+ let histogram_type = kinds[i];
+ let test_type = testNames[i];
+ let [min, max, bucket_count] = [1, INT_MAX - 1, 10];
+ check_histogram(histogram_type, test_type, min, max, bucket_count);
+ }
+});
+
+add_task(async function test_parameterCounts() {
+ let histogramIds = [
+ "TELEMETRY_TEST_EXPONENTIAL",
+ "TELEMETRY_TEST_LINEAR",
+ "TELEMETRY_TEST_FLAG",
+ "TELEMETRY_TEST_CATEGORICAL",
+ "TELEMETRY_TEST_BOOLEAN",
+ ];
+
+ for (let id of histogramIds) {
+ let h = Telemetry.getHistogramById(id);
+ h.clear();
+ h.add();
+ Assert.equal(
+ h.snapshot().sum,
+ 0,
+ "Calling add() without a value should only log an error."
+ );
+ h.clear();
+ }
+});
+
+add_task(async function test_parameterCountsKeyed() {
+ let histogramIds = [
+ "TELEMETRY_TEST_KEYED_FLAG",
+ "TELEMETRY_TEST_KEYED_BOOLEAN",
+ "TELEMETRY_TEST_KEYED_EXPONENTIAL",
+ "TELEMETRY_TEST_KEYED_LINEAR",
+ ];
+
+ for (let id of histogramIds) {
+ let h = Telemetry.getKeyedHistogramById(id);
+ h.clear();
+ h.add("key");
+ Assert.deepEqual(
+ h.snapshot(),
+ {},
+ "Calling add('key') without a value should only log an error."
+ );
+ h.clear();
+ }
+});
+
+add_task(async function test_noSerialization() {
+ // Instantiate the storage for this histogram and make sure it doesn't
+ // get reflected into JS, as it has no interesting data in it.
+ Telemetry.getHistogramById("NEWTAB_PAGE_PINNED_SITES_COUNT");
+ let histograms = Telemetry.getSnapshotForHistograms("main", false /* clear */)
+ .parent;
+ Assert.equal(false, "NEWTAB_PAGE_PINNED_SITES_COUNT" in histograms);
+});
+
+add_task(async function test_boolean_histogram() {
+ var h = Telemetry.getHistogramById("TELEMETRY_TEST_BOOLEAN");
+ var r = h.snapshot().range;
+ // boolean histograms ignore numeric parameters
+ Assert.deepEqual(r, [1, 2]);
+ h.add(0);
+ h.add(1);
+ h.add(2);
+
+ h.add(true);
+ h.add(false);
+ var s = h.snapshot();
+ Assert.equal(s.histogram_type, Telemetry.HISTOGRAM_BOOLEAN);
+ // last bucket should always be 0 since .add parameters are normalized to either 0 or 1
+ Assert.deepEqual(s.values, { 0: 2, 1: 3, 2: 0 });
+ Assert.equal(s.sum, 3);
+});
+
+add_task(async function test_flag_histogram() {
+ var h = Telemetry.getHistogramById("TELEMETRY_TEST_FLAG");
+ var r = h.snapshot().range;
+ // Flag histograms ignore numeric parameters.
+ Assert.deepEqual(r, [1, 2]);
+ // Should already have a 0 counted.
+ var v = h.snapshot().values;
+ var s = h.snapshot().sum;
+ Assert.deepEqual(v, { 0: 1, 1: 0 });
+ Assert.equal(s, 0);
+ // Should switch counts.
+ h.add(1);
+ var v2 = h.snapshot().values;
+ var s2 = h.snapshot().sum;
+ Assert.deepEqual(v2, { 0: 0, 1: 1, 2: 0 });
+ Assert.equal(s2, 1);
+ // Should only switch counts once.
+ h.add(1);
+ var v3 = h.snapshot().values;
+ var s3 = h.snapshot().sum;
+ Assert.deepEqual(v3, { 0: 0, 1: 1, 2: 0 });
+ Assert.equal(s3, 1);
+ Assert.equal(h.snapshot().histogram_type, Telemetry.HISTOGRAM_FLAG);
+});
+
+add_task(async function test_count_histogram() {
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT2");
+ let s = h.snapshot();
+ Assert.deepEqual(s.range, [1, 2]);
+ Assert.deepEqual(s.values, {});
+ Assert.equal(s.sum, 0);
+ h.add();
+ s = h.snapshot();
+ Assert.deepEqual(s.values, { 0: 1, 1: 0 });
+ Assert.equal(s.sum, 1);
+ h.add();
+ s = h.snapshot();
+ Assert.deepEqual(s.values, { 0: 2, 1: 0 });
+ Assert.equal(s.sum, 2);
+});
+
+add_task(async function test_categorical_histogram() {
+ let h1 = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL");
+ for (let v of ["CommonLabel", "Label2", "Label3", "Label3", 0, 0, 1]) {
+ h1.add(v);
+ }
+ for (let s of ["", "Label4", "1234"]) {
+ // The |add| method should not throw for unexpected values, but rather
+ // print an error message in the console.
+ h1.add(s);
+ }
+
+ let snapshot = h1.snapshot();
+ Assert.equal(snapshot.sum, 6);
+ Assert.deepEqual(snapshot.range, [1, 50]);
+ Assert.deepEqual(snapshot.values, { 0: 3, 1: 2, 2: 2, 3: 0 });
+
+ let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL_OPTOUT");
+ for (let v of [
+ "CommonLabel",
+ "CommonLabel",
+ "Label4",
+ "Label5",
+ "Label6",
+ 0,
+ 1,
+ ]) {
+ h2.add(v);
+ }
+ for (let s of ["", "Label3", "1234"]) {
+ // The |add| method should not throw for unexpected values, but rather
+ // print an error message in the console.
+ h2.add(s);
+ }
+
+ snapshot = h2.snapshot();
+ Assert.equal(snapshot.sum, 7);
+ Assert.deepEqual(snapshot.range, [1, 50]);
+ Assert.deepEqual(snapshot.values, { 0: 3, 1: 2, 2: 1, 3: 1, 4: 0 });
+
+ // This histogram overrides the default of 50 values to 70.
+ let h3 = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL_NVALUES");
+ for (let v of ["CommonLabel", "Label7", "Label8"]) {
+ h3.add(v);
+ }
+
+ snapshot = h3.snapshot();
+ Assert.equal(snapshot.sum, 3);
+ Assert.deepEqual(snapshot.range, [1, 70]);
+ Assert.deepEqual(snapshot.values, { 0: 1, 1: 1, 2: 1, 3: 0 });
+});
+
+add_task(async function test_getCategoricalLabels() {
+ let h = Telemetry.getCategoricalLabels();
+
+ Assert.deepEqual(h.TELEMETRY_TEST_CATEGORICAL, [
+ "CommonLabel",
+ "Label2",
+ "Label3",
+ ]);
+ Assert.deepEqual(h.TELEMETRY_TEST_CATEGORICAL_OPTOUT, [
+ "CommonLabel",
+ "Label4",
+ "Label5",
+ "Label6",
+ ]);
+ Assert.deepEqual(h.TELEMETRY_TEST_CATEGORICAL_NVALUES, [
+ "CommonLabel",
+ "Label7",
+ "Label8",
+ ]);
+ Assert.deepEqual(h.TELEMETRY_TEST_KEYED_CATEGORICAL, [
+ "CommonLabel",
+ "Label2",
+ "Label3",
+ ]);
+});
+
+add_task(async function test_add_error_behaviour() {
+ const PLAIN_HISTOGRAMS_TO_TEST = [
+ "TELEMETRY_TEST_FLAG",
+ "TELEMETRY_TEST_EXPONENTIAL",
+ "TELEMETRY_TEST_LINEAR",
+ "TELEMETRY_TEST_BOOLEAN",
+ ];
+
+ const KEYED_HISTOGRAMS_TO_TEST = [
+ "TELEMETRY_TEST_KEYED_FLAG",
+ "TELEMETRY_TEST_KEYED_COUNT",
+ "TELEMETRY_TEST_KEYED_BOOLEAN",
+ ];
+
+ // Check that |add| doesn't throw for plain histograms.
+ for (let hist of PLAIN_HISTOGRAMS_TO_TEST) {
+ const returnValue = Telemetry.getHistogramById(hist).add(
+ "unexpected-value"
+ );
+ Assert.strictEqual(
+ returnValue,
+ undefined,
+ "Adding to an histogram must return 'undefined'."
+ );
+ }
+
+ // And for keyed histograms.
+ for (let hist of KEYED_HISTOGRAMS_TO_TEST) {
+ const returnValue = Telemetry.getKeyedHistogramById(hist).add(
+ "some-key",
+ "unexpected-value"
+ );
+ Assert.strictEqual(
+ returnValue,
+ undefined,
+ "Adding to a keyed histogram must return 'undefined'."
+ );
+ }
+});
+
+add_task(async function test_API_return_values() {
+  // Check that the plain histogram functions cannot be used to crash the browser.
+ // We expect 'undefined' to be returned so that .add(1).add() can't be called.
+ // See bug 1321349 for context.
+ let hist = Telemetry.getHistogramById("TELEMETRY_TEST_LINEAR");
+ let keyedHist = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
+
+ const RETURN_VALUES = [
+ hist.clear(),
+ hist.add(1),
+ keyedHist.clear(),
+ keyedHist.add("some-key", 1),
+ ];
+
+ for (let returnValue of RETURN_VALUES) {
+ Assert.strictEqual(
+ returnValue,
+ undefined,
+ "The function must return undefined"
+ );
+ }
+});
+
+add_task(async function test_getHistogramById() {
+ try {
+ Telemetry.getHistogramById("nonexistent");
+ do_throw("This can't happen");
+ } catch (e) {}
+ var h = Telemetry.getHistogramById("CYCLE_COLLECTOR");
+ var s = h.snapshot();
+ Assert.equal(s.histogram_type, Telemetry.HISTOGRAM_EXPONENTIAL);
+ Assert.deepEqual(s.range, [1, 10000]);
+});
+
+add_task(async function test_getSlowSQL() {
+ var slow = Telemetry.slowSQL;
+ Assert.ok("mainThread" in slow && "otherThreads" in slow);
+});
+
+// Check that telemetry doesn't record in private mode
+add_task(async function test_privateMode() {
+ var h = Telemetry.getHistogramById("TELEMETRY_TEST_BOOLEAN");
+ var orig = h.snapshot();
+ Telemetry.canRecordExtended = false;
+ h.add(1);
+ Assert.deepEqual(orig, h.snapshot());
+ Telemetry.canRecordExtended = true;
+ h.add(1);
+ Assert.notDeepEqual(orig, h.snapshot());
+});
+
+// Check that telemetry records only when it is supposed to.
+add_task(async function test_histogramRecording() {
+ // Check that no histogram is recorded if both base and extended recording are off.
+ Telemetry.canRecordBase = false;
+ Telemetry.canRecordExtended = false;
+
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT");
+ h.clear();
+ let orig = h.snapshot();
+ h.add(1);
+ Assert.equal(orig.sum, h.snapshot().sum);
+
+ // Check that only base histograms are recorded.
+ Telemetry.canRecordBase = true;
+ h.add(1);
+ Assert.equal(
+ orig.sum + 1,
+ h.snapshot().sum,
+ "Histogram value should have incremented by 1 due to recording."
+ );
+
+ // Extended histograms should not be recorded.
+ h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTIN");
+ orig = h.snapshot();
+ h.add(1);
+ Assert.equal(
+ orig.sum,
+ h.snapshot().sum,
+ "Histograms should be equal after recording."
+ );
+
+ // Runtime created histograms should not be recorded.
+ h = Telemetry.getHistogramById("TELEMETRY_TEST_BOOLEAN");
+ orig = h.snapshot();
+ h.add(1);
+ Assert.equal(
+ orig.sum,
+ h.snapshot().sum,
+ "Histograms should be equal after recording."
+ );
+
+ // Check that extended histograms are recorded when required.
+ Telemetry.canRecordExtended = true;
+
+ h.add(1);
+ Assert.equal(
+ orig.sum + 1,
+ h.snapshot().sum,
+ "Runtime histogram value should have incremented by 1 due to recording."
+ );
+
+ h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTIN");
+ orig = h.snapshot();
+ h.add(1);
+ Assert.equal(
+ orig.sum + 1,
+ h.snapshot().sum,
+ "Histogram value should have incremented by 1 due to recording."
+ );
+
+ // Check that base histograms are still being recorded.
+ h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT");
+ h.clear();
+ orig = h.snapshot();
+ h.add(1);
+ Assert.equal(
+ orig.sum + 1,
+ h.snapshot().sum,
+ "Histogram value should have incremented by 1 due to recording."
+ );
+});
+
+add_task(async function test_expired_histogram() {
+ var test_expired_id = "TELEMETRY_TEST_EXPIRED";
+ var dummy = Telemetry.getHistogramById(test_expired_id);
+
+ dummy.add(1);
+
+ for (let process of ["main", "content", "gpu", "extension"]) {
+ let histograms = Telemetry.getSnapshotForHistograms(
+ "main",
+ false /* clear */
+ );
+ if (!(process in histograms)) {
+ info("Nothing present for process " + process);
+ continue;
+ }
+ Assert.equal(histograms[process].__expired__, undefined);
+ }
+ let parentHgrams = Telemetry.getSnapshotForHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+ Assert.equal(parentHgrams[test_expired_id], undefined);
+});
+
+add_task(async function test_keyed_expired_histogram() {
+ var test_expired_id = "TELEMETRY_TEST_EXPIRED_KEYED";
+ var dummy = Telemetry.getKeyedHistogramById(test_expired_id);
+ dummy.add("someKey", 1);
+
+ const histograms = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false /* clear */
+ );
+ for (let process of ["parent", "content", "gpu", "extension"]) {
+ if (!(process in histograms)) {
+ info("Nothing present for process " + process);
+ continue;
+ }
+ Assert.ok(
+ !(test_expired_id in histograms[process]),
+ "The expired keyed histogram must not be reported"
+ );
+ }
+});
+
+add_task(async function test_keyed_histogram() {
+ // Check that invalid names get rejected.
+
+ let threw = false;
+ try {
+ Telemetry.getKeyedHistogramById(
+ "test::unknown histogram",
+ "never",
+ Telemetry.HISTOGRAM_BOOLEAN
+ );
+ } catch (e) {
+ // This should throw as it is an unknown ID
+ threw = true;
+ }
+ Assert.ok(threw, "getKeyedHistogramById should have thrown");
+});
+
+add_task(async function test_keyed_boolean_histogram() {
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_BOOLEAN";
+ let KEYS = numberRange(0, 2).map(i => "key" + (i + 1));
+ KEYS.push("漢語");
+ let histogramBase = {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 2,
+ sum: 1,
+ values: { 0: 0, 1: 1, 2: 0 },
+ };
+ let testHistograms = numberRange(0, 3).map(i =>
+ JSON.parse(JSON.stringify(histogramBase))
+ );
+ let testKeys = [];
+ let testSnapShot = {};
+
+ let h = Telemetry.getKeyedHistogramById(KEYED_ID);
+ for (let i = 0; i < 2; ++i) {
+ let key = KEYS[i];
+ h.add(key, true);
+ testSnapShot[key] = testHistograms[i];
+ testKeys.push(key);
+
+ Assert.deepEqual(h.keys().sort(), testKeys);
+ Assert.deepEqual(h.snapshot(), testSnapShot);
+ }
+
+ h = Telemetry.getKeyedHistogramById(KEYED_ID);
+ Assert.deepEqual(h.keys().sort(), testKeys);
+ Assert.deepEqual(h.snapshot(), testSnapShot);
+
+ let key = KEYS[2];
+ h.add(key, false);
+ testKeys.push(key);
+ testSnapShot[key] = testHistograms[2];
+ testSnapShot[key].sum = 0;
+ testSnapShot[key].values = { 0: 1, 1: 0 };
+ Assert.deepEqual(h.keys().sort(), testKeys);
+ Assert.deepEqual(h.snapshot(), testSnapShot);
+
+ let parentHgrams = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+ Assert.deepEqual(parentHgrams[KEYED_ID], testSnapShot);
+
+ h.clear();
+ Assert.deepEqual(h.keys(), []);
+ Assert.deepEqual(h.snapshot(), {});
+});
+
+add_task(async function test_keyed_count_histogram() {
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
+ const KEYS = numberRange(0, 5).map(i => "key" + (i + 1));
+ let histogramBase = {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 4,
+ sum: 0,
+ values: { 0: 1, 1: 0 },
+ };
+ let testHistograms = numberRange(0, 5).map(i =>
+ JSON.parse(JSON.stringify(histogramBase))
+ );
+ let testKeys = [];
+ let testSnapShot = {};
+
+ let h = Telemetry.getKeyedHistogramById(KEYED_ID);
+ h.clear();
+ for (let i = 0; i < 4; ++i) {
+ let key = KEYS[i];
+ let value = i * 2 + 1;
+
+ for (let k = 0; k < value; ++k) {
+ h.add(key);
+ }
+ testHistograms[i].values[0] = value;
+ testHistograms[i].sum = value;
+ testSnapShot[key] = testHistograms[i];
+ testKeys.push(key);
+
+ Assert.deepEqual(h.keys().sort(), testKeys);
+ Assert.deepEqual(h.snapshot()[key], testHistograms[i]);
+ Assert.deepEqual(h.snapshot(), testSnapShot);
+ }
+
+ h = Telemetry.getKeyedHistogramById(KEYED_ID);
+ Assert.deepEqual(h.keys().sort(), testKeys);
+ Assert.deepEqual(h.snapshot(), testSnapShot);
+
+ let key = KEYS[4];
+ h.add(key);
+ testKeys.push(key);
+ testHistograms[4].values[0] = 1;
+ testHistograms[4].sum = 1;
+ testSnapShot[key] = testHistograms[4];
+
+ Assert.deepEqual(h.keys().sort(), testKeys);
+ Assert.deepEqual(h.snapshot(), testSnapShot);
+
+ let parentHgrams = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+ Assert.deepEqual(parentHgrams[KEYED_ID], testSnapShot);
+
+  // Test clearing the keyed histogram.
+ h.clear();
+ Assert.deepEqual(h.keys(), []);
+ Assert.deepEqual(h.snapshot(), {});
+
+ // Test leaving out the value argument. That should increment by 1.
+ h.add("key");
+ Assert.equal(h.snapshot().key.sum, 1);
+});
+
+add_task(async function test_keyed_categorical_histogram() {
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_CATEGORICAL";
+ const KEYS = numberRange(0, 5).map(i => "key" + (i + 1));
+
+ let h = Telemetry.getKeyedHistogramById(KEYED_ID);
+
+ for (let k of KEYS) {
+ // Test adding both per label and index.
+ for (let v of ["CommonLabel", "Label2", "Label3", "Label3", 0, 0, 1]) {
+ h.add(k, v);
+ }
+
+ // The |add| method should not throw for unexpected values, but rather
+ // print an error message in the console.
+ for (let s of ["", "Label4", "1234"]) {
+ h.add(k, s);
+ }
+ }
+
+ // Check that the set of keys in the snapshot is what we expect.
+ let snapshot = h.snapshot();
+ let snapshotKeys = Object.keys(snapshot);
+ Assert.equal(KEYS.length, snapshotKeys.length);
+ Assert.ok(KEYS.every(k => snapshotKeys.includes(k)));
+
+ // Check the snapshot values.
+ for (let k of KEYS) {
+ Assert.ok(k in snapshot);
+ Assert.equal(snapshot[k].sum, 6);
+ Assert.deepEqual(snapshot[k].range, [1, 50]);
+ Assert.deepEqual(snapshot[k].values, { 0: 3, 1: 2, 2: 2, 3: 0 });
+ }
+});
+
+add_task(async function test_keyed_flag_histogram() {
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_FLAG";
+ let h = Telemetry.getKeyedHistogramById(KEYED_ID);
+
+ const KEY = "default";
+ h.add(KEY, true);
+
+ let testSnapshot = {};
+ testSnapshot[KEY] = {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 3,
+ sum: 1,
+ values: { 0: 0, 1: 1, 2: 0 },
+ };
+
+ Assert.deepEqual(h.keys().sort(), [KEY]);
+ Assert.deepEqual(h.snapshot(), testSnapshot);
+
+ let parentHgrams = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+ Assert.deepEqual(parentHgrams[KEYED_ID], testSnapshot);
+
+ h.clear();
+ Assert.deepEqual(h.keys(), []);
+ Assert.deepEqual(h.snapshot(), {});
+});
+
+add_task(async function test_keyed_histogram_recording() {
+ // Check that no histogram is recorded if both base and extended recording are off.
+ Telemetry.canRecordBase = false;
+ Telemetry.canRecordExtended = false;
+
+ const TEST_KEY = "record_foo";
+ let h = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT"
+ );
+ h.clear();
+ h.add(TEST_KEY, 1);
+ Assert.ok(!(TEST_KEY in h.snapshot()));
+
+ // Check that only base histograms are recorded.
+ Telemetry.canRecordBase = true;
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "The keyed histogram should record the correct value."
+ );
+
+  // Keyed histograms in the extended set should not be recorded.
+ h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTIN");
+ h.clear();
+ h.add(TEST_KEY, 1);
+ Assert.ok(
+ !(TEST_KEY in h.snapshot()),
+ "The keyed histograms should not record any data."
+ );
+
+ // Check that extended histograms are recorded when required.
+ Telemetry.canRecordExtended = true;
+
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "The runtime keyed histogram should record the correct value."
+ );
+
+ h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTIN");
+ h.clear();
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "The keyed histogram should record the correct value."
+ );
+
+ // Check that base histograms are still being recorded.
+ h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT");
+ h.clear();
+ h.add(TEST_KEY, 1);
+ Assert.equal(h.snapshot()[TEST_KEY].sum, 1);
+});
+
+add_task(async function test_histogram_recording_enabled() {
+ Telemetry.canRecordBase = true;
+ Telemetry.canRecordExtended = true;
+
+ // Check that a "normal" histogram respects recording-enabled on/off
+ var h = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
+ var orig = h.snapshot();
+
+ h.add(1);
+ Assert.equal(orig.sum + 1, h.snapshot().sum, "add should record by default.");
+
+ // Check that when recording is disabled - add is ignored
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", false);
+ h.add(1);
+ Assert.equal(
+ orig.sum + 1,
+ h.snapshot().sum,
+ "When recording is disabled add should not record."
+ );
+
+ // Check that we're back to normal after recording is enabled
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", true);
+ h.add(1);
+ Assert.equal(
+ orig.sum + 2,
+ h.snapshot().sum,
+ "When recording is re-enabled add should record."
+ );
+
+ // Check that we're correctly accumulating values other than 1.
+ h.clear();
+ h.add(3);
+ Assert.equal(
+ 3,
+ h.snapshot().sum,
+ "Recording counts greater than 1 should work."
+ );
+
+ // Check that a histogram with recording disabled by default behaves correctly
+ h = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT_INIT_NO_RECORD");
+ orig = h.snapshot();
+
+ h.add(1);
+ Assert.equal(
+ orig.sum,
+ h.snapshot().sum,
+ "When recording is disabled by default, add should not record by default."
+ );
+
+ Telemetry.setHistogramRecordingEnabled(
+ "TELEMETRY_TEST_COUNT_INIT_NO_RECORD",
+ true
+ );
+ h.add(1);
+ Assert.equal(
+ orig.sum + 1,
+ h.snapshot().sum,
+ "When recording is enabled add should record."
+ );
+
+ // Restore to disabled
+ Telemetry.setHistogramRecordingEnabled(
+ "TELEMETRY_TEST_COUNT_INIT_NO_RECORD",
+ false
+ );
+ h.add(1);
+ Assert.equal(
+ orig.sum + 1,
+ h.snapshot().sum,
+ "When recording is disabled add should not record."
+ );
+});
+
+add_task(async function test_keyed_histogram_recording_enabled() {
+ Telemetry.canRecordBase = true;
+ Telemetry.canRecordExtended = true;
+
+ // Check RecordingEnabled for keyed histograms which are recording by default
+ const TEST_KEY = "record_foo";
+ let h = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT"
+ );
+
+ h.clear();
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "Keyed histogram add should record by default"
+ );
+
+ Telemetry.setHistogramRecordingEnabled(
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
+ false
+ );
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "Keyed histogram add should not record when recording is disabled"
+ );
+
+ Telemetry.setHistogramRecordingEnabled(
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
+ true
+ );
+ h.clear();
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "Keyed histogram add should record when recording is re-enabled"
+ );
+
+ // Check that a histogram with recording disabled by default behaves correctly
+ h = Telemetry.getKeyedHistogramById(
+ "TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD"
+ );
+ h.clear();
+
+ h.add(TEST_KEY, 1);
+ Assert.ok(
+ !(TEST_KEY in h.snapshot()),
+ "Keyed histogram add should not record by default for histograms which don't record by default"
+ );
+
+ Telemetry.setHistogramRecordingEnabled(
+ "TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD",
+ true
+ );
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "Keyed histogram add should record when recording is enabled"
+ );
+
+ // Restore to disabled
+ Telemetry.setHistogramRecordingEnabled(
+ "TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD",
+ false
+ );
+ h.add(TEST_KEY, 1);
+ Assert.equal(
+ h.snapshot()[TEST_KEY].sum,
+ 1,
+ "Keyed histogram add should not record when recording is disabled"
+ );
+});
+
+add_task(async function test_histogramSnapshots() {
+ let keyed = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
+ keyed.add("a", 1);
+
+ // Check that keyed histograms are not returned
+ let parentHgrams = Telemetry.getSnapshotForHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+ Assert.ok(!("TELEMETRY_TEST_KEYED_COUNT" in parentHgrams));
+});
+
+add_task(async function test_datasets() {
+ // Check that datasets work as expected.
+
+ const currentRecordExtended = Telemetry.canRecordExtended;
+
+ // Clear everything out
+ Telemetry.getSnapshotForHistograms("main", true /* clear */);
+ Telemetry.getSnapshotForKeyedHistograms("main", true /* clear */);
+
+ // Empty histograms are filtered. Let's record what we check below.
+ Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTIN").add(1);
+ Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT").add(1);
+ // Keyed flag histograms are skipped if empty, so let's add data.
+ Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_FLAG").add("a", 1);
+ Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTIN").add(
+ "a",
+ 1
+ );
+ Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT").add(
+ "a",
+ 1
+ );
+
+ // Check that registeredHistogram works properly
+ Telemetry.canRecordExtended = true;
+ let registered = Telemetry.getSnapshotForHistograms(
+ "main",
+ false /* clear */
+ );
+ registered = new Set(Object.keys(registered.parent));
+ Assert.ok(registered.has("TELEMETRY_TEST_FLAG"));
+ Assert.ok(registered.has("TELEMETRY_TEST_RELEASE_OPTIN"));
+ Assert.ok(registered.has("TELEMETRY_TEST_RELEASE_OPTOUT"));
+ Telemetry.canRecordExtended = false;
+ registered = Telemetry.getSnapshotForHistograms("main", false /* clear */);
+ registered = new Set(Object.keys(registered.parent));
+ Assert.ok(!registered.has("TELEMETRY_TEST_FLAG"));
+ Assert.ok(!registered.has("TELEMETRY_TEST_RELEASE_OPTIN"));
+ Assert.ok(registered.has("TELEMETRY_TEST_RELEASE_OPTOUT"));
+
+ // Check that registeredKeyedHistograms works properly
+ Telemetry.canRecordExtended = true;
+ registered = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false /* clear */
+ );
+ registered = new Set(Object.keys(registered.parent));
+ Assert.ok(registered.has("TELEMETRY_TEST_KEYED_FLAG"));
+ Assert.ok(registered.has("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT"));
+ Telemetry.canRecordExtended = false;
+ registered = Telemetry.getSnapshotForKeyedHistograms(
+ "main",
+ false /* clear */
+ );
+ registered = new Set(Object.keys(registered.parent));
+ Assert.ok(!registered.has("TELEMETRY_TEST_KEYED_FLAG"));
+ Assert.ok(registered.has("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT"));
+
+ Telemetry.canRecordExtended = currentRecordExtended;
+});
+
+add_task(async function test_keyed_keys() {
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_KEYS");
+ h.clear();
+ Telemetry.clearScalars();
+
+ // The |add| method should not throw for keys that are not allowed.
+ h.add("testkey", true);
+ h.add("thirdKey", false);
+ h.add("not-allowed", true);
+
+ // Check that we have the expected keys.
+ let snap = h.snapshot();
+ Assert.equal(Object.keys(snap).length, 2, "Only 2 keys must be recorded.");
+ Assert.ok("testkey" in snap, "'testkey' must be recorded.");
+ Assert.ok("thirdKey" in snap, "'thirdKey' must be recorded.");
+ Assert.deepEqual(
+ snap.testkey.values,
+ { 0: 0, 1: 1, 2: 0 },
+ "'testkey' must contain the correct value."
+ );
+ Assert.deepEqual(
+ snap.thirdKey.values,
+ { 0: 1, 1: 0 },
+ "'thirdKey' must contain the correct value."
+ );
+
+ // Keys that are not allowed must not be recorded.
+ Assert.ok(!("not-allowed" in snap), "'not-allowed' must not be recorded.");
+
+ // Check that these failures were correctly tracked.
+ const parentScalars = Telemetry.getSnapshotForKeyedScalars("main", false)
+ .parent;
+ const scalarName = "telemetry.accumulate_unknown_histogram_keys";
+ Assert.ok(
+ scalarName in parentScalars,
+ "Accumulation to unallowed keys must be reported."
+ );
+ Assert.ok(
+ "TELEMETRY_TEST_KEYED_KEYS" in parentScalars[scalarName],
+ "Accumulation to unallowed keys must be recorded with the correct key."
+ );
+ Assert.equal(
+ parentScalars[scalarName].TELEMETRY_TEST_KEYED_KEYS,
+ 1,
+ "Accumulation to unallowed keys must report the correct value."
+ );
+});
+
+add_task(async function test_count_multiple_samples() {
+ let valid = [1, 1, 3, 0];
+ let invalid = ["1", "0", "", "random"];
+
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
+ h.clear();
+
+ // If the array contains even a single invalid value, no accumulation should take place
+ // Keep the valid values in front of the invalid ones to catch an implementation that
+ // accumulates while traversing the array and only throws at the first invalid value.
+ h.add(valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.equal(s1.sum, 0);
+ // Ensure that no accumulations of 0-like values took place.
+ // These accumulations won't increase the sum.
+ Assert.deepEqual({}, s1.values);
+
+ h.add(valid);
+ let s2 = h.snapshot();
+ Assert.deepEqual(s2.values, { 0: 4, 1: 0 });
+ Assert.equal(s2.sum, 5);
+});
+
+add_task(async function test_categorical_multiple_samples() {
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL");
+ h.clear();
+ let valid = ["CommonLabel", "Label2", "Label3", "Label3", 0, 0, 1];
+ let invalid = ["", "Label4", "1234", "0", "1", 5000];
+
+ // At least one invalid parameter, so no accumulation should happen here
+ // Valid values in front of invalid.
+ h.add(valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.equal(s1.sum, 0);
+ Assert.deepEqual({}, s1.values);
+
+ h.add(valid);
+ let snapshot = h.snapshot();
+ Assert.equal(snapshot.sum, 6);
+ Assert.deepEqual(snapshot.values, { 0: 3, 1: 2, 2: 2, 3: 0 });
+});
+
+add_task(async function test_boolean_multiple_samples() {
+ let valid = [true, false, 0, 1, 2];
+ let invalid = ["", "0", "1", ",2", "true", "false", "random"];
+
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_BOOLEAN");
+ h.clear();
+
+ // At least one invalid parameter, so no accumulation should happen here
+ // Valid values in front of invalid.
+ h.add(valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.equal(s1.sum, 0);
+ Assert.deepEqual({}, s1.values);
+
+ h.add(valid);
+ let s = h.snapshot();
+ Assert.deepEqual(s.values, { 0: 2, 1: 3, 2: 0 });
+ Assert.equal(s.sum, 3);
+});
+
+add_task(async function test_linear_multiple_samples() {
+ // According to telemetry.mozilla.org/histogram-simulator, bucket at
+ // index 1 of TELEMETRY_TEST_LINEAR has max value of 268.44M
+ let valid = [0, 1, 5, 10, 268450000, 268450001, Math.pow(2, 31) + 1];
+ let invalid = ["", "0", "1", "random"];
+
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_LINEAR");
+ h.clear();
+
+ // At least one invalid parameter, so no accumulations.
+ // Valid values in front of invalid.
+ h.add(valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.equal(s1.sum, 0);
+ Assert.deepEqual({}, s1.values);
+
+ h.add(valid);
+ let s2 = h.snapshot();
+ // Values >= INT32_MAX are accumulated as INT32_MAX - 1
+ Assert.equal(s2.sum, valid.reduce((acc, cur) => acc + cur) - 3);
+ Assert.deepEqual(Object.values(s2.values), [1, 3, 2, 1]);
+});
+
+add_task(async function test_keyed_no_arguments() {
+ // Test for no accumulation when add is called with no arguments
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_LINEAR");
+ h.clear();
+
+ h.add();
+
+ // No keys should be added due to no accumulation.
+ Assert.equal(h.keys().length, 0);
+});
+
+add_task(async function test_keyed_categorical_invalid_string() {
+ // Test for no accumulation when add is called on a
+ // keyed categorical histogram with an invalid string label.
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_CATEGORICAL");
+ h.clear();
+
+ h.add("someKey", "#notALabel");
+
+ // No keys should be added due to no accumulation.
+ Assert.equal(h.keys().length, 0);
+});
+
+add_task(async function test_keyed_count_multiple_samples() {
+ let valid = [1, 1, 3, 0];
+ let invalid = ["1", "0", "", "random"];
+ let key = "somekeystring";
+
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
+ h.clear();
+
+ // If the array contains even a single invalid value, no accumulation should take place
+ // Keep the valid values in front of the invalid ones to catch an implementation that
+ // accumulates while traversing the array and only throws at the first invalid value.
+ h.add(key, valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.ok(!(key in s1));
+
+ h.add(key, valid);
+ let s2 = h.snapshot()[key];
+ Assert.deepEqual(s2.values, { 0: 4, 1: 0 });
+ Assert.equal(s2.sum, 5);
+});
+
+add_task(async function test_keyed_categorical_multiple_samples() {
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_CATEGORICAL");
+ h.clear();
+ let valid = ["CommonLabel", "Label2", "Label3", "Label3", 0, 0, 1];
+ let invalid = ["", "Label4", "1234", "0", "1", 5000];
+ let key = "somekeystring";
+
+ // At least one invalid parameter, so no accumulation should happen here
+ // Valid values in front of invalid.
+ h.add(key, valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.ok(!(key in s1));
+
+ h.add(key, valid);
+ let snapshot = h.snapshot()[key];
+ Assert.equal(snapshot.sum, 6);
+ Assert.deepEqual(Object.values(snapshot.values), [3, 2, 2, 0]);
+});
+
+add_task(async function test_keyed_boolean_multiple_samples() {
+ let valid = [true, false, 0, 1, 2];
+ let invalid = ["", "0", "1", ",2", "true", "false", "random"];
+ let key = "somekey";
+
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_BOOLEAN");
+ h.clear();
+
+ // At least one invalid parameter, so no accumulation should happen here
+ // Valid values in front of invalid.
+ h.add(key, valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.ok(!(key in s1));
+
+ h.add(key, valid);
+ let s = h.snapshot()[key];
+ Assert.deepEqual(s.values, { 0: 2, 1: 3, 2: 0 });
+ Assert.equal(s.sum, 3);
+});
+
+add_task(async function test_keyed_linear_multiple_samples() {
+ // According to telemetry.mozilla.org/histogram-simulator, bucket at
+ // index 1 of TELEMETRY_TEST_KEYED_LINEAR has max value of 3.13K
+ let valid = [0, 1, 5, 10, 268450000, 268450001, Math.pow(2, 31) + 1];
+ let invalid = ["", "0", "1", "random"];
+ let key = "somestring";
+
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_LINEAR");
+ h.clear();
+
+ // At least one invalid parameter, so no accumulations.
+ // Valid values in front of invalid.
+ h.add(key, valid.concat(invalid));
+ let s1 = h.snapshot();
+ Assert.ok(!(key in s1));
+
+ h.add(key, valid);
+ let s2 = h.snapshot()[key];
+ // Values >= INT32_MAX are accumulated as INT32_MAX - 1
+ Assert.equal(s2.sum, valid.reduce((acc, cur) => acc + cur) - 3);
+ Assert.deepEqual(s2.range, [1, 250000]);
+ Assert.deepEqual(s2.values, { 0: 1, 1: 3, 250000: 3 });
+});
+
+add_task(async function test_non_array_non_string_obj() {
+ let invalid_obj = {
+ prop1: "someValue",
+ prop2: "someOtherValue",
+ };
+ let key = "someString";
+
+ let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_LINEAR");
+ h.clear();
+
+ h.add(key, invalid_obj);
+ Assert.equal(h.keys().length, 0);
+});
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_productSpecificHistograms() {
+ const DEFAULT_PRODUCTS_HISTOGRAM = "TELEMETRY_TEST_DEFAULT_PRODUCTS";
+ const DESKTOP_ONLY_HISTOGRAM = "TELEMETRY_TEST_DESKTOP_ONLY";
+ const MULTIPRODUCT_HISTOGRAM = "TELEMETRY_TEST_MULTIPRODUCT";
+ const MOBILE_ONLY_HISTOGRAM = "TELEMETRY_TEST_MOBILE_ONLY";
+
+ var default_histo = Telemetry.getHistogramById(DEFAULT_PRODUCTS_HISTOGRAM);
+ var desktop_histo = Telemetry.getHistogramById(DESKTOP_ONLY_HISTOGRAM);
+ var multiproduct_histo = Telemetry.getHistogramById(MULTIPRODUCT_HISTOGRAM);
+ var mobile_histo = Telemetry.getHistogramById(MOBILE_ONLY_HISTOGRAM);
+ default_histo.clear();
+ desktop_histo.clear();
+ multiproduct_histo.clear();
+ mobile_histo.clear();
+
+ default_histo.add(42);
+ desktop_histo.add(42);
+ multiproduct_histo.add(42);
+ mobile_histo.add(42);
+
+ let histograms = Telemetry.getSnapshotForHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+
+ Assert.ok(
+ DEFAULT_PRODUCTS_HISTOGRAM in histograms,
+ "Should have recorded default products histogram"
+ );
+ Assert.ok(
+ DESKTOP_ONLY_HISTOGRAM in histograms,
+ "Should have recorded desktop-only histogram"
+ );
+ Assert.ok(
+ MULTIPRODUCT_HISTOGRAM in histograms,
+ "Should have recorded multiproduct histogram"
+ );
+
+ Assert.ok(
+ !(MOBILE_ONLY_HISTOGRAM in histograms),
+ "Should not have recorded mobile-only histogram"
+ );
+ }
+);
+
+add_task(
+ {
+ skip_if: () => !gIsAndroid,
+ },
+ async function test_mobileSpecificHistograms() {
+ const DEFAULT_PRODUCTS_HISTOGRAM = "TELEMETRY_TEST_DEFAULT_PRODUCTS";
+ const DESKTOP_ONLY_HISTOGRAM = "TELEMETRY_TEST_DESKTOP_ONLY";
+ const MULTIPRODUCT_HISTOGRAM = "TELEMETRY_TEST_MULTIPRODUCT";
+ const MOBILE_ONLY_HISTOGRAM = "TELEMETRY_TEST_MOBILE_ONLY";
+
+ var default_histo = Telemetry.getHistogramById(DEFAULT_PRODUCTS_HISTOGRAM);
+ var desktop_histo = Telemetry.getHistogramById(DESKTOP_ONLY_HISTOGRAM);
+ var multiproduct_histo = Telemetry.getHistogramById(MULTIPRODUCT_HISTOGRAM);
+ var mobile_histo = Telemetry.getHistogramById(MOBILE_ONLY_HISTOGRAM);
+ default_histo.clear();
+ desktop_histo.clear();
+ multiproduct_histo.clear();
+ mobile_histo.clear();
+
+ default_histo.add(1);
+ desktop_histo.add(1);
+ multiproduct_histo.add(1);
+ mobile_histo.add(1);
+
+ let histograms = Telemetry.getSnapshotForHistograms(
+ "main",
+ false /* clear */
+ ).parent;
+
+ Assert.ok(
+ DEFAULT_PRODUCTS_HISTOGRAM in histograms,
+ "Should have recorded default products histogram"
+ );
+ Assert.ok(
+ MOBILE_ONLY_HISTOGRAM in histograms,
+ "Should have recorded mobile-only histogram"
+ );
+ Assert.ok(
+ MULTIPRODUCT_HISTOGRAM in histograms,
+ "Should have recorded multiproduct histogram"
+ );
+
+ Assert.ok(
+ !(DESKTOP_ONLY_HISTOGRAM in histograms),
+ "Should not have recorded desktop-only histogram"
+ );
+ }
+);
+
+add_task(async function test_productsOverride() {
+ Services.prefs.setBoolPref(
+ "toolkit.telemetry.testing.overrideProductsCheck",
+ true
+ );
+ const DEFAULT_PRODUCTS_HISTOGRAM = "TELEMETRY_TEST_DEFAULT_PRODUCTS";
+ const DESKTOP_ONLY_HISTOGRAM = "TELEMETRY_TEST_DESKTOP_ONLY";
+ const MULTIPRODUCT_HISTOGRAM = "TELEMETRY_TEST_MULTIPRODUCT";
+ const MOBILE_ONLY_HISTOGRAM = "TELEMETRY_TEST_MOBILE_ONLY";
+
+ var default_histo = Telemetry.getHistogramById(DEFAULT_PRODUCTS_HISTOGRAM);
+ var desktop_histo = Telemetry.getHistogramById(DESKTOP_ONLY_HISTOGRAM);
+ var multiproduct_histo = Telemetry.getHistogramById(MULTIPRODUCT_HISTOGRAM);
+ var mobile_histo = Telemetry.getHistogramById(MOBILE_ONLY_HISTOGRAM);
+ default_histo.clear();
+ desktop_histo.clear();
+ multiproduct_histo.clear();
+ mobile_histo.clear();
+
+ default_histo.add(1);
+ desktop_histo.add(1);
+ multiproduct_histo.add(1);
+ mobile_histo.add(1);
+
+ let histograms = Telemetry.getSnapshotForHistograms("main", false /* clear */)
+ .parent;
+
+ Assert.ok(
+ DEFAULT_PRODUCTS_HISTOGRAM in histograms,
+ "Should have recorded default products histogram"
+ );
+ Assert.ok(
+ MOBILE_ONLY_HISTOGRAM in histograms,
+ "Should have recorded mobile-only histogram"
+ );
+ Assert.ok(
+ MULTIPRODUCT_HISTOGRAM in histograms,
+ "Should have recorded multiproduct histogram"
+ );
+
+ Assert.ok(
+ DESKTOP_ONLY_HISTOGRAM in histograms,
+ "Should not have recorded desktop-only histogram"
+ );
+ Services.prefs.clearUserPref(
+ "toolkit.telemetry.testing.overrideProductsCheck"
+ );
+});
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_clearHistogramsOnSnapshot() {
+ const COUNT = "TELEMETRY_TEST_COUNT";
+ let h = Telemetry.getHistogramById(COUNT);
+ h.clear();
+ let snapshot;
+
+ // The first snapshot should be empty, nothing recorded.
+ snapshot = Telemetry.getSnapshotForHistograms("main", false /* clear */)
+ .parent;
+ Assert.ok(!(COUNT in snapshot));
+
+ // After recording into a histogram, the data should be in the snapshot. Don't delete it.
+ h.add(1);
+
+ Assert.equal(h.snapshot().sum, 1);
+ snapshot = Telemetry.getSnapshotForHistograms("main", false /* clear */)
+ .parent;
+ Assert.ok(COUNT in snapshot);
+ Assert.equal(snapshot[COUNT].sum, 1);
+
+ // After recording into a histogram again, the data should be updated and in the snapshot.
+ // Clean up after.
+ h.add(41);
+
+ Assert.equal(h.snapshot().sum, 42);
+ snapshot = Telemetry.getSnapshotForHistograms("main", true /* clear */)
+ .parent;
+ Assert.ok(COUNT in snapshot);
+ Assert.equal(snapshot[COUNT].sum, 42);
+
+ // Finally, no data should be in the snapshot.
+ Assert.equal(h.snapshot().sum, 0);
+ snapshot = Telemetry.getSnapshotForHistograms("main", false /* clear */)
+ .parent;
+ Assert.ok(!(COUNT in snapshot));
+ }
+);
+
+add_task(async function test_valid_os_smoketest() {
+ let nonExistingProbe;
+ let existingProbe;
+
+ switch (AppConstants.platform) {
+ case "linux":
+ nonExistingProbe = "TELEMETRY_TEST_OS_ANDROID_ONLY";
+ existingProbe = "TELEMETRY_TEST_OS_LINUX_ONLY";
+ break;
+ case "macosx":
+ nonExistingProbe = "TELEMETRY_TEST_OS_ANDROID_ONLY";
+ existingProbe = "TELEMETRY_TEST_OS_MAC_ONLY";
+ break;
+ case "win":
+ nonExistingProbe = "TELEMETRY_TEST_OS_ANDROID_ONLY";
+ existingProbe = "TELEMETRY_TEST_OS_WIN_ONLY";
+ break;
+ case "android":
+ nonExistingProbe = "TELEMETRY_TEST_OS_LINUX_ONLY";
+ existingProbe = "TELEMETRY_TEST_OS_ANDROID_ONLY";
+ break;
+ default:
+ /* Unknown OS. Let's not test OS-specific probes */
+ return;
+ }
+
+ Assert.throws(
+ () => Telemetry.getHistogramById(nonExistingProbe),
+ /NS_ERROR_FAILURE/,
+ `Should throw on ${nonExistingProbe} probe that's not available on ${AppConstants.platform}`
+ );
+
+ let h = Telemetry.getHistogramById(existingProbe);
+ h.clear();
+ h.add(1);
+ let snapshot = Telemetry.getSnapshotForHistograms("main", false /* clear */)
+ .parent;
+ Assert.ok(
+ existingProbe in snapshot,
+ `${existingProbe} should be recorded on ${AppConstants.platform}`
+ );
+ Assert.equal(snapshot[existingProbe].sum, 1);
+});
+
+add_task(async function test_multistore_individual_histogram() {
+ Telemetry.canRecordExtended = true;
+
+ let id;
+ let hist;
+ let snapshot;
+
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ hist = Telemetry.getHistogramById(id);
+ snapshot = hist.snapshot();
+ Assert.equal(0, snapshot.sum, `Histogram ${id} should be empty.`);
+ hist.add(1);
+ snapshot = hist.snapshot();
+ Assert.equal(
+ 1,
+ snapshot.sum,
+ `Histogram ${id} should have recorded one value.`
+ );
+ hist.clear();
+ snapshot = hist.snapshot();
+ Assert.equal(0, snapshot.sum, `Histogram ${id} should be cleared.`);
+
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ hist = Telemetry.getHistogramById(id);
+ snapshot = hist.snapshot();
+ Assert.equal(0, snapshot.sum, `Histogram ${id} should be empty.`);
+ hist.add(1);
+ snapshot = hist.snapshot();
+ Assert.equal(
+ 1,
+ snapshot.sum,
+ `Histogram ${id} should have recorded one value.`
+ );
+ hist.clear();
+ snapshot = hist.snapshot();
+ Assert.equal(0, snapshot.sum, `Histogram ${id} should be cleared.`);
+
+ // For a sync-only histogram, snapshot() returns undefined for the default ("main") store.
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ hist = Telemetry.getHistogramById(id);
+ snapshot = hist.snapshot();
+ Assert.equal(
+ undefined,
+ snapshot,
+ `Histogram ${id} should not be in the 'main' storage`
+ );
+ hist.add(1);
+ snapshot = hist.snapshot();
+ Assert.equal(
+ undefined,
+ snapshot,
+ `Histogram ${id} should not be in the 'main' storage`
+ );
+ hist.clear();
+ snapshot = hist.snapshot();
+ Assert.equal(
+ undefined,
+ snapshot,
+ `Histogram ${id} should not be in the 'main' storage`
+ );
+
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ hist = Telemetry.getKeyedHistogramById(id);
+ snapshot = hist.snapshot();
+ Assert.deepEqual({}, snapshot, `Histogram ${id} should be empty.`);
+ hist.add("key-a", 1);
+ snapshot = hist.snapshot();
+ Assert.equal(
+ 1,
+ snapshot["key-a"].sum,
+ `Histogram ${id} should have recorded one value.`
+ );
+ hist.clear();
+ snapshot = hist.snapshot();
+ Assert.deepEqual({}, snapshot, `Histogram ${id} should be cleared.`);
+
+ // For a sync-only keyed histogram, snapshot() returns undefined for the default ("main") store.
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ hist = Telemetry.getKeyedHistogramById(id);
+ snapshot = hist.snapshot();
+ Assert.equal(
+ undefined,
+ snapshot,
+ `Histogram ${id} should not be in the 'main' storage`
+ );
+ hist.add("key-a", 1);
+ snapshot = hist.snapshot();
+ Assert.equal(
+ undefined,
+ snapshot,
+ `Histogram ${id} should not be in the 'main' storage`
+ );
+ hist.clear();
+ snapshot = hist.snapshot();
+ Assert.equal(
+ undefined,
+ snapshot,
+ `Histogram ${id} should not be in the 'main' storage`
+ );
+});
+
+add_task(async function test_multistore_main_snapshot() {
+ Telemetry.canRecordExtended = true;
+ // Clear histograms
+ Telemetry.getSnapshotForHistograms("main", true);
+ Telemetry.getSnapshotForKeyedHistograms("main", true);
+
+ let id;
+ let hist;
+ let snapshot;
+
+ // Plain histograms
+
+ // Fill with data
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ hist = Telemetry.getHistogramById(id);
+ hist.add(1);
+
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ hist = Telemetry.getHistogramById(id);
+ hist.add(1);
+
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ hist = Telemetry.getHistogramById(id);
+ hist.add(1);
+
+ // Getting snapshot and NOT clearing (using default values for optional parameters)
+ snapshot = Telemetry.getSnapshotForHistograms().parent;
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+
+ // The data should still be there; take a snapshot and clear it this time.
+ snapshot = Telemetry.getSnapshotForHistograms("main", /* clear */ true)
+ .parent;
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+
+ // Should be empty after clearing
+ snapshot = Telemetry.getSnapshotForHistograms("main", /* clear */ false)
+ .parent;
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+
+ // Keyed histograms
+
+ // Fill with data
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ hist = Telemetry.getKeyedHistogramById(id);
+ hist.add("key-a", 1);
+
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ hist = Telemetry.getKeyedHistogramById(id);
+ hist.add("key-b", 1);
+
+ // Getting snapshot and NOT clearing (using default values for optional parameters)
+ snapshot = Telemetry.getSnapshotForKeyedHistograms().parent;
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+
+ // The data should still be there; take a snapshot and clear it this time.
+ snapshot = Telemetry.getSnapshotForKeyedHistograms("main", /* clear */ true)
+ .parent;
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+
+ // Should be empty after clearing
+ snapshot = Telemetry.getSnapshotForKeyedHistograms("main", /* clear */ false)
+ .parent;
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+});
+
+add_task(async function test_multistore_argument_handling() {
+ Telemetry.canRecordExtended = true;
+ // Clear histograms
+ Telemetry.getSnapshotForHistograms("main", true);
+ Telemetry.getSnapshotForHistograms("sync", true);
+ Telemetry.getSnapshotForKeyedHistograms("main", true);
+ Telemetry.getSnapshotForKeyedHistograms("sync", true);
+
+ let id;
+ let hist;
+ let snapshot;
+
+ // Plain Histograms
+
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ hist = Telemetry.getHistogramById(id);
+ hist.add(37);
+
+ // No argument
+ snapshot = hist.snapshot();
+ Assert.equal(37, snapshot.sum, `${id} should be in a default store snapshot`);
+
+ hist.clear();
+ snapshot = hist.snapshot();
+ Assert.equal(0, snapshot.sum, `${id} should be cleared in the default store`);
+
+ snapshot = hist.snapshot({ store: "sync" });
+ Assert.equal(
+ 37,
+ snapshot.sum,
+ `${id} should not have been cleared in the sync store`
+ );
+
+ Assert.throws(
+ () => hist.snapshot(2, "or", "more", "arguments"),
+ /one argument/,
+ "snapshot should check argument count"
+ );
+ Assert.throws(
+ () => hist.snapshot(2),
+ /object argument/,
+ "snapshot should check argument type"
+ );
+ Assert.throws(
+ () => hist.snapshot({}),
+ /property/,
+ "snapshot should check for object property"
+ );
+ Assert.throws(
+ () => hist.snapshot({ store: 1 }),
+ /string/,
+ "snapshot should check object property's type"
+ );
+
+ Assert.throws(
+ () => hist.clear(2, "or", "more", "arguments"),
+ /one argument/,
+ "clear should check argument count"
+ );
+ Assert.throws(
+ () => hist.clear(2),
+ /object argument/,
+ "clear should check argument type"
+ );
+ Assert.throws(
+ () => hist.clear({}),
+ /property/,
+ "clear should check for object property"
+ );
+ Assert.throws(
+ () => hist.clear({ store: 1 }),
+ /string/,
+ "clear should check object property's type"
+ );
+
+ // Keyed Histogram
+
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ hist = Telemetry.getKeyedHistogramById(id);
+ hist.add("key-1", 37);
+
+ // No argument
+ snapshot = hist.snapshot();
+ Assert.equal(
+ 37,
+ snapshot["key-1"].sum,
+ `${id} should be in a default store snapshot`
+ );
+
+ hist.clear();
+ snapshot = hist.snapshot();
+ Assert.ok(
+ !("key-1" in snapshot),
+ `${id} should be cleared in the default store`
+ );
+
+ snapshot = hist.snapshot({ store: "sync" });
+ Assert.equal(
+ 37,
+ snapshot["key-1"].sum,
+ `${id} should not have been cleared in the sync store`
+ );
+
+ Assert.throws(
+ () => hist.snapshot(2, "or", "more", "arguments"),
+ /one argument/,
+ "snapshot should check argument count"
+ );
+ Assert.throws(
+ () => hist.snapshot(2),
+ /object argument/,
+ "snapshot should check argument type"
+ );
+ Assert.throws(
+ () => hist.snapshot({}),
+ /property/,
+ "snapshot should check for object property"
+ );
+ Assert.throws(
+ () => hist.snapshot({ store: 1 }),
+ /string/,
+ "snapshot should check object property's type"
+ );
+
+ Assert.throws(
+ () => hist.clear(2, "or", "more", "arguments"),
+ /one argument/,
+ "clear should check argument count"
+ );
+ Assert.throws(
+ () => hist.clear(2),
+ /object argument/,
+ "clear should check argument type"
+ );
+ Assert.throws(
+ () => hist.clear({}),
+ /property/,
+ "clear should check for object property"
+ );
+ Assert.throws(
+ () => hist.clear({ store: 1 }),
+ /string/,
+ "clear should check object property's type"
+ );
+});
+
+add_task(async function test_multistore_sync_snapshot() {
+ Telemetry.canRecordExtended = true;
+ // Clear histograms
+ Telemetry.getSnapshotForHistograms("main", true);
+ Telemetry.getSnapshotForHistograms("sync", true);
+
+ let id;
+ let hist;
+ let snapshot;
+
+ // Plain histograms
+
+ // Fill with data
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ hist = Telemetry.getHistogramById(id);
+ hist.add(1);
+
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ hist = Telemetry.getHistogramById(id);
+ hist.add(1);
+
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ hist = Telemetry.getHistogramById(id);
+ hist.add(1);
+
+ // Getting snapshot and clearing
+ snapshot = Telemetry.getSnapshotForHistograms("main", /* clear */ true)
+ .parent;
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+
+ snapshot = Telemetry.getSnapshotForHistograms("sync", /* clear */ true)
+ .parent;
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a sync store snapshot`);
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a sync store snapshot`);
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ Assert.ok(id in snapshot, `${id} should be in a sync store snapshot`);
+});
+
+add_task(async function test_multistore_keyed_sync_snapshot() {
+ Telemetry.canRecordExtended = true;
+ // Clear histograms
+ Telemetry.getSnapshotForKeyedHistograms("main", true);
+ Telemetry.getSnapshotForKeyedHistograms("sync", true);
+
+ let id;
+ let hist;
+ let snapshot;
+
+ // Plain histograms
+
+ // Fill with data
+ id = "TELEMETRY_TEST_KEYED_LINEAR";
+ hist = Telemetry.getKeyedHistogramById(id);
+ hist.add("key-1", 1);
+
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ hist = Telemetry.getKeyedHistogramById(id);
+ hist.add("key-1", 1);
+
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ hist = Telemetry.getKeyedHistogramById(id);
+ hist.add("key-1", 1);
+
+ // Getting snapshot and clearing
+ snapshot = Telemetry.getSnapshotForKeyedHistograms("main", /* clear */ true)
+ .parent;
+ id = "TELEMETRY_TEST_KEYED_LINEAR";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a main store snapshot`);
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ Assert.ok(!(id in snapshot), `${id} should not be in a main store snapshot`);
+
+ snapshot = Telemetry.getSnapshotForKeyedHistograms("sync", /* clear */ true)
+ .parent;
+ id = "TELEMETRY_TEST_KEYED_LINEAR";
+ Assert.ok(!(id in snapshot), `${id} should not be in a sync store snapshot`);
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ Assert.ok(id in snapshot, `${id} should be in a sync store snapshot`);
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ Assert.ok(id in snapshot, `${id} should be in a sync store snapshot`);
+});
+
+add_task(async function test_multistore_plain_individual_snapshot() {
+ Telemetry.canRecordExtended = true;
+ // Clear histograms
+ Telemetry.getSnapshotForHistograms("main", true);
+ Telemetry.getSnapshotForHistograms("sync", true);
+
+ let id;
+ let hist;
+
+ id = "TELEMETRY_TEST_MAIN_ONLY";
+ hist = Telemetry.getHistogramById(id);
+
+ hist.add(37);
+ Assert.deepEqual(37, hist.snapshot({ store: "main" }).sum);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "sync" }));
+
+ hist.clear({ store: "main" });
+ Assert.deepEqual(0, hist.snapshot({ store: "main" }).sum);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "sync" }));
+
+ id = "TELEMETRY_TEST_MULTIPLE_STORES";
+ hist = Telemetry.getHistogramById(id);
+
+ hist.add(37);
+ Assert.deepEqual(37, hist.snapshot({ store: "main" }).sum);
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" }).sum);
+
+ hist.clear({ store: "main" });
+ Assert.deepEqual(0, hist.snapshot({ store: "main" }).sum);
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" }).sum);
+
+ hist.add(3);
+ Assert.deepEqual(3, hist.snapshot({ store: "main" }).sum);
+ Assert.deepEqual(40, hist.snapshot({ store: "sync" }).sum);
+
+ hist.clear({ store: "sync" });
+ Assert.deepEqual(3, hist.snapshot({ store: "main" }).sum);
+ Assert.deepEqual(0, hist.snapshot({ store: "sync" }).sum);
+
+ id = "TELEMETRY_TEST_SYNC_ONLY";
+ hist = Telemetry.getHistogramById(id);
+
+ hist.add(37);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" }).sum);
+
+ hist.clear({ store: "main" });
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" }).sum);
+
+ hist.add(3);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(40, hist.snapshot({ store: "sync" }).sum);
+
+ hist.clear({ store: "sync" });
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(0, hist.snapshot({ store: "sync" }).sum);
+});
+
+add_task(async function test_multistore_keyed_individual_snapshot() {
+ Telemetry.canRecordExtended = true;
+ // Clear histograms
+ Telemetry.getSnapshotForKeyedHistograms("main", true);
+ Telemetry.getSnapshotForKeyedHistograms("sync", true);
+
+ let id;
+ let hist;
+
+ id = "TELEMETRY_TEST_KEYED_LINEAR";
+ hist = Telemetry.getKeyedHistogramById(id);
+
+ hist.add("key-1", 37);
+ Assert.deepEqual(37, hist.snapshot({ store: "main" })["key-1"].sum);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "sync" }));
+
+ hist.clear({ store: "main" });
+ Assert.deepEqual({}, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(undefined, hist.snapshot({ store: "sync" }));
+
+ hist.add("key-1", 4);
+ hist.clear({ store: "sync" });
+ Assert.deepEqual(4, hist.snapshot({ store: "main" })["key-1"].sum);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "sync" }));
+
+ id = "TELEMETRY_TEST_KEYED_MULTIPLE_STORES";
+ hist = Telemetry.getKeyedHistogramById(id);
+
+ hist.add("key-1", 37);
+ Assert.deepEqual(37, hist.snapshot({ store: "main" })["key-1"].sum);
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" })["key-1"].sum);
+
+ hist.clear({ store: "main" });
+ Assert.deepEqual({}, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" })["key-1"].sum);
+
+ hist.add("key-1", 3);
+ Assert.deepEqual(3, hist.snapshot({ store: "main" })["key-1"].sum);
+ Assert.deepEqual(40, hist.snapshot({ store: "sync" })["key-1"].sum);
+
+ hist.clear({ store: "sync" });
+ Assert.deepEqual(3, hist.snapshot({ store: "main" })["key-1"].sum);
+ Assert.deepEqual({}, hist.snapshot({ store: "sync" }));
+
+ id = "TELEMETRY_TEST_KEYED_SYNC_ONLY";
+ hist = Telemetry.getKeyedHistogramById(id);
+
+ hist.add("key-1", 37);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" })["key-1"].sum);
+
+ hist.clear({ store: "main" });
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(37, hist.snapshot({ store: "sync" })["key-1"].sum);
+
+ hist.add("key-1", 3);
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual(40, hist.snapshot({ store: "sync" })["key-1"].sum);
+
+ hist.clear({ store: "sync" });
+ Assert.deepEqual(undefined, hist.snapshot({ store: "main" }));
+ Assert.deepEqual({}, hist.snapshot({ store: "sync" }));
+});
+
+add_task(async function test_can_record_in_process_regression_bug_1530361() {
+ Telemetry.getSnapshotForHistograms("main", true);
+
+ // The socket and gpu processes should not have any histograms.
+ // Flag and count histograms have defaults, so if we're accidentally recording them
+ // in these processes they'd show up even immediately after being cleared.
+ let snapshot = Telemetry.getSnapshotForHistograms("main", true);
+
+ Assert.deepEqual(
+ snapshot.gpu,
+ {},
+ "No histograms should have been recorded for the gpu process"
+ );
+ Assert.deepEqual(
+ snapshot.socket,
+ {},
+ "No histograms should have been recorded for the socket process"
+ );
+});
+
+add_task(function test_knows_its_name() {
+ let h;
+
+ // Plain histograms
+ const histNames = [
+ "TELEMETRY_TEST_FLAG",
+ "TELEMETRY_TEST_COUNT",
+ "TELEMETRY_TEST_CATEGORICAL",
+ "TELEMETRY_TEST_EXPIRED",
+ ];
+
+ for (let name of histNames) {
+ h = Telemetry.getHistogramById(name);
+ Assert.equal(name, h.name());
+ }
+
+ // Keyed histograms
+ const keyedHistNames = [
+ "TELEMETRY_TEST_KEYED_EXPONENTIAL",
+ "TELEMETRY_TEST_KEYED_BOOLEAN",
+ "TELEMETRY_TEST_EXPIRED_KEYED",
+ ];
+
+ for (let name of keyedHistNames) {
+ h = Telemetry.getKeyedHistogramById(name);
+ Assert.equal(name, h.name());
+ }
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js b/toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js
new file mode 100644
index 0000000000..36a77d0e2b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js
@@ -0,0 +1,145 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* A testcase to make sure reading late writes stacks works. */
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+
+// Constants from prio.h for nsIFileOutputStream.init
+const PR_WRONLY = 0x2;
+const PR_CREATE_FILE = 0x8;
+const PR_TRUNCATE = 0x20;
+const RW_OWNER = parseInt("0600", 8);
+
+const STACK_SUFFIX1 = "stack1.txt";
+const STACK_SUFFIX2 = "stack2.txt";
+const STACK_BOGUS_SUFFIX = "bogus.txt";
+const LATE_WRITE_PREFIX = "Telemetry.LateWriteFinal-";
+
+// The names and IDs don't matter, but the format of the IDs does.
+const LOADED_MODULES = {
+ "4759A7E6993548C89CAF716A67EC242D00": "libtest.so",
+ F77AF15BB8D6419FA875954B4A3506CA00: "libxul.so",
+ "1E2F7FB590424E8F93D60BB88D66B8C500": "libc.so",
+ E4D6D70CC09A63EF8B88D532F867858800: "libmodμles.so",
+};
+const N_MODULES = Object.keys(LOADED_MODULES).length;
+
+// Format of individual items is [index, offset-in-library].
+const STACK1 = [
+ [0, 0],
+ [1, 1],
+ [2, 2],
+ [3, 3],
+];
+const STACK2 = [
+ [0, 0],
+ [1, 5],
+ [2, 10],
+ [3, 15],
+];
+// XXX The only error checking is for a zero-sized stack.
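+// An empty stack should therefore be dropped by the reader; only STACK1 and
+// STACK2 are expected to show up in Telemetry.lateWrites.stacks below.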
+const STACK_BOGUS = [];
+
+function write_string_to_file(file, contents) {
+ let ostream = Cc[
+ "@mozilla.org/network/safe-file-output-stream;1"
+ ].createInstance(Ci.nsIFileOutputStream);
+ ostream.init(
+ file,
+ PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
+ RW_OWNER,
+ ostream.DEFER_OPEN
+ );
+
+ var bos = Cc["@mozilla.org/binaryoutputstream;1"].createInstance(
+ Ci.nsIBinaryOutputStream
+ );
+ bos.setOutputStream(ostream);
+
+ let utf8 = new TextEncoder("utf-8").encode(contents);
+ bos.writeByteArray(utf8);
+ ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
+ ostream.close();
+}
+
+function construct_file(suffix) {
+ let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let file = profileDirectory.clone();
+ file.append(LATE_WRITE_PREFIX + suffix);
+ return file;
+}
+
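+// Write a fake late-writes file in the format the late-write reader expects:
+// the module count, one "<id> <name>" line per loaded module, then the frame
+// count, followed by one "<module index> <hex offset>" line per stack frame.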
+function write_late_writes_file(stack, suffix) {
+ let file = construct_file(suffix);
+ let contents = N_MODULES + "\n";
+ for (let id in LOADED_MODULES) {
+ contents += id + " " + LOADED_MODULES[id] + "\n";
+ }
+
+ contents += stack.length + "\n";
+ for (let element of stack) {
+ contents += element[0] + " " + element[1].toString(16) + "\n";
+ }
+
+ write_string_to_file(file, contents);
+}
+
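+// Write the fake stack files, check that lateWrites starts out empty, then let
+// asyncFetchTelemetryData parse (and delete) the files before actual_test runs.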
+function run_test() {
+ do_get_profile();
+
+ write_late_writes_file(STACK1, STACK_SUFFIX1);
+ write_late_writes_file(STACK2, STACK_SUFFIX2);
+ write_late_writes_file(STACK_BOGUS, STACK_BOGUS_SUFFIX);
+
+ let lateWrites = Telemetry.lateWrites;
+ Assert.ok("memoryMap" in lateWrites);
+ Assert.equal(lateWrites.memoryMap.length, 0);
+ Assert.ok("stacks" in lateWrites);
+ Assert.equal(lateWrites.stacks.length, 0);
+
+ do_test_pending();
+ Telemetry.asyncFetchTelemetryData(function() {
+ actual_test();
+ });
+}
+
+function actual_test() {
+ Assert.ok(!construct_file(STACK_SUFFIX1).exists());
+ Assert.ok(!construct_file(STACK_SUFFIX2).exists());
+ Assert.ok(!construct_file(STACK_BOGUS_SUFFIX).exists());
+
+ let lateWrites = Telemetry.lateWrites;
+
+ Assert.ok("memoryMap" in lateWrites);
+ Assert.equal(lateWrites.memoryMap.length, N_MODULES);
+ for (let id in LOADED_MODULES) {
+ let matchingLibrary = lateWrites.memoryMap.filter(function(
+ library,
+ idx,
+ array
+ ) {
+ return library[1] == id;
+ });
+ Assert.equal(matchingLibrary.length, 1);
+ let library = matchingLibrary[0];
+ let name = library[0];
+ Assert.equal(LOADED_MODULES[id], name);
+ }
+
+ Assert.ok("stacks" in lateWrites);
+ Assert.equal(lateWrites.stacks.length, 2);
+ let uneval_STACKS = [uneval(STACK1), uneval(STACK2)];
+ let first_stack = lateWrites.stacks[0];
+ let second_stack = lateWrites.stacks[1];
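+ // stackChecker returns a filter predicate that matches stacks whose uneval()
+ // serialization equals that of the given canonical stack.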
+ function stackChecker(canonicalStack) {
+ let unevalCanonicalStack = uneval(canonicalStack);
+ return function(obj, idx, array) {
+ return unevalCanonicalStack == obj;
+ };
+ }
+ Assert.equal(uneval_STACKS.filter(stackChecker(first_stack)).length, 1);
+ Assert.equal(uneval_STACKS.filter(stackChecker(second_stack)).length, 1);
+
+ do_test_finished();
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js b/toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js
new file mode 100644
index 0000000000..ecaf87944b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js
@@ -0,0 +1,58 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* A testcase to make sure reading the failed profile lock count works. */
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+
+const LOCK_FILE_NAME = "Telemetry.FailedProfileLocks.txt";
+const N_FAILED_LOCKS = 10;
+
+// Constants from prio.h for nsIFileOutputStream.init
+const PR_WRONLY = 0x2;
+const PR_CREATE_FILE = 0x8;
+const PR_TRUNCATE = 0x20;
+const RW_OWNER = parseInt("0600", 8);
+
+function write_string_to_file(file, contents) {
+ let ostream = Cc[
+ "@mozilla.org/network/safe-file-output-stream;1"
+ ].createInstance(Ci.nsIFileOutputStream);
+ ostream.init(
+ file,
+ PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
+ RW_OWNER,
+ ostream.DEFER_OPEN
+ );
+ ostream.write(contents, contents.length);
+ ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
+ ostream.close();
+}
+
+function construct_file() {
+ let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let file = profileDirectory.clone();
+ file.append(LOCK_FILE_NAME);
+ return file;
+}
+
+function run_test() {
+ do_get_profile();
+
+ Assert.equal(Telemetry.failedProfileLockCount, 0);
+
+ write_string_to_file(construct_file(), N_FAILED_LOCKS.toString());
+
+ // Make sure that we're not eagerly reading the count now that the
+ // file exists.
+ Assert.equal(Telemetry.failedProfileLockCount, 0);
+
+ do_test_pending();
+ Telemetry.asyncFetchTelemetryData(actual_test);
+}
+
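+// Once the async fetch completes, the count should reflect the file contents
+// and the lock file should have been removed.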
+function actual_test() {
+ Assert.equal(Telemetry.failedProfileLockCount, N_FAILED_LOCKS);
+ Assert.ok(!construct_file().exists());
+ do_test_finished();
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js b/toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js
new file mode 100644
index 0000000000..479823c4cd
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js
@@ -0,0 +1,348 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Test the TelemetryReportingPolicy: user notification state, policy version
+// handling, and the gating of ping upload.
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryReportingPolicy.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Timer.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/UpdateUtils.jsm", this);
+
+const TEST_CHANNEL = "TestChannelABC";
+
+const PREF_MINIMUM_CHANNEL_POLICY_VERSION =
+ TelemetryUtils.Preferences.MinimumPolicyVersion + ".channel-" + TEST_CHANNEL;
+
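+// Replace the policy's infobar-timeout helpers so tests can capture the timeout
+// value that would be used instead of actually waiting for it.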
+function fakeShowPolicyTimeout(set, clear) {
+ let reportingPolicy = ChromeUtils.import(
+ "resource://gre/modules/TelemetryReportingPolicy.jsm",
+ null
+ );
+ reportingPolicy.Policy.setShowInfobarTimeout = set;
+ reportingPolicy.Policy.clearShowInfobarTimeout = clear;
+}
+
+function fakeResetAcceptedPolicy() {
+ Preferences.reset(TelemetryUtils.Preferences.AcceptedPolicyDate);
+ Preferences.reset(TelemetryUtils.Preferences.AcceptedPolicyVersion);
+}
+
+function setMinimumPolicyVersion(aNewPolicyVersion) {
+ const CHANNEL_NAME = UpdateUtils.getUpdateChannel(false);
+ // We might have channel-dependent minimum policy versions.
+ const CHANNEL_DEPENDENT_PREF =
+ TelemetryUtils.Preferences.MinimumPolicyVersion +
+ ".channel-" +
+ CHANNEL_NAME;
+
+ // Does the channel-dependent pref exist? If so, set its value.
+ if (Preferences.get(CHANNEL_DEPENDENT_PREF, undefined)) {
+ Preferences.set(CHANNEL_DEPENDENT_PREF, aNewPolicyVersion);
+ return;
+ }
+
+ // We don't have a channel-specific minimum, so set the common one.
+ Preferences.set(
+ TelemetryUtils.Preferences.MinimumPolicyVersion,
+ aNewPolicyVersion
+ );
+}
+
+add_task(async function test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile(true);
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ finishAddonManagerStartup();
+ fakeIntlReady();
+
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ // Don't bypass the notifications in this test; we'll fake them.
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.BypassNotification,
+ false
+ );
+
+ TelemetryReportingPolicy.setup();
+});
+
+add_task(
+ {
+ // This test initialises the search service, but that doesn't currently
+ // work on Android.
+ skip_if: () => AppConstants.platform == "android",
+ },
+ async function test_firstRun() {
+ await Services.search.init();
+
+ const FIRST_RUN_TIMEOUT_MSEC = 60 * 1000; // 60s
+ const OTHER_RUNS_TIMEOUT_MSEC = 10 * 1000; // 10s
+
+ Preferences.reset(TelemetryUtils.Preferences.FirstRun);
+
+ let startupTimeout = 0;
+ fakeShowPolicyTimeout(
+ (callback, timeout) => (startupTimeout = timeout),
+ () => {}
+ );
+ TelemetryReportingPolicy.reset();
+
+ Services.obs.notifyObservers(null, "sessionstore-windows-restored");
+ Assert.equal(
+ startupTimeout,
+ FIRST_RUN_TIMEOUT_MSEC,
+ "The infobar display timeout should be 60s on the first run."
+ );
+
+ // Run again, and check that we actually wait only 10 seconds.
+ TelemetryReportingPolicy.reset();
+ Services.obs.notifyObservers(null, "sessionstore-windows-restored");
+ Assert.equal(
+ startupTimeout,
+ OTHER_RUNS_TIMEOUT_MSEC,
+ "The infobar display timeout should be 10s on other runs."
+ );
+ }
+);
+
+add_task(async function test_prefs() {
+ TelemetryReportingPolicy.reset();
+
+ let now = fakeNow(2009, 11, 18);
+
+ // If the date is not valid (earlier than 2012), we don't regard the policy as accepted.
+ TelemetryReportingPolicy.testInfobarShown();
+ Assert.ok(!TelemetryReportingPolicy.testIsUserNotified());
+ Assert.equal(
+ Preferences.get(TelemetryUtils.Preferences.AcceptedPolicyDate, null),
+ 0,
+ "Invalid dates should not make the policy accepted."
+ );
+
+ // Check that the notification date and version are correctly saved to the prefs.
+ now = fakeNow(2012, 11, 18);
+ TelemetryReportingPolicy.testInfobarShown();
+ Assert.equal(
+ Preferences.get(TelemetryUtils.Preferences.AcceptedPolicyDate, null),
+ now.getTime(),
+ "A valid date must correctly be saved."
+ );
+
+ // Now that user is notified, check if we are allowed to upload.
+ Assert.ok(
+ TelemetryReportingPolicy.canUpload(),
+ "We must be able to upload after the policy is accepted."
+ );
+
+ // Disable submission and check that we're no longer allowed to upload.
+ Preferences.set(TelemetryUtils.Preferences.DataSubmissionEnabled, false);
+ Assert.ok(
+ !TelemetryReportingPolicy.canUpload(),
+ "We must not be able to upload if data submission is disabled."
+ );
+
+ // Turn the submission back on.
+ Preferences.set(TelemetryUtils.Preferences.DataSubmissionEnabled, true);
+ Assert.ok(
+ TelemetryReportingPolicy.canUpload(),
+ "We must be able to upload if data submission is enabled and the policy was accepted."
+ );
+
+ // Set a new minimum policy version and check that user is no longer notified.
+ let newMinimum =
+ Preferences.get(TelemetryUtils.Preferences.CurrentPolicyVersion, 1) + 1;
+ setMinimumPolicyVersion(newMinimum);
+ Assert.ok(
+ !TelemetryReportingPolicy.testIsUserNotified(),
+ "A greater minimum policy version must invalidate the policy and disable upload."
+ );
+
+ // Eventually accept the policy and make sure user is notified.
+ Preferences.set(TelemetryUtils.Preferences.CurrentPolicyVersion, newMinimum);
+ TelemetryReportingPolicy.testInfobarShown();
+ Assert.ok(
+ TelemetryReportingPolicy.testIsUserNotified(),
+ "Accepting the policy again should show the user as notified."
+ );
+ Assert.ok(
+ TelemetryReportingPolicy.canUpload(),
+ "Accepting the policy again should let us upload data."
+ );
+
+ // Set a new, per channel, minimum policy version. Start by setting a test current channel.
+ let defaultPrefs = new Preferences({ defaultBranch: true });
+ defaultPrefs.set("app.update.channel", TEST_CHANNEL);
+
+ // Increase and set the new minimum version, then check that we're not notified anymore.
+ newMinimum++;
+ Preferences.set(PREF_MINIMUM_CHANNEL_POLICY_VERSION, newMinimum);
+ Assert.ok(
+ !TelemetryReportingPolicy.testIsUserNotified(),
+ "Increasing the minimum policy version should invalidate the policy."
+ );
+
+ // Eventually accept the policy and make sure user is notified.
+ Preferences.set(TelemetryUtils.Preferences.CurrentPolicyVersion, newMinimum);
+ TelemetryReportingPolicy.testInfobarShown();
+ Assert.ok(
+ TelemetryReportingPolicy.testIsUserNotified(),
+ "Accepting the policy again should show the user as notified."
+ );
+ Assert.ok(
+ TelemetryReportingPolicy.canUpload(),
+ "Accepting the policy again should let us upload data."
+ );
+});
+
+add_task(async function test_migratePrefs() {
+ const DEPRECATED_FHR_PREFS = {
+ "datareporting.policy.dataSubmissionPolicyAccepted": true,
+ "datareporting.policy.dataSubmissionPolicyBypassAcceptance": true,
+ "datareporting.policy.dataSubmissionPolicyResponseType": "foxyeah",
+ "datareporting.policy.dataSubmissionPolicyResponseTime": Date.now().toString(),
+ };
+
+ // Make sure the preferences are set before setting up the policy.
+ for (let name in DEPRECATED_FHR_PREFS) {
+ Preferences.set(name, DEPRECATED_FHR_PREFS[name]);
+ }
+ // Set up the policy.
+ TelemetryReportingPolicy.reset();
+ // They should have been removed by now.
+ for (let name in DEPRECATED_FHR_PREFS) {
+ Assert.ok(!Preferences.has(name), name + " should have been removed.");
+ }
+});
+
+add_task(async function test_userNotifiedOfCurrentPolicy() {
+ fakeResetAcceptedPolicy();
+ TelemetryReportingPolicy.reset();
+
+ // User should be reported as not notified by default.
+ Assert.ok(
+ !TelemetryReportingPolicy.testIsUserNotified(),
+ "The initial state should be unnotified."
+ );
+
+ // Forcing a policy version should not automatically make the user notified.
+ Preferences.set(
+ TelemetryUtils.Preferences.AcceptedPolicyVersion,
+ TelemetryReportingPolicy.DEFAULT_DATAREPORTING_POLICY_VERSION
+ );
+ Assert.ok(
+ !TelemetryReportingPolicy.testIsUserNotified(),
+ "The default state of the date should have a time of 0 and it should therefore fail"
+ );
+
+ // Showing the notification bar should make the user notified.
+ fakeNow(2012, 11, 11);
+ TelemetryReportingPolicy.testInfobarShown();
+ Assert.ok(
+ TelemetryReportingPolicy.testIsUserNotified(),
+ "Using the proper API causes user notification to report as true."
+ );
+
+ // It is assumed that later versions of the policy will incorporate previous
+ // ones, therefore this should also return true.
+ let newVersion =
+ Preferences.get(TelemetryUtils.Preferences.CurrentPolicyVersion, 1) + 1;
+ Preferences.set(TelemetryUtils.Preferences.AcceptedPolicyVersion, newVersion);
+ Assert.ok(
+ TelemetryReportingPolicy.testIsUserNotified(),
+ "A future version of the policy should pass."
+ );
+
+ newVersion =
+ Preferences.get(TelemetryUtils.Preferences.CurrentPolicyVersion, 1) - 1;
+ Preferences.set(TelemetryUtils.Preferences.AcceptedPolicyVersion, newVersion);
+ Assert.ok(
+ !TelemetryReportingPolicy.testIsUserNotified(),
+ "A previous version of the policy should fail."
+ );
+});
+
+add_task(async function test_canSend() {
+ const TEST_PING_TYPE = "test-ping";
+
+ PingServer.start();
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+
+ await TelemetryController.testReset();
+ TelemetryReportingPolicy.reset();
+
+ // User should be reported as not notified by default.
+ Assert.ok(
+ !TelemetryReportingPolicy.testIsUserNotified(),
+ "The initial state should be unnotified."
+ );
+
+ // Assert if we receive any ping before the policy is accepted.
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Should not have received any pings now")
+ );
+ await TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+ // Reset the ping handler.
+ PingServer.resetPingHandler();
+
+ // Fake the infobar: this should also trigger the ping send task.
+ TelemetryReportingPolicy.testInfobarShown();
+ let ping = await PingServer.promiseNextPings(1);
+ Assert.equal(ping.length, 1, "We should have received one ping.");
+ Assert.equal(
+ ping[0].type,
+ TEST_PING_TYPE,
+ "We should have received the previous ping."
+ );
+
+ // Submit another ping, to make sure it gets sent.
+ await TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+ // Get the ping and check its type.
+ ping = await PingServer.promiseNextPings(1);
+ Assert.equal(ping.length, 1, "We should have received one ping.");
+ Assert.equal(
+ ping[0].type,
+ TEST_PING_TYPE,
+ "We should have received the new ping."
+ );
+
+ // Fake a restart with a pending ping.
+ await TelemetryController.addPendingPing(TEST_PING_TYPE, {});
+ await TelemetryController.testReset();
+
+ // We should be immediately sending the ping out.
+ ping = await PingServer.promiseNextPings(1);
+ Assert.equal(ping.length, 1, "We should have received one ping.");
+ Assert.equal(
+ ping[0].type,
+ TEST_PING_TYPE,
+ "We should have received the pending ping."
+ );
+
+ // Submit another ping, to make sure it gets sent.
+ await TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+ // Get the ping and check its type.
+ ping = await PingServer.promiseNextPings(1);
+ Assert.equal(ping.length, 1, "We should have received one ping.");
+ Assert.equal(
+ ping[0].type,
+ TEST_PING_TYPE,
+ "We should have received the new ping."
+ );
+
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js
new file mode 100644
index 0000000000..c15287eaf8
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js
@@ -0,0 +1,1090 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetryTestUtils",
+ "resource://testing-common/TelemetryTestUtils.jsm"
+);
+
+const UINT_SCALAR = "telemetry.test.unsigned_int_kind";
+const STRING_SCALAR = "telemetry.test.string_kind";
+const BOOLEAN_SCALAR = "telemetry.test.boolean_kind";
+const KEYED_UINT_SCALAR = "telemetry.test.keyed_unsigned_int";
+const KEYED_EXCEED_SCALAR = "telemetry.keyed_scalars_exceed_limit";
+
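+// Scalar snapshots have the shape { <process>: { <scalarName>: <value>, ... } };
+// indexing by process name (e.g. "parent" or "dynamic") yields that process'
+// scalars, and an empty object is returned when nothing was recorded.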
+function getProcessScalars(aProcessName, aKeyed = false, aClear = false) {
+ const scalars = aKeyed
+ ? Telemetry.getSnapshotForKeyedScalars("main", aClear)[aProcessName]
+ : Telemetry.getSnapshotForScalars("main", aClear)[aProcessName];
+ return scalars || {};
+}
+
+add_task(async function test_serializationFormat() {
+ Telemetry.clearScalars();
+
+ // Set the scalars to a known value.
+ const expectedUint = 3785;
+ const expectedString = "some value";
+ Telemetry.scalarSet(UINT_SCALAR, expectedUint);
+ Telemetry.scalarSet(STRING_SCALAR, expectedString);
+ Telemetry.scalarSet(BOOLEAN_SCALAR, true);
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "first_key", 1234);
+
+ // Get a snapshot of the scalars for the main process (internally called "default").
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+
+ // Check that they are serialized to the correct format.
+ Assert.equal(
+ typeof scalars[UINT_SCALAR],
+ "number",
+ UINT_SCALAR + " must be serialized to the correct format."
+ );
+ Assert.ok(
+ Number.isInteger(scalars[UINT_SCALAR]),
+ UINT_SCALAR + " must be a finite integer."
+ );
+ Assert.equal(
+ scalars[UINT_SCALAR],
+ expectedUint,
+ UINT_SCALAR + " must have the correct value."
+ );
+ Assert.equal(
+ typeof scalars[STRING_SCALAR],
+ "string",
+ STRING_SCALAR + " must be serialized to the correct format."
+ );
+ Assert.equal(
+ scalars[STRING_SCALAR],
+ expectedString,
+ STRING_SCALAR + " must have the correct value."
+ );
+ Assert.equal(
+ typeof scalars[BOOLEAN_SCALAR],
+ "boolean",
+ BOOLEAN_SCALAR + " must be serialized to the correct format."
+ );
+ Assert.equal(
+ scalars[BOOLEAN_SCALAR],
+ true,
+ BOOLEAN_SCALAR + " must have the correct value."
+ );
+ Assert.ok(
+ !(KEYED_UINT_SCALAR in scalars),
+ "Keyed scalars must be reported in a separate section."
+ );
+});
+
+add_task(async function test_keyedSerializationFormat() {
+ Telemetry.clearScalars();
+
+ const expectedKey = "first_key";
+ const expectedOtherKey = "漢語";
+ const expectedUint = 3785;
+ const expectedOtherValue = 1107;
+
+ Telemetry.scalarSet(UINT_SCALAR, expectedUint);
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, expectedKey, expectedUint);
+ Telemetry.keyedScalarSet(
+ KEYED_UINT_SCALAR,
+ expectedOtherKey,
+ expectedOtherValue
+ );
+
+ // Get a snapshot of the scalars.
+ const keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+
+ Assert.ok(
+ !(UINT_SCALAR in keyedScalars),
+ UINT_SCALAR + " must not be serialized with the keyed scalars."
+ );
+ Assert.ok(
+ KEYED_UINT_SCALAR in keyedScalars,
+ KEYED_UINT_SCALAR + " must be serialized with the keyed scalars."
+ );
+ Assert.equal(
+ Object.keys(keyedScalars[KEYED_UINT_SCALAR]).length,
+ 2,
+ "The keyed scalar must contain exactly 2 keys."
+ );
+ Assert.ok(
+ expectedKey in keyedScalars[KEYED_UINT_SCALAR],
+ KEYED_UINT_SCALAR + " must contain the expected keys."
+ );
+ Assert.ok(
+ expectedOtherKey in keyedScalars[KEYED_UINT_SCALAR],
+ KEYED_UINT_SCALAR + " must contain the expected keys."
+ );
+ Assert.ok(
+ Number.isInteger(keyedScalars[KEYED_UINT_SCALAR][expectedKey]),
+ KEYED_UINT_SCALAR + "." + expectedKey + " must be a finite integer."
+ );
+ Assert.equal(
+ keyedScalars[KEYED_UINT_SCALAR][expectedKey],
+ expectedUint,
+ KEYED_UINT_SCALAR + "." + expectedKey + " must have the correct value."
+ );
+ Assert.equal(
+ keyedScalars[KEYED_UINT_SCALAR][expectedOtherKey],
+ expectedOtherValue,
+ KEYED_UINT_SCALAR + "." + expectedOtherKey + " must have the correct value."
+ );
+});
+
+add_task(async function test_nonexistingScalar() {
+ const NON_EXISTING_SCALAR = "telemetry.test.non_existing";
+
+ Telemetry.clearScalars();
+
+ // The JS API must not throw when used incorrectly but rather print
+ // a message to the console.
+ Telemetry.scalarAdd(NON_EXISTING_SCALAR, 11715);
+ Telemetry.scalarSet(NON_EXISTING_SCALAR, 11715);
+ Telemetry.scalarSetMaximum(NON_EXISTING_SCALAR, 11715);
+
+ // Make sure we do not throw on any operation for non-existing scalars.
+ Telemetry.keyedScalarAdd(NON_EXISTING_SCALAR, "some_key", 11715);
+ Telemetry.keyedScalarSet(NON_EXISTING_SCALAR, "some_key", 11715);
+ Telemetry.keyedScalarSetMaximum(NON_EXISTING_SCALAR, "some_key", 11715);
+
+ // Get a snapshot of the scalars.
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+
+ Assert.ok(
+ !(NON_EXISTING_SCALAR in scalars),
+ "The non existing scalar must not be persisted."
+ );
+
+ const keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+
+ Assert.ok(
+ !(NON_EXISTING_SCALAR in keyedScalars),
+ "The non existing keyed scalar must not be persisted."
+ );
+});
+
+add_task(async function test_expiredScalar() {
+ const EXPIRED_SCALAR = "telemetry.test.expired";
+ const EXPIRED_KEYED_SCALAR = "telemetry.test.keyed_expired";
+ const UNEXPIRED_SCALAR = "telemetry.test.unexpired";
+
+ Telemetry.clearScalars();
+
+ // Try to set the expired scalar to some value. We will not be recording the value,
+ // but we shouldn't throw.
+ Telemetry.scalarAdd(EXPIRED_SCALAR, 11715);
+ Telemetry.scalarSet(EXPIRED_SCALAR, 11715);
+ Telemetry.scalarSetMaximum(EXPIRED_SCALAR, 11715);
+ Telemetry.keyedScalarAdd(EXPIRED_KEYED_SCALAR, "some_key", 11715);
+ Telemetry.keyedScalarSet(EXPIRED_KEYED_SCALAR, "some_key", 11715);
+ Telemetry.keyedScalarSetMaximum(EXPIRED_KEYED_SCALAR, "some_key", 11715);
+
+ // The unexpired scalar has an expiration version, but far away in the future.
+ const expectedValue = 11716;
+ Telemetry.scalarSet(UNEXPIRED_SCALAR, expectedValue);
+
+ // Get a snapshot of the scalars.
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ const keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+
+ Assert.ok(
+ !(EXPIRED_SCALAR in scalars),
+ "The expired scalar must not be persisted."
+ );
+ Assert.equal(
+ scalars[UNEXPIRED_SCALAR],
+ expectedValue,
+ "The unexpired scalar must be persisted with the correct value."
+ );
+ Assert.ok(
+ !(EXPIRED_KEYED_SCALAR in keyedScalars),
+ "The expired keyed scalar must not be persisted."
+ );
+});
+
+add_task(async function test_unsignedIntScalar() {
+ let checkScalar = expectedValue => {
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ Assert.equal(
+ scalars[UINT_SCALAR],
+ expectedValue,
+ UINT_SCALAR + " must contain the expected value."
+ );
+ };
+
+ Telemetry.clearScalars();
+
+ // Let's start with an accumulation without a prior set.
+ Telemetry.scalarAdd(UINT_SCALAR, 1);
+ Telemetry.scalarAdd(UINT_SCALAR, 2);
+ // Do we get what we expect?
+ checkScalar(3);
+
+ // Let's test setting the scalar to a value.
+ Telemetry.scalarSet(UINT_SCALAR, 3785);
+ checkScalar(3785);
+ Telemetry.scalarAdd(UINT_SCALAR, 1);
+ checkScalar(3786);
+
+ // Does setMaximum work?
+ Telemetry.scalarSet(UINT_SCALAR, 2);
+ checkScalar(2);
+ Telemetry.scalarSetMaximum(UINT_SCALAR, 5);
+ checkScalar(5);
+ // The value of the probe should still be 5, as the previous value
+ // is greater than the one we want to set.
+ Telemetry.scalarSetMaximum(UINT_SCALAR, 3);
+ checkScalar(5);
+
+ // Check that non-integer numbers get truncated and set.
+ Telemetry.scalarSet(UINT_SCALAR, 3.785);
+ checkScalar(3);
+
+ // Setting or adding a negative number must report an error through
+ // the console and drop the change (shouldn't throw).
+ Telemetry.scalarAdd(UINT_SCALAR, -5);
+ Telemetry.scalarSet(UINT_SCALAR, -5);
+ Telemetry.scalarSetMaximum(UINT_SCALAR, -1);
+ checkScalar(3);
+
+ // If we try to set a value of a different type, the JS API should not
+ // throw but rather print a console message.
+ Telemetry.scalarSet(UINT_SCALAR, 1);
+ Telemetry.scalarSet(UINT_SCALAR, "unexpected value");
+ Telemetry.scalarAdd(UINT_SCALAR, "unexpected value");
+ Telemetry.scalarSetMaximum(UINT_SCALAR, "unexpected value");
+ // The stored value must not be compromised.
+ checkScalar(1);
+});
+
+add_task(async function test_stringScalar() {
+ let checkExpectedString = expectedString => {
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ Assert.equal(
+ scalars[STRING_SCALAR],
+ expectedString,
+ STRING_SCALAR + " must contain the expected string value."
+ );
+ };
+
+ Telemetry.clearScalars();
+
+ // Let's check simple strings...
+ let expected = "test string";
+ Telemetry.scalarSet(STRING_SCALAR, expected);
+ checkExpectedString(expected);
+ expected = "漢語";
+ Telemetry.scalarSet(STRING_SCALAR, expected);
+ checkExpectedString(expected);
+
+ // We have some unsupported operations for strings.
+ Telemetry.scalarAdd(STRING_SCALAR, 1);
+ Telemetry.scalarAdd(STRING_SCALAR, "string value");
+ Telemetry.scalarSetMaximum(STRING_SCALAR, 1);
+ Telemetry.scalarSetMaximum(STRING_SCALAR, "string value");
+ Telemetry.scalarSet(STRING_SCALAR, 1);
+
+ // Try to set the scalar to a string longer than the maximum length limit.
+ const LONG_STRING =
+ "browser.qaxfiuosnzmhlg.rpvxicawolhtvmbkpnludhedobxvkjwqyeyvmv";
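+ // Values over the 50-character limit are expected to be truncated, as
+ // verified by the substr check below.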
+ Telemetry.scalarSet(STRING_SCALAR, LONG_STRING);
+ checkExpectedString(LONG_STRING.substr(0, 50));
+});
+
+add_task(async function test_booleanScalar() {
+ let checkExpectedBool = expectedBoolean => {
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ Assert.equal(
+ scalars[BOOLEAN_SCALAR],
+ expectedBoolean,
+ BOOLEAN_SCALAR + " must contain the expected boolean value."
+ );
+ };
+
+ Telemetry.clearScalars();
+
+ // Set a test boolean value.
+ let expected = false;
+ Telemetry.scalarSet(BOOLEAN_SCALAR, expected);
+ checkExpectedBool(expected);
+ expected = true;
+ Telemetry.scalarSet(BOOLEAN_SCALAR, expected);
+ checkExpectedBool(expected);
+
+ // Check that setting a numeric value implicitly converts to boolean.
+ Telemetry.scalarSet(BOOLEAN_SCALAR, 1);
+ checkExpectedBool(true);
+ Telemetry.scalarSet(BOOLEAN_SCALAR, 0);
+ checkExpectedBool(false);
+ Telemetry.scalarSet(BOOLEAN_SCALAR, 1.0);
+ checkExpectedBool(true);
+ Telemetry.scalarSet(BOOLEAN_SCALAR, 0.0);
+ checkExpectedBool(false);
+
+ // Check that unsupported operations for booleans do not throw.
+ Telemetry.scalarAdd(BOOLEAN_SCALAR, 1);
+ Telemetry.scalarAdd(BOOLEAN_SCALAR, "string value");
+ Telemetry.scalarSetMaximum(BOOLEAN_SCALAR, 1);
+ Telemetry.scalarSetMaximum(BOOLEAN_SCALAR, "string value");
+ Telemetry.scalarSet(BOOLEAN_SCALAR, "true");
+});
+
+add_task(async function test_scalarRecording() {
+ const OPTIN_SCALAR = "telemetry.test.release_optin";
+ const OPTOUT_SCALAR = "telemetry.test.release_optout";
+
+ let checkValue = (scalarName, expectedValue) => {
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ Assert.equal(
+ scalars[scalarName],
+ expectedValue,
+ scalarName + " must contain the expected value."
+ );
+ };
+
+ let checkNotSerialized = scalarName => {
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ Assert.ok(!(scalarName in scalars), scalarName + " was not recorded.");
+ };
+
+ Telemetry.canRecordBase = false;
+ Telemetry.canRecordExtended = false;
+ Telemetry.clearScalars();
+
+ // Check that no scalar is recorded if both base and extended recording are off.
+ Telemetry.scalarSet(OPTOUT_SCALAR, 3);
+ Telemetry.scalarSet(OPTIN_SCALAR, 3);
+ checkNotSerialized(OPTOUT_SCALAR);
+ checkNotSerialized(OPTIN_SCALAR);
+
+ // Check that opt-out scalars are recorded, while opt-in are not.
+ Telemetry.canRecordBase = true;
+ Telemetry.scalarSet(OPTOUT_SCALAR, 3);
+ Telemetry.scalarSet(OPTIN_SCALAR, 3);
+ checkValue(OPTOUT_SCALAR, 3);
+ checkNotSerialized(OPTIN_SCALAR);
+
+ // Check that both opt-out and opt-in scalars are recorded.
+ Telemetry.canRecordExtended = true;
+ Telemetry.scalarSet(OPTOUT_SCALAR, 5);
+ Telemetry.scalarSet(OPTIN_SCALAR, 6);
+ checkValue(OPTOUT_SCALAR, 5);
+ checkValue(OPTIN_SCALAR, 6);
+});
+
+add_task(async function test_keyedScalarRecording() {
+ const OPTIN_SCALAR = "telemetry.test.keyed_release_optin";
+ const OPTOUT_SCALAR = "telemetry.test.keyed_release_optout";
+ const testKey = "policy_key";
+
+ let checkValue = (scalarName, expectedValue) => {
+ const scalars = TelemetryTestUtils.getProcessScalars("parent", true);
+ Assert.equal(
+ scalars[scalarName][testKey],
+ expectedValue,
+ scalarName + " must contain the expected value."
+ );
+ };
+
+ let checkNotSerialized = scalarName => {
+ const scalars = TelemetryTestUtils.getProcessScalars("parent", true);
+ Assert.ok(!(scalarName in scalars), scalarName + " was not recorded.");
+ };
+
+ Telemetry.canRecordBase = false;
+ Telemetry.canRecordExtended = false;
+ Telemetry.clearScalars();
+
+ // Check that no scalar is recorded if both base and extended recording are off.
+ Telemetry.keyedScalarSet(OPTOUT_SCALAR, testKey, 3);
+ Telemetry.keyedScalarSet(OPTIN_SCALAR, testKey, 3);
+ checkNotSerialized(OPTOUT_SCALAR);
+ checkNotSerialized(OPTIN_SCALAR);
+
+ // Check that opt-out scalars are recorded, while opt-in are not.
+ Telemetry.canRecordBase = true;
+ Telemetry.keyedScalarSet(OPTOUT_SCALAR, testKey, 3);
+ Telemetry.keyedScalarSet(OPTIN_SCALAR, testKey, 3);
+ checkValue(OPTOUT_SCALAR, 3);
+ checkNotSerialized(OPTIN_SCALAR);
+
+ // Check that both opt-out and opt-in scalars are recorded.
+ Telemetry.canRecordExtended = true;
+ Telemetry.keyedScalarSet(OPTOUT_SCALAR, testKey, 5);
+ Telemetry.keyedScalarSet(OPTIN_SCALAR, testKey, 6);
+ checkValue(OPTOUT_SCALAR, 5);
+ checkValue(OPTIN_SCALAR, 6);
+});
+
+add_task(async function test_subsession() {
+ Telemetry.clearScalars();
+
+ // Set the scalars to a known value.
+ Telemetry.scalarSet(UINT_SCALAR, 3785);
+ Telemetry.scalarSet(STRING_SCALAR, "some value");
+ Telemetry.scalarSet(BOOLEAN_SCALAR, false);
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "some_random_key", 12);
+
+ // Get a snapshot and reset the subsession. The value we set must be there.
+ let scalars = TelemetryTestUtils.getProcessScalars("parent", false, true);
+ let keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true, true);
+
+ Assert.equal(
+ scalars[UINT_SCALAR],
+ 3785,
+ UINT_SCALAR + " must contain the expected value."
+ );
+ Assert.equal(
+ scalars[STRING_SCALAR],
+ "some value",
+ STRING_SCALAR + " must contain the expected value."
+ );
+ Assert.equal(
+ scalars[BOOLEAN_SCALAR],
+ false,
+ BOOLEAN_SCALAR + " must contain the expected value."
+ );
+ Assert.equal(
+ keyedScalars[KEYED_UINT_SCALAR].some_random_key,
+ 12,
+ KEYED_UINT_SCALAR + " must contain the expected value."
+ );
+
+ // Get a new snapshot and reset the subsession again. Since no new value
+ // was set, the scalars should not be reported.
+ scalars = TelemetryTestUtils.getProcessScalars("parent", false, true);
+ keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true, true);
+
+ Assert.ok(
+ !(UINT_SCALAR in scalars),
+ UINT_SCALAR + " must be empty and not reported."
+ );
+ Assert.ok(
+ !(STRING_SCALAR in scalars),
+ STRING_SCALAR + " must be empty and not reported."
+ );
+ Assert.ok(
+ !(BOOLEAN_SCALAR in scalars),
+ BOOLEAN_SCALAR + " must be empty and not reported."
+ );
+ Assert.ok(
+ !(KEYED_UINT_SCALAR in keyedScalars),
+ KEYED_UINT_SCALAR + " must be empty and not reported."
+ );
+});
+
+add_task(async function test_keyed_uint() {
+ Telemetry.clearScalars();
+
+ const KEYS = ["a_key", "another_key", "third_key"];
+ let expectedValues = [1, 1, 1];
+
+ // Set all the keys to a baseline value.
+ for (let key of KEYS) {
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, key, 1);
+ }
+
+ // Increment only one key.
+ Telemetry.keyedScalarAdd(KEYED_UINT_SCALAR, KEYS[1], 1);
+ expectedValues[1]++;
+
+ // Use SetMaximum on the third key.
+ Telemetry.keyedScalarSetMaximum(KEYED_UINT_SCALAR, KEYS[2], 37);
+ expectedValues[2] = 37;
+
+ // Get a snapshot of the scalars and make sure the keys contain
+ // the correct values.
+ const keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+
+ for (let k = 0; k < 3; k++) {
+ const keyName = KEYS[k];
+ Assert.equal(
+ keyedScalars[KEYED_UINT_SCALAR][keyName],
+ expectedValues[k],
+ KEYED_UINT_SCALAR + "." + keyName + " must contain the correct value."
+ );
+ }
+
+ // Unsupported operations on uint keyed scalars must not throw.
+ // Only one unsupported operation is tested here; the others are covered in
+ // the plain unsigned scalar test.
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "new_key", "unexpected value");
+});
+
+add_task(async function test_keyed_boolean() {
+ Telemetry.clearScalars();
+
+ const KEYED_BOOLEAN_TYPE = "telemetry.test.keyed_boolean_kind";
+ const first_key = "first_key";
+ const second_key = "second_key";
+
+ // Set the initial values.
+ Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, first_key, true);
+ Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, second_key, false);
+
+ // Get a snapshot of the scalars and make sure the keys contain
+ // the correct values.
+ let keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+ Assert.equal(
+ keyedScalars[KEYED_BOOLEAN_TYPE][first_key],
+ true,
+ "The key must contain the expected value."
+ );
+ Assert.equal(
+ keyedScalars[KEYED_BOOLEAN_TYPE][second_key],
+ false,
+ "The key must contain the expected value."
+ );
+
+ // Now flip the values and make sure we get the expected values back.
+ Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, first_key, false);
+ Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, second_key, true);
+
+ keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+ Assert.equal(
+ keyedScalars[KEYED_BOOLEAN_TYPE][first_key],
+ false,
+ "The key must contain the expected value."
+ );
+ Assert.equal(
+ keyedScalars[KEYED_BOOLEAN_TYPE][second_key],
+ true,
+ "The key must contain the expected value."
+ );
+
+ // Unsupported operations on boolean keyed scalars must not throw.
+ // Only one unsupported operation is tested here; the others are covered in
+ // the plain boolean scalar test.
+ Telemetry.keyedScalarAdd(KEYED_BOOLEAN_TYPE, "somehey", 1);
+});
+
+add_task(async function test_keyed_keys_length() {
+ Telemetry.clearScalars();
+
+ const LONG_KEY_STRING =
+ "browser.qaxfiuosnzmhlg.rpvxicawolhtvmbkpnludhedobxvkjwqyeyvmv.somemoresowereach70chars";
+ const NORMAL_KEY = "a_key";
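+ // Keys longer than the 70-character limit (and empty keys) are expected to
+ // be dropped rather than recorded, as verified below.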
+
+ // Set the value for a key within the length limits.
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, NORMAL_KEY, 1);
+
+ // Now try to set and modify the value for a very long key (must not throw).
+ Telemetry.keyedScalarAdd(KEYED_UINT_SCALAR, LONG_KEY_STRING, 10);
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, LONG_KEY_STRING, 1);
+ Telemetry.keyedScalarSetMaximum(KEYED_UINT_SCALAR, LONG_KEY_STRING, 10);
+
+ // Also attempt to set the value for an empty key.
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "", 1);
+
+ // Make sure the key with the right length contains the expected value.
+ let keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+ Assert.equal(
+ Object.keys(keyedScalars[KEYED_UINT_SCALAR]).length,
+ 1,
+ "The keyed scalar must contain exactly 1 key."
+ );
+ Assert.ok(
+ NORMAL_KEY in keyedScalars[KEYED_UINT_SCALAR],
+ "The keyed scalar must contain the expected key."
+ );
+ Assert.equal(
+ keyedScalars[KEYED_UINT_SCALAR][NORMAL_KEY],
+ 1,
+ "The key must contain the expected value."
+ );
+ Assert.ok(
+ !(LONG_KEY_STRING in keyedScalars[KEYED_UINT_SCALAR]),
+ "The data for the long key should not have been recorded."
+ );
+ Assert.ok(
+ !("" in keyedScalars[KEYED_UINT_SCALAR]),
+ "The data for the empty key should not have been recorded."
+ );
+});
+
+add_task(async function test_keyed_max_keys() {
+ Telemetry.clearScalars();
+
+ // Generate the names for the first 100 keys.
+ let keyNamesSet = new Set();
+ for (let k = 0; k < 100; k++) {
+ keyNamesSet.add("key_" + k);
+ }
+
+ // Add 100 keys to the keyed scalar and set their initial values.
+ let valueToSet = 0;
+ keyNamesSet.forEach(keyName => {
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, keyName, valueToSet++);
+ });
+
+ // Perform some operations on a 101st key. These must not throw, but the
+ // data must be dropped, as we're not allowed to have more than 100 keys.
+ const LAST_KEY_NAME = "overflowing_key";
+ Telemetry.keyedScalarAdd(KEYED_UINT_SCALAR, LAST_KEY_NAME, 10);
+ Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, LAST_KEY_NAME, 1);
+ Telemetry.keyedScalarSetMaximum(KEYED_UINT_SCALAR, LAST_KEY_NAME, 10);
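+ // Each of the three operations above exceeds the key limit; this is expected
+ // to be reflected in the telemetry.keyed_scalars_exceed_limit meta-scalar,
+ // which is checked further below.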
+
+ // Make sure all the keys except the last one are available and have the correct
+ // values.
+ let keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+
+ // Check that the keyed scalar only contains the first 100 keys.
+ const reportedKeysSet = new Set(Object.keys(keyedScalars[KEYED_UINT_SCALAR]));
+ Assert.ok(
+ [...keyNamesSet].every(x => reportedKeysSet.has(x)) &&
+ [...reportedKeysSet].every(x => keyNamesSet.has(x)),
+ "The keyed scalar must contain all the 100 keys, and drop the others."
+ );
+
+ // Check that all the keys recorded the expected values.
+ let expectedValue = 0;
+ keyNamesSet.forEach(keyName => {
+ Assert.equal(
+ keyedScalars[KEYED_UINT_SCALAR][keyName],
+ expectedValue++,
+ "The key must contain the expected value."
+ );
+ });
+
+ // Check that KEYED_EXCEED_SCALAR is in keyedScalars
+ Assert.ok(
+ KEYED_EXCEED_SCALAR in keyedScalars,
+ "The exceed-limit meta-scalar must be present after exceeding the key limit."
+ );
+
+ // Generate names for the keys to store in the exceed meta-scalar.
+ let keyNamesSet2 = new Set();
+ for (let k = 0; k < 100; k++) {
+ keyNamesSet2.add("key2_" + k);
+ }
+
+ // Add 100 keys to the keyed exceed scalar and set their initial value.
+ valueToSet = 0;
+ keyNamesSet2.forEach(keyName2 => {
+ Telemetry.keyedScalarSet(KEYED_EXCEED_SCALAR, keyName2, valueToSet++);
+ });
+
+ // Check that there are exactly 100 keys in KEYED_EXCEED_SCALAR
+ let snapshot = Telemetry.getSnapshotForKeyedScalars("main", false);
+ Assert.equal(
+ 100,
+ Object.keys(snapshot.parent[KEYED_EXCEED_SCALAR]).length,
+ "The keyed scalar must contain all the 100 keys."
+ );
+
+ // Check that KEYED_UINT_SCALAR was recorded in the exceed meta-scalar and
+ // that its value equals 3.
+ Assert.ok(
+ KEYED_UINT_SCALAR in keyedScalars[KEYED_EXCEED_SCALAR],
+ "The keyed scalar must be recorded in the exceed-limit meta-scalar."
+ );
+ Assert.equal(
+ keyedScalars[KEYED_EXCEED_SCALAR][KEYED_UINT_SCALAR],
+ 3,
+ "Exactly 3 over-limit operations must have been recorded"
+ );
+});
+
+add_task(async function test_dynamicScalars_registration() {
+ Telemetry.clearScalars();
+
+ const TEST_CASES = [
+ {
+ category: "telemetry.test",
+ data: {
+ missing_kind: {
+ keyed: false,
+ record_on_release: true,
+ },
+ },
+ evaluation: /missing 'kind'/,
+ description: "Registration must fail if required fields are missing",
+ },
+ {
+ category: "telemetry.test",
+ data: {
+ invalid_collection: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ record_on_release: "opt-in",
+ },
+ },
+ evaluation: /Invalid 'record_on_release'/,
+ description:
+ "Registration must fail if 'record_on_release' is of the wrong type",
+ },
+ {
+ category: "telemetry.test",
+ data: {
+ invalid_kind: {
+ kind: "12",
+ },
+ },
+ evaluation: /Invalid or missing 'kind'/,
+ description: "Registration must fail if 'kind' is of the wrong type",
+ },
+ {
+ category: "telemetry.test",
+ data: {
+ invalid_expired: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ expired: "never",
+ },
+ },
+ evaluation: /Invalid 'expired'/,
+ description: "Registration must fail if 'expired' is of the wrong type",
+ },
+ {
+ category: "telemetry.test",
+ data: {
+ valid_scalar: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ record_on_release: true,
+ },
+ invalid_scalar: {
+ expired: false,
+ },
+ },
+ evaluation: /Invalid or missing 'kind'/,
+ description:
+ "No scalar must be registered if the batch contains an invalid one",
+ },
+ {
+ category: "telemetry.test",
+ data: {
+ invalid_stores: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ stores: true,
+ },
+ },
+ evaluation: /Invalid 'stores'/,
+ description: "Registration must fail if 'stores' is of the wrong type",
+ },
+ {
+ category: "telemetry.test",
+ data: {
+ invalid_stores: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ stores: {},
+ },
+ },
+ evaluation: /Invalid 'stores'/,
+ description: "Registration must fail if 'stores' is of the wrong type",
+ },
+ {
+ category: "telemetry.test",
+ data: {
+ invalid_stores: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: false,
+ stores: [{}],
+ },
+ },
+ evaluation: /'stores' array isn't a string./,
+ description:
+ "Registration must fail if element in 'stores' is of the wrong type",
+ },
+ ];
+
+ for (let testCase of TEST_CASES) {
+ Assert.throws(
+ () => Telemetry.registerScalars(testCase.category, testCase.data),
+ testCase.evaluation,
+ testCase.description
+ );
+ }
+});
+
+add_task(async function test_dynamicScalars_doubleRegistration() {
+ Telemetry.clearScalars();
+
+ // Register a test scalar.
+ Telemetry.registerScalars("telemetry.test.dynamic", {
+ double_registration_1: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ record_on_release: true,
+ },
+ });
+
+ // Verify that we can record the scalar.
+ Telemetry.scalarSet("telemetry.test.dynamic.double_registration_1", 1);
+
+ // Register the same scalar again, along with a second scalar.
+ // This must not throw.
+ Telemetry.registerScalars("telemetry.test.dynamic", {
+ double_registration_1: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ record_on_release: true,
+ },
+ double_registration_2: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ record_on_release: true,
+ },
+ });
+
+ // Set the dynamic scalars to some test values.
+ Telemetry.scalarAdd("telemetry.test.dynamic.double_registration_1", 1);
+ Telemetry.scalarSet("telemetry.test.dynamic.double_registration_2", 3);
+
+ // Get a snapshot of the scalars and check that the dynamic ones were correctly set.
+ let scalars = getProcessScalars("dynamic", false, false);
+
+ Assert.equal(
+ scalars["telemetry.test.dynamic.double_registration_1"],
+ 2,
+ "The recorded scalar must contain the right value."
+ );
+ Assert.equal(
+ scalars["telemetry.test.dynamic.double_registration_2"],
+ 3,
+ "The recorded scalar must contain the right value."
+ );
+
+ // Register an existing scalar again, only change the definition
+ // to make it expire.
+ Telemetry.registerScalars("telemetry.test.dynamic", {
+ double_registration_2: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ record_on_release: true,
+ expired: true,
+ },
+ });
+
+ // Attempt to record and make sure that no recording happens.
+ Telemetry.scalarAdd("telemetry.test.dynamic.double_registration_2", 1);
+ scalars = getProcessScalars("dynamic", false, false);
+ Assert.equal(
+ scalars["telemetry.test.dynamic.double_registration_2"],
+ 3,
+ "The recorded scalar must contain the right value."
+ );
+});
+
+add_task(async function test_dynamicScalars_recording() {
+ Telemetry.clearScalars();
+
+ // Disable extended recording so that we will just record opt-out.
+ Telemetry.canRecordExtended = false;
+
+ // Register some test scalars.
+ Telemetry.registerScalars("telemetry.test.dynamic", {
+ record_optout: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ record_on_release: true,
+ },
+ record_keyed: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ keyed: true,
+ record_on_release: true,
+ },
+ record_optin: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_BOOLEAN,
+ record_on_release: false,
+ },
+ record_expired: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_STRING,
+ expired: true,
+ record_on_release: true,
+ },
+ });
+
+ // Set the dynamic scalars to some test values.
+ Telemetry.scalarSet("telemetry.test.dynamic.record_optout", 1);
+ Telemetry.keyedScalarSet("telemetry.test.dynamic.record_keyed", "someKey", 5);
+ Telemetry.scalarSet("telemetry.test.dynamic.record_optin", false);
+ Telemetry.scalarSet("telemetry.test.dynamic.record_expired", "test");
+
+ // Get a snapshot of the scalars and check that the dynamic ones were correctly set.
+ let scalars = getProcessScalars("dynamic", false, false);
+ let keyedScalars = getProcessScalars("dynamic", true, true);
+
+ Assert.ok(
+ !("telemetry.test.dynamic.record_optin" in scalars),
+ "Dynamic opt-in scalars must not be recorded."
+ );
+ Assert.ok(
+ "telemetry.test.dynamic.record_keyed" in keyedScalars,
+ "Dynamic opt-out keyed scalars must be recorded."
+ );
+ Assert.ok(
+ !("telemetry.test.dynamic.record_expired" in scalars),
+ "Dynamic expired scalars must not be recorded."
+ );
+ Assert.ok(
+ "telemetry.test.dynamic.record_optout" in scalars,
+ "Dynamic opt-out scalars must be recorded."
+ );
+ Assert.equal(
+ scalars["telemetry.test.dynamic.record_optout"],
+ 1,
+ "The recorded scalar must contain the right value."
+ );
+ Assert.equal(
+ keyedScalars["telemetry.test.dynamic.record_keyed"].someKey,
+ 5,
+ "The recorded keyed scalar must contain the right value."
+ );
+
+ // Enable extended recording.
+ Telemetry.canRecordExtended = true;
+
+ // Set the dynamic scalars to some test values.
+ Telemetry.scalarSet("telemetry.test.dynamic.record_optin", true);
+ Telemetry.scalarSet("telemetry.test.dynamic.record_expired", "test");
+
+ // Get a snapshot of the scalars and check that the dynamic ones were correctly set.
+ scalars = getProcessScalars("dynamic", false, true);
+
+ Assert.ok(
+ !("telemetry.test.dynamic.record_expired" in scalars),
+ "Dynamic expired scalars must not be recorded."
+ );
+ Assert.ok(
+ "telemetry.test.dynamic.record_optin" in scalars,
+ "Dynamic opt-in scalars must be recorded."
+ );
+ Assert.equal(
+ scalars["telemetry.test.dynamic.record_optin"],
+ true,
+ "The recorded scalar must contain the right value."
+ );
+});
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_productSpecificScalar() {
+ const DEFAULT_PRODUCT_SCALAR = "telemetry.test.default_products";
+ const DESKTOP_ONLY_SCALAR = "telemetry.test.desktop_only";
+ const MULTIPRODUCT_SCALAR = "telemetry.test.multiproduct";
+ const MOBILE_ONLY_SCALAR = "telemetry.test.mobile_only";
+ const MOBILE_ONLY_KEYED_SCALAR = "telemetry.test.keyed_mobile_only";
+
+ Telemetry.clearScalars();
+
+ // Try to set the desktop scalars
+ let expectedValue = 11714;
+ Telemetry.scalarAdd(DEFAULT_PRODUCT_SCALAR, expectedValue);
+ Telemetry.scalarAdd(DESKTOP_ONLY_SCALAR, expectedValue);
+ Telemetry.scalarAdd(MULTIPRODUCT_SCALAR, expectedValue);
+
+ // Try to set the mobile-only scalar to some value. We will not be recording the value,
+ // but we shouldn't throw.
+ let expectedKey = "some_key";
+ Telemetry.scalarSet(MOBILE_ONLY_SCALAR, 11715);
+ Telemetry.scalarSetMaximum(MOBILE_ONLY_SCALAR, 11715);
+ Telemetry.keyedScalarAdd(MOBILE_ONLY_KEYED_SCALAR, expectedKey, 11715);
+ Telemetry.keyedScalarSet(MOBILE_ONLY_KEYED_SCALAR, expectedKey, 11715);
+ Telemetry.keyedScalarSetMaximum(
+ MOBILE_ONLY_KEYED_SCALAR,
+ expectedKey,
+ 11715
+ );
+
+ // Get a snapshot of the scalars.
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ const keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+
+ Assert.equal(
+ scalars[DEFAULT_PRODUCT_SCALAR],
+ expectedValue,
+ "The default products scalar must contain the right value"
+ );
+ Assert.equal(
+ scalars[DESKTOP_ONLY_SCALAR],
+ expectedValue,
+ "The desktop-only scalar must contain the right value"
+ );
+ Assert.equal(
+ scalars[MULTIPRODUCT_SCALAR],
+ expectedValue,
+ "The multiproduct scalar must contain the right value"
+ );
+
+ Assert.ok(
+ !(MOBILE_ONLY_SCALAR in scalars),
+ "The mobile-only scalar must not be persisted."
+ );
+ Assert.ok(
+ !(MOBILE_ONLY_KEYED_SCALAR in keyedScalars),
+ "The mobile-only keyed scalar must not be persisted."
+ );
+ }
+);
+
+add_task(
+ {
+ skip_if: () => !gIsAndroid,
+ },
+ async function test_mobileSpecificScalar() {
+ const DEFAULT_PRODUCT_SCALAR = "telemetry.test.default_products";
+ const DESKTOP_ONLY_SCALAR = "telemetry.test.desktop_only";
+ const DESKTOP_ONLY_KEYED_SCALAR = "telemetry.test.keyed_desktop_only";
+ const MULTIPRODUCT_SCALAR = "telemetry.test.multiproduct";
+ const MOBILE_ONLY_SCALAR = "telemetry.test.mobile_only";
+ const MOBILE_ONLY_KEYED_SCALAR = "telemetry.test.keyed_mobile_only";
+
+ Telemetry.clearScalars();
+
+ // Try to set the mobile and multiproduct scalars
+ let expectedValue = 11714;
+ let expectedKey = "some_key";
+ Telemetry.scalarAdd(DEFAULT_PRODUCT_SCALAR, expectedValue);
+ Telemetry.scalarAdd(MOBILE_ONLY_SCALAR, expectedValue);
+ Telemetry.keyedScalarSet(
+ MOBILE_ONLY_KEYED_SCALAR,
+ expectedKey,
+ expectedValue
+ );
+ Telemetry.scalarAdd(MULTIPRODUCT_SCALAR, expectedValue);
+
+ // Try to set the desktop-only scalar to some value. We will not be recording the value,
+ // but we shouldn't throw.
+ Telemetry.scalarSet(DESKTOP_ONLY_SCALAR, 11715);
+ Telemetry.scalarSetMaximum(DESKTOP_ONLY_SCALAR, 11715);
+ Telemetry.keyedScalarAdd(DESKTOP_ONLY_KEYED_SCALAR, expectedKey, 11715);
+ Telemetry.keyedScalarSet(DESKTOP_ONLY_KEYED_SCALAR, expectedKey, 11715);
+ Telemetry.keyedScalarSetMaximum(
+ DESKTOP_ONLY_KEYED_SCALAR,
+ expectedKey,
+ 11715
+ );
+
+ // Get a snapshot of the scalars.
+ const scalars = TelemetryTestUtils.getProcessScalars("parent");
+ const keyedScalars = TelemetryTestUtils.getProcessScalars("parent", true);
+
+ Assert.equal(
+ scalars[DEFAULT_PRODUCT_SCALAR],
+ expectedValue,
+ "The default products scalar must contain the right value"
+ );
+ Assert.equal(
+ scalars[MOBILE_ONLY_SCALAR],
+ expectedValue,
+ "The mobile-only scalar must contain the right value"
+ );
+ Assert.equal(
+ keyedScalars[MOBILE_ONLY_KEYED_SCALAR][expectedKey],
+ expectedValue,
+ "The mobile-only keyed scalar must contain the right value"
+ );
+ Assert.equal(
+ scalars[MULTIPRODUCT_SCALAR],
+ expectedValue,
+ "The multiproduct scalar must contain the right value"
+ );
+
+ Assert.ok(
+ !(DESKTOP_ONLY_SCALAR in scalars),
+ "The desktop-only scalar must not be persisted."
+ );
+ Assert.ok(
+ !(DESKTOP_ONLY_KEYED_SCALAR in keyedScalars),
+ "The desktop-only keyed scalar must not be persisted."
+ );
+ }
+);
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_buildFaster.js b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_buildFaster.js
new file mode 100644
index 0000000000..551a3efee4
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_buildFaster.js
@@ -0,0 +1,236 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+const UINT_SCALAR = "telemetry.test.unsigned_int_kind";
+const STRING_SCALAR = "telemetry.test.string_kind";
+const BOOLEAN_SCALAR = "telemetry.test.boolean_kind";
+const KEYED_UINT_SCALAR = "telemetry.test.keyed_unsigned_int";
+
+const { CommonUtils } = ChromeUtils.import(
+ "resource://services-common/utils.js"
+);
+
+/**
+ * Return the path to the definitions file for the scalars.
+ */
+function getDefinitionsPath() {
+ // Build the path to the artifact scalar definitions file in the binary
+ // (GreD) directory.
+ let definitionFile = Services.dirsvc.get("GreD", Ci.nsIFile);
+ definitionFile.append("ScalarArtifactDefinitions.json");
+ return definitionFile.path;
+}
+
+add_task(async function test_setup() {
+ do_get_profile();
+});
+
+add_task(
+ {
+ // The test needs to write a file, and that fails in tests on Android.
+ // We don't really need the Android coverage, so skip on Android.
+ skip_if: () => AppConstants.platform == "android",
+ },
+ async function test_invalidJSON() {
+ const INVALID_JSON = "{ invalid,JSON { {1}";
+ const FILE_PATH = getDefinitionsPath();
+
+ // Write a corrupted JSON file.
+ await OS.File.writeAtomic(FILE_PATH, INVALID_JSON, {
+ encoding: "utf-8",
+ noOverwrite: false,
+ });
+
+ // Simulate Firefox startup. This should not throw!
+ await TelemetryController.testSetup();
+ await TelemetryController.testPromiseJsProbeRegistration();
+
+ // Cleanup.
+ await TelemetryController.testShutdown();
+ await OS.File.remove(FILE_PATH);
+ }
+);
+
+add_task(
+ {
+ // The test needs to write a file, and that fails in tests on Android.
+ // We don't really need the Android coverage, so skip on Android.
+ skip_if: () => AppConstants.platform == "android",
+ },
+ async function test_dynamicBuiltin() {
+ const DYNAMIC_SCALAR_SPEC = {
+ "telemetry.test": {
+ builtin_dynamic: {
+ kind: "nsITelemetry::SCALAR_TYPE_COUNT",
+ expires: "never",
+ record_on_release: false,
+ keyed: false,
+ },
+ builtin_dynamic_other: {
+ kind: "nsITelemetry::SCALAR_TYPE_BOOLEAN",
+ expires: "never",
+ record_on_release: false,
+ keyed: false,
+ },
+ builtin_dynamic_expired: {
+ kind: "nsITelemetry::SCALAR_TYPE_BOOLEAN",
+ expires: AppConstants.MOZ_APP_VERSION,
+ record_on_release: false,
+ keyed: false,
+ },
+ builtin_dynamic_multi: {
+ kind: "nsITelemetry::SCALAR_TYPE_COUNT",
+ expired: false,
+ record_on_release: false,
+ keyed: false,
+ stores: ["main", "sync"],
+ },
+ builtin_dynamic_sync_only: {
+ kind: "nsITelemetry::SCALAR_TYPE_COUNT",
+ expired: false,
+ record_on_release: false,
+ keyed: false,
+ stores: ["sync"],
+ },
+ },
+ };
+
+ Telemetry.clearScalars();
+
+ // Let's write to the definition file to also cover the file
+ // loading part.
+ const FILE_PATH = getDefinitionsPath();
+ await CommonUtils.writeJSON(DYNAMIC_SCALAR_SPEC, FILE_PATH);
+
+ // Start TelemetryController to trigger loading the specs.
+ await TelemetryController.testReset();
+ await TelemetryController.testPromiseJsProbeRegistration();
+
+ // Store to that scalar.
+ const TEST_SCALAR1 = "telemetry.test.builtin_dynamic";
+ const TEST_SCALAR2 = "telemetry.test.builtin_dynamic_other";
+ const TEST_SCALAR3 = "telemetry.test.builtin_dynamic_multi";
+ const TEST_SCALAR4 = "telemetry.test.builtin_dynamic_sync_only";
+ const TEST_SCALAR5 = "telemetry.test.builtin_dynamic_expired";
+ Telemetry.scalarSet(TEST_SCALAR1, 3785);
+ Telemetry.scalarSet(TEST_SCALAR2, true);
+ Telemetry.scalarSet(TEST_SCALAR3, 1337);
+ Telemetry.scalarSet(TEST_SCALAR4, 31337);
+ Telemetry.scalarSet(TEST_SCALAR5, true);
+
+ // Check the values we tried to store.
+ const scalars = Telemetry.getSnapshotForScalars("main", false).parent;
+ const syncScalars = Telemetry.getSnapshotForScalars("sync", false).parent;
+
+ // Check that they are serialized to the correct format.
+ Assert.equal(
+ typeof scalars[TEST_SCALAR1],
+ "number",
+ TEST_SCALAR1 + " must be serialized to the correct format."
+ );
+ Assert.ok(
+ Number.isInteger(scalars[TEST_SCALAR1]),
+ TEST_SCALAR1 + " must be a finite integer."
+ );
+ Assert.equal(
+ scalars[TEST_SCALAR1],
+ 3785,
+ TEST_SCALAR1 + " must have the correct value."
+ );
+ Assert.equal(
+ typeof scalars[TEST_SCALAR2],
+ "boolean",
+ TEST_SCALAR2 + " must be serialized to the correct format."
+ );
+ Assert.equal(
+ scalars[TEST_SCALAR2],
+ true,
+ TEST_SCALAR2 + " must have the correct value."
+ );
+
+ Assert.equal(
+ typeof scalars[TEST_SCALAR3],
+ "number",
+ `${TEST_SCALAR3} must be serialized to the correct format.`
+ );
+ Assert.equal(
+ scalars[TEST_SCALAR3],
+ 1337,
+ `${TEST_SCALAR3} must have the correct value.`
+ );
+ Assert.equal(
+ typeof syncScalars[TEST_SCALAR3],
+ "number",
+ `${TEST_SCALAR3} must be serialized in the sync store to the correct format.`
+ );
+ Assert.equal(
+ syncScalars[TEST_SCALAR3],
+ 1337,
+ `${TEST_SCALAR3} must have the correct value in the sync snapshot.`
+ );
+
+ Assert.ok(
+ !(TEST_SCALAR4 in scalars),
+ `${TEST_SCALAR4} must not be in the main store.`
+ );
+ Assert.equal(
+ typeof syncScalars[TEST_SCALAR4],
+ "number",
+ `${TEST_SCALAR4} must be in the sync snapshot.`
+ );
+ Assert.equal(
+ syncScalars[TEST_SCALAR4],
+ 31337,
+ `${TEST_SCALAR4} must have the correct value.`
+ );
+
+ // Clean up.
+ await TelemetryController.testShutdown();
+ await OS.File.remove(FILE_PATH);
+ }
+);
+
+add_task(async function test_keyedDynamicBuiltin() {
+ Telemetry.clearScalars();
+
+ // Register the built-in scalars (let's not take the I/O hit).
+ Telemetry.registerBuiltinScalars("telemetry.test", {
+ builtin_dynamic_keyed: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ expired: false,
+ record_on_release: false,
+ keyed: true,
+ },
+ });
+
+ // Store to that scalar.
+ const TEST_SCALAR1 = "telemetry.test.builtin_dynamic_keyed";
+ Telemetry.keyedScalarSet(TEST_SCALAR1, "test-key", 3785);
+
+ // Check the values we tried to store.
+ const scalars = Telemetry.getSnapshotForKeyedScalars("main", false).parent;
+
+ // Check that they are serialized to the correct format.
+ Assert.equal(
+ typeof scalars[TEST_SCALAR1],
+ "object",
+ TEST_SCALAR1 + " must be a keyed scalar."
+ );
+ Assert.equal(
+ typeof scalars[TEST_SCALAR1]["test-key"],
+ "number",
+ TEST_SCALAR1 + " must be serialized to the correct format."
+ );
+ Assert.ok(
+ Number.isInteger(scalars[TEST_SCALAR1]["test-key"]),
+ TEST_SCALAR1 + " must be a finite integer."
+ );
+ Assert.equal(
+ scalars[TEST_SCALAR1]["test-key"],
+ 3785,
+ TEST_SCALAR1 + " must have the correct value."
+ );
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_impressionId.js b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_impressionId.js
new file mode 100644
index 0000000000..0aff09741e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_impressionId.js
@@ -0,0 +1,49 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+const CATEGORY = "telemetry.test";
+const MAIN_ONLY = `${CATEGORY}.main_only`;
+const IMPRESSION_ID_ONLY = `${CATEGORY}.impression_id_only`;
+
+add_task(async function test_multistore_basics() {
+ Telemetry.clearScalars();
+
+ const expectedUint = 3785;
+ const expectedString = "{some_impression_id}";
+ Telemetry.scalarSet(MAIN_ONLY, expectedUint);
+ Telemetry.scalarSet(IMPRESSION_ID_ONLY, expectedString);
+
+ const mainScalars = Telemetry.getSnapshotForScalars("main").parent;
+ const impressionIdScalars = Telemetry.getSnapshotForScalars(
+ "deletion-request"
+ ).parent;
+
+ Assert.ok(
+ MAIN_ONLY in mainScalars,
+ `Main-store scalar ${MAIN_ONLY} must be in main snapshot.`
+ );
+ Assert.ok(
+ !(MAIN_ONLY in impressionIdScalars),
+ `Main-store scalar ${MAIN_ONLY} must not be in deletion-request snapshot.`
+ );
+ Assert.equal(
+ mainScalars[MAIN_ONLY],
+ expectedUint,
+ `Main-store scalar ${MAIN_ONLY} must have correct value.`
+ );
+
+ Assert.ok(
+ IMPRESSION_ID_ONLY in impressionIdScalars,
+ `Deletion-request store scalar ${IMPRESSION_ID_ONLY} must be in deletion-request snapshot.`
+ );
+ Assert.ok(
+ !(IMPRESSION_ID_ONLY in mainScalars),
+ `Deletion-request scalar ${IMPRESSION_ID_ONLY} must not be in main snapshot.`
+ );
+ Assert.equal(
+ impressionIdScalars[IMPRESSION_ID_ONLY],
+ expectedString,
+ `Deletion-request store scalar ${IMPRESSION_ID_ONLY} must have correct value.`
+ );
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_multistore.js b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_multistore.js
new file mode 100644
index 0000000000..841caa4f1d
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars_multistore.js
@@ -0,0 +1,415 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+const CATEGORY = "telemetry.test";
+const MAIN_ONLY = `${CATEGORY}.main_only`;
+const SYNC_ONLY = `${CATEGORY}.sync_only`;
+const MULTIPLE_STORES = `${CATEGORY}.multiple_stores`;
+const MULTIPLE_STORES_STRING = `${CATEGORY}.multiple_stores_string`;
+const MULTIPLE_STORES_BOOL = `${CATEGORY}.multiple_stores_bool`;
+const MULTIPLE_STORES_KEYED = `${CATEGORY}.multiple_stores_keyed`;
+
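+// Return the parent-process part of the snapshot for the given store,
+// optionally for keyed scalars and optionally clearing the store.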
+function getParentSnapshot(store, keyed = false, clear = false) {
+ return keyed
+ ? Telemetry.getSnapshotForKeyedScalars(store, clear).parent
+ : Telemetry.getSnapshotForScalars(store, clear).parent;
+}
+
+add_task(async function test_multistore_basics() {
+ Telemetry.clearScalars();
+
+ const expectedUint = 3785;
+ const expectedBool = true;
+ const expectedString = "some value";
+ const expectedKey = "some key";
+ Telemetry.scalarSet(MAIN_ONLY, expectedUint);
+ Telemetry.scalarSet(SYNC_ONLY, expectedUint);
+ Telemetry.scalarSet(MULTIPLE_STORES, expectedUint);
+ Telemetry.scalarSet(MULTIPLE_STORES_STRING, expectedString);
+ Telemetry.scalarSet(MULTIPLE_STORES_BOOL, expectedBool);
+ Telemetry.keyedScalarSet(MULTIPLE_STORES_KEYED, expectedKey, expectedUint);
+
+ const mainScalars = getParentSnapshot("main");
+ const syncScalars = getParentSnapshot("sync");
+ const mainKeyedScalars = getParentSnapshot("main", true /* keyed */);
+ const syncKeyedScalars = getParentSnapshot("sync", true /* keyed */);
+
+ Assert.ok(
+ MAIN_ONLY in mainScalars,
+ `Main-store scalar ${MAIN_ONLY} must be in main snapshot.`
+ );
+ Assert.ok(
+ !(MAIN_ONLY in syncScalars),
+ `Main-store scalar ${MAIN_ONLY} must not be in sync snapshot.`
+ );
+ Assert.equal(
+ mainScalars[MAIN_ONLY],
+ expectedUint,
+ `Main-store scalar ${MAIN_ONLY} must have correct value.`
+ );
+
+ Assert.ok(
+ SYNC_ONLY in syncScalars,
+ `Sync-store scalar ${SYNC_ONLY} must be in sync snapshot.`
+ );
+ Assert.ok(
+ !(SYNC_ONLY in mainScalars),
+ `Sync-store scalar ${SYNC_ONLY} must not be in main snapshot.`
+ );
+ Assert.equal(
+ syncScalars[SYNC_ONLY],
+ expectedUint,
+ `Sync-store scalar ${SYNC_ONLY} must have correct value.`
+ );
+
+ Assert.ok(
+ MULTIPLE_STORES in mainScalars && MULTIPLE_STORES in syncScalars,
+ `Multi-store scalar ${MULTIPLE_STORES} must be in both main and sync snapshots.`
+ );
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES} must have correct value in main store.`
+ );
+ Assert.equal(
+ syncScalars[MULTIPLE_STORES],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES} must have correct value in sync store.`
+ );
+
+ Assert.ok(
+ MULTIPLE_STORES_STRING in mainScalars &&
+ MULTIPLE_STORES_STRING in syncScalars,
+ `Multi-store scalar ${MULTIPLE_STORES_STRING} must be in both main and sync snapshots.`
+ );
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES_STRING],
+ expectedString,
+ `Multi-store scalar ${MULTIPLE_STORES_STRING} must have correct value in main store.`
+ );
+ Assert.equal(
+ syncScalars[MULTIPLE_STORES_STRING],
+ expectedString,
+ `Multi-store scalar ${MULTIPLE_STORES_STRING} must have correct value in sync store.`
+ );
+
+ Assert.ok(
+ MULTIPLE_STORES_BOOL in mainScalars && MULTIPLE_STORES_BOOL in syncScalars,
+ `Multi-store scalar ${MULTIPLE_STORES_BOOL} must be in both main and sync snapshots.`
+ );
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES_BOOL],
+ expectedBool,
+ `Multi-store scalar ${MULTIPLE_STORES_BOOL} must have correct value in main store.`
+ );
+ Assert.equal(
+ syncScalars[MULTIPLE_STORES_BOOL],
+ expectedBool,
+ `Multi-store scalar ${MULTIPLE_STORES_BOOL} must have correct value in sync store.`
+ );
+
+ Assert.ok(
+ MULTIPLE_STORES_KEYED in mainKeyedScalars &&
+ MULTIPLE_STORES_KEYED in syncKeyedScalars,
+ `Multi-store scalar ${MULTIPLE_STORES_KEYED} must be in both main and sync snapshots.`
+ );
+ Assert.ok(
+ expectedKey in mainKeyedScalars[MULTIPLE_STORES_KEYED] &&
+ expectedKey in syncKeyedScalars[MULTIPLE_STORES_KEYED],
+ `Multi-store scalar ${MULTIPLE_STORES_KEYED} must have key ${expectedKey} in both snapshots.`
+ );
+ Assert.equal(
+ mainKeyedScalars[MULTIPLE_STORES_KEYED][expectedKey],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES_KEYED} must have correct value in main store.`
+ );
+ Assert.equal(
+ syncKeyedScalars[MULTIPLE_STORES_KEYED][expectedKey],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES_KEYED} must have correct value in sync store.`
+ );
+});
+
+add_task(async function test_multistore_uint() {
+ Telemetry.clearScalars();
+
+ // Uint scalars are the only kind with an implicit default value of 0.
+ // They shouldn't report any value until set, but if you Add or SetMaximum
+ // they pretend that they have been set to 0 for the purposes of that operation.
+
+ function assertNotIn() {
+ let mainScalars = getParentSnapshot("main");
+ let syncScalars = getParentSnapshot("sync");
+
+ if (!mainScalars && !syncScalars) {
+ Assert.ok(true, "No scalars at all");
+ } else {
+ Assert.ok(
+ !(MULTIPLE_STORES in mainScalars) && !(MULTIPLE_STORES in syncScalars),
+ `Multi-store scalar ${MULTIPLE_STORES} must not have an initial value in either store.`
+ );
+ }
+ }
+ assertNotIn();
+
+ // Test that Add operates on implicit 0.
+ Telemetry.scalarAdd(MULTIPLE_STORES, 1);
+
+ function assertBothEqual(val, clear = false) {
+ let mainScalars = getParentSnapshot("main", false, clear);
+ let syncScalars = getParentSnapshot("sync", false, clear);
+
+ Assert.ok(
+ MULTIPLE_STORES in mainScalars && MULTIPLE_STORES in syncScalars,
+ `Multi-store scalar ${MULTIPLE_STORES} must be in both main and sync snapshots.`
+ );
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES],
+ val,
+ `Multi-store scalar ${MULTIPLE_STORES} must have the correct value in main store.`
+ );
+ Assert.equal(
+ syncScalars[MULTIPLE_STORES],
+ val,
+ `Multi-store scalar ${MULTIPLE_STORES} must have the correct value in sync store.`
+ );
+ }
+
+ assertBothEqual(1, true /* clear */);
+
+ assertNotIn();
+
+ // Test that SetMaximum operates on implicit 0.
+ Telemetry.scalarSetMaximum(MULTIPLE_STORES, 1337);
+ assertBothEqual(1337);
+
+ // Test that Add works, since we're in the neighbourhood.
+ Telemetry.scalarAdd(MULTIPLE_STORES, 1);
+ assertBothEqual(1338, true /* clear */);
+
+ assertNotIn();
+
+ // Test that clearing individual stores works
+ // and that afterwards the values are managed independently.
+ Telemetry.scalarAdd(MULTIPLE_STORES, 1234);
+ assertBothEqual(1234);
+ let syncScalars = getParentSnapshot(
+ "sync",
+ false /* keyed */,
+ true /* clear */
+ );
+ Assert.equal(
+ syncScalars[MULTIPLE_STORES],
+ 1234,
+ `Multi-store scalar ${MULTIPLE_STORES} must be present in a second snapshot.`
+ );
+ syncScalars = getParentSnapshot("sync");
+ Assert.equal(
+ syncScalars,
+ undefined,
+ `Multi-store scalar ${MULTIPLE_STORES} must not be present after clearing.`
+ );
+ let mainScalars = getParentSnapshot("main");
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES],
+ 1234,
+ `Multi-store scalar ${MULTIPLE_STORES} must maintain value in main store after sync store is cleared.`
+ );
+
+ Telemetry.scalarSetMaximum(MULTIPLE_STORES, 1);
+ syncScalars = getParentSnapshot("sync");
+ Assert.equal(
+ syncScalars[MULTIPLE_STORES],
+ 1,
+ `Multi-store scalar ${MULTIPLE_STORES} must return to using implicit 0 for setMax operation.`
+ );
+ mainScalars = getParentSnapshot("main");
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES],
+ 1234,
+ `Multi-store scalar ${MULTIPLE_STORES} must retain old value.`
+ );
+
+ Telemetry.scalarAdd(MULTIPLE_STORES, 1);
+ syncScalars = getParentSnapshot("sync");
+ Assert.equal(
+ syncScalars[MULTIPLE_STORES],
+ 2,
+ `Multi-store scalar ${MULTIPLE_STORES} must manage independently for add operations.`
+ );
+ mainScalars = getParentSnapshot("main");
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES],
+ 1235,
+ `Multi-store scalar ${MULTIPLE_STORES} must add properly.`
+ );
+
+ Telemetry.scalarSet(MULTIPLE_STORES, 9876);
+ assertBothEqual(9876);
+});
+
+add_task(async function test_empty_absence() {
+ // Current semantics are we don't snapshot empty things.
+ // So no {parent: {}, ...}. Instead {...}.
+
+ Telemetry.clearScalars();
+
+ Telemetry.scalarSet(MULTIPLE_STORES, 1);
+ let snapshot = getParentSnapshot("main", false /* keyed */, true /* clear */);
+
+ Assert.ok(
+ MULTIPLE_STORES in snapshot,
+ `${MULTIPLE_STORES} must be in the snapshot.`
+ );
+ Assert.equal(
+ snapshot[MULTIPLE_STORES],
+ 1,
+ `${MULTIPLE_STORES} must have the correct value.`
+ );
+
+ snapshot = getParentSnapshot("main", false /* keyed */, true /* clear */);
+ Assert.equal(
+ snapshot,
+ undefined,
+ `Parent snapshot must be empty if no data.`
+ );
+
+ snapshot = getParentSnapshot("sync", false /* keyed */, true /* clear */);
+ Assert.ok(
+ MULTIPLE_STORES in snapshot,
+ `${MULTIPLE_STORES} must be in the sync snapshot.`
+ );
+ Assert.equal(
+ snapshot[MULTIPLE_STORES],
+ 1,
+ `${MULTIPLE_STORES} must have the correct value in the sync snapshot.`
+ );
+});
+
+add_task(async function test_empty_absence_keyed() {
+ // Current semantics are we don't snapshot empty things.
+ // So no {parent: {}, ...}. Instead {...}.
+ // And for Keyed Scalars, no {parent: { keyed_scalar: {} }, ...}. Just {...}.
+
+ Telemetry.clearScalars();
+
+ const key = "just a key, y'know";
+ Telemetry.keyedScalarSet(MULTIPLE_STORES_KEYED, key, 1);
+ let snapshot = getParentSnapshot("main", true /* keyed */, true /* clear */);
+
+ Assert.ok(
+ MULTIPLE_STORES_KEYED in snapshot,
+ `${MULTIPLE_STORES_KEYED} must be in the snapshot.`
+ );
+ Assert.ok(
+ key in snapshot[MULTIPLE_STORES_KEYED],
+ `${MULTIPLE_STORES_KEYED} must have the stored key.`
+ );
+ Assert.equal(
+ snapshot[MULTIPLE_STORES_KEYED][key],
+ 1,
+ `${MULTIPLE_STORES_KEYED}[${key}] should have the correct value.`
+ );
+
+ snapshot = getParentSnapshot("main", true /* keyed */);
+ Assert.equal(
+ snapshot,
+ undefined,
+ `Parent snapshot should be empty if no data.`
+ );
+ snapshot = getParentSnapshot("sync", true /* keyed */);
+
+ Assert.ok(
+ MULTIPLE_STORES_KEYED in snapshot,
+ `${MULTIPLE_STORES_KEYED} must be in the sync snapshot.`
+ );
+ Assert.ok(
+ key in snapshot[MULTIPLE_STORES_KEYED],
+ `${MULTIPLE_STORES_KEYED} must have the stored key.`
+ );
+ Assert.equal(
+ snapshot[MULTIPLE_STORES_KEYED][key],
+ 1,
+ `${MULTIPLE_STORES_KEYED}[${key}] should have the correct value.`
+ );
+});
+
+add_task(async function test_multistore_default_values() {
+ Telemetry.clearScalars();
+
+ const expectedUint = 3785;
+ const expectedKey = "some key";
+ Telemetry.scalarSet(MAIN_ONLY, expectedUint);
+ Telemetry.scalarSet(SYNC_ONLY, expectedUint);
+ Telemetry.scalarSet(MULTIPLE_STORES, expectedUint);
+ Telemetry.keyedScalarSet(MULTIPLE_STORES_KEYED, expectedKey, expectedUint);
+
+ let mainScalars;
+ let mainKeyedScalars;
+
+ // Getting snapshot and NOT clearing (using default values for optional parameters)
+ mainScalars = Telemetry.getSnapshotForScalars().parent;
+ mainKeyedScalars = Telemetry.getSnapshotForKeyedScalars().parent;
+
+ Assert.equal(
+ mainScalars[MAIN_ONLY],
+ expectedUint,
+ `Main-store scalar ${MAIN_ONLY} must have correct value.`
+ );
+ Assert.ok(
+ !(SYNC_ONLY in mainScalars),
+ `Sync-store scalar ${SYNC_ONLY} must not be in main snapshot.`
+ );
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES} must have correct value in main store.`
+ );
+ Assert.equal(
+ mainKeyedScalars[MULTIPLE_STORES_KEYED][expectedKey],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES_KEYED} must have correct value in main store.`
+ );
+
+ // Getting snapshot and clearing
+ mainScalars = Telemetry.getSnapshotForScalars("main", true).parent;
+ mainKeyedScalars = Telemetry.getSnapshotForKeyedScalars("main", true).parent;
+
+ Assert.equal(
+ mainScalars[MAIN_ONLY],
+ expectedUint,
+ `Main-store scalar ${MAIN_ONLY} must have correct value.`
+ );
+ Assert.ok(
+ !(SYNC_ONLY in mainScalars),
+ `Sync-store scalar ${SYNC_ONLY} must not be in main snapshot.`
+ );
+ Assert.equal(
+ mainScalars[MULTIPLE_STORES],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES} must have correct value in main store.`
+ );
+ Assert.equal(
+ mainKeyedScalars[MULTIPLE_STORES_KEYED][expectedKey],
+ expectedUint,
+ `Multi-store scalar ${MULTIPLE_STORES_KEYED} must have correct value in main store.`
+ );
+
+ // Getting snapshot (with default values), should be empty now
+ mainScalars = Telemetry.getSnapshotForScalars().parent || {};
+ mainKeyedScalars = Telemetry.getSnapshotForKeyedScalars().parent || {};
+
+ Assert.ok(
+ !(MAIN_ONLY in mainScalars),
+ `Main-store scalar ${MAIN_ONLY} must not be in main snapshot.`
+ );
+ Assert.ok(
+ !(MULTIPLE_STORES in mainScalars),
+ `Multi-store scalar ${MULTIPLE_STORES} must not be in main snapshot.`
+ );
+ Assert.ok(
+ !(MULTIPLE_STORES_KEYED in mainKeyedScalars),
+ `Multi-store scalar ${MULTIPLE_STORES_KEYED} must not be in main snapshot.`
+ );
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySend.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySend.js
new file mode 100644
index 0000000000..c53dffccef
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySend.js
@@ -0,0 +1,1095 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+// This tests TelemetrySend's ping submission and sending logic: batching,
+// backoff after failures, size limits, and persistence of pending pings.
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm", this);
+ChromeUtils.import("resource://testing-common/MockRegistrar.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/osfile.jsm", this);
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetryHealthPing",
+ "resource://gre/modules/HealthPing.jsm"
+);
+
+const MS_IN_A_MINUTE = 60 * 1000;
+
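+// Count the received pings by type, returning a Map from ping type to count.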
+function countPingTypes(pings) {
+ let countByType = new Map();
+ for (let p of pings) {
+ countByType.set(p.type, 1 + (countByType.get(p.type) || 0));
+ }
+ return countByType;
+}
+
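+// Overwrite the last-modified time of a stored pending ping file.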
+function setPingLastModified(id, timestamp) {
+ const path = OS.Path.join(TelemetryStorage.pingDirectoryPath, id);
+ return OS.File.setDates(path, null, timestamp);
+}
+
+// Mock out the send timer activity.
+function waitForTimer() {
+ return new Promise(resolve => {
+ fakePingSendTimer(
+ (callback, timeout) => {
+ resolve([callback, timeout]);
+ },
+ () => {}
+ );
+ });
+}
+
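+// Submit a test ping through TelemetryController, pointing TelemetrySend at the
+// local PingServer when it is running, or at an unreachable host otherwise.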
+function sendPing(aSendClientId, aSendEnvironment) {
+ const TEST_PING_TYPE = "test-ping-type";
+
+ if (PingServer.started) {
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ } else {
+ TelemetrySend.setServer("http://doesnotexist");
+ }
+
+ let options = {
+ addClientId: aSendClientId,
+ addEnvironment: aSendEnvironment,
+ };
+ return TelemetryController.submitExternalPing(TEST_PING_TYPE, {}, options);
+}
+
+// Allow easy faking of readable ping ids.
+// This helps with debugging issues with e.g. ordering in the send logic.
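+// For example, fakePingId("a", 3) yields "93bd0011-2c8f-4e1c-bee0-a00000000003".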
+function fakePingId(type, number) {
+ const HEAD = "93bd0011-2c8f-4e1c-bee0-";
+ const TAIL = "000000000000";
+ const N = String(number);
+ const id = HEAD + type + TAIL.slice(type.length, -N.length) + N;
+ fakeGeneratePingId(() => id);
+ return id;
+}
+
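+// Resolve to true only if every ping id in pingIds has a matching file in the
+// pending pings directory.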
+var checkPingsSaved = async function(pingIds) {
+ let allFound = true;
+ for (let id of pingIds) {
+ const path = OS.Path.join(TelemetryStorage.pingDirectoryPath, id);
+ let exists = false;
+ try {
+ exists = await OS.File.exists(path);
+ } catch (ex) {}
+
+ if (!exists) {
+ dump("checkPingsSaved - failed to find ping: " + path + "\n");
+ allFound = false;
+ }
+ }
+
+ return allFound;
+};
+
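+// Sum all the bucket counts of a histogram snapshot.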
+function histogramValueCount(h) {
+ return Object.values(h.values).reduce((a, b) => a + b, 0);
+}
+
+add_task(async function test_setup() {
+ // Trigger a proper telemetry init.
+ do_get_profile(true);
+
+ // Addon manager needs a profile directory.
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ finishAddonManagerStartup();
+ fakeIntlReady();
+
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.HealthPingEnabled,
+ true
+ );
+ TelemetryStopwatch.setTestModeEnabled(true);
+});
+
+// Test the ping sending logic.
+add_task(async function test_sendPendingPings() {
+ const TYPE_PREFIX = "test-sendPendingPings-";
+ const TEST_TYPE_A = TYPE_PREFIX + "A";
+ const TEST_TYPE_B = TYPE_PREFIX + "B";
+
+ const TYPE_A_COUNT = 20;
+ const TYPE_B_COUNT = 5;
+
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById(
+ "TELEMETRY_SEND_SUCCESS"
+ );
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+ histSuccess.clear();
+ histSendTimeSuccess.clear();
+ histSendTimeFail.clear();
+
+ // Fake a current date.
+ let now = TelemetryUtils.truncateToDays(new Date());
+ now = fakeNow(futureDate(now, 10 * 60 * MS_IN_A_MINUTE));
+
+ // Enable test-mode for TelemetrySend, otherwise we won't store pending pings
+ // before the module is fully initialized later.
+ TelemetrySend.setTestModeEnabled(true);
+
+ // Submit some pings without the server and telemetry started yet.
+ for (let i = 0; i < TYPE_A_COUNT; ++i) {
+ fakePingId("a", i);
+ const id = await TelemetryController.submitExternalPing(TEST_TYPE_A, {});
+ await setPingLastModified(id, now.getTime() + i * 1000);
+ }
+
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ TYPE_A_COUNT,
+ "Should have correct pending ping count"
+ );
+
+ // Submit some more pings of a different type.
+ now = fakeNow(futureDate(now, 5 * MS_IN_A_MINUTE));
+ for (let i = 0; i < TYPE_B_COUNT; ++i) {
+ fakePingId("b", i);
+ const id = await TelemetryController.submitExternalPing(TEST_TYPE_B, {});
+ await setPingLastModified(id, now.getTime() + i * 1000);
+ }
+
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ TYPE_A_COUNT + TYPE_B_COUNT,
+ "Should have correct pending ping count"
+ );
+
+ Assert.deepEqual(
+ histSuccess.snapshot().values,
+ {},
+ "Should not have recorded any sending in histograms yet."
+ );
+ Assert.equal(
+ histSendTimeSuccess.snapshot().sum,
+ 0,
+ "Should not have recorded any sending in histograms yet."
+ );
+ Assert.equal(
+ histSendTimeFail.snapshot().sum,
+ 0,
+ "Should not have recorded any sending in histograms yet."
+ );
+
+ // Now enable sending to the ping server.
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ PingServer.start();
+ Services.prefs.setStringPref(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+
+ let timerPromise = waitForTimer();
+ await TelemetryController.testReset();
+ let [pingSendTimerCallback, pingSendTimeout] = await timerPromise;
+ Assert.ok(!!pingSendTimerCallback, "Should have a timer callback");
+
+ // We should have received 10 pings from the first send batch:
+ // 5 of type B and 5 of type A, as sending is newest-first.
+ // The other pings should be delayed by the 10-pings-per-minute limit.
+ let pings = await PingServer.promiseNextPings(10);
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ TYPE_A_COUNT - 5,
+ "Should have correct pending ping count"
+ );
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Should not have received any pings now")
+ );
+ let countByType = countPingTypes(pings);
+
+ Assert.equal(
+ countByType.get(TEST_TYPE_B),
+ TYPE_B_COUNT,
+ "Should have received the correct amount of type B pings"
+ );
+ Assert.equal(
+ countByType.get(TEST_TYPE_A),
+ 10 - TYPE_B_COUNT,
+ "Should have received the correct amount of type A pings"
+ );
+
+ Assert.deepEqual(
+ histSuccess.snapshot().values,
+ { 0: 0, 1: 10, 2: 0 },
+ "Should have recorded sending success in histograms."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeSuccess.snapshot()),
+ 10,
+ "Should have recorded successful send times in histograms."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeFail.snapshot()),
+ 0,
+ "Should not have recorded any failed sending in histograms yet."
+ );
+
+ // As we hit the ping send limit and still have pending pings, a send tick should
+ // be scheduled in a minute.
+ Assert.ok(!!pingSendTimerCallback, "Timer callback should be set");
+ Assert.equal(
+ pingSendTimeout,
+ MS_IN_A_MINUTE,
+ "Send tick timeout should be correct"
+ );
+
+ // Trigger the next tick - we should receive the next 10 type A pings.
+ PingServer.resetPingHandler();
+ now = fakeNow(futureDate(now, pingSendTimeout));
+ timerPromise = waitForTimer();
+ pingSendTimerCallback();
+ [pingSendTimerCallback, pingSendTimeout] = await timerPromise;
+
+ pings = await PingServer.promiseNextPings(10);
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Should not have received any pings now")
+ );
+ countByType = countPingTypes(pings);
+
+ Assert.equal(
+ countByType.get(TEST_TYPE_A),
+ 10,
+ "Should have received the correct amount of type A pings"
+ );
+
+  // We hit the ping send limit again and still have pending pings, so a send tick
+  // should be scheduled in a minute.
+ Assert.equal(
+ pingSendTimeout,
+ MS_IN_A_MINUTE,
+ "Send tick timeout should be correct"
+ );
+
+ // Trigger the next tick - we should receive the remaining type A pings.
+ PingServer.resetPingHandler();
+ now = fakeNow(futureDate(now, pingSendTimeout));
+ await pingSendTimerCallback();
+
+ pings = await PingServer.promiseNextPings(5);
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Should not have received any pings now")
+ );
+ countByType = countPingTypes(pings);
+
+ Assert.equal(
+ countByType.get(TEST_TYPE_A),
+ 5,
+ "Should have received the correct amount of type A pings"
+ );
+
+ await TelemetrySend.testWaitOnOutgoingPings();
+ PingServer.resetPingHandler();
+ // Restore the default ping id generator.
+ fakeGeneratePingId(() => TelemetryUtils.generateUUID());
+});
+
+add_task(async function test_sendDateHeader() {
+ fakeNow(new Date(Date.UTC(2011, 1, 1, 11, 0, 0)));
+ await TelemetrySend.reset();
+
+ let pingId = await TelemetryController.submitExternalPing(
+ "test-send-date-header",
+ {}
+ );
+ let req = await PingServer.promiseNextRequest();
+ let ping = decodeRequestPayload(req);
+ Assert.equal(
+ req.getHeader("Date"),
+ "Tue, 01 Feb 2011 11:00:00 GMT",
+ "Telemetry should send the correct Date header with requests."
+ );
+ Assert.equal(ping.id, pingId, "Should have received the correct ping id.");
+});
+
+// Test the backoff timeout behavior after send failures.
+add_task(async function test_backoffTimeout() {
+ const TYPE_PREFIX = "test-backoffTimeout-";
+ const TEST_TYPE_C = TYPE_PREFIX + "C";
+ const TEST_TYPE_D = TYPE_PREFIX + "D";
+ const TEST_TYPE_E = TYPE_PREFIX + "E";
+
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById(
+ "TELEMETRY_SEND_SUCCESS"
+ );
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+
+ // Failing a ping send now should trigger backoff behavior.
+ let now = fakeNow(2010, 1, 1, 11, 0, 0);
+ await TelemetrySend.reset();
+ PingServer.stop();
+
+ histSuccess.clear();
+ histSendTimeSuccess.clear();
+ histSendTimeFail.clear();
+
+ fakePingId("c", 0);
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ let sendAttempts = 0;
+ let timerPromise = waitForTimer();
+ await TelemetryController.submitExternalPing(TEST_TYPE_C, {});
+ let [pingSendTimerCallback, pingSendTimeout] = await timerPromise;
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 1,
+ "Should have one pending ping."
+ );
+ ++sendAttempts;
+
+ const MAX_BACKOFF_TIMEOUT = 120 * MS_IN_A_MINUTE;
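+  // The send backoff doubles after each failed attempt: 2, 4, 8, 16, 32 and 64
+  // minutes here, after which the tick timeout is capped at MAX_BACKOFF_TIMEOUT.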
+ for (
+ let timeout = 2 * MS_IN_A_MINUTE;
+ timeout <= MAX_BACKOFF_TIMEOUT;
+ timeout *= 2
+ ) {
+ Assert.ok(!!pingSendTimerCallback, "Should have received a timer callback");
+ Assert.equal(
+ pingSendTimeout,
+ timeout,
+ "Send tick timeout should be correct"
+ );
+
+ let callback = pingSendTimerCallback;
+ now = fakeNow(futureDate(now, pingSendTimeout));
+ timerPromise = waitForTimer();
+ await callback();
+ [pingSendTimerCallback, pingSendTimeout] = await timerPromise;
+ ++sendAttempts;
+ }
+
+ timerPromise = waitForTimer();
+ await pingSendTimerCallback();
+ [pingSendTimerCallback, pingSendTimeout] = await timerPromise;
+ Assert.equal(
+ pingSendTimeout,
+ MAX_BACKOFF_TIMEOUT,
+ "Tick timeout should be capped"
+ );
+ ++sendAttempts;
+
+ Assert.deepEqual(
+ histSuccess.snapshot().values,
+ { 0: sendAttempts, 1: 0 },
+ "Should have recorded sending failure in histograms."
+ );
+ Assert.equal(
+ histSendTimeSuccess.snapshot().sum,
+ 0,
+ "Should not have recorded any sending success in histograms yet."
+ );
+ Assert.greaterOrEqual(
+ histSendTimeFail.snapshot().sum,
+ 0,
+ "Should have recorded send failure times in histograms."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeFail.snapshot()),
+ sendAttempts,
+ "Should have recorded send failure times in histograms."
+ );
+
+ // Submitting a new ping should reset the backoff behavior.
+ fakePingId("d", 0);
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ timerPromise = waitForTimer();
+ await TelemetryController.submitExternalPing(TEST_TYPE_D, {});
+ [pingSendTimerCallback, pingSendTimeout] = await timerPromise;
+ Assert.equal(
+ pingSendTimeout,
+ 2 * MS_IN_A_MINUTE,
+ "Send tick timeout should be correct"
+ );
+ sendAttempts += 2;
+
+ // With the server running again, we should send out the pending pings immediately
+ // when a new ping is submitted.
+ PingServer.start();
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ fakePingId("e", 0);
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ timerPromise = waitForTimer();
+ await TelemetryController.submitExternalPing(TEST_TYPE_E, {});
+
+ let pings = await PingServer.promiseNextPings(3);
+ let countByType = countPingTypes(pings);
+
+ Assert.equal(
+ countByType.get(TEST_TYPE_C),
+ 1,
+ "Should have received the correct amount of type C pings"
+ );
+ Assert.equal(
+ countByType.get(TEST_TYPE_D),
+ 1,
+ "Should have received the correct amount of type D pings"
+ );
+ Assert.equal(
+ countByType.get(TEST_TYPE_E),
+ 1,
+ "Should have received the correct amount of type E pings"
+ );
+
+ await TelemetrySend.testWaitOnOutgoingPings();
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 0,
+ "Should have no pending pings left"
+ );
+
+ Assert.deepEqual(
+ histSuccess.snapshot().values,
+ { 0: sendAttempts, 1: 3, 2: 0 },
+ "Should have recorded sending failure in histograms."
+ );
+ Assert.greaterOrEqual(
+ histSendTimeSuccess.snapshot().sum,
+ 0,
+ "Should have recorded sending success in histograms."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeSuccess.snapshot()),
+ 3,
+ "Should have recorded sending success in histograms."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeFail.snapshot()),
+ sendAttempts,
+ "Should have recorded send failure times in histograms."
+ );
+
+ // Restore the default ping id generator.
+ fakeGeneratePingId(() => TelemetryUtils.generateUUID());
+});
+
+add_task(async function test_discardBigPings() {
+ const TEST_PING_TYPE = "test-ping-type";
+
+ let histSizeExceeded = Telemetry.getHistogramById(
+ "TELEMETRY_PING_SIZE_EXCEEDED_SEND"
+ );
+ let histDiscardedSize = Telemetry.getHistogramById(
+ "TELEMETRY_DISCARDED_SEND_PINGS_SIZE_MB"
+ );
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById(
+ "TELEMETRY_SEND_SUCCESS"
+ );
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+ for (let h of [
+ histSizeExceeded,
+ histDiscardedSize,
+ histSuccess,
+ histSendTimeSuccess,
+ histSendTimeFail,
+ ]) {
+ h.clear();
+ }
+
+ // Submit a ping of a normal size and check that we don't count it in the histogram.
+ await TelemetryController.submitExternalPing(TEST_PING_TYPE, {
+ test: "test",
+ });
+ await TelemetrySend.testWaitOnOutgoingPings();
+ await PingServer.promiseNextPing();
+
+ Assert.equal(
+ histSizeExceeded.snapshot().sum,
+ 0,
+ "Telemetry must report no oversized ping submitted."
+ );
+ Assert.equal(
+ histDiscardedSize.snapshot().sum,
+ 0,
+ "Telemetry must report no oversized pings."
+ );
+ Assert.deepEqual(
+ histSuccess.snapshot().values,
+ { 0: 0, 1: 1, 2: 0 },
+ "Should have recorded sending success."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeSuccess.snapshot()),
+ 1,
+ "Should have recorded send success time."
+ );
+ Assert.greaterOrEqual(
+ histSendTimeSuccess.snapshot().sum,
+ 0,
+ "Should have recorded send success time."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeFail.snapshot()),
+ 0,
+ "Should not have recorded send failure time."
+ );
+
+ // Submit an oversized ping and check that it gets discarded.
+ TelemetryHealthPing.testReset();
+ // Ensure next ping has a 2 MB gzipped payload.
+ fakeGzipCompressStringForNextPing(2 * 1024 * 1024);
+ const OVERSIZED_PAYLOAD = {
+ data: "empty on purpose - policy takes care of size",
+ };
+ await TelemetryController.submitExternalPing(
+ TEST_PING_TYPE,
+ OVERSIZED_PAYLOAD
+ );
+ await TelemetrySend.testWaitOnOutgoingPings();
+ let ping = await PingServer.promiseNextPing();
+
+ Assert.equal(
+ ping.type,
+ TelemetryHealthPing.HEALTH_PING_TYPE,
+ "Should have received a health ping."
+ );
+ Assert.equal(
+ ping.payload.reason,
+ TelemetryHealthPing.Reason.IMMEDIATE,
+ "Health ping should have the right reason."
+ );
+ Assert.deepEqual(
+ ping.payload[TelemetryHealthPing.FailureType.DISCARDED_FOR_SIZE],
+ { [TEST_PING_TYPE]: 1 },
+ "Should have recorded correct type of oversized ping."
+ );
+ Assert.deepEqual(
+ ping.payload.os,
+ TelemetryHealthPing.OsInfo,
+ "Should have correct os info."
+ );
+
+ Assert.equal(
+ histSizeExceeded.snapshot().sum,
+ 1,
+ "Telemetry must report 1 oversized ping submitted."
+ );
+ Assert.equal(
+ histDiscardedSize.snapshot().values[2],
+ 1,
+ "Telemetry must report a 2MB, oversized, ping submitted."
+ );
+
+ Assert.deepEqual(
+ histSuccess.snapshot().values,
+ { 0: 0, 1: 2, 2: 0 },
+ "Should have recorded sending success."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeSuccess.snapshot()),
+ 2,
+ "Should have recorded send success time."
+ );
+ Assert.greaterOrEqual(
+ histSendTimeSuccess.snapshot().sum,
+ 0,
+ "Should have recorded send success time."
+ );
+ Assert.equal(
+ histogramValueCount(histSendTimeFail.snapshot()),
+ 0,
+ "Should not have recorded send failure time."
+ );
+});
+
+add_task(async function test_largeButWithinLimit() {
+ const TEST_PING_TYPE = "test-ping-type";
+
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ histSuccess.clear();
+
+ // Next ping will have a 900KB gzip payload.
+ fakeGzipCompressStringForNextPing(900 * 1024);
+ const LARGE_PAYLOAD = {
+ data: "empty on purpose - policy takes care of size",
+ };
+
+ await TelemetryController.submitExternalPing(TEST_PING_TYPE, LARGE_PAYLOAD);
+ await TelemetrySend.testWaitOnOutgoingPings();
+ await PingServer.promiseNextRequest();
+
+ Assert.deepEqual(
+ histSuccess.snapshot().values,
+ { 0: 0, 1: 1, 2: 0 },
+ "Should have sent large ping."
+ );
+});
+
+add_task(async function test_evictedOnServerErrors() {
+ const TEST_TYPE = "test-evicted";
+
+ await TelemetrySend.reset();
+
+ let histEvicted = Telemetry.getHistogramById(
+ "TELEMETRY_PING_EVICTED_FOR_SERVER_ERRORS"
+ );
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById(
+ "TELEMETRY_SEND_SUCCESS"
+ );
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+ for (let h of [
+ histEvicted,
+ histSuccess,
+ histSendTimeSuccess,
+ histSendTimeFail,
+ ]) {
+ h.clear();
+ }
+
+ // Write a custom ping handler which will return 403. This will trigger ping eviction
+  // on the client side.
+ PingServer.registerPingHandler((req, res) => {
+ res.setStatusLine(null, 403, "Forbidden");
+ res.processAsync();
+ res.finish();
+ });
+
+ // Clear the histogram and submit a ping.
+ let pingId = await TelemetryController.submitExternalPing(TEST_TYPE, {});
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+ Assert.equal(
+ histEvicted.snapshot().sum,
+ 1,
+ "Telemetry must report a ping evicted due to server errors"
+ );
+ Assert.deepEqual(histSuccess.snapshot().values, { 0: 0, 1: 1, 2: 0 });
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 1);
+ Assert.greaterOrEqual(histSendTimeSuccess.snapshot().sum, 0);
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), 0);
+
+ // The ping should not be persisted.
+ await Assert.rejects(
+ TelemetryStorage.loadPendingPing(pingId),
+ /TelemetryStorage.loadPendingPing - no ping with id/,
+ "The ping must not be persisted."
+ );
+
+ // Reset the ping handler and submit a new ping.
+ PingServer.resetPingHandler();
+ pingId = await TelemetryController.submitExternalPing(TEST_TYPE, {});
+
+ let ping = await PingServer.promiseNextPings(1);
+ Assert.equal(ping[0].id, pingId, "The correct ping must be received");
+
+ // We should not have updated the error histogram.
+ await TelemetrySend.testWaitOnOutgoingPings();
+ Assert.equal(
+ histEvicted.snapshot().sum,
+ 1,
+ "Telemetry must report only one ping evicted due to server errors"
+ );
+ Assert.deepEqual(histSuccess.snapshot().values, { 0: 0, 1: 2, 2: 0 });
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 2);
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), 0);
+});
+
+add_task(async function test_tooLateToSend() {
+ Assert.ok(true, "TEST BEGIN");
+ const TEST_TYPE = "test-too-late-to-send";
+
+ await TelemetrySend.reset();
+ PingServer.start();
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Should not have received any pings now")
+ );
+
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 0,
+ "Should have no pending pings yet"
+ );
+
+ TelemetrySend.testTooLateToSend(true);
+
+ const id = await TelemetryController.submitExternalPing(TEST_TYPE, {});
+
+  // Triggering a shutdown should persist the pings.
+ await TelemetrySend.shutdown();
+ const pendingPings = TelemetryStorage.getPendingPingList();
+ Assert.equal(pendingPings.length, 1, "Should have a pending ping in storage");
+ Assert.equal(pendingPings[0].id, id, "Should have pended our test's ping");
+
+ Assert.equal(
+ Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE_TYPE").snapshot()
+ .values[7],
+ 1,
+ "Should have registered the failed attempt to send"
+ );
+ Assert.equal(
+ Telemetry.getKeyedHistogramById(
+ "TELEMETRY_SEND_FAILURE_TYPE_PER_PING"
+ ).snapshot()[TEST_TYPE].values[7],
+ 1,
+ "Should have registered the failed attempt to send TEST_TYPE ping"
+ );
+ await TelemetryStorage.reset();
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 0,
+ "Should clean up after yourself"
+ );
+});
+
+add_task(
+ { skip_if: () => gIsAndroid },
+ async function test_pingSenderShutdownBatch() {
+ const TEST_TYPE = "test-ping-sender-batch";
+
+ await TelemetrySend.reset();
+ PingServer.start();
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Should not have received any pings at this time.")
+ );
+
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 0,
+ "Should have no pending pings yet"
+ );
+
+ TelemetrySend.testTooLateToSend(true);
+
+ const id = await TelemetryController.submitExternalPing(
+ TEST_TYPE,
+ { payload: false },
+ { usePingSender: true }
+ );
+ const id2 = await TelemetryController.submitExternalPing(
+ TEST_TYPE,
+ { payload: false },
+ { usePingSender: true }
+ );
+
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 2,
+ "Should have stored these two pings in pending land."
+ );
+
+ // Permit pings to be received.
+ PingServer.resetPingHandler();
+
+ TelemetrySend.flushPingSenderBatch();
+
+ const ping = await PingServer.promiseNextPing();
+ Assert.equal(ping.type, TEST_TYPE);
+ Assert.equal(ping.id, id);
+
+ const ping2 = await PingServer.promiseNextPing();
+ Assert.equal(ping2.type, TEST_TYPE);
+ Assert.equal(ping2.id, id2);
+
+ await TelemetryStorage.reset();
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 0,
+ "Should clean up after yourself"
+ );
+ }
+);
+
+// Test that the current, non-persisted pending pings are properly saved on shutdown.
+add_task(async function test_persistCurrentPingsOnShutdown() {
+ const TEST_TYPE = "test-persistCurrentPingsOnShutdown";
+ const PING_COUNT = 5;
+ await TelemetrySend.reset();
+ PingServer.stop();
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 0,
+ "Should have no pending pings yet"
+ );
+
+ // Submit new pings that shouldn't be persisted yet.
+ let ids = [];
+  for (let i = 0; i < PING_COUNT; ++i) {
+ ids.push(fakePingId("f", i));
+ TelemetryController.submitExternalPing(TEST_TYPE, {});
+ }
+
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ PING_COUNT,
+ "Should have the correct pending ping count"
+ );
+
+ // Triggering a shutdown should persist the pings.
+ await TelemetrySend.shutdown();
+ Assert.ok(
+ await checkPingsSaved(ids),
+ "All pending pings should have been persisted"
+ );
+
+ // After a restart the pings should have been found when scanning.
+ await TelemetrySend.reset();
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ PING_COUNT,
+ "Should have the correct pending ping count"
+ );
+
+ // Restore the default ping id generator.
+ fakeGeneratePingId(() => TelemetryUtils.generateUUID());
+});
+
+add_task(async function test_sendCheckOverride() {
+ const TEST_PING_TYPE = "test-sendCheckOverride";
+
+ // Clear any pending pings.
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+
+ // Enable the ping server.
+ PingServer.start();
+ Services.prefs.setStringPref(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+
+ // Start Telemetry and disable the test-mode so pings don't get
+ // sent unless we enable the override.
+ await TelemetryController.testReset();
+
+ // Submit a test ping and make sure it doesn't get sent. We only do
+ // that if we're on unofficial builds: pings will always get sent otherwise.
+ if (!Services.telemetry.isOfficialTelemetry) {
+ TelemetrySend.setTestModeEnabled(false);
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Should not have received any pings now")
+ );
+
+ await TelemetryController.submitExternalPing(TEST_PING_TYPE, {
+ test: "test",
+ });
+ Assert.equal(
+ TelemetrySend.pendingPingCount,
+ 0,
+ "Should have no pending pings"
+ );
+ }
+
+ // Enable the override and try to send again.
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.OverrideOfficialCheck,
+ true
+ );
+ PingServer.resetPingHandler();
+ await TelemetrySend.reset();
+ await TelemetryController.submitExternalPing(TEST_PING_TYPE, {
+ test: "test",
+ });
+
+ // Make sure we received the ping.
+ const ping = await PingServer.promiseNextPing();
+ Assert.equal(
+ ping.type,
+ TEST_PING_TYPE,
+ "Must receive a ping of the expected type"
+ );
+
+ // Restore the test mode and disable the override.
+ TelemetrySend.setTestModeEnabled(true);
+ Services.prefs.clearUserPref(
+ TelemetryUtils.Preferences.OverrideOfficialCheck
+ );
+});
+
+add_task(async function test_submissionPath() {
+ const PING_FORMAT_VERSION = 4;
+ const TEST_PING_TYPE = "test-ping-type";
+
+ await TelemetrySend.reset();
+ PingServer.clearRequests();
+
+ await sendPing(false, false);
+
+ // Fetch the request from the server.
+ let request = await PingServer.promiseNextRequest();
+
+ // Get the payload.
+ let ping = decodeRequestPayload(request);
+ checkPingFormat(ping, TEST_PING_TYPE, false, false);
+
+ let app = ping.application;
+ let pathComponents = [
+ ping.id,
+ ping.type,
+ app.name,
+ app.version,
+ app.channel,
+ app.buildId,
+ ];
+
+ let urlComponents = request.path.split("/");
+
+ for (let i = 0; i < pathComponents.length; i++) {
+ Assert.ok(
+ urlComponents.includes(pathComponents[i]),
+ `Path should include ${pathComponents[i]}`
+ );
+ }
+
+ // Check that we have a version query parameter in the URL.
+ Assert.notEqual(request.queryString, "");
+
+ // Make sure the version in the query string matches the new ping format version.
+ let params = request.queryString.split("&");
+ Assert.ok(params.find(p => p == "v=" + PING_FORMAT_VERSION));
+});
+
+add_task(async function testCookies() {
+ const TEST_TYPE = "test-cookies";
+
+ await TelemetrySend.reset();
+ PingServer.clearRequests();
+
+ let uri = Services.io.newURI("http://localhost:" + PingServer.port);
+ let channel = NetUtil.newChannel({
+ uri,
+ loadUsingSystemPrincipal: true,
+ contentPolicyType: Ci.nsIContentPolicy.TYPE_DOCUMENT,
+ });
+ Services.cookies.QueryInterface(Ci.nsICookieService);
+ Services.cookies.setCookieStringFromHttp(uri, "cookie-time=yes", channel);
+
+ const id = await TelemetryController.submitExternalPing(TEST_TYPE, {});
+ let foundit = false;
+ while (!foundit) {
+ var request = await PingServer.promiseNextRequest();
+ var ping = decodeRequestPayload(request);
+ foundit = id === ping.id;
+ }
+ Assert.equal(id, ping.id, "We're testing the right ping's request, right?");
+ Assert.equal(
+ false,
+ request.hasHeader("Cookie"),
+ "Request should not have Cookie header"
+ );
+});
+
+add_task(async function test_pref_observer() {
+ // This test requires the presence of the crash reporter component.
+ let registrar = Components.manager.QueryInterface(Ci.nsIComponentRegistrar);
+ if (
+ !registrar.isContractIDRegistered("@mozilla.org/toolkit/crash-reporter;1")
+ ) {
+ return;
+ }
+
+ await TelemetrySend.setup(true);
+
+ const IS_UNIFIED_TELEMETRY = Services.prefs.getBoolPref(
+ TelemetryUtils.Preferences.Unified,
+ false
+ );
+
+ let origTelemetryEnabled = Services.prefs.getBoolPref(
+ TelemetryUtils.Preferences.TelemetryEnabled
+ );
+ let origFhrUploadEnabled = Services.prefs.getBoolPref(
+ TelemetryUtils.Preferences.FhrUploadEnabled
+ );
+
+ if (!IS_UNIFIED_TELEMETRY) {
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.TelemetryEnabled,
+ true
+ );
+ }
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
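+  // Register a mock nsICrashReporter, run `trigger`, and resolve once both the
+  // TelemetryClientId and TelemetryServerURL annotations have been set (or
+  // removed). Reject on unexpected annotation keys or, when expectedValue is
+  // true, on empty annotation values.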
+ function waitAnnotateCrashReport(expectedValue, trigger) {
+ return new Promise(function(resolve, reject) {
+ let keys = new Set(["TelemetryClientId", "TelemetryServerURL"]);
+
+ let crs = {
+ QueryInterface: ChromeUtils.generateQI(["nsICrashReporter"]),
+ annotateCrashReport(key, value) {
+ if (!keys.delete(key)) {
+ MockRegistrar.unregister(gMockCrs);
+ reject(
+ Error(`Crash report annotation with unexpected key: "${key}".`)
+ );
+ }
+
+ if (expectedValue && value == "") {
+ MockRegistrar.unregister(gMockCrs);
+ reject(Error("Crash report annotation without expected value."));
+ }
+
+ if (keys.size == 0) {
+ MockRegistrar.unregister(gMockCrs);
+ resolve();
+ }
+ },
+ removeCrashReportAnnotation(key) {
+ if (!keys.delete(key)) {
+ MockRegistrar.unregister(gMockCrs);
+ }
+
+ if (keys.size == 0) {
+ MockRegistrar.unregister(gMockCrs);
+ resolve();
+ }
+ },
+ UpdateCrashEventsDir() {},
+ };
+
+ let gMockCrs = MockRegistrar.register(
+ "@mozilla.org/toolkit/crash-reporter;1",
+ crs
+ );
+ registerCleanupFunction(function() {
+ MockRegistrar.unregister(gMockCrs);
+ });
+
+ trigger();
+ });
+ }
+
+ await waitAnnotateCrashReport(!IS_UNIFIED_TELEMETRY, () =>
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.FhrUploadEnabled,
+ false
+ )
+ );
+
+ await waitAnnotateCrashReport(true, () =>
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.FhrUploadEnabled,
+ true
+ )
+ );
+
+ if (!IS_UNIFIED_TELEMETRY) {
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.TelemetryEnabled,
+ origTelemetryEnabled
+ );
+ }
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.FhrUploadEnabled,
+ origFhrUploadEnabled
+ );
+});
+
+add_task(async function cleanup() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js
new file mode 100644
index 0000000000..0fb317f6ec
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js
@@ -0,0 +1,626 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+/**
+ * This test case populates the profile with some fake stored
+ * pings, and checks that pending pings are immediately sent
+ * after delayed init.
+ */
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+const {
+ OS: { File, Path, Constants },
+} = ChromeUtils.import("resource://gre/modules/osfile.jsm");
+
+const PING_SAVE_FOLDER = "saved-telemetry-pings";
+const PING_TIMEOUT_LENGTH = 5000;
+const OLD_FORMAT_PINGS = 4;
+const RECENT_PINGS = 4;
+
+var gCreatedPings = 0;
+var gSeenPings = 0;
+
+/**
+ * Creates some Telemetry pings and saves them to disk. Each ping gets a
+ * unique ID based on an incrementor.
+ *
+ * @param {Array} aPingInfos An array of ping type objects. Each entry must be an
+ *        object containing a "num" field for the number of pings to create and
+ *        an optional "age" field representing the age in milliseconds to offset
+ *        from now. A value of 10 would make the ping 10ms older than now, for
+ *        example.
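+ *        Example: createSavedPings([{ num: 3 }, { num: 2, age: 60 * 1000 }])
+ *        creates three pings dated now and two pings dated one minute ago.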
+ * @returns Promise
+ * @resolve an Array with the created pings ids.
+ */
+var createSavedPings = async function(aPingInfos) {
+ let pingIds = [];
+ let now = Date.now();
+
+ for (let type in aPingInfos) {
+ let num = aPingInfos[type].num;
+ let age = now - (aPingInfos[type].age || 0);
+ for (let i = 0; i < num; ++i) {
+ let pingId = await TelemetryController.addPendingPing(
+ "test-ping",
+ {},
+ { overwrite: true }
+ );
+ if (aPingInfos[type].age) {
+ // savePing writes to the file synchronously, so we're good to
+        // modify the lastModifiedTime now.
+ let filePath = getSavePathForPingId(pingId);
+ await File.setDates(filePath, null, age);
+ }
+ gCreatedPings++;
+ pingIds.push(pingId);
+ }
+ }
+
+ return pingIds;
+};
+
+/**
+ * Deletes locally saved pings if they exist.
+ *
+ * @param aPingIds an Array of ping ids to delete.
+ * @returns Promise
+ */
+var clearPings = async function(aPingIds) {
+ for (let pingId of aPingIds) {
+ await TelemetryStorage.removePendingPing(pingId);
+ }
+};
+
+/**
+ * Fakes the pending pings storage quota.
+ * @param {Integer} aPendingQuota The new quota, in bytes.
+ */
+function fakePendingPingsQuota(aPendingQuota) {
+ let storage = ChromeUtils.import(
+ "resource://gre/modules/TelemetryStorage.jsm",
+ null
+ );
+ storage.Policy.getPendingPingsQuota = () => aPendingQuota;
+}
+
+/**
+ * Returns the path of the file that a ping with the given id should be
+ * stored in locally.
+ *
+ * @returns {String} the full path of the ping file.
+ */
+function getSavePathForPingId(aPingId) {
+ return Path.join(Constants.Path.profileDir, PING_SAVE_FOLDER, aPingId);
+}
+
+/**
+ * Check that the number of Telemetry pings received by the HttpServer equals
+ * aExpectedNum.
+ *
+ * @param aExpectedNum the number of pings we expect to receive.
+ */
+function assertReceivedPings(aExpectedNum) {
+ Assert.equal(gSeenPings, aExpectedNum);
+}
+
+/**
+ * Throws if any ping with an id in aPingIds is still saved locally.
+ *
+ * @param aPingIds an Array of pings ids to check.
+ * @returns Promise
+ */
+var assertNotSaved = async function(aPingIds) {
+ let saved = 0;
+ for (let id of aPingIds) {
+ let filePath = getSavePathForPingId(id);
+ if (await File.exists(filePath)) {
+ saved++;
+ }
+ }
+ if (saved > 0) {
+ do_throw("Found " + saved + " unexpected saved pings.");
+ }
+};
+
+/**
+ * Our handler function for the HttpServer that simply
+ * increments the gSeenPings global for every Telemetry
+ * ping request it receives.
+ *
+ * @param aRequest the HTTP request sent from HttpServer.
+ */
+function pingHandler(aRequest) {
+ gSeenPings++;
+}
+
+add_task(async function test_setup() {
+ PingServer.start();
+ PingServer.registerPingHandler(pingHandler);
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ Services.prefs.setCharPref(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+});
+
+/**
+ * Setup the tests by making sure the ping storage directory is available, otherwise
+ * |TelemetryController.testSaveDirectoryToFile| could fail.
+ */
+add_task(async function setupEnvironment() {
+ // The following tests assume this pref to be true by default.
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ await TelemetryController.testSetup();
+
+ let directory = TelemetryStorage.pingDirectoryPath;
+ await File.makeDir(directory, {
+ ignoreExisting: true,
+ unixMode: OS.Constants.S_IRWXU,
+ });
+
+ await TelemetryStorage.testClearPendingPings();
+});
+
+/**
+ * Test that really recent pings are sent on Telemetry initialization.
+ */
+add_task(async function test_recent_pings_sent() {
+ let pingTypes = [{ num: RECENT_PINGS }];
+ await createSavedPings(pingTypes);
+
+ await TelemetryController.testReset();
+ await TelemetrySend.testWaitOnOutgoingPings();
+ assertReceivedPings(RECENT_PINGS);
+
+ await TelemetryStorage.testClearPendingPings();
+});
+
+/**
+ * Create an overdue ping in the old format and try to send it.
+ */
+add_task(async function test_old_formats() {
+ // A test ping in the old, standard format.
+ const PING_OLD_FORMAT = {
+ slug: "1234567abcd",
+ reason: "test-ping",
+ payload: {
+ info: {
+ reason: "test-ping",
+ OS: "XPCShell",
+ appID: "SomeId",
+ appVersion: "1.0",
+ appName: "XPCShell",
+ appBuildID: "123456789",
+ appUpdateChannel: "Test",
+ platformBuildID: "987654321",
+ },
+ },
+ };
+
+ // A ping with no info section, but with a slug.
+ const PING_NO_INFO = {
+ slug: "1234-no-info-ping",
+ reason: "test-ping",
+ payload: {},
+ };
+
+ // A ping with no payload.
+ const PING_NO_PAYLOAD = {
+ slug: "5678-no-payload",
+ reason: "test-ping",
+ };
+
+ // A ping with no info and no slug.
+ const PING_NO_SLUG = {
+ reason: "test-ping",
+ payload: {},
+ };
+
+ const PING_FILES_PATHS = [
+ getSavePathForPingId(PING_OLD_FORMAT.slug),
+ getSavePathForPingId(PING_NO_INFO.slug),
+ getSavePathForPingId(PING_NO_PAYLOAD.slug),
+ getSavePathForPingId("no-slug-file"),
+ ];
+
+ // Write the ping to file
+ await TelemetryStorage.savePing(PING_OLD_FORMAT, true);
+ await TelemetryStorage.savePing(PING_NO_INFO, true);
+ await TelemetryStorage.savePing(PING_NO_PAYLOAD, true);
+ await TelemetryStorage.savePingToFile(
+ PING_NO_SLUG,
+ PING_FILES_PATHS[3],
+ true
+ );
+
+ gSeenPings = 0;
+ await TelemetryController.testReset();
+ await TelemetrySend.testWaitOnOutgoingPings();
+ assertReceivedPings(OLD_FORMAT_PINGS);
+
+ // |TelemetryStorage.cleanup| doesn't know how to remove a ping with no slug or id,
+ // so remove it manually so that the next test doesn't fail.
+ await OS.File.remove(PING_FILES_PATHS[3]);
+
+ await TelemetryStorage.testClearPendingPings();
+});
+
+add_task(async function test_corrupted_pending_pings() {
+ const TEST_TYPE = "test_corrupted";
+
+ Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_READ").clear();
+ Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_PARSE").clear();
+
+ // Save a pending ping and get its id.
+ let pendingPingId = await TelemetryController.addPendingPing(
+ TEST_TYPE,
+ {},
+ {}
+ );
+
+ // Try to load it: there should be no error.
+ await TelemetryStorage.loadPendingPing(pendingPingId);
+
+ let h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_LOAD_FAILURE_READ"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must not report a pending ping load failure"
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_LOAD_FAILURE_PARSE"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must not report a pending ping parse failure"
+ );
+
+ // Delete it from the disk, so that its id will be kept in the cache but it will
+ // fail loading the file.
+ await OS.File.remove(getSavePathForPingId(pendingPingId));
+
+ // Try to load a pending ping which isn't there anymore.
+ await Assert.rejects(
+ TelemetryStorage.loadPendingPing(pendingPingId),
+ /PingReadError/,
+ "Telemetry must fail loading a ping which isn't there"
+ );
+
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_LOAD_FAILURE_READ"
+ ).snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report a pending ping load failure");
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_LOAD_FAILURE_PARSE"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must not report a pending ping parse failure"
+ );
+
+ // Save a new ping, so that it gets in the pending pings cache.
+ pendingPingId = await TelemetryController.addPendingPing(TEST_TYPE, {}, {});
+ // Overwrite it with a corrupted JSON file and then try to load it.
+ const INVALID_JSON = "{ invalid,JSON { {1}";
+ await OS.File.writeAtomic(getSavePathForPingId(pendingPingId), INVALID_JSON, {
+ encoding: "utf-8",
+ });
+
+ // Try to load the ping with the corrupted JSON content.
+ await Assert.rejects(
+ TelemetryStorage.loadPendingPing(pendingPingId),
+ /PingParseError/,
+ "Telemetry must fail loading a corrupted ping"
+ );
+
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_LOAD_FAILURE_READ"
+ ).snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report a pending ping load failure");
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_LOAD_FAILURE_PARSE"
+ ).snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report a pending ping parse failure");
+
+ let exists = await OS.File.exists(getSavePathForPingId(pendingPingId));
+ Assert.ok(!exists, "The unparseable ping should have been removed");
+
+ await TelemetryStorage.testClearPendingPings();
+});
+
+/**
+ * Create a ping in the old format, send it, and make sure the request URL contains
+ * the correct version query parameter.
+ */
+add_task(async function test_overdue_old_format() {
+ // A test ping in the old, standard format.
+ const PING_OLD_FORMAT = {
+ slug: "1234567abcd",
+ reason: "test-ping",
+ payload: {
+ info: {
+ reason: "test-ping",
+ OS: "XPCShell",
+ appID: "SomeId",
+ appVersion: "1.0",
+ appName: "XPCShell",
+ appBuildID: "123456789",
+ appUpdateChannel: "Test",
+ platformBuildID: "987654321",
+ },
+ },
+ };
+
+ // Write the ping to file
+ await TelemetryStorage.savePing(PING_OLD_FORMAT, true);
+
+ let receivedPings = 0;
+ // Register a new prefix handler to validate the URL.
+ PingServer.registerPingHandler(request => {
+ // Check that we have a version query parameter in the URL.
+ Assert.notEqual(request.queryString, "");
+
+ // Make sure the version in the query string matches the old ping format version.
+ let params = request.queryString.split("&");
+ Assert.ok(params.find(p => p == "v=1"));
+
+ receivedPings++;
+ });
+
+ await TelemetryController.testReset();
+ await TelemetrySend.testWaitOnOutgoingPings();
+ Assert.equal(receivedPings, 1, "We must receive a ping in the old format.");
+
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.resetPingHandler();
+});
+
+add_task(async function test_pendingPingsQuota() {
+ const PING_TYPE = "foo";
+
+ // Disable upload so pings don't get sent and removed from the pending pings directory.
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.FhrUploadEnabled,
+ false
+ );
+
+ // Remove all the pending pings then startup and wait for the cleanup task to complete.
+ // There should be nothing to remove.
+ await TelemetryStorage.testClearPendingPings();
+ await TelemetryController.testReset();
+ await TelemetrySend.testWaitOnOutgoingPings();
+ await TelemetryStorage.testPendingQuotaTaskPromise();
+
+ // Remove the pending optout ping generated when flipping FHR upload off.
+ await TelemetryStorage.testClearPendingPings();
+
+ let expectedPrunedPings = [];
+ let expectedNotPrunedPings = [];
+
+ let checkPendingPings = async function() {
+ // Check that the pruned pings are not on disk anymore.
+ for (let prunedPingId of expectedPrunedPings) {
+ await Assert.rejects(
+ TelemetryStorage.loadPendingPing(prunedPingId),
+ /TelemetryStorage.loadPendingPing - no ping with id/,
+ "Ping " + prunedPingId + " should have been pruned."
+ );
+ const pingPath = getSavePathForPingId(prunedPingId);
+ Assert.ok(
+ !(await OS.File.exists(pingPath)),
+ "The ping should not be on the disk anymore."
+ );
+ }
+
+ // Check that the expected pings are there.
+ for (let expectedPingId of expectedNotPrunedPings) {
+ Assert.ok(
+ await TelemetryStorage.loadPendingPing(expectedPingId),
+ "Ping" + expectedPingId + " should be among the pending pings."
+ );
+ }
+ };
+
+ let pendingPingsInfo = [];
+ let pingsSizeInBytes = 0;
+
+ // Create 10 pings to test the pending pings quota.
+ for (let days = 1; days < 11; days++) {
+ const date = fakeNow(2010, 1, days, 1, 1, 0);
+ const pingId = await TelemetryController.addPendingPing(PING_TYPE, {}, {});
+
+ // Find the size of the ping.
+ const pingFilePath = getSavePathForPingId(pingId);
+ const pingSize = (await OS.File.stat(pingFilePath)).size;
+ // Add the info at the beginning of the array, so that most recent pings come first.
+ pendingPingsInfo.unshift({
+ id: pingId,
+ size: pingSize,
+ timestamp: date.getTime(),
+ });
+
+ // Set the last modification date.
+ await OS.File.setDates(pingFilePath, null, date.getTime());
+
+ // Add it to the pending ping directory size.
+ pingsSizeInBytes += pingSize;
+ }
+
+ // We need to test the pending pings size before we hit the quota, otherwise a special
+ // value is recorded.
+ Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_SIZE_MB").clear();
+ Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA"
+ ).clear();
+ Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_EVICTING_OVER_QUOTA_MS"
+ ).clear();
+
+ await TelemetryController.testReset();
+ await TelemetryStorage.testPendingQuotaTaskPromise();
+
+ // Check that the correct values for quota probes are reported when no quota is hit.
+ let h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_PINGS_SIZE_MB"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ Math.round(pingsSizeInBytes / 1024 / 1024),
+ "Telemetry must report the correct pending pings directory size."
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must report 0 evictions if quota is not hit."
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_EVICTING_OVER_QUOTA_MS"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 0,
+ "Telemetry must report a null elapsed time if quota is not hit."
+ );
+
+ // Set the quota to 80% of the space.
+ const testQuotaInBytes = pingsSizeInBytes * 0.8;
+ fakePendingPingsQuota(testQuotaInBytes);
+
+ // The storage prunes pending pings until we reach 90% of the requested storage quota.
+ // Based on that, find how many pings should be kept.
+ const safeQuotaSize = Math.round(testQuotaInBytes * 0.9);
+ let sizeInBytes = 0;
+ let pingsWithinQuota = [];
+ let pingsOutsideQuota = [];
+
+ for (let pingInfo of pendingPingsInfo) {
+ sizeInBytes += pingInfo.size;
+ if (sizeInBytes >= safeQuotaSize) {
+ pingsOutsideQuota.push(pingInfo.id);
+ continue;
+ }
+ pingsWithinQuota.push(pingInfo.id);
+ }
+
+ expectedNotPrunedPings = pingsWithinQuota;
+ expectedPrunedPings = pingsOutsideQuota;
+
+ // Reset TelemetryController to start the pending pings cleanup.
+ await TelemetryController.testReset();
+ await TelemetryStorage.testPendingQuotaTaskPromise();
+ await checkPendingPings();
+
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ pingsOutsideQuota.length,
+ "Telemetry must correctly report the over quota pings evicted from the pending pings directory."
+ );
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_SIZE_MB").snapshot();
+ Assert.equal(
+ h.sum,
+ 17,
+ "Pending pings quota was hit, a special size must be reported."
+ );
+
+ // Trigger a cleanup again and make sure we're not removing anything.
+ await TelemetryController.testReset();
+ await TelemetryStorage.testPendingQuotaTaskPromise();
+ await checkPendingPings();
+
+ const OVERSIZED_PING_ID = "9b21ec8f-f762-4d28-a2c1-44e1c4694f24";
+ // Create a pending oversized ping.
+ const OVERSIZED_PING = {
+ id: OVERSIZED_PING_ID,
+ type: PING_TYPE,
+ creationDate: new Date().toISOString(),
+ // Generate a 2MB string to use as the ping payload.
+ payload: generateRandomString(2 * 1024 * 1024),
+ };
+ await TelemetryStorage.savePendingPing(OVERSIZED_PING);
+
+ // Reset the histograms.
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_PENDING").clear();
+ Telemetry.getHistogramById(
+ "TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB"
+ ).clear();
+
+ // Try to manually load the oversized ping.
+ await Assert.rejects(
+ TelemetryStorage.loadPendingPing(OVERSIZED_PING_ID),
+ /loadPendingPing - exceeded the maximum ping size/,
+ "The oversized ping should have been pruned."
+ );
+ Assert.ok(
+ !(await OS.File.exists(getSavePathForPingId(OVERSIZED_PING_ID))),
+ "The ping should not be on the disk anymore."
+ );
+
+ // Make sure we're correctly updating the related histograms.
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PING_SIZE_EXCEEDED_PENDING"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 1,
+ "Telemetry must report 1 oversized ping in the pending pings directory."
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB"
+ ).snapshot();
+ Assert.equal(h.values[2], 1, "Telemetry must report a 2MB, oversized, ping.");
+
+ // Save the ping again to check if it gets pruned when scanning the pings directory.
+ await TelemetryStorage.savePendingPing(OVERSIZED_PING);
+ expectedPrunedPings.push(OVERSIZED_PING_ID);
+
+ // Scan the pending pings directory.
+ await TelemetryController.testReset();
+ await TelemetryStorage.testPendingQuotaTaskPromise();
+ await checkPendingPings();
+
+ // Make sure we're correctly updating the related histograms.
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_PING_SIZE_EXCEEDED_PENDING"
+ ).snapshot();
+ Assert.equal(
+ h.sum,
+ 2,
+ "Telemetry must report 1 oversized ping in the pending pings directory."
+ );
+ h = Telemetry.getHistogramById(
+ "TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB"
+ ).snapshot();
+ Assert.equal(
+ h.values[2],
+ 2,
+ "Telemetry must report two 2MB, oversized, pings."
+ );
+
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+});
+
+add_task(async function teardown() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySession.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySession.js
new file mode 100644
index 0000000000..f85bed72a4
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySession.js
@@ -0,0 +1,2395 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* This testcase triggers two telemetry pings.
+ *
+ * Telemetry code keeps histograms of past telemetry pings. The first
+ * ping populates these histograms. One of those histograms is then
+ * checked in the second request.
+ */
+
+const { CommonUtils } = ChromeUtils.import(
+ "resource://services-common/utils.js"
+);
+const { ClientID } = ChromeUtils.import("resource://gre/modules/ClientID.jsm");
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryEnvironment.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySend.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryReportingPolicy.jsm", this);
+const { Preferences } = ChromeUtils.import(
+ "resource://gre/modules/Preferences.jsm"
+);
+
+const PING_FORMAT_VERSION = 4;
+const PING_TYPE_MAIN = "main";
+const PING_TYPE_SAVED_SESSION = "saved-session";
+
+const REASON_ABORTED_SESSION = "aborted-session";
+const REASON_SAVED_SESSION = "saved-session";
+const REASON_SHUTDOWN = "shutdown";
+const REASON_TEST_PING = "test-ping";
+const REASON_DAILY = "daily";
+const REASON_ENVIRONMENT_CHANGE = "environment-change";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+const IGNORE_HISTOGRAM_TO_CLONE = "MEMORY_HEAP_ALLOCATED";
+const IGNORE_CLONED_HISTOGRAM = "test::ignore_me_also";
+// Add some unicode characters here to ensure that sending them works correctly.
+const SHUTDOWN_TIME = 10000;
+const FAILED_PROFILE_LOCK_ATTEMPTS = 2;
+
+// Constants from prio.h for nsIFileOutputStream.init
+const PR_WRONLY = 0x2;
+const PR_CREATE_FILE = 0x8;
+const PR_TRUNCATE = 0x20;
+const RW_OWNER = parseInt("0600", 8);
+
+const NUMBER_OF_THREADS_TO_LAUNCH = 30;
+var gNumberOfThreadsLaunched = 0;
+
+const MS_IN_ONE_HOUR = 60 * 60 * 1000;
+const MS_IN_ONE_DAY = 24 * MS_IN_ONE_HOUR;
+
+const DATAREPORTING_DIR = "datareporting";
+const ABORTED_PING_FILE_NAME = "aborted-session-ping";
+const ABORTED_SESSION_UPDATE_INTERVAL_MS = 5 * 60 * 1000;
+
+XPCOMUtils.defineLazyGetter(this, "DATAREPORTING_PATH", function() {
+ return OS.Path.join(OS.Constants.Path.profileDir, DATAREPORTING_DIR);
+});
+
+var gClientID = null;
+var gMonotonicNow = 0;
+
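+// Gather startup information and trigger a test ping through TelemetrySession,
+// targeting the local PingServer when it is running.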
+function sendPing() {
+ TelemetrySession.gatherStartup();
+ if (PingServer.started) {
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ return TelemetrySession.testPing();
+ }
+ TelemetrySend.setServer("http://doesnotexist");
+ return TelemetrySession.testPing();
+}
+
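+// Override the session and subsession UUID generators on TelemetrySession's
+// internal Policy object.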
+function fakeGenerateUUID(sessionFunc, subsessionFunc) {
+ let session = ChromeUtils.import(
+ "resource://gre/modules/TelemetrySession.jsm",
+ null
+ );
+ session.Policy.generateSessionUUID = sessionFunc;
+ session.Policy.generateSubsessionUUID = subsessionFunc;
+}
+
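+// Forward a fake observer notification (e.g. "idle") to the TelemetryScheduler.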
+function fakeIdleNotification(topic) {
+ let scheduler = ChromeUtils.import(
+ "resource://gre/modules/TelemetryScheduler.jsm",
+ null
+ );
+ return scheduler.TelemetryScheduler.observe(null, topic, null);
+}
+
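+// Flag startup as interrupted and record some plain and keyed test histogram
+// data so that later payload checks have content to verify.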
+function setupTestData() {
+ Services.startup.interrupted = true;
+ let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
+ h2.add();
+
+ let k1 = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
+ k1.add("a");
+ k1.add("a");
+ k1.add("b");
+}
+
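+// Return an nsIFile for `basename` in the profile directory, removing any
+// pre-existing file and registering cleanup for the end of the test run.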
+function getSavedPingFile(basename) {
+ let tmpDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let pingFile = tmpDir.clone();
+ pingFile.append(basename);
+ if (pingFile.exists()) {
+ pingFile.remove(true);
+ }
+ registerCleanupFunction(function() {
+ try {
+ pingFile.remove(true);
+ } catch (e) {}
+ });
+ return pingFile;
+}
+
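+// Verify the common ping structure: mandatory top-level fields, format version,
+// the application section, and the presence of clientId/environment as requested.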
+function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
+ const MANDATORY_PING_FIELDS = [
+ "type",
+ "id",
+ "creationDate",
+ "version",
+ "application",
+ "payload",
+ ];
+
+ const APPLICATION_TEST_DATA = {
+ buildId: gAppInfo.appBuildID,
+ name: APP_NAME,
+ version: APP_VERSION,
+ vendor: "Mozilla",
+ platformVersion: PLATFORM_VERSION,
+ xpcomAbi: "noarch-spidermonkey",
+ };
+
+ // Check that the ping contains all the mandatory fields.
+ for (let f of MANDATORY_PING_FIELDS) {
+ Assert.ok(f in aPing, f + " must be available.");
+ }
+
+ Assert.equal(aPing.type, aType, "The ping must have the correct type.");
+ Assert.equal(
+ aPing.version,
+ PING_FORMAT_VERSION,
+ "The ping must have the correct version."
+ );
+
+ // Test the application section.
+ for (let f in APPLICATION_TEST_DATA) {
+ Assert.equal(
+ aPing.application[f],
+ APPLICATION_TEST_DATA[f],
+ f + " must have the correct value."
+ );
+ }
+
+ // We can't check the values for channel and architecture. Just make
+ // sure they are in.
+ Assert.ok(
+ "architecture" in aPing.application,
+ "The application section must have an architecture field."
+ );
+ Assert.ok(
+ "channel" in aPing.application,
+ "The application section must have a channel field."
+ );
+
+ // Check the clientId and environment fields, as needed.
+ Assert.equal("clientId" in aPing, aHasClientId);
+ Assert.equal("environment" in aPing, aHasEnvironment);
+}
+
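+// Validate the payload info section: field types, the ping reason, subsession
+// bookkeeping, the revision URL and the timezone offset range.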
+function checkPayloadInfo(data, reason) {
+ const ALLOWED_REASONS = [
+ "environment-change",
+ "shutdown",
+ "daily",
+ "saved-session",
+ "test-ping",
+ ];
+ let numberCheck = arg => {
+ return typeof arg == "number";
+ };
+ let positiveNumberCheck = arg => {
+ return numberCheck(arg) && arg >= 0;
+ };
+ let stringCheck = arg => {
+ return typeof arg == "string" && arg != "";
+ };
+ let revisionCheck = arg => {
+ return AppConstants.MOZILLA_OFFICIAL
+ ? stringCheck(arg)
+ : typeof arg == "string";
+ };
+ let uuidCheck = arg => {
+ return UUID_REGEX.test(arg);
+ };
+ let isoDateCheck = arg => {
+ // We expect use of this version of the ISO format:
+ // 2015-04-12T18:51:19.1+00:00
+ const isoDateRegEx = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+[+-]\d{2}:\d{2}$/;
+ return (
+ stringCheck(arg) &&
+ !Number.isNaN(Date.parse(arg)) &&
+ isoDateRegEx.test(arg)
+ );
+ };
+
+ const EXPECTED_INFO_FIELDS_TYPES = {
+ reason: stringCheck,
+ revision: revisionCheck,
+ timezoneOffset: numberCheck,
+ sessionId: uuidCheck,
+ subsessionId: uuidCheck,
+ // Special cases: previousSessionId and previousSubsessionId are null on first run.
+ previousSessionId: arg => {
+ return arg ? uuidCheck(arg) : true;
+ },
+ previousSubsessionId: arg => {
+ return arg ? uuidCheck(arg) : true;
+ },
+ subsessionCounter: positiveNumberCheck,
+ profileSubsessionCounter: positiveNumberCheck,
+ sessionStartDate: isoDateCheck,
+ subsessionStartDate: isoDateCheck,
+ subsessionLength: positiveNumberCheck,
+ };
+
+ for (let f in EXPECTED_INFO_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+
+ let checkFunc = EXPECTED_INFO_FIELDS_TYPES[f];
+ Assert.ok(
+ checkFunc(data[f]),
+ f + " must have the correct type and valid data " + data[f]
+ );
+ }
+
+ // Check for a valid revision.
+ if (data.revision != "") {
+ const revisionUrlRegEx = /^https?:\/\/hg\.mozilla\.org(\/[a-z\S]+)+(\/rev\/[0-9a-z]+)$/;
+ Assert.ok(revisionUrlRegEx.test(data.revision));
+ }
+
+ // Previous buildId is not mandatory.
+ if (data.previousBuildId) {
+ Assert.ok(stringCheck(data.previousBuildId));
+ }
+
+ Assert.ok(
+ ALLOWED_REASONS.find(r => r == data.reason),
+ "Payload must contain an allowed reason."
+ );
+ Assert.equal(data.reason, reason, "Payload reason must match expected.");
+
+ Assert.ok(
+ Date.parse(data.subsessionStartDate) >= Date.parse(data.sessionStartDate)
+ );
+ Assert.ok(data.profileSubsessionCounter >= data.subsessionCounter);
+
+ // According to https://en.wikipedia.org/wiki/List_of_UTC_time_offsets,
+ // UTC offsets range from -12 to +14 hours.
+ // We don't think the extremes of the range are affected by further
+ // daylight-saving adjustments, but it is possible.
+ Assert.ok(
+ data.timezoneOffset >= -12 * 60,
+ "The timezone must be in a valid range."
+ );
+ Assert.ok(
+ data.timezoneOffset <= 14 * 60,
+ "The timezone must be in a valid range."
+ );
+}
+
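+// Validates the scalars and keyedScalars sections reported for the parent
+// process: value types, string and key length limits, and that keyed scalars
+// contain at least one key.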
+function checkScalars(processes) {
+ // Check that the scalars section is available in the ping payload.
+ const parentProcess = processes.parent;
+ Assert.ok(
+ "scalars" in parentProcess,
+ "The scalars section must be available in the parent process."
+ );
+ Assert.ok(
+ "keyedScalars" in parentProcess,
+ "The keyedScalars section must be available in the parent process."
+ );
+ Assert.equal(
+ typeof parentProcess.scalars,
+ "object",
+ "The scalars entry must be an object."
+ );
+ Assert.equal(
+ typeof parentProcess.keyedScalars,
+ "object",
+ "The keyedScalars entry must be an object."
+ );
+
+ let checkScalar = function(scalar) {
+ // Check if the value is of a supported type.
+ const valueType = typeof scalar;
+ switch (valueType) {
+ case "string":
+ Assert.ok(
+ scalar.length <= 50,
+ "String values can't have more than 50 characters"
+ );
+ break;
+ case "number":
+ Assert.ok(
+ scalar >= 0,
+ "We only support unsigned integer values in scalars."
+ );
+ break;
+ case "boolean":
+ Assert.ok(true, "Boolean scalar found.");
+ break;
+ default:
+ Assert.ok(
+ false,
+ name + " contains an unsupported value type (" + valueType + ")"
+ );
+ }
+ };
+
+ // Check that we have valid scalar entries.
+ const scalars = parentProcess.scalars;
+ for (let name in scalars) {
+ Assert.equal(typeof name, "string", "Scalar names must be strings.");
+ checkScalar(scalars[name]);
+ }
+
+ // Check that we have valid keyed scalar entries.
+ const keyedScalars = parentProcess.keyedScalars;
+ for (let name in keyedScalars) {
+ Assert.equal(typeof name, "string", "Scalar names must be strings.");
+ Assert.ok(
+ Object.keys(keyedScalars[name]).length,
+ "The reported keyed scalars must contain at least 1 key."
+ );
+ for (let key in keyedScalars[name]) {
+ Assert.equal(typeof key, "string", "Keyed scalar keys must be strings.");
+ Assert.ok(
+ key.length <= 70,
+ "Keyed scalar keys can't have more than 70 characters."
+ );
+ checkScalar(keyedScalars[name][key]);
+ }
+ }
+}
+
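+// Full payload check for main/saved-session pings: the info section, simple
+// measurements, the test histograms seeded by setupTestData(), memory reporter
+// histograms, slow SQL, keyed histograms and scalars. |successfulPings| is the
+// number of successful ping submissions expected in the TELEMETRY_SUCCESS and
+// TELEMETRY_SEND_SUCCESS histograms.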
+function checkPayload(payload, reason, successfulPings) {
+ Assert.ok("info" in payload, "Payload must contain an info section.");
+ checkPayloadInfo(payload.info, reason);
+
+ Assert.ok(payload.simpleMeasurements.totalTime >= 0);
+ Assert.equal(payload.simpleMeasurements.startupInterrupted, 1);
+ Assert.equal(payload.simpleMeasurements.shutdownDuration, SHUTDOWN_TIME);
+ Assert.ok("maximalNumberOfConcurrentThreads" in payload.simpleMeasurements);
+ Assert.ok(
+ payload.simpleMeasurements.maximalNumberOfConcurrentThreads >=
+ gNumberOfThreadsLaunched
+ );
+
+ let activeTicks = payload.simpleMeasurements.activeTicks;
+ Assert.ok(activeTicks >= 0);
+
+ if ("browser.timings.last_shutdown" in payload.processes.parent.scalars) {
+ Assert.equal(
+ payload.processes.parent.scalars["browser.timings.last_shutdown"],
+ SHUTDOWN_TIME
+ );
+ }
+
+ Assert.equal(
+ payload.simpleMeasurements.failedProfileLockCount,
+ FAILED_PROFILE_LOCK_ATTEMPTS
+ );
+ let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let failedProfileLocksFile = profileDirectory.clone();
+ failedProfileLocksFile.append("Telemetry.FailedProfileLocks.txt");
+ Assert.ok(!failedProfileLocksFile.exists());
+
+ let isWindows = "@mozilla.org/windows-registry-key;1" in Cc;
+ if (isWindows) {
+ Assert.ok(payload.simpleMeasurements.startupSessionRestoreReadBytes > 0);
+ Assert.ok(payload.simpleMeasurements.startupSessionRestoreWriteBytes > 0);
+ }
+
+ const TELEMETRY_SEND_SUCCESS = "TELEMETRY_SEND_SUCCESS";
+ const TELEMETRY_SUCCESS = "TELEMETRY_SUCCESS";
+ const TELEMETRY_TEST_FLAG = "TELEMETRY_TEST_FLAG";
+ const TELEMETRY_TEST_COUNT = "TELEMETRY_TEST_COUNT";
+ const TELEMETRY_TEST_KEYED_FLAG = "TELEMETRY_TEST_KEYED_FLAG";
+ const TELEMETRY_TEST_KEYED_COUNT = "TELEMETRY_TEST_KEYED_COUNT";
+
+ if (successfulPings > 0) {
+ Assert.ok(TELEMETRY_SEND_SUCCESS in payload.histograms);
+ }
+ Assert.ok(TELEMETRY_TEST_FLAG in payload.histograms);
+ Assert.ok(TELEMETRY_TEST_COUNT in payload.histograms);
+
+ Assert.ok(!(IGNORE_CLONED_HISTOGRAM in payload.histograms));
+
+ // Flag histograms should automagically spring to life.
+ const expected_flag = {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 3,
+ values: { 0: 1, 1: 0 },
+ sum: 0,
+ };
+ let flag = payload.histograms[TELEMETRY_TEST_FLAG];
+ Assert.deepEqual(flag, expected_flag);
+
+ // We should have a test count.
+ const expected_count = {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 4,
+ values: { 0: 1, 1: 0 },
+ sum: 1,
+ };
+ let count = payload.histograms[TELEMETRY_TEST_COUNT];
+ Assert.deepEqual(count, expected_count);
+
+ // There should be a successful report for each previously sent telemetry ping.
+ if (successfulPings > 0) {
+ const expected_tc = {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 2,
+ values: { 0: 2, 1: successfulPings, 2: 0 },
+ sum: successfulPings,
+ };
+ let tc = payload.histograms[TELEMETRY_SUCCESS];
+ Assert.deepEqual(tc, expected_tc);
+ }
+
+ // The ping should include data from memory reporters. We can't check that
+ // this data is correct, because we can't control the values returned by the
+ // memory reporters. But we can at least check that the data is there.
+ //
+ // It's important to check for the presence of reporters with a mix of units,
+ // because MemoryTelemetry has separate logic for each one. But we can't
+ // currently check UNITS_COUNT_CUMULATIVE or UNITS_PERCENTAGE because
+ // Telemetry doesn't touch a memory reporter with these units that's
+ // available on all platforms.
+
+ Assert.ok("MEMORY_TOTAL" in payload.histograms); // UNITS_BYTES
+ Assert.ok("MEMORY_JS_GC_HEAP" in payload.histograms); // UNITS_BYTES
+ Assert.ok("MEMORY_JS_COMPARTMENTS_SYSTEM" in payload.histograms); // UNITS_COUNT
+
+ Assert.ok(
+ "mainThread" in payload.slowSQL && "otherThreads" in payload.slowSQL
+ );
+
+ // Check keyed histogram payload.
+
+ Assert.ok("keyedHistograms" in payload);
+ let keyedHistograms = payload.keyedHistograms;
+ Assert.ok(!(TELEMETRY_TEST_KEYED_FLAG in keyedHistograms));
+ Assert.ok(TELEMETRY_TEST_KEYED_COUNT in keyedHistograms);
+
+ const expected_keyed_count = {
+ a: {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 4,
+ values: { 0: 2, 1: 0 },
+ sum: 2,
+ },
+ b: {
+ range: [1, 2],
+ bucket_count: 3,
+ histogram_type: 4,
+ values: { 0: 1, 1: 0 },
+ sum: 1,
+ },
+ };
+ Assert.deepEqual(
+ expected_keyed_count,
+ keyedHistograms[TELEMETRY_TEST_KEYED_COUNT]
+ );
+
+ Assert.ok(
+ "processes" in payload,
+ "The payload must have a processes section."
+ );
+ Assert.ok(
+ "parent" in payload.processes,
+ "There must be at least a parent process."
+ );
+
+ checkScalars(payload.processes);
+}
+
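+// Writes |contents| to |file| through the safe file output stream: the data
+// goes to a temporary file first and is only moved into place when finish()
+// succeeds.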
+function writeStringToFile(file, contents) {
+ let ostream = Cc[
+ "@mozilla.org/network/safe-file-output-stream;1"
+ ].createInstance(Ci.nsIFileOutputStream);
+ ostream.init(
+ file,
+ PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
+ RW_OWNER,
+ ostream.DEFER_OPEN
+ );
+ ostream.write(contents, contents.length);
+ ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
+ ostream.close();
+}
+
+function write_fake_shutdown_file() {
+ let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let file = profileDirectory.clone();
+ file.append("Telemetry.ShutdownTime.txt");
+ let contents = "" + SHUTDOWN_TIME;
+ writeStringToFile(file, contents);
+}
+
+function write_fake_failedprofilelocks_file() {
+ let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let file = profileDirectory.clone();
+ file.append("Telemetry.FailedProfileLocks.txt");
+ let contents = "" + FAILED_PROFILE_LOCK_ATTEMPTS;
+ writeStringToFile(file, contents);
+}
+
+add_task(async function test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile();
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+
+ // Make it look like we've previously failed to lock a profile a couple times.
+ write_fake_failedprofilelocks_file();
+
+ // Make it look like we've shutdown before.
+ write_fake_shutdown_file();
+
+ let currentMaxNumberOfThreads = Telemetry.maximalNumberOfConcurrentThreads;
+ Assert.ok(currentMaxNumberOfThreads > 0);
+
+ // Try to augment the maximal number of threads currently launched
+ let threads = [];
+ try {
+ for (let i = 0; i < currentMaxNumberOfThreads + 10; ++i) {
+ threads.push(Services.tm.newThread(0));
+ }
+ } catch (ex) {
+ // If memory is too low, it is possible that not all threads will be launched.
+ }
+ gNumberOfThreadsLaunched = threads.length;
+
+ Assert.ok(
+ Telemetry.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched
+ );
+
+ registerCleanupFunction(function() {
+ threads.forEach(function(thread) {
+ thread.shutdown();
+ });
+ });
+
+ await new Promise(resolve =>
+ Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(resolve))
+ );
+});
+
+add_task(async function asyncSetup() {
+ await TelemetryController.testSetup();
+ // Load the client ID from the client ID provider to sanity-check pings.
+ gClientID = await ClientID.getClientID();
+});
+
+// Ensures that expired histograms are not part of the payload.
+add_task(async function test_expiredHistogram() {
+ let dummy = Telemetry.getHistogramById("TELEMETRY_TEST_EXPIRED");
+
+ dummy.add(1);
+
+ Assert.equal(
+ TelemetrySession.getPayload().histograms.TELEMETRY_TEST_EXPIRED,
+ undefined
+ );
+});
+
+add_task(async function sessionTimeExcludingAndIncludingSuspend() {
+ if (gIsAndroid) {
+ // We don't support this new probe on Android at the moment.
+ return;
+ }
+ Preferences.set("toolkit.telemetry.testing.overrideProductsCheck", true);
+ await TelemetryController.testReset();
+ let subsession = TelemetrySession.getPayload("environment-change", true);
+ let parentScalars = subsession.processes.parent.scalars;
+
+ let withSuspend =
+ parentScalars["browser.engagement.session_time_including_suspend"];
+ let withoutSuspend =
+ parentScalars["browser.engagement.session_time_excluding_suspend"];
+
+ Assert.ok(
+ withSuspend > 0,
+ "The session time including suspend should be positive"
+ );
+
+ Assert.ok(
+ withoutSuspend > 0,
+ "The session time excluding suspend should be positive"
+ );
+
+ // Two things about the next assertion:
+ // 1. The two calls to get the two different uptime values are made
+ // separately, so we can't guarantee equality, even if we know the machine
+ // has not been suspended (for example because it's running in infra and
+ // was just booted). In this case the values should be close to each other.
+ // 2. This test will fail if the device running this has been suspended
+ // between booting the Firefox process running this test and running the
+ // following assertion, but that's unlikely in practice.
+ const max_delta_ms = 100;
+
+ Assert.ok(
+ withSuspend - withoutSuspend <= max_delta_ms,
+ "In test condition, the two uptimes should be close to each other"
+ );
+
+ // This, however, should always hold, except on Windows < 10, where the two
+ // clocks come from different system calls, and it can fail under test
+ // conditions because the machine has not been suspended.
+ if (
+ AppConstants.platform != "windows" ||
+ AppConstants.isPlatformAndVersionAtLeast("win", "10.0")
+ ) {
+ Assert.greaterOrEqual(
+ withSuspend,
+ withoutSuspend,
+ `The uptime with suspend must always be greater than or equal to the uptime
+ without suspend`
+ );
+ }
+
+ Preferences.set("toolkit.telemetry.testing.overrideProductsCheck", false);
+});
+
+// Sends a ping to a non-existent server. If we remove this test, we won't get
+// all the histograms we need in the main ping.
+add_task(async function test_noServerPing() {
+ await sendPing();
+ // We need two pings in order to make sure STARTUP_MEMORY_STORAGE_SQLITE histograms
+ // are initialised. See bug 1131585.
+ await sendPing();
+ // Allow Telemetry to persist unsent pings as pending. If omitted, this may
+ // cause problems for the subsequent tests.
+ await TelemetryController.testShutdown();
+});
+
+// Checks that a sent ping is correctly received by a dummy http server.
+add_task(async function test_simplePing() {
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.start();
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+
+ let now = new Date(2020, 1, 1, 12, 5, 6);
+ let expectedDate = new Date(2020, 1, 1, 12, 0, 0);
+ fakeNow(now);
+ gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 5000);
+
+ const expectedSessionUUID = "bd314d15-95bf-4356-b682-b6c4a8942202";
+ const expectedSubsessionUUID = "3e2e5f6c-74ba-4e4d-a93f-a48af238a8c7";
+ fakeGenerateUUID(
+ () => expectedSessionUUID,
+ () => expectedSubsessionUUID
+ );
+ await TelemetryController.testReset();
+
+ // Session and subsession start dates are faked during TelemetrySession setup. We can
+ // now fake the session duration.
+ const SESSION_DURATION_IN_MINUTES = 15;
+ fakeNow(new Date(2020, 1, 1, 12, SESSION_DURATION_IN_MINUTES, 0));
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + SESSION_DURATION_IN_MINUTES * 60 * 1000
+ );
+
+ await sendPing();
+ let ping = await PingServer.promiseNextPing();
+
+ checkPingFormat(ping, PING_TYPE_MAIN, true, true);
+
+ // Check that we get the data we expect.
+ let payload = ping.payload;
+ Assert.equal(payload.info.sessionId, expectedSessionUUID);
+ Assert.equal(payload.info.subsessionId, expectedSubsessionUUID);
+ let sessionStartDate = new Date(payload.info.sessionStartDate);
+ Assert.equal(sessionStartDate.toISOString(), expectedDate.toISOString());
+ let subsessionStartDate = new Date(payload.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+ Assert.equal(payload.info.subsessionLength, SESSION_DURATION_IN_MINUTES * 60);
+
+ // Restore the UUID generator so we don't mess with other tests.
+ fakeGenerateUUID(TelemetryUtils.generateUUID, TelemetryUtils.generateUUID);
+
+ await TelemetryController.testShutdown();
+});
+
+// Saves the current session histograms, reloads them, performs a ping
+// and checks that the dummy http server received both the previously
+// saved ping and the new one.
+add_task(async function test_saveLoadPing() {
+ // Let's start out with a defined state.
+ await TelemetryStorage.testClearPendingPings();
+ await TelemetryController.testReset();
+ PingServer.clearRequests();
+
+ // Setup test data and trigger pings.
+ setupTestData();
+ await TelemetrySession.testSavePendingPing();
+ await sendPing();
+
+ // Get requests received by dummy server.
+ const requests = await PingServer.promiseNextRequests(2);
+
+ for (let req of requests) {
+ Assert.equal(
+ req.getHeader("content-type"),
+ "application/json; charset=UTF-8",
+ "The request must have the correct content-type."
+ );
+ }
+
+ // We decode both requests to check for the |reason|.
+ let pings = Array.from(requests, decodeRequestPayload);
+
+ // Check we have the correct two requests. Ordering is not guaranteed. The ping type
+ // is encoded in the URL.
+ if (pings[0].type != PING_TYPE_MAIN) {
+ pings.reverse();
+ }
+
+ checkPingFormat(pings[0], PING_TYPE_MAIN, true, true);
+ checkPayload(pings[0].payload, REASON_TEST_PING, 0);
+ checkPingFormat(pings[1], PING_TYPE_SAVED_SESSION, true, true);
+ checkPayload(pings[1].payload, REASON_SAVED_SESSION, 0);
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_checkSubsessionScalars() {
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android.
+ return;
+ }
+
+ // Clear the scalars.
+ Telemetry.clearScalars();
+ await TelemetryController.testReset();
+
+ // Set some scalars.
+ const UINT_SCALAR = "telemetry.test.unsigned_int_kind";
+ const STRING_SCALAR = "telemetry.test.string_kind";
+ let expectedUint = 37;
+ let expectedString = "Test value. Yay.";
+ Telemetry.scalarSet(UINT_SCALAR, expectedUint);
+ Telemetry.scalarSet(STRING_SCALAR, expectedString);
+
+ // Check that scalars are not available in classic pings but are in subsession
+ // pings. Also clear the subsession.
+ let classic = TelemetrySession.getPayload();
+ let subsession = TelemetrySession.getPayload("environment-change", true);
+
+ const TEST_SCALARS = [UINT_SCALAR, STRING_SCALAR];
+ for (let name of TEST_SCALARS) {
+ // Scalar must be reported in subsession pings (e.g. main).
+ Assert.ok(
+ name in subsession.processes.parent.scalars,
+ name + " must be reported in a subsession ping."
+ );
+ }
+ // No scalar must be reported in classic pings (e.g. saved-session).
+ Assert.ok(
+ !Object.keys(classic.processes.parent.scalars).length,
+ "Scalars must not be reported in a classic ping."
+ );
+
+ // And make sure that we're getting the right values in the
+ // subsession ping.
+ Assert.equal(
+ subsession.processes.parent.scalars[UINT_SCALAR],
+ expectedUint,
+ UINT_SCALAR + " must contain the expected value."
+ );
+ Assert.equal(
+ subsession.processes.parent.scalars[STRING_SCALAR],
+ expectedString,
+ STRING_SCALAR + " must contain the expected value."
+ );
+
+ // Since we cleared the subsession in the last getPayload(), check that
+ // breaking subsessions clears the scalars.
+ subsession = TelemetrySession.getPayload("environment-change");
+ for (let name of TEST_SCALARS) {
+ Assert.ok(
+ !(name in subsession.processes.parent.scalars),
+ name + " must be cleared with the new subsession."
+ );
+ }
+
+ // Check if setting the scalars again works as expected.
+ expectedUint = 85;
+ expectedString = "A creative different value";
+ Telemetry.scalarSet(UINT_SCALAR, expectedUint);
+ Telemetry.scalarSet(STRING_SCALAR, expectedString);
+ subsession = TelemetrySession.getPayload("environment-change");
+ Assert.equal(
+ subsession.processes.parent.scalars[UINT_SCALAR],
+ expectedUint,
+ UINT_SCALAR + " must contain the expected value."
+ );
+ Assert.equal(
+ subsession.processes.parent.scalars[STRING_SCALAR],
+ expectedString,
+ STRING_SCALAR + " must contain the expected value."
+ );
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_dailyCollection() {
+ if (gIsAndroid) {
+ // We don't do daily collections yet on Android.
+ return;
+ }
+
+ let now = new Date(2030, 1, 1, 12, 0, 0);
+ let nowHour = new Date(2030, 1, 1, 12, 0, 0);
+ let schedulerTickCallback = null;
+
+ PingServer.clearRequests();
+
+ fakeNow(now);
+
+ // Fake scheduler functions to control daily collection flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+
+ // Init and check timer.
+ await TelemetryStorage.testClearPendingPings();
+ await TelemetryController.testReset();
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+
+ // Set histograms to expected state.
+ const COUNT_ID = "TELEMETRY_TEST_COUNT";
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
+ const count = Telemetry.getHistogramById(COUNT_ID);
+ const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
+
+ count.clear();
+ keyed.clear();
+ count.add(1);
+ keyed.add("a", 1);
+ keyed.add("b", 1);
+ keyed.add("b", 1);
+
+ // Make sure the daily ping gets triggered.
+ let expectedDate = nowHour;
+ now = futureDate(nowHour, MS_IN_ONE_DAY);
+ fakeNow(now);
+
+ Assert.ok(!!schedulerTickCallback);
+ // Run a scheduler tick: it should trigger the daily ping.
+ await schedulerTickCallback();
+
+ // Collect the daily ping.
+ let ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping);
+
+ Assert.equal(ping.type, PING_TYPE_MAIN);
+ Assert.equal(ping.payload.info.reason, REASON_DAILY);
+ let subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+
+ Assert.equal(ping.payload.histograms[COUNT_ID].sum, 1);
+ Assert.equal(ping.payload.keyedHistograms[KEYED_ID].a.sum, 1);
+ Assert.equal(ping.payload.keyedHistograms[KEYED_ID].b.sum, 2);
+
+ // The daily ping is rescheduled for "tomorrow".
+ expectedDate = futureDate(expectedDate, MS_IN_ONE_DAY);
+ now = futureDate(now, MS_IN_ONE_DAY);
+ fakeNow(now);
+
+ // Run a scheduler tick. Trigger and collect another ping. The histograms should be reset.
+ await schedulerTickCallback();
+
+ ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping);
+
+ Assert.equal(ping.type, PING_TYPE_MAIN);
+ Assert.equal(ping.payload.info.reason, REASON_DAILY);
+ subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+
+ Assert.ok(!(COUNT_ID in ping.payload.histograms));
+ Assert.ok(!(KEYED_ID in ping.payload.keyedHistograms));
+
+ // Trigger and collect another daily ping, with the histograms being set again.
+ count.add(1);
+ keyed.add("a", 1);
+ keyed.add("b", 1);
+
+ // The daily ping is rescheduled for "tomorrow".
+ expectedDate = futureDate(expectedDate, MS_IN_ONE_DAY);
+ now = futureDate(now, MS_IN_ONE_DAY);
+ fakeNow(now);
+
+ await schedulerTickCallback();
+ ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping);
+
+ Assert.equal(ping.type, PING_TYPE_MAIN);
+ Assert.equal(ping.payload.info.reason, REASON_DAILY);
+ subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+
+ Assert.equal(ping.payload.histograms[COUNT_ID].sum, 1);
+ Assert.equal(ping.payload.keyedHistograms[KEYED_ID].a.sum, 1);
+ Assert.equal(ping.payload.keyedHistograms[KEYED_ID].b.sum, 1);
+
+ // Shutdown to cleanup the aborted-session if it gets created.
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_dailyDuplication() {
+ if (gIsAndroid) {
+ // We don't do daily collections yet on Android.
+ return;
+ }
+
+ await TelemetrySend.reset();
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+ let schedulerTickCallback = null;
+ let now = new Date(2030, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control daily collection flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ // Make sure the daily ping gets triggered at midnight.
+ // We need to make sure that we trigger this after the period where we wait for
+ // the user to become idle.
+ let firstDailyDue = new Date(2030, 1, 2, 0, 0, 0);
+ fakeNow(firstDailyDue);
+
+ // Run a scheduler tick: it should trigger the daily ping.
+ Assert.ok(!!schedulerTickCallback);
+ await schedulerTickCallback();
+
+ // Get the first daily ping.
+ let ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping);
+
+ Assert.equal(ping.type, PING_TYPE_MAIN);
+ Assert.equal(ping.payload.info.reason, REASON_DAILY);
+
+ // We don't expect to receive any other daily ping in this test, so assert if we do.
+ PingServer.registerPingHandler((req, res) => {
+ Assert.ok(
+ false,
+ "No more daily pings should be sent/received in this test."
+ );
+ });
+
+ // Set the current time to a bit after midnight.
+ let secondDailyDue = new Date(firstDailyDue);
+ secondDailyDue.setHours(0);
+ secondDailyDue.setMinutes(15);
+ fakeNow(secondDailyDue);
+
+ // Run a scheduler tick: it should NOT trigger the daily ping.
+ Assert.ok(!!schedulerTickCallback);
+ await schedulerTickCallback();
+
+ // Shutdown to cleanup the aborted-session if it gets created.
+ PingServer.resetPingHandler();
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_dailyOverdue() {
+ if (gIsAndroid) {
+ // We don't do daily collections yet on Android.
+ return;
+ }
+
+ let schedulerTickCallback = null;
+ let now = new Date(2030, 1, 1, 11, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control daily collection flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryStorage.testClearPendingPings();
+ await TelemetryController.testReset();
+
+ // Skip one hour ahead: nothing should be due.
+ now.setHours(now.getHours() + 1);
+ fakeNow(now);
+
+ // Assert if we receive something!
+ PingServer.registerPingHandler((req, res) => {
+ Assert.ok(false, "No daily ping should be received if not overdue!.");
+ });
+
+ // This tick should not trigger any daily ping.
+ Assert.ok(!!schedulerTickCallback);
+ await schedulerTickCallback();
+
+ // Restore the non asserting ping handler.
+ PingServer.resetPingHandler();
+ PingServer.clearRequests();
+
+ // Simulate an overdue ping: we're not close to midnight, but the last daily ping
+ // time is too long ago.
+ let dailyOverdue = new Date(2030, 1, 2, 13, 0, 0);
+ fakeNow(dailyOverdue);
+
+ // Run a scheduler tick: it should trigger the daily ping.
+ Assert.ok(!!schedulerTickCallback);
+ await schedulerTickCallback();
+
+ // Get the first daily ping.
+ let ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping);
+
+ Assert.equal(ping.type, PING_TYPE_MAIN);
+ Assert.equal(ping.payload.info.reason, REASON_DAILY);
+
+ // Shutdown to cleanup the aborted-session if it gets created.
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_environmentChange() {
+ if (gIsAndroid) {
+ // We don't split subsessions on environment changes yet on Android.
+ return;
+ }
+
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+ let now = fakeNow(2040, 1, 1, 12, 0, 0);
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ Preferences.reset(PREF_TEST);
+
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_VALUE }],
+ ]);
+
+ // Setup.
+ await TelemetryController.testReset();
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+
+ // Set histograms to expected state.
+ const COUNT_ID = "TELEMETRY_TEST_COUNT";
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
+ const count = Telemetry.getHistogramById(COUNT_ID);
+ const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
+
+ count.clear();
+ keyed.clear();
+ count.add(1);
+ keyed.add("a", 1);
+ keyed.add("b", 1);
+
+ // Trigger and collect environment-change ping.
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ let startHour = TelemetryUtils.truncateToHours(now);
+ now = fakeNow(futureDate(now, 10 * MILLISECONDS_PER_MINUTE));
+
+ Preferences.set(PREF_TEST, 1);
+ let ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping);
+
+ Assert.equal(ping.type, PING_TYPE_MAIN);
+ Assert.equal(ping.environment.settings.userPrefs[PREF_TEST], undefined);
+ Assert.equal(ping.payload.info.reason, REASON_ENVIRONMENT_CHANGE);
+ let subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), startHour.toISOString());
+
+ Assert.equal(ping.payload.histograms[COUNT_ID].sum, 1);
+ Assert.equal(ping.payload.keyedHistograms[KEYED_ID].a.sum, 1);
+
+ // Trigger and collect another ping. The histograms should be reset.
+ startHour = TelemetryUtils.truncateToHours(now);
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ now = fakeNow(futureDate(now, 10 * MILLISECONDS_PER_MINUTE));
+
+ Preferences.set(PREF_TEST, 2);
+ ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping);
+
+ Assert.equal(ping.type, PING_TYPE_MAIN);
+ Assert.equal(ping.environment.settings.userPrefs[PREF_TEST], 1);
+ Assert.equal(ping.payload.info.reason, REASON_ENVIRONMENT_CHANGE);
+ subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), startHour.toISOString());
+
+ Assert.ok(!(COUNT_ID in ping.payload.histograms));
+ Assert.ok(!(KEYED_ID in ping.payload.keyedHistograms));
+
+ // Trigger and collect another ping. The histograms should be reset.
+ startHour = TelemetryUtils.truncateToHours(now);
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ now = fakeNow(futureDate(now, 10 * MILLISECONDS_PER_MINUTE));
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_experimentAnnotations_subsession() {
+ if (gIsAndroid) {
+ // We don't split subsessions on environment changes yet on Android.
+ return;
+ }
+
+ const EXPERIMENT1 = "experiment-1";
+ const EXPERIMENT1_BRANCH = "nice-branch";
+ const EXPERIMENT2 = "experiment-2";
+ const EXPERIMENT2_BRANCH = "other-branch";
+
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+ let now = fakeNow(2040, 1, 1, 12, 0, 0);
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+
+ // Setup.
+ await TelemetryController.testReset();
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ Assert.equal(TelemetrySession.getPayload().info.subsessionCounter, 1);
+
+ // Trigger a subsession split with a telemetry annotation.
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ let futureTestDate = futureDate(now, 10 * MILLISECONDS_PER_MINUTE);
+ now = fakeNow(futureTestDate);
+ TelemetryEnvironment.setExperimentActive(EXPERIMENT1, EXPERIMENT1_BRANCH);
+
+ let ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping, "A ping must be received.");
+
+ Assert.equal(
+ ping.type,
+ PING_TYPE_MAIN,
+ "The received ping must be a 'main' ping."
+ );
+ Assert.equal(
+ ping.payload.info.reason,
+ REASON_ENVIRONMENT_CHANGE,
+ "The 'main' ping must be triggered by a change in the environment."
+ );
+ // We expect the current experiments to be reported in the next ping, not this
+ // one.
+ Assert.ok(
+ !("experiments" in ping.environment),
+ "The old environment must contain no active experiments."
+ );
+ // Since this change wasn't throttled, the subsession counter must increase.
+ Assert.equal(
+ TelemetrySession.getPayload().info.subsessionCounter,
+ 2,
+ "The experiment annotation must trigger a new subsession."
+ );
+
+ // Add another annotation to the environment. We're not advancing the fake
+ // timer, so no subsession split should happen due to throttling.
+ TelemetryEnvironment.setExperimentActive(EXPERIMENT2, EXPERIMENT2_BRANCH);
+ Assert.equal(
+ TelemetrySession.getPayload().info.subsessionCounter,
+ 2,
+ "The experiment annotation must not trigger a new subsession " +
+ "if throttling happens."
+ );
+ let oldExperiments = TelemetryEnvironment.getActiveExperiments();
+
+ // Fake the timer and remove an annotation, we expect a new subsession split.
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ now = fakeNow(futureDate(now, 10 * MILLISECONDS_PER_MINUTE));
+ TelemetryEnvironment.setExperimentInactive(EXPERIMENT1, EXPERIMENT1_BRANCH);
+
+ ping = await PingServer.promiseNextPing();
+ Assert.ok(!!ping, "A ping must be received.");
+
+ Assert.equal(
+ ping.type,
+ PING_TYPE_MAIN,
+ "The received ping must be a 'main' ping."
+ );
+ Assert.equal(
+ ping.payload.info.reason,
+ REASON_ENVIRONMENT_CHANGE,
+ "The 'main' ping must be triggered by a change in the environment."
+ );
+ // We expect both experiments to be in this environment.
+ Assert.deepEqual(
+ ping.environment.experiments,
+ oldExperiments,
+ "The environment must contain both the experiments."
+ );
+ Assert.equal(
+ TelemetrySession.getPayload().info.subsessionCounter,
+ 3,
+ "The removing an experiment annotation must trigger a new subsession."
+ );
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_savedPingsOnShutdown() {
+ await TelemetryController.testReset();
+
+ // Assure that we store the ping properly when saving sessions on shutdown.
+ // We make the TelemetryController shutdown to trigger a session save.
+ const dir = TelemetryStorage.pingDirectoryPath;
+ await OS.File.removeDir(dir, { ignoreAbsent: true });
+ await OS.File.makeDir(dir);
+ await TelemetryController.testShutdown();
+
+ PingServer.clearRequests();
+ await TelemetryController.testReset();
+
+ const ping = await PingServer.promiseNextPing();
+
+ let expectedType = gIsAndroid ? PING_TYPE_SAVED_SESSION : PING_TYPE_MAIN;
+ let expectedReason = gIsAndroid ? REASON_SAVED_SESSION : REASON_SHUTDOWN;
+
+ checkPingFormat(ping, expectedType, true, true);
+ Assert.equal(ping.payload.info.reason, expectedReason);
+ Assert.equal(ping.clientId, gClientID);
+});
+
+add_task(async function test_sendShutdownPing() {
+ if (
+ gIsAndroid ||
+ (AppConstants.platform == "linux" && OS.Constants.Sys.bits == 32)
+ ) {
+ // We don't support the pingsender on Android yet, see bug 1335917.
+ // We also don't support pingsender testing on Treeherder for
+ // Linux 32 bit (due to missing libraries), so skip it there too.
+ // See bug 1310703 comment 78.
+ return;
+ }
+
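+ // Verifies that exactly one pending ping is saved on disk and that it is a
+ // "shutdown" ping.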
+ let checkPendingShutdownPing = async function() {
+ let pendingPings = await TelemetryStorage.loadPendingPingList();
+ Assert.equal(pendingPings.length, 1, "We expect 1 pending ping: shutdown.");
+ // Load the pings off the disk.
+ const shutdownPing = await TelemetryStorage.loadPendingPing(
+ pendingPings[0].id
+ );
+ Assert.ok(shutdownPing, "The 'shutdown' ping must be saved to disk.");
+ Assert.equal(
+ "shutdown",
+ shutdownPing.payload.info.reason,
+ "The 'shutdown' ping must be saved to disk."
+ );
+ };
+
+ Preferences.set(TelemetryUtils.Preferences.ShutdownPingSender, true);
+ Preferences.set(TelemetryUtils.Preferences.FirstRun, false);
+ // Make sure the reporting policy picks up the updated pref.
+ TelemetryReportingPolicy.testUpdateFirstRun();
+ PingServer.clearRequests();
+ Telemetry.clearScalars();
+
+ // Shutdown telemetry and wait for an incoming ping.
+ let nextPing = PingServer.promiseNextPing();
+ await TelemetryController.testShutdown();
+ let ping = await nextPing;
+
+ // Check that we received a shutdown ping.
+ checkPingFormat(ping, ping.type, true, true);
+ Assert.equal(ping.payload.info.reason, REASON_SHUTDOWN);
+ Assert.equal(ping.clientId, gClientID);
+ // Try again, this time disable ping upload. The PingSender
+ // should not be sending any ping!
+ PingServer.registerPingHandler(() =>
+ Assert.ok(false, "Telemetry must not send pings if not allowed to.")
+ );
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, false);
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+
+ // Make sure we have no pending pings between the runs.
+ await TelemetryStorage.testClearPendingPings();
+
+ // Enable ping upload and signal an OS shutdown. The pingsender
+ // will not be spawned and no ping will be sent.
+ Preferences.set(TelemetryUtils.Preferences.FhrUploadEnabled, true);
+ await TelemetryController.testReset();
+ Services.obs.notifyObservers(null, "quit-application-forced");
+ await TelemetryController.testShutdown();
+ // After re-enabling FHR, wait for the new client ID
+ gClientID = await ClientID.getClientID();
+
+ // Check that the "shutdown" ping was correctly saved to disk.
+ await checkPendingShutdownPing();
+
+ // Make sure we have no pending pings between the runs.
+ await TelemetryStorage.testClearPendingPings();
+ Telemetry.clearScalars();
+
+ await TelemetryController.testReset();
+ Services.obs.notifyObservers(
+ null,
+ "quit-application-granted",
+ "syncShutdown"
+ );
+ await TelemetryController.testShutdown();
+ await checkPendingShutdownPing();
+
+ // Make sure we have no pending pings between the runs.
+ await TelemetryStorage.testClearPendingPings();
+
+ // Disable the "submission policy". The shutdown ping must not be sent.
+ Preferences.set(TelemetryUtils.Preferences.BypassNotification, false);
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+
+ // Make sure we have no pending pings between the runs.
+ await TelemetryStorage.testClearPendingPings();
+
+ // We cannot reset the BypassNotification pref, as we need it to be
+ // |true| in tests.
+ Preferences.set(TelemetryUtils.Preferences.BypassNotification, true);
+
+ // With both upload enabled and the policy shown, make sure we don't
+ // send the shutdown ping using the pingsender on the first
+ // subsession.
+ Preferences.set(TelemetryUtils.Preferences.FirstRun, true);
+ // Make sure the reporting policy picks up the updated pref.
+ TelemetryReportingPolicy.testUpdateFirstRun();
+
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+
+ // Clear the state and prepare for the next test.
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ PingServer.resetPingHandler();
+
+ // Check that we're able to send the shutdown ping using the pingsender
+ // from the first session if the related pref is on.
+ Preferences.set(
+ TelemetryUtils.Preferences.ShutdownPingSenderFirstSession,
+ true
+ );
+ Preferences.set(TelemetryUtils.Preferences.FirstRun, true);
+ TelemetryReportingPolicy.testUpdateFirstRun();
+
+ // Restart/shutdown telemetry and wait for an incoming ping.
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ ping = await PingServer.promiseNextPing();
+
+ // Check that we received a shutdown ping.
+ checkPingFormat(ping, ping.type, true, true);
+ Assert.equal(ping.payload.info.reason, REASON_SHUTDOWN);
+ Assert.equal(ping.clientId, gClientID);
+
+ // Reset the pref and restart Telemetry.
+ Preferences.set(TelemetryUtils.Preferences.ShutdownPingSender, false);
+ Preferences.set(
+ TelemetryUtils.Preferences.ShutdownPingSenderFirstSession,
+ false
+ );
+ Preferences.reset(TelemetryUtils.Preferences.FirstRun);
+ PingServer.resetPingHandler();
+});
+
+add_task(async function test_sendFirstShutdownPing() {
+ if (
+ gIsAndroid ||
+ (AppConstants.platform == "linux" && OS.Constants.Sys.bits == 32)
+ ) {
+ // We don't support the pingsender on Android yet, see bug 1335917.
+ // We also don't support pingsender testing on Treeherder for
+ // Linux 32 bit (due to missing libraries), so skip it there too.
+ // See bug 1310703 comment 78.
+ return;
+ }
+
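+ // Loads all pending pings from disk and returns the "first-shutdown" one,
+ // if any.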
+ let storageContainsFirstShutdown = async function() {
+ let pendingPings = await TelemetryStorage.loadPendingPingList();
+ let pings = await Promise.all(
+ pendingPings.map(async p => {
+ return TelemetryStorage.loadPendingPing(p.id);
+ })
+ );
+ return pings.find(p => p.type == "first-shutdown");
+ };
+
+ let checkShutdownNotSent = async function() {
+ // The failure-mode of the ping-sender is used to check that a ping was
+ // *not* sent. This can be combined with the state of the storage to infer
+ // the appropriate behavior from the preference flags.
+
+ // Assert failure if we receive a ping.
+ PingServer.registerPingHandler((req, res) => {
+ const receivedPing = decodeRequestPayload(req);
+ Assert.ok(
+ false,
+ `No ping should be received in this test (got ${receivedPing.id}).`
+ );
+ });
+
+ // Assert that pings are sent on first run, simulating a forced application
+ // quit. This should be equivalent to the first test in this suite.
+ Preferences.set(TelemetryUtils.Preferences.FirstRun, true);
+ TelemetryReportingPolicy.testUpdateFirstRun();
+
+ await TelemetryController.testReset();
+ Services.obs.notifyObservers(null, "quit-application-forced");
+ await TelemetryController.testShutdown();
+ Assert.ok(
+ await storageContainsFirstShutdown(),
+ "The 'first-shutdown' ping must be saved to disk."
+ );
+
+ await TelemetryStorage.testClearPendingPings();
+
+ // Assert that it's not sent during subsequent runs
+ Preferences.set(TelemetryUtils.Preferences.FirstRun, false);
+ TelemetryReportingPolicy.testUpdateFirstRun();
+
+ await TelemetryController.testReset();
+ Services.obs.notifyObservers(null, "quit-application-forced");
+ await TelemetryController.testShutdown();
+ Assert.ok(
+ !(await storageContainsFirstShutdown()),
+ "The 'first-shutdown' ping should only be written during first run."
+ );
+
+ await TelemetryStorage.testClearPendingPings();
+
+ // Assert that the ping is only sent if the flag is enabled.
+ Preferences.set(TelemetryUtils.Preferences.FirstRun, true);
+ Preferences.set(TelemetryUtils.Preferences.FirstShutdownPingEnabled, false);
+ TelemetryReportingPolicy.testUpdateFirstRun();
+
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ Assert.ok(
+ !(await storageContainsFirstShutdown()),
+ "The 'first-shutdown' ping should only be written if enabled"
+ );
+
+ await TelemetryStorage.testClearPendingPings();
+
+ // Assert that the ping is not collected when the ping-sender is disabled.
+ // The information would be made irrelevant by the main ping in the second session.
+ Preferences.set(TelemetryUtils.Preferences.FirstShutdownPingEnabled, true);
+ Preferences.set(TelemetryUtils.Preferences.ShutdownPingSender, false);
+ TelemetryReportingPolicy.testUpdateFirstRun();
+
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ Assert.ok(
+ !(await storageContainsFirstShutdown()),
+ "The 'first-shutdown' ping should only be written if ping-sender is enabled"
+ );
+
+ // Clear the state and prepare for the next test.
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ PingServer.resetPingHandler();
+ };
+
+ // Remove leftover pending pings from other tests
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ Telemetry.clearScalars();
+
+ // Set testing invariants for FirstShutdownPingEnabled
+ Preferences.set(TelemetryUtils.Preferences.ShutdownPingSender, true);
+ Preferences.set(
+ TelemetryUtils.Preferences.ShutdownPingSenderFirstSession,
+ false
+ );
+
+ // Set primary conditions of the 'first-shutdown' ping
+ Preferences.set(TelemetryUtils.Preferences.FirstShutdownPingEnabled, true);
+ Preferences.set(TelemetryUtils.Preferences.FirstRun, true);
+ TelemetryReportingPolicy.testUpdateFirstRun();
+
+ // Assert general 'first-shutdown' use-case.
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ let ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, "first-shutdown", true, true);
+ Assert.equal(ping.payload.info.reason, REASON_SHUTDOWN);
+ Assert.equal(ping.clientId, gClientID);
+
+ await TelemetryStorage.testClearPendingPings();
+
+ // Assert that the shutdown is not sent under various conditions
+ await checkShutdownNotSent();
+
+ // Reset the pref and restart Telemetry.
+ Preferences.set(TelemetryUtils.Preferences.ShutdownPingSender, false);
+ Preferences.set(
+ TelemetryUtils.Preferences.ShutdownPingSenderFirstSession,
+ false
+ );
+ Preferences.set(TelemetryUtils.Preferences.FirstShutdownPingEnabled, false);
+ Preferences.reset(TelemetryUtils.Preferences.FirstRun);
+ PingServer.resetPingHandler();
+});
+
+add_task(async function test_savedSessionData() {
+ // Create the directory which will contain the data file, if it doesn't already
+ // exist.
+ await OS.File.makeDir(DATAREPORTING_PATH);
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_LOAD").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_PARSE").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").clear();
+
+ // Write test data to the session data file.
+ const dataFilePath = OS.Path.join(DATAREPORTING_PATH, "session-state.json");
+ const sessionState = {
+ sessionId: null,
+ subsessionId: null,
+ profileSubsessionCounter: 3785,
+ };
+ await CommonUtils.writeJSON(sessionState, dataFilePath);
+
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ Preferences.reset(PREF_TEST);
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_VALUE }],
+ ]);
+
+ // We expect one new subsession when starting TelemetrySession and one after triggering
+ // an environment change.
+ const expectedSubsessions = sessionState.profileSubsessionCounter + 2;
+ const expectedSessionUUID = "ff602e52-47a1-b7e8-4c1a-ffffffffc87a";
+ const expectedSubsessionUUID = "009fd1ad-b85e-4817-b3e5-000000003785";
+ fakeGenerateUUID(
+ () => expectedSessionUUID,
+ () => expectedSubsessionUUID
+ );
+
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android, so skip the next checks.
+ return;
+ }
+
+ // Start TelemetrySession so that it loads the session data file.
+ await TelemetryController.testReset();
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+ // Watch a test preference, trigger an environment change and wait for it to propagate.
+ // _watchPreferences triggers a subsession notification.
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ fakeNow(new Date(2050, 1, 1, 12, 0, 0));
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ let changePromise = new Promise(resolve =>
+ TelemetryEnvironment.registerChangeListener("test_fake_change", resolve)
+ );
+ Preferences.set(PREF_TEST, 1);
+ await changePromise;
+ TelemetryEnvironment.unregisterChangeListener("test_fake_change");
+
+ let payload = TelemetrySession.getPayload();
+ Assert.equal(payload.info.profileSubsessionCounter, expectedSubsessions);
+ await TelemetryController.testShutdown();
+
+ // Restore the UUID generator so we don't mess with other tests.
+ fakeGenerateUUID(TelemetryUtils.generateUUID, TelemetryUtils.generateUUID);
+
+ // Load back the serialised session data.
+ let data = await CommonUtils.readJSON(dataFilePath);
+ Assert.equal(data.profileSubsessionCounter, expectedSubsessions);
+ Assert.equal(data.sessionId, expectedSessionUUID);
+ Assert.equal(data.subsessionId, expectedSubsessionUUID);
+});
+
+add_task(async function test_sessionData_ShortSession() {
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android, so skip the next checks.
+ return;
+ }
+
+ const SESSION_STATE_PATH = OS.Path.join(
+ DATAREPORTING_PATH,
+ "session-state.json"
+ );
+
+ // Remove the session state file.
+ await OS.File.remove(SESSION_STATE_PATH, { ignoreAbsent: true });
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_LOAD").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_PARSE").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").clear();
+
+ const expectedSessionUUID = "ff602e52-47a1-b7e8-4c1a-ffffffffc87a";
+ const expectedSubsessionUUID = "009fd1ad-b85e-4817-b3e5-000000003785";
+ fakeGenerateUUID(
+ () => expectedSessionUUID,
+ () => expectedSubsessionUUID
+ );
+
+ // We intentionally don't wait for the setup to complete and shut down right away
+ // to simulate short sessions. We expect the profile subsession counter to be 1.
+ TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+ // Restore the UUID generation functions.
+ fakeGenerateUUID(TelemetryUtils.generateUUID, TelemetryUtils.generateUUID);
+
+ // Start TelemetryController so that it loads the session data file. We expect the profile
+ // subsession counter to be incremented by 1 again.
+ await TelemetryController.testReset();
+
+ // We expect 2 profile subsession counter updates.
+ let payload = TelemetrySession.getPayload();
+ Assert.equal(payload.info.profileSubsessionCounter, 2);
+ Assert.equal(payload.info.previousSessionId, expectedSessionUUID);
+ Assert.equal(payload.info.previousSubsessionId, expectedSubsessionUUID);
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_invalidSessionData() {
+ // Create the directory which will contain the data file, if it doesn't already
+ // exist.
+ await OS.File.makeDir(DATAREPORTING_PATH);
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_LOAD").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_PARSE").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").clear();
+
+ // Write test data to the session data file. This should fail to parse.
+ const dataFilePath = OS.Path.join(DATAREPORTING_PATH, "session-state.json");
+ const unparseableData = "{asdf:@äü";
+ await OS.File.writeAtomic(dataFilePath, unparseableData, {
+ encoding: "utf-8",
+ tmpPath: dataFilePath + ".tmp",
+ });
+
+ // Start TelemetryController so that it loads the session data file.
+ await TelemetryController.testReset();
+
+ // The session data file should not load. Only expect the current subsession.
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+ // Write test data to the session data file. This should fail validation.
+ const sessionState = {
+ profileSubsessionCounter: "not-a-number?",
+ someOtherField: 12,
+ };
+ await CommonUtils.writeJSON(sessionState, dataFilePath);
+
+ // The session data file should not load. Only expect the current subsession.
+ const expectedSubsessions = 1;
+ const expectedSessionUUID = "ff602e52-47a1-b7e8-4c1a-ffffffffc87a";
+ const expectedSubsessionUUID = "009fd1ad-b85e-4817-b3e5-000000003785";
+ fakeGenerateUUID(
+ () => expectedSessionUUID,
+ () => expectedSubsessionUUID
+ );
+
+ // Start TelemetryController so that it loads the session data file.
+ await TelemetryController.testShutdown();
+ await TelemetryController.testReset();
+
+ let payload = TelemetrySession.getPayload();
+ Assert.equal(payload.info.profileSubsessionCounter, expectedSubsessions);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+ await TelemetryController.testShutdown();
+
+ // Restore the UUID generator so we don't mess with other tests.
+ fakeGenerateUUID(TelemetryUtils.generateUUID, TelemetryUtils.generateUUID);
+
+ // Load back the serialised session data.
+ let data = await CommonUtils.readJSON(dataFilePath);
+ Assert.equal(data.profileSubsessionCounter, expectedSubsessions);
+ Assert.equal(data.sessionId, expectedSessionUUID);
+ Assert.equal(data.subsessionId, expectedSubsessionUUID);
+});
+
+add_task(async function test_abortedSession() {
+ if (gIsAndroid) {
+ // We don't have the aborted session ping here.
+ return;
+ }
+
+ const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+ // Make sure the aborted sessions directory does not exist to test its creation.
+ await OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+ let schedulerTickCallback = null;
+ let now = new Date(2040, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control aborted-session flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ Assert.ok(
+ await OS.File.exists(DATAREPORTING_PATH),
+ "Telemetry must create the aborted session directory when starting."
+ );
+
+ // Fake now again so that the scheduled aborted-session save takes place.
+ now = futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS);
+ fakeNow(now);
+ // The first aborted session checkpoint must take place right after the initialisation.
+ Assert.ok(!!schedulerTickCallback);
+ // Execute one scheduler tick.
+ await schedulerTickCallback();
+ // Check that the aborted session is due at the correct time.
+ Assert.ok(
+ await OS.File.exists(ABORTED_FILE),
+ "There must be an aborted session ping."
+ );
+
+ // This ping is not yet in the pending pings folder, so we can't access it using
+ // TelemetryStorage.popPendingPings().
+ let pingContent = await OS.File.read(ABORTED_FILE, { encoding: "utf-8" });
+ let abortedSessionPing = JSON.parse(pingContent);
+
+ // Validate the ping.
+ checkPingFormat(abortedSessionPing, PING_TYPE_MAIN, true, true);
+ Assert.equal(abortedSessionPing.payload.info.reason, REASON_ABORTED_SESSION);
+
+ // Trigger another aborted-session ping and check that it overwrites the previous one.
+ now = futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS);
+ fakeNow(now);
+ await schedulerTickCallback();
+
+ pingContent = await OS.File.read(ABORTED_FILE, { encoding: "utf-8" });
+ let updatedAbortedSessionPing = JSON.parse(pingContent);
+ checkPingFormat(updatedAbortedSessionPing, PING_TYPE_MAIN, true, true);
+ Assert.equal(
+ updatedAbortedSessionPing.payload.info.reason,
+ REASON_ABORTED_SESSION
+ );
+ Assert.notEqual(abortedSessionPing.id, updatedAbortedSessionPing.id);
+ Assert.notEqual(
+ abortedSessionPing.creationDate,
+ updatedAbortedSessionPing.creationDate
+ );
+
+ await TelemetryController.testShutdown();
+ Assert.ok(
+ !(await OS.File.exists(ABORTED_FILE)),
+ "No aborted session ping must be available after a shutdown."
+ );
+});
+
+add_task(async function test_abortedSession_Shutdown() {
+ if (gIsAndroid) {
+ // We don't have the aborted session ping here.
+ return;
+ }
+
+ const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+ let schedulerTickCallback = null;
+ let now = fakeNow(2040, 1, 1, 0, 0, 0);
+ // Fake scheduler functions to control aborted-session flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ Assert.ok(
+ await OS.File.exists(DATAREPORTING_PATH),
+ "Telemetry must create the aborted session directory when starting."
+ );
+
+ // Fake now again so that the scheduled aborted-session save takes place.
+ fakeNow(futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS));
+ // The first aborted session checkpoint must take place right after the initialisation.
+ Assert.ok(!!schedulerTickCallback);
+ // Execute one scheduler tick.
+ await schedulerTickCallback();
+ // Check that the aborted session is due at the correct time.
+ Assert.ok(
+ await OS.File.exists(ABORTED_FILE),
+ "There must be an aborted session ping."
+ );
+
+ // Remove the aborted session file and then shut down to make sure exceptions (e.g. file
+ // not found) do not compromise the shutdown.
+ await OS.File.remove(ABORTED_FILE);
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_abortedDailyCoalescing() {
+ if (gIsAndroid) {
+ // We don't have the aborted session or the daily ping here.
+ return;
+ }
+
+ const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+ // Make sure the aborted sessions directory does not exist to test its creation.
+ await OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+ let schedulerTickCallback = null;
+ PingServer.clearRequests();
+
+ let nowDate = new Date(2009, 10, 18, 0, 0, 0);
+ fakeNow(nowDate);
+
+ // Fake scheduler functions to control aborted-session flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ await TelemetryController.testReset();
+
+ Assert.ok(
+ await OS.File.exists(DATAREPORTING_PATH),
+ "Telemetry must create the aborted session directory when starting."
+ );
+
+  // Delay the callback until around midnight so that the aborted-session ping gets merged
+  // with the daily ping.
+ let dailyDueDate = futureDate(nowDate, MS_IN_ONE_DAY);
+ fakeNow(dailyDueDate);
+  // Trigger both the daily ping and the aborted-session save.
+ Assert.ok(!!schedulerTickCallback);
+ // Execute one scheduler tick.
+ await schedulerTickCallback();
+
+ // Wait for the daily ping.
+ let dailyPing = await PingServer.promiseNextPing();
+ Assert.equal(dailyPing.payload.info.reason, REASON_DAILY);
+
+ // Check that an aborted session ping was also written to disk.
+ Assert.ok(
+ await OS.File.exists(ABORTED_FILE),
+ "There must be an aborted session ping."
+ );
+
+ // Read aborted session ping and check that the session/subsession ids equal the
+ // ones in the daily ping.
+ let pingContent = await OS.File.read(ABORTED_FILE, { encoding: "utf-8" });
+ let abortedSessionPing = JSON.parse(pingContent);
+ Assert.equal(
+ abortedSessionPing.payload.info.sessionId,
+ dailyPing.payload.info.sessionId
+ );
+ Assert.equal(
+ abortedSessionPing.payload.info.subsessionId,
+ dailyPing.payload.info.subsessionId
+ );
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_schedulerComputerSleep() {
+ if (gIsAndroid) {
+ // We don't have the aborted session or the daily ping here.
+ return;
+ }
+
+ const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+ // Remove any aborted-session ping from the previous tests.
+ await OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+ // Set a fake current date and start Telemetry.
+ let nowDate = fakeNow(2009, 10, 18, 0, 0, 0);
+ let schedulerTickCallback = null;
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ // Set the current time 3 days in the future at midnight, before running the callback.
+ nowDate = fakeNow(futureDate(nowDate, 3 * MS_IN_ONE_DAY));
+ Assert.ok(!!schedulerTickCallback);
+ // Execute one scheduler tick.
+ await schedulerTickCallback();
+
+ let dailyPing = await PingServer.promiseNextPing();
+ Assert.equal(
+ dailyPing.payload.info.reason,
+ REASON_DAILY,
+ "The wake notification should have triggered a daily ping."
+ );
+ Assert.equal(
+ dailyPing.creationDate,
+ nowDate.toISOString(),
+ "The daily ping date should be correct."
+ );
+
+ Assert.ok(
+ await OS.File.exists(ABORTED_FILE),
+ "There must be an aborted session ping."
+ );
+
+  // Now also test that we send a daily ping if we wake up on the next
+  // day even when the timer doesn't trigger.
+ // This can happen due to timeouts not running out during sleep times,
+ // see bug 1262386, bug 1204823 et al.
+ // Note that we don't get wake notifications on Linux due to bug 758848.
+ nowDate = fakeNow(futureDate(nowDate, 1 * MS_IN_ONE_DAY));
+
+ // We emulate the mentioned timeout behavior by sending the wake notification
+ // instead of triggering the timeout callback.
+ // This should trigger a daily ping, because we passed midnight.
+ Services.obs.notifyObservers(null, "wake_notification");
+
+ dailyPing = await PingServer.promiseNextPing();
+ Assert.equal(
+ dailyPing.payload.info.reason,
+ REASON_DAILY,
+ "The wake notification should have triggered a daily ping."
+ );
+ Assert.equal(
+ dailyPing.creationDate,
+ nowDate.toISOString(),
+ "The daily ping date should be correct."
+ );
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_schedulerEnvironmentReschedules() {
+ if (gIsAndroid) {
+ // We don't have the aborted session or the daily ping here.
+ return;
+ }
+
+ // Reset the test preference.
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ Preferences.reset(PREF_TEST);
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_VALUE }],
+ ]);
+
+ await TelemetryController.testReset();
+ await TelemetryController.testShutdown();
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+ // Set a fake current date and start Telemetry.
+ let nowDate = fakeNow(2060, 10, 18, 0, 0, 0);
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ let schedulerTickCallback = null;
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+
+ // Set the current time at midnight.
+ fakeNow(futureDate(nowDate, MS_IN_ONE_DAY));
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+
+ // Trigger the environment change.
+ Preferences.set(PREF_TEST, 1);
+
+ // Wait for the environment-changed ping.
+ await PingServer.promiseNextPing();
+
+ // We don't expect to receive any daily ping in this test, so assert if we do.
+ PingServer.registerPingHandler((req, res) => {
+ const receivedPing = decodeRequestPayload(req);
+ Assert.ok(
+ false,
+ `No ping should be received in this test (got ${receivedPing.id}).`
+ );
+ });
+
+ // Execute one scheduler tick. It should not trigger a daily ping.
+ Assert.ok(!!schedulerTickCallback);
+ await schedulerTickCallback();
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_schedulerNothingDue() {
+ if (gIsAndroid) {
+ // We don't have the aborted session or the daily ping here.
+ return;
+ }
+
+ const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+ // Remove any aborted-session ping from the previous tests.
+ await OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+ await TelemetryStorage.testClearPendingPings();
+ await TelemetryController.testReset();
+
+ // We don't expect to receive any ping in this test, so assert if we do.
+ PingServer.registerPingHandler((req, res) => {
+ const receivedPing = decodeRequestPayload(req);
+ Assert.ok(
+ false,
+ `No ping should be received in this test (got ${receivedPing.id}).`
+ );
+ });
+
+ // Set a current date/time away from midnight, so that the daily ping doesn't get
+ // sent.
+ let nowDate = new Date(2009, 10, 18, 11, 0, 0);
+ fakeNow(nowDate);
+ let schedulerTickCallback = null;
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ // Delay the callback execution to a time when no ping should be due.
+ let nothingDueDate = futureDate(
+ nowDate,
+ ABORTED_SESSION_UPDATE_INTERVAL_MS / 2
+ );
+ fakeNow(nothingDueDate);
+ Assert.ok(!!schedulerTickCallback);
+ // Execute one scheduler tick.
+ await schedulerTickCallback();
+
+ // Check that no aborted session ping was written to disk.
+ Assert.ok(!(await OS.File.exists(ABORTED_FILE)));
+
+ await TelemetryController.testShutdown();
+ PingServer.resetPingHandler();
+});
+
+add_task(async function test_pingExtendedStats() {
+ const EXTENDED_PAYLOAD_FIELDS = [
+ "log",
+ "slowSQL",
+ "fileIOReports",
+ "lateWrites",
+ "addonDetails",
+ ];
+
+ if (AppConstants.platform == "android") {
+ EXTENDED_PAYLOAD_FIELDS.push("UIMeasurements");
+ }
+
+ // Reset telemetry and disable sending extended statistics.
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ await TelemetryController.testReset();
+ Telemetry.canRecordExtended = false;
+
+ await sendPing();
+
+ let ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, PING_TYPE_MAIN, true, true);
+
+ // Check that the payload does not contain extended statistics fields.
+ for (let f in EXTENDED_PAYLOAD_FIELDS) {
+ Assert.ok(
+ !(EXTENDED_PAYLOAD_FIELDS[f] in ping.payload),
+ EXTENDED_PAYLOAD_FIELDS[f] +
+ " must not be in the payload if the extended set is off."
+ );
+ }
+
+ // We check this one separately so that we can reuse EXTENDED_PAYLOAD_FIELDS below, since
+ // slowSQLStartup might not be there.
+ Assert.ok(
+ !("slowSQLStartup" in ping.payload),
+ "slowSQLStartup must not be sent if the extended set is off"
+ );
+
+ Assert.ok(
+ !("addonManager" in ping.payload.simpleMeasurements),
+ "addonManager must not be sent if the extended set is off."
+ );
+ Assert.ok(
+ !("UITelemetry" in ping.payload.simpleMeasurements),
+ "UITelemetry must not be sent."
+ );
+
+ // Restore the preference.
+ Telemetry.canRecordExtended = true;
+
+ // Send a new ping that should contain the extended data.
+ await sendPing();
+ ping = await PingServer.promiseNextPing();
+ checkPingFormat(ping, PING_TYPE_MAIN, true, true);
+
+ // Check that the payload now contains extended statistics fields.
+ for (let f in EXTENDED_PAYLOAD_FIELDS) {
+ Assert.ok(
+ EXTENDED_PAYLOAD_FIELDS[f] in ping.payload,
+ EXTENDED_PAYLOAD_FIELDS[f] +
+ " must be in the payload if the extended set is on."
+ );
+ }
+
+ Assert.ok(
+ "addonManager" in ping.payload.simpleMeasurements,
+ "addonManager must be sent if the extended set is on."
+ );
+ Assert.ok(
+ !("UITelemetry" in ping.payload.simpleMeasurements),
+ "UITelemetry must not be sent."
+ );
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_schedulerUserIdle() {
+ if (gIsAndroid) {
+ // We don't have the aborted session or the daily ping here.
+ return;
+ }
+
+ const SCHEDULER_TICK_INTERVAL_MS = 5 * 60 * 1000;
+ const SCHEDULER_TICK_IDLE_INTERVAL_MS = 60 * 60 * 1000;
+
+ let now = new Date(2010, 1, 1, 11, 0, 0);
+ fakeNow(now);
+
+ let schedulerTimeout = 0;
+ fakeSchedulerTimer(
+ (callback, timeout) => {
+ schedulerTimeout = timeout;
+ },
+ () => {}
+ );
+ await TelemetryController.testReset();
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+  // When not idle, the scheduler should have a 5-minute tick interval.
+ Assert.equal(schedulerTimeout, SCHEDULER_TICK_INTERVAL_MS);
+
+ // Send an "idle" notification to the scheduler.
+ fakeIdleNotification("idle");
+
+  // When idle, the scheduler should have a 1-hour tick interval.
+ Assert.equal(schedulerTimeout, SCHEDULER_TICK_IDLE_INTERVAL_MS);
+
+ // Send an "active" notification to the scheduler.
+ await fakeIdleNotification("active");
+
+  // Once the user is active again, the scheduler tick interval should be 5 minutes again.
+ Assert.equal(schedulerTimeout, SCHEDULER_TICK_INTERVAL_MS);
+
+ // We should not miss midnight when going to idle.
+ now.setHours(23);
+ now.setMinutes(50);
+ fakeNow(now);
+ fakeIdleNotification("idle");
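+  // At 23:50 the next midnight is only 10 minutes away, so the idle tick
+  // interval is clamped to 10 minutes rather than the full 1-hour idle interval.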
+ Assert.equal(schedulerTimeout, 10 * 60 * 1000);
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_DailyDueAndIdle() {
+ if (gIsAndroid) {
+ // We don't have the aborted session or the daily ping here.
+ return;
+ }
+
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+
+ let receivedPingRequest = null;
+ // Register a ping handler that will assert when receiving multiple daily pings.
+ PingServer.registerPingHandler(req => {
+ Assert.ok(!receivedPingRequest, "Telemetry must only send one daily ping.");
+ receivedPingRequest = req;
+ });
+
+ // Faking scheduler timer has to happen before resetting TelemetryController
+ // to be effective.
+ let schedulerTickCallback = null;
+ let now = new Date(2030, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control daily collection flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ // Trigger the daily ping.
+ let firstDailyDue = new Date(2030, 1, 2, 0, 0, 0);
+ fakeNow(firstDailyDue);
+
+ // Run a scheduler tick: it should trigger the daily ping.
+ Assert.ok(!!schedulerTickCallback);
+ let tickPromise = schedulerTickCallback();
+
+ // Send an idle and then an active user notification.
+ fakeIdleNotification("idle");
+ fakeIdleNotification("active");
+
+ // Wait on the tick promise.
+ await tickPromise;
+
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+  // Decode the ping contained in the request and check that it's a daily ping.
+ Assert.ok(receivedPingRequest, "Telemetry must send one daily ping.");
+ const receivedPing = decodeRequestPayload(receivedPingRequest);
+ checkPingFormat(receivedPing, PING_TYPE_MAIN, true, true);
+ Assert.equal(receivedPing.payload.info.reason, REASON_DAILY);
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_userIdleAndSchedulerTick() {
+ if (gIsAndroid) {
+ // We don't have the aborted session or the daily ping here.
+ return;
+ }
+
+ let receivedPingRequest = null;
+ // Register a ping handler that will assert when receiving multiple daily pings.
+ PingServer.registerPingHandler(req => {
+ Assert.ok(!receivedPingRequest, "Telemetry must only send one daily ping.");
+ receivedPingRequest = req;
+ });
+
+ let schedulerTickCallback = null;
+ let now = new Date(2030, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control daily collection flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryStorage.testClearPendingPings();
+ await TelemetryController.testReset();
+ PingServer.clearRequests();
+
+ // Move the current date/time to midnight.
+ let firstDailyDue = new Date(2030, 1, 2, 0, 0, 0);
+ fakeNow(firstDailyDue);
+
+ // The active notification should trigger a scheduler tick. The latter will send the
+ // due daily ping.
+ fakeIdleNotification("active");
+
+ // Immediately running another tick should not send a daily ping again.
+ Assert.ok(!!schedulerTickCallback);
+ await schedulerTickCallback();
+
+ // A new "idle" notification should not send a new daily ping.
+ fakeIdleNotification("idle");
+
+ await TelemetrySend.testWaitOnOutgoingPings();
+
+  // Decode the ping contained in the request and check that it's a daily ping.
+ Assert.ok(receivedPingRequest, "Telemetry must send one daily ping.");
+ const receivedPing = decodeRequestPayload(receivedPingRequest);
+ checkPingFormat(receivedPing, PING_TYPE_MAIN, true, true);
+ Assert.equal(receivedPing.payload.info.reason, REASON_DAILY);
+
+ PingServer.resetPingHandler();
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function test_changeThrottling() {
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android.
+ return;
+ }
+
+ let getSubsessionCount = () => {
+ return TelemetrySession.getPayload().info.subsessionCounter;
+ };
+
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, { what: TelemetryEnvironment.RECORD_PREF_STATE }],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+ let now = fakeNow(2050, 1, 2, 0, 0, 0);
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE
+ );
+ await TelemetryController.testReset();
+ Assert.equal(getSubsessionCount(), 1);
+
+ // Set the Environment preferences to watch.
+ await TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+
+ // The first pref change should not trigger a notification.
+ Preferences.set(PREF_TEST, 1);
+ Assert.equal(getSubsessionCount(), 1);
+
+ // We should get a change notification after the 5min throttling interval.
+ fakeNow(futureDate(now, 5 * MILLISECONDS_PER_MINUTE + 1));
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 5 * MILLISECONDS_PER_MINUTE + 1
+ );
+ Preferences.set(PREF_TEST, 2);
+ Assert.equal(getSubsessionCount(), 2);
+
+ // After that, changes should be throttled again.
+ now = fakeNow(futureDate(now, 1 * MILLISECONDS_PER_MINUTE));
+ gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 1 * MILLISECONDS_PER_MINUTE);
+ Preferences.set(PREF_TEST, 3);
+ Assert.equal(getSubsessionCount(), 2);
+
+ // ... for 5min.
+ now = fakeNow(futureDate(now, 4 * MILLISECONDS_PER_MINUTE + 1));
+ gMonotonicNow = fakeMonotonicNow(
+ gMonotonicNow + 4 * MILLISECONDS_PER_MINUTE + 1
+ );
+ Preferences.set(PREF_TEST, 4);
+ Assert.equal(getSubsessionCount(), 3);
+
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("testWatchPrefs_throttling");
+});
+
+add_task(async function stopServer() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySession_abortedSessionQueued.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySession_abortedSessionQueued.js
new file mode 100644
index 0000000000..b5e434d0da
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySession_abortedSessionQueued.js
@@ -0,0 +1,187 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+/**
+ * This file only contains the |test_abortedSessionQueued| test. This needs
+ * to be in a separate, stand-alone file since we're initializing Telemetry
+ * twice, in a non-standard way to simulate incorrect shutdowns. Doing this
+ * in other files might interfere with the other tests.
+ */
+
+ChromeUtils.import("resource://services-common/utils.js", this);
+ChromeUtils.import("resource://gre/modules/TelemetryStorage.jsm", this);
+ChromeUtils.import("resource://gre/modules/Services.jsm", this);
+
+const DATAREPORTING_DIR = "datareporting";
+const ABORTED_PING_FILE_NAME = "aborted-session-ping";
+const ABORTED_SESSION_UPDATE_INTERVAL_MS = 5 * 60 * 1000;
+
+const PING_TYPE_MAIN = "main";
+const REASON_ABORTED_SESSION = "aborted-session";
+const TEST_PING_TYPE = "test-ping-type";
+
+XPCOMUtils.defineLazyGetter(this, "DATAREPORTING_PATH", function() {
+ return OS.Path.join(OS.Constants.Path.profileDir, DATAREPORTING_DIR);
+});
+
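+// Submit a test ping. Point TelemetrySend at the local PingServer when it is
+// running, otherwise at an unreachable server so the ping stays pending.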
+function sendPing() {
+ if (PingServer.started) {
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ } else {
+ TelemetrySend.setServer("http://doesnotexist");
+ }
+
+ let options = {
+ addClientId: true,
+ addEnvironment: true,
+ };
+ return TelemetryController.submitExternalPing(TEST_PING_TYPE, {}, options);
+}
+
+add_task(async function test_setup() {
+ do_get_profile();
+ PingServer.start();
+ Services.prefs.setCharPref(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+});
+
+add_task(async function test_abortedSessionQueued() {
+ const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+ // Make sure the aborted sessions directory does not exist to test its creation.
+ await OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+ let schedulerTickCallback = null;
+ let now = new Date(2040, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control aborted-session flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ Assert.ok(
+ await OS.File.exists(DATAREPORTING_PATH),
+ "Telemetry must create the aborted session directory when starting."
+ );
+
+ // Fake now again so that the scheduled aborted-session save takes place.
+ now = futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS);
+ fakeNow(now);
+ // The first aborted session checkpoint must take place right after the initialisation.
+ Assert.ok(!!schedulerTickCallback);
+ // Execute one scheduler tick.
+ await schedulerTickCallback();
+ // Check that the aborted session is due at the correct time.
+ Assert.ok(
+ await OS.File.exists(ABORTED_FILE),
+ "There must be an aborted session ping."
+ );
+
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ await TelemetryController.testReset();
+
+ Assert.ok(
+ !(await OS.File.exists(ABORTED_FILE)),
+ "The aborted session ping must be removed from the aborted session ping directory."
+ );
+
+ // Restarting Telemetry again to trigger sending pings in TelemetrySend.
+ await TelemetryController.testReset();
+
+ // We should have received an aborted-session ping.
+ const receivedPing = await PingServer.promiseNextPing();
+ Assert.equal(
+ receivedPing.type,
+ PING_TYPE_MAIN,
+ "Should have the correct type"
+ );
+ Assert.equal(
+ receivedPing.payload.info.reason,
+ REASON_ABORTED_SESSION,
+ "Ping should have the correct reason"
+ );
+
+ await TelemetryController.testShutdown();
+});
+
+/*
+ * An aborted-session ping might have been written when Telemetry upload was disabled and
+ * the profile had a canary client ID.
+ * These pings should not be sent out at a later point when Telemetry is enabled again.
+ */
+add_task(async function test_abortedSession_canary_clientid() {
+ const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+ // Make sure the aborted sessions directory does not exist to test its creation.
+ await OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+ let schedulerTickCallback = null;
+ let now = new Date(2040, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ // Fake scheduler functions to control aborted-session flow in tests.
+ fakeSchedulerTimer(
+ callback => (schedulerTickCallback = callback),
+ () => {}
+ );
+ await TelemetryController.testReset();
+
+ Assert.ok(
+ await OS.File.exists(DATAREPORTING_PATH),
+ "Telemetry must create the aborted session directory when starting."
+ );
+
+ // Fake now again so that the scheduled aborted-session save takes place.
+ now = futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS);
+ fakeNow(now);
+ // The first aborted session checkpoint must take place right after the initialisation.
+ Assert.ok(!!schedulerTickCallback);
+ // Execute one scheduler tick.
+ await schedulerTickCallback();
+ // Check that the aborted session is due at the correct time.
+ Assert.ok(
+ await OS.File.exists(ABORTED_FILE),
+ "There must be an aborted session ping."
+ );
+
+ // Set clientID in aborted-session ping to canary value
+ let abortedPing = await CommonUtils.readJSON(ABORTED_FILE);
+ abortedPing.clientId = TelemetryUtils.knownClientID;
+  await OS.File.writeAtomic(ABORTED_FILE, JSON.stringify(abortedPing), {
+ encoding: "utf-8",
+ });
+
+ await TelemetryStorage.testClearPendingPings();
+ PingServer.clearRequests();
+ await TelemetryController.testReset();
+
+ Assert.ok(
+ !(await OS.File.exists(ABORTED_FILE)),
+ "The aborted session ping must be removed from the aborted session ping directory."
+ );
+
+ // Restarting Telemetry again to trigger sending pings in TelemetrySend.
+ await TelemetryController.testReset();
+
+ // Trigger a test ping, so we can verify the server received something.
+ sendPing();
+
+  // The canary aborted-session ping must not be sent, so the next ping
+  // received should be the test ping.
+ const receivedPing = await PingServer.promiseNextPing();
+ Assert.equal(
+ receivedPing.type,
+ TEST_PING_TYPE,
+ "Should have received test ping"
+ );
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(async function stopServer() {
+ await PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySession_activeTicks.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySession_activeTicks.js
new file mode 100644
index 0000000000..b9cb9e288f
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySession_activeTicks.js
@@ -0,0 +1,119 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+
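+// Send aHowMany "user-interaction-active" notifications, each of which registers
+// as one active tick.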
+function tick(aHowMany) {
+ for (let i = 0; i < aHowMany; i++) {
+ Services.obs.notifyObservers(null, "user-interaction-active");
+ }
+}
+
+function checkSessionTicks(aExpected) {
+ let payload = TelemetrySession.getPayload();
+ Assert.equal(
+ payload.simpleMeasurements.activeTicks,
+ aExpected,
+ "Should record the expected number of active ticks for the session."
+ );
+}
+
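+// Check the subsession active tick count (and the matching scalar). When
+// aClearSubsession is true, fetching the payload also starts a new subsession,
+// resetting the subsession counts.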
+function checkSubsessionTicks(aExpected, aClearSubsession) {
+ let payload = TelemetrySession.getPayload("main", aClearSubsession);
+ Assert.equal(
+ payload.simpleMeasurements.activeTicks,
+ aExpected,
+ "Should record the expected number of active ticks for the subsession."
+ );
+ if (aExpected > 0) {
+ Assert.equal(
+ payload.processes.parent.scalars["browser.engagement.active_ticks"],
+ aExpected,
+ "Should record the expected number of active ticks for the subsession, in a scalar."
+ );
+ }
+}
+
+add_task(async function test_setup() {
+ do_get_profile();
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+});
+
+add_task(async function test_record_activeTicks() {
+ await TelemetryController.testSetup();
+
+ let checkActiveTicks = expected => {
+ // Scalars are only present in subsession payloads.
+ let payload = TelemetrySession.getPayload("main");
+ Assert.equal(
+ payload.simpleMeasurements.activeTicks,
+ expected,
+ "TelemetrySession must record the expected number of active ticks (in simpleMeasurements)."
+ );
+ // Subsessions are not yet supported on Android.
+ if (!gIsAndroid) {
+ Assert.equal(
+ payload.processes.parent.scalars["browser.engagement.active_ticks"],
+ expected,
+ "TelemetrySession must record the expected number of active ticks (in scalars)."
+ );
+ }
+ };
+
+ for (let i = 0; i < 3; i++) {
+ Services.obs.notifyObservers(null, "user-interaction-active");
+ }
+ checkActiveTicks(3);
+
+ // Now send inactive. This must not increment the active ticks.
+ Services.obs.notifyObservers(null, "user-interaction-inactive");
+ checkActiveTicks(3);
+
+  // The first "active" notification after an "inactive" one is not counted as an active tick.
+ Services.obs.notifyObservers(null, "user-interaction-active");
+ checkActiveTicks(3);
+
+ // If we send active again, this should be counted as active.
+ Services.obs.notifyObservers(null, "user-interaction-active");
+ checkActiveTicks(4);
+
+ Services.obs.notifyObservers(null, "user-interaction-active");
+ checkActiveTicks(5);
+
+ await TelemetryController.testShutdown();
+});
+
+add_task(
+ {
+ skip_if: () => gIsAndroid,
+ },
+ async function test_subsession_activeTicks() {
+ await TelemetryController.testReset();
+ Telemetry.clearScalars();
+
+ tick(5);
+ checkSessionTicks(5);
+ checkSubsessionTicks(5, true);
+
+ // After clearing the subsession, subsession ticks should be 0 but session
+ // ticks should still be 5.
+ checkSubsessionTicks(0);
+ checkSessionTicks(5);
+
+ tick(1);
+ checkSessionTicks(6);
+ checkSubsessionTicks(1, true);
+
+ checkSubsessionTicks(0);
+ checkSessionTicks(6);
+
+ tick(2);
+ checkSessionTicks(8);
+ checkSubsessionTicks(2);
+
+ await TelemetryController.testShutdown();
+ }
+);
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js b/toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js
new file mode 100644
index 0000000000..d9e5e08625
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js
@@ -0,0 +1,196 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const HIST_NAME = "TELEMETRY_SEND_SUCCESS";
+const HIST_NAME2 = "RANGE_CHECKSUM_ERRORS";
+const KEYED_HIST = { id: "TELEMETRY_INVALID_PING_TYPE_SUBMITTED", key: "TEST" };
+
+var refObj = {},
+ refObj2 = {};
+
+var originalCount1, originalCount2, originalCount3;
+
+function run_test() {
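+  // Capture baseline sample counts for each histogram so that finishTest() can
+  // verify exactly how many new samples the stopwatch calls below record.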
+ let histogram = Telemetry.getHistogramById(HIST_NAME);
+ let snapshot = histogram.snapshot();
+ originalCount1 = Object.values(snapshot.values).reduce((a, b) => (a += b), 0);
+
+ histogram = Telemetry.getHistogramById(HIST_NAME2);
+ snapshot = histogram.snapshot();
+ originalCount2 = Object.values(snapshot.values).reduce((a, b) => (a += b), 0);
+
+ histogram = Telemetry.getKeyedHistogramById(KEYED_HIST.id);
+ snapshot = histogram.snapshot()[KEYED_HIST.key] || { values: [] };
+ originalCount3 = Object.values(snapshot.values).reduce((a, b) => (a += b), 0);
+
+ Assert.ok(TelemetryStopwatch.start("mark1"));
+ Assert.ok(TelemetryStopwatch.start("mark2"));
+
+ Assert.ok(TelemetryStopwatch.start("mark1", refObj));
+ Assert.ok(TelemetryStopwatch.start("mark2", refObj));
+
+ // Same timer can't be re-started before being stopped
+ Assert.ok(!TelemetryStopwatch.start("mark1"));
+ Assert.ok(!TelemetryStopwatch.start("mark1", refObj));
+
+  // Can't stop a timer that was accidentally started twice
+ Assert.ok(!TelemetryStopwatch.finish("mark1"));
+ Assert.ok(!TelemetryStopwatch.finish("mark1", refObj));
+
+ Assert.ok(TelemetryStopwatch.start("NON-EXISTENT_HISTOGRAM"));
+ Assert.ok(!TelemetryStopwatch.finish("NON-EXISTENT_HISTOGRAM"));
+
+ Assert.ok(TelemetryStopwatch.start("NON-EXISTENT_HISTOGRAM", refObj));
+ Assert.ok(!TelemetryStopwatch.finish("NON-EXISTENT_HISTOGRAM", refObj));
+
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME2));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME, refObj));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME2, refObj));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME, refObj2));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME2, refObj2));
+
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME2));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME, refObj));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME2, refObj));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME, refObj2));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME2, refObj2));
+
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME2));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME, refObj));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME2, refObj));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME, refObj2));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME2, refObj2));
+
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME2));
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME, refObj));
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME2, refObj));
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME, refObj2));
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME2, refObj2));
+
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME2));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME, refObj));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME2, refObj));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME, refObj2));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME2, refObj2));
+
+ // Verify that TS.finish deleted the timers
+ Assert.ok(!TelemetryStopwatch.finish(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.finish(HIST_NAME, refObj));
+
+ // Verify that they can be used again
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME, refObj));
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.finish(HIST_NAME, refObj));
+
+ Assert.ok(!TelemetryStopwatch.finish("unknown-mark")); // Unknown marker
+ Assert.ok(!TelemetryStopwatch.finish("unknown-mark", {})); // Unknown object
+ Assert.ok(!TelemetryStopwatch.finish(HIST_NAME, {})); // Known mark on unknown object
+
+ // Test cancel
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME, refObj));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.start(HIST_NAME, refObj));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.running(HIST_NAME, refObj));
+ Assert.ok(TelemetryStopwatch.cancel(HIST_NAME));
+ Assert.ok(TelemetryStopwatch.cancel(HIST_NAME, refObj));
+
+  // Verify that we cannot cancel twice
+ Assert.ok(!TelemetryStopwatch.cancel(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.cancel(HIST_NAME, refObj));
+
+ // Verify that cancel removes the timers
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.running(HIST_NAME, refObj));
+ Assert.ok(!TelemetryStopwatch.finish(HIST_NAME));
+ Assert.ok(!TelemetryStopwatch.finish(HIST_NAME, refObj));
+
+  // Verify that keyed stopwatches can be started.
+ Assert.ok(!TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY1"));
+ Assert.ok(!TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY2"));
+ Assert.ok(!TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY1", refObj));
+ Assert.ok(!TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY2", refObj));
+
+ Assert.ok(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1"));
+ Assert.ok(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY2"));
+ Assert.ok(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1", refObj));
+ Assert.ok(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY2", refObj));
+
+ Assert.ok(TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY1"));
+ Assert.ok(TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY2"));
+ Assert.ok(TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY1", refObj));
+ Assert.ok(TelemetryStopwatch.runningKeyed("HISTOGRAM", "KEY2", refObj));
+
+  // Restarting keyed stopwatches should fail.
+ Assert.ok(!TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1"));
+ Assert.ok(!TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1", refObj));
+
+  // Finishing a keyed stopwatch for a non-existent histogram should return false.
+ Assert.ok(!TelemetryStopwatch.finishKeyed("HISTOGRAM", "KEY2"));
+ Assert.ok(!TelemetryStopwatch.finishKeyed("HISTOGRAM", "KEY2", refObj));
+
+ // Starting & finishing a keyed stopwatch for an existing histogram should work.
+ Assert.ok(TelemetryStopwatch.startKeyed(KEYED_HIST.id, KEYED_HIST.key));
+ Assert.ok(TelemetryStopwatch.finishKeyed(KEYED_HIST.id, KEYED_HIST.key));
+ // Verify that TS.finish deleted the timers
+ Assert.ok(!TelemetryStopwatch.runningKeyed(KEYED_HIST.id, KEYED_HIST.key));
+
+ // Verify that they can be used again
+ Assert.ok(TelemetryStopwatch.startKeyed(KEYED_HIST.id, KEYED_HIST.key));
+ Assert.ok(TelemetryStopwatch.finishKeyed(KEYED_HIST.id, KEYED_HIST.key));
+
+ Assert.ok(!TelemetryStopwatch.finishKeyed("unknown-mark", "unknown-key"));
+ Assert.ok(!TelemetryStopwatch.finishKeyed(KEYED_HIST.id, "unknown-key"));
+
+  // Verify that keyed stopwatches can only be canceled through the "keyed" API.
+ Assert.ok(TelemetryStopwatch.startKeyed(KEYED_HIST.id, KEYED_HIST.key));
+ Assert.throws(
+ () => TelemetryStopwatch.cancel(KEYED_HIST.id, KEYED_HIST.key),
+ /is not an object/
+ );
+ Assert.ok(TelemetryStopwatch.cancelKeyed(KEYED_HIST.id, KEYED_HIST.key));
+ Assert.ok(!TelemetryStopwatch.cancelKeyed(KEYED_HIST.id, KEYED_HIST.key));
+
+ finishTest();
+}
+
+function finishTest() {
+ let histogram = Telemetry.getHistogramById(HIST_NAME);
+ let snapshot = histogram.snapshot();
+ let newCount = Object.values(snapshot.values).reduce((a, b) => (a += b), 0);
+
+ Assert.equal(
+ newCount - originalCount1,
+ 5,
+ "The correct number of histograms were added for histogram 1."
+ );
+
+ histogram = Telemetry.getHistogramById(HIST_NAME2);
+ snapshot = histogram.snapshot();
+ newCount = Object.values(snapshot.values).reduce((a, b) => (a += b), 0);
+
+ Assert.equal(
+ newCount - originalCount2,
+ 3,
+ "The correct number of histograms were added for histogram 2."
+ );
+
+ histogram = Telemetry.getKeyedHistogramById(KEYED_HIST.id);
+ snapshot = histogram.snapshot()[KEYED_HIST.key];
+ newCount = Object.values(snapshot.values).reduce((a, b) => (a += b), 0);
+
+ Assert.equal(
+ newCount - originalCount3,
+ 2,
+ "The correct number of histograms were added for histogram 3."
+ );
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js b/toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js
new file mode 100644
index 0000000000..47ebf8f08e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js
@@ -0,0 +1,78 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetrySession.jsm", this);
+
+// The @mozilla.org/xre/app-info;1 XPCOM object provided by the xpcshell test harness doesn't
+// implement the nsIXULAppInfo interface, which is needed by Services.jsm and
+// TelemetrySession.jsm. updateAppInfo() creates and registers a minimal mock app-info.
+const { updateAppInfo } = ChromeUtils.import(
+ "resource://testing-common/AppInfo.jsm"
+);
+updateAppInfo();
+
+var gGlobalScope = this;
+
+function getSimpleMeasurementsFromTelemetryController() {
+ return TelemetrySession.getPayload().simpleMeasurements;
+}
+
+add_task(async function test_setup() {
+ // Telemetry needs the AddonManager.
+ loadAddonManager();
+ finishAddonManagerStartup();
+ fakeIntlReady();
+ // Make profile available for |TelemetryController.testShutdown()|.
+ do_get_profile();
+
+ // Make sure we don't generate unexpected pings due to pref changes.
+ await setEmptyPrefWatchlist();
+
+ await new Promise(resolve =>
+ Services.telemetry.asyncFetchTelemetryData(resolve)
+ );
+});
+
+add_task(async function actualTest() {
+ await TelemetryController.testSetup();
+
+ // Test the module logic
+ let tmp = {};
+ ChromeUtils.import("resource://gre/modules/TelemetryTimestamps.jsm", tmp);
+ let TelemetryTimestamps = tmp.TelemetryTimestamps;
+ let now = Date.now();
+ TelemetryTimestamps.add("foo");
+ Assert.ok(TelemetryTimestamps.get().foo != null); // foo was added
+ Assert.ok(TelemetryTimestamps.get().foo >= now); // foo has a reasonable value
+
+ // Add timestamp with value
+  // Use a value far in the future since TelemetryController subtracts the time of
+ // process initialization.
+ const YEAR_4000_IN_MS = 64060588800000;
+ TelemetryTimestamps.add("bar", YEAR_4000_IN_MS);
+ Assert.equal(TelemetryTimestamps.get().bar, YEAR_4000_IN_MS); // bar has the right value
+
+ // Can't add the same timestamp twice
+ TelemetryTimestamps.add("bar", 2);
+ Assert.equal(TelemetryTimestamps.get().bar, YEAR_4000_IN_MS); // bar wasn't overwritten
+
+ let threw = false;
+ try {
+ TelemetryTimestamps.add("baz", "this isn't a number");
+ } catch (ex) {
+ threw = true;
+ }
+ Assert.ok(threw); // adding non-number threw
+ Assert.equal(null, TelemetryTimestamps.get().baz); // no baz was added
+
+ // Test that the data gets added to the telemetry ping properly
+ let simpleMeasurements = getSimpleMeasurementsFromTelemetryController();
+ Assert.ok(simpleMeasurements != null); // got simple measurements from ping data
+ Assert.ok(simpleMeasurements.foo > 1); // foo was included
+ Assert.ok(simpleMeasurements.bar > 1); // bar was included
+ Assert.equal(undefined, simpleMeasurements.baz); // baz wasn't included since it wasn't added
+
+ await TelemetryController.testShutdown();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryUtils.js b/toolkit/components/telemetry/tests/unit/test_TelemetryUtils.js
new file mode 100644
index 0000000000..6eb83a9561
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryUtils.js
@@ -0,0 +1,34 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+ChromeUtils.import("resource://gre/modules/ObjectUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/Preferences.jsm", this);
+ChromeUtils.import("resource://gre/modules/TelemetryUtils.jsm", this);
+ChromeUtils.import("resource://gre/modules/UpdateUtils.jsm", this);
+
+add_task(async function testUpdateChannelOverride() {
+ if (Preferences.has(TelemetryUtils.Preferences.OverrideUpdateChannel)) {
+ // If the pref is already set at this point, the test is running in a build
+ // that makes use of the override pref. For testing purposes, unset the pref.
+ Preferences.set(TelemetryUtils.Preferences.OverrideUpdateChannel, "");
+ }
+
+ // Check that we return the same channel as UpdateUtils, by default
+ Assert.equal(
+ TelemetryUtils.getUpdateChannel(),
+ UpdateUtils.getUpdateChannel(false),
+ "The telemetry reported channel must match the one from UpdateChannel, by default."
+ );
+
+ // Now set the override pref and check that we return the correct channel
+ const OVERRIDE_TEST_CHANNEL = "nightly-test";
+ Preferences.set(
+ TelemetryUtils.Preferences.OverrideUpdateChannel,
+ OVERRIDE_TEST_CHANNEL
+ );
+ Assert.equal(
+ TelemetryUtils.getUpdateChannel(),
+ OVERRIDE_TEST_CHANNEL,
+ "The telemetry reported channel must match the override pref when pref is set."
+ );
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_ThirdPartyModulesPing.js b/toolkit/components/telemetry/tests/unit/test_ThirdPartyModulesPing.js
new file mode 100644
index 0000000000..3b25e749bc
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_ThirdPartyModulesPing.js
@@ -0,0 +1,269 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { Preferences } = ChromeUtils.import(
+ "resource://gre/modules/Preferences.jsm"
+);
+const { ctypes } = ChromeUtils.import("resource://gre/modules/ctypes.jsm");
+const { setTimeout } = ChromeUtils.import("resource://gre/modules/Timer.jsm");
+
+const kDllName = "modules-test.dll";
+
+let gCurrentPidStr;
+
+async function load_and_free(name) {
+ // Dynamically load a DLL which we have hard-coded as untrusted; this should
+ // appear in the payload.
+ let dllHandle = ctypes.open(do_get_file(name).path);
+ if (dllHandle) {
+ dllHandle.close();
+ dllHandle = null;
+ }
+ // Give the thread some cycles to process a loading event.
+ // eslint-disable-next-line mozilla/no-arbitrary-setTimeout
+ await new Promise(resolve => setTimeout(resolve, 50));
+}
+
+add_task(async function setup() {
+ do_get_profile();
+
+ // Dynamically load a DLL which we have hard-coded as untrusted; this should
+ // appear in the payload.
+ await load_and_free(kDllName);
+
+ // Force the timer to fire (using a small interval).
+ Cc["@mozilla.org/updates/timer-manager;1"]
+ .getService(Ci.nsIObserver)
+ .observe(null, "utm-test-init", "");
+ Preferences.set("toolkit.telemetry.untrustedModulesPing.frequency", 0);
+ Preferences.set("app.update.url", "http://localhost");
+
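+  // The browser process entry in the ping payload is keyed as "browser.0x<pid in hex>".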
+ let currentPid = Services.appinfo.processID;
+ gCurrentPidStr = "browser.0x" + currentPid.toString(16);
+
+ // Start the local ping server and setup Telemetry to use it during the tests.
+ PingServer.start();
+ Preferences.set(
+ TelemetryUtils.Preferences.Server,
+ "http://localhost:" + PingServer.port
+ );
+
+ return TelemetryController.testSetup();
+});
+
+registerCleanupFunction(function() {
+ return PingServer.stop();
+});
+
+// This tests basic end-to-end functionality of the untrusted modules
+// telemetry ping. We force the ping to fire, capture the result, and test for:
+// - Basic payload structure validity.
+// - Expected results for a few specific DLLs
+add_task(async function test_send_ping() {
+ let expectedModules = [
+ // This checks that a DLL loaded during runtime is evaluated properly.
+ // This is hard-coded as untrusted in toolkit/xre/UntrustedModules.cpp for
+ // testing purposes.
+ {
+ nameMatch: new RegExp(kDllName, "i"),
+ expectedTrusted: false,
+ wasFound: false,
+ },
+ {
+ nameMatch: /kernelbase.dll/i,
+ expectedTrusted: true,
+ wasFound: false,
+ },
+ ];
+
+ // There is a tiny chance some other ping is being sent legitimately before
+ // the one we care about. Spin until we find the correct ping type.
+ let found;
+ while (true) {
+ found = await PingServer.promiseNextPing();
+ if (found.type == "third-party-modules") {
+ break;
+ }
+ }
+
+ // Test the ping payload's validity.
+ Assert.ok(found, "Untrusted modules ping submitted");
+ Assert.ok(found.environment, "Ping has an environment");
+ Assert.ok(typeof found.clientId != "undefined", "Ping has a client ID");
+
+ Assert.equal(found.payload.structVersion, 1, "Version is correct");
+ Assert.ok(found.payload.modules, "'modules' object exists");
+ Assert.ok(Array.isArray(found.payload.modules), "'modules' is an array");
+ Assert.ok(found.payload.processes, "'processes' object exists");
+ Assert.ok(
+ gCurrentPidStr in found.payload.processes,
+ `Current process "${gCurrentPidStr}" is included in payload`
+ );
+
+ let ourProcInfo = found.payload.processes[gCurrentPidStr];
+ Assert.equal(ourProcInfo.processType, "browser", "'processType' is correct");
+ Assert.ok(typeof ourProcInfo.elapsed == "number", "'elapsed' exists");
+ Assert.equal(
+ ourProcInfo.sanitizationFailures,
+ 0,
+ "'sanitizationFailures' is 0"
+ );
+ Assert.equal(ourProcInfo.trustTestFailures, 0, "'trustTestFailures' is 0");
+
+ Assert.equal(
+ ourProcInfo.combinedStacks.stacks.length,
+ ourProcInfo.events.length,
+ "combinedStacks.stacks.length == events.length"
+ );
+
+ for (let event of ourProcInfo.events) {
+ Assert.ok(
+ typeof event.processUptimeMS == "number",
+ "'processUptimeMS' exists"
+ );
+ Assert.ok(typeof event.threadID == "number", "'threadID' exists");
+ Assert.ok(typeof event.baseAddress == "string", "'baseAddress' exists");
+
+ Assert.ok(typeof event.moduleIndex == "number", "'moduleIndex' exists");
+ Assert.ok(event.moduleIndex >= 0, "'moduleIndex' is non-negative");
+
+ Assert.ok(typeof event.isDependent == "boolean", "'isDependent' exists");
+ Assert.ok(!event.isDependent, "'isDependent' is false");
+
+ Assert.ok(typeof event.loadStatus == "number", "'loadStatus' exists");
+ Assert.ok(event.loadStatus == 0, "'loadStatus' is 0 (Loaded)");
+
+ let modRecord = found.payload.modules[event.moduleIndex];
+ Assert.ok(modRecord, "module record for this event exists");
+ Assert.ok(
+ typeof modRecord.resolvedDllName == "string",
+ "'resolvedDllName' exists"
+ );
+ Assert.ok(typeof modRecord.trustFlags == "number", "'trustFlags' exists");
+
+ let mod = expectedModules.find(function(elem) {
+ return elem.nameMatch.test(modRecord.resolvedDllName);
+ });
+
+ if (mod) {
+ mod.wasFound = true;
+ }
+ }
+
+ for (let x of expectedModules) {
+ Assert.equal(
+ !x.wasFound,
+ x.expectedTrusted,
+ `Trustworthiness == expected for module: ${x.nameMatch.source}`
+ );
+ }
+});
+
+// This tests that the INCLUDE_OLD_LOADEVENTS and KEEP_LOADEVENTS_NEW flags
+// control the method's return value and update the internal "Staging" and
+// "Settled" stores correctly.
+add_task(async function test_new_old_instances() {
+ const kIncludeOld = Telemetry.INCLUDE_OLD_LOADEVENTS;
+ const kKeepNew = Telemetry.KEEP_LOADEVENTS_NEW;
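+  // Count the untrusted-module load events reported for the current (browser) process.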
+ const get_events_count = data => data.processes[gCurrentPidStr].events.length;
+
+ // Make sure |baseline| has at least one instance.
+ await load_and_free(kDllName);
+
+ // Make sure all instances are "old"
+ const baseline = await Telemetry.getUntrustedModuleLoadEvents(kIncludeOld);
+ const baseline_count = get_events_count(baseline);
+ print("baseline_count = " + baseline_count);
+ print("baseline = " + JSON.stringify(baseline));
+
+ await Assert.rejects(
+ Telemetry.getUntrustedModuleLoadEvents(),
+ e => e.result == Cr.NS_ERROR_NOT_AVAILABLE,
+ "New instances should not exist!"
+ );
+
+ await load_and_free(kDllName); // A
+
+ // Passing kIncludeOld and kKeepNew is unsupported. A is kept new.
+ await Assert.rejects(
+ Telemetry.getUntrustedModuleLoadEvents(kIncludeOld | kKeepNew),
+ e => e.result == Cr.NS_ERROR_INVALID_ARG,
+ "Passing unsupported flag combination should throw an exception!"
+ );
+
+ await load_and_free(kDllName); // B
+
+  // After newly loading B, the new instances we have are {A, B}.
+ // Both A and B are still kept new.
+ let payload = await Telemetry.getUntrustedModuleLoadEvents(kKeepNew);
+ print("payload = " + JSON.stringify(payload));
+ Assert.equal(get_events_count(payload), 2);
+
+ await load_and_free(kDllName); // C
+
+  // After newly loading C, the new instances we have are {A, B, C}.
+ // All of A, B, and C are now marked as old.
+ payload = await Telemetry.getUntrustedModuleLoadEvents();
+ Assert.equal(get_events_count(payload), 3);
+
+ payload = await Telemetry.getUntrustedModuleLoadEvents(kIncludeOld);
+ // payload is {baseline, A, B, C}
+ Assert.equal(get_events_count(payload), baseline_count + 3);
+});
+
+// This tests that the INCLUDE_PRIVATE_FIELDS_IN_LOADEVENTS flag returns
+// data that includes private fields.
+add_task(async function test_private_fields() {
+ await load_and_free(kDllName);
+ const data = await Telemetry.getUntrustedModuleLoadEvents(
+ Telemetry.KEEP_LOADEVENTS_NEW |
+ Telemetry.INCLUDE_PRIVATE_FIELDS_IN_LOADEVENTS
+ );
+
+ for (const module of data.modules) {
+ Assert.ok(!("resolvedDllName" in module));
+ Assert.ok("dllFile" in module);
+ Assert.ok(module.dllFile.QueryInterface);
+ Assert.ok(module.dllFile.QueryInterface(Ci.nsIFile));
+ }
+});
+
+// This tests that the EXCLUDE_STACKINFO_FROM_LOADEVENTS flag correctly merges
+// the "Staging" and "Settled" data into a single JS object and that the
+// "combinedStacks" field is really excluded.
+add_task(async function test_exclude_stack() {
+ const baseline = await Telemetry.getUntrustedModuleLoadEvents(
+ Telemetry.EXCLUDE_STACKINFO_FROM_LOADEVENTS |
+ Telemetry.INCLUDE_OLD_LOADEVENTS
+ );
+ Assert.ok(!("combinedStacks" in baseline.processes[gCurrentPidStr]));
+ const baseSet = baseline.processes[gCurrentPidStr].events.map(
+ x => x.processUptimeMS
+ );
+
+ await load_and_free(kDllName);
+ await load_and_free(kDllName);
+ const newLoadsWithStack = await Telemetry.getUntrustedModuleLoadEvents(
+ Telemetry.KEEP_LOADEVENTS_NEW
+ );
+ Assert.ok("combinedStacks" in newLoadsWithStack.processes[gCurrentPidStr]);
+ const newSet = newLoadsWithStack.processes[gCurrentPidStr].events.map(
+ x => x.processUptimeMS
+ );
+
+ const merged = baseSet.concat(newSet);
+
+ const allData = await Telemetry.getUntrustedModuleLoadEvents(
+ Telemetry.KEEP_LOADEVENTS_NEW |
+ Telemetry.EXCLUDE_STACKINFO_FROM_LOADEVENTS |
+ Telemetry.INCLUDE_OLD_LOADEVENTS
+ );
+ Assert.ok(!("combinedStacks" in allData.processes[gCurrentPidStr]));
+ const allSet = allData.processes[gCurrentPidStr].events.map(
+ x => x.processUptimeMS
+ );
+
+ Assert.deepEqual(allSet.sort(), merged.sort());
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_UninstallPing.js b/toolkit/components/telemetry/tests/unit/test_UninstallPing.js
new file mode 100644
index 0000000000..756ad38da6
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_UninstallPing.js
@@ -0,0 +1,127 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+"use strict";
+
+const { TelemetryStorage } = ChromeUtils.import(
+ "resource://gre/modules/TelemetryStorage.jsm"
+);
+const { OS } = ChromeUtils.import("resource://gre/modules/osfile.jsm");
+const { FileUtils } = ChromeUtils.import(
+ "resource://gre/modules/FileUtils.jsm"
+);
+
+const gFakeInstallPathHash = "0123456789ABCDEF";
+let gFakeVendorDirectory;
+let gFakeGetUninstallPingPath;
+
+add_task(async function setup() {
+ do_get_profile();
+
+ let fakeVendorDirectoryNSFile = new FileUtils.File(
+ OS.Path.join(OS.Constants.Path.profileDir, "uninstall-ping-test")
+ );
+ fakeVendorDirectoryNSFile.createUnique(
+ Ci.nsIFile.DIRECTORY_TYPE,
+ FileUtils.PERMS_DIRECTORY
+ );
+ gFakeVendorDirectory = fakeVendorDirectoryNSFile.path;
+
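+  // Mirror the naming scheme of real uninstall pings
+  // (uninstall_ping_<installPathHash>_<pingId>.json), but rooted in the fake
+  // vendor directory created above.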
+ gFakeGetUninstallPingPath = id => ({
+ directory: fakeVendorDirectoryNSFile.clone(),
+ file: `uninstall_ping_${gFakeInstallPathHash}_${id}.json`,
+ });
+
+ fakeUninstallPingPath(gFakeGetUninstallPingPath);
+
+ registerCleanupFunction(() => {
+ OS.File.removeDir(gFakeVendorDirectory);
+ });
+});
+
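+// Build the on-disk path where a given ping would be stored, using the fake
+// uninstall ping path getter installed in setup().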
+function ping_path(ping) {
+ let { directory: pingFile, file } = gFakeGetUninstallPingPath(ping.id);
+ pingFile.append(file);
+ return pingFile.path;
+}
+
+add_task(async function test_store_ping() {
+ // Remove shouldn't throw on an empty dir.
+ await TelemetryStorage.removeUninstallPings();
+
+ // Write ping
+ const ping1 = {
+ id: "58b63aac-999e-4efb-9d5a-20f368670721",
+ payload: { some: "thing" },
+ };
+ const ping1Path = ping_path(ping1);
+ await TelemetryStorage.saveUninstallPing(ping1);
+
+ // Check the ping
+ Assert.ok(await OS.File.exists(ping1Path));
+ const readPing1 = JSON.parse(
+ await OS.File.read(ping1Path, { encoding: "utf-8" })
+ );
+ Assert.deepEqual(ping1, readPing1);
+
+ // Write another file that shouldn't match the pattern
+ const otherFilePath = OS.Path.join(gFakeVendorDirectory, "other_file.json");
+ await OS.File.writeAtomic(otherFilePath, "");
+ Assert.ok(await OS.File.exists(otherFilePath));
+
+ // Write another ping, should remove the earlier one
+ const ping2 = {
+ id: "7202c564-8f23-41b4-8a50-1744e9549260",
+ payload: { another: "thing" },
+ };
+ const ping2Path = ping_path(ping2);
+ await TelemetryStorage.saveUninstallPing(ping2);
+
+ Assert.ok(!(await OS.File.exists(ping1Path)));
+ Assert.ok(await OS.File.exists(ping2Path));
+ Assert.ok(await OS.File.exists(otherFilePath));
+
+ // Write an additional file manually so there are multiple matching pings to remove
+ const ping3 = { id: "yada-yada" };
+ const ping3Path = ping_path(ping3);
+
+ await OS.File.writeAtomic(ping3Path, "");
+ Assert.ok(await OS.File.exists(ping3Path));
+
+ // Remove pings
+ await TelemetryStorage.removeUninstallPings();
+
+ // Check our pings are removed but other file isn't
+ Assert.ok(!(await OS.File.exists(ping1Path)));
+ Assert.ok(!(await OS.File.exists(ping2Path)));
+ Assert.ok(!(await OS.File.exists(ping3Path)));
+ Assert.ok(await OS.File.exists(otherFilePath));
+
+ // Remove again, confirming that the remove doesn't cause an error if nothing to remove
+ await TelemetryStorage.removeUninstallPings();
+
+ const ping4 = {
+ id: "1f113673-753c-4fbe-9143-fe197f936036",
+ payload: { any: "thing" },
+ };
+ const ping4Path = ping_path(ping4);
+ await TelemetryStorage.saveUninstallPing(ping4);
+
+ // Open the ping without FILE_SHARE_DELETE, so a delete should fail.
+ const ping4File = await OS.File.open(
+ ping4Path,
+ { read: true, existing: true },
+ { winShare: OS.Constants.Win.FILE_SHARE_READ }
+ );
+
+ // Check that there is no error if the file can't be removed.
+ await TelemetryStorage.removeUninstallPings();
+
+ // And file should still exist.
+ Assert.ok(await OS.File.exists(ping4Path));
+
+ // Close the file, it should be possible to remove now.
+ ping4File.close();
+ await TelemetryStorage.removeUninstallPings();
+ Assert.ok(!(await OS.File.exists(ping4Path)));
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_UserInteraction.js b/toolkit/components/telemetry/tests/unit/test_UserInteraction.js
new file mode 100644
index 0000000000..5fc3c5ecd1
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_UserInteraction.js
@@ -0,0 +1,134 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const TEST_USER_INTERACTION_ID = "testing.interaction";
+const TEST_VALUE_1 = "some value";
+const TEST_VALUE_2 = "some other value";
+const TEST_INVALID_VALUE =
+ "This is a value that is far too long - it has too many characters.";
+const TEST_ADDITIONAL_TEXT_1 = "some additional text";
+const TEST_ADDITIONAL_TEXT_2 = "some other additional text";
+
+function run_test() {
+ let obj1 = {};
+ let obj2 = {};
+
+ Assert.ok(UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1));
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj1)
+ );
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj2)
+ );
+
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj2));
+
+ // Unlike TelemetryStopwatch, we can clobber UserInteractions.
+ Assert.ok(UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1));
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj1)
+ );
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj2)
+ );
+
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj2));
+
+ // Ensure that we can finish a UserInteraction that was accidentally started
+ // twice
+ Assert.ok(UserInteraction.finish(TEST_USER_INTERACTION_ID));
+ Assert.ok(UserInteraction.finish(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(UserInteraction.finish(TEST_USER_INTERACTION_ID, obj2));
+
+ // Make sure we can't start or finish non-existent UserInteractions.
+ Assert.ok(!UserInteraction.start("non-existent.interaction", TEST_VALUE_1));
+ Assert.ok(
+ !UserInteraction.start("non-existent.interaction", TEST_VALUE_1, obj1)
+ );
+ Assert.ok(
+ !UserInteraction.start("non-existent.interaction", TEST_VALUE_1, obj2)
+ );
+ Assert.ok(!UserInteraction.running("non-existent.interaction"));
+ Assert.ok(!UserInteraction.running("non-existent.interaction", obj1));
+ Assert.ok(!UserInteraction.running("non-existent.interaction", obj2));
+ Assert.ok(!UserInteraction.finish("non-existent.interaction"));
+ Assert.ok(!UserInteraction.finish("non-existent.interaction", obj1));
+ Assert.ok(!UserInteraction.finish("non-existent.interaction", obj2));
+
+ // Ensure that we enforce the character limit on value strings.
+ Assert.ok(
+ !UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_INVALID_VALUE)
+ );
+ Assert.ok(
+ !UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_INVALID_VALUE, obj1)
+ );
+ Assert.ok(
+ !UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_INVALID_VALUE, obj2)
+ );
+ Assert.ok(!UserInteraction.running(TEST_USER_INTERACTION_ID));
+ Assert.ok(!UserInteraction.running(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(!UserInteraction.running(TEST_USER_INTERACTION_ID, obj2));
+
+ // Verify that they can be used again
+ Assert.ok(UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2));
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2, obj1)
+ );
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2, obj2)
+ );
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj2));
+ Assert.ok(UserInteraction.finish(TEST_USER_INTERACTION_ID));
+ Assert.ok(UserInteraction.finish(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(UserInteraction.finish(TEST_USER_INTERACTION_ID, obj2));
+
+ Assert.ok(!UserInteraction.finish(TEST_USER_INTERACTION_ID));
+ Assert.ok(!UserInteraction.finish(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(!UserInteraction.finish(TEST_USER_INTERACTION_ID, obj2));
+ Assert.ok(!UserInteraction.running(TEST_USER_INTERACTION_ID));
+ Assert.ok(!UserInteraction.running(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(!UserInteraction.running(TEST_USER_INTERACTION_ID, obj2));
+
+ // Verify that they can be used again with different values.
+ Assert.ok(UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1));
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2, obj1)
+ );
+ Assert.ok(
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj2)
+ );
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj1));
+ Assert.ok(UserInteraction.running(TEST_USER_INTERACTION_ID, obj2));
+ Assert.ok(UserInteraction.finish(TEST_USER_INTERACTION_ID));
+ Assert.ok(
+ UserInteraction.finish(
+ TEST_USER_INTERACTION_ID,
+ obj1,
+ TEST_ADDITIONAL_TEXT_1
+ )
+ );
+ Assert.ok(
+ UserInteraction.finish(
+ TEST_USER_INTERACTION_ID,
+ obj2,
+ TEST_ADDITIONAL_TEXT_2
+ )
+ );
+
+ // Test that they can be cancelled
+ Assert.ok(UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1));
+ Assert.ok(UserInteraction.cancel(TEST_USER_INTERACTION_ID));
+ Assert.ok(!UserInteraction.running(TEST_USER_INTERACTION_ID));
+ Assert.ok(!UserInteraction.finish(TEST_USER_INTERACTION_ID));
+
+ // Test that they cannot be cancelled twice
+ Assert.ok(!UserInteraction.cancel(TEST_USER_INTERACTION_ID));
+ Assert.ok(!UserInteraction.cancel(TEST_USER_INTERACTION_ID));
+}
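
Note on the API exercised above: UserInteraction.start()/finish() time a named interaction (optionally keyed by an object token so several instances can run concurrently), and cancel() discards a timing. A minimal caller-side sketch follows, assuming a hypothetical "browsing.tab-open" UserInteraction ID and a placeholder doTheUserVisibleWork() helper, neither of which is part of this patch:

    // Sketch only: hypothetical UserInteraction ID and placeholder async work.
    let token = {};
    UserInteraction.start("browsing.tab-open", "menu-item", token);
    doTheUserVisibleWork().then(
      () => UserInteraction.finish("browsing.tab-open", token),
      // On failure, discard the timing instead of recording it.
      () => UserInteraction.cancel("browsing.tab-open", token)
    );
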
diff --git a/toolkit/components/telemetry/tests/unit/test_UserInteraction_annotations.js b/toolkit/components/telemetry/tests/unit/test_UserInteraction_annotations.js
new file mode 100644
index 0000000000..c6989bbc9e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_UserInteraction_annotations.js
@@ -0,0 +1,481 @@
+/* Any copyright is dedicated to the Public Domain.
+http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const { TestUtils } = ChromeUtils.import(
+ "resource://testing-common/TestUtils.jsm"
+);
+const { TelemetryUtils } = ChromeUtils.import(
+ "resource://gre/modules/TelemetryUtils.jsm"
+);
+
+const HANG_TIME = 1000; // ms
+const TEST_USER_INTERACTION_ID = "testing.interaction";
+const TEST_CLOBBERED_USER_INTERACTION_ID = `${TEST_USER_INTERACTION_ID} (clobbered)`;
+const TEST_VALUE_1 = "some value";
+const TEST_VALUE_2 = "some other value";
+const TEST_ADDITIONAL_TEXT_1 = "some additional text";
+const TEST_ADDITIONAL_TEXT_2 = "some other additional text";
+
+/**
+ * Intentionally hangs the main thread in the parent process for
+ * HANG_TIME, and then returns the BHR hang report generated for
+ * that hang.
+ *
+ *
+ * @param {boolean} expectTestAnnotation
+ *        Whether the generated hang report is expected to contain an
+ *        annotation for TEST_USER_INTERACTION_ID.
+ *
+ * @resolves {nsIHangDetails}
+ * The hang report that was created.
+ */
+async function hangAndWaitForReport(expectTestAnnotation) {
+ let hangPromise = TestUtils.topicObserved("bhr-thread-hang", subject => {
+ let hang = subject.QueryInterface(Ci.nsIHangDetails);
+ if (hang.thread != "Gecko") {
+ return false;
+ }
+
+ if (expectTestAnnotation) {
+ return hang.annotations.some(annotation =>
+ annotation[0].startsWith(TEST_USER_INTERACTION_ID)
+ );
+ }
+
+ return hang.annotations.every(
+ annotation => annotation[0] != TEST_USER_INTERACTION_ID
+ );
+ });
+
+ executeSoon(() => {
+ let startTime = Date.now();
+ // eslint-disable-next-line no-empty
+ while (Date.now() - startTime < HANG_TIME) {}
+ });
+
+ let [report] = await hangPromise;
+ return report;
+}
+
+/**
+ * Makes sure that the profiler is initialized. This has the added side-effect
+ * of making sure that BHR is initialized as well.
+ */
+function ensureProfilerInitialized() {
+ if (!Services.profiler.CanProfile()) {
+ return false;
+ }
+
+ startProfiler();
+ stopProfiler();
+ return true;
+}
+
+function stopProfiler() {
+ Services.profiler.StopProfiler();
+}
+
+function startProfiler() {
+ // Starting and stopping the profiler with the "stackwalk" flag will cause the
+ // profiler's stackwalking features to be synchronously initialized. This
+  // should ensure that BHR is initialized quickly enough for these tests.
+ Services.profiler.StartProfiler(1000, 10, ["stackwalk"]);
+}
+
+/**
+ * Given a performance profile object, returns a count of how many
+ * markers matched the value (and optional additionalText) that
+ * the UserInteraction backend added. This function only checks
+ * markers on thread 0.
+ *
+ * @param {Object} profile
+ * A profile returned from Services.profiler.getProfileData();
+ * @param {String} value
+ * The value that the marker is expected to have.
+ * @param {String} additionalText
+ * (Optional) If additionalText was provided when finishing the
+ * UserInteraction, then markerCount will check for a marker with
+ * text in the form of "value,additionalText".
+ * @returns {Number}
+ * A count of how many markers appear that match the criteria.
+ */
+function markerCount(profile, value, additionalText) {
+ let expectedName = value;
+ if (additionalText) {
+ expectedName = [value, additionalText].join(",");
+ }
+
+ let thread0 = profile.threads[0];
+ let stringTable = thread0.stringTable;
+ let markerStringIndex = stringTable.indexOf(TEST_USER_INTERACTION_ID);
+
+ let markers = thread0.markers.data.filter(markerData => {
+ return (
+ markerData[0] == markerStringIndex && markerData[5].name == expectedName
+ );
+ });
+
+ return markers.length;
+}
+
+/**
+ * Given an nsIHangReport, returns true if there are one or more annotations
+ * with the TEST_USER_INTERACTION_ID name, and the passed value.
+ *
+ * @param {nsIHangReport} report
+ * The hang report to check the annotations of.
+ * @param {String} value
+ * The value that the annotation should have.
+ * @returns {boolean}
+ * True if the annotation was found.
+ */
+function hasHangAnnotation(report, value) {
+ return report.annotations.some(annotation => {
+ return annotation[0] == TEST_USER_INTERACTION_ID && annotation[1] == value;
+ });
+}
+
+/**
+ * Given an nsIHangReport, returns true if there are one or more annotations
+ * with the TEST_CLOBBERED_USER_INTERACTION_ID name, and the passed value.
+ *
+ * This check should be used when we expect a pre-existing UserInteraction to
+ * have been clobbered by a new UserInteraction.
+ *
+ * @param {nsIHangReport} report
+ * The hang report to check the annotations of.
+ * @param {String} value
+ * The value that the annotation should have.
+ * @returns {boolean}
+ * True if the annotation was found.
+ */
+function hasClobberedHangAnnotation(report, value) {
+ return report.annotations.some(annotation => {
+ return (
+ annotation[0] == TEST_CLOBBERED_USER_INTERACTION_ID &&
+ annotation[1] == value
+ );
+ });
+}
+
+/**
+ * Tests that UserInteractions cause BHR annotations and profiler
+ * markers to be written.
+ */
+add_task(async function test_recording_annotations_and_markers() {
+ if (!Services.telemetry.canRecordExtended) {
+ Assert.ok("Hang reporting not enabled.");
+ return;
+ }
+
+ if (!ensureProfilerInitialized()) {
+ return;
+ }
+
+ Services.prefs.setBoolPref(
+ TelemetryUtils.Preferences.OverridePreRelease,
+ true
+ );
+
+ // First, we'll check to see if we can get a single annotation and
+ // profiler marker to be set.
+ startProfiler();
+
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1);
+ let report = await hangAndWaitForReport(true);
+ UserInteraction.finish(TEST_USER_INTERACTION_ID);
+ let profile = Services.profiler.getProfileData();
+ stopProfiler();
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_1),
+ 1,
+ "Should have found the marker in the profile."
+ );
+
+ Assert.ok(
+ hasHangAnnotation(report, TEST_VALUE_1),
+ "Should have the BHR annotation set."
+ );
+
+ // Next, we'll make sure that when we're not running a UserInteraction,
+ // no marker or annotation is set.
+ startProfiler();
+
+ report = await hangAndWaitForReport(false);
+ profile = Services.profiler.getProfileData();
+
+ stopProfiler();
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_1),
+ 0,
+ "Should not find the marker in the profile."
+ );
+ Assert.ok(
+    !hasHangAnnotation(report, TEST_VALUE_1),
+ "Should not have the BHR annotation set."
+ );
+
+ // Next, we'll ensure that we can set multiple markers and annotations
+ // by using the optional object argument to start() and finish().
+ startProfiler();
+
+ let obj1 = {};
+ let obj2 = {};
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj1);
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2, obj2);
+ report = await hangAndWaitForReport(true);
+ UserInteraction.finish(
+ TEST_USER_INTERACTION_ID,
+ obj1,
+ TEST_ADDITIONAL_TEXT_1
+ );
+ UserInteraction.finish(
+ TEST_USER_INTERACTION_ID,
+ obj2,
+ TEST_ADDITIONAL_TEXT_2
+ );
+ profile = Services.profiler.getProfileData();
+
+ stopProfiler();
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_1, TEST_ADDITIONAL_TEXT_1),
+ 1,
+ "Should have found first marker in the profile."
+ );
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_2, TEST_ADDITIONAL_TEXT_2),
+ 1,
+ "Should have found second marker in the profile."
+ );
+
+ Assert.ok(
+ hasHangAnnotation(report, TEST_VALUE_1),
+ "Should have the first BHR annotation set."
+ );
+
+ Assert.ok(
+ hasHangAnnotation(report, TEST_VALUE_2),
+ "Should have the second BHR annotation set."
+ );
+});
+
+/**
+ * Tests that UserInteractions can be updated, resulting in their BHR
+ * annotations and profiler markers to also be updated.
+ */
+add_task(async function test_updating_annotations_and_markers() {
+ if (!Services.telemetry.canRecordExtended) {
+ Assert.ok("Hang reporting not enabled.");
+ return;
+ }
+
+ if (!ensureProfilerInitialized()) {
+ return;
+ }
+
+  // First, we'll check that updating a single UserInteraction replaces the
+  // value recorded in its annotation and profiler marker.
+ startProfiler();
+
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1);
+ // Updating the UserInteraction means that a new value will overwrite
+ // the old.
+ UserInteraction.update(TEST_USER_INTERACTION_ID, TEST_VALUE_2);
+ let report = await hangAndWaitForReport(true);
+ UserInteraction.finish(TEST_USER_INTERACTION_ID);
+ let profile = Services.profiler.getProfileData();
+
+ stopProfiler();
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_1),
+ 0,
+ "Should not have found the original marker in the profile."
+ );
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_2),
+ 1,
+ "Should have found the updated marker in the profile."
+ );
+
+ Assert.ok(
+ !hasHangAnnotation(report, TEST_VALUE_1),
+ "Should not have the original BHR annotation set."
+ );
+
+ Assert.ok(
+ hasHangAnnotation(report, TEST_VALUE_2),
+ "Should have the updated BHR annotation set."
+ );
+
+ // Next, we'll ensure that we can update multiple markers and annotations
+  // by using the optional object argument to start(), update() and finish().
+ startProfiler();
+
+ let obj1 = {};
+ let obj2 = {};
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj1);
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2, obj2);
+
+ // Now swap the values between the two UserInteractions
+ UserInteraction.update(TEST_USER_INTERACTION_ID, TEST_VALUE_2, obj1);
+ UserInteraction.update(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj2);
+
+ report = await hangAndWaitForReport(true);
+ UserInteraction.finish(
+ TEST_USER_INTERACTION_ID,
+ obj1,
+ TEST_ADDITIONAL_TEXT_1
+ );
+ UserInteraction.finish(
+ TEST_USER_INTERACTION_ID,
+ obj2,
+ TEST_ADDITIONAL_TEXT_2
+ );
+ profile = Services.profiler.getProfileData();
+
+ stopProfiler();
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_2, TEST_ADDITIONAL_TEXT_1),
+ 1,
+ "Should have found first marker in the profile."
+ );
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_1, TEST_ADDITIONAL_TEXT_2),
+ 1,
+ "Should have found second marker in the profile."
+ );
+
+ Assert.ok(
+ hasHangAnnotation(report, TEST_VALUE_1),
+ "Should have the first BHR annotation set."
+ );
+
+ Assert.ok(
+ hasHangAnnotation(report, TEST_VALUE_2),
+ "Should have the second BHR annotation set."
+ );
+});
+
+/**
+ * Tests that UserInteractions can be cancelled, resulting in no BHR
+ * annotations and profiler markers being recorded.
+ */
+add_task(async function test_cancelling_annotations_and_markers() {
+ if (!Services.telemetry.canRecordExtended) {
+ Assert.ok("Hang reporting not enabled.");
+ return;
+ }
+
+ if (!ensureProfilerInitialized()) {
+ return;
+ }
+
+  // First, we'll check that cancelling a single UserInteraction prevents its
+  // annotation and profiler marker from being recorded.
+ startProfiler();
+
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1);
+ UserInteraction.cancel(TEST_USER_INTERACTION_ID);
+ let report = await hangAndWaitForReport(false);
+
+ let profile = Services.profiler.getProfileData();
+
+ stopProfiler();
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_1),
+ 0,
+ "Should not have found the marker in the profile."
+ );
+
+ Assert.ok(
+ !hasHangAnnotation(report, TEST_VALUE_1),
+ "Should not have the BHR annotation set."
+ );
+
+ // Next, we'll ensure that we can cancel multiple markers and annotations
+  // by using the optional object argument to start() and cancel().
+ startProfiler();
+
+ let obj1 = {};
+ let obj2 = {};
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1, obj1);
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2, obj2);
+
+ UserInteraction.cancel(TEST_USER_INTERACTION_ID, obj1);
+ UserInteraction.cancel(TEST_USER_INTERACTION_ID, obj2);
+
+ report = await hangAndWaitForReport(false);
+
+ Assert.ok(
+ !UserInteraction.finish(TEST_USER_INTERACTION_ID, obj1),
+ "Finishing a canceled UserInteraction should return false."
+ );
+
+ Assert.ok(
+ !UserInteraction.finish(TEST_USER_INTERACTION_ID, obj2),
+ "Finishing a canceled UserInteraction should return false."
+ );
+
+ profile = Services.profiler.getProfileData();
+
+ stopProfiler();
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_1),
+ 0,
+ "Should not have found the first marker in the profile."
+ );
+
+ Assert.equal(
+ markerCount(profile, TEST_VALUE_2),
+ 0,
+ "Should not have found the second marker in the profile."
+ );
+
+ Assert.ok(
+ !hasHangAnnotation(report, TEST_VALUE_1),
+ "Should not have the first BHR annotation set."
+ );
+
+ Assert.ok(
+ !hasHangAnnotation(report, TEST_VALUE_2),
+ "Should not have the second BHR annotation set."
+ );
+});
+
+/**
+ * Tests that starting UserInteractions with the same ID and object
+ * creates a clobber annotation.
+ */
+add_task(async function test_clobbered_annotations() {
+ if (!Services.telemetry.canRecordExtended) {
+ Assert.ok("Hang reporting not enabled.");
+ return;
+ }
+
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_1);
+ // Now clobber the original UserInteraction
+ UserInteraction.start(TEST_USER_INTERACTION_ID, TEST_VALUE_2);
+
+ let report = await hangAndWaitForReport(true);
+ Assert.ok(
+ UserInteraction.finish(TEST_USER_INTERACTION_ID),
+ "Should have been able to finish the UserInteraction."
+ );
+
+ Assert.ok(
+ !hasHangAnnotation(report, TEST_VALUE_1),
+ "Should not have the original BHR annotation set."
+ );
+
+ Assert.ok(
+ hasClobberedHangAnnotation(report, TEST_VALUE_2),
+ "Should have the clobber BHR annotation set."
+ );
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_bug1555798.js b/toolkit/components/telemetry/tests/unit/test_bug1555798.js
new file mode 100644
index 0000000000..a7716d5875
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_bug1555798.js
@@ -0,0 +1,50 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.defineModuleGetter(
+ this,
+ "TelemetryTestUtils",
+ "resource://testing-common/TelemetryTestUtils.jsm"
+);
+
+add_task(async function test_bug1555798() {
+ /*
+ The idea behind this bug is that the registration of dynamic scalars causes
+    the position of the ScalarInfo for telemetry.dynamic_events_count to move,
+    which causes things to explode.
+
+ So to test this we'll be registering two dynamic events, recording to one of
+ the events (to ensure the Scalar for event1 is allocated from the unmoved
+ DynamicScalarInfo&), registering several dynamic scalars to cause the
+ nsTArray of DynamicScalarInfo to realloc, and then recording to the second
+ event to make the Event Summary Scalar for event2 try to allocate from where
+ the DynamicScalarInfo used to be.
+ */
+ Telemetry.clearEvents();
+
+ const DYNAMIC_CATEGORY = "telemetry.test.dynamic.event";
+ Telemetry.registerEvents(DYNAMIC_CATEGORY, {
+ an_event: {
+ methods: ["a_method"],
+ objects: ["an_object", "another_object"],
+ record_on_release: true,
+ expired: false,
+ },
+ });
+ Telemetry.recordEvent(DYNAMIC_CATEGORY, "a_method", "an_object");
+
+ for (let i = 0; i < 100; ++i) {
+ Telemetry.registerScalars("telemetry.test.dynamic" + i, {
+ scalar_name: {
+ kind: Ci.nsITelemetry.SCALAR_TYPE_COUNT,
+ record_on_release: true,
+ },
+ });
+ Telemetry.scalarAdd("telemetry.test.dynamic" + i + ".scalar_name", 1);
+ }
+
+ Telemetry.recordEvent(DYNAMIC_CATEGORY, "a_method", "another_object");
+
+ TelemetryTestUtils.assertNumberOfEvents(2, {}, { process: "dynamic" });
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_client_id.js b/toolkit/components/telemetry/tests/unit/test_client_id.js
new file mode 100644
index 0000000000..96b22546d1
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_client_id.js
@@ -0,0 +1,372 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+if (AppConstants.MOZ_GLEAN) {
+ Cu.importGlobalProperties(["Glean"]);
+}
+const { ClientID } = ChromeUtils.import("resource://gre/modules/ClientID.jsm");
+const { CommonUtils } = ChromeUtils.import(
+ "resource://services-common/utils.js"
+);
+const { OS } = ChromeUtils.import("resource://gre/modules/osfile.jsm");
+
+const PREF_CACHED_CLIENTID = "toolkit.telemetry.cachedClientID";
+
+const SCALAR_DELETION_REQUEST_ECOSYSTEM_CLIENT_ID =
+ "deletion.request.ecosystem_client_id";
+
+var drsPath;
+
+const uuidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
+
+function run_test() {
+ do_get_profile();
+ drsPath = OS.Path.join(
+ OS.Constants.Path.profileDir,
+ "datareporting",
+ "state.json"
+ );
+
+ if (AppConstants.MOZ_GLEAN) {
+    // We need to ensure FOG is initialized; otherwise operations will be stuck in the pre-init queue.
+ let FOG = Cc["@mozilla.org/toolkit/glean;1"].createInstance(Ci.nsIFOG);
+ FOG.initializeFOG();
+ }
+
+ run_next_test();
+}
+
+add_task(async function test_ecosystemClientID() {
+ await ClientID._reset();
+ Assert.ok(!ClientID.getCachedEcosystemClientID());
+ let ecosystemClientID = await ClientID.getEcosystemClientID();
+ Assert.equal(typeof ecosystemClientID, "string");
+ Assert.equal(ClientID.getCachedEcosystemClientID(), ecosystemClientID);
+
+ let clientID = await ClientID.getClientID();
+ await ClientID._reset();
+ await OS.File.writeAtomic(
+ drsPath,
+ JSON.stringify({
+ clientID,
+ }),
+ {
+ encoding: "utf-8",
+ tmpPath: drsPath + ".tmp",
+ }
+ );
+
+ let newClientID = await ClientID.getClientID();
+ Assert.equal(newClientID, clientID);
+
+ let newEcosystemClientID = await ClientID.getEcosystemClientID();
+ Assert.notEqual(newEcosystemClientID, ecosystemClientID);
+});
+
+add_task(async function test_client_id() {
+ const invalidIDs = [
+ [-1, "setIntPref"],
+ [0.5, "setIntPref"],
+ ["INVALID-UUID", "setStringPref"],
+ [true, "setBoolPref"],
+ ["", "setStringPref"],
+ ["3d1e1560-682a-4043-8cf2-aaaaaaaaaaaZ", "setStringPref"],
+ ];
+
+ // If there is no DRS file, we should get a new client ID.
+ await ClientID._reset();
+ let clientID = await ClientID.getClientID();
+ Assert.equal(typeof clientID, "string");
+ Assert.ok(uuidRegex.test(clientID));
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ clientID
+ );
+ }
+
+  // We should be guarded against invalid DRS JSON.
+ await ClientID._reset();
+ await OS.File.writeAtomic(drsPath, "abcd", {
+ encoding: "utf-8",
+ tmpPath: drsPath + ".tmp",
+ });
+ clientID = await ClientID.getClientID();
+ Assert.equal(typeof clientID, "string");
+ Assert.ok(uuidRegex.test(clientID));
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ clientID
+ );
+ }
+
+ // If the DRS data is broken, we should end up with a new client ID.
+ for (let [invalidID] of invalidIDs) {
+ await ClientID._reset();
+ await CommonUtils.writeJSON({ clientID: invalidID }, drsPath);
+ clientID = await ClientID.getClientID();
+ Assert.equal(typeof clientID, "string");
+ Assert.ok(uuidRegex.test(clientID));
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ clientID
+ );
+ }
+ }
+
+  // Ensure that cached IDs are being checked for validity.
+ for (let [invalidID, prefFunc] of invalidIDs) {
+ await ClientID._reset();
+ Services.prefs[prefFunc](PREF_CACHED_CLIENTID, invalidID);
+ let cachedID = ClientID.getCachedClientID();
+ Assert.strictEqual(
+ cachedID,
+ null,
+ "ClientID should ignore invalid cached IDs"
+ );
+ Assert.ok(
+ !Services.prefs.prefHasUserValue(PREF_CACHED_CLIENTID),
+ "ClientID should reset invalid cached IDs"
+ );
+ Assert.ok(
+ Services.prefs.getPrefType(PREF_CACHED_CLIENTID) ==
+ Ci.nsIPrefBranch.PREF_INVALID,
+ "ClientID should reset invalid cached IDs"
+ );
+ }
+});
+
+add_task(async function test_setCanaryClientIDs() {
+ const KNOWN_UUID = "c0ffeec0-ffee-c0ff-eec0-ffeec0ffeec0";
+
+ await ClientID._reset();
+
+  // We should be able to set the canary client ID (a known, valid UUID).
+ await ClientID.setCanaryClientIDs();
+ let clientID = await ClientID.getClientID();
+ Assert.equal(KNOWN_UUID, clientID);
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ clientID
+ );
+ }
+});
+
+add_task(async function test_resetEcosystemClientID() {
+ await ClientID._reset();
+
+ let firstClientID = await ClientID.getClientID();
+ let firstEcosystemClientID = await ClientID.getEcosystemClientID();
+ Assert.ok(firstClientID);
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ firstClientID
+ );
+ }
+ Assert.ok(firstEcosystemClientID);
+
+ // We should reset the ecosystem client id, but not the main client id.
+ await ClientID.resetEcosystemClientID();
+ let secondClientID = await ClientID.getClientID();
+ let secondEcosystemClientID = await ClientID.getEcosystemClientID();
+ Assert.equal(firstClientID, secondClientID);
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ firstClientID
+ );
+ }
+ Assert.notEqual(firstEcosystemClientID, secondEcosystemClientID);
+
+ // The new id should have been persisted to disk.
+ await ClientID._reset();
+ let thirdClientID = await ClientID.getClientID();
+ let thirdEcosystemClientID = await ClientID.getEcosystemClientID();
+ Assert.equal(thirdClientID, secondClientID);
+ Assert.equal(thirdEcosystemClientID, secondEcosystemClientID);
+});
+
+add_task(async function test_removeClientIDs() {
+ // We should get a valid UUID after reset
+ await ClientID._reset();
+ let firstClientID = await ClientID.getClientID();
+ let firstEcosystemClientID = await ClientID.getEcosystemClientID();
+ Assert.equal(typeof firstClientID, "string");
+ Assert.equal(typeof firstEcosystemClientID, "string");
+ Assert.ok(uuidRegex.test(firstClientID));
+ Assert.ok(uuidRegex.test(firstEcosystemClientID));
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ firstClientID
+ );
+ }
+
+ await ClientID.removeClientIDs();
+
+ if (
+ AppConstants.platform != "android" &&
+ AppConstants.MOZ_APP_NAME != "thunderbird"
+ ) {
+ // We don't record the old ecosystem client ID on Android or Thunderbird,
+ // since the FxA and telemetry infrastructure is different there.
+ let prefClientID = Services.prefs.getStringPref(PREF_CACHED_CLIENTID, null);
+ let scalarsDeletionRequest = Services.telemetry.getSnapshotForScalars(
+ "deletion-request"
+ );
+ Assert.ok(!prefClientID);
+ Assert.ok(
+ !scalarsDeletionRequest.parent?.[
+ SCALAR_DELETION_REQUEST_ECOSYSTEM_CLIENT_ID
+ ]
+ );
+ }
+
+ // When resetting again we should get a new ID
+ let nextClientID = await ClientID.getClientID();
+ let nextEcosystemClientID = await ClientID.getEcosystemClientID();
+ Assert.equal(typeof nextClientID, "string");
+ Assert.equal(typeof nextEcosystemClientID, "string");
+ Assert.ok(uuidRegex.test(nextClientID));
+ Assert.ok(uuidRegex.test(nextEcosystemClientID));
+ Assert.notEqual(
+ firstClientID,
+ nextClientID,
+ "After reset client ID should be different."
+ );
+ Assert.notEqual(
+ firstEcosystemClientID,
+ nextEcosystemClientID,
+ "After reset ecosystem client ID should be different."
+ );
+
+ let cachedID = ClientID.getCachedClientID();
+ Assert.equal(nextClientID, cachedID);
+
+ let cachedEcosystemID = ClientID.getCachedEcosystemClientID();
+ Assert.equal(nextEcosystemClientID, cachedEcosystemID);
+
+ let prefClientID = Services.prefs.getStringPref(PREF_CACHED_CLIENTID, null);
+ Assert.equal(nextClientID, prefClientID);
+
+ if (
+ AppConstants.platform != "android" &&
+ AppConstants.MOZ_APP_NAME != "thunderbird"
+ ) {
+ let scalarsDeletionRequest = Services.telemetry.getSnapshotForScalars(
+ "deletion-request"
+ );
+ Assert.equal(
+ nextEcosystemClientID,
+ scalarsDeletionRequest.parent[SCALAR_DELETION_REQUEST_ECOSYSTEM_CLIENT_ID]
+ );
+ }
+});
+
+add_task(async function test_removeParallelGet() {
+ // We should get a valid UUID after reset
+ await ClientID.removeClientIDs();
+ let firstClientID = await ClientID.getClientID();
+
+ // We should get the same ID twice when requesting it in parallel to a reset.
+ let promiseRemoveClientIDs = ClientID.removeClientIDs();
+ let p = ClientID.getClientID();
+ let newClientID = await ClientID.getClientID();
+ await promiseRemoveClientIDs;
+ let otherClientID = await p;
+
+ Assert.notEqual(
+ firstClientID,
+ newClientID,
+ "After reset client ID should be different."
+ );
+ Assert.equal(
+ newClientID,
+ otherClientID,
+ "Getting the client ID in parallel to a reset should give the same id."
+ );
+ if (AppConstants.MOZ_GLEAN) {
+ Assert.equal(
+ Glean.fogValidation.legacyTelemetryClientId.testGetValue(
+ "fog-validation"
+ ),
+ newClientID
+ );
+ }
+});
+
+add_task(
+ {
+ skip_if: () => AppConstants.platform != "android",
+ },
+ async function test_FennecCanaryDetect() {
+ const KNOWN_UUID = "c0ffeec0-ffee-c0ff-eec0-ffeec0ffeec0";
+
+ // We should get a valid UUID after reset
+ await ClientID.removeClientIDs();
+ let firstClientID = await ClientID.getClientID();
+ Assert.notEqual(KNOWN_UUID, firstClientID, "Client ID should be random.");
+
+ // Set the canary client ID.
+ await ClientID.setCanaryClientIDs();
+ Assert.equal(
+ KNOWN_UUID,
+ await ClientID.getClientID(),
+ "Client ID should be known canary."
+ );
+
+ await ClientID.removeClientIDs();
+ let newClientID = await ClientID.getClientID();
+ Assert.notEqual(
+ KNOWN_UUID,
+ newClientID,
+ "After reset Client ID should be random."
+ );
+ Assert.notEqual(
+ firstClientID,
+ newClientID,
+ "After reset Client ID should be new."
+ );
+ Assert.ok(
+ ClientID.wasCanaryClientID(),
+ "After reset we should have detected a canary client ID"
+ );
+
+ await ClientID.removeClientIDs();
+ let clientID = await ClientID.getClientID();
+ Assert.notEqual(
+ KNOWN_UUID,
+ clientID,
+ "After reset Client ID should be random."
+ );
+ Assert.notEqual(
+ newClientID,
+ clientID,
+ "After reset Client ID should be new."
+ );
+ Assert.ok(
+ !ClientID.wasCanaryClientID(),
+ "After reset we should not have detected a canary client ID"
+ );
+ }
+);
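
For context, a minimal consumer-side sketch of the ClientID API exercised above; the annotatePing() helper name is hypothetical, and the import path is the one used in the test:

    // Sketch only: attach the Telemetry client ID to an outgoing payload.
    const { ClientID } = ChromeUtils.import("resource://gre/modules/ClientID.jsm");

    async function annotatePing(payload) {
      // Lazily loads (or creates) the ID from datareporting/state.json.
      payload.clientId = await ClientID.getClientID();
      return payload;
    }
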
diff --git a/toolkit/components/telemetry/tests/unit/xpcshell.ini b/toolkit/components/telemetry/tests/unit/xpcshell.ini
new file mode 100644
index 0000000000..02e98356a3
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/xpcshell.ini
@@ -0,0 +1,100 @@
+[DEFAULT]
+head = head.js
+firefox-appdir = browser
+# The *.xpi files are only needed for test_TelemetryEnvironment.js, but
+# xpcshell fails to install tests if we move them under the test entry.
+support-files =
+ data/search-extensions/engines.json
+ data/search-extensions/telemetrySearchIdentifier/manifest.json
+ dictionary.xpi
+ engine.xml
+ system.xpi
+ restartless.xpi
+ testUnicodePDB32.dll
+ testNoPDB32.dll
+ testUnicodePDB64.dll
+ testNoPDB64.dll
+ testUnicodePDBAArch64.dll
+ testNoPDBAArch64.dll
+ !/toolkit/mozapps/extensions/test/xpcshell/head_addons.js
+generated-files =
+ dictionary.xpi
+ system.xpi
+ restartless.xpi
+
+[test_UserInteraction.js]
+[test_UserInteraction_annotations.js]
+# BHR is disabled on tsan, asan, android and outside of Nightly.
+skip-if = debug || asan || tsan || os == "android" || release_or_beta
+[test_client_id.js]
+[test_MigratePendingPings.js]
+[test_TelemetryHistograms.js]
+[test_SubsessionChaining.js]
+tags = addons
+[test_SyncPingIntegration.js]
+skip-if = os == "android"
+[test_TelemetryEnvironment.js]
+skip-if = os == "android"
+tags = addons
+[test_PingAPI.js]
+[test_TelemetryFlagClear.js]
+[test_TelemetryLateWrites.js]
+[test_TelemetryLockCount.js]
+[test_TelemetryController.js]
+[test_TelemetryClientID_reset.js]
+skip-if = os == "android" # Disabled as Android/GeckoView doesn't run TelemetryController
+[test_HealthPing.js]
+skip-if = (verify && (os == 'win')) || (os == 'android' && processor == 'x86_64')
+tags = addons
+[test_TelemetryController_idle.js]
+[test_TelemetryControllerShutdown.js]
+tags = addons
+[test_TelemetryStopwatch.js]
+[test_TelemetryControllerBuildID.js]
+[test_TelemetrySendOldPings.js]
+skip-if = os == "android" # Disabled due to intermittent orange on Android
+tags = addons
+[test_TelemetrySession.js]
+tags = addons
+skip-if = (verify && debug && os == 'linux')
+[test_TelemetrySession_abortedSessionQueued.js]
+skip-if = os == "android"
+[test_TelemetrySession_activeTicks.js]
+[test_TelemetrySend.js]
+skip-if = !debug #Bug 1457984
+[test_ChildHistograms.js]
+skip-if = os == "android" # Disabled due to crashes (see bug 1331366)
+tags = addons
+[test_ChildScalars.js]
+skip-if = os == "android" # Disabled due to crashes (see bug 1331366)
+[test_SocketScalars.js]
+[test_TelemetryReportingPolicy.js]
+tags = addons
+[test_TelemetryScalars.js]
+[test_TelemetryScalars_buildFaster.js]
+[test_TelemetryScalars_impressionId.js]
+[test_TelemetryScalars_multistore.js]
+[test_TelemetryTimestamps.js]
+[test_TelemetryChildEvents_buildFaster.js]
+skip-if = os == "android" # Disabled due to crashes (see bug 1331366)
+[test_TelemetryEvents.js]
+[test_TelemetryEvents_buildFaster.js]
+[test_ChildEvents.js]
+skip-if = os == "android" # Disabled due to crashes (see bug 1331366)
+[test_ModulesPing.js]
+skip-if = (os == "win" && processor == "aarch64") # bug 1530759
+[test_PingSender.js]
+skip-if = (os == "android") || (os == "linux" && bits == 32)
+[test_TelemetryAndroidEnvironment.js]
+[test_TelemetryUtils.js]
+[test_ThirdPartyModulesPing.js]
+run-if = nightly_build && (os == 'win')
+[test_EcosystemTelemetry.js]
+skip-if = appname == "thunderbird"
+[test_EventPing.js]
+tags = coverage
+[test_CoveragePing.js]
+[test_PrioPing.js]
+[test_bug1555798.js]
+[test_UninstallPing.js]
+run-if = os == 'win'
diff --git a/toolkit/components/telemetry/tests/utils/TelemetryTestUtils.jsm b/toolkit/components/telemetry/tests/utils/TelemetryTestUtils.jsm
new file mode 100644
index 0000000000..12ae335b6a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/utils/TelemetryTestUtils.jsm
@@ -0,0 +1,368 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const EXPORTED_SYMBOLS = ["TelemetryTestUtils"];
+
+const { Assert } = ChromeUtils.import("resource://testing-common/Assert.jsm");
+const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
+
+var TelemetryTestUtils = {
+ /* Scalars */
+
+ /**
+   * A helper that asserts the value of a scalar if it's expected to be > 0;
+   * otherwise it makes sure that the scalar has not been reported.
+ *
+ * @param {Object} scalars The snapshot of the scalars.
+ * @param {String} scalarName The name of the scalar to check.
+ * @param {Number} value The expected value for the provided scalar.
+ * @param {String} msg The message to print when checking the value.
+ */
+ assertScalar(scalars, scalarName, value, msg) {
+ if (value > 0) {
+ Assert.equal(scalars[scalarName], value, msg);
+ return;
+ }
+ Assert.ok(!(scalarName in scalars), scalarName + " must not be reported.");
+ },
+
+ /**
+   * Asserts that the snapshotted keyed scalars contain the expected
+ * data.
+ *
+ * @param {Object} scalars The snapshot of the keyed scalars.
+ * @param {String} scalarName The name of the keyed scalar to check.
+ * @param {String} key The key that must be within the keyed scalar.
+ * @param {String|Boolean|Number} expectedValue The expected value for the
+ * provided key in the scalar.
+ */
+ assertKeyedScalar(scalars, scalarName, key, expectedValue) {
+ Assert.ok(scalarName in scalars, scalarName + " must be recorded.");
+ Assert.ok(
+ key in scalars[scalarName],
+ scalarName + " must contain the '" + key + "' key."
+ );
+ Assert.equal(
+ scalars[scalarName][key],
+ expectedValue,
+ scalarName + "['" + key + "'] must contain the expected value"
+ );
+ },
+
+ /**
+ * Returns a snapshot of scalars from the specified process.
+ *
+   * @param {String} aProcessName Name of the process to snapshot, e.g.
+   *        "parent" or "content".
+ * @param {boolean} [aKeyed] Set to true if keyed scalars rather than normal
+ * scalars should be snapshotted.
+ * @param {boolean} [aClear] Set to true to clear the scalars once the snapshot
+ * has been obtained.
+ * @param {Number} aChannel The channel dataset type from nsITelemetry.
+   * @returns {Object} The snapshotted scalars from the specified process.
+ */
+ getProcessScalars(
+ aProcessName,
+ aKeyed = false,
+ aClear = false,
+ aChannel = Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS
+ ) {
+ const extended = aChannel == Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS;
+ const currentExtended = Services.telemetry.canRecordExtended;
+ Services.telemetry.canRecordExtended = extended;
+ const scalars = aKeyed
+ ? Services.telemetry.getSnapshotForKeyedScalars("main", aClear)[
+ aProcessName
+ ]
+ : Services.telemetry.getSnapshotForScalars("main", aClear)[aProcessName];
+ Services.telemetry.canRecordExtended = currentExtended;
+ return scalars || {};
+ },
+
+ /* Events */
+
+ /**
+ * Asserts that the number of events, after filtering, is equal to numEvents.
+ *
+ * @param {Number} numEvents The number of events to assert.
+ * @param {Object} filter As per assertEvents.
+ * @param {Object} options As per assertEvents.
+ */
+ assertNumberOfEvents(numEvents, filter, options) {
+ // Create an array of empty objects of length numEvents
+ TelemetryTestUtils.assertEvents(
+ Array.from({ length: numEvents }, () => ({})),
+ filter,
+ options
+ );
+ },
+
+ /**
+ * Asserts that, after optional filtering, the current events snapshot
+ * matches expectedEvents.
+ *
+ * @param {Array} expectedEvents An array of event structures of the form
+ * [category, method, object, value, extra]
+ * or the same as an object with fields named as above.
+ * The array can be empty to assert that there are no events
+ * that match the filter.
+ * Each field can be absent/undefined (to match
+ * everything), a string or null (to match that value), a
+ * RegExp to match what it can match, or a function which
+ * matches by returning true when called with the field.
+ * `extra` is slightly different. If present it must be an
+ * object whose fields are treated the same way as the others.
+ * @param {Object} filter An object of strings or RegExps for first filtering
+ * the event snapshot. Of the form {category, method, object}.
+ * Absent filters filter nothing.
+ * @param {Object} options An object containing any of
+ * - clear {bool} clear events. Default true.
+ * - process {string} the process to examine. Default parent.
+ */
+ assertEvents(
+ expectedEvents,
+ filter = {},
+ { clear = true, process = "parent" } = {}
+ ) {
+ // Step 0: Snapshot and clear.
+ let snapshots = Services.telemetry.snapshotEvents(
+ Ci.nsITelemetry.DATASET_PRERELEASE_CHANNELS,
+ clear
+ );
+ if (expectedEvents.length === 0 && !(process in snapshots)) {
+ // Job's done!
+ return;
+ }
+ Assert.ok(
+ process in snapshots,
+ `${process} must be in snapshot. Has [${Object.keys(snapshots)}].`
+ );
+ let snapshot = snapshots[process];
+
+ // Step 1: Filter.
+ let {
+ category: filterCategory,
+ method: filterMethod,
+ object: filterObject,
+ } = filter;
+ let matches = (expected, actual) => {
+ if (expected === undefined) {
+ return true;
+ } else if (expected && expected.test) {
+ // Possibly a RegExp.
+ return expected.test(actual);
+ } else if (typeof expected === "function") {
+ return expected(actual);
+ }
+ return expected === actual;
+ };
+
+ let filtered = snapshot
+ .map(([, /* timestamp */ category, method, object, value, extra]) => {
+ // We don't care about the `timestamp` value.
+ // Tests that examine that value should use `snapshotEvents` directly.
+ return [category, method, object, value, extra];
+ })
+ .filter(([category, method, object]) => {
+ return (
+ matches(filterCategory, category) &&
+ matches(filterMethod, method) &&
+ matches(filterObject, object)
+ );
+ });
+
+ // Step 2: Match.
+ Assert.equal(
+ filtered.length,
+ expectedEvents.length,
+ "After filtering we must have the expected number of events."
+ );
+ if (expectedEvents.length === 0) {
+ // Job's done!
+ return;
+ }
+
+ // Transform object-type expected events to array-type to match snapshot.
+ if (!Array.isArray(expectedEvents[0])) {
+ expectedEvents = expectedEvents.map(
+ ({ category, method, object, value, extra }) => [
+ category,
+ method,
+ object,
+ value,
+ extra,
+ ]
+ );
+ }
+
+ const FIELD_NAMES = ["category", "method", "object", "value", "extra"];
+ const EXTRA_INDEX = 4;
+ for (let i = 0; i < expectedEvents.length; ++i) {
+ let expected = expectedEvents[i];
+ let actual = filtered[i];
+
+ // Match everything up to `extra`
+ for (let j = 0; j < EXTRA_INDEX; ++j) {
+ if (expected[j] === undefined) {
+ // Don't spam the assert log with unspecified fields.
+ continue;
+ }
+ Assert.report(
+ !matches(expected[j], actual[j]),
+ actual[j],
+ expected[j],
+ `${FIELD_NAMES[j]} in event ${actual[0]}#${actual[1]}#${actual[2]} must match.`,
+ "matches"
+ );
+ }
+
+ // Match extra
+ if (
+ expected.length > EXTRA_INDEX &&
+ expected[EXTRA_INDEX] !== undefined
+ ) {
+ Assert.ok(
+ actual.length > EXTRA_INDEX,
+ `Actual event ${actual[0]}#${actual[1]}#${actual[2]} expected to have extra.`
+ );
+ let expectedExtra = expected[EXTRA_INDEX];
+ let actualExtra = actual[EXTRA_INDEX];
+ for (let [key, value] of Object.entries(expectedExtra)) {
+ Assert.ok(
+ key in actualExtra,
+ `Expected key ${key} must be in actual extra. Actual keys: [${Object.keys(
+ actualExtra
+ )}].`
+ );
+ Assert.report(
+ !matches(value, actualExtra[key]),
+ actualExtra[key],
+ value,
+ `extra[${key}] must match in event ${actual[0]}#${actual[1]}#${actual[2]}.`,
+ "matches"
+ );
+ }
+ }
+ }
+ },
+
+ /* Histograms */
+
+ /**
+ * Clear and get the named histogram.
+ *
+ * @param {String} name The name of the histogram
+ * @returns {Object} The obtained histogram.
+ */
+ getAndClearHistogram(name) {
+ let histogram = Services.telemetry.getHistogramById(name);
+ histogram.clear();
+ return histogram;
+ },
+
+ /**
+ * Clear and get the named keyed histogram.
+ *
+ * @param {String} name The name of the keyed histogram
+ * @returns {Object} The obtained keyed histogram.
+ */
+ getAndClearKeyedHistogram(name) {
+ let histogram = Services.telemetry.getKeyedHistogramById(name);
+ histogram.clear();
+ return histogram;
+ },
+
+ /**
+   * Assert that the value at the given histogram index matches the expected
+   * value, and that all other indexes are zero.
+ *
+ * @param {Object} histogram The histogram to check.
+ * @param {Number} index The index to check against the expected value.
+   * @param {Number} expected The expected value at that index.
+ */
+ assertHistogram(histogram, index, expected) {
+ const snapshot = histogram.snapshot();
+ let found = false;
+ for (let [i, val] of Object.entries(snapshot.values)) {
+ if (i == index) {
+ found = true;
+ Assert.equal(
+ val,
+ expected,
+ `expected counts should match for ${histogram.name()} at index ${i}`
+ );
+ } else {
+ Assert.equal(
+ val,
+ 0,
+ `unexpected counts should be zero for ${histogram.name()} at index ${i}`
+ );
+ }
+ }
+ Assert.ok(
+ found,
+ `Should have found an entry for ${histogram.name()} at index ${index}`
+ );
+ },
+
+ /**
+ * Assert that a key within a keyed histogram contains the required sum.
+ *
+ * @param {Object} histogram The keyed histogram to check.
+ * @param {String} key The key to check.
+   * @param {Number} [expected] The expected sum for the key. If omitted, the
+   *        key is asserted to be absent from the snapshot.
+ */
+ assertKeyedHistogramSum(histogram, key, expected) {
+ const snapshot = histogram.snapshot();
+ if (expected === undefined) {
+ Assert.ok(
+ !(key in snapshot),
+ `The histogram ${histogram.name()} must not contain ${key}.`
+ );
+ return;
+ }
+ Assert.ok(
+ key in snapshot,
+ `The histogram ${histogram.name()} must contain ${key}.`
+ );
+ Assert.equal(
+ snapshot[key].sum,
+ expected,
+ `The key ${key} must contain the expected sum in ${histogram.name()}.`
+ );
+ },
+
+ /**
+   * Assert that, for a key within a keyed histogram, the value at the given
+   * index matches the expected value and that all other indexes are zero.
+ *
+ * @param {Object} histogram The keyed histogram to check.
+ * @param {String} key The key to check.
+ * @param {Number} index The index to check against the expected value.
+   * @param {Number} [expected] The expected value for the key at the given index.
+ */
+ assertKeyedHistogramValue(histogram, key, index, expected) {
+ const snapshot = histogram.snapshot();
+ if (!(key in snapshot)) {
+ Assert.ok(false, `The histogram ${histogram.name()} must contain ${key}`);
+ return;
+ }
+ for (let [i, val] of Object.entries(snapshot[key].values)) {
+ if (i == index) {
+ Assert.equal(
+ val,
+ expected,
+ `expected counts should match for ${histogram.name()} at index ${i}`
+ );
+ } else {
+ Assert.equal(
+ val,
+ 0,
+ `unexpected counts should be zero for ${histogram.name()} at index ${i}`
+ );
+ }
+ }
+ },
+};
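
To illustrate how the helpers above are meant to be called from a test, here is a rough sketch using Telemetry's built-in "telemetry.test" probes; it assumes the usual xpcshell telemetry test setup (extended recording enabled) and is not part of this patch:

    // Sketch only: exercising assertEvents(), getProcessScalars() and assertScalar().
    const { TelemetryTestUtils } = ChromeUtils.import(
      "resource://testing-common/TelemetryTestUtils.jsm"
    );

    add_task(async function test_usage_sketch() {
      Services.telemetry.clearEvents();
      Services.telemetry.setEventRecordingEnabled("telemetry.test", true);
      Services.telemetry.recordEvent("telemetry.test", "test1", "object1", "value");

      // Expected events are [category, method, object, value, extra]; undefined
      // fields match anything, and the filter narrows the snapshot first.
      TelemetryTestUtils.assertEvents(
        [["telemetry.test", "test1", "object1", "value", undefined]],
        { category: "telemetry.test" }
      );

      Services.telemetry.scalarSet("telemetry.test.unsigned_int_kind", 3);
      let scalars = TelemetryTestUtils.getProcessScalars("parent", false, true);
      TelemetryTestUtils.assertScalar(
        scalars,
        "telemetry.test.unsigned_int_kind",
        3,
        "The test scalar should have the value we just set."
      );
    });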