author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:29 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:29 +0000
commit    59203c63bb777a3bacec32fb8830fba33540e809 (patch)
tree      58298e711c0ff0575818c30485b44a2f21bf28a0 /toolkit/components/telemetry
parent    Adding upstream version 126.0.1. (diff)
Adding upstream version 127.0. (tag: upstream/127.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'toolkit/components/telemetry')
-rw-r--r--  toolkit/components/telemetry/Events.yaml | 27
-rw-r--r--  toolkit/components/telemetry/Histograms.json | 114
-rw-r--r--  toolkit/components/telemetry/Scalars.yaml | 47
-rw-r--r--  toolkit/components/telemetry/app/TelemetryControllerParent.sys.mjs | 21
-rw-r--r--  toolkit/components/telemetry/app/TelemetryEnvironment.sys.mjs | 7
-rw-r--r--  toolkit/components/telemetry/build_scripts/mozparsers/parse_events.py | 6
-rw-r--r--  toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py | 22
-rw-r--r--  toolkit/components/telemetry/build_scripts/mozparsers/parse_scalars.py | 7
-rw-r--r--  toolkit/components/telemetry/build_scripts/mozparsers/shared_telemetry_utils.py | 8
-rw-r--r--  toolkit/components/telemetry/core/Telemetry.cpp | 3
-rw-r--r--  toolkit/components/telemetry/core/TelemetryCommon.cpp | 6
-rw-r--r--  toolkit/components/telemetry/core/TelemetryCommon.h | 6
-rw-r--r--  toolkit/components/telemetry/core/TelemetryHistogram.cpp | 647
-rw-r--r--  toolkit/components/telemetry/core/TelemetryHistogram.h | 14
-rw-r--r--  toolkit/components/telemetry/core/TelemetryScalar.cpp | 794
-rw-r--r--  toolkit/components/telemetry/core/TelemetryScalar.h | 18
-rw-r--r--  toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs | 7
-rw-r--r--  toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml | 2
-rw-r--r--  toolkit/components/telemetry/dap/ffi-gtest/test.rs | 2
-rw-r--r--  toolkit/components/telemetry/dap/ffi/Cargo.toml | 3
-rw-r--r--  toolkit/components/telemetry/dap/ffi/src/lib.rs | 119
-rw-r--r--  toolkit/components/telemetry/dap/ffi/src/types.rs | 83
-rw-r--r--  toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js | 2
-rw-r--r--  toolkit/components/telemetry/docs/data/environment.rst | 12
-rw-r--r--  toolkit/components/telemetry/docs/internals/preferences.rst | 20
-rw-r--r--  toolkit/components/telemetry/docs/internals/review.rst | 2
-rw-r--r--  toolkit/components/telemetry/geckoview/gtest/TestGeckoViewStreaming.cpp | 237
-rw-r--r--  toolkit/components/telemetry/geckoview/gtest/moz.build | 28
-rw-r--r--  toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.cpp | 282
-rw-r--r--  toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.h | 55
-rw-r--r--  toolkit/components/telemetry/geckoview/streaming/metrics.yaml | 13
-rw-r--r--  toolkit/components/telemetry/moz.build | 17
-rw-r--r--  toolkit/components/telemetry/tests/gtest/TestScalars.cpp | 62
-rw-r--r--  toolkit/components/telemetry/tests/python/test_histogramtools_strict.py | 47
-rw-r--r--  toolkit/components/telemetry/tests/python/test_parse_events.py | 16
-rw-r--r--  toolkit/components/telemetry/tests/python/test_parse_scalars.py | 19
-rw-r--r--  toolkit/components/telemetry/tests/unit/TelemetryEnvironmentTesting.sys.mjs | 7
-rw-r--r--  toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js | 58
-rw-r--r--  toolkit/components/telemetry/tests/unit/xpcshell.toml | 1
39 files changed, 263 insertions, 2578 deletions
diff --git a/toolkit/components/telemetry/Events.yaml b/toolkit/components/telemetry/Events.yaml
index 5d3d433a05..339fd1feb8 100644
--- a/toolkit/components/telemetry/Events.yaml
+++ b/toolkit/components/telemetry/Events.yaml
@@ -294,33 +294,6 @@ addonsManager:
record_in_processes: ["main"]
bug_numbers: [1433335, 1515697, 1523641, 1549770, 1590736, 1630596, 1672570, 1714251, 1749878, 1781974, 1817100, 1861295]
release_channel_collection: opt-out
- report:
- description: >
- An abuse report submitted by a user for a given extension. The object of the event
- represent the report entry point, the value is the id of the addon being reported.
- objects:
- - amo
- - menu
- - toolbar_context_menu
- - unified_context_menu
- - uninstall
- extra_keys:
- addon_type: >
- The type of the add-on being reported (missing on ERROR_ADDON_NOT_FOUND, ERROR_AMODETAILS_NOTFOUND
- and ERROR_AMODETAILS_FAILURE).
- error_type: >
- AbuseReport Error Type (included in case of submission failures). The error types include
- ERROR_ABORTED_SUBMIT, ERROR_ADDON_NOT_FOUND, ERROR_CLIENT, ERROR_NETWORK, ERROR_UNKNOWN,
- ERROR_RECENT_SUBMIT, ERROR_SERVER, ERROR_AMODETAILS_NOTFOUND, ERROR_AMODETAILS_FAILURE.
- notification_emails: ["addons-dev-internal@mozilla.com"]
- expiry_version: "132"
- products:
- - "firefox"
- - "fennec"
- - "thunderbird"
- record_in_processes: ["main"]
- bug_numbers: [1544927, 1580561, 1590736, 1630596, 1672570, 1714251, 1749878, 1780746, 1781974, 1817100, 1861295]
- release_channel_collection: opt-out
reportSuspiciousSite:
description: >
Sent when a user clicks "Report Suspicious Site" on the dropdown menu of the third-
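For context, the `report` event removed above was recorded through the legacy event Telemetry API. The following is a hedged sketch (not part of the patch) of what such a call looked like, using the category, method, object list, and extra keys from the removed entry; the add-on id and extra values are made up for illustration.

  // Illustrative only: values are hypothetical, the shape comes from the
  // removed Events.yaml entry above.
  Services.telemetry.recordEvent(
    "addonsManager",                    // category
    "report",                           // method removed by this commit
    "menu",                             // one of the declared objects
    "hypothetical-addon@example.com",   // value: id of the add-on being reported
    { addon_type: "extension" }         // extra key from the removed definition
  );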
diff --git a/toolkit/components/telemetry/Histograms.json b/toolkit/components/telemetry/Histograms.json
index 4f0f0e2329..db58482aad 100644
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -8944,7 +8944,7 @@
"record_in_processes": ["main"],
"products": ["firefox"],
"alert_emails": ["fx-search-telemetry@mozilla.com"],
- "expires_in_version": "128",
+ "expires_in_version": "131",
"kind": "exponential",
"low": 16,
"high": 5000,
@@ -9738,7 +9738,7 @@
},
"TELEMETRY_TEST_CATEGORICAL_OPTOUT": {
"record_in_processes": ["main", "content"],
- "products": ["firefox", "fennec", "geckoview_streaming", "thunderbird"],
+ "products": ["firefox", "fennec", "thunderbird"],
"alert_emails": ["telemetry-client-dev@mozilla.com"],
"bug_numbers": [1188888],
"expires_in_version": "never",
@@ -9956,7 +9956,7 @@
},
"TELEMETRY_TEST_EXPIRED": {
"record_in_processes": ["main", "content"],
- "products": ["firefox", "fennec", "thunderbird", "geckoview_streaming"],
+ "products": ["firefox", "fennec", "thunderbird"],
"alert_emails": ["telemetry-client-dev@mozilla.com"],
"expires_in_version": "4",
"kind": "linear",
@@ -10211,32 +10211,6 @@
"description": "a testing histogram; not meant to be touched",
"record_into_store": ["main", "sync"]
},
- "TELEMETRY_TEST_STREAMING": {
- "record_in_processes": ["main"],
- "products": ["geckoview_streaming"],
- "alert_emails": ["telemetry-client-dev@mozilla.com"],
- "expires_in_version": "never",
- "kind": "linear",
- "low": 1,
- "high": 2147483646,
- "n_buckets": 10,
- "releaseChannelCollection": "opt-out",
- "bug_numbers": [1566366],
- "description": "a testing histogram; not meant to be touched"
- },
- "TELEMETRY_TEST_STREAMING_2": {
- "record_in_processes": ["main"],
- "products": ["geckoview_streaming"],
- "alert_emails": ["telemetry-client-dev@mozilla.com"],
- "expires_in_version": "never",
- "kind": "linear",
- "low": 1,
- "high": 2147483646,
- "n_buckets": 10,
- "releaseChannelCollection": "opt-out",
- "bug_numbers": [1566366],
- "description": "a testing histogram; not meant to be touched"
- },
"STARTUP_CRASH_DETECTED": {
"record_in_processes": ["main", "content"],
"products": ["firefox", "fennec", "thunderbird"],
@@ -16096,68 +16070,6 @@
"description": "The amount of time it takes for a PageAction popup to open, keyed by addon id.",
"keyed": true
},
- "WEBEXT_STORAGE_LOCAL_GET_MS": {
- "record_in_processes": ["main", "content"],
- "products": ["firefox", "fennec"],
- "alert_emails": ["addons-dev-internal@mozilla.com", "lgreco@mozilla.com"],
- "bug_numbers": [
- 1371398, 1513556, 1578225, 1623315, 1666980, 1706839, 1745271, 1777402,
- 1811155, 1861303
- ],
- "expires_in_version": "132",
- "kind": "exponential",
- "releaseChannelCollection": "opt-out",
- "high": 50000,
- "n_buckets": 100,
- "description": "The amount of time it takes to perform a get via storage.local using the JSONFile backend."
- },
- "WEBEXT_STORAGE_LOCAL_GET_MS_BY_ADDONID": {
- "record_in_processes": ["main", "content"],
- "products": ["firefox", "fennec"],
- "alert_emails": ["addons-dev-internal@mozilla.com", "lgreco@mozilla.com"],
- "bug_numbers": [
- 1483002, 1513556, 1578225, 1623315, 1666980, 1706839, 1745271, 1777402,
- 1811155, 1861303
- ],
- "expires_in_version": "132",
- "kind": "exponential",
- "releaseChannelCollection": "opt-out",
- "high": 50000,
- "n_buckets": 100,
- "description": "The amount of time it takes to perform a get via storage.local using the JSONFile backend, keyed by addon id.",
- "keyed": true
- },
- "WEBEXT_STORAGE_LOCAL_SET_MS": {
- "record_in_processes": ["main", "content"],
- "products": ["firefox", "fennec"],
- "alert_emails": ["addons-dev-internal@mozilla.com", "lgreco@mozilla.com"],
- "bug_numbers": [
- 1371398, 1513556, 1578225, 1623315, 1666980, 1706839, 1745271, 1777402,
- 1811155, 1861303
- ],
- "expires_in_version": "132",
- "kind": "exponential",
- "releaseChannelCollection": "opt-out",
- "high": 50000,
- "n_buckets": 100,
- "description": "The amount of time it takes to perform a set via storage.local using the JSONFile backend."
- },
- "WEBEXT_STORAGE_LOCAL_SET_MS_BY_ADDONID": {
- "record_in_processes": ["main", "content"],
- "products": ["firefox", "fennec"],
- "alert_emails": ["addons-dev-internal@mozilla.com", "lgreco@mozilla.com"],
- "bug_numbers": [
- 1483002, 1513556, 1578225, 1623315, 1666980, 1706839, 1745271, 1777402,
- 1811155, 1861303
- ],
- "expires_in_version": "132",
- "kind": "exponential",
- "releaseChannelCollection": "opt-out",
- "high": 50000,
- "n_buckets": 100,
- "description": "The amount of time it takes to perform a set via storage.local using the JSONFile backend, keyed by addon id.",
- "keyed": true
- },
"WEBEXT_STORAGE_LOCAL_IDB_GET_MS": {
"record_in_processes": ["main", "content"],
"products": ["firefox", "fennec"],
@@ -16863,26 +16775,6 @@
"n_buckets": 32,
"description": "The number of style and layout flushes that occur per Refresh Driver tick."
},
- "IDB_CUSTOM_OPEN_WITH_OPTIONS_COUNT": {
- "record_in_processes": ["main", "content"],
- "products": ["firefox", "fennec"],
- "bug_numbers": [1566758],
- "alert_emails": ["sgiesecke@mozilla.com"],
- "expires_in_version": "74",
- "kind": "categorical",
- "labels": [
- "system",
- "content_file",
- "content_http_https",
- "content_moz_ext",
- "content_about",
- "content_other",
- "expanded",
- "other"
- ],
- "releaseChannelCollection": "opt-out",
- "description": "Tracking the use of the custom IDBFactory.open overload accepting an options dictionary in one of the categories."
- },
"PRESSHELL_LAYOUT_TOTAL_MS_PER_TICK": {
"record_in_processes": ["main", "content"],
"products": ["firefox", "fennec"],
diff --git a/toolkit/components/telemetry/Scalars.yaml b/toolkit/components/telemetry/Scalars.yaml
index f63b8b5fdb..0822865b4c 100644
--- a/toolkit/components/telemetry/Scalars.yaml
+++ b/toolkit/components/telemetry/Scalars.yaml
@@ -1786,9 +1786,10 @@ extensions.startupCache:
write_byteLength:
bug_numbers:
- 1767336
+ - 1892151
description: >
The amount of bytes written into the Extensions StartupCache file.
- expires: "128"
+ expires: "135"
kind: uint
notification_emails:
- addons-dev-internal@mozilla.com
@@ -1800,10 +1801,11 @@ extensions.startupCache:
read_errors:
bug_numbers:
- 1767336
+ - 1892151
description: >
The amount of times an unexpected error has been raised while
reading the Extensions StartupCache file
- expires: "128"
+ expires: "135"
kind: uint
keyed: true
notification_emails:
@@ -2947,6 +2949,7 @@ media:
device_hardware_decoding_support:
bug_numbers:
- 1850594
+ - 1892516
description: >
Record the hardware decoding availability on devices, the key is the name of video codec, eg. h264, av1.
expires: never
@@ -2958,7 +2961,7 @@ media:
products:
- 'firefox'
record_in_processes:
- - 'gpu'
+ - 'main'
# The following section contains content process base counters.
dom.contentprocess:
@@ -8789,24 +8792,6 @@ contextual.services.quicksuggest:
- 'firefox'
record_in_processes:
- main
- help_dynamic_wikipedia:
- bug_numbers:
- - 1800993
- description: >
- A keyed uint recording how many times the user has clicked on the help
- button in sponsored Firefox Suggest dynamic wikipedia results
- in the urlbar (not including the help button). The key is the 1-based
- index of each result.
- expires: never
- kind: uint
- keyed: true
- notification_emails:
- - fx-search-telemetry@mozilla.com
- release_channel_collection: opt-out
- products:
- - 'firefox'
- record_in_processes:
- - main
help_nonsponsored:
bug_numbers:
- 1800993
@@ -8841,23 +8826,6 @@ contextual.services.quicksuggest:
- 'firefox'
record_in_processes:
- main
- help_weather:
- bug_numbers:
- - 1804536
- description: >
- A keyed uint recording how many times the user has clicked on the help
- button in Firefox Suggests (a.k.a. Quick Suggest) weather results in the
- urlbar. The key is the 1-based index of each result.
- expires: never
- kind: uint
- keyed: true
- notification_emails:
- - fx-search-telemetry@mozilla.com
- release_channel_collection: opt-out
- products:
- - 'firefox'
- record_in_processes:
- - main
block_dynamic_wikipedia:
bug_numbers:
- 1800993
@@ -9198,7 +9166,6 @@ telemetry.test:
products:
- 'firefox'
- 'fennec'
- - 'geckoview_streaming'
- 'thunderbird'
record_in_processes:
- 'main' # test_ChildScalars.js depends on this being main-only.
@@ -9216,7 +9183,6 @@ telemetry.test:
products:
- 'firefox'
- 'fennec'
- - 'geckoview_streaming'
- 'thunderbird'
record_in_processes:
- 'main'
@@ -9234,7 +9200,6 @@ telemetry.test:
products:
- 'firefox'
- 'fennec'
- - 'geckoview_streaming'
- 'thunderbird'
record_in_processes:
- 'main'
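For reference, the keyed scalar whose `record_in_processes` moves from 'gpu' to 'main' above is recorded through the usual keyed-scalar API. A minimal sketch, not part of the patch, assuming a boolean value per codec key as the probe description suggests; whether the real recording site uses the JS or the C++ API is not shown in this diff.

  // Illustrative key/value only.
  Services.telemetry.keyedScalarSet(
    "media.device_hardware_decoding_support",
    "h264",   // key: the video codec name, per the probe description
    true
  );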
diff --git a/toolkit/components/telemetry/app/TelemetryControllerParent.sys.mjs b/toolkit/components/telemetry/app/TelemetryControllerParent.sys.mjs
index b9f8d5f85a..9a15142349 100644
--- a/toolkit/components/telemetry/app/TelemetryControllerParent.sys.mjs
+++ b/toolkit/components/telemetry/app/TelemetryControllerParent.sys.mjs
@@ -256,6 +256,15 @@ export var TelemetryController = Object.freeze({
promiseInitialized() {
return Impl.promiseInitialized();
},
+
+ /**
+ * Triggers TelemetryController's delayed initialization now and waits for its completion.
+ * The returned promise is guaranteed to resolve before TelemetryController shuts down.
+ * @return {Promise} Resolved when the delayed TelemetryController initialization has completed.
+ */
+ ensureInitialized() {
+ return Impl.ensureInitialized();
+ },
});
var Impl = {
@@ -1158,6 +1167,18 @@ var Impl = {
return this._delayedInitTaskDeferred.promise;
},
+ /**
+ * Triggers TelemetryController's delayed initialization now and waits for its completion.
+ * This will complete before TelemetryController shuts down.
+ * @return {Promise} Resolved when the delayed TelemetryController initialization has completed.
+ */
+ ensureInitialized() {
+ if (this._delayedInitTask) {
+ return this._delayedInitTask.finalize();
+ }
+ return Promise.resolve();
+ },
+
getCurrentPingData(aSubsession) {
this._log.trace("getCurrentPingData - subsession: " + aSubsession);
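The new `ensureInitialized()` entry point simply finalizes the delayed-init task if it is still pending. A minimal usage sketch, not part of the patch, assuming the usual TelemetryController.sys.mjs module entry point:

  const { TelemetryController } = ChromeUtils.importESModule(
    "resource://gre/modules/TelemetryController.sys.mjs"
  );

  // Run the delayed initialization now (instead of waiting for the
  // post-startup timer) and wait until it has completed.
  await TelemetryController.ensureInitialized();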
diff --git a/toolkit/components/telemetry/app/TelemetryEnvironment.sys.mjs b/toolkit/components/telemetry/app/TelemetryEnvironment.sys.mjs
index 18d46a3565..5b917293c1 100644
--- a/toolkit/components/telemetry/app/TelemetryEnvironment.sys.mjs
+++ b/toolkit/components/telemetry/app/TelemetryEnvironment.sys.mjs
@@ -895,6 +895,8 @@ EnvironmentAddonBuilder.prototype = {
hasBinaryComponents: false,
installDay: Utils.millisecondsToDays(installDate.getTime()),
updateDay: Utils.millisecondsToDays(updateDate.getTime()),
+ signedState: theme.signedState,
+ signedTypes: JSON.stringify(theme.signedTypes),
};
}
@@ -1672,6 +1674,7 @@ EnvironmentCache.prototype = {
let creationDate = await profileAccessor.created;
let resetDate = await profileAccessor.reset;
let firstUseDate = await profileAccessor.firstUse;
+ let recoveredFromBackup = await profileAccessor.recoveredFromBackup;
this._currentEnvironment.profile.creationDate =
Utils.millisecondsToDays(creationDate);
@@ -1683,6 +1686,10 @@ EnvironmentCache.prototype = {
this._currentEnvironment.profile.firstUseDate =
Utils.millisecondsToDays(firstUseDate);
}
+ if (recoveredFromBackup) {
+ this._currentEnvironment.profile.recoveredFromBackup =
+ Utils.millisecondsToDays(recoveredFromBackup);
+ }
},
/**
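The environment changes above add `signedState`/`signedTypes` to the active theme record and a `recoveredFromBackup` date to the profile section. A minimal sketch, not part of the patch, of where the new profile field surfaces, assuming the standard TelemetryEnvironment accessors:

  const { TelemetryEnvironment } = ChromeUtils.importESModule(
    "resource://gre/modules/TelemetryEnvironment.sys.mjs"
  );

  await TelemetryEnvironment.onInitialized();
  let profile = TelemetryEnvironment.currentEnvironment.profile;
  // Present only for profiles restored from a backup; expressed in days
  // since the epoch, like creationDate and firstUseDate.
  console.log(profile.recoveredFromBackup);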
diff --git a/toolkit/components/telemetry/build_scripts/mozparsers/parse_events.py b/toolkit/components/telemetry/build_scripts/mozparsers/parse_events.py
index 09ed651917..1275e03a5e 100644
--- a/toolkit/components/telemetry/build_scripts/mozparsers/parse_events.py
+++ b/toolkit/components/telemetry/build_scripts/mozparsers/parse_events.py
@@ -265,12 +265,6 @@ class EventData:
ParserError(
self.identifier + ": Unknown value in products: " + product
).handle_later()
- if utils.is_geckoview_streaming_product(product):
- ParserError(
- "{}: Product `{}` unsupported for Event Telemetry".format(
- self.identifier, product
- )
- ).handle_later()
# Check operating_systems.
operating_systems = definition.get("operating_systems", [])
diff --git a/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py b/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py
index 626188bf06..92fff31a48 100644
--- a/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py
+++ b/toolkit/components/telemetry/build_scripts/mozparsers/parse_histograms.py
@@ -46,12 +46,6 @@ BASE_DOC_URL = (
HISTOGRAMS_DOC_URL = BASE_DOC_URL + "collection/histograms.html"
SCALARS_DOC_URL = BASE_DOC_URL + "collection/scalars.html"
-GECKOVIEW_STREAMING_SUPPORTED_KINDS = [
- "linear",
- "exponential",
- "categorical",
-]
-
def linear_buckets(dmin, dmax, n_buckets):
ret_array = [0] * n_buckets
@@ -435,22 +429,6 @@ class Histogram:
'Histogram "%s" has unknown product "%s" in %s.\n%s'
% (name, product, field, DOC_URL)
).handle_later()
- if utils.is_geckoview_streaming_product(product):
- kind = definition.get("kind")
- if kind not in GECKOVIEW_STREAMING_SUPPORTED_KINDS:
- ParserError(
- (
- 'Histogram "%s" is of kind "%s" which is unsupported for '
- 'product "%s".'
- )
- % (name, kind, product)
- ).handle_later()
- keyed = definition.get("keyed")
- if keyed:
- ParserError(
- 'Keyed histograms like "%s" are unsupported for product "%s"'
- % (name, product)
- ).handle_later()
def check_operating_systems(self, name, definition):
if not self._strict_type_checks:
diff --git a/toolkit/components/telemetry/build_scripts/mozparsers/parse_scalars.py b/toolkit/components/telemetry/build_scripts/mozparsers/parse_scalars.py
index 5ec591b393..b46cc7c3db 100644
--- a/toolkit/components/telemetry/build_scripts/mozparsers/parse_scalars.py
+++ b/toolkit/components/telemetry/build_scripts/mozparsers/parse_scalars.py
@@ -308,13 +308,6 @@ class ScalarType:
+ product
+ ".\nSee: {}".format(BASE_DOC_URL)
).handle_later()
- if utils.is_geckoview_streaming_product(product):
- keyed = definition.get("keyed")
- if keyed:
- ParserError(
- "%s - keyed Scalars not supported for product %s"
- % (self._name, product)
- ).handle_later()
# Validate the expiration version.
# Historical versions of Scalars.json may contain expiration versions
diff --git a/toolkit/components/telemetry/build_scripts/mozparsers/shared_telemetry_utils.py b/toolkit/components/telemetry/build_scripts/mozparsers/shared_telemetry_utils.py
index 4b4cc9f685..80ae685c45 100644
--- a/toolkit/components/telemetry/build_scripts/mozparsers/shared_telemetry_utils.py
+++ b/toolkit/components/telemetry/build_scripts/mozparsers/shared_telemetry_utils.py
@@ -26,15 +26,13 @@ KNOWN_PROCESS_FLAGS = {
"all_childs": "AllChildren", # Supporting files from before bug 1363725
}
-GECKOVIEW_STREAMING_PRODUCT = "geckoview_streaming"
-
SUPPORTED_PRODUCTS = {
"firefox": "Firefox",
"fennec": "Fennec",
- GECKOVIEW_STREAMING_PRODUCT: "GeckoviewStreaming",
"thunderbird": "Thunderbird",
# Historical, deprecated values:
# 'geckoview': 'Geckoview',
+ # "geckoview_streaming": "GeckoviewStreaming",
}
SUPPORTED_OPERATING_SYSTEMS = [
@@ -114,10 +112,6 @@ def is_valid_product(name):
return name in SUPPORTED_PRODUCTS
-def is_geckoview_streaming_product(name):
- return name == GECKOVIEW_STREAMING_PRODUCT
-
-
def is_valid_os(name):
return name in SUPPORTED_OPERATING_SYSTEMS
diff --git a/toolkit/components/telemetry/core/Telemetry.cpp b/toolkit/components/telemetry/core/Telemetry.cpp
index a0effb02bb..3214210c39 100644
--- a/toolkit/components/telemetry/core/Telemetry.cpp
+++ b/toolkit/components/telemetry/core/Telemetry.cpp
@@ -16,9 +16,6 @@
#endif
#include "base/pickle.h"
#include "base/process_util.h"
-#if defined(MOZ_TELEMETRY_GECKOVIEW)
-# include "geckoview/TelemetryGeckoViewPersistence.h"
-#endif
#include "ipc/TelemetryIPCAccumulator.h"
#include "jsapi.h"
#include "jsfriendapi.h"
diff --git a/toolkit/components/telemetry/core/TelemetryCommon.cpp b/toolkit/components/telemetry/core/TelemetryCommon.cpp
index 7113a682c9..278763eaff 100644
--- a/toolkit/components/telemetry/core/TelemetryCommon.cpp
+++ b/toolkit/components/telemetry/core/TelemetryCommon.cpp
@@ -194,11 +194,7 @@ JSString* ToJSString(JSContext* cx, const nsAString& aStr) {
SupportedProduct GetCurrentProduct() {
#if defined(MOZ_WIDGET_ANDROID)
- if (mozilla::StaticPrefs::toolkit_telemetry_geckoview_streaming()) {
- return SupportedProduct::GeckoviewStreaming;
- } else {
- return SupportedProduct::Fennec;
- }
+ return SupportedProduct::Fennec;
#elif defined(MOZ_THUNDERBIRD)
return SupportedProduct::Thunderbird;
#else
diff --git a/toolkit/components/telemetry/core/TelemetryCommon.h b/toolkit/components/telemetry/core/TelemetryCommon.h
index 141410f150..47feef94d2 100644
--- a/toolkit/components/telemetry/core/TelemetryCommon.h
+++ b/toolkit/components/telemetry/core/TelemetryCommon.h
@@ -42,10 +42,8 @@ static_assert(static_cast<uint16_t>(RecordedProcessType::Main) == 1,
enum class SupportedProduct : uint8_t {
Firefox = (1 << 0),
Fennec = (1 << 1),
- // Note that `1 << 2` (former GeckoView) is missing in the representation
- // but isn't necessary to be maintained, but we see no point in filling it
- // at this time.
- GeckoviewStreaming = (1 << 3),
+ // Note that `1 << 2` and `1 << 3` (former GeckoView, GeckoviewStreaming) are
+ // missing in the representation. We see no point in filling them at this time.
Thunderbird = (1 << 4),
};
MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(SupportedProduct);
diff --git a/toolkit/components/telemetry/core/TelemetryHistogram.cpp b/toolkit/components/telemetry/core/TelemetryHistogram.cpp
index 88ac88eb9e..0ba7009d51 100644
--- a/toolkit/components/telemetry/core/TelemetryHistogram.cpp
+++ b/toolkit/components/telemetry/core/TelemetryHistogram.cpp
@@ -8,7 +8,6 @@
#include <limits>
#include "base/histogram.h"
-#include "geckoview/streaming/GeckoViewStreamingTelemetry.h"
#include "ipc/TelemetryIPCAccumulator.h"
#include "jsapi.h"
#include "jsfriendapi.h"
@@ -700,15 +699,6 @@ nsresult internal_HistogramAdd(const StaticMutexAutoLock& aLock,
return NS_OK;
}
- if (&histogram != gExpiredHistogram &&
- GetCurrentProduct() == SupportedProduct::GeckoviewStreaming) {
- const HistogramInfo& info = gHistogramInfos[id];
- GeckoViewStreamingTelemetry::HistogramAccumulate(
- nsDependentCString(info.name()),
- info.histogramType == nsITelemetry::HISTOGRAM_CATEGORICAL, value);
- return NS_OK;
- }
-
// The internal representation of a base::Histogram's buckets uses `int`.
// Clamp large values of `value` to be INT_MAX so they continue to be treated
// as large values (instead of negative ones).
@@ -3038,640 +3028,3 @@ size_t TelemetryHistogram::GetHistogramSizesOfIncludingThis(
return n;
}
-
-////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////
-//
-// PRIVATE: GeckoView specific helpers
-
-namespace base {
-class PersistedSampleSet : public base::Histogram::SampleSet {
- public:
- explicit PersistedSampleSet(const nsTArray<base::Histogram::Count>& aCounts,
- int64_t aSampleSum);
-};
-
-PersistedSampleSet::PersistedSampleSet(
- const nsTArray<base::Histogram::Count>& aCounts, int64_t aSampleSum) {
- // Initialize the data in the base class. See Histogram::SampleSet
- // for the fields documentation.
- const size_t numCounts = aCounts.Length();
- counts_.SetLength(numCounts);
-
- for (size_t i = 0; i < numCounts; i++) {
- counts_[i] = aCounts[i];
- redundant_count_ += aCounts[i];
- }
- sum_ = aSampleSum;
-};
-} // namespace base
-
-namespace {
-/**
- * Helper function to write histogram properties to JSON.
- * Please note that this needs to be called between
- * StartObjectProperty/EndObject calls that mark the histogram's
- * JSON creation.
- */
-void internal_ReflectHistogramToJSON(const HistogramSnapshotData& aSnapshot,
- mozilla::JSONWriter& aWriter) {
- aWriter.IntProperty("sum", aSnapshot.mSampleSum);
-
- // Fill the "counts" property.
- aWriter.StartArrayProperty("counts");
- for (size_t i = 0; i < aSnapshot.mBucketCounts.Length(); i++) {
- aWriter.IntElement(aSnapshot.mBucketCounts[i]);
- }
- aWriter.EndArray();
-}
-
-bool internal_CanRecordHistogram(const HistogramID id, ProcessID aProcessType) {
- // Check if we are allowed to record the data.
- if (!CanRecordDataset(gHistogramInfos[id].dataset, internal_CanRecordBase(),
- internal_CanRecordExtended())) {
- return false;
- }
-
- // Check if we're allowed to record in the given process.
- if (aProcessType == ProcessID::Parent && !internal_IsRecordingEnabled(id)) {
- return false;
- }
-
- if (aProcessType != ProcessID::Parent &&
- !CanRecordInProcess(gHistogramInfos[id].record_in_processes,
- aProcessType)) {
- return false;
- }
-
- // Don't record if the current platform is not enabled
- if (!CanRecordProduct(gHistogramInfos[id].products)) {
- return false;
- }
-
- return true;
-}
-
-nsresult internal_ParseHistogramData(
- JSContext* aCx, JS::Handle<JS::PropertyKey> aEntryId,
- JS::Handle<JSObject*> aContainerObj, nsACString& aOutName,
- nsTArray<base::Histogram::Count>& aOutCountArray, int64_t& aOutSum) {
- // Get the histogram name.
- nsAutoJSString histogramName;
- if (!histogramName.init(aCx, aEntryId)) {
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- CopyUTF16toUTF8(histogramName, aOutName);
-
- // Get the data for this histogram.
- JS::Rooted<JS::Value> histogramData(aCx);
- if (!JS_GetPropertyById(aCx, aContainerObj, aEntryId, &histogramData)) {
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- if (!histogramData.isObject()) {
- // base::Histogram data need to be an object. If that's not the case, skip
- // it and try to load the rest of the data.
- return NS_ERROR_FAILURE;
- }
-
- // Get the "sum" property.
- JS::Rooted<JS::Value> sumValue(aCx);
- JS::Rooted<JSObject*> histogramObj(aCx, &histogramData.toObject());
- if (!JS_GetProperty(aCx, histogramObj, "sum", &sumValue)) {
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- if (!JS::ToInt64(aCx, sumValue, &aOutSum)) {
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- // Get the "counts" array.
- JS::Rooted<JS::Value> countsArray(aCx);
- bool countsIsArray = false;
- if (!JS_GetProperty(aCx, histogramObj, "counts", &countsArray) ||
- !JS::IsArrayObject(aCx, countsArray, &countsIsArray)) {
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- if (!countsIsArray) {
- // The "counts" property needs to be an array. If this is not the case,
- // skip this histogram.
- return NS_ERROR_FAILURE;
- }
-
- // Get the length of the array.
- uint32_t countsLen = 0;
- JS::Rooted<JSObject*> countsArrayObj(aCx, &countsArray.toObject());
- if (!JS::GetArrayLength(aCx, countsArrayObj, &countsLen)) {
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- // Parse the "counts" in the array.
- for (uint32_t arrayIdx = 0; arrayIdx < countsLen; arrayIdx++) {
- JS::Rooted<JS::Value> elementValue(aCx);
- int countAsInt = 0;
- if (!JS_GetElement(aCx, countsArrayObj, arrayIdx, &elementValue) ||
- !JS::ToInt32(aCx, elementValue, &countAsInt)) {
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
- aOutCountArray.AppendElement(countAsInt);
- }
-
- return NS_OK;
-}
-
-} // Anonymous namespace
-
-////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////
-//
-// PUBLIC: GeckoView serialization/deserialization functions.
-
-nsresult TelemetryHistogram::SerializeHistograms(mozilla::JSONWriter& aWriter) {
- MOZ_ASSERT(XRE_IsParentProcess(),
- "Only save histograms in the parent process");
- if (!XRE_IsParentProcess()) {
- return NS_ERROR_FAILURE;
- }
-
- // Include the GPU process in histogram snapshots only if we actually tried
- // to launch a process for it.
- bool includeGPUProcess = internal_AttemptedGPUProcess();
-
- // Take a snapshot of the histograms.
- HistogramProcessSnapshotsArray processHistArray;
- {
- StaticMutexAutoLock locker(gTelemetryHistogramMutex);
- // We always request the "opt-in"/"prerelease" dataset: we internally
- // record the right subset, so this will only return "prerelease" if
- // it was recorded.
- if (NS_FAILED(internal_GetHistogramsSnapshot(
- locker, "main"_ns, nsITelemetry::DATASET_PRERELEASE_CHANNELS,
- false /* aClearSubsession */, includeGPUProcess,
- false /* aFilterTest */, processHistArray))) {
- return NS_ERROR_FAILURE;
- }
- }
-
- // Make the JSON calls on the stashed histograms for every process
- for (uint32_t process = 0; process < processHistArray.length(); ++process) {
- aWriter.StartObjectProperty(
- mozilla::MakeStringSpan(GetNameForProcessID(ProcessID(process))));
-
- for (const HistogramSnapshotInfo& hData : processHistArray[process]) {
- HistogramID id = hData.histogramID;
-
- aWriter.StartObjectProperty(
- mozilla::MakeStringSpan(gHistogramInfos[id].name()));
- internal_ReflectHistogramToJSON(hData.data, aWriter);
- aWriter.EndObject();
- }
- aWriter.EndObject();
- }
-
- return NS_OK;
-}
-
-nsresult TelemetryHistogram::SerializeKeyedHistograms(
- mozilla::JSONWriter& aWriter) {
- MOZ_ASSERT(XRE_IsParentProcess(),
- "Only save keyed histograms in the parent process");
- if (!XRE_IsParentProcess()) {
- return NS_ERROR_FAILURE;
- }
-
- // Include the GPU process in histogram snapshots only if we actually tried
- // to launch a process for it.
- bool includeGPUProcess = internal_AttemptedGPUProcess();
-
- // Take a snapshot of the keyed histograms.
- KeyedHistogramProcessSnapshotsArray processHistArray;
- {
- StaticMutexAutoLock locker(gTelemetryHistogramMutex);
- // We always request the "opt-in"/"prerelease" dataset: we internally
- // record the right subset, so this will only return "prerelease" if
- // it was recorded.
- if (NS_FAILED(internal_GetKeyedHistogramsSnapshot(
- locker, "main"_ns, nsITelemetry::DATASET_PRERELEASE_CHANNELS,
- false /* aClearSubsession */, includeGPUProcess,
- false /* aFilterTest */, processHistArray))) {
- return NS_ERROR_FAILURE;
- }
- }
-
- // Serialize the keyed histograms for every process.
- for (uint32_t process = 0; process < processHistArray.length(); ++process) {
- aWriter.StartObjectProperty(
- mozilla::MakeStringSpan(GetNameForProcessID(ProcessID(process))));
-
- const KeyedHistogramSnapshotsArray& hArray = processHistArray[process];
- for (size_t i = 0; i < hArray.length(); ++i) {
- const KeyedHistogramSnapshotInfo& hData = hArray[i];
- HistogramID id = hData.histogramId;
- const HistogramInfo& info = gHistogramInfos[id];
-
- aWriter.StartObjectProperty(mozilla::MakeStringSpan(info.name()));
-
- // Each key is a new object with a "sum" and a "counts" property.
- for (const auto& entry : hData.data) {
- const HistogramSnapshotData& keyData = entry.GetData();
- aWriter.StartObjectProperty(PromiseFlatCString(entry.GetKey()));
- internal_ReflectHistogramToJSON(keyData, aWriter);
- aWriter.EndObject();
- }
-
- aWriter.EndObject();
- }
- aWriter.EndObject();
- }
-
- return NS_OK;
-}
-
-nsresult TelemetryHistogram::DeserializeHistograms(
- JSContext* aCx, JS::Handle<JS::Value> aData) {
- MOZ_ASSERT(XRE_IsParentProcess(),
- "Only load histograms in the parent process");
- if (!XRE_IsParentProcess()) {
- return NS_ERROR_FAILURE;
- }
-
- // Telemetry is disabled. This should never happen, but let's leave this check
- // for consistency with other histogram updates routines.
- if (!internal_CanRecordBase()) {
- return NS_OK;
- }
-
- typedef std::tuple<nsCString, nsTArray<base::Histogram::Count>, int64_t>
- PersistedHistogramTuple;
- typedef mozilla::Vector<PersistedHistogramTuple> PersistedHistogramArray;
- typedef mozilla::Vector<PersistedHistogramArray> PersistedHistogramStorage;
-
- // Before updating the histograms, we need to get the data out of the JS
- // wrappers. We can't hold the histogram mutex while handling JS stuff.
- // Build a <histogram name, value> map.
- JS::Rooted<JSObject*> histogramDataObj(aCx, &aData.toObject());
- JS::Rooted<JS::IdVector> processes(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, histogramDataObj, &processes)) {
- // We can't even enumerate the processes in the loaded data, so
- // there is nothing we could recover from the persistence file. Bail out.
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- // Make sure we have enough storage for all the processes.
- PersistedHistogramStorage histogramsToUpdate;
- if (!histogramsToUpdate.resize(static_cast<uint32_t>(ProcessID::Count))) {
- return NS_ERROR_OUT_OF_MEMORY;
- }
-
- // The following block of code attempts to extract as much data as possible
- // from the serialized JSON, even in case of light data corruptions: if, for
- // example, the data for a single process is corrupted or is in an unexpected
- // form, we press on and attempt to load the data for the other processes.
- JS::Rooted<JS::PropertyKey> process(aCx);
- for (auto& processVal : processes) {
- // This is required as JS API calls require an Handle<jsid> and not a
- // plain jsid.
- process = processVal;
- // Get the process name.
- nsAutoJSString processNameJS;
- if (!processNameJS.init(aCx, process)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Make sure it's valid. Note that this is safe to call outside
- // of a locked section.
- NS_ConvertUTF16toUTF8 processName(processNameJS);
- ProcessID processID = GetIDForProcessName(processName.get());
- if (processID == ProcessID::Count) {
- NS_WARNING(
- nsPrintfCString("Failed to get process ID for %s", processName.get())
- .get());
- continue;
- }
-
- // And its probes.
- JS::Rooted<JS::Value> processData(aCx);
- if (!JS_GetPropertyById(aCx, histogramDataObj, process, &processData)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- if (!processData.isObject()) {
- // |processData| should be an object containing histograms. If this is
- // not the case, silently skip and try to load the data for the other
- // processes.
- continue;
- }
-
- // Iterate through each histogram.
- JS::Rooted<JSObject*> processDataObj(aCx, &processData.toObject());
- JS::Rooted<JS::IdVector> histograms(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, processDataObj, &histograms)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Get a reference to the deserialized data for this process.
- PersistedHistogramArray& deserializedProcessData =
- histogramsToUpdate[static_cast<uint32_t>(processID)];
-
- JS::Rooted<JS::PropertyKey> histogram(aCx);
- for (auto& histogramVal : histograms) {
- histogram = histogramVal;
-
- int64_t sum = 0;
- nsTArray<base::Histogram::Count> deserializedCounts;
- nsCString histogramName;
- if (NS_FAILED(internal_ParseHistogramData(aCx, histogram, processDataObj,
- histogramName,
- deserializedCounts, sum))) {
- continue;
- }
-
- // Finally append the deserialized data to the storage.
- if (!deserializedProcessData.emplaceBack(std::make_tuple(
- std::move(histogramName), std::move(deserializedCounts), sum))) {
- return NS_ERROR_OUT_OF_MEMORY;
- }
- }
- }
-
- // Update the histogram storage.
- {
- StaticMutexAutoLock locker(gTelemetryHistogramMutex);
-
- for (uint32_t process = 0; process < histogramsToUpdate.length();
- ++process) {
- PersistedHistogramArray& processArray = histogramsToUpdate[process];
-
- for (auto& histogramData : processArray) {
- // Attempt to get the corresponding ID for the deserialized histogram
- // name.
- HistogramID id;
- if (NS_FAILED(internal_GetHistogramIdByName(
- locker, std::get<0>(histogramData), &id))) {
- continue;
- }
-
- ProcessID procID = static_cast<ProcessID>(process);
- if (!internal_CanRecordHistogram(id, procID)) {
- // We're not allowed to record this, so don't try to restore it.
- continue;
- }
-
- // Get the Histogram instance: this will instantiate it if it doesn't
- // exist.
- Histogram* w = internal_GetHistogramById(locker, id, procID);
- MOZ_ASSERT(w);
-
- if (!w || w->IsExpired()) {
- continue;
- }
-
- base::Histogram* h = nullptr;
- constexpr auto store = "main"_ns;
- if (!w->GetHistogram(store, &h)) {
- continue;
- }
- MOZ_ASSERT(h);
-
- if (!h) {
- // Don't restore expired histograms.
- continue;
- }
-
- // Make sure that histogram counts have matching sizes. If not,
- // |AddSampleSet| will fail and crash.
- size_t numCounts = std::get<1>(histogramData).Length();
- if (h->bucket_count() != numCounts) {
- MOZ_ASSERT(false,
- "The number of restored buckets does not match with the "
- "on in the definition");
- continue;
- }
-
- // Update the data for the histogram.
- h->AddSampleSet(base::PersistedSampleSet(
- std::move(std::get<1>(histogramData)), std::get<2>(histogramData)));
- }
- }
- }
-
- return NS_OK;
-}
-
-nsresult TelemetryHistogram::DeserializeKeyedHistograms(
- JSContext* aCx, JS::Handle<JS::Value> aData) {
- MOZ_ASSERT(XRE_IsParentProcess(),
- "Only load keyed histograms in the parent process");
- if (!XRE_IsParentProcess()) {
- return NS_ERROR_FAILURE;
- }
-
- // Telemetry is disabled. This should never happen, but let's leave this check
- // for consistency with other histogram updates routines.
- if (!internal_CanRecordBase()) {
- return NS_OK;
- }
-
- typedef std::tuple<nsCString, nsCString, nsTArray<base::Histogram::Count>,
- int64_t>
- PersistedKeyedHistogramTuple;
- typedef mozilla::Vector<PersistedKeyedHistogramTuple>
- PersistedKeyedHistogramArray;
- typedef mozilla::Vector<PersistedKeyedHistogramArray>
- PersistedKeyedHistogramStorage;
-
- // Before updating the histograms, we need to get the data out of the JS
- // wrappers. We can't hold the histogram mutex while handling JS stuff.
- // Build a <histogram name, value> map.
- JS::Rooted<JSObject*> histogramDataObj(aCx, &aData.toObject());
- JS::Rooted<JS::IdVector> processes(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, histogramDataObj, &processes)) {
- // We can't even enumerate the processes in the loaded data, so
- // there is nothing we could recover from the persistence file. Bail out.
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- // Make sure we have enough storage for all the processes.
- PersistedKeyedHistogramStorage histogramsToUpdate;
- if (!histogramsToUpdate.resize(static_cast<uint32_t>(ProcessID::Count))) {
- return NS_ERROR_OUT_OF_MEMORY;
- }
-
- // The following block of code attempts to extract as much data as possible
- // from the serialized JSON, even in case of light data corruptions: if, for
- // example, the data for a single process is corrupted or is in an unexpected
- // form, we press on and attempt to load the data for the other processes.
- JS::Rooted<JS::PropertyKey> process(aCx);
- for (auto& processVal : processes) {
- // This is required as JS API calls require an Handle<jsid> and not a
- // plain jsid.
- process = processVal;
- // Get the process name.
- nsAutoJSString processNameJS;
- if (!processNameJS.init(aCx, process)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Make sure it's valid. Note that this is safe to call outside
- // of a locked section.
- NS_ConvertUTF16toUTF8 processName(processNameJS);
- ProcessID processID = GetIDForProcessName(processName.get());
- if (processID == ProcessID::Count) {
- NS_WARNING(
- nsPrintfCString("Failed to get process ID for %s", processName.get())
- .get());
- continue;
- }
-
- // And its probes.
- JS::Rooted<JS::Value> processData(aCx);
- if (!JS_GetPropertyById(aCx, histogramDataObj, process, &processData)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- if (!processData.isObject()) {
- // |processData| should be an object containing histograms. If this is
- // not the case, silently skip and try to load the data for the other
- // processes.
- continue;
- }
-
- // Iterate through each keyed histogram.
- JS::Rooted<JSObject*> processDataObj(aCx, &processData.toObject());
- JS::Rooted<JS::IdVector> histograms(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, processDataObj, &histograms)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Get a reference to the deserialized data for this process.
- PersistedKeyedHistogramArray& deserializedProcessData =
- histogramsToUpdate[static_cast<uint32_t>(processID)];
-
- JS::Rooted<JS::PropertyKey> histogram(aCx);
- for (auto& histogramVal : histograms) {
- histogram = histogramVal;
- // Get the histogram name.
- nsAutoJSString histogramName;
- if (!histogramName.init(aCx, histogram)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Get the data for this histogram.
- JS::Rooted<JS::Value> histogramData(aCx);
- if (!JS_GetPropertyById(aCx, processDataObj, histogram, &histogramData)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Iterate through each key in the histogram.
- JS::Rooted<JSObject*> keysDataObj(aCx, &histogramData.toObject());
- JS::Rooted<JS::IdVector> keys(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, keysDataObj, &keys)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- JS::Rooted<JS::PropertyKey> key(aCx);
- for (auto& keyVal : keys) {
- key = keyVal;
-
- int64_t sum = 0;
- nsTArray<base::Histogram::Count> deserializedCounts;
- nsCString keyName;
- if (NS_FAILED(internal_ParseHistogramData(
- aCx, key, keysDataObj, keyName, deserializedCounts, sum))) {
- continue;
- }
-
- // Finally append the deserialized data to the storage.
- if (!deserializedProcessData.emplaceBack(std::make_tuple(
- nsCString(NS_ConvertUTF16toUTF8(histogramName)),
- std::move(keyName), std::move(deserializedCounts), sum))) {
- return NS_ERROR_OUT_OF_MEMORY;
- }
- }
- }
- }
-
- // Update the keyed histogram storage.
- {
- StaticMutexAutoLock locker(gTelemetryHistogramMutex);
-
- for (uint32_t process = 0; process < histogramsToUpdate.length();
- ++process) {
- PersistedKeyedHistogramArray& processArray = histogramsToUpdate[process];
-
- for (auto& histogramData : processArray) {
- // Attempt to get the corresponding ID for the deserialized histogram
- // name.
- HistogramID id;
- if (NS_FAILED(internal_GetHistogramIdByName(
- locker, std::get<0>(histogramData), &id))) {
- continue;
- }
-
- ProcessID procID = static_cast<ProcessID>(process);
- if (!internal_CanRecordHistogram(id, procID)) {
- // We're not allowed to record this, so don't try to restore it.
- continue;
- }
-
- KeyedHistogram* keyed = internal_GetKeyedHistogramById(id, procID);
- MOZ_ASSERT(keyed);
-
- if (!keyed || keyed->IsExpired()) {
- // Don't restore if we don't have a destination storage or the
- // histogram is expired.
- continue;
- }
-
- // Get data for the key we're looking for.
- base::Histogram* h = nullptr;
- if (NS_FAILED(keyed->GetHistogram("main"_ns, std::get<1>(histogramData),
- &h))) {
- continue;
- }
- MOZ_ASSERT(h);
-
- if (!h) {
- // Don't restore if we don't have a destination storage.
- continue;
- }
-
- // Make sure that histogram counts have matching sizes. If not,
- // |AddSampleSet| will fail and crash.
- size_t numCounts = std::get<2>(histogramData).Length();
- if (h->bucket_count() != numCounts) {
- MOZ_ASSERT(false,
- "The number of restored buckets does not match with the "
- "on in the definition");
- continue;
- }
-
- // Update the data for the histogram.
- h->AddSampleSet(base::PersistedSampleSet(
- std::move(std::get<2>(histogramData)), std::get<3>(histogramData)));
- }
- }
- }
-
- return NS_OK;
-}
diff --git a/toolkit/components/telemetry/core/TelemetryHistogram.h b/toolkit/components/telemetry/core/TelemetryHistogram.h
index 9f415f3637..f5aaa60634 100644
--- a/toolkit/components/telemetry/core/TelemetryHistogram.h
+++ b/toolkit/components/telemetry/core/TelemetryHistogram.h
@@ -12,11 +12,6 @@
#include "nsXULAppAPI.h"
#include "TelemetryCommon.h"
-namespace mozilla {
-// This is only used for the GeckoView persistence.
-class JSONWriter;
-} // namespace mozilla
-
// This module is internal to Telemetry. It encapsulates Telemetry's
// histogram accumulation and storage logic. It should only be used by
// Telemetry.cpp. These functions should not be used anywhere else.
@@ -110,15 +105,6 @@ nsresult GetKeyedHistogramSnapshots(JSContext* aCx,
size_t GetHistogramSizesOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf);
-// These functions are only meant to be used for GeckoView persistence.
-// They are responsible for updating in-memory probes with the data persisted
-// on the disk and vice-versa.
-nsresult SerializeHistograms(mozilla::JSONWriter& aWriter);
-nsresult SerializeKeyedHistograms(mozilla::JSONWriter& aWriter);
-nsresult DeserializeHistograms(JSContext* aCx, JS::Handle<JS::Value> aData);
-nsresult DeserializeKeyedHistograms(JSContext* aCx,
- JS::Handle<JS::Value> aData);
-
} // namespace TelemetryHistogram
#endif // TelemetryHistogram_h__
diff --git a/toolkit/components/telemetry/core/TelemetryScalar.cpp b/toolkit/components/telemetry/core/TelemetryScalar.cpp
index 8a121e8f3f..312e25ddc4 100644
--- a/toolkit/components/telemetry/core/TelemetryScalar.cpp
+++ b/toolkit/components/telemetry/core/TelemetryScalar.cpp
@@ -6,7 +6,6 @@
#include "TelemetryScalar.h"
-#include "geckoview/streaming/GeckoViewStreamingTelemetry.h"
#include "ipc/TelemetryIPCAccumulator.h"
#include "js/Array.h" // JS::GetArrayLength, JS::IsArrayObject
#include "js/PropertyAndElement.h" // JS_DefineProperty, JS_DefineUCProperty, JS_Enumerate, JS_GetElement, JS_GetProperty, JS_GetPropertyById, JS_HasProperty
@@ -27,6 +26,7 @@
#include "nsJSUtils.h"
#include "nsPrintfCString.h"
#include "nsVariant.h"
+#include "TelemetryCommon.h"
#include "TelemetryScalarData.h"
using mozilla::MakeUnique;
@@ -112,11 +112,6 @@ const uint32_t kMaximumScalarNameLength = 40;
const uint32_t kScalarCount =
static_cast<uint32_t>(mozilla::Telemetry::ScalarID::ScalarCount);
-// To stop growing unbounded in memory while waiting for scalar deserialization
-// to finish, we immediately apply pending operations if the array reaches
-// a certain high water mark of elements.
-const size_t kScalarActionsArrayHighWaterMark = 10000;
-
const char* TEST_SCALAR_PREFIX = "telemetry.test.";
// The max offset supported by gScalarStoresTable for static scalars' stores.
@@ -284,45 +279,6 @@ ScalarResult GetVariantFromIVariant(nsIVariant* aInput, uint32_t aScalarKind,
return ScalarResult::Ok;
}
-/**
- * Write a nsIVariant with a JSONWriter, used for GeckoView persistence.
- */
-nsresult WriteVariantToJSONWriter(
- uint32_t aScalarType, nsIVariant* aInputValue,
- const mozilla::Span<const char>& aPropertyName,
- mozilla::JSONWriter& aWriter) {
- MOZ_ASSERT(aInputValue);
-
- switch (aScalarType) {
- case nsITelemetry::SCALAR_TYPE_COUNT: {
- uint32_t val = 0;
- nsresult rv = aInputValue->GetAsUint32(&val);
- NS_ENSURE_SUCCESS(rv, rv);
- aWriter.IntProperty(aPropertyName, val);
- break;
- }
- case nsITelemetry::SCALAR_TYPE_STRING: {
- nsCString val;
- nsresult rv = aInputValue->GetAsACString(val);
- NS_ENSURE_SUCCESS(rv, rv);
- aWriter.StringProperty(aPropertyName, val);
- break;
- }
- case nsITelemetry::SCALAR_TYPE_BOOLEAN: {
- bool val = false;
- nsresult rv = aInputValue->GetAsBool(&val);
- NS_ENSURE_SUCCESS(rv, rv);
- aWriter.BoolProperty(aPropertyName, val);
- break;
- }
- default:
- MOZ_ASSERT(false, "Unknown scalar kind.");
- return NS_ERROR_FAILURE;
- }
-
- return NS_OK;
-}
-
// Implements the methods for ScalarInfo.
const char* ScalarInfo::name() const {
return &gScalarsStringTable[this->name_offset];
@@ -517,10 +473,6 @@ ScalarResult ScalarUnsigned::SetValue(nsIVariant* aValue) {
}
void ScalarUnsigned::SetValue(uint32_t aValue) {
- if (GetCurrentProduct() == SupportedProduct::GeckoviewStreaming) {
- GeckoViewStreamingTelemetry::UintScalarSet(mName, aValue);
- return;
- }
for (auto& val : mStorage) {
val = aValue;
}
@@ -544,7 +496,6 @@ ScalarResult ScalarUnsigned::AddValue(nsIVariant* aValue) {
}
void ScalarUnsigned::AddValue(uint32_t aValue) {
- MOZ_ASSERT(GetCurrentProduct() != SupportedProduct::GeckoviewStreaming);
for (auto& val : mStorage) {
val += aValue;
}
@@ -552,7 +503,6 @@ void ScalarUnsigned::AddValue(uint32_t aValue) {
}
ScalarResult ScalarUnsigned::SetMaximum(nsIVariant* aValue) {
- MOZ_ASSERT(GetCurrentProduct() != SupportedProduct::GeckoviewStreaming);
ScalarResult sr = CheckInput(aValue);
if (sr == ScalarResult::UnsignedNegativeValue) {
return sr;
@@ -678,13 +628,6 @@ ScalarResult ScalarString::SetValue(nsIVariant* aValue) {
ScalarResult ScalarString::SetValue(const nsAString& aValue) {
auto str = Substring(aValue, 0, kMaximumStringValueLength);
- if (GetCurrentProduct() == SupportedProduct::GeckoviewStreaming) {
- GeckoViewStreamingTelemetry::StringScalarSet(mName,
- NS_ConvertUTF16toUTF8(str));
- return aValue.Length() > kMaximumStringValueLength
- ? ScalarResult::StringTooLong
- : ScalarResult::Ok;
- }
for (auto& val : mStorage) {
val.Assign(str);
}
@@ -779,10 +722,6 @@ ScalarResult ScalarBoolean::SetValue(nsIVariant* aValue) {
};
void ScalarBoolean::SetValue(bool aValue) {
- if (GetCurrentProduct() == SupportedProduct::GeckoviewStreaming) {
- GeckoViewStreamingTelemetry::BoolScalarSet(mName, aValue);
- return;
- }
for (auto& val : mStorage) {
val = aValue;
}
@@ -1208,20 +1147,6 @@ ProcessesKeyedScalarsMapType gKeyedScalarStorageMap;
// needed to support "build faster" in local developer builds.
ProcessesScalarsMapType gDynamicBuiltinScalarStorageMap;
ProcessesKeyedScalarsMapType gDynamicBuiltinKeyedScalarStorageMap;
-
-// Whether or not the deserialization of persisted scalars is still in progress.
-// This is never the case on Desktop or Fennec.
-// Only GeckoView restores persisted scalars.
-bool gIsDeserializing = false;
-// This batches scalar accumulations that should be applied once loading
-// finished.
-StaticAutoPtr<nsTArray<ScalarAction>> gScalarsActions;
-StaticAutoPtr<nsTArray<KeyedScalarAction>> gKeyedScalarsActions;
-
-bool internal_IsScalarDeserializing(const StaticMutexAutoLock& lock) {
- return gIsDeserializing;
-}
-
} // namespace
////////////////////////////////////////////////////////////////////////
@@ -1402,8 +1327,6 @@ bool internal_CanRecordForScalarID(const StaticMutexAutoLock& lock,
* @param aKeyed Are we attempting to write a keyed scalar?
* @param aForce Whether to allow recording even if the probe is not allowed on
* the current process.
- * This must only be true for GeckoView persistence and recorded
- * actions.
* @return ScalarResult::Ok if we can record, an error code otherwise.
*/
ScalarResult internal_CanRecordScalar(const StaticMutexAutoLock& lock,
@@ -1546,108 +1469,6 @@ nsresult internal_GetScalarByEnum(const StaticMutexAutoLock& lock,
return NS_OK;
}
-void internal_ApplyPendingOperations(const StaticMutexAutoLock& lock);
-
-/**
- * Record the given action on a scalar into the pending actions list.
- *
- * If the pending actions list overflows the high water mark length
- * all operations are immediately applied, including the passed action.
- *
- * @param aScalarAction The action to record.
- */
-void internal_RecordScalarAction(const StaticMutexAutoLock& lock,
- const ScalarAction& aScalarAction) {
- // Make sure to have the storage.
- if (!gScalarsActions) {
- gScalarsActions = new nsTArray<ScalarAction>();
- }
-
- // Store the action.
- gScalarsActions->AppendElement(aScalarAction);
-
- // If this action overflows the pending actions array, we immediately apply
- // pending operations and assume loading is over. If loading still happens
- // afterwards, some scalar values might be overwritten and inconsistent, but
- // we won't lose operations on otherwise untouched probes.
- if (gScalarsActions->Length() > kScalarActionsArrayHighWaterMark) {
- internal_ApplyPendingOperations(lock);
- return;
- }
-}
-
-/**
- * Record the given action on a scalar on the main process into the pending
- * actions list.
- *
- * If the pending actions list overflows the high water mark length
- * all operations are immediately applied, including the passed action.
- *
- * @param aId The scalar's ID this action applies to
- * @param aDynamic Determines if the scalar is dynamic
- * @param aAction The action to record
- * @param aValue The additional data for the recorded action
- */
-void internal_RecordScalarAction(const StaticMutexAutoLock& lock, uint32_t aId,
- bool aDynamic, ScalarActionType aAction,
- const ScalarVariant& aValue) {
- internal_RecordScalarAction(
- lock,
- ScalarAction{aId, aDynamic, aAction, Some(aValue), ProcessID::Parent});
-}
-
-/**
- * Record the given action on a keyed scalar into the pending actions list.
- *
- * If the pending actions list overflows the high water mark length
- * all operations are immediately applied, including the passed action.
- *
- * @param aScalarAction The action to record.
- */
-void internal_RecordKeyedScalarAction(const StaticMutexAutoLock& lock,
- const KeyedScalarAction& aScalarAction) {
- // Make sure to have the storage.
- if (!gKeyedScalarsActions) {
- gKeyedScalarsActions = new nsTArray<KeyedScalarAction>();
- }
-
- // Store the action.
- gKeyedScalarsActions->AppendElement(aScalarAction);
-
- // If this action overflows the pending actions array, we immediately apply
- // pending operations and assume loading is over. If loading still happens
- // afterwards, some scalar values might be overwritten and inconsistent, but
- // we won't lose operations on otherwise untouched probes.
- if (gKeyedScalarsActions->Length() > kScalarActionsArrayHighWaterMark) {
- internal_ApplyPendingOperations(lock);
- return;
- }
-}
-
-/**
- * Record the given action on a keyed scalar on the main process into the
- * pending actions list.
- *
- * If the pending actions list overflows the high water mark length
- * all operations are immediately applied, including the passed action.
- *
- * @param aId The scalar's ID this action applies to
- * @param aDynamic Determines if the scalar is dynamic
- * @param aKey The scalar's key
- * @param aAction The action to record
- * @param aValue The additional data for the recorded action
- */
-void internal_RecordKeyedScalarAction(const StaticMutexAutoLock& lock,
- uint32_t aId, bool aDynamic,
- const nsAString& aKey,
- ScalarActionType aAction,
- const ScalarVariant& aValue) {
- internal_RecordKeyedScalarAction(
- lock,
- KeyedScalarAction{aId, aDynamic, aAction, NS_ConvertUTF16toUTF8(aKey),
- Some(aValue), ProcessID::Parent});
-}
-
/**
* Update the scalar with the provided value. This is used by the JS API.
*
@@ -1655,16 +1476,11 @@ void internal_RecordKeyedScalarAction(const StaticMutexAutoLock& lock,
* @param aName The scalar name.
* @param aType The action type for updating the scalar.
* @param aValue The value to use for updating the scalar.
- * @param aProcessOverride The process for which the scalar must be updated.
- * This must only be used for GeckoView persistence. It must be
- * set to the ProcessID::Parent for all the other cases.
- * @param aForce Whether to force updating even if load is in progress.
* @return a ScalarResult error value.
*/
-ScalarResult internal_UpdateScalar(
- const StaticMutexAutoLock& lock, const nsACString& aName,
- ScalarActionType aType, nsIVariant* aValue,
- ProcessID aProcessOverride = ProcessID::Parent, bool aForce = false) {
+ScalarResult internal_UpdateScalar(const StaticMutexAutoLock& lock,
+ const nsACString& aName,
+ ScalarActionType aType, nsIVariant* aValue) {
ScalarKey uniqueId;
nsresult rv = internal_GetEnumByScalarName(lock, aName, &uniqueId);
if (NS_FAILED(rv)) {
@@ -1672,7 +1488,7 @@ ScalarResult internal_UpdateScalar(
: ScalarResult::UnknownScalar;
}
- ScalarResult sr = internal_CanRecordScalar(lock, uniqueId, false, aForce);
+ ScalarResult sr = internal_CanRecordScalar(lock, uniqueId, false, false);
if (sr != ScalarResult::Ok) {
if (sr == ScalarResult::CannotRecordDataset) {
return ScalarResult::Ok;
@@ -1695,23 +1511,9 @@ ScalarResult internal_UpdateScalar(
return ScalarResult::Ok;
}
- if (!aForce && internal_IsScalarDeserializing(lock)) {
- const BaseScalarInfo& info = internal_GetScalarInfo(lock, uniqueId);
- // Convert the nsIVariant to a Variant.
- mozilla::Maybe<ScalarVariant> variantValue;
- sr = GetVariantFromIVariant(aValue, info.kind, variantValue);
- if (sr != ScalarResult::Ok) {
- MOZ_ASSERT(false, "Unable to convert nsIVariant to mozilla::Variant.");
- return sr;
- }
- internal_RecordScalarAction(lock, uniqueId.id, uniqueId.dynamic, aType,
- variantValue.ref());
- return ScalarResult::Ok;
- }
-
// Finally get the scalar.
ScalarBase* scalar = nullptr;
- rv = internal_GetScalarByEnum(lock, uniqueId, aProcessOverride, &scalar);
+ rv = internal_GetScalarByEnum(lock, uniqueId, ProcessID::Parent, &scalar);
if (NS_FAILED(rv)) {
// Don't throw on expired scalars.
if (rv == NS_ERROR_NOT_AVAILABLE) {
@@ -1823,15 +1625,13 @@ nsresult internal_GetKeyedScalarByEnum(const StaticMutexAutoLock& lock,
* @param aKey The key name.
* @param aType The action type for updating the scalar.
* @param aValue The value to use for updating the scalar.
- * @param aProcessOverride The process for which the scalar must be updated.
- * This must only be used for GeckoView persistence. It must be
- * set to the ProcessID::Parent for all the other cases.
* @return a ScalarResult error value.
*/
-ScalarResult internal_UpdateKeyedScalar(
- const StaticMutexAutoLock& lock, const nsACString& aName,
- const nsAString& aKey, ScalarActionType aType, nsIVariant* aValue,
- ProcessID aProcessOverride = ProcessID::Parent, bool aForce = false) {
+ScalarResult internal_UpdateKeyedScalar(const StaticMutexAutoLock& lock,
+ const nsACString& aName,
+ const nsAString& aKey,
+ ScalarActionType aType,
+ nsIVariant* aValue) {
ScalarKey uniqueId;
nsresult rv = internal_GetEnumByScalarName(lock, aName, &uniqueId);
if (NS_FAILED(rv)) {
@@ -1839,7 +1639,7 @@ ScalarResult internal_UpdateKeyedScalar(
: ScalarResult::UnknownScalar;
}
- ScalarResult sr = internal_CanRecordScalar(lock, uniqueId, true, aForce);
+ ScalarResult sr = internal_CanRecordScalar(lock, uniqueId, true, false);
if (sr != ScalarResult::Ok) {
if (sr == ScalarResult::CannotRecordDataset) {
return ScalarResult::Ok;
@@ -1862,23 +1662,10 @@ ScalarResult internal_UpdateKeyedScalar(
return ScalarResult::Ok;
}
- if (!aForce && internal_IsScalarDeserializing(lock)) {
- const BaseScalarInfo& info = internal_GetScalarInfo(lock, uniqueId);
- // Convert the nsIVariant to a Variant.
- mozilla::Maybe<ScalarVariant> variantValue;
- sr = GetVariantFromIVariant(aValue, info.kind, variantValue);
- if (sr != ScalarResult::Ok) {
- MOZ_ASSERT(false, "Unable to convert nsIVariant to mozilla::Variant.");
- return sr;
- }
- internal_RecordKeyedScalarAction(lock, uniqueId.id, uniqueId.dynamic, aKey,
- aType, variantValue.ref());
- return ScalarResult::Ok;
- }
-
// Finally get the scalar.
KeyedScalar* scalar = nullptr;
- rv = internal_GetKeyedScalarByEnum(lock, uniqueId, aProcessOverride, &scalar);
+ rv =
+ internal_GetKeyedScalarByEnum(lock, uniqueId, ProcessID::Parent, &scalar);
if (NS_FAILED(rv)) {
// Don't throw on expired scalars.
if (rv == NS_ERROR_NOT_AVAILABLE) {
@@ -2392,21 +2179,6 @@ void internal_ApplyKeyedScalarActions(
}
}
-void internal_ApplyPendingOperations(const StaticMutexAutoLock& lock) {
- if (gScalarsActions && gScalarsActions->Length() > 0) {
- internal_ApplyScalarActions(lock, *gScalarsActions);
- gScalarsActions->Clear();
- }
-
- if (gKeyedScalarsActions && gKeyedScalarsActions->Length() > 0) {
- internal_ApplyKeyedScalarActions(lock, *gKeyedScalarsActions);
- gKeyedScalarsActions->Clear();
- }
-
- // After all pending operations are applied deserialization is done
- gIsDeserializing = false;
-}
-
} // namespace
////////////////////////////////////////////////////////////////////////
@@ -2477,16 +2249,6 @@ void TelemetryScalar::DeInitializeGlobalState() {
gInitDone = false;
}
-void TelemetryScalar::DeserializationStarted() {
- StaticMutexAutoLock locker(gTelemetryScalarsMutex);
- gIsDeserializing = true;
-}
-
-void TelemetryScalar::ApplyPendingOperations() {
- StaticMutexAutoLock locker(gTelemetryScalarsMutex);
- internal_ApplyPendingOperations(locker);
-}
-
void TelemetryScalar::SetCanRecordBase(bool b) {
StaticMutexAutoLock locker(gTelemetryScalarsMutex);
gCanRecordBase = b;
@@ -2596,12 +2358,6 @@ void TelemetryScalar::Add(mozilla::Telemetry::ScalarID aId, uint32_t aValue) {
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- ScalarActionType::eAdd, ScalarVariant(aValue));
- return;
- }
-
ScalarBase* scalar = nullptr;
nsresult rv =
internal_GetScalarByEnum(locker, uniqueId, ProcessID::Parent, &scalar);
@@ -2642,13 +2398,6 @@ void TelemetryScalar::Add(mozilla::Telemetry::ScalarID aId,
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordKeyedScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- aKey, ScalarActionType::eAdd,
- ScalarVariant(aValue));
- return;
- }
-
KeyedScalar* scalar = nullptr;
nsresult rv = internal_GetKeyedScalarByEnum(locker, uniqueId,
ProcessID::Parent, &scalar);
@@ -2758,12 +2507,6 @@ void TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId, uint32_t aValue) {
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- ScalarActionType::eSet, ScalarVariant(aValue));
- return;
- }
-
ScalarBase* scalar = nullptr;
nsresult rv =
internal_GetScalarByEnum(locker, uniqueId, ProcessID::Parent, &scalar);
@@ -2803,13 +2546,6 @@ void TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId,
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- ScalarActionType::eSet,
- ScalarVariant(nsString(aValue)));
- return;
- }
-
ScalarBase* scalar = nullptr;
nsresult rv =
internal_GetScalarByEnum(locker, uniqueId, ProcessID::Parent, &scalar);
@@ -2848,12 +2584,6 @@ void TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId, bool aValue) {
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- ScalarActionType::eSet, ScalarVariant(aValue));
- return;
- }
-
ScalarBase* scalar = nullptr;
nsresult rv =
internal_GetScalarByEnum(locker, uniqueId, ProcessID::Parent, &scalar);
@@ -2894,13 +2624,6 @@ void TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId,
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordKeyedScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- aKey, ScalarActionType::eSet,
- ScalarVariant(aValue));
- return;
- }
-
KeyedScalar* scalar = nullptr;
nsresult rv = internal_GetKeyedScalarByEnum(locker, uniqueId,
ProcessID::Parent, &scalar);
@@ -2941,13 +2664,6 @@ void TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId,
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordKeyedScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- aKey, ScalarActionType::eSet,
- ScalarVariant(aValue));
- return;
- }
-
KeyedScalar* scalar = nullptr;
nsresult rv = internal_GetKeyedScalarByEnum(locker, uniqueId,
ProcessID::Parent, &scalar);
@@ -3061,13 +2777,6 @@ void TelemetryScalar::SetMaximum(mozilla::Telemetry::ScalarID aId,
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- ScalarActionType::eSetMaximum,
- ScalarVariant(aValue));
- return;
- }
-
ScalarBase* scalar = nullptr;
nsresult rv =
internal_GetScalarByEnum(locker, uniqueId, ProcessID::Parent, &scalar);
@@ -3108,13 +2817,6 @@ void TelemetryScalar::SetMaximum(mozilla::Telemetry::ScalarID aId,
return;
}
- if (internal_IsScalarDeserializing(locker)) {
- internal_RecordKeyedScalarAction(locker, uniqueId.id, uniqueId.dynamic,
- aKey, ScalarActionType::eSetMaximum,
- ScalarVariant(aValue));
- return;
- }
-
KeyedScalar* scalar = nullptr;
nsresult rv = internal_GetKeyedScalarByEnum(locker, uniqueId,
ProcessID::Parent, &scalar);
@@ -3538,8 +3240,6 @@ void TelemetryScalar::ClearScalars() {
gKeyedScalarStorageMap.Clear();
gDynamicBuiltinScalarStorageMap.Clear();
gDynamicBuiltinKeyedScalarStorageMap.Clear();
- gScalarsActions = nullptr;
- gKeyedScalarsActions = nullptr;
}
size_t TelemetryScalar::GetMapShallowSizesOfExcludingThis(
@@ -3580,20 +3280,6 @@ void TelemetryScalar::UpdateChildData(
"parent process.");
StaticMutexAutoLock locker(gTelemetryScalarsMutex);
- // If scalars are still being deserialized, we need to record the incoming
- // operations as well.
- if (internal_IsScalarDeserializing(locker)) {
- for (const ScalarAction& action : aScalarActions) {
- // We're only getting immutable access, so let's copy it
- ScalarAction copy = action;
- // Fix up the process type
- copy.mProcessType = aProcessType;
- internal_RecordScalarAction(locker, copy);
- }
-
- return;
- }
-
internal_ApplyScalarActions(locker, aScalarActions, Some(aProcessType));
}
@@ -3605,20 +3291,6 @@ void TelemetryScalar::UpdateChildKeyedData(
"from the parent process.");
StaticMutexAutoLock locker(gTelemetryScalarsMutex);
- // If scalars are still being deserialized, we need to record the incoming
- // operations as well.
- if (internal_IsScalarDeserializing(locker)) {
- for (const KeyedScalarAction& action : aScalarActions) {
- // We're only getting immutable access, so let's copy it
- KeyedScalarAction copy = action;
- // Fix up the process type
- copy.mProcessType = aProcessType;
- internal_RecordKeyedScalarAction(locker, copy);
- }
-
- return;
- }
-
internal_ApplyKeyedScalarActions(locker, aScalarActions, Some(aProcessType));
}
@@ -3632,10 +3304,6 @@ void TelemetryScalar::RecordDiscardedData(
return;
}
- if (GetCurrentProduct() == SupportedProduct::GeckoviewStreaming) {
- return;
- }
-
ScalarBase* scalar = nullptr;
mozilla::DebugOnly<nsresult> rv;
@@ -3754,437 +3422,3 @@ nsresult TelemetryScalar::GetAllStores(StringHashSet& set) {
return NS_OK;
}
-
-////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////
-//
-// PUBLIC: GeckoView serialization/deserialization functions.
-
-/**
- * Write the scalar data to the provided Json object, for
- * GeckoView measurement persistence. The output format is the same one used
- * for snapshotting the scalars.
- *
- * @param {aWriter} The JSON object to write to.
- * @returns NS_OK or a failure value explaining why persistence failed.
- */
-nsresult TelemetryScalar::SerializeScalars(mozilla::JSONWriter& aWriter) {
- // Get a copy of the data, without clearing.
- ScalarSnapshotTable scalarsToReflect;
- {
- StaticMutexAutoLock locker(gTelemetryScalarsMutex);
- // For persistence, we care about all the datasets. Worst case, they
- // will be empty.
- nsresult rv = internal_GetScalarSnapshot(
- locker, scalarsToReflect, nsITelemetry::DATASET_PRERELEASE_CHANNELS,
- false, /*aClearScalars*/
- "main"_ns);
- if (NS_FAILED(rv)) {
- return rv;
- }
- }
-
- // Persist the scalars to the JSON object.
- for (const auto& entry : scalarsToReflect) {
- const ScalarTupleArray& processScalars = entry.GetData();
- const char* processName = GetNameForProcessID(ProcessID(entry.GetKey()));
-
- aWriter.StartObjectProperty(mozilla::MakeStringSpan(processName));
-
- for (const ScalarDataTuple& scalar : processScalars) {
- nsresult rv = WriteVariantToJSONWriter(
- std::get<2>(scalar) /*aScalarType*/,
- std::get<1>(scalar) /*aInputValue*/,
- mozilla::MakeStringSpan(std::get<0>(scalar)) /*aPropertyName*/,
- aWriter /*aWriter*/);
- if (NS_FAILED(rv)) {
- // Skip this scalar if we failed to write it. We don't bail out just
- // yet as we may salvage other scalars. We eventually need to call
- // EndObject.
- continue;
- }
- }
-
- aWriter.EndObject();
- }
-
- return NS_OK;
-}
-
-/**
- * Write the keyed scalar data to the provided Json object, for
- * GeckoView measurement persistence. The output format is the same
- * one used for snapshotting the keyed scalars.
- *
- * @param {aWriter} The JSON object to write to.
- * @returns NS_OK or a failure value explaining why persistence failed.
- */
-nsresult TelemetryScalar::SerializeKeyedScalars(mozilla::JSONWriter& aWriter) {
- // Get a copy of the data, without clearing.
- KeyedScalarSnapshotTable keyedScalarsToReflect;
- {
- StaticMutexAutoLock locker(gTelemetryScalarsMutex);
- // For persistence, we care about all the datasets. Worst case, they
- // will be empty.
- nsresult rv = internal_GetKeyedScalarSnapshot(
- locker, keyedScalarsToReflect,
- nsITelemetry::DATASET_PRERELEASE_CHANNELS, false, /*aClearScalars*/
- "main"_ns);
- if (NS_FAILED(rv)) {
- return rv;
- }
- }
-
- // Persist the scalars to the JSON object.
- for (const auto& entry : keyedScalarsToReflect) {
- const KeyedScalarTupleArray& processScalars = entry.GetData();
- const char* processName = GetNameForProcessID(ProcessID(entry.GetKey()));
-
- aWriter.StartObjectProperty(mozilla::MakeStringSpan(processName));
-
- for (const KeyedScalarDataTuple& keyedScalarData : processScalars) {
- aWriter.StartObjectProperty(
- mozilla::MakeStringSpan(std::get<0>(keyedScalarData)));
-
- // Define a property for each scalar key, then add it to the keyed scalar
- // object.
- const nsTArray<KeyedScalar::KeyValuePair>& keyProps =
- std::get<1>(keyedScalarData);
- for (const KeyedScalar::KeyValuePair& keyData : keyProps) {
- nsresult rv = WriteVariantToJSONWriter(
- std::get<2>(keyedScalarData) /*aScalarType*/,
- keyData.second /*aInputValue*/,
- PromiseFlatCString(keyData.first) /*aOutKey*/, aWriter /*aWriter*/);
- if (NS_FAILED(rv)) {
- // Skip this scalar if we failed to write it. We don't bail out just
- // yet as we may salvage other scalars. We eventually need to call
- // EndObject.
- continue;
- }
- }
- aWriter.EndObject();
- }
- aWriter.EndObject();
- }
-
- return NS_OK;
-}
-
-/**
- * Load the persisted measurements from a Json object and inject them
- * in the relevant process storage.
- *
- * @param {aData} The input Json object.
- * @returns NS_OK if loading was performed, an error code explaining the
- * failure reason otherwise.
- */
-nsresult TelemetryScalar::DeserializePersistedScalars(
- JSContext* aCx, JS::Handle<JS::Value> aData) {
- MOZ_ASSERT(XRE_IsParentProcess(), "Only load scalars in the parent process");
- if (!XRE_IsParentProcess()) {
- return NS_ERROR_FAILURE;
- }
-
- typedef std::pair<nsCString, nsCOMPtr<nsIVariant>> PersistedScalarPair;
- typedef nsTArray<PersistedScalarPair> PersistedScalarArray;
- typedef nsTHashMap<ProcessIDHashKey, PersistedScalarArray>
- PeristedScalarStorage;
-
- PeristedScalarStorage scalarsToUpdate;
-
- // Before updating the scalars, we need to get the data out of the JS
- // wrappers. We can't hold the scalars mutex while handling JS stuff.
- // Build a <scalar name, value> map.
- JS::Rooted<JSObject*> scalarDataObj(aCx, &aData.toObject());
- JS::Rooted<JS::IdVector> processes(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, scalarDataObj, &processes)) {
- // We can't even enumerate the processes in the loaded data, so
- // there is nothing we could recover from the persistence file. Bail out.
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- // The following block of code attempts to extract as much data as possible
- // from the serialized JSON, even in case of light data corruptions: if, for
- // example, the data for a single process is corrupted or is in an unexpected
- // form, we press on and attempt to load the data for the other processes.
- JS::Rooted<JS::PropertyKey> process(aCx);
- for (auto& processVal : processes) {
- // This is required as JS API calls require an Handle<jsid> and not a
- // plain jsid.
- process = processVal;
- // Get the process name.
- nsAutoJSString processNameJS;
- if (!processNameJS.init(aCx, process)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Make sure it's valid. Note that this is safe to call outside
- // of a locked section.
- NS_ConvertUTF16toUTF8 processName(processNameJS);
- ProcessID processID = GetIDForProcessName(processName.get());
- if (processID == ProcessID::Count) {
- NS_WARNING(
- nsPrintfCString("Failed to get process ID for %s", processName.get())
- .get());
- continue;
- }
-
- // And its probes.
- JS::Rooted<JS::Value> processData(aCx);
- if (!JS_GetPropertyById(aCx, scalarDataObj, process, &processData)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- if (!processData.isObject()) {
- // |processData| should be an object containing scalars. If this is
- // not the case, silently skip and try to load the data for the other
- // processes.
- continue;
- }
-
- // Iterate through each scalar.
- JS::Rooted<JSObject*> processDataObj(aCx, &processData.toObject());
- JS::Rooted<JS::IdVector> scalars(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, processDataObj, &scalars)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- JS::Rooted<JS::PropertyKey> scalar(aCx);
- for (auto& scalarVal : scalars) {
- scalar = scalarVal;
- // Get the scalar name.
- nsAutoJSString scalarName;
- if (!scalarName.init(aCx, scalar)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Get the scalar value as a JS value.
- JS::Rooted<JS::Value> scalarValue(aCx);
- if (!JS_GetPropertyById(aCx, processDataObj, scalar, &scalarValue)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- if (scalarValue.isNullOrUndefined()) {
- // We can't set scalars to null or undefined values, skip this
- // and try to load other scalars.
- continue;
- }
-
- // Unpack the aVal to nsIVariant.
- nsCOMPtr<nsIVariant> unpackedVal;
- nsresult rv = nsContentUtils::XPConnect()->JSToVariant(
- aCx, scalarValue, getter_AddRefs(unpackedVal));
- if (NS_FAILED(rv)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Add the scalar to the map.
- PersistedScalarArray& processScalars =
- scalarsToUpdate.LookupOrInsert(static_cast<uint32_t>(processID));
- processScalars.AppendElement(std::make_pair(
- nsCString(NS_ConvertUTF16toUTF8(scalarName)), unpackedVal));
- }
- }
-
- // Now that all the JS specific operations are finished, update the scalars.
- {
- StaticMutexAutoLock lock(gTelemetryScalarsMutex);
-
- for (const auto& entry : scalarsToUpdate) {
- const PersistedScalarArray& processScalars = entry.GetData();
- for (PersistedScalarArray::size_type i = 0; i < processScalars.Length();
- i++) {
- mozilla::Unused << internal_UpdateScalar(
- lock, processScalars[i].first, ScalarActionType::eSet,
- processScalars[i].second, ProcessID(entry.GetKey()),
- true /* aForce */);
- }
- }
- }
-
- return NS_OK;
-}
-
-/**
- * Load the persisted measurements from a Json object and injects them
- * in the relevant process storage.
- *
- * @param {aData} The input Json object.
- * @returns NS_OK if loading was performed, an error code explaining the
- * failure reason otherwise.
- */
-nsresult TelemetryScalar::DeserializePersistedKeyedScalars(
- JSContext* aCx, JS::Handle<JS::Value> aData) {
- MOZ_ASSERT(XRE_IsParentProcess(), "Only load scalars in the parent process");
- if (!XRE_IsParentProcess()) {
- return NS_ERROR_FAILURE;
- }
-
- typedef std::tuple<nsCString, nsString, nsCOMPtr<nsIVariant>>
- PersistedKeyedScalarTuple;
- typedef nsTArray<PersistedKeyedScalarTuple> PersistedKeyedScalarArray;
- typedef nsTHashMap<ProcessIDHashKey, PersistedKeyedScalarArray>
- PeristedKeyedScalarStorage;
-
- PeristedKeyedScalarStorage scalarsToUpdate;
-
- // Before updating the keyed scalars, we need to get the data out of the JS
- // wrappers. We can't hold the scalars mutex while handling JS stuff.
- // Build a <scalar name, value> map.
- JS::Rooted<JSObject*> scalarDataObj(aCx, &aData.toObject());
- JS::Rooted<JS::IdVector> processes(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, scalarDataObj, &processes)) {
- // We can't even enumerate the processes in the loaded data, so
- // there is nothing we could recover from the persistence file. Bail out.
- JS_ClearPendingException(aCx);
- return NS_ERROR_FAILURE;
- }
-
- // The following block of code attempts to extract as much data as possible
- // from the serialized JSON, even in case of light data corruptions: if, for
- // example, the data for a single process is corrupted or is in an unexpected
- // form, we press on and attempt to load the data for the other processes.
- JS::Rooted<JS::PropertyKey> process(aCx);
- for (auto& processVal : processes) {
- process = processVal;
- // Get the process name.
- nsAutoJSString processNameJS;
- if (!processNameJS.init(aCx, process)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Make sure it's valid. Note that this is safe to call outside
- // of a locked section.
- NS_ConvertUTF16toUTF8 processName(processNameJS);
- ProcessID processID = GetIDForProcessName(processName.get());
- if (processID == ProcessID::Count) {
- NS_WARNING(
- nsPrintfCString("Failed to get process ID for %s", processName.get())
- .get());
- continue;
- }
-
- // And its probes.
- JS::Rooted<JS::Value> processData(aCx);
- if (!JS_GetPropertyById(aCx, scalarDataObj, process, &processData)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- if (!processData.isObject()) {
- // |processData| should be an object containing scalars. If this is
- // not the case, silently skip and try to load the data for the other
- // processes.
- continue;
- }
-
- // Iterate through each keyed scalar.
- JS::Rooted<JSObject*> processDataObj(aCx, &processData.toObject());
- JS::Rooted<JS::IdVector> keyedScalars(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, processDataObj, &keyedScalars)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- JS::Rooted<JS::PropertyKey> keyedScalar(aCx);
- for (auto& keyedScalarVal : keyedScalars) {
- keyedScalar = keyedScalarVal;
- // Get the scalar name.
- nsAutoJSString scalarName;
- if (!scalarName.init(aCx, keyedScalar)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Get the data for this keyed scalar.
- JS::Rooted<JS::Value> keyedScalarData(aCx);
- if (!JS_GetPropertyById(aCx, processDataObj, keyedScalar,
- &keyedScalarData)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- if (!keyedScalarData.isObject()) {
- // Keyed scalar data need to be an object. If that's not the case, skip
- // it and try to load the rest of the data.
- continue;
- }
-
- // Get the keys in the keyed scalar.
- JS::Rooted<JSObject*> keyedScalarDataObj(aCx,
- &keyedScalarData.toObject());
- JS::Rooted<JS::IdVector> keys(aCx, JS::IdVector(aCx));
- if (!JS_Enumerate(aCx, keyedScalarDataObj, &keys)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- JS::Rooted<JS::PropertyKey> key(aCx);
- for (auto keyVal : keys) {
- key = keyVal;
- // Get the process name.
- nsAutoJSString keyName;
- if (!keyName.init(aCx, key)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Get the scalar value as a JS value.
- JS::Rooted<JS::Value> scalarValue(aCx);
- if (!JS_GetPropertyById(aCx, keyedScalarDataObj, key, &scalarValue)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- if (scalarValue.isNullOrUndefined()) {
- // We can't set scalars to null or undefined values, skip this
- // and try to load other scalars.
- continue;
- }
-
- // Unpack the aVal to nsIVariant.
- nsCOMPtr<nsIVariant> unpackedVal;
- nsresult rv = nsContentUtils::XPConnect()->JSToVariant(
- aCx, scalarValue, getter_AddRefs(unpackedVal));
- if (NS_FAILED(rv)) {
- JS_ClearPendingException(aCx);
- continue;
- }
-
- // Add the scalar to the map.
- PersistedKeyedScalarArray& processScalars =
- scalarsToUpdate.LookupOrInsert(static_cast<uint32_t>(processID));
- processScalars.AppendElement(
- std::make_tuple(nsCString(NS_ConvertUTF16toUTF8(scalarName)),
- nsString(keyName), unpackedVal));
- }
- }
- }
-
- // Now that all the JS specific operations are finished, update the scalars.
- {
- StaticMutexAutoLock lock(gTelemetryScalarsMutex);
-
- for (const auto& entry : scalarsToUpdate) {
- const PersistedKeyedScalarArray& processScalars = entry.GetData();
- for (PersistedKeyedScalarArray::size_type i = 0;
- i < processScalars.Length(); i++) {
- mozilla::Unused << internal_UpdateKeyedScalar(
- lock, std::get<0>(processScalars[i]),
- std::get<1>(processScalars[i]), ScalarActionType::eSet,
- std::get<2>(processScalars[i]), ProcessID(entry.GetKey()),
- true /* aForce */);
- }
- }
- }
-
- return NS_OK;
-}
diff --git a/toolkit/components/telemetry/core/TelemetryScalar.h b/toolkit/components/telemetry/core/TelemetryScalar.h
index c7e5352860..d45d12cca5 100644
--- a/toolkit/components/telemetry/core/TelemetryScalar.h
+++ b/toolkit/components/telemetry/core/TelemetryScalar.h
@@ -18,8 +18,6 @@
// For the public interface to Telemetry functionality, see Telemetry.h.
namespace mozilla {
-// This is only used for the GeckoView persistence.
-class JSONWriter;
namespace Telemetry {
struct ScalarAction;
struct KeyedScalarAction;
@@ -112,22 +110,6 @@ void AddDynamicScalarDefinitions(
* This includes dynamic stores.
*/
nsresult GetAllStores(mozilla::Telemetry::Common::StringHashSet& set);
-
-// They are responsible for updating in-memory probes with the data persisted
-// on the disk and vice-versa.
-nsresult SerializeScalars(mozilla::JSONWriter& aWriter);
-nsresult SerializeKeyedScalars(mozilla::JSONWriter& aWriter);
-nsresult DeserializePersistedScalars(JSContext* aCx,
- JS::Handle<JS::Value> aData);
-nsresult DeserializePersistedKeyedScalars(JSContext* aCx,
- JS::Handle<JS::Value> aData);
-// Mark deserialization as in progress.
-// After this, all scalar operations are recorded into the pending operations
-// list.
-void DeserializationStarted();
-// Apply all operations from the pending operations list and mark
-// deserialization finished afterwards.
-void ApplyPendingOperations();
} // namespace TelemetryScalar
#endif // TelemetryScalar_h__
diff --git a/toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs b/toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs
index 9dcc949788..b948e80681 100644
--- a/toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs
+++ b/toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs
@@ -35,7 +35,12 @@ XPCOMUtils.defineLazyPreferenceGetter(lazy, "HELPER", PREF_HELPER, undefined);
*/
export const DAPTelemetrySender = new (class {
- startup() {
+ async startup() {
+ await lazy.NimbusFeatures.dapTelemetry.ready();
+ if (!lazy.NimbusFeatures.dapTelemetry.getVariable("enabled")) {
+ return;
+ }
+
lazy.logConsole.debug("Performing DAP startup");
if (lazy.NimbusFeatures.dapTelemetry.getVariable("visitCountingEnabled")) {
diff --git a/toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml b/toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml
index abad140b7d..3abbb9d60c 100644
--- a/toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml
+++ b/toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml
@@ -11,7 +11,7 @@ edition = "2021"
[dependencies]
dap_ffi = { path = "../ffi" }
hex = { version = "0.4.3", features = ["serde"] }
-prio = {version = "0.15.3", default-features = false }
+prio = {version = "0.16.2", default-features = false }
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0" }
thin-vec = { version = "0.2.1", features = ["gecko-ffi"] }
diff --git a/toolkit/components/telemetry/dap/ffi-gtest/test.rs b/toolkit/components/telemetry/dap/ffi-gtest/test.rs
index 3871965107..874e0e99a5 100644
--- a/toolkit/components/telemetry/dap/ffi-gtest/test.rs
+++ b/toolkit/components/telemetry/dap/ffi-gtest/test.rs
@@ -17,7 +17,7 @@ use prio::codec::{Decode, Encode};
pub extern "C" fn dap_test_encoding() {
let r = Report::new_dummy();
let mut encoded = Vec::<u8>::new();
- Report::encode(&r, &mut encoded);
+ Report::encode(&r, &mut encoded).expect("Report encoding failed!");
let decoded = Report::decode(&mut Cursor::new(&encoded)).expect("Report decoding failed!");
if r != decoded {
println!("Report:");
diff --git a/toolkit/components/telemetry/dap/ffi/Cargo.toml b/toolkit/components/telemetry/dap/ffi/Cargo.toml
index dfb69e4821..5e94bf30a7 100644
--- a/toolkit/components/telemetry/dap/ffi/Cargo.toml
+++ b/toolkit/components/telemetry/dap/ffi/Cargo.toml
@@ -8,6 +8,7 @@ authors = [
license = "MPL-2.0"
[dependencies]
-prio = {version = "0.15.3", default-features = false }
+prio = {version = "0.16.2", default-features = false }
thin-vec = { version = "0.2.1", features = ["gecko-ffi"] }
rand = "0.8"
+log = "0.4"
diff --git a/toolkit/components/telemetry/dap/ffi/src/lib.rs b/toolkit/components/telemetry/dap/ffi/src/lib.rs
index 998c8af204..2d595c1a8f 100644
--- a/toolkit/components/telemetry/dap/ffi/src/lib.rs
+++ b/toolkit/components/telemetry/dap/ffi/src/lib.rs
@@ -5,6 +5,8 @@
use std::error::Error;
use std::io::Cursor;
+use log::{debug, warn};
+
use prio::vdaf::prio3::Prio3Sum;
use prio::vdaf::prio3::Prio3SumVec;
use thin_vec::ThinVec;
@@ -19,8 +21,6 @@ use types::Time;
use prio::codec::Encode;
use prio::codec::{decode_u16_items, encode_u32_items};
-use prio::flp::types::{Sum, SumVec};
-use prio::vdaf::prio3::Prio3;
use prio::vdaf::Client;
use prio::vdaf::VdafError;
@@ -41,7 +41,7 @@ extern "C" {
) -> bool;
}
-pub fn new_prio_u8(num_aggregators: u8, bits: u32) -> Result<Prio3Sum, VdafError> {
+pub fn new_prio_sum(num_aggregators: u8, bits: usize) -> Result<Prio3Sum, VdafError> {
if bits > 64 {
return Err(VdafError::Uncategorized(format!(
"bit length ({}) exceeds limit for aggregate type (64)",
@@ -49,17 +49,16 @@ pub fn new_prio_u8(num_aggregators: u8, bits: u32) -> Result<Prio3Sum, VdafError
)));
}
- Prio3::new(num_aggregators, Sum::new(bits as usize)?)
+ Prio3Sum::new_sum(num_aggregators, bits)
}
-pub fn new_prio_vecu8(num_aggregators: u8, len: usize) -> Result<Prio3SumVec, VdafError> {
+pub fn new_prio_sumvec(
+ num_aggregators: u8,
+ len: usize,
+ bits: usize,
+) -> Result<Prio3SumVec, VdafError> {
let chunk_length = prio::vdaf::prio3::optimal_chunk_length(8 * len);
- Prio3::new(num_aggregators, SumVec::new(8, len, chunk_length)?)
-}
-
-pub fn new_prio_vecu16(num_aggregators: u8, len: usize) -> Result<Prio3SumVec, VdafError> {
- let chunk_length = prio::vdaf::prio3::optimal_chunk_length(16 * len);
- Prio3::new(num_aggregators, SumVec::new(16, len, chunk_length)?)
+ Prio3SumVec::new_sum_vec(num_aggregators, bits, len, chunk_length)
}
enum Role {
@@ -112,14 +111,17 @@ impl Shardable for u8 {
&self,
nonce: &[u8; 16],
) -> Result<(Vec<u8>, Vec<Vec<u8>>), Box<dyn std::error::Error>> {
- let prio = new_prio_u8(2, 2)?;
+ let prio = new_prio_sum(2, 8)?;
let (public_share, input_shares) = prio.shard(&(*self as u128), nonce)?;
debug_assert_eq!(input_shares.len(), 2);
- let encoded_input_shares = input_shares.iter().map(|s| s.get_encoded()).collect();
- let encoded_public_share = public_share.get_encoded();
+ let encoded_input_shares = input_shares
+ .iter()
+ .map(|s| s.get_encoded())
+ .collect::<Result<Vec<_>, _>>()?;
+ let encoded_public_share = public_share.get_encoded()?;
Ok((encoded_public_share, encoded_input_shares))
}
}
@@ -129,15 +131,18 @@ impl Shardable for ThinVec<u8> {
&self,
nonce: &[u8; 16],
) -> Result<(Vec<u8>, Vec<Vec<u8>>), Box<dyn std::error::Error>> {
- let prio = new_prio_vecu8(2, self.len())?;
+ let prio = new_prio_sumvec(2, self.len(), 8)?;
let measurement: Vec<u128> = self.iter().map(|e| (*e as u128)).collect();
let (public_share, input_shares) = prio.shard(&measurement, nonce)?;
debug_assert_eq!(input_shares.len(), 2);
- let encoded_input_shares = input_shares.iter().map(|s| s.get_encoded()).collect();
- let encoded_public_share = public_share.get_encoded();
+ let encoded_input_shares = input_shares
+ .iter()
+ .map(|s| s.get_encoded())
+ .collect::<Result<Vec<_>, _>>()?;
+ let encoded_public_share = public_share.get_encoded()?;
Ok((encoded_public_share, encoded_input_shares))
}
}
@@ -147,15 +152,18 @@ impl Shardable for ThinVec<u16> {
&self,
nonce: &[u8; 16],
) -> Result<(Vec<u8>, Vec<Vec<u8>>), Box<dyn std::error::Error>> {
- let prio = new_prio_vecu16(2, self.len())?;
+ let prio = new_prio_sumvec(2, self.len(), 16)?;
let measurement: Vec<u128> = self.iter().map(|e| (*e as u128)).collect();
let (public_share, input_shares) = prio.shard(&measurement, nonce)?;
debug_assert_eq!(input_shares.len(), 2);
- let encoded_input_shares = input_shares.iter().map(|s| s.get_encoded()).collect();
- let encoded_public_share = public_share.get_encoded();
+ let encoded_input_shares = input_shares
+ .iter()
+ .map(|s| s.get_encoded())
+ .collect::<Result<Vec<_>, _>>()?;
+ let encoded_public_share = public_share.get_encoded()?;
Ok((encoded_public_share, encoded_input_shares))
}
}
@@ -163,7 +171,7 @@ impl Shardable for ThinVec<u16> {
/// Pre-fill the info part of the HPKE sealing with the constants from the standard.
fn make_base_info() -> Vec<u8> {
let mut info = Vec::<u8>::new();
- const START: &[u8] = "dap-07 input share".as_bytes();
+ const START: &[u8] = "dap-09 input share".as_bytes();
info.extend(START);
const FIXED: u8 = 1;
info.push(FIXED);
@@ -215,7 +223,8 @@ fn get_dap_report_internal<T: Shardable>(
}
.get_encoded()
})
- .collect();
+ .collect::<Result<Vec<_>, _>>()?;
+ debug!("Plaintext input shares computed.");
let metadata = ReportMetadata {
report_id,
@@ -230,18 +239,20 @@ fn get_dap_report_internal<T: Shardable>(
let mut info = make_base_info();
let mut aad = Vec::from(*task_id);
- metadata.encode(&mut aad);
- encode_u32_items(&mut aad, &(), &encoded_public_share);
+ metadata.encode(&mut aad)?;
+ encode_u32_items(&mut aad, &(), &encoded_public_share)?;
info.push(Role::Leader as u8);
let leader_payload =
hpke_encrypt_wrapper(&plaintext_input_shares[0], &aad, &info, &leader_hpke_config)?;
+ debug!("Leader payload encrypted.");
*info.last_mut().unwrap() = Role::Helper as u8;
let helper_payload =
hpke_encrypt_wrapper(&plaintext_input_shares[1], &aad, &info, &helper_hpke_config)?;
+ debug!("Helper payload encrypted.");
Ok(Report {
metadata,
@@ -264,20 +275,22 @@ pub extern "C" fn dapGetReportU8(
) -> bool {
assert_eq!(task_id.len(), 32);
- if let Ok(report) = get_dap_report_internal::<u8>(
+ let Ok(report) = get_dap_report_internal::<u8>(
leader_hpke_config_encoded,
helper_hpke_config_encoded,
&measurement,
&task_id.as_slice().try_into().unwrap(),
time_precision,
- ) {
- let encoded_report = report.get_encoded();
- out_report.extend(encoded_report);
-
- true
- } else {
- false
- }
+ ) else {
+ warn!("Creating report failed!");
+ return false;
+ };
+ let Ok(encoded_report) = report.get_encoded() else {
+ warn!("Encoding report failed!");
+ return false;
+ };
+ out_report.extend(encoded_report);
+ true
}
#[no_mangle]
@@ -291,20 +304,22 @@ pub extern "C" fn dapGetReportVecU8(
) -> bool {
assert_eq!(task_id.len(), 32);
- if let Ok(report) = get_dap_report_internal::<ThinVec<u8>>(
+ let Ok(report) = get_dap_report_internal::<ThinVec<u8>>(
leader_hpke_config_encoded,
helper_hpke_config_encoded,
measurement,
&task_id.as_slice().try_into().unwrap(),
time_precision,
- ) {
- let encoded_report = report.get_encoded();
- out_report.extend(encoded_report);
-
- true
- } else {
- false
- }
+ ) else {
+ warn!("Creating report failed!");
+ return false;
+ };
+ let Ok(encoded_report) = report.get_encoded() else {
+ warn!("Encoding report failed!");
+ return false;
+ };
+ out_report.extend(encoded_report);
+ true
}
#[no_mangle]
@@ -318,18 +333,20 @@ pub extern "C" fn dapGetReportVecU16(
) -> bool {
assert_eq!(task_id.len(), 32);
- if let Ok(report) = get_dap_report_internal::<ThinVec<u16>>(
+ let Ok(report) = get_dap_report_internal::<ThinVec<u16>>(
leader_hpke_config_encoded,
helper_hpke_config_encoded,
measurement,
&task_id.as_slice().try_into().unwrap(),
time_precision,
- ) {
- let encoded_report = report.get_encoded();
- out_report.extend(encoded_report);
-
- true
- } else {
- false
- }
+ ) else {
+ warn!("Creating report failed!");
+ return false;
+ };
+ let Ok(encoded_report) = report.get_encoded() else {
+ warn!("Encoding report failed!");
+ return false;
+ };
+ out_report.extend(encoded_report);
+ true
}
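
The hunks above follow from prio 0.16 making encoding fallible: get_encoded() now returns a Result, so the FFI entry points switch from "if let Ok(...)" to "let ... else" with an early "return false", and per-share encodings are collected with collect::<Result<Vec<_>, _>>()?. What follows is a minimal, self-contained sketch of that error-handling shape only; ToyReport, ToyError, build_report and toy_get_report are hypothetical stand-ins, not prio or Gecko APIs.

    // Sketch of the error-handling shape used in the patched FFI entry points:
    // encoding is fallible, so each step bails out with `false` instead of
    // unwrapping. All names here are toy stand-ins.
    #[derive(Debug)]
    struct ToyError(&'static str);

    struct ToyReport {
        payload: Vec<u8>,
    }

    impl ToyReport {
        // Mirrors the now-fallible Report::get_encoded() from the patch.
        fn get_encoded(&self) -> Result<Vec<u8>, ToyError> {
            if self.payload.is_empty() {
                return Err(ToyError("nothing to encode"));
            }
            Ok(self.payload.clone())
        }
    }

    fn build_report(measurement: u8) -> Result<ToyReport, ToyError> {
        Ok(ToyReport { payload: vec![measurement] })
    }

    // Same control flow as dapGetReportU8 above: let-else plus early return,
    // with the successful output appended to a caller-provided buffer.
    fn toy_get_report(measurement: u8, out_report: &mut Vec<u8>) -> bool {
        let Ok(report) = build_report(measurement) else {
            eprintln!("Creating report failed!");
            return false;
        };
        let Ok(encoded) = report.get_encoded() else {
            eprintln!("Encoding report failed!");
            return false;
        };
        out_report.extend(encoded);
        true
    }

    fn main() {
        let mut out = Vec::new();
        assert!(toy_get_report(7, &mut out));
        assert_eq!(out, vec![7]);
    }
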
diff --git a/toolkit/components/telemetry/dap/ffi/src/types.rs b/toolkit/components/telemetry/dap/ffi/src/types.rs
index e8f6385dcd..c84cbf16bc 100644
--- a/toolkit/components/telemetry/dap/ffi/src/types.rs
+++ b/toolkit/components/telemetry/dap/ffi/src/types.rs
@@ -34,8 +34,9 @@ impl Decode for TaskID {
}
impl Encode for TaskID {
- fn encode(&self, bytes: &mut Vec<u8>) {
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
bytes.extend_from_slice(&self.0);
+ Ok(())
}
}
@@ -52,8 +53,9 @@ impl Decode for Time {
}
impl Encode for Time {
- fn encode(&self, bytes: &mut Vec<u8>) {
- u64::encode(&self.0, bytes);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ u64::encode(&self.0, bytes)?;
+ Ok(())
}
}
@@ -93,9 +95,10 @@ impl Decode for Extension {
}
impl Encode for Extension {
- fn encode(&self, bytes: &mut Vec<u8>) {
- (self.extension_type as u16).encode(bytes);
- encode_u16_items(bytes, &(), &self.extension_data);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ (self.extension_type as u16).encode(bytes)?;
+ encode_u16_items(bytes, &(), &self.extension_data)?;
+ Ok(())
}
}
@@ -131,9 +134,10 @@ pub struct PlaintextInputShare {
}
impl Encode for PlaintextInputShare {
- fn encode(&self, bytes: &mut Vec<u8>) {
- encode_u16_items(bytes, &(), &self.extensions);
- encode_u32_items(bytes, &(), &self.payload);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ encode_u16_items(bytes, &(), &self.extensions)?;
+ encode_u32_items(bytes, &(), &self.payload)?;
+ Ok(())
}
}
@@ -150,8 +154,9 @@ impl Decode for HpkeConfigId {
}
impl Encode for HpkeConfigId {
- fn encode(&self, bytes: &mut Vec<u8>) {
- self.0.encode(bytes);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ self.0.encode(bytes)?;
+ Ok(())
}
}
@@ -189,12 +194,13 @@ impl Decode for HpkeConfig {
}
impl Encode for HpkeConfig {
- fn encode(&self, bytes: &mut Vec<u8>) {
- self.id.encode(bytes);
- self.kem_id.encode(bytes);
- self.kdf_id.encode(bytes);
- self.aead_id.encode(bytes);
- encode_u16_items(bytes, &(), &self.public_key);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ self.id.encode(bytes)?;
+ self.kem_id.encode(bytes)?;
+ self.kdf_id.encode(bytes)?;
+ self.aead_id.encode(bytes)?;
+ encode_u16_items(bytes, &(), &self.public_key)?;
+ Ok(())
}
}
@@ -227,10 +233,11 @@ impl Decode for HpkeCiphertext {
}
impl Encode for HpkeCiphertext {
- fn encode(&self, bytes: &mut Vec<u8>) {
- self.config_id.encode(bytes);
- encode_u16_items(bytes, &(), &self.enc);
- encode_u32_items(bytes, &(), &self.payload);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ self.config_id.encode(bytes)?;
+ encode_u16_items(bytes, &(), &self.enc)?;
+ encode_u32_items(bytes, &(), &self.payload)?;
+ Ok(())
}
}
@@ -248,8 +255,9 @@ impl Decode for ReportID {
}
impl Encode for ReportID {
- fn encode(&self, bytes: &mut Vec<u8>) {
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
bytes.extend_from_slice(&self.0);
+ Ok(())
}
}
@@ -286,9 +294,10 @@ impl Decode for ReportMetadata {
}
impl Encode for ReportMetadata {
- fn encode(&self, bytes: &mut Vec<u8>) {
- self.report_id.encode(bytes);
- self.time.encode(bytes);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ self.report_id.encode(bytes)?;
+ self.time.encode(bytes)?;
+ Ok(())
}
}
@@ -307,7 +316,6 @@ pub struct Report {
pub helper_encrypted_input_share: HpkeCiphertext,
}
-
impl Report {
/// Creates a minimal report for use in tests.
pub fn new_dummy() -> Self {
@@ -321,8 +329,16 @@ impl Report {
time: Time::generate(1),
},
public_share: vec![],
- leader_encrypted_input_share: HpkeCiphertext { config_id: HpkeConfigId(5), enc: vec![1, 2, 3, 4, 5], payload: vec![6, 7, 8, 9, 10] },
- helper_encrypted_input_share: HpkeCiphertext { config_id: HpkeConfigId(100), enc: enc.into(), payload: payload.into() },
+ leader_encrypted_input_share: HpkeCiphertext {
+ config_id: HpkeConfigId(5),
+ enc: vec![1, 2, 3, 4, 5],
+ payload: vec![6, 7, 8, 9, 10],
+ },
+ helper_encrypted_input_share: HpkeCiphertext {
+ config_id: HpkeConfigId(100),
+ enc: enc.into(),
+ payload: payload.into(),
+ },
}
}
}
@@ -349,10 +365,11 @@ impl Decode for Report {
}
impl Encode for Report {
- fn encode(&self, bytes: &mut Vec<u8>) {
- self.metadata.encode(bytes);
- encode_u32_items(bytes, &(), &self.public_share);
- self.leader_encrypted_input_share.encode(bytes);
- self.helper_encrypted_input_share.encode(bytes);
+ fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError> {
+ self.metadata.encode(bytes)?;
+ encode_u32_items(bytes, &(), &self.public_share)?;
+ self.leader_encrypted_input_share.encode(bytes)?;
+ self.helper_encrypted_input_share.encode(bytes)?;
+ Ok(())
}
}
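
These types.rs hunks migrate every Encode impl to the fallible signature fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), CodecError>, with nested encodes chained via the ? operator. Below is a minimal, self-contained sketch of the same pattern; ToyEncode, ToyCodecError, ToyReportId and ToyMetadata are hypothetical stand-ins that only mirror the shape of the trait, not the real prio 0.16 types.

    // Toy version of the fallible Encode pattern adopted above.
    #[derive(Debug)]
    struct ToyCodecError;

    trait ToyEncode {
        // Same shape as the new signature in the patch.
        fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), ToyCodecError>;
    }

    struct ToyReportId([u8; 16]);

    impl ToyEncode for ToyReportId {
        fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), ToyCodecError> {
            bytes.extend_from_slice(&self.0);
            Ok(())
        }
    }

    struct ToyMetadata {
        report_id: ToyReportId,
        time: u64,
    }

    impl ToyEncode for ToyMetadata {
        // Sub-encodes are chained with `?`, like ReportMetadata above.
        fn encode(&self, bytes: &mut Vec<u8>) -> Result<(), ToyCodecError> {
            self.report_id.encode(bytes)?;
            bytes.extend_from_slice(&self.time.to_be_bytes());
            Ok(())
        }
    }

    fn main() -> Result<(), ToyCodecError> {
        let meta = ToyMetadata { report_id: ToyReportId([0; 16]), time: 1 };
        let mut bytes = Vec::new();
        meta.encode(&mut bytes)?;
        assert_eq!(bytes.len(), 16 + 8);
        Ok(())
    }
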
diff --git a/toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js b/toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js
index deb68bafef..ba973c647f 100644
--- a/toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js
+++ b/toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js
@@ -91,7 +91,7 @@ function uploadHandler(request, response) {
console.log(body.available());
Assert.equal(
true,
- body.available() == 406 || body.available() == 3654,
+ body.available() == 886 || body.available() == 3654,
"Wrong request body size."
);
received = true;
diff --git a/toolkit/components/telemetry/docs/data/environment.rst b/toolkit/components/telemetry/docs/data/environment.rst
index 9e966fe707..6b0649bda7 100644
--- a/toolkit/components/telemetry/docs/data/environment.rst
+++ b/toolkit/components/telemetry/docs/data/environment.rst
@@ -104,6 +104,7 @@ Structure:
creationDate: <integer>, // integer days since UNIX epoch, e.g. 16446
resetDate: <integer>, // integer days since UNIX epoch, e.g. 16446 - optional
firstUseDate: <integer>, // integer days since UNIX epoch, e.g. 16446 - optional
+ recoveredFromBackup: <integer>, // integer days since UNIX epoch, e.g. 16446 - optional
},
partner: { // This section may not be immediately available on startup
distributionId: <string>, // pref "distribution.id", null on failure
@@ -281,7 +282,7 @@ Structure:
hasBinaryComponents: <bool>,
installDay: <number>, // days since UNIX epoch, 0 on failure
updateDay: <number>, // days since UNIX epoch, 0 on failure
- signedState: <integer>, // whether the add-on is signed by AMO, only present for extensions
+ signedState: <integer>, // whether the add-on is signed by AMO
signedTypes: <string>, // JSON-stringified array of signature types found (see nsIAppSignatureInfo's SignatureAlgorithm enum)
isSystem: <bool>, // true if this is a System Add-on
isWebExtension: <bool>, // true if this is a WebExtension
@@ -302,6 +303,8 @@ Structure:
hasBinaryComponents: <bool>
installDay: <number>, // days since UNIX epoch, 0 on failure
updateDay: <number>, // days since UNIX epoch, 0 on failure
+ signedState: <integer>, // whether the add-on is signed by AMO
+ signedTypes: <string>, // JSON-stringified array of signature types found (see nsIAppSignatureInfo's SignatureAlgorithm enum)
},
activeGMPlugins: {
<gmp id>: {
@@ -547,6 +550,13 @@ The time of the first use of profile. If this is an old profile where we can't
determine this, this field will not be present.
It's read from a file-stored timestamp from the client's profile directory.
+recoveredFromBackup
+~~~~~~~~~~~~~~~~~~~
+
+The time that this profile was recovered from a backup. If the profile was never
+recovered from a backup, this field will not be present.
+It's read from a file-stored timestamp from the client's profile directory.
+
partner
-------
diff --git a/toolkit/components/telemetry/docs/internals/preferences.rst b/toolkit/components/telemetry/docs/internals/preferences.rst
index 0c63ae031d..1ce8647322 100644
--- a/toolkit/components/telemetry/docs/internals/preferences.rst
+++ b/toolkit/components/telemetry/docs/internals/preferences.rst
@@ -202,26 +202,6 @@ Data-choices notification
This is the only channel-specific version that we currently use for the minimum policy version.
-GeckoView
----------
-
-``toolkit.telemetry.geckoview.streaming``
-
- Whether the GeckoView mode we're running in is the variety that uses the :doc:`GeckoView Streaming Telemetry API <../internals/geckoview-streaming>` or not.
- Defaults to false.
-
-``toolkit.telemetry.geckoview.batchDurationMS``
-
- The duration in milliseconds over which :doc:`GeckoView Streaming Telemetry <../internals/geckoview-streaming>` will batch accumulations before passing it on to its delegate.
- Defaults to 5000.
-
-``toolkit.telemetry.geckoview.maxBatchStalenessMS``
-
- The maximum time (in milliseconds) between flushes of the
- :doc:`GeckoView Streaming Telemetry <../internals/geckoview-streaming>`
- batch to its delegate.
- Defaults to 60000.
-
Testing
-------
diff --git a/toolkit/components/telemetry/docs/internals/review.rst b/toolkit/components/telemetry/docs/internals/review.rst
index 80a3bd57de..d38bcfcc3b 100644
--- a/toolkit/components/telemetry/docs/internals/review.rst
+++ b/toolkit/components/telemetry/docs/internals/review.rst
@@ -67,7 +67,7 @@ Considerations for all changes
- Does this affect products more broadly than expected?
- Consider all our platforms: Windows, Mac, Linux, Android.
- - Consider all our products: Firefox, Fennec, GeckoView, Glean.
+ - Consider all our products: Firefox, Fenix, Glean.
- Does this fall afoul of common architectural failures?
diff --git a/toolkit/components/telemetry/geckoview/gtest/TestGeckoViewStreaming.cpp b/toolkit/components/telemetry/geckoview/gtest/TestGeckoViewStreaming.cpp
deleted file mode 100644
index ebaa7099a8..0000000000
--- a/toolkit/components/telemetry/geckoview/gtest/TestGeckoViewStreaming.cpp
+++ /dev/null
@@ -1,237 +0,0 @@
-/* vim:set ts=2 sw=2 sts=2 et: */
-/* Any copyright is dedicated to the Public Domain.
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "mozilla/gtest/MozAssertions.h"
-#include "mozilla/Preferences.h"
-#include "mozilla/Services.h"
-#include "mozilla/Telemetry.h"
-#include "nsIObserverService.h"
-#include "nsThreadUtils.h"
-#include "TelemetryFixture.h"
-#include "TelemetryTestHelpers.h"
-#include "streaming/GeckoViewStreamingTelemetry.h"
-
-using namespace mozilla;
-using namespace mozilla::Telemetry;
-using namespace TelemetryTestHelpers;
-using GeckoViewStreamingTelemetry::StreamingTelemetryDelegate;
-using mozilla::Telemetry::ScalarID;
-using ::testing::_;
-using ::testing::Eq;
-using ::testing::StrictMock;
-
-namespace {
-
-const char* kGeckoViewStreamingPref = "toolkit.telemetry.geckoview.streaming";
-const char* kBatchTimeoutPref = "toolkit.telemetry.geckoview.batchDurationMS";
-
-constexpr auto kTestHgramName = "TELEMETRY_TEST_STREAMING"_ns;
-constexpr auto kTestHgramName2 = "TELEMETRY_TEST_STREAMING_2"_ns;
-constexpr auto kTestCategoricalName = "TELEMETRY_TEST_CATEGORICAL_OPTOUT"_ns;
-const HistogramID kTestHgram = Telemetry::TELEMETRY_TEST_STREAMING;
-const HistogramID kTestHgram2 = Telemetry::TELEMETRY_TEST_STREAMING_2;
-
-class TelemetryStreamingFixture : public TelemetryTestFixture {
- protected:
- virtual void SetUp() {
- TelemetryTestFixture::SetUp();
- Preferences::SetBool(kGeckoViewStreamingPref, true);
- Preferences::SetInt(kBatchTimeoutPref, 5000);
- }
- virtual void TearDown() {
- TelemetryTestFixture::TearDown();
- Preferences::SetBool(kGeckoViewStreamingPref, false);
- GeckoViewStreamingTelemetry::RegisterDelegate(nullptr);
- }
-};
-
-class MockDelegate : public StreamingTelemetryDelegate {
- public:
- MOCK_METHOD2(ReceiveHistogramSamples,
- void(const nsCString& aHistogramName,
- const nsTArray<uint32_t>& aSamples));
- MOCK_METHOD2(ReceiveCategoricalHistogramSamples,
- void(const nsCString& aHistogramName,
- const nsTArray<uint32_t>& aSamples));
- MOCK_METHOD2(ReceiveBoolScalarValue,
- void(const nsCString& aScalarName, bool aValue));
- MOCK_METHOD2(ReceiveStringScalarValue,
- void(const nsCString& aScalarName, const nsCString& aValue));
- MOCK_METHOD2(ReceiveUintScalarValue,
- void(const nsCString& aScalarName, uint32_t aValue));
-}; // class MockDelegate
-
-TEST_F(TelemetryStreamingFixture, HistogramSamples) {
- const uint32_t kSampleOne = 401;
- const uint32_t kSampleTwo = 2019;
-
- CopyableTArray<uint32_t> samplesArray;
- samplesArray.AppendElement(kSampleOne);
- samplesArray.AppendElement(kSampleTwo);
-
- auto md = MakeRefPtr<MockDelegate>();
- EXPECT_CALL(*md, ReceiveHistogramSamples(Eq(kTestHgramName),
- Eq(std::move(samplesArray))));
- GeckoViewStreamingTelemetry::RegisterDelegate(md);
-
- Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_STREAMING, kSampleOne);
- Preferences::SetInt(kBatchTimeoutPref, 0);
- Telemetry::Accumulate(Telemetry::TELEMETRY_TEST_STREAMING, kSampleTwo);
-}
-
-TEST_F(TelemetryStreamingFixture, CategoricalHistogramSamples) {
- auto kSampleOne =
- Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL_OPTOUT::CommonLabel;
- auto kSampleTwo = Telemetry::LABELS_TELEMETRY_TEST_CATEGORICAL_OPTOUT::Label5;
-
- CopyableTArray<uint32_t> samplesArray;
- samplesArray.AppendElement(static_cast<uint32_t>(kSampleOne));
- samplesArray.AppendElement(static_cast<uint32_t>(kSampleOne));
- samplesArray.AppendElement(static_cast<uint32_t>(kSampleTwo));
-
- auto md = MakeRefPtr<MockDelegate>();
- EXPECT_CALL(*md, ReceiveCategoricalHistogramSamples(
- Eq(kTestCategoricalName), Eq(std::move(samplesArray))));
- GeckoViewStreamingTelemetry::RegisterDelegate(md);
-
- Telemetry::AccumulateCategorical(kSampleOne);
- Telemetry::AccumulateCategorical(kSampleOne);
- Preferences::SetInt(kBatchTimeoutPref, 0);
- Telemetry::AccumulateCategorical(kSampleTwo);
-}
-
-TEST_F(TelemetryStreamingFixture, MultipleHistograms) {
- const uint32_t kSample1 = 400;
- const uint32_t kSample2 = 1 << 31;
- const uint32_t kSample3 = 7;
- CopyableTArray<uint32_t> samplesArray1;
- samplesArray1.AppendElement(kSample1);
- samplesArray1.AppendElement(kSample2);
- CopyableTArray<uint32_t> samplesArray2;
- samplesArray2.AppendElement(kSample3);
-
- auto md = MakeRefPtr<MockDelegate>();
- EXPECT_CALL(*md, ReceiveHistogramSamples(Eq(kTestHgramName),
- Eq(std::move(samplesArray1))));
- EXPECT_CALL(*md, ReceiveHistogramSamples(Eq(kTestHgramName2),
- Eq(std::move(samplesArray2))));
-
- GeckoViewStreamingTelemetry::RegisterDelegate(md);
-
- Telemetry::Accumulate(kTestHgram, kSample1);
- Telemetry::Accumulate(kTestHgram2, kSample3);
- Preferences::SetInt(kBatchTimeoutPref, 0);
- Telemetry::Accumulate(kTestHgram, kSample2);
-}
-
-// If we can find a way to convert the expectation's arg into an stl container,
-// we can use gmock's own ::testing::UnorderedElementsAre() instead.
-auto MatchUnordered(uint32_t sample1, uint32_t sample2) {
- CopyableTArray<uint32_t> samplesArray1;
- samplesArray1.AppendElement(sample1);
- samplesArray1.AppendElement(sample2);
-
- CopyableTArray<uint32_t> samplesArray2;
- samplesArray2.AppendElement(sample2);
- samplesArray2.AppendElement(sample1);
-
- return ::testing::AnyOf(Eq(std::move(samplesArray1)),
- Eq(std::move(samplesArray2)));
-}
-
-TEST_F(TelemetryStreamingFixture, MultipleThreads) {
- const uint32_t kSample1 = 4;
- const uint32_t kSample2 = 14;
-
- auto md = MakeRefPtr<MockDelegate>();
- // In this test, samples for the second test hgram are uninteresting.
- EXPECT_CALL(*md, ReceiveHistogramSamples(Eq(kTestHgramName2), _));
- EXPECT_CALL(*md, ReceiveHistogramSamples(Eq(kTestHgramName),
- MatchUnordered(kSample1, kSample2)));
-
- GeckoViewStreamingTelemetry::RegisterDelegate(md);
-
- nsCOMPtr<nsIThread> t1;
- nsCOMPtr<nsIThread> t2;
- nsCOMPtr<nsIThread> t3;
-
- nsCOMPtr<nsIRunnable> r1 = NS_NewRunnableFunction(
- "accumulate 4", [&]() { Telemetry::Accumulate(kTestHgram, kSample1); });
- nsCOMPtr<nsIRunnable> r2 = NS_NewRunnableFunction(
- "accumulate 14", [&]() { Telemetry::Accumulate(kTestHgram, kSample2); });
-
- nsresult rv = NS_NewNamedThread("t1", getter_AddRefs(t1), r1);
- EXPECT_NS_SUCCEEDED(rv);
- rv = NS_NewNamedThread("t2", getter_AddRefs(t2), r2);
- EXPECT_NS_SUCCEEDED(rv);
-
- // Give the threads a chance to do their work.
- PR_Sleep(PR_MillisecondsToInterval(1));
-
- Preferences::SetInt(kBatchTimeoutPref, 0);
- Telemetry::Accumulate(kTestHgram2, kSample1);
-}
-
-TEST_F(TelemetryStreamingFixture, ScalarValues) {
- constexpr auto kBoolScalarName = "telemetry.test.boolean_kind"_ns;
- constexpr auto kStringScalarName = "telemetry.test.string_kind"_ns;
- constexpr auto kUintScalarName = "telemetry.test.unsigned_int_kind"_ns;
-
- const bool kBoolScalarValue = true;
- constexpr auto kStringScalarValue = "a string scalar value"_ns;
- const uint32_t kUintScalarValue = 42;
-
- auto md = MakeRefPtr<MockDelegate>();
- EXPECT_CALL(
- *md, ReceiveBoolScalarValue(Eq(kBoolScalarName), Eq(kBoolScalarValue)));
- EXPECT_CALL(*md, ReceiveStringScalarValue(Eq(kStringScalarName),
- Eq(kStringScalarValue)));
- EXPECT_CALL(
- *md, ReceiveUintScalarValue(Eq(kUintScalarName), Eq(kUintScalarValue)));
-
- GeckoViewStreamingTelemetry::RegisterDelegate(md);
-
- Telemetry::ScalarSet(ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, kBoolScalarValue);
- Telemetry::ScalarSet(ScalarID::TELEMETRY_TEST_STRING_KIND,
- NS_ConvertUTF8toUTF16(kStringScalarValue));
- Preferences::SetInt(kBatchTimeoutPref,
- 0); // Trigger batch on next accumulation.
- Telemetry::ScalarSet(ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
- kUintScalarValue);
-}
-
-TEST_F(TelemetryStreamingFixture, ExpiredHistogram) {
- const HistogramID kExpiredHistogram = Telemetry::TELEMETRY_TEST_EXPIRED;
- const uint32_t kSample = 401;
-
- // Strict Mock fails on any method calls.
- auto md = MakeRefPtr<StrictMock<MockDelegate>>();
- GeckoViewStreamingTelemetry::RegisterDelegate(md);
-
- Preferences::SetInt(kBatchTimeoutPref, 0);
- Telemetry::Accumulate(kExpiredHistogram, kSample);
-}
-
-TEST_F(TelemetryStreamingFixture, SendOnAppBackground) {
- constexpr auto kBoolScalarName = "telemetry.test.boolean_kind"_ns;
- const bool kBoolScalarValue = true;
- const char* kApplicationBackgroundTopic = "application-background";
-
- auto md = MakeRefPtr<MockDelegate>();
- EXPECT_CALL(
- *md, ReceiveBoolScalarValue(Eq(kBoolScalarName), Eq(kBoolScalarValue)));
-
- GeckoViewStreamingTelemetry::RegisterDelegate(md);
- Telemetry::ScalarSet(ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, kBoolScalarValue);
-
- nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
- ASSERT_TRUE(!!os)
- << "Observer Service unavailable?!?!";
- os->NotifyObservers(nullptr, kApplicationBackgroundTopic, nullptr);
-}
-
-} // namespace
diff --git a/toolkit/components/telemetry/geckoview/gtest/moz.build b/toolkit/components/telemetry/geckoview/gtest/moz.build
deleted file mode 100644
index eb6a2f9293..0000000000
--- a/toolkit/components/telemetry/geckoview/gtest/moz.build
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
-# vim: set filetype=python:
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, you can obtain one at http://mozilla.org/MPL/2.0/.
-
-Library("telemetrygeckoviewtest")
-
-LOCAL_INCLUDES += [
- "../",
- "../..",
- "../../..",
- "/toolkit/components/telemetry/tests/gtest",
- "/xpcom/io",
-]
-
-# GeckoView Streaming Telemetry is only available on Android.
-if CONFIG["MOZ_WIDGET_TOOLKIT"] == "android":
- UNIFIED_SOURCES += [
- "TestGeckoViewStreaming.cpp",
- ]
-
-# We need the following line otherwise including
-# "TelemetryHistogram.h" in tests will fail due to
-# missing headers.
-include("/ipc/chromium/chromium-config.mozbuild")
-
-FINAL_LIBRARY = "xul-gtest"
diff --git a/toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.cpp b/toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.cpp
deleted file mode 100644
index 6c4b9590c0..0000000000
--- a/toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.cpp
+++ /dev/null
@@ -1,282 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "GeckoViewStreamingTelemetry.h"
-
-#include "mozilla/Assertions.h"
-#include "mozilla/Services.h"
-#include "mozilla/StaticMutex.h"
-#include "mozilla/StaticPtr.h"
-#include "mozilla/StaticPrefs_toolkit.h"
-#include "mozilla/TimeStamp.h"
-#include "nsTHashMap.h"
-#include "nsIObserver.h"
-#include "nsIObserverService.h"
-#include "nsITimer.h"
-#include "nsTArray.h"
-#include "nsThreadUtils.h"
-
-using mozilla::Runnable;
-using mozilla::StaticMutex;
-using mozilla::StaticMutexAutoLock;
-using mozilla::StaticRefPtr;
-using mozilla::TimeStamp;
-
-// Batches and streams Telemetry samples to a JNI delegate which will
-// (presumably) do something with the data. Expected to be used to route data
-// up to the Android Components layer to be translated into Glean metrics.
-namespace GeckoViewStreamingTelemetry {
-
-class LifecycleObserver;
-void SendBatch(const StaticMutexAutoLock& aLock);
-
-// Topic on which we flush the batch.
-static const char* const kApplicationBackgroundTopic = "application-background";
-
-static StaticMutex gMutex MOZ_UNANNOTATED;
-
-// -- The following state is accessed across threads.
-// -- Do not touch these if you do not hold gMutex.
-
-// The time the batch began.
-TimeStamp gBatchBegan;
-// The batch of histograms and samples.
-typedef nsTHashMap<nsCStringHashKey, nsTArray<uint32_t>> HistogramBatch;
-HistogramBatch gBatch;
-HistogramBatch gCategoricalBatch;
-// The batches of Scalars and their values.
-typedef nsTHashMap<nsCStringHashKey, bool> BoolScalarBatch;
-BoolScalarBatch gBoolScalars;
-typedef nsTHashMap<nsCStringHashKey, nsCString> StringScalarBatch;
-StringScalarBatch gStringScalars;
-typedef nsTHashMap<nsCStringHashKey, uint32_t> UintScalarBatch;
-UintScalarBatch gUintScalars;
-// The delegate to receive the samples and values.
-StaticRefPtr<StreamingTelemetryDelegate> gDelegate;
-// Lifecycle observer used to flush the batch when backgrounded.
-StaticRefPtr<LifecycleObserver> gObserver;
-
-// -- End of gMutex-protected thread-unsafe-accessed data
-
-// Timer that ensures data in the batch never gets too stale.
-// This timer may only be manipulated on the Main Thread.
-StaticRefPtr<nsITimer> gJICTimer;
-
-class LifecycleObserver final : public nsIObserver {
- public:
- NS_DECL_ISUPPORTS
- NS_DECL_NSIOBSERVER
-
- LifecycleObserver() = default;
-
- protected:
- ~LifecycleObserver() = default;
-};
-
-NS_IMPL_ISUPPORTS(LifecycleObserver, nsIObserver);
-
-NS_IMETHODIMP
-LifecycleObserver::Observe(nsISupports* aSubject, const char* aTopic,
- const char16_t* aData) {
- if (!strcmp(aTopic, kApplicationBackgroundTopic)) {
- StaticMutexAutoLock lock(gMutex);
- SendBatch(lock);
- }
- return NS_OK;
-}
-
-void RegisterDelegate(const RefPtr<StreamingTelemetryDelegate>& aDelegate) {
- StaticMutexAutoLock lock(gMutex);
- gDelegate = aDelegate;
-}
-
-class SendBatchRunnable : public Runnable {
- public:
- explicit SendBatchRunnable(RefPtr<StreamingTelemetryDelegate> aDelegate,
- HistogramBatch&& aBatch,
- HistogramBatch&& aCategoricalBatch,
- BoolScalarBatch&& aBoolScalars,
- StringScalarBatch&& aStringScalars,
- UintScalarBatch&& aUintScalars)
- : Runnable("SendBatchRunnable"),
- mDelegate(std::move(aDelegate)),
- mBatch(std::move(aBatch)),
- mCategoricalBatch(std::move(aCategoricalBatch)),
- mBoolScalars(std::move(aBoolScalars)),
- mStringScalars(std::move(aStringScalars)),
- mUintScalars(std::move(aUintScalars)) {}
-
- NS_IMETHOD Run() override {
- MOZ_ASSERT(NS_IsMainThread());
- MOZ_ASSERT(mDelegate);
-
- if (gJICTimer) {
- gJICTimer->Cancel();
- }
-
- for (const auto& entry : mBatch) {
- const nsCString& histogramName = PromiseFlatCString(entry.GetKey());
- const nsTArray<uint32_t>& samples = entry.GetData();
-
- mDelegate->ReceiveHistogramSamples(histogramName, samples);
- }
- mBatch.Clear();
-
- for (const auto& entry : mCategoricalBatch) {
- const nsCString& histogramName = PromiseFlatCString(entry.GetKey());
- const nsTArray<uint32_t>& samples = entry.GetData();
-
- mDelegate->ReceiveCategoricalHistogramSamples(histogramName, samples);
- }
- mCategoricalBatch.Clear();
-
- for (const auto& entry : mBoolScalars) {
- const nsCString& scalarName = PromiseFlatCString(entry.GetKey());
- mDelegate->ReceiveBoolScalarValue(scalarName, entry.GetData());
- }
- mBoolScalars.Clear();
-
- for (const auto& entry : mStringScalars) {
- const nsCString& scalarName = PromiseFlatCString(entry.GetKey());
- const nsCString& scalarValue = PromiseFlatCString(entry.GetData());
- mDelegate->ReceiveStringScalarValue(scalarName, scalarValue);
- }
- mStringScalars.Clear();
-
- for (const auto& entry : mUintScalars) {
- const nsCString& scalarName = PromiseFlatCString(entry.GetKey());
- mDelegate->ReceiveUintScalarValue(scalarName, entry.GetData());
- }
- mUintScalars.Clear();
-
- return NS_OK;
- }
-
- private:
- RefPtr<StreamingTelemetryDelegate> mDelegate;
- HistogramBatch mBatch;
- HistogramBatch mCategoricalBatch;
- BoolScalarBatch mBoolScalars;
- StringScalarBatch mStringScalars;
- UintScalarBatch mUintScalars;
-}; // class SendBatchRunnable
-
-// Can be called on any thread.
-// NOTE: Pay special attention to what you call in this method: if it
-// accumulates to a gv-streaming-enabled probe, we will deadlock the calling
-// thread.
-void SendBatch(const StaticMutexAutoLock& aLock) {
- if (!gDelegate) {
- NS_WARNING(
- "Being asked to send Streaming Telemetry with no registered Streaming "
- "Telemetry Delegate. Will try again later.");
- // Give us another full Batch Duration to register a delegate.
- gBatchBegan = TimeStamp::Now();
- return;
- }
-
-  // So that accumulations made from within the delegate callbacks don't
-  // deadlock us, move the batches' contents into the SendBatchRunnable.
- HistogramBatch histogramCopy;
- gBatch.SwapElements(histogramCopy);
- HistogramBatch categoricalCopy;
- gCategoricalBatch.SwapElements(categoricalCopy);
- BoolScalarBatch boolScalarCopy;
- gBoolScalars.SwapElements(boolScalarCopy);
- StringScalarBatch stringScalarCopy;
- gStringScalars.SwapElements(stringScalarCopy);
- UintScalarBatch uintScalarCopy;
- gUintScalars.SwapElements(uintScalarCopy);
- RefPtr<SendBatchRunnable> runnable = new SendBatchRunnable(
- gDelegate, std::move(histogramCopy), std::move(categoricalCopy),
- std::move(boolScalarCopy), std::move(stringScalarCopy),
- std::move(uintScalarCopy));
-
- // To make things easier for the delegate, dispatch to the main thread.
- NS_DispatchToMainThread(runnable);
-}
-
-// Can be called on any thread.
-void BatchCheck(const StaticMutexAutoLock& aLock) {
- if (!gObserver) {
- gObserver = new LifecycleObserver();
- nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
- if (os) {
- os->AddObserver(gObserver, kApplicationBackgroundTopic, false);
- }
- }
- if (gBatchBegan.IsNull()) {
- // Time to begin a new batch.
- gBatchBegan = TimeStamp::Now();
- // Set a just-in-case timer to enforce an upper-bound on batch staleness.
- NS_DispatchToMainThread(NS_NewRunnableFunction(
- "GeckoviewStreamingTelemetry::ArmTimer", []() -> void {
- if (!gJICTimer) {
- gJICTimer = NS_NewTimer().take();
- }
- if (gJICTimer) {
- gJICTimer->InitWithNamedFuncCallback(
- [](nsITimer*, void*) -> void {
- StaticMutexAutoLock locker(gMutex);
- SendBatch(locker);
- },
- nullptr,
- mozilla::StaticPrefs::
- toolkit_telemetry_geckoview_maxBatchStalenessMS(),
- nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY,
- "GeckoviewStreamingTelemetry::SendBatch");
- }
- }));
- }
- double batchDurationMs = (TimeStamp::Now() - gBatchBegan).ToMilliseconds();
- if (batchDurationMs >
- mozilla::StaticPrefs::toolkit_telemetry_geckoview_batchDurationMS()) {
- SendBatch(aLock);
- gBatchBegan = TimeStamp();
- }
-}
-
-// Can be called on any thread.
-void HistogramAccumulate(const nsCString& aName, bool aIsCategorical,
- uint32_t aValue) {
- StaticMutexAutoLock lock(gMutex);
-
- if (aIsCategorical) {
- nsTArray<uint32_t>& samples = gCategoricalBatch.LookupOrInsert(aName);
- samples.AppendElement(aValue);
- } else {
- nsTArray<uint32_t>& samples = gBatch.LookupOrInsert(aName);
- samples.AppendElement(aValue);
- }
-
- BatchCheck(lock);
-}
-
-void BoolScalarSet(const nsCString& aName, bool aValue) {
- StaticMutexAutoLock lock(gMutex);
-
- gBoolScalars.InsertOrUpdate(aName, aValue);
-
- BatchCheck(lock);
-}
-
-void StringScalarSet(const nsCString& aName, const nsCString& aValue) {
- StaticMutexAutoLock lock(gMutex);
-
- gStringScalars.InsertOrUpdate(aName, aValue);
-
- BatchCheck(lock);
-}
-
-void UintScalarSet(const nsCString& aName, uint32_t aValue) {
- StaticMutexAutoLock lock(gMutex);
-
- gUintScalars.InsertOrUpdate(aName, aValue);
-
- BatchCheck(lock);
-}
-
-} // namespace GeckoViewStreamingTelemetry
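The removed SendBatch()/SendBatchRunnable pair avoids the deadlock warned about above by swapping the gMutex-protected batches into a runnable while holding the lock, then letting the delegate callbacks run later on the main thread without it. A minimal, stand-alone sketch of that pattern follows, using standard-library types rather than XPCOM ones purely for illustration:

// "Swap under the lock, deliver outside the lock": accumulations performed by
// the delegate while a batch is being delivered cannot deadlock, because the
// shared map has already been emptied by the time the delegate runs.
#include <cstdint>
#include <map>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

static std::mutex gMutex;
static std::map<std::string, std::vector<uint32_t>> gBatch;

// Any thread: record a sample under the lock.
void Accumulate(const std::string& aName, uint32_t aValue) {
  std::lock_guard<std::mutex> lock(gMutex);
  gBatch[aName].push_back(aValue);
}

// Stand-in for SendBatchRunnable::Run(): in Gecko this runs on the main
// thread and forwards each entry to the registered delegate.
void Deliver(std::map<std::string, std::vector<uint32_t>> aBatch) {
  for (const auto& entry : aBatch) {
    // e.g. delegate->ReceiveHistogramSamples(name, samples);
    (void)entry;
  }
}

// Swap the shared batch out while holding the lock, then deliver it without
// the lock held (Gecko dispatches a runnable to the main thread instead).
void SendBatch() {
  std::map<std::string, std::vector<uint32_t>> copy;
  {
    std::lock_guard<std::mutex> lock(gMutex);
    gBatch.swap(copy);
  }
  Deliver(std::move(copy));
}

int main() {
  Accumulate("GV_STREAMING_EXAMPLE", 1);
  Accumulate("GV_STREAMING_EXAMPLE", 2);
  SendBatch();
  return 0;
}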
diff --git a/toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.h b/toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.h
deleted file mode 100644
index 458224a3c2..0000000000
--- a/toolkit/components/telemetry/geckoview/streaming/GeckoViewStreamingTelemetry.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim: set ts=8 sts=2 et sw=2 tw=80: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef GeckoViewStreamingTelemetry_h__
-#define GeckoViewStreamingTelemetry_h__
-
-#include "mozilla/Assertions.h"
-#include "mozilla/RefPtr.h"
-#include "nsISupports.h"
-#include "nsStringFwd.h"
-#include "nsTArray.h"
-
-#include <cstdint>
-
-namespace GeckoViewStreamingTelemetry {
-
-void HistogramAccumulate(const nsCString& aName, bool aIsCategorical,
- uint32_t aValue);
-
-void BoolScalarSet(const nsCString& aName, bool aValue);
-void StringScalarSet(const nsCString& aName, const nsCString& aValue);
-void UintScalarSet(const nsCString& aName, uint32_t aValue);
-
-// Classes wishing to receive Streaming Telemetry must implement this interface
-// and register themselves via RegisterDelegate.
-class StreamingTelemetryDelegate {
- public:
- NS_INLINE_DECL_THREADSAFE_REFCOUNTING(StreamingTelemetryDelegate)
-
- // Receive* methods will be called from time to time on the main thread.
- virtual void ReceiveHistogramSamples(const nsCString& aName,
- const nsTArray<uint32_t>& aSamples) = 0;
- virtual void ReceiveCategoricalHistogramSamples(
- const nsCString& aName, const nsTArray<uint32_t>& aSamples) = 0;
- virtual void ReceiveBoolScalarValue(const nsCString& aName, bool aValue) = 0;
- virtual void ReceiveStringScalarValue(const nsCString& aName,
- const nsCString& aValue) = 0;
- virtual void ReceiveUintScalarValue(const nsCString& aName,
- uint32_t aValue) = 0;
-
- protected:
- virtual ~StreamingTelemetryDelegate() = default;
-};
-
-// Registers the provided StreamingTelemetryDelegate to receive Streaming
-// Telemetry, overwriting any previous delegate registration.
-// Call on any thread.
-void RegisterDelegate(const RefPtr<StreamingTelemetryDelegate>& aDelegate);
-
-} // namespace GeckoViewStreamingTelemetry
-
-#endif // GeckoViewStreamingTelemetry_h__
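The functions declared in the header above formed the entire public surface of GeckoView streaming telemetry. A hedged usage sketch of that surface follows; the function name ExampleUsage, the metric names, and the myDelegate parameter are invented for illustration, while the calls themselves are the declarations removed above:

// Illustrative only: per the removed header, these calls were legal from any
// thread, and batched values arrived on the main thread via the registered
// delegate's Receive* methods.
#include "GeckoViewStreamingTelemetry.h"
#include "mozilla/RefPtr.h"
#include "nsString.h"

void ExampleUsage(
    const RefPtr<GeckoViewStreamingTelemetry::StreamingTelemetryDelegate>&
        myDelegate) {
  GeckoViewStreamingTelemetry::RegisterDelegate(myDelegate);
  GeckoViewStreamingTelemetry::HistogramAccumulate(
      "example.histogram"_ns, /* aIsCategorical */ false, 42);
  GeckoViewStreamingTelemetry::BoolScalarSet("example.bool_scalar"_ns, true);
  GeckoViewStreamingTelemetry::UintScalarSet("example.uint_scalar"_ns, 7);
  GeckoViewStreamingTelemetry::StringScalarSet("example.string_scalar"_ns,
                                               "value"_ns);
}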
diff --git a/toolkit/components/telemetry/geckoview/streaming/metrics.yaml b/toolkit/components/telemetry/geckoview/streaming/metrics.yaml
deleted file mode 100644
index 2386fb2171..0000000000
--- a/toolkit/components/telemetry/geckoview/streaming/metrics.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-# This file defines the metrics that are recorded by the Glean SDK. They are
-# automatically converted to platform-specific code at build time using the
-# `glean_parser` PyPI package.
-
-# Adding a new metric? Please don't!
-# (At least not without the permission of a Telemetry Module Peer)
-
----
-$schema: moz://mozilla.org/schemas/glean/metrics/2-0-0
diff --git a/toolkit/components/telemetry/moz.build b/toolkit/components/telemetry/moz.build
index 718120cf78..6da23fa372 100644
--- a/toolkit/components/telemetry/moz.build
+++ b/toolkit/components/telemetry/moz.build
@@ -30,9 +30,7 @@ with Files("docs/**"):
SCHEDULES.exclusive = ["docs"]
if CONFIG["ENABLE_TESTS"]:
- # We used to need GeckoView tests as a separate directory. This
- # is no longer true and we could probably move it to tests/gtest.
- DIRS += ["geckoview/gtest", "tests/gtest"]
+ DIRS += ["tests/gtest"]
TEST_DIRS += ["tests", "dap/ffi-gtest"]
@@ -78,7 +76,6 @@ SOURCES += [
"core/TelemetryScalar.cpp",
"core/TelemetryUserInteraction.cpp",
"dap/DAPTelemetry.cpp",
- "geckoview/streaming/GeckoViewStreamingTelemetry.cpp",
"other/CombinedStacks.cpp",
"other/ProcessedStack.cpp",
"other/TelemetryIOInterposeObserver.cpp",
@@ -244,18 +241,6 @@ GeneratedFile(
inputs=processes_files,
)
-# Ensure that the GeckoView metrics file parses correctly prior to it
-# being released in Android components. This triggers glean_parser by
-# pretending to generate a file named 'glean_checks'.
-# While this currently only applies to Android, in the medium-term it
-# is going to generate code for Firefox as well (project FOG).
-# Prior art for this was in bug 1063728, within SpiderMonkey tests.
-GeneratedFile(
- "glean_checks",
- script="build_scripts/run_glean_parser.py",
- inputs=["geckoview/streaming/metrics.yaml"],
-)
-
# Generate UserInteraction file
userinteraction_files = [
"UserInteractions.yaml",
diff --git a/toolkit/components/telemetry/tests/gtest/TestScalars.cpp b/toolkit/components/telemetry/tests/gtest/TestScalars.cpp
index 201641e77a..3828cbdcd8 100644
--- a/toolkit/components/telemetry/tests/gtest/TestScalars.cpp
+++ b/toolkit/components/telemetry/tests/gtest/TestScalars.cpp
@@ -395,68 +395,6 @@ TEST_F(TelemetryTestFixture, ScalarEventSummary_Dynamic) {
scalarsSnapshot, 2);
}
-TEST_F(TelemetryTestFixture, WrongScalarOperator) {
- AutoJSContextWithGlobal cx(mCleanGlobal);
-
- // Make sure we don't get scalars from other tests.
- Unused << mTelemetry->ClearScalars();
-
- const uint32_t expectedValue = 1172015;
-
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
- expectedValue);
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND,
- NS_LITERAL_STRING_FROM_CSTRING(EXPECTED_STRING));
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, true);
-
- TelemetryScalar::DeserializationStarted();
-
- Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_STRING_KIND, 1447);
- Telemetry::ScalarAdd(Telemetry::ScalarID::TELEMETRY_TEST_BOOLEAN_KIND, 1447);
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_UNSIGNED_INT_KIND,
- true);
- TelemetryScalar::ApplyPendingOperations();
-
- JS::Rooted<JS::Value> scalarsSnapshot(cx.GetJSContext());
- GetScalarsSnapshot(false, cx.GetJSContext(), &scalarsSnapshot);
- CheckStringScalar("telemetry.test.string_kind", cx.GetJSContext(),
- scalarsSnapshot, EXPECTED_STRING);
- CheckBoolScalar("telemetry.test.boolean_kind", cx.GetJSContext(),
- scalarsSnapshot, true);
- CheckUintScalar("telemetry.test.unsigned_int_kind", cx.GetJSContext(),
- scalarsSnapshot, expectedValue);
-}
-
-TEST_F(TelemetryTestFixture, WrongKeyedScalarOperator) {
- AutoJSContextWithGlobal cx(mCleanGlobal);
-
- // Make sure we don't get scalars from other tests.
- Unused << mTelemetry->ClearScalars();
-
- const uint32_t kExpectedUint = 1172017;
-
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
- u"key1"_ns, kExpectedUint);
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
- u"key2"_ns, true);
-
- TelemetryScalar::DeserializationStarted();
-
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_UNSIGNED_INT,
- u"key1"_ns, false);
- Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_KEYED_BOOLEAN_KIND,
- u"key2"_ns, static_cast<uint32_t>(13));
-
- TelemetryScalar::ApplyPendingOperations();
-
- JS::Rooted<JS::Value> scalarsSnapshot(cx.GetJSContext());
- GetScalarsSnapshot(true, cx.GetJSContext(), &scalarsSnapshot);
- CheckKeyedUintScalar("telemetry.test.keyed_unsigned_int", "key1",
- cx.GetJSContext(), scalarsSnapshot, kExpectedUint);
- CheckKeyedBoolScalar("telemetry.test.keyed_boolean_kind", "key2",
- cx.GetJSContext(), scalarsSnapshot, true);
-}
-
TEST_F(TelemetryTestFixture, TestKeyedScalarAllowedKeys) {
AutoJSContextWithGlobal cx(mCleanGlobal);
// Make sure we don't get scalars from other tests.
diff --git a/toolkit/components/telemetry/tests/python/test_histogramtools_strict.py b/toolkit/components/telemetry/tests/python/test_histogramtools_strict.py
index 2109cd7d35..28d4646dfd 100644
--- a/toolkit/components/telemetry/tests/python/test_histogramtools_strict.py
+++ b/toolkit/components/telemetry/tests/python/test_histogramtools_strict.py
@@ -485,53 +485,6 @@ class TestParser(unittest.TestCase):
)
self.assertRaises(SystemExit, ParserError.exit_func)
- def test_gv_streaming_unsupported_kind(self):
- SAMPLE_HISTOGRAM = {
- "TEST_HISTOGRAM_GV_STREAMING": {
- "record_in_processes": ["main", "content"],
- "alert_emails": ["team@mozilla.xyz"],
- "bug_numbers": [1383793],
- "expires_in_version": "never",
- "kind": "boolean",
- "description": "Test histogram",
- "products": ["geckoview_streaming"],
- }
- }
- histograms = load_histogram(SAMPLE_HISTOGRAM)
- parse_histograms.load_allowlist()
- parse_histograms.Histogram(
- "TEST_HISTOGRAM_GV_STREAMING",
- histograms["TEST_HISTOGRAM_GV_STREAMING"],
- strict_type_checks=True,
- )
- self.assertRaises(SystemExit, ParserError.exit_func)
-
- def test_gv_streaming_keyed(self):
- SAMPLE_HISTOGRAM = {
- "TEST_HISTOGRAM_GV_STREAMING": {
- "record_in_processes": ["main", "content"],
- "alert_emails": ["team@mozilla.xyz"],
- "bug_numbers": [1383793],
- "expires_in_version": "never",
- "kind": "exponential",
- "low": 1024,
- "high": 2**64,
- "n_buckets": 100,
- "keyed": "true",
- "description": "Test histogram",
- "products": ["geckoview_streaming"],
- }
- }
- histograms = load_histogram(SAMPLE_HISTOGRAM)
- parse_histograms.load_allowlist()
- parse_histograms.Histogram(
- "TEST_HISTOGRAM_GV_STREAMING",
- histograms["TEST_HISTOGRAM_GV_STREAMING"],
- strict_type_checks=True,
- )
-
- self.assertRaises(SystemExit, ParserError.exit_func)
-
def test_enumerated_histogram_with_100_buckets(self):
SAMPLE_HISTOGRAM = {
"TEST_100_BUCKETS_HISTOGRAM": {
diff --git a/toolkit/components/telemetry/tests/python/test_parse_events.py b/toolkit/components/telemetry/tests/python/test_parse_events.py
index 0b7b91efcc..d6c7859eee 100644
--- a/toolkit/components/telemetry/tests/python/test_parse_events.py
+++ b/toolkit/components/telemetry/tests/python/test_parse_events.py
@@ -145,22 +145,6 @@ expiry_version: never
),
)
- def test_geckoview_streaming_product(self):
- SAMPLE_EVENT = """
-methods: ["method1", "method2"]
-objects: ["object1", "object2"]
-bug_numbers: [12345]
-notification_emails: ["test01@mozilla.com", "test02@mozilla.com"]
-record_in_processes: ["content"]
-description: This is a test entry for Telemetry.
-products: ["geckoview_streaming"]
-expiry_version: never
-"""
- event = load_event(SAMPLE_EVENT)
- parse_events.EventData("CATEGORY", "test_event", event, strict_type_checks=True)
-
- self.assertRaises(SystemExit, ParserError.exit_func)
-
if __name__ == "__main__":
mozunit.main()
diff --git a/toolkit/components/telemetry/tests/python/test_parse_scalars.py b/toolkit/components/telemetry/tests/python/test_parse_scalars.py
index c699cdb4d8..315c920a4a 100644
--- a/toolkit/components/telemetry/tests/python/test_parse_scalars.py
+++ b/toolkit/components/telemetry/tests/python/test_parse_scalars.py
@@ -243,25 +243,6 @@ bug_numbers:
parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
self.assertRaises(SystemExit, ParserError.exit_func)
- def test_gv_streaming_keyed(self):
- SAMPLE_SCALAR = """
-description: A nice one-line description.
-expires: never
-record_in_processes:
- - 'main'
-kind: uint
-notification_emails:
- - test01@mozilla.com
-products: ['geckoview_streaming']
-keyed: true
-bug_numbers:
- - 12345
-"""
-
- scalar = load_scalar(SAMPLE_SCALAR)
- parse_scalars.ScalarType("CATEGORY", "PROVE", scalar, strict_type_checks=True)
- self.assertRaises(SystemExit, ParserError.exit_func)
-
if __name__ == "__main__":
mozunit.main()
diff --git a/toolkit/components/telemetry/tests/unit/TelemetryEnvironmentTesting.sys.mjs b/toolkit/components/telemetry/tests/unit/TelemetryEnvironmentTesting.sys.mjs
index 0df6dfc2c1..1a4015df36 100644
--- a/toolkit/components/telemetry/tests/unit/TelemetryEnvironmentTesting.sys.mjs
+++ b/toolkit/components/telemetry/tests/unit/TelemetryEnvironmentTesting.sys.mjs
@@ -40,6 +40,8 @@ const PROFILE_RESET_DATE_MS = Date.now();
// The profile creation date, in milliseconds (Yesterday).
const PROFILE_FIRST_USE_MS = PROFILE_RESET_DATE_MS - MILLISECONDS_PER_DAY;
const PROFILE_CREATION_DATE_MS = PROFILE_FIRST_USE_MS - MILLISECONDS_PER_DAY;
+const PROFILE_RECOVERED_FROM_BACKUP =
+ PROFILE_RESET_DATE_MS - MILLISECONDS_PER_HOUR;
const GFX_VENDOR_ID = "0xabcd";
const GFX_DEVICE_ID = "0x1234";
@@ -126,6 +128,7 @@ export var TelemetryEnvironmentTesting = {
created: PROFILE_CREATION_DATE_MS,
reset: PROFILE_RESET_DATE_MS,
firstUse: PROFILE_FIRST_USE_MS,
+ recoveredFromBackup: PROFILE_RECOVERED_FROM_BACKUP,
}
);
},
@@ -392,6 +395,10 @@ export var TelemetryEnvironmentTesting = {
data.profile.firstUseDate,
truncateToDays(PROFILE_FIRST_USE_MS)
);
+ lazy.Assert.equal(
+ data.profile.recoveredFromBackup,
+ truncateToDays(PROFILE_RECOVERED_FROM_BACKUP)
+ );
},
checkPartnerSection(data, isInitial) {
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js b/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js
index 9990d83fdf..ca43772a07 100644
--- a/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js
@@ -712,6 +712,64 @@ add_task(async function test_addons() {
await addon.uninstall();
});
+add_task(async function test_signedTheme() {
+ AddonTestUtils.useRealCertChecks = true;
+
+ const { PKCS7_WITH_SHA1, COSE_WITH_SHA256 } = Ci.nsIAppSignatureInfo;
+
+ const ADDON_THEME_INSTALL_URL = gDataRoot + "webext-implicit-id.xpi";
+ const ADDON_THEME_ID = "{46607a7b-1b2a-40ce-9afe-91cda52c46a6}";
+
+ // Install the theme.
+ let deferred = Promise.withResolvers();
+ TelemetryEnvironment.registerChangeListener(
+ "test_signedAddon",
+ deferred.resolve
+ );
+ let theme = await installXPIFromURL(ADDON_THEME_INSTALL_URL);
+ await theme.enable();
+ ok(theme.isActive, "Theme should be active");
+
+  // Install an extension to force the telemetry environment to be
+  // updated (currently changes to theme add-ons do not seem to be
+  // notified as changes; see the EnvironmentAddonBuilder _updateAddons
+  // method for how changes to the environment.addons property are
+  // detected).
+ const ADDON_INSTALL_URL = gDataRoot + "amosigned.xpi";
+ let addon = await installXPIFromURL(ADDON_INSTALL_URL);
+
+ await deferred.promise;
+ TelemetryEnvironment.unregisterChangeListener("test_signedAddon");
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ TelemetryEnvironmentTesting.checkEnvironmentData(data);
+
+ // Check signedState and signedTypes on active theme data
+ // (NOTE: other properties of active theme are technically
+ // not covered by any other test task in this xpcshell test).
+ Assert.equal(
+ data.addons.theme.id,
+ ADDON_THEME_ID,
+ "Theme should be in the environment."
+ );
+ Assert.equal(
+ data.addons.theme.signedState,
+ AddonManager.SIGNEDSTATE_SIGNED,
+ "Got expected signedState on activeTheme"
+ );
+ Assert.equal(
+ data.addons.theme.signedTypes,
+ JSON.stringify([COSE_WITH_SHA256, PKCS7_WITH_SHA1]),
+ "Got expected signedTypes on activeTheme"
+ );
+
+ AddonTestUtils.useRealCertChecks = false;
+ await addon.startupPromise;
+ await addon.uninstall();
+ await theme.startupPromise;
+ await theme.uninstall();
+});
+
add_task(async function test_signedAddon() {
AddonTestUtils.useRealCertChecks = true;
diff --git a/toolkit/components/telemetry/tests/unit/xpcshell.toml b/toolkit/components/telemetry/tests/unit/xpcshell.toml
index 0c660888ed..ea5f6f418c 100644
--- a/toolkit/components/telemetry/tests/unit/xpcshell.toml
+++ b/toolkit/components/telemetry/tests/unit/xpcshell.toml
@@ -18,6 +18,7 @@ support-files = [
"testNoPDBAArch64.dll",
"!/toolkit/mozapps/extensions/test/xpcshell/head_addons.js",
"../../../../mozapps/extensions/test/xpinstall/amosigned.xpi",
+ "../../../../mozapps/extensions/test/xpcshell/data/webext-implicit-id.xpi",
]
generated-files = [
"system.xpi",