Diffstat (limited to 'toolkit/components/telemetry/dap')
18 files changed, 1974 insertions, 0 deletions
diff --git a/toolkit/components/telemetry/dap/DAPTelemetry.cpp b/toolkit/components/telemetry/dap/DAPTelemetry.cpp new file mode 100644 index 0000000000..31d634e47b --- /dev/null +++ b/toolkit/components/telemetry/dap/DAPTelemetry.cpp @@ -0,0 +1,223 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "DAPTelemetryBindings.h" + +#include "mozilla/Logging.h" +#include "nsPromiseFlatString.h" + +#include "nss.h" +#include "nsNSSComponent.h" +#include "secmodt.h" +#include "pk11pub.h" +#include "ScopedNSSTypes.h" + +static mozilla::LazyLogModule sLogger("DAPTelemetry"); +#undef LOG +#define LOG(...) MOZ_LOG(sLogger, mozilla::LogLevel::Debug, (__VA_ARGS__)) + +namespace mozilla { + +NS_IMPL_ISUPPORTS(DAPTelemetry, nsIDAPTelemetry) + +// This function was copied from pk11_hpke_unittest.cc +// And modified to take a Span. +static std::vector<uint8_t> Pkcs8(Span<const uint8_t> sk, + Span<const uint8_t> pk) { + // Only X25519 format. + std::vector<uint8_t> v(105); + v.assign({ + 0x30, 0x67, 0x02, 0x01, 0x00, 0x30, 0x14, 0x06, 0x07, 0x2a, 0x86, 0x48, + 0xce, 0x3d, 0x02, 0x01, 0x06, 0x09, 0x2b, 0x06, 0x01, 0x04, 0x01, 0xda, + 0x47, 0x0f, 0x01, 0x04, 0x4c, 0x30, 0x4a, 0x02, 0x01, 0x01, 0x04, 0x20, + }); + v.insert(v.end(), sk.begin(), sk.end()); + v.insert(v.end(), { + 0xa1, + 0x23, + 0x03, + 0x21, + 0x00, + }); + v.insert(v.end(), pk.begin(), pk.end()); + return v; +} + +// This function was copied from cpputil.h +static unsigned char* toUcharPtr(const uint8_t* v) { + return const_cast<unsigned char*>(static_cast<const unsigned char*>(v)); +} + +/// If successful this returns a pointer to a HpkeContext which must be +/// released using dapDestroyHpkeContext or PK11_HPKE_DestroyContext. +HpkeContext* dapSetupHpkeContextInternal( + const uint8_t* aKey, uint32_t aKeyLength, const uint8_t* aInfo, + uint32_t aInfoLength, SECKEYPublicKey* aPkE, SECKEYPrivateKey* aSkE, + nsTArray<uint8_t>* aOutputEncapsulatedKey) { + SECStatus status = PK11_HPKE_ValidateParameters( + HpkeDhKemX25519Sha256, HpkeKdfHkdfSha256, HpkeAeadAes128Gcm); + if (status != SECSuccess) { + MOZ_LOG(sLogger, mozilla::LogLevel::Error, + ("Invalid HKPE parameters found.")); + return nullptr; + } + + UniqueHpkeContext context( + PK11_HPKE_NewContext(HpkeDhKemX25519Sha256, HpkeKdfHkdfSha256, + HpkeAeadAes128Gcm, nullptr, nullptr)); + + SECKEYPublicKey* pkR_raw = nullptr; + status = PK11_HPKE_Deserialize(context.get(), aKey, aKeyLength, &pkR_raw); + UniqueSECKEYPublicKey pkR(pkR_raw); + pkR_raw = nullptr; + if (status != SECSuccess) { + MOZ_LOG(sLogger, mozilla::LogLevel::Error, + ("Failed to deserialize HPKE encryption key.")); + return nullptr; + } + + const SECItem hpkeInfo = {siBuffer, toUcharPtr(aInfo), aInfoLength}; + + status = PK11_HPKE_SetupS(context.get(), aPkE, aSkE, pkR.get(), &hpkeInfo); + if (status != SECSuccess) { + MOZ_LOG(sLogger, mozilla::LogLevel::Error, ("HPKE setup failed.")); + return nullptr; + } + + const SECItem* hpkeEncapKey = PK11_HPKE_GetEncapPubKey(context.get()); + if (!hpkeEncapKey) { + MOZ_LOG(sLogger, mozilla::LogLevel::Error, + ("Failed to get HPKE encapsulated public key.")); + return nullptr; + } + + aOutputEncapsulatedKey->AppendElements(hpkeEncapKey->data, hpkeEncapKey->len); + + return context.release(); +} + +extern "C" { +/// Takes additional ephemeral keys to make everything deterministic for test +/// vectors. 
+/// If successful this returns a pointer to a HpkeContext which must be +/// released using dapDestroyHpkeContext or PK11_HPKE_DestroyContext. +HpkeContext* dapSetupHpkeContextForTesting( + const uint8_t* aKey, uint32_t aKeyLength, const uint8_t* aInfo, + uint32_t aInfoLength, const uint8_t* aPkEm, uint32_t aPkEmLength, + const uint8_t* aSkEm, uint32_t aSkEmLength, + nsTArray<uint8_t>* aOutputEncapsulatedKey) { + Span<const uint8_t> sk_e(aSkEm, aSkEm + aSkEmLength); + Span<const uint8_t> pk_e(aPkEm, aPkEm + aPkEmLength); + std::vector<uint8_t> pkcs8_e = Pkcs8(sk_e, pk_e); + + MOZ_RELEASE_ASSERT(EnsureNSSInitializedChromeOrContent(), + "Could not initialize NSS."); + + UniquePK11SlotInfo slot(PK11_GetInternalSlot()); + MOZ_RELEASE_ASSERT(slot, "Failed to get slot."); + + SECItem keys_e = {siBuffer, toUcharPtr(pkcs8_e.data()), + static_cast<unsigned int>(pkcs8_e.size())}; + SECKEYPrivateKey* internal_skE_raw = nullptr; + SECStatus rv = PK11_ImportDERPrivateKeyInfoAndReturnKey( + slot.get(), &keys_e, nullptr, nullptr, false, false, KU_ALL, + &internal_skE_raw, nullptr); + UniqueSECKEYPrivateKey internal_skE(internal_skE_raw); + internal_skE_raw = nullptr; + MOZ_RELEASE_ASSERT(rv == SECSuccess, "Failed to import skE/pkE."); + + UniqueSECKEYPublicKey internal_pkE( + SECKEY_ConvertToPublicKey(internal_skE.get())); + + UniqueHpkeContext result(dapSetupHpkeContextInternal( + aKey, aKeyLength, aInfo, aInfoLength, internal_pkE.get(), + internal_skE.get(), aOutputEncapsulatedKey)); + + return result.release(); +} + +void dapDestroyHpkeContext(HpkeContext* aContext) { + PK11_HPKE_DestroyContext(aContext, true); +} + +bool dapHpkeEncrypt(HpkeContext* aContext, const uint8_t* aAad, + uint32_t aAadLength, const uint8_t* aPlaintext, + uint32_t aPlaintextLength, + nsTArray<uint8_t>* aOutputShare) { + SECItem aad_si = {siBuffer, toUcharPtr(aAad), aAadLength}; + SECItem plaintext_si = {siBuffer, toUcharPtr(aPlaintext), aPlaintextLength}; + SECItem* chCt = nullptr; + SECStatus rv = PK11_HPKE_Seal(aContext, &aad_si, &plaintext_si, &chCt); + if (rv != SECSuccess) { + return false; + } + UniqueSECItem ct(chCt); + + aOutputShare->AppendElements(ct->data, ct->len); + return true; +} + +bool dapHpkeEncryptOneshot(const uint8_t* aKey, uint32_t aKeyLength, + const uint8_t* aInfo, uint32_t aInfoLength, + const uint8_t* aAad, uint32_t aAadLength, + const uint8_t* aPlaintext, uint32_t aPlaintextLength, + nsTArray<uint8_t>* aOutputEncapsulatedKey, + nsTArray<uint8_t>* aOutputShare) { + MOZ_RELEASE_ASSERT(EnsureNSSInitializedChromeOrContent(), + "Could not initialize NSS."); + UniqueHpkeContext context( + dapSetupHpkeContextInternal(aKey, aKeyLength, aInfo, aInfoLength, nullptr, + nullptr, aOutputEncapsulatedKey)); + if (!context) { + return false; + } + + return dapHpkeEncrypt(context.get(), aAad, aAadLength, aPlaintext, + aPlaintextLength, aOutputShare); +} +} + +NS_IMETHODIMP DAPTelemetry::GetReportU8( + const nsTArray<uint8_t>& aLeaderHpkeConfig, + const nsTArray<uint8_t>& aHelperHpkeConfig, uint8_t aMeasurement, + const nsTArray<uint8_t>& aTaskID, const uint64_t aTimePrecision, + nsTArray<uint8_t>& aOutReport) { + MOZ_RELEASE_ASSERT(aTaskID.Length() == 32, "TaskID must have 32 bytes."); + if (!dapGetReportU8(&aLeaderHpkeConfig, &aHelperHpkeConfig, aMeasurement, + &aTaskID, aTimePrecision, &aOutReport)) { + return NS_ERROR_FAILURE; + } + + return NS_OK; +} + +NS_IMETHODIMP DAPTelemetry::GetReportVecU8( + const nsTArray<uint8_t>& aLeaderHpkeConfig, + const nsTArray<uint8_t>& aHelperHpkeConfig, + const 
nsTArray<uint8_t>& aMeasurement, const nsTArray<uint8_t>& aTaskID, + const uint64_t aTimePrecision, nsTArray<uint8_t>& aOutReport) { + MOZ_RELEASE_ASSERT(aTaskID.Length() == 32, "TaskID must have 32 bytes."); + if (!dapGetReportVecU8(&aLeaderHpkeConfig, &aHelperHpkeConfig, &aMeasurement, + &aTaskID, aTimePrecision, &aOutReport)) { + return NS_ERROR_FAILURE; + } + + return NS_OK; +} + +NS_IMETHODIMP DAPTelemetry::GetReportVecU16( + const nsTArray<uint8_t>& aLeaderHpkeConfig, + const nsTArray<uint8_t>& aHelperHpkeConfig, + const nsTArray<uint16_t>& aMeasurement, const nsTArray<uint8_t>& aTaskID, + const uint64_t aTimePrecision, nsTArray<uint8_t>& aOutReport) { + MOZ_RELEASE_ASSERT(aTaskID.Length() == 32, "TaskID must have 32 bytes."); + if (!dapGetReportVecU16(&aLeaderHpkeConfig, &aHelperHpkeConfig, &aMeasurement, + &aTaskID, aTimePrecision, &aOutReport)) { + return NS_ERROR_FAILURE; + } + + return NS_OK; +} + +} // namespace mozilla diff --git a/toolkit/components/telemetry/dap/DAPTelemetry.h b/toolkit/components/telemetry/dap/DAPTelemetry.h new file mode 100644 index 0000000000..b6a0c8c17f --- /dev/null +++ b/toolkit/components/telemetry/dap/DAPTelemetry.h @@ -0,0 +1,25 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef mozilla_nsIDAPTelemetry_h__ +#define mozilla_nsIDAPTelemetry_h__ + +#include "nsIDAPTelemetry.h" + +namespace mozilla { + +class DAPTelemetry final : public nsIDAPTelemetry { + NS_DECL_ISUPPORTS + + NS_DECL_NSIDAPTELEMETRY + + public: + DAPTelemetry() = default; + + private: + ~DAPTelemetry() = default; +}; + +} // namespace mozilla + +#endif diff --git a/toolkit/components/telemetry/dap/DAPTelemetryBindings.h b/toolkit/components/telemetry/dap/DAPTelemetryBindings.h new file mode 100644 index 0000000000..bf62d7c0bc --- /dev/null +++ b/toolkit/components/telemetry/dap/DAPTelemetryBindings.h @@ -0,0 +1,11 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DAPTelemetryBindings_h +#define DAPTelemetryBindings_h + +#include "DAPTelemetry.h" +#include "mozilla/dap_ffi_generated.h" + +#endif // DAPTelemetryBindings_h diff --git a/toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs b/toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs new file mode 100644 index 0000000000..0a2516ad1f --- /dev/null +++ b/toolkit/components/telemetry/dap/DAPTelemetrySender.sys.mjs @@ -0,0 +1,335 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs"; +import { HPKEConfigManager } from "resource://gre/modules/HPKEConfigManager.sys.mjs"; + +let lazy = {}; + +ChromeUtils.defineLazyGetter(lazy, "logConsole", function () { + return console.createInstance({ + prefix: "DAPTelemetrySender", + maxLogLevelPref: "toolkit.telemetry.dap.logLevel", + }); +}); +ChromeUtils.defineESModuleGetters(lazy, { + AsyncShutdown: "resource://gre/modules/AsyncShutdown.sys.mjs", + NimbusFeatures: "resource://nimbus/ExperimentAPI.sys.mjs", + DAPVisitCounter: "resource://gre/modules/DAPVisitCounter.sys.mjs", + setTimeout: "resource://gre/modules/Timer.sys.mjs", +}); + +const PREF_LEADER = "toolkit.telemetry.dap_leader"; +const PREF_HELPER = "toolkit.telemetry.dap_helper"; + +XPCOMUtils.defineLazyPreferenceGetter(lazy, "LEADER", PREF_LEADER, undefined); +XPCOMUtils.defineLazyPreferenceGetter(lazy, "HELPER", PREF_HELPER, undefined); + +/** + * The purpose of this singleton is to handle sending of DAP telemetry data. + * The current DAP draft standard is available here: + * https://github.com/ietf-wg-ppm/draft-ietf-ppm-dap + * + * The specific purpose of this singleton is to make the necessary calls to fetch to do networking. + */ + +export const DAPTelemetrySender = new (class { + startup() { + lazy.logConsole.debug("Performing DAP startup"); + + if (lazy.NimbusFeatures.dapTelemetry.getVariable("visitCountingEnabled")) { + lazy.DAPVisitCounter.startup(); + } + + if (lazy.NimbusFeatures.dapTelemetry.getVariable("task1Enabled")) { + let tasks = []; + lazy.logConsole.debug("Task 1 is enabled."); + let task1_id = + lazy.NimbusFeatures.dapTelemetry.getVariable("task1TaskId"); + if (task1_id !== undefined && task1_id != "") { + /** @typedef { 'u8' | 'vecu8' | 'vecu16' } measurementtype */ + + /** + * @typedef {object} Task + * @property {string} id - The task ID, base 64 encoded. + * @property {string} leader_endpoint - Base URL for the leader. + * @property {string} helper_endpoint - Base URL for the helper. + * @property {number} time_precision - Timestamps (in s) are rounded to the nearest multiple of this. + * @property {measurementtype} measurement_type - Defines measurements and aggregations used by this task. Effectively specifying the VDAF. 
+ */ + let task = { + // this is testing task 1 + id: task1_id, + leader_endpoint: null, + helper_endpoint: null, + time_precision: 300, + measurement_type: "vecu8", + }; + tasks.push(task); + + lazy.setTimeout( + () => this.timedSendTestReports(tasks), + this.timeout_value() + ); + + lazy.NimbusFeatures.dapTelemetry.onUpdate(async (event, reason) => { + if (typeof this.counters !== "undefined") { + await this.sendTestReports(tasks, 30 * 1000, "nimbus-update"); + } + }); + } + + this._asyncShutdownBlocker = async () => { + lazy.logConsole.debug(`Sending on shutdown.`); + // Shorter timeout to prevent crashing due to blocking shutdown + await this.sendTestReports(tasks, 2 * 1000, "shutdown"); + }; + + lazy.AsyncShutdown.quitApplicationGranted.addBlocker( + "DAPTelemetrySender: sending data", + this._asyncShutdownBlocker + ); + } + } + + async sendTestReports(tasks, timeout, reason) { + for (let task of tasks) { + let measurement; + if (task.measurement_type == "u8") { + measurement = 3; + } else if (task.measurement_type == "vecu8") { + measurement = new Uint8Array(20); + let r = Math.floor(Math.random() * 10); + measurement[r] += 1; + measurement[19] += 1; + } + + await this.sendDAPMeasurement(task, measurement, timeout, reason); + } + } + + async timedSendTestReports(tasks) { + lazy.logConsole.debug("Sending on timer."); + await this.sendTestReports(tasks, 30 * 1000, "periodic"); + lazy.setTimeout( + () => this.timedSendTestReports(tasks), + this.timeout_value() + ); + } + + timeout_value() { + const MINUTE = 60 * 1000; + return MINUTE * (9 + Math.random() * 2); // 9 - 11 minutes + } + + /** + * Creates a DAP report for a specific task from a measurement and sends it. + * + * @param {Task} task + * Definition of the task for which the measurement was taken. + * @param {number} measurement + * The measured value for which a report is generated. + */ + async sendDAPMeasurement(task, measurement, timeout, reason) { + task.leader_endpoint = lazy.LEADER; + if (!task.leader_endpoint) { + lazy.logConsole.error('Preference "' + PREF_LEADER + '" not set'); + return; + } + + task.helper_endpoint = lazy.HELPER; + if (!task.helper_endpoint) { + lazy.logConsole.error('Preference "' + PREF_HELPER + '" not set'); + return; + } + + try { + const controller = new AbortController(); + lazy.setTimeout(() => controller.abort(), timeout); + let report = await this.generateReport( + task, + measurement, + controller.signal + ); + Glean.dap.reportGenerationStatus.success.add(1); + await this.sendReport( + task.leader_endpoint, + task.id, + report, + controller.signal, + reason + ); + } catch (e) { + if (e.name === "AbortError") { + Glean.dap.reportGenerationStatus.abort.add(1); + lazy.logConsole.error("Aborted DAP report generation: ", e); + } else { + Glean.dap.reportGenerationStatus.failure.add(1); + lazy.logConsole.error("DAP report generation failed: " + e); + } + } + } + + /** + * Downloads HPKE configs for endpoints and generates report. + * + * @param {Task} task + * Definition of the task for which the measurement was taken. + * @param {number} measurement + * The measured value for which a report is generated. + * @returns Promise + * @resolves {Uint8Array} The generated binary report data. + * @rejects {Error} If an exception is thrown while generating the report. 
+ */ + async generateReport(task, measurement, abortSignal) { + let [leader_config_bytes, helper_config_bytes] = await Promise.all([ + this.getHpkeConfig( + task.leader_endpoint + "/hpke_config?task_id=" + task.id, + abortSignal + ), + this.getHpkeConfig( + task.helper_endpoint + "/hpke_config?task_id=" + task.id, + abortSignal + ), + ]); + if (leader_config_bytes == null) { + lazy.logConsole.error("HPKE config download failed for leader."); + Glean.dap.reportGenerationStatus.hpke_leader_fail.add(1); + } + if (helper_config_bytes == null) { + lazy.logConsole.error("HPKE config download failed for helper."); + Glean.dap.reportGenerationStatus.hpke_helper_fail.add(1); + } + if (abortSignal.aborted) { + throw new DOMException("HPKE config download was aborted", "AbortError"); + } + if (leader_config_bytes === null || helper_config_bytes === null) { + throw new Error(`HPKE config download failed.`); + } + + let task_id = new Uint8Array( + ChromeUtils.base64URLDecode(task.id, { padding: "ignore" }) + ); + let report = {}; + if (task.measurement_type == "u8") { + Services.DAPTelemetry.GetReportU8( + leader_config_bytes, + helper_config_bytes, + measurement, + task_id, + task.time_precision, + report + ); + } else if (task.measurement_type == "vecu8") { + Services.DAPTelemetry.GetReportVecU8( + leader_config_bytes, + helper_config_bytes, + measurement, + task_id, + task.time_precision, + report + ); + } else if (task.measurement_type == "vecu16") { + Services.DAPTelemetry.GetReportVecU16( + leader_config_bytes, + helper_config_bytes, + measurement, + task_id, + task.time_precision, + report + ); + } else { + throw new Error( + `Unknown measurement type for task ${task.id}: ${task.measurement_type}` + ); + } + let reportData = new Uint8Array(report.value); + return reportData; + } + + /** + * Fetches TLS encoded HPKE config from a URL. + * + * @param {string} endpoint + * The URL from where to get the data. + * @returns Promise + * @resolves {Uint8Array} The binary representation of the endpoint configuration. + * @rejects {Error} If an exception is thrown while fetching the configuration. + */ + async getHpkeConfig(endpoint, abortSignal) { + // Use HPKEConfigManager to cache config for up to 24 hr. This reduces + // unecessary requests while limiting how long a stale config can be stuck + // if a server change is made ungracefully. + let buffer = await HPKEConfigManager.get(endpoint, { + maxAge: 24 * 60 * 60 * 1000, + abortSignal, + }); + if (buffer === null) { + return null; + } + let hpke_config_bytes = new Uint8Array(buffer); + return hpke_config_bytes; + } + + /** + * Sends a report to the leader. + * + * @param {string} leader_endpoint + * The URL for the leader. + * @param {Uint8Array} report + * Raw bytes of the TLS encoded report. + * @returns Promise + * @resolves {undefined} Once the attempt to send the report completes, whether or not it was successful. 
+ */ + async sendReport(leader_endpoint, task_id, report, abortSignal, reason) { + const upload_path = leader_endpoint + "/tasks/" + task_id + "/reports"; + try { + let response = await fetch(upload_path, { + method: "PUT", + headers: { "Content-Type": "application/dap-report" }, + body: report, + signal: abortSignal, + }); + + if (response.status != 200) { + if (response.status == 502) { + Glean.dap.uploadStatus.http_502.add(1); + } else { + Glean.dap.uploadStatus.http_error.add(1); + } + const content_type = response.headers.get("content-type"); + if (content_type && content_type === "application/json") { + // A JSON error from the DAP server. + let error = await response.json(); + lazy.logConsole.error( + `Sending failed. HTTP response: ${response.status} ${response.statusText}. Error: ${error.type} ${error.title}` + ); + } else { + // A different error, e.g. from a load-balancer. + let error = await response.text(); + lazy.logConsole.error( + `Sending failed. HTTP response: ${response.status} ${response.statusText}. Error: ${error}` + ); + } + } else { + lazy.logConsole.debug("DAP report sent"); + Glean.dap.uploadStatus.success.add(1); + } + } catch (err) { + if (err.name === "AbortError") { + lazy.logConsole.error("Aborted DAP report sending: ", err); + if (reason == "periodic") { + Glean.dap.uploadStatus.abort_timed.add(1); + } else if (reason == "shutdown") { + Glean.dap.uploadStatus.abort_shutdown.add(1); + } else { + Glean.dap.uploadStatus.abort.add(1); + } + } else { + lazy.logConsole.error("Failed to send report: ", err); + Glean.dap.uploadStatus.failure.add(1); + } + } + } +})(); diff --git a/toolkit/components/telemetry/dap/DAPVisitCounter.sys.mjs b/toolkit/components/telemetry/dap/DAPVisitCounter.sys.mjs new file mode 100644 index 0000000000..dfee5d0ff6 --- /dev/null +++ b/toolkit/components/telemetry/dap/DAPVisitCounter.sys.mjs @@ -0,0 +1,160 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { DAPTelemetrySender } from "./DAPTelemetrySender.sys.mjs"; + +let lazy = {}; + +ChromeUtils.defineLazyGetter(lazy, "logConsole", function () { + return console.createInstance({ + prefix: "DAPVisitCounter", + maxLogLevelPref: "toolkit.telemetry.dap.logLevel", + }); +}); +ChromeUtils.defineESModuleGetters(lazy, { + AsyncShutdown: "resource://gre/modules/AsyncShutdown.sys.mjs", + NimbusFeatures: "resource://nimbus/ExperimentAPI.sys.mjs", + PlacesUtils: "resource://gre/modules/PlacesUtils.sys.mjs", + setTimeout: "resource://gre/modules/Timer.sys.mjs", +}); + +export const DAPVisitCounter = new (class { + startup() { + this._asyncShutdownBlocker = async () => { + lazy.logConsole.debug(`Sending on shutdown.`); + await this.send(2 * 1000, "shutdown"); + }; + + lazy.AsyncShutdown.quitApplicationGranted.addBlocker( + "DAPVisitCounter: sending data", + this._asyncShutdownBlocker + ); + + const listener = events => { + // Even using the event.hidden flag there mayb be some double counting + // here. It would have to be fixed in the Places API. 
+ for (const event of events) { + lazy.logConsole.debug(`Visited: ${event.url}`); + if (event.hidden) { + continue; + } + for (const counter of this.counters) { + for (const pattern of counter.patterns) { + if (pattern.matches(event.url)) { + lazy.logConsole.debug(`${pattern.pattern} matched!`); + counter.count += 1; + } + } + } + } + }; + + lazy.NimbusFeatures.dapTelemetry.onUpdate(async (event, reason) => { + if (typeof this.counters !== "undefined") { + await this.send(30 * 1000, "nimbus-update"); + } + this.initialize_counters(); + }); + + if (typeof this.counters === "undefined") { + this.initialize_counters(); + } + + lazy.PlacesUtils.observers.addListener(["page-visited"], listener); + + lazy.setTimeout(() => this.timed_send(), this.timeout_value()); + } + + initialize_counters() { + let experiments = lazy.NimbusFeatures.dapTelemetry.getVariable( + "visitCountingExperimentList" + ); + + this.counters = []; + // This allows two different formats for distributing the URLs for the + // experiment. The experiments get quite large and over 4096 bytes they + // result in a warning (when mirrored in a pref as in this case). + if (Array.isArray(experiments)) { + for (const experiment of experiments) { + let counter = { experiment, count: 0, patterns: [] }; + this.counters.push(counter); + for (const url of experiment.urls) { + let mpattern = new MatchPattern(url); + counter.patterns.push(mpattern); + } + } + } else { + for (const [task, urls] of Object.entries(experiments)) { + for (const [idx, url] of urls.entries()) { + const fullUrl = `*://${url}/*`; + + this.counters.push({ + experiment: { + task_id: task, + task_veclen: 20, + bucket: idx, + }, + count: 0, + patterns: [new MatchPattern(fullUrl)], + }); + } + } + } + } + + async timed_send() { + lazy.logConsole.debug("Sending on timer."); + await this.send(30 * 1000, "periodic"); + lazy.setTimeout(() => this.timed_send(), this.timeout_value()); + } + + timeout_value() { + const MINUTE = 60 * 1000; + return MINUTE * (9 + Math.random() * 2); // 9 - 11 minutes + } + + async send(timeout, reason) { + let collected_measurements = new Map(); + for (const counter of this.counters) { + if (!collected_measurements.has(counter.experiment.task_id)) { + collected_measurements.set( + counter.experiment.task_id, + new Uint8Array(counter.experiment.task_veclen) + ); + } + collected_measurements.get(counter.experiment.task_id)[ + counter.experiment.bucket + ] = counter.count; + counter.count = 0; + } + + let send_promises = []; + for (const [task_id, measurement] of collected_measurements) { + let task = { + id: task_id, + time_precision: 60, + measurement_type: "vecu8", + }; + + send_promises.push( + DAPTelemetrySender.sendDAPMeasurement( + task, + measurement, + timeout, + reason + ) + ); + } + await Promise.all(send_promises); + } + + show() { + for (const counter of this.counters) { + lazy.logConsole.info( + `Experiment: ${counter.experiment.url} -> ${counter.count}` + ); + } + return this.counters; + } +})(); diff --git a/toolkit/components/telemetry/dap/components.conf b/toolkit/components/telemetry/dap/components.conf new file mode 100644 index 0000000000..237868bcee --- /dev/null +++ b/toolkit/components/telemetry/dap/components.conf @@ -0,0 +1,10 @@ +Classes = [ + { + 'cid': '{58a4c579-d2dd-46b7-9c3b-6881a1c36c6a}', + 'interfaces': ['nsIDAPTelemetry'], + 'contract_ids': ['@mozilla.org/base/daptelemetry;1'], + 'type': 'mozilla::DAPTelemetry', + 'headers': ['mozilla/DAPTelemetry.h'], + 'js_name': 'DAPTelemetry', + }, +] diff --git 
a/toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml b/toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml new file mode 100644 index 0000000000..abad140b7d --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi-gtest/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "dap_ffi-gtest" +version = "0.1.0" +authors = [ + "Simon Friedberger <simon@mozilla.com>", +] +license = "MPL-2.0" +description = "Tests for Rust code for DAP; mainly encoding and NSS bindings." +edition = "2021" + +[dependencies] +dap_ffi = { path = "../ffi" } +hex = { version = "0.4.3", features = ["serde"] } +prio = {version = "0.15.3", default-features = false } +serde = { version = "1.0", features = ["derive"] } +serde_json = { version = "1.0" } +thin-vec = { version = "0.2.1", features = ["gecko-ffi"] } + +[lib] +path = "test.rs" diff --git a/toolkit/components/telemetry/dap/ffi-gtest/TestDAPTelemetry.cpp b/toolkit/components/telemetry/dap/ffi-gtest/TestDAPTelemetry.cpp new file mode 100644 index 0000000000..80821cd118 --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi-gtest/TestDAPTelemetry.cpp @@ -0,0 +1,19 @@ + +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "gtest/gtest.h" +#include "mozilla/DAPTelemetryBindings.h" + +using namespace mozilla; + +extern "C" void dap_test_hpke_encrypt(); +TEST(DAPTelemetryTests, TestHpkeEnc) +{ dap_test_hpke_encrypt(); } + +extern "C" void dap_test_encoding(); +TEST(DAPTelemetryTests, TestReportSerialization) +{ dap_test_encoding(); } diff --git a/toolkit/components/telemetry/dap/ffi-gtest/moz.build b/toolkit/components/telemetry/dap/ffi-gtest/moz.build new file mode 100644 index 0000000000..d809ebc418 --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi-gtest/moz.build @@ -0,0 +1,6 @@ +UNIFIED_SOURCES = ["TestDAPTelemetry.cpp"] +FINAL_LIBRARY = "xul-gtest" + +TEST_HARNESS_FILES.gtest += [ + "../../../../../security/nss/gtests/pk11_gtest/hpke-vectors.json", +] diff --git a/toolkit/components/telemetry/dap/ffi-gtest/test.rs b/toolkit/components/telemetry/dap/ffi-gtest/test.rs new file mode 100644 index 0000000000..3871965107 --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi-gtest/test.rs @@ -0,0 +1,204 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +use serde::Deserialize; +use std::ffi::c_void; +use std::fs::File; +use std::io::Cursor; + +use thin_vec::ThinVec; + +use dap_ffi::types::Report; + +use prio::codec::{Decode, Encode}; + +#[no_mangle] +pub extern "C" fn dap_test_encoding() { + let r = Report::new_dummy(); + let mut encoded = Vec::<u8>::new(); + Report::encode(&r, &mut encoded); + let decoded = Report::decode(&mut Cursor::new(&encoded)).expect("Report decoding failed!"); + if r != decoded { + println!("Report:"); + println!("{:?}", r); + println!("Encoded Report:"); + println!("{:?}", encoded); + println!("Decoded Report:"); + println!("{:?}", decoded); + panic!("Report changed after encoding & decoding."); + } +} + +extern "C" { + pub fn dapHpkeEncrypt( + aContext: *mut c_void, + aAad: *mut u8, + aAadLength: u32, + aPlaintext: *mut u8, + aPlaintextLength: u32, + aOutputShare: &mut ThinVec<u8>, + ) -> bool; + pub fn dapSetupHpkeContextForTesting( + aKey: *const u8, + aKeyLength: u32, + aInfo: *mut u8, + aInfoLength: u32, + aPkEm: *const u8, + aPkEmLength: u32, + aSkEm: *const u8, + aSkEmLength: u32, + aOutputEncapsulatedKey: &mut ThinVec<u8>, + ) -> *mut c_void; + pub fn dapDestroyHpkeContext(aContext: *mut c_void); +} + +struct HpkeContext(*mut c_void); + +impl Drop for HpkeContext { + fn drop(&mut self) { + unsafe { + dapDestroyHpkeContext(self.0); + } + } +} + +type Testsuites = Vec<CiphersuiteTest>; + +#[derive(Debug, Deserialize)] +pub struct HexString(#[serde(with = "hex")] Vec<u8>); +impl AsRef<[u8]> for HexString { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} +#[allow(dead_code)] +#[derive(Debug, Deserialize)] +struct CiphersuiteTest { + mode: i64, + kem_id: i64, + kdf_id: i64, + aead_id: i64, + info: HexString, + #[serde(rename = "ikmR")] + ikm_r: HexString, + #[serde(rename = "ikmE")] + ikm_e: HexString, + #[serde(rename = "skRm")] + sk_r_m: HexString, + #[serde(rename = "skEm")] + sk_e_m: HexString, + #[serde(rename = "pkRm")] + pk_r_m: HexString, + #[serde(rename = "pkEm")] + pk_e_m: HexString, + enc: HexString, + shared_secret: HexString, + key_schedule_context: HexString, + secret: HexString, + key: HexString, + base_nonce: HexString, + exporter_secret: HexString, + encryptions: Vec<Encryption>, + exports: Vec<Export>, + psk: Option<HexString>, + psk_id: Option<HexString>, + ikm_s: Option<HexString>, + sk_sm: Option<HexString>, + pk_sm: Option<HexString>, +} + +#[derive(Debug, Deserialize)] +pub struct Encryption { + pub aad: HexString, + pub ciphertext: HexString, + pub nonce: HexString, + pub plaintext: HexString, +} + +#[derive(Debug, Deserialize)] +pub struct Export { + pub exporter_context: HexString, + #[serde(rename = "L")] + pub length: i64, + pub exported_value: HexString, +} + +#[no_mangle] +pub extern "C" fn dap_test_hpke_encrypt() { + let file = File::open("hpke-vectors.json").unwrap(); + let tests: Testsuites = serde_json::from_reader(file).unwrap(); + + let mut have_tested = false; + + for (test_idx, test) in tests.into_iter().enumerate() { + // Mode must be "Base" + if test.mode != 0 + // KEM must be DHKEM(X25519, HKDF-SHA256) + || test.kem_id != 32 + // KDF must be HKDF-SHA256 + || test.kdf_id != 1 + // AEAD must be AES-128-GCM + || test.aead_id != 1 + { + continue; + } + + have_tested = true; + + let mut pk_r_serialized = test.pk_r_m.0; + let mut info = test.info.0; + let mut pk_e_serialized = test.pk_e_m.0; + let mut sk_e_serialized = test.sk_e_m.0; + + let mut encapsulated_key = ThinVec::<u8>::new(); + + let ctx = HpkeContext(unsafe { + dapSetupHpkeContextForTesting( + 
pk_r_serialized.as_mut_ptr(), + pk_r_serialized.len().try_into().unwrap(), + info.as_mut_ptr(), + info.len().try_into().unwrap(), + pk_e_serialized.as_mut_ptr(), + pk_e_serialized.len().try_into().unwrap(), + sk_e_serialized.as_mut_ptr(), + sk_e_serialized.len().try_into().unwrap(), + &mut encapsulated_key, + ) + }); + if ctx.0.is_null() { + panic!("Failed to set up HPKE context."); + } + if encapsulated_key != test.enc.0 { + panic!("Encapsulated key is wrong!"); + } + + for (encryption_idx, encryption) in test.encryptions.into_iter().enumerate() { + let mut encrypted_share = ThinVec::<u8>::new(); + + let mut aad = encryption.aad.0.clone(); + let mut pt = encryption.plaintext.0.clone(); + unsafe { + dapHpkeEncrypt( + ctx.0, + aad.as_mut_ptr(), + aad.len().try_into().unwrap(), + pt.as_mut_ptr(), + pt.len().try_into().unwrap(), + &mut encrypted_share, + ); + } + + if encrypted_share != encryption.ciphertext.0 { + println!("Test: {}, Encryption: {}", test_idx, encryption_idx); + println!("Expected:"); + println!("{:?}", encryption.ciphertext.0); + println!("Actual:"); + println!("{:?}", encrypted_share); + panic!("Encryption outputs did not match!"); + } + } + } + + assert!(have_tested); +} diff --git a/toolkit/components/telemetry/dap/ffi/Cargo.toml b/toolkit/components/telemetry/dap/ffi/Cargo.toml new file mode 100644 index 0000000000..dfb69e4821 --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "dap_ffi" +version = "0.1.0" +edition = "2021" +authors = [ + "Simon Friedberger <simon@mozilla.com>", +] +license = "MPL-2.0" + +[dependencies] +prio = {version = "0.15.3", default-features = false } +thin-vec = { version = "0.2.1", features = ["gecko-ffi"] } +rand = "0.8" diff --git a/toolkit/components/telemetry/dap/ffi/cbindgen.toml b/toolkit/components/telemetry/dap/ffi/cbindgen.toml new file mode 100644 index 0000000000..e2c032133a --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi/cbindgen.toml @@ -0,0 +1,11 @@ +header = """/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */""" +autogen_warning = """/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen. See RunCbindgen.py */ +#ifndef DAPTelemetryBindings_h +#error "Don't include this file directly, instead include DAPTelemetryBindings.h" +#endif +""" + +[export.rename] +"ThinVec" = "nsTArray" diff --git a/toolkit/components/telemetry/dap/ffi/src/lib.rs b/toolkit/components/telemetry/dap/ffi/src/lib.rs new file mode 100644 index 0000000000..998c8af204 --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi/src/lib.rs @@ -0,0 +1,335 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. 
*/ + +use std::error::Error; +use std::io::Cursor; + +use prio::vdaf::prio3::Prio3Sum; +use prio::vdaf::prio3::Prio3SumVec; +use thin_vec::ThinVec; + +pub mod types; +use types::HpkeConfig; +use types::PlaintextInputShare; +use types::Report; +use types::ReportID; +use types::ReportMetadata; +use types::Time; + +use prio::codec::Encode; +use prio::codec::{decode_u16_items, encode_u32_items}; +use prio::flp::types::{Sum, SumVec}; +use prio::vdaf::prio3::Prio3; +use prio::vdaf::Client; +use prio::vdaf::VdafError; + +use crate::types::HpkeCiphertext; + +extern "C" { + pub fn dapHpkeEncryptOneshot( + aKey: *const u8, + aKeyLength: u32, + aInfo: *const u8, + aInfoLength: u32, + aAad: *const u8, + aAadLength: u32, + aPlaintext: *const u8, + aPlaintextLength: u32, + aOutputEncapsulatedKey: &mut ThinVec<u8>, + aOutputShare: &mut ThinVec<u8>, + ) -> bool; +} + +pub fn new_prio_u8(num_aggregators: u8, bits: u32) -> Result<Prio3Sum, VdafError> { + if bits > 64 { + return Err(VdafError::Uncategorized(format!( + "bit length ({}) exceeds limit for aggregate type (64)", + bits + ))); + } + + Prio3::new(num_aggregators, Sum::new(bits as usize)?) +} + +pub fn new_prio_vecu8(num_aggregators: u8, len: usize) -> Result<Prio3SumVec, VdafError> { + let chunk_length = prio::vdaf::prio3::optimal_chunk_length(8 * len); + Prio3::new(num_aggregators, SumVec::new(8, len, chunk_length)?) +} + +pub fn new_prio_vecu16(num_aggregators: u8, len: usize) -> Result<Prio3SumVec, VdafError> { + let chunk_length = prio::vdaf::prio3::optimal_chunk_length(16 * len); + Prio3::new(num_aggregators, SumVec::new(16, len, chunk_length)?) +} + +enum Role { + Leader = 2, + Helper = 3, +} + +/// A minimal wrapper around the FFI function which mostly just converts datatypes. +fn hpke_encrypt_wrapper( + plain_share: &Vec<u8>, + aad: &Vec<u8>, + info: &Vec<u8>, + hpke_config: &HpkeConfig, +) -> Result<HpkeCiphertext, Box<dyn std::error::Error>> { + let mut encrypted_share = ThinVec::<u8>::new(); + let mut encapsulated_key = ThinVec::<u8>::new(); + unsafe { + if !dapHpkeEncryptOneshot( + hpke_config.public_key.as_ptr(), + hpke_config.public_key.len() as u32, + info.as_ptr(), + info.len() as u32, + aad.as_ptr(), + aad.len() as u32, + plain_share.as_ptr(), + plain_share.len() as u32, + &mut encapsulated_key, + &mut encrypted_share, + ) { + return Err(Box::from("Encryption failed.")); + } + } + + Ok(HpkeCiphertext { + config_id: hpke_config.id, + enc: encapsulated_key.to_vec(), + payload: encrypted_share.to_vec(), + }) +} + +trait Shardable { + fn shard( + &self, + nonce: &[u8; 16], + ) -> Result<(Vec<u8>, Vec<Vec<u8>>), Box<dyn std::error::Error>>; +} + +impl Shardable for u8 { + fn shard( + &self, + nonce: &[u8; 16], + ) -> Result<(Vec<u8>, Vec<Vec<u8>>), Box<dyn std::error::Error>> { + let prio = new_prio_u8(2, 2)?; + + let (public_share, input_shares) = prio.shard(&(*self as u128), nonce)?; + + debug_assert_eq!(input_shares.len(), 2); + + let encoded_input_shares = input_shares.iter().map(|s| s.get_encoded()).collect(); + let encoded_public_share = public_share.get_encoded(); + Ok((encoded_public_share, encoded_input_shares)) + } +} + +impl Shardable for ThinVec<u8> { + fn shard( + &self, + nonce: &[u8; 16], + ) -> Result<(Vec<u8>, Vec<Vec<u8>>), Box<dyn std::error::Error>> { + let prio = new_prio_vecu8(2, self.len())?; + + let measurement: Vec<u128> = self.iter().map(|e| (*e as u128)).collect(); + let (public_share, input_shares) = prio.shard(&measurement, nonce)?; + + debug_assert_eq!(input_shares.len(), 2); + + let encoded_input_shares = 
input_shares.iter().map(|s| s.get_encoded()).collect(); + let encoded_public_share = public_share.get_encoded(); + Ok((encoded_public_share, encoded_input_shares)) + } +} + +impl Shardable for ThinVec<u16> { + fn shard( + &self, + nonce: &[u8; 16], + ) -> Result<(Vec<u8>, Vec<Vec<u8>>), Box<dyn std::error::Error>> { + let prio = new_prio_vecu16(2, self.len())?; + + let measurement: Vec<u128> = self.iter().map(|e| (*e as u128)).collect(); + let (public_share, input_shares) = prio.shard(&measurement, nonce)?; + + debug_assert_eq!(input_shares.len(), 2); + + let encoded_input_shares = input_shares.iter().map(|s| s.get_encoded()).collect(); + let encoded_public_share = public_share.get_encoded(); + Ok((encoded_public_share, encoded_input_shares)) + } +} + +/// Pre-fill the info part of the HPKE sealing with the constants from the standard. +fn make_base_info() -> Vec<u8> { + let mut info = Vec::<u8>::new(); + const START: &[u8] = "dap-07 input share".as_bytes(); + info.extend(START); + const FIXED: u8 = 1; + info.push(FIXED); + + info +} + +fn select_hpke_config(configs: Vec<HpkeConfig>) -> Result<HpkeConfig, Box<dyn Error>> { + for config in configs { + if config.kem_id == 0x20 /* DHKEM(X25519, HKDF-SHA256) */ && + config.kdf_id == 0x01 /* HKDF-SHA256 */ && + config.aead_id == 0x01 + /* AES-128-GCM */ + { + return Ok(config); + } + } + + Err("No suitable HPKE config found.".into()) +} + +/// This function creates a full report - ready to send - for a measurement. +/// +/// To do that it also needs the HPKE configurations for the endpoints and some +/// additional data which is part of the authentication. +fn get_dap_report_internal<T: Shardable>( + leader_hpke_config_encoded: &ThinVec<u8>, + helper_hpke_config_encoded: &ThinVec<u8>, + measurement: &T, + task_id: &[u8; 32], + time_precision: u64, +) -> Result<Report, Box<dyn std::error::Error>> { + let leader_hpke_configs: Vec<HpkeConfig> = + decode_u16_items(&(), &mut Cursor::new(leader_hpke_config_encoded))?; + let leader_hpke_config = select_hpke_config(leader_hpke_configs)?; + let helper_hpke_configs: Vec<HpkeConfig> = + decode_u16_items(&(), &mut Cursor::new(helper_hpke_config_encoded))?; + let helper_hpke_config = select_hpke_config(helper_hpke_configs)?; + + let report_id = ReportID::generate(); + let (encoded_public_share, encoded_input_shares) = measurement.shard(report_id.as_ref())?; + + let plaintext_input_shares: Vec<Vec<u8>> = encoded_input_shares + .into_iter() + .map(|encoded_input_share| { + PlaintextInputShare { + extensions: Vec::new(), + payload: encoded_input_share, + } + .get_encoded() + }) + .collect(); + + let metadata = ReportMetadata { + report_id, + time: Time::generate(time_precision), + }; + + // This quote from the standard describes which info and aad to use for the encryption: + // enc, payload = SealBase(pk, + // "dap-02 input share" || 0x01 || server_role, + // task_id || metadata || public_share, input_share) + // https://www.ietf.org/archive/id/draft-ietf-ppm-dap-02.html#name-upload-request + let mut info = make_base_info(); + + let mut aad = Vec::from(*task_id); + metadata.encode(&mut aad); + encode_u32_items(&mut aad, &(), &encoded_public_share); + + info.push(Role::Leader as u8); + + let leader_payload = + hpke_encrypt_wrapper(&plaintext_input_shares[0], &aad, &info, &leader_hpke_config)?; + + *info.last_mut().unwrap() = Role::Helper as u8; + + let helper_payload = + hpke_encrypt_wrapper(&plaintext_input_shares[1], &aad, &info, &helper_hpke_config)?; + + Ok(Report { + metadata, + public_share: 
encoded_public_share, + leader_encrypted_input_share: leader_payload, + helper_encrypted_input_share: helper_payload, + }) +} + +/// Wraps the function above with minor C interop. +/// Mostly it turns any error result into a return value of false. +#[no_mangle] +pub extern "C" fn dapGetReportU8( + leader_hpke_config_encoded: &ThinVec<u8>, + helper_hpke_config_encoded: &ThinVec<u8>, + measurement: u8, + task_id: &ThinVec<u8>, + time_precision: u64, + out_report: &mut ThinVec<u8>, +) -> bool { + assert_eq!(task_id.len(), 32); + + if let Ok(report) = get_dap_report_internal::<u8>( + leader_hpke_config_encoded, + helper_hpke_config_encoded, + &measurement, + &task_id.as_slice().try_into().unwrap(), + time_precision, + ) { + let encoded_report = report.get_encoded(); + out_report.extend(encoded_report); + + true + } else { + false + } +} + +#[no_mangle] +pub extern "C" fn dapGetReportVecU8( + leader_hpke_config_encoded: &ThinVec<u8>, + helper_hpke_config_encoded: &ThinVec<u8>, + measurement: &ThinVec<u8>, + task_id: &ThinVec<u8>, + time_precision: u64, + out_report: &mut ThinVec<u8>, +) -> bool { + assert_eq!(task_id.len(), 32); + + if let Ok(report) = get_dap_report_internal::<ThinVec<u8>>( + leader_hpke_config_encoded, + helper_hpke_config_encoded, + measurement, + &task_id.as_slice().try_into().unwrap(), + time_precision, + ) { + let encoded_report = report.get_encoded(); + out_report.extend(encoded_report); + + true + } else { + false + } +} + +#[no_mangle] +pub extern "C" fn dapGetReportVecU16( + leader_hpke_config_encoded: &ThinVec<u8>, + helper_hpke_config_encoded: &ThinVec<u8>, + measurement: &ThinVec<u16>, + task_id: &ThinVec<u8>, + time_precision: u64, + out_report: &mut ThinVec<u8>, +) -> bool { + assert_eq!(task_id.len(), 32); + + if let Ok(report) = get_dap_report_internal::<ThinVec<u16>>( + leader_hpke_config_encoded, + helper_hpke_config_encoded, + measurement, + &task_id.as_slice().try_into().unwrap(), + time_precision, + ) { + let encoded_report = report.get_encoded(); + out_report.extend(encoded_report); + + true + } else { + false + } +} diff --git a/toolkit/components/telemetry/dap/ffi/src/types.rs b/toolkit/components/telemetry/dap/ffi/src/types.rs new file mode 100644 index 0000000000..e8f6385dcd --- /dev/null +++ b/toolkit/components/telemetry/dap/ffi/src/types.rs @@ -0,0 +1,358 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +//! This file contains structs for use in the DAP protocol and implements TLS compatible +//! serialization/deserialization as required for the wire protocol. +//! +//! The current draft standard with the definition of these structs is available here: +//! https://github.com/ietf-wg-ppm/draft-ietf-ppm-dap +//! This code is based on version 07 of the standard available here: +//! https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html + +use prio::codec::{ + decode_u16_items, decode_u32_items, encode_u16_items, encode_u32_items, CodecError, Decode, + Encode, +}; +use std::io::{Cursor, Read}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use rand::{Rng, RngCore}; + +/// opaque TaskId[32]; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-task-configuration +#[derive(Debug, PartialEq, Eq)] +pub struct TaskID(pub [u8; 32]); + +impl Decode for TaskID { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + // this should probably be available in codec...? 
+ let mut data: [u8; 32] = [0; 32]; + bytes.read_exact(&mut data)?; + Ok(TaskID(data)) + } +} + +impl Encode for TaskID { + fn encode(&self, bytes: &mut Vec<u8>) { + bytes.extend_from_slice(&self.0); + } +} + +/// uint64 Time; +/// seconds elapsed since start of UNIX epoch +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-protocol-definition +#[derive(Debug, PartialEq, Eq)] +pub struct Time(pub u64); + +impl Decode for Time { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + Ok(Time(u64::decode(bytes)?)) + } +} + +impl Encode for Time { + fn encode(&self, bytes: &mut Vec<u8>) { + u64::encode(&self.0, bytes); + } +} + +impl Time { + /// Generates a Time for the current system time rounded to the desired precision. + pub fn generate(time_precision: u64) -> Time { + let now_secs = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Failed to get time.") + .as_secs(); + let timestamp = (now_secs / time_precision) * time_precision; + Time(timestamp) + } +} + +/// struct { +/// ExtensionType extension_type; +/// opaque extension_data<0..2^16-1>; +/// } Extension; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-upload-extensions +#[derive(Debug, PartialEq)] +pub struct Extension { + extension_type: ExtensionType, + extension_data: Vec<u8>, +} + +impl Decode for Extension { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + let extension_type = ExtensionType::from_u16(u16::decode(bytes)?); + let extension_data: Vec<u8> = decode_u16_items(&(), bytes)?; + + Ok(Extension { + extension_type, + extension_data, + }) + } +} + +impl Encode for Extension { + fn encode(&self, bytes: &mut Vec<u8>) { + (self.extension_type as u16).encode(bytes); + encode_u16_items(bytes, &(), &self.extension_data); + } +} + +/// enum { +/// TBD(0), +/// (65535) +/// } ExtensionType; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-upload-extensions +#[derive(Debug, PartialEq, Clone, Copy)] +#[repr(u16)] +enum ExtensionType { + Tbd = 0, +} + +impl ExtensionType { + fn from_u16(value: u16) -> ExtensionType { + match value { + 0 => ExtensionType::Tbd, + _ => panic!("Unknown value for Extension Type: {}", value), + } + } +} + +/// struct { +/// Extension extensions<0..2^16-1>; +/// opaque payload<0..2^32-1>; +/// } PlaintextInputShare; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#section-4.4.2-9 +#[derive(Debug)] +pub struct PlaintextInputShare { + pub extensions: Vec<Extension>, + pub payload: Vec<u8>, +} + +impl Encode for PlaintextInputShare { + fn encode(&self, bytes: &mut Vec<u8>) { + encode_u16_items(bytes, &(), &self.extensions); + encode_u32_items(bytes, &(), &self.payload); + } +} + +/// Identifier for a server's HPKE configuration +/// uint8 HpkeConfigId; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-protocol-definition +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct HpkeConfigId(u8); + +impl Decode for HpkeConfigId { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + Ok(HpkeConfigId(u8::decode(bytes)?)) + } +} + +impl Encode for HpkeConfigId { + fn encode(&self, bytes: &mut Vec<u8>) { + self.0.encode(bytes); + } +} + +/// struct { +/// HpkeConfigId id; +/// HpkeKemId kem_id; +/// HpkeKdfId kdf_id; +/// HpkeAeadId aead_id; +/// HpkePublicKey public_key; +/// } HpkeConfig; +/// opaque HpkePublicKey<1..2^16-1>; +/// uint16 HpkeAeadId; /* Defined in [HPKE] */ +/// uint16 HpkeKemId; /* Defined in [HPKE] */ +/// uint16 HpkeKdfId; /* Defined in 
[HPKE] */ +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-hpke-configuration-request +#[derive(Debug, Clone)] +pub struct HpkeConfig { + pub id: HpkeConfigId, + pub kem_id: u16, + pub kdf_id: u16, + pub aead_id: u16, + pub public_key: Vec<u8>, +} + +impl Decode for HpkeConfig { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + Ok(HpkeConfig { + id: HpkeConfigId::decode(bytes)?, + kem_id: u16::decode(bytes)?, + kdf_id: u16::decode(bytes)?, + aead_id: u16::decode(bytes)?, + public_key: decode_u16_items(&(), bytes)?, + }) + } +} + +impl Encode for HpkeConfig { + fn encode(&self, bytes: &mut Vec<u8>) { + self.id.encode(bytes); + self.kem_id.encode(bytes); + self.kdf_id.encode(bytes); + self.aead_id.encode(bytes); + encode_u16_items(bytes, &(), &self.public_key); + } +} + +/// An HPKE ciphertext. +/// struct { +/// HpkeConfigId config_id; /* config ID */ +/// opaque enc<1..2^16-1>; /* encapsulated HPKE key */ +/// opaque payload<1..2^32-1>; /* ciphertext */ +/// } HpkeCiphertext; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-protocol-definition +#[derive(Debug, PartialEq, Eq)] +pub struct HpkeCiphertext { + pub config_id: HpkeConfigId, + pub enc: Vec<u8>, + pub payload: Vec<u8>, +} + +impl Decode for HpkeCiphertext { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + let config_id = HpkeConfigId::decode(bytes)?; + let enc: Vec<u8> = decode_u16_items(&(), bytes)?; + let payload: Vec<u8> = decode_u32_items(&(), bytes)?; + + Ok(HpkeCiphertext { + config_id, + enc, + payload, + }) + } +} + +impl Encode for HpkeCiphertext { + fn encode(&self, bytes: &mut Vec<u8>) { + self.config_id.encode(bytes); + encode_u16_items(bytes, &(), &self.enc); + encode_u32_items(bytes, &(), &self.payload); + } +} + +/// opaque ReportID[16]; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-protocol-definition +#[derive(Debug, PartialEq, Eq)] +pub struct ReportID(pub [u8; 16]); + +impl Decode for ReportID { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + let mut data: [u8; 16] = [0; 16]; + bytes.read_exact(&mut data)?; + Ok(ReportID(data)) + } +} + +impl Encode for ReportID { + fn encode(&self, bytes: &mut Vec<u8>) { + bytes.extend_from_slice(&self.0); + } +} + +impl ReportID { + pub fn generate() -> ReportID { + ReportID(rand::thread_rng().gen()) + } +} + +impl AsRef<[u8; 16]> for ReportID { + fn as_ref(&self) -> &[u8; 16] { + &self.0 + } +} + +/// struct { +/// ReportID report_id; +/// Time time; +/// } ReportMetadata; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-upload-request +#[derive(Debug, PartialEq)] +pub struct ReportMetadata { + pub report_id: ReportID, + pub time: Time, +} + +impl Decode for ReportMetadata { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + let report_id = ReportID::decode(bytes)?; + let time = Time::decode(bytes)?; + + Ok(ReportMetadata { report_id, time }) + } +} + +impl Encode for ReportMetadata { + fn encode(&self, bytes: &mut Vec<u8>) { + self.report_id.encode(bytes); + self.time.encode(bytes); + } +} + +/// struct { +/// ReportMetadata metadata; +/// opaque public_share<0..2^32-1>; +/// HpkeCiphertext leader_encrypted_input_share; +/// HpkeCiphertext helper_encrypted_input_share; +/// } Report; +/// https://www.ietf.org/archive/id/draft-ietf-ppm-dap-07.html#name-upload-request +#[derive(Debug, PartialEq)] +pub struct Report { + pub metadata: ReportMetadata, + pub public_share: Vec<u8>, + pub 
leader_encrypted_input_share: HpkeCiphertext, + pub helper_encrypted_input_share: HpkeCiphertext, +} + + +impl Report { + /// Creates a minimal report for use in tests. + pub fn new_dummy() -> Self { + let mut enc = [0u8; 32]; + rand::thread_rng().fill_bytes(&mut enc); + let mut payload = [0u8; 200]; + rand::thread_rng().fill_bytes(&mut payload); + Report { + metadata: ReportMetadata { + report_id: ReportID::generate(), + time: Time::generate(1), + }, + public_share: vec![], + leader_encrypted_input_share: HpkeCiphertext { config_id: HpkeConfigId(5), enc: vec![1, 2, 3, 4, 5], payload: vec![6, 7, 8, 9, 10] }, + helper_encrypted_input_share: HpkeCiphertext { config_id: HpkeConfigId(100), enc: enc.into(), payload: payload.into() }, + } + } +} + +impl Decode for Report { + fn decode(bytes: &mut Cursor<&[u8]>) -> Result<Self, CodecError> { + let metadata = ReportMetadata::decode(bytes)?; + let public_share: Vec<u8> = decode_u32_items(&(), bytes)?; + let leader_encrypted_input_share: HpkeCiphertext = HpkeCiphertext::decode(bytes)?; + let helper_encrypted_input_share: HpkeCiphertext = HpkeCiphertext::decode(bytes)?; + + let remaining_bytes = bytes.get_ref().len() - (bytes.position() as usize); + if remaining_bytes == 0 { + Ok(Report { + metadata, + public_share, + leader_encrypted_input_share, + helper_encrypted_input_share, + }) + } else { + Err(CodecError::BytesLeftOver(remaining_bytes)) + } + } +} + +impl Encode for Report { + fn encode(&self, bytes: &mut Vec<u8>) { + self.metadata.encode(bytes); + encode_u32_items(bytes, &(), &self.public_share); + self.leader_encrypted_input_share.encode(bytes); + self.helper_encrypted_input_share.encode(bytes); + } +} diff --git a/toolkit/components/telemetry/dap/metrics.yaml b/toolkit/components/telemetry/dap/metrics.yaml new file mode 100644 index 0000000000..6ab2d4f9f3 --- /dev/null +++ b/toolkit/components/telemetry/dap/metrics.yaml @@ -0,0 +1,54 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +# Adding a new metric? We have docs for that! +# https://firefox-source-docs.mozilla.org/toolkit/components/glean/user/new_definitions_file.html + +--- +$schema: moz://mozilla.org/schemas/glean/metrics/2-0-0 +$tags: + - 'Toolkit :: Telemetry' + +dap: + upload_status: + type: labeled_counter + labels: + - success + - failure + - http_error + - http_502 + - abort + - abort_timed + - abort_shutdown + description: > + The result of trying to upload a report to the DAP server. + bugs: + - https://bugzilla.mozilla.org/show_bug.cgi?id=1775035 + data_reviews: + - https://bugzilla.mozilla.org/show_bug.cgi?id=1775035 + data_sensitivity: + - technical + notification_emails: + - simon@mozilla.com + expires: never + + report_generation_status: + type: labeled_counter + labels: + - success + - failure + - abort + - hpke_leader_fail + - hpke_helper_fail + description: > + The result of trying to generate a DAP report. 
+ bugs: + - https://bugzilla.mozilla.org/show_bug.cgi?id=1775035 + data_reviews: + - https://bugzilla.mozilla.org/show_bug.cgi?id=1775035 + data_sensitivity: + - technical + notification_emails: + - simon@mozilla.com + expires: never diff --git a/toolkit/components/telemetry/dap/nsIDAPTelemetry.idl b/toolkit/components/telemetry/dap/nsIDAPTelemetry.idl new file mode 100644 index 0000000000..909d5ab3f3 --- /dev/null +++ b/toolkit/components/telemetry/dap/nsIDAPTelemetry.idl @@ -0,0 +1,37 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "nsISupports.idl" + +[scriptable, builtinclass, uuid(58a4c579-d2dd-46b7-9c3b-6881a1c36c6a)] +interface nsIDAPTelemetry : nsISupports { + /** + * Split measurement into shares and create a report with encrypted shares. + * + * @param leaderHpkeConfig The leader share will be encrypted with this + * config. + * @param helperHpkeConfig Same for the helper. + * @param measurement The data which will be encoded and shared. + * @param task_id Identifies which task this measurement is for + * which influences both encoding and encryption. + * @param time_precision Determines the report timestamp. + * + * @return The raw bytes of a report, ready for sending. + * + * @note This can potentially run for a long time. Take care not to block + * the main thread for too long. + */ + void GetReportU8(in Array<uint8_t> leaderHpkeConfig, + in Array<uint8_t> helperHpkeConfig, in uint8_t measurement, + in Array<uint8_t> task_id, in uint64_t time_precision, + out Array<uint8_t> report); + void GetReportVecU8(in Array<uint8_t> leaderHpkeConfig, + in Array<uint8_t> helperHpkeConfig, + in Array<uint8_t> measurement, in Array<uint8_t> task_id, + in uint64_t time_precision, out Array<uint8_t> report); + void GetReportVecU16(in Array<uint8_t> leaderHpkeConfig, + in Array<uint8_t> helperHpkeConfig, + in Array<uint16_t> measurement, in Array<uint8_t> task_id, + in uint64_t time_precision, out Array<uint8_t> report); +}; diff --git a/toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js b/toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js new file mode 100644 index 0000000000..deb68bafef --- /dev/null +++ b/toolkit/components/telemetry/dap/tests/xpcshell/test_dap.js @@ -0,0 +1,149 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +"use strict"; + +const { HttpServer } = ChromeUtils.importESModule( + "resource://testing-common/httpd.sys.mjs" +); + +const lazy = {}; + +ChromeUtils.defineESModuleGetters(lazy, { + DAPTelemetrySender: "resource://gre/modules/DAPTelemetrySender.sys.mjs", +}); + +const BinaryOutputStream = Components.Constructor( + "@mozilla.org/binaryoutputstream;1", + "nsIBinaryOutputStream", + "setOutputStream" +); + +const BinaryInputStream = Components.Constructor( + "@mozilla.org/binaryinputstream;1", + "nsIBinaryInputStream", + "setInputStream" +); + +const PREF_LEADER = "toolkit.telemetry.dap_leader"; +const PREF_HELPER = "toolkit.telemetry.dap_helper"; + +let received = false; +let server; +let server_addr; + +const tasks = [ + { + // this is testing task 1 + id: "QjMD4n8l_MHBoLrbCfLTFi8hC264fC59SKHPviPF0q8", + leader_endpoint: null, + helper_endpoint: null, + time_precision: 300, + measurement_type: "u8", + }, + { + // this is testing task 2 + id: "DSZGMFh26hBYXNaKvhL_N4AHA3P5lDn19on1vFPBxJM", + leader_endpoint: null, + helper_endpoint: null, + time_precision: 300, + measurement_type: "vecu8", + }, +]; + +function hpkeConfigHandler(request, response) { + if ( + request.queryString == + "task_id=QjMD4n8l_MHBoLrbCfLTFi8hC264fC59SKHPviPF0q8" || + request.queryString == "task_id=DSZGMFh26hBYXNaKvhL_N4AHA3P5lDn19on1vFPBxJM" + ) { + let config_bytes; + if (request.path.startsWith("/leader")) { + config_bytes = new Uint8Array([ + 0, 41, 47, 0, 32, 0, 1, 0, 1, 0, 32, 11, 33, 206, 33, 131, 56, 220, 82, + 153, 110, 228, 200, 53, 98, 210, 38, 177, 197, 252, 198, 36, 201, 86, + 121, 169, 238, 220, 34, 143, 112, 177, 10, + ]); + } else { + config_bytes = new Uint8Array([ + 0, 41, 42, 0, 32, 0, 1, 0, 1, 0, 32, 28, 62, 242, 195, 117, 7, 173, 149, + 250, 15, 139, 178, 86, 241, 117, 143, 75, 26, 57, 60, 88, 130, 199, 175, + 195, 9, 241, 130, 61, 47, 215, 101, + ]); + } + response.setHeader("Content-Type", "application/dap-hpke-config"); + let bos = new BinaryOutputStream(response.bodyOutputStream); + bos.writeByteArray(config_bytes); + } else { + Assert.ok(false, `Unknown query string: ${request.queryString}`); + } +} + +function uploadHandler(request, response) { + Assert.equal( + request.getHeader("Content-Type"), + "application/dap-report", + "Wrong Content-Type header." + ); + + let body = new BinaryInputStream(request.bodyInputStream); + console.log(body.available()); + Assert.equal( + true, + body.available() == 406 || body.available() == 3654, + "Wrong request body size." + ); + received = true; + response.setStatusLine(request.httpVersion, 200); +} + +add_setup(async function () { + do_get_profile(); + Services.fog.initializeFOG(); + + // Set up a mock server to represent the DAP endpoints. 
+ server = new HttpServer(); + server.registerPathHandler("/leader_endpoint/hpke_config", hpkeConfigHandler); + server.registerPathHandler("/helper_endpoint/hpke_config", hpkeConfigHandler); + server.registerPrefixHandler("/leader_endpoint/tasks/", uploadHandler); + server.start(-1); + + const orig_leader = Services.prefs.getStringPref(PREF_LEADER); + const orig_helper = Services.prefs.getStringPref(PREF_HELPER); + const i = server.identity; + server_addr = i.primaryScheme + "://" + i.primaryHost + ":" + i.primaryPort; + Services.prefs.setStringPref(PREF_LEADER, server_addr + "/leader_endpoint"); + Services.prefs.setStringPref(PREF_HELPER, server_addr + "/helper_endpoint"); + registerCleanupFunction(() => { + Services.prefs.setStringPref(PREF_LEADER, orig_leader); + Services.prefs.setStringPref(PREF_HELPER, orig_helper); + + return new Promise(resolve => { + server.stop(resolve); + }); + }); +}); + +add_task(async function testVerificationTask() { + Services.fog.testResetFOG(); + let before = Glean.dap.uploadStatus.success.testGetValue() ?? 0; + await lazy.DAPTelemetrySender.sendTestReports(tasks, 5000); + let after = Glean.dap.uploadStatus.success.testGetValue() ?? 0; + + Assert.equal(before + 2, after, "Successful submissions should be counted."); + Assert.ok(received, "Report upload successful."); +}); + +add_task(async function testNetworkError() { + Services.fog.testResetFOG(); + let before = Glean.dap.reportGenerationStatus.failure.testGetValue() ?? 0; + Services.prefs.setStringPref(PREF_LEADER, server_addr + "/invalid-endpoint"); + await lazy.DAPTelemetrySender.sendTestReports(tasks, 5000); + let after = Glean.dap.reportGenerationStatus.failure.testGetValue() ?? 0; + Assert.equal( + before + 2, + after, + "Failed report generation should be counted." + ); +}); diff --git a/toolkit/components/telemetry/dap/tests/xpcshell/xpcshell.toml b/toolkit/components/telemetry/dap/tests/xpcshell/xpcshell.toml new file mode 100644 index 0000000000..6bdebc17f2 --- /dev/null +++ b/toolkit/components/telemetry/dap/tests/xpcshell/xpcshell.toml @@ -0,0 +1,4 @@ +[DEFAULT] + +["test_dap.js"] +skip-if = ["os == 'android'"] # DAP is not supported on Android |